source: libcfa/src/concurrency/locks.hfa@ bf7c7ea

//
// Cforall Version 1.0.0 Copyright (C) 2021 University of Waterloo
//
// The contents of this file are covered under the licence agreement in the
// file "LICENCE" distributed with Cforall.
//
// locks.hfa -- PUBLIC
// Runtime locks that are used with the runtime thread system.
//
// Author : Colby Alexander Parsons
// Created On : Thu Jan 21 19:46:50 2021
// Last Modified By :
// Last Modified On :
// Update Count :
//

#pragma once

#include <stdbool.h>
#include <stdio.h>

#include "bits/weakso_locks.hfa"
#include "containers/queueLockFree.hfa"
#include "containers/list.hfa"

#include "limits.hfa"
#include "thread.hfa"

#include "time_t.hfa"
#include "time.hfa"
//-----------------------------------------------------------------------------
// Semaphore
struct semaphore {
	__spinlock_t lock;
	int count;
	__queue_t(thread$) waiting;
};

void ?{}(semaphore & this, int count = 1);
void ^?{}(semaphore & this);
bool P (semaphore & this);
bool V (semaphore & this);
bool V (semaphore & this, unsigned count);
thread$ * V (semaphore & this, bool );

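// Example: a minimal producer/consumer sketch using P/V (the `worker` thread
// type and the loop bound are illustrative, not part of this header).
#if 0
semaphore items = { 0 };                    // count 0: P blocks until a matching V
thread worker {};
void main( worker & this ) {
	for ( int i = 0; i < 5; i += 1 ) P( items );   // consume: block until an item exists
}
int main() {
	worker w;                               // consumer starts running here
	for ( int i = 0; i < 5; i += 1 ) V( items );   // produce: each V credits/wakes one P
}                                           // w is joined implicitly at block exit
#endif
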
//----------
struct single_acquisition_lock {
	inline blocking_lock;
};

static inline void ?{}( single_acquisition_lock & this ) {((blocking_lock &)this){ false, false };}
static inline void ^?{}( single_acquisition_lock & this ) {}
static inline void lock ( single_acquisition_lock & this ) { lock ( (blocking_lock &)this ); }
static inline bool try_lock ( single_acquisition_lock & this ) { return try_lock( (blocking_lock &)this ); }
static inline void unlock ( single_acquisition_lock & this ) { unlock ( (blocking_lock &)this ); }
static inline size_t on_wait ( single_acquisition_lock & this ) { return on_wait ( (blocking_lock &)this ); }
static inline void on_wakeup( single_acquisition_lock & this, size_t v ) { on_wakeup ( (blocking_lock &)this, v ); }
static inline void on_notify( single_acquisition_lock & this, struct thread$ * t ) { on_notify( (blocking_lock &)this, t ); }

//----------
struct owner_lock {
	inline blocking_lock;
};

static inline void ?{}( owner_lock & this ) {((blocking_lock &)this){ true, true };}
static inline void ^?{}( owner_lock & this ) {}
static inline void lock ( owner_lock & this ) { lock ( (blocking_lock &)this ); }
static inline bool try_lock ( owner_lock & this ) { return try_lock( (blocking_lock &)this ); }
static inline void unlock ( owner_lock & this ) { unlock ( (blocking_lock &)this ); }
static inline size_t on_wait ( owner_lock & this ) { return on_wait ( (blocking_lock &)this ); }
static inline void on_wakeup( owner_lock & this, size_t v ) { on_wakeup ( (blocking_lock &)this, v ); }
static inline void on_notify( owner_lock & this, struct thread$ * t ) { on_notify( (blocking_lock &)this, t ); }

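// Example: both wrappers share the blocking_lock interface; the difference is
// that owner_lock is constructed with ownership and recursion enabled, so the
// owning thread may acquire it again (a sketch; `counter` is illustrative).
#if 0
owner_lock ol;
int counter = 0;
void add( int n ) {
	lock( ol );
	lock( ol );                             // ok for owner_lock; deadlocks a single_acquisition_lock
	counter += n;
	unlock( ol );
	unlock( ol );
}
#endif
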
//-----------------------------------------------------------------------------
// MCS Lock
struct mcs_node {
	mcs_node * volatile next;
	single_sem sem;
};

static inline void ?{}(mcs_node & this) { this.next = 0p; }

static inline mcs_node * volatile & ?`next ( mcs_node * node ) {
	return node->next;
}

struct mcs_lock {
	mcs_queue(mcs_node) queue;
};

static inline void lock(mcs_lock & l, mcs_node & n) {
	if(push(l.queue, &n))
		wait(n.sem);
}

static inline void unlock(mcs_lock & l, mcs_node & n) {
	mcs_node * next = advance(l.queue, &n);
	if(next) post(next->sem);
}

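// Example: an mcs_lock takes a caller-supplied queue node, typically stack
// allocated, which must stay alive from lock() to the matching unlock()
// (a sketch).
#if 0
mcs_lock l;
void critical() {
	mcs_node n;                             // one node per acquisition
	lock( l, n );                           // first in line proceeds, otherwise waits on n.sem
	// ... critical section ...
	unlock( l, n );                         // posts the successor's semaphore, if any
}
#endif
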
//-----------------------------------------------------------------------------
// MCS Spin Lock
// - No recursive acquisition
// - Needs to be released by owner

struct mcs_spin_node {
	mcs_spin_node * volatile next;
	volatile bool locked;
};

struct mcs_spin_queue {
	mcs_spin_node * volatile tail;
};

static inline void ?{}(mcs_spin_node & this) { this.next = 0p; this.locked = true; }

static inline mcs_spin_node * volatile & ?`next ( mcs_spin_node * node ) {
	return node->next;
}

struct mcs_spin_lock {
	mcs_spin_queue queue;
};

static inline void lock(mcs_spin_lock & l, mcs_spin_node & n) {
	mcs_spin_node * prev = __atomic_exchange_n(&l.queue.tail, &n, __ATOMIC_SEQ_CST);
	if(prev != 0p) {
		prev->next = &n;
		while(n.locked) Pause();
	}
}

static inline void unlock(mcs_spin_lock & l, mcs_spin_node & n) {
	mcs_spin_node * n_ptr = &n;
	if (!__atomic_compare_exchange_n(&l.queue.tail, &n_ptr, 0p, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST) ) {
		while (n.next == 0p) {}
		n.next->locked = false;
	}
}

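// Example: same node discipline as mcs_lock, but waiters spin on their own
// `locked` flag, which the predecessor clears in unlock (a sketch).
#if 0
mcs_spin_lock l;
void critical() {
	mcs_spin_node n;
	lock( l, n );
	// ... critical section ...
	unlock( l, n );
}
#endif
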
//-----------------------------------------------------------------------------
// CLH Spinlock
// - No recursive acquisition
// - Needs to be released by owner

struct clh_lock {
	volatile bool * volatile tail;
};

static inline void ?{}( clh_lock & this ) { this.tail = malloc(); *this.tail = true; }
static inline void ^?{}( clh_lock & this ) { free(this.tail); }

static inline void lock(clh_lock & l) {
	thread$ * curr_thd = active_thread();
	*(curr_thd->clh_node) = false;
	volatile bool * prev = __atomic_exchange_n((bool **)(&l.tail), (bool *)(curr_thd->clh_node), __ATOMIC_SEQ_CST);
	while(!__atomic_load_n(prev, __ATOMIC_ACQUIRE)) Pause();
	curr_thd->clh_prev = prev;
}

static inline void unlock(clh_lock & l) {
	thread$ * curr_thd = active_thread();
	__atomic_store_n(curr_thd->clh_node, true, __ATOMIC_RELEASE);
	curr_thd->clh_node = curr_thd->clh_prev;
}

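// Example: the CLH queue node lives on the thread descriptor (clh_node and
// clh_prev), so no node argument is passed; each unlock recycles the
// predecessor's node (a sketch).
#if 0
clh_lock l;
void critical() {
	lock( l );                              // spin until the predecessor's flag turns true
	// ... critical section ...
	unlock( l );
}
#endif
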
//-----------------------------------------------------------------------------
// Linear backoff Spinlock
struct linear_backoff_then_block_lock {
	// Spin lock used for mutual exclusion
	__spinlock_t spinlock;

	// Current thread owning the lock
	struct thread$ * owner;

	// List of blocked threads
	dlist( thread$ ) blocked_threads;

	// Used for comparing and exchanging
	volatile size_t lock_value;

	// used for linear backoff spinning
	int spin_start;
	int spin_end;
	int spin_count;

	// after unsuccessful linear backoff yield this many times
	int yield_count;
};

static inline void ?{}( linear_backoff_then_block_lock & this, int spin_start, int spin_end, int spin_count, int yield_count ) {
	this.spinlock{};
	this.blocked_threads{};
	this.lock_value = 0;
	this.spin_start = spin_start;
	this.spin_end = spin_end;
	this.spin_count = spin_count;
	this.yield_count = yield_count;
}
static inline void ?{}( linear_backoff_then_block_lock & this ) { this{4, 1024, 16, 0}; }
static inline void ^?{}( linear_backoff_then_block_lock & this ) {}
static inline void ?{}( linear_backoff_then_block_lock & this, linear_backoff_then_block_lock this2 ) = void;
static inline void ?=?( linear_backoff_then_block_lock & this, linear_backoff_then_block_lock this2 ) = void;

static inline bool internal_try_lock(linear_backoff_then_block_lock & this, size_t & compare_val) with(this) {
	if (__atomic_compare_exchange_n(&lock_value, &compare_val, 1, false, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)) {
		owner = active_thread();
		return true;
	}
	return false;
}

static inline bool try_lock(linear_backoff_then_block_lock & this) { size_t compare_val = 0; return internal_try_lock(this, compare_val); }

static inline bool try_lock_contention(linear_backoff_then_block_lock & this) with(this) {
	if (__atomic_exchange_n(&lock_value, 2, __ATOMIC_ACQUIRE) == 0) {
		owner = active_thread();
		return true;
	}
	return false;
}

static inline bool block(linear_backoff_then_block_lock & this) with(this) {
	lock( spinlock __cfaabi_dbg_ctx2 );
	if (lock_value != 2) {
		unlock( spinlock );
		return true;
	}
	insert_last( blocked_threads, *active_thread() );
	unlock( spinlock );
	park( );
	return true;
}

static inline void lock(linear_backoff_then_block_lock & this) with(this) {
	// if owner just return
	if (active_thread() == owner) return;
	size_t compare_val = 0;
	int spin = spin_start;
	// linear backoff
	for( ;; ) {
		compare_val = 0;
		if (internal_try_lock(this, compare_val)) return;
		if (2 == compare_val) break;
		for (int i = 0; i < spin; i++) Pause();
		if (spin >= spin_end) break;
		spin += spin;
	}

	if(2 != compare_val && try_lock_contention(this)) return;
	// block until signalled
	while (block(this)) if(try_lock_contention(this)) return;
}

static inline void unlock(linear_backoff_then_block_lock & this) with(this) {
	verify(lock_value > 0);
	owner = 0p;
	if (__atomic_exchange_n(&lock_value, 0, __ATOMIC_RELEASE) == 1) return;
	lock( spinlock __cfaabi_dbg_ctx2 );
	thread$ * t = &try_pop_front( blocked_threads );
	unlock( spinlock );
	unpark( t );
}

static inline void on_notify(linear_backoff_then_block_lock & this, struct thread$ * t ) { unpark(t); }
static inline size_t on_wait(linear_backoff_then_block_lock & this) { unlock(this); return 0; }
static inline void on_wakeup(linear_backoff_then_block_lock & this, size_t recursion ) { lock(this); }

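// Example: the spin parameters are tunable at construction; the default is
// {4, 1024, 16, 0} (a sketch; the values below are illustrative, not
// recommendations).
#if 0
linear_backoff_then_block_lock l = { 8, 2048, 16, 0 }; // spin_start, spin_end, spin_count, yield_count
void critical() {
	lock( l );                              // spin with growing backoff, then park
	// ... critical section ...
	unlock( l );                            // unparks a blocked waiter if one exists
}
#endif
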
//-----------------------------------------------------------------------------
// Fast Block Lock

// minimal blocking lock
// - No reacquire for cond var
// - No recursive acquisition
// - No ownership
struct fast_block_lock {
	// List of blocked threads
	dlist( thread$ ) blocked_threads;

	// Spin lock used for mutual exclusion
	__spinlock_t lock;

	// flag showing if lock is held
	bool held:1;

	#ifdef __CFA_DEBUG__
	// for deadlock detection
	struct thread$ * owner;
	#endif
};

static inline void ?{}( fast_block_lock & this ) with(this) {
	lock{};
	blocked_threads{};
	held = false;
}
static inline void ^?{}( fast_block_lock & this ) {}
static inline void ?{}( fast_block_lock & this, fast_block_lock this2 ) = void;
static inline void ?=?( fast_block_lock & this, fast_block_lock this2 ) = void;

// if this is called recursively IT WILL DEADLOCK!!!!!
static inline void lock(fast_block_lock & this) with(this) {
	lock( lock __cfaabi_dbg_ctx2 );

	#ifdef __CFA_DEBUG__
	assert(!(held && owner == active_thread()));
	#endif
	if (held) {
		insert_last( blocked_threads, *active_thread() );
		unlock( lock );
		park( );
		return;
	}
	held = true;
	#ifdef __CFA_DEBUG__
	owner = active_thread();
	#endif
	unlock( lock );
}

static inline void unlock(fast_block_lock & this) with(this) {
	lock( lock __cfaabi_dbg_ctx2 );
	/* paranoid */ verifyf( held != false, "Attempt to release lock %p that isn't held", &this );
	thread$ * t = &try_pop_front( blocked_threads );
	held = ( t ? true : false );
	#ifdef __CFA_DEBUG__
	owner = ( t ? t : 0p );
	#endif
	unpark( t );
	unlock( lock );
}

static inline void on_notify(fast_block_lock & this, struct thread$ * t ) { unpark(t); }
static inline size_t on_wait(fast_block_lock & this) { unlock(this); return 0; }
static inline void on_wakeup(fast_block_lock & this, size_t recursion ) { }

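// Example: fast_block_lock is the smallest blocking lock here; it is neither
// recursive nor owner-checked (outside debug builds), so a repeated lock by
// the same thread deadlocks (a sketch).
#if 0
fast_block_lock l;
void critical() {
	lock( l );                              // parks if held; woken by a direct handoff
	// ... critical section ...
	unlock( l );
}
#endif
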
//-----------------------------------------------------------------------------
// simple_owner_lock

// pthread owner lock
// - reacquire for cond var
// - recursive acquisition
// - ownership
struct simple_owner_lock {
	// List of blocked threads
	dlist( thread$ ) blocked_threads;

	// Spin lock used for mutual exclusion
	__spinlock_t lock;

	// owner showing if lock is held
	struct thread$ * owner;

	size_t recursion_count;
};

static inline void ?{}( simple_owner_lock & this ) with(this) {
	lock{};
	blocked_threads{};
	owner = 0p;
	recursion_count = 0;
}
static inline void ^?{}( simple_owner_lock & this ) {}
static inline void ?{}( simple_owner_lock & this, simple_owner_lock this2 ) = void;
static inline void ?=?( simple_owner_lock & this, simple_owner_lock this2 ) = void;

static inline void lock(simple_owner_lock & this) with(this) {
	if (owner == active_thread()) {
		recursion_count++;
		return;
	}
	lock( lock __cfaabi_dbg_ctx2 );

	if (owner != 0p) {
		insert_last( blocked_threads, *active_thread() );
		unlock( lock );
		park( );
		return;
	}
	owner = active_thread();
	recursion_count = 1;
	unlock( lock );
}

// TODO: fix duplicate def issue and bring this back
// void pop_and_set_new_owner( simple_owner_lock & this ) with( this ) {
//	thread$ * t = &try_pop_front( blocked_threads );
//	owner = t;
//	recursion_count = ( t ? 1 : 0 );
//	unpark( t );
// }

static inline void unlock(simple_owner_lock & this) with(this) {
	lock( lock __cfaabi_dbg_ctx2 );
	/* paranoid */ verifyf( owner != 0p, "Attempt to release lock %p that isn't held", &this );
	/* paranoid */ verifyf( owner == active_thread(), "Thread %p other than the owner %p attempted to release owner lock %p", owner, active_thread(), &this );
	// if recursion count is zero release lock and set new owner if one is waiting
	recursion_count--;
	if ( recursion_count == 0 ) {
		// pop_and_set_new_owner( this );
		thread$ * t = &try_pop_front( blocked_threads );
		owner = t;
		recursion_count = ( t ? 1 : 0 );
		unpark( t );
	}
	unlock( lock );
}

static inline void on_notify(simple_owner_lock & this, struct thread$ * t ) with(this) {
	lock( lock __cfaabi_dbg_ctx2 );
	// lock held
	if ( owner != 0p ) {
		insert_last( blocked_threads, *t );
		unlock( lock );
	}
	// lock not held
	else {
		owner = t;
		recursion_count = 1;
		unpark( t );
		unlock( lock );
	}
}

static inline size_t on_wait(simple_owner_lock & this) with(this) {
	lock( lock __cfaabi_dbg_ctx2 );
	/* paranoid */ verifyf( owner != 0p, "Attempt to release lock %p that isn't held", &this );
	/* paranoid */ verifyf( owner == active_thread(), "Thread %p other than the owner %p attempted to release owner lock %p", owner, active_thread(), &this );

	size_t ret = recursion_count;

	// pop_and_set_new_owner( this );

	thread$ * t = &try_pop_front( blocked_threads );
	owner = t;
	recursion_count = ( t ? 1 : 0 );
	unpark( t );

	unlock( lock );
	return ret;
}

static inline void on_wakeup(simple_owner_lock & this, size_t recursion ) with(this) { recursion_count = recursion; }

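// Example: recursive acquisition with simple_owner_lock; the lock is only
// released when the recursion count returns to zero (a sketch).
#if 0
simple_owner_lock l;
void inner() { lock( l ); /* ... */ unlock( l ); }
void outer() {
	lock( l );                              // recursion_count = 1
	inner();                                // recursion_count 2, then back to 1
	unlock( l );                            // recursion_count 0: next waiter becomes owner
}
#endif
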
//-----------------------------------------------------------------------------
// Spin Queue Lock

// - No reacquire for cond var
// - No recursive acquisition
// - No ownership
// - spin lock with no locking/atomics in unlock
struct spin_queue_lock {
	// Spin lock used for mutual exclusion
	mcs_spin_lock lock;

	// flag showing if lock is held
	volatile bool held;

	#ifdef __CFA_DEBUG__
	// for deadlock detection
	struct thread$ * owner;
	#endif
};

static inline void ?{}( spin_queue_lock & this ) with(this) {
	lock{};
	held = false;
}
static inline void ^?{}( spin_queue_lock & this ) {}
static inline void ?{}( spin_queue_lock & this, spin_queue_lock this2 ) = void;
static inline void ?=?( spin_queue_lock & this, spin_queue_lock this2 ) = void;

// if this is called recursively IT WILL DEADLOCK!!!!!
static inline void lock(spin_queue_lock & this) with(this) {
	mcs_spin_node node;
	#ifdef __CFA_DEBUG__
	assert(!(held && owner == active_thread()));
	#endif
	lock( lock, node );
	while(held) Pause();
	held = true;
	unlock( lock, node );
	#ifdef __CFA_DEBUG__
	owner = active_thread();
	#endif
}

static inline void unlock(spin_queue_lock & this) with(this) {
	#ifdef __CFA_DEBUG__
	owner = 0p;
	#endif
	held = false;
}

static inline void on_notify(spin_queue_lock & this, struct thread$ * t ) { unpark(t); }
static inline size_t on_wait(spin_queue_lock & this) { unlock(this); return 0; }
static inline void on_wakeup(spin_queue_lock & this, size_t recursion ) { }

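// Example: spin_queue_lock and the two variants below share this
// non-recursive interface; they differ only in how threads wait (a sketch).
#if 0
spin_queue_lock l;
void critical() {
	lock( l );                              // queue through the mcs_spin_lock, then spin on held
	// ... critical section ...
	unlock( l );                            // a single store; no atomics or queue work
}
#endif
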

//-----------------------------------------------------------------------------
// MCS Block Spin Lock

// - No reacquire for cond var
// - No recursive acquisition
// - No ownership
// - Like spin_queue_lock, except threads other than the first block instead of spinning
struct mcs_block_spin_lock {
	// Spin lock used for mutual exclusion
	mcs_lock lock;

	// flag showing if lock is held
	volatile bool held;

	#ifdef __CFA_DEBUG__
	// for deadlock detection
	struct thread$ * owner;
	#endif
};

static inline void ?{}( mcs_block_spin_lock & this ) with(this) {
	lock{};
	held = false;
}
static inline void ^?{}( mcs_block_spin_lock & this ) {}
static inline void ?{}( mcs_block_spin_lock & this, mcs_block_spin_lock this2 ) = void;
static inline void ?=?( mcs_block_spin_lock & this, mcs_block_spin_lock this2 ) = void;

// if this is called recursively IT WILL DEADLOCK!!!!!
static inline void lock(mcs_block_spin_lock & this) with(this) {
	mcs_node node;
	#ifdef __CFA_DEBUG__
	assert(!(held && owner == active_thread()));
	#endif
	lock( lock, node );
	while(held) Pause();
	held = true;
	unlock( lock, node );
	#ifdef __CFA_DEBUG__
	owner = active_thread();
	#endif
}

static inline void unlock(mcs_block_spin_lock & this) with(this) {
	#ifdef __CFA_DEBUG__
	owner = 0p;
	#endif
	held = false;
}

static inline void on_notify(mcs_block_spin_lock & this, struct thread$ * t ) { unpark(t); }
static inline size_t on_wait(mcs_block_spin_lock & this) { unlock(this); return 0; }
static inline void on_wakeup(mcs_block_spin_lock & this, size_t recursion ) { }

//-----------------------------------------------------------------------------
// Block Spin Lock

// - No reacquire for cond var
// - No recursive acquisition
// - No ownership
// - Like spin_queue_lock, except threads other than the first block instead of spinning
struct block_spin_lock {
	// Spin lock used for mutual exclusion
	fast_block_lock lock;

	// flag showing if lock is held
	volatile bool held;

	#ifdef __CFA_DEBUG__
	// for deadlock detection
	struct thread$ * owner;
	#endif
};

static inline void ?{}( block_spin_lock & this ) with(this) {
	lock{};
	held = false;
}
static inline void ^?{}( block_spin_lock & this ) {}
static inline void ?{}( block_spin_lock & this, block_spin_lock this2 ) = void;
static inline void ?=?( block_spin_lock & this, block_spin_lock this2 ) = void;

// if this is called recursively IT WILL DEADLOCK!!!!!
static inline void lock(block_spin_lock & this) with(this) {
	#ifdef __CFA_DEBUG__
	assert(!(held && owner == active_thread()));
	#endif
	lock( lock );
	while(held) Pause();
	held = true;
	unlock( lock );
	#ifdef __CFA_DEBUG__
	owner = active_thread();
	#endif
}

static inline void unlock(block_spin_lock & this) with(this) {
	#ifdef __CFA_DEBUG__
	owner = 0p;
	#endif
	held = false;
}

static inline void on_notify(block_spin_lock & this, struct thread$ * t ) { unpark(t); }
static inline size_t on_wait(block_spin_lock & this) { unlock(this); return 0; }
static inline void on_wakeup(block_spin_lock & this, size_t recursion ) { }

//-----------------------------------------------------------------------------
// is_blocking_lock
trait is_blocking_lock(L & | sized(L)) {
	// For synchronization locks to use when acquiring
	void on_notify( L &, struct thread$ * );

	// For synchronization locks to use when releasing
	size_t on_wait( L & );

	// to set recursion count after getting signalled
	void on_wakeup( L &, size_t recursion );
};

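// Example: a routine generic over any lock satisfying the trait (a sketch;
// `baton_pass` is illustrative and not part of this header).
#if 0
forall( L & | is_blocking_lock( L ) )
void baton_pass( L & l, thread$ * waiter ) {
	size_t depth = on_wait( l );            // release l, remembering the recursion depth
	on_notify( l, waiter );                 // queue/wake waiter on l
	// the woken thread restores the depth with on_wakeup( l, depth )
}
#endif
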
//-----------------------------------------------------------------------------
// info_thread
// the info thread is a wrapper around a thread used
// to store extra data for use in the condition variable
forall(L & | is_blocking_lock(L)) {
	struct info_thread;

	// for use by sequence
	// info_thread(L) *& Back( info_thread(L) * this );
	// info_thread(L) *& Next( info_thread(L) * this );
}

//-----------------------------------------------------------------------------
// Synchronization Locks
forall(L & | is_blocking_lock(L)) {

	//-----------------------------------------------------------------------------
	// condition_variable

	// The multi-tool condition variable
	// - can pass timeouts to wait for either a signal or timeout
	// - can wait without passing a lock
	// - can have waiters reacquire different locks while waiting on the same cond var
	// - has shadow queue
	// - can be signalled outside of critical sections with no locks held
	struct condition_variable {
		// Spin lock used for mutual exclusion
		__spinlock_t lock;

		// List of blocked threads
		dlist( info_thread(L) ) blocked_threads;

		// Count of current blocked threads
		int count;
	};

	void ?{}( condition_variable(L) & this );
	void ^?{}( condition_variable(L) & this );

	bool notify_one( condition_variable(L) & this );
	bool notify_all( condition_variable(L) & this );

	uintptr_t front( condition_variable(L) & this );

	bool empty ( condition_variable(L) & this );
	int counter( condition_variable(L) & this );

	void wait( condition_variable(L) & this );
	void wait( condition_variable(L) & this, uintptr_t info );
	bool wait( condition_variable(L) & this, Duration duration );
	bool wait( condition_variable(L) & this, uintptr_t info, Duration duration );

	void wait( condition_variable(L) & this, L & l );
	void wait( condition_variable(L) & this, L & l, uintptr_t info );
	bool wait( condition_variable(L) & this, L & l, Duration duration );
	bool wait( condition_variable(L) & this, L & l, uintptr_t info, Duration duration );

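	// Example: a guarded wait; wait( cv, l ) releases l via on_wait while
	// parked and restores it via on_wakeup before returning (a sketch;
	// `ready` is illustrative).
	#if 0
	single_acquisition_lock l;
	condition_variable( single_acquisition_lock ) cv;
	bool ready = false;
	void consumer() {
		lock( l );
		while ( ! ready ) wait( cv, l );    // l is released while blocked, held again on return
		unlock( l );
	}
	void producer() {
		lock( l );
		ready = true;
		unlock( l );
		notify_one( cv );                   // this cond var permits signalling without the lock
	}
	#endif
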
	//-----------------------------------------------------------------------------
	// fast_cond_var

	// The trimmed and slim condition variable
	// - no internal lock so you must hold a lock while using this cond var
	// - signalling without holding branded lock is UNSAFE!
	// - only allows usage of one lock, cond var is branded after usage

	struct fast_cond_var {
		// List of blocked threads
		dlist( info_thread(L) ) blocked_threads;
		#ifdef __CFA_DEBUG__
		L * lock_used;
		#endif
	};

	void ?{}( fast_cond_var(L) & this );
	void ^?{}( fast_cond_var(L) & this );

	bool notify_one( fast_cond_var(L) & this );
	bool notify_all( fast_cond_var(L) & this );

	uintptr_t front( fast_cond_var(L) & this );
	bool empty ( fast_cond_var(L) & this );

	void wait( fast_cond_var(L) & this, L & l );
	void wait( fast_cond_var(L) & this, L & l, uintptr_t info );

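	// Example: with fast_cond_var the branded lock must be held to wait and
	// to notify; the first wait brands the cond var to that lock (a sketch).
	#if 0
	fast_block_lock l;
	fast_cond_var( fast_block_lock ) cv;
	void consumer() {
		lock( l );
		wait( cv, l );                      // releases and reacquires l around the block
		unlock( l );
	}
	void producer() {
		lock( l );                          // notifying without holding l is UNSAFE
		notify_one( cv );
		unlock( l );
	}
	#endif
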
	//-----------------------------------------------------------------------------
	// pthread_cond_var
	//
	// - cond var with minimal footprint
	// - supports operations needed for pthread cond

	struct pthread_cond_var {
		dlist( info_thread(L) ) blocked_threads;
		__spinlock_t lock;
	};

	void ?{}( pthread_cond_var(L) & this );
	void ^?{}( pthread_cond_var(L) & this );

	bool notify_one( pthread_cond_var(L) & this );
	bool notify_all( pthread_cond_var(L) & this );

	uintptr_t front( pthread_cond_var(L) & this );
	bool empty ( pthread_cond_var(L) & this );

	void wait( pthread_cond_var(L) & this, L & l );
	void wait( pthread_cond_var(L) & this, L & l, uintptr_t info );
	bool wait( pthread_cond_var(L) & this, L & l, timespec t );
	bool wait( pthread_cond_var(L) & this, L & l, uintptr_t info, timespec t );
}
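
// Example: the timespec overloads support pthread_cond_timedwait-style waits;
// the bool result reports how the wait ended (a sketch; the deadline value is
// illustrative, and whether it is absolute or relative is not specified here).
#if 0
simple_owner_lock l;
pthread_cond_var( simple_owner_lock ) cv;
void timed_consumer() {
	lock( l );
	timespec t = { 1, 0 };                  // illustrative timeout
	bool r = wait( cv, l, t );              // returns when signalled or when t expires
	unlock( l );
}
#endif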