source: libcfa/src/concurrency/locks.hfa @ 639e4fc

Last change on this file since 639e4fc was 88ac843e, checked in by Thierry Delisle <tdelisle@…>, 2 years ago

Moved lockfree containers to containers/lockfree.hfa.
Added poison_list, which is a lock-free bag with push and poison as only operations.

//
// Cforall Version 1.0.0 Copyright (C) 2021 University of Waterloo
//
// The contents of this file are covered under the licence agreement in the
// file "LICENCE" distributed with Cforall.
//
// locks.hfa -- PUBLIC
// Runtime locks that are used with the runtime thread system.
//
// Author           : Colby Alexander Parsons
// Created On       : Thu Jan 21 19:46:50 2021
// Last Modified By :
// Last Modified On :
// Update Count     :
//

#pragma once

#include <stdbool.h>
#include <stdio.h>

#include "bits/weakso_locks.hfa"
#include "containers/lockfree.hfa"
#include "containers/list.hfa"

#include "limits.hfa"
#include "thread.hfa"

#include "time_t.hfa"
#include "time.hfa"

//-----------------------------------------------------------------------------
// Semaphore
struct semaphore {
	__spinlock_t lock;
	int count;
	__queue_t(thread$) waiting;
};

void  ?{}(semaphore & this, int count = 1);
void ^?{}(semaphore & this);
bool   P (semaphore & this);
bool   V (semaphore & this);
bool   V (semaphore & this, unsigned count);
thread$ * V (semaphore & this, bool );

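// Usage sketch (illustrative only, not part of the header): a counting semaphore used
// for simple signalling; "ready" is a hypothetical name and the producer/worker split
// is an assumed scenario.
//   semaphore ready = { 0 };              // start with no permits
//   ... worker thread:   P( ready );      // block until a permit is available
//   ... producer thread: V( ready );      // release one permit, waking a waiter
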
//----------
struct single_acquisition_lock {
	inline blocking_lock;
};

static inline void  ?{}( single_acquisition_lock & this ) {((blocking_lock &)this){ false, false };}
static inline void ^?{}( single_acquisition_lock & this ) {}
static inline void   lock     ( single_acquisition_lock & this ) { lock    ( (blocking_lock &)this ); }
static inline bool   try_lock ( single_acquisition_lock & this ) { return try_lock( (blocking_lock &)this ); }
static inline void   unlock   ( single_acquisition_lock & this ) { unlock  ( (blocking_lock &)this ); }
static inline size_t on_wait  ( single_acquisition_lock & this ) { return on_wait ( (blocking_lock &)this ); }
static inline void   on_wakeup( single_acquisition_lock & this, size_t v ) { on_wakeup ( (blocking_lock &)this, v ); }
static inline void   on_notify( single_acquisition_lock & this, struct thread$ * t ) { on_notify( (blocking_lock &)this, t ); }

//----------
struct owner_lock {
	inline blocking_lock;
};

static inline void  ?{}( owner_lock & this ) {((blocking_lock &)this){ true, true };}
static inline void ^?{}( owner_lock & this ) {}
static inline void   lock     ( owner_lock & this ) { lock    ( (blocking_lock &)this ); }
static inline bool   try_lock ( owner_lock & this ) { return try_lock( (blocking_lock &)this ); }
static inline void   unlock   ( owner_lock & this ) { unlock  ( (blocking_lock &)this ); }
static inline size_t on_wait  ( owner_lock & this ) { return on_wait ( (blocking_lock &)this ); }
static inline void   on_wakeup( owner_lock & this, size_t v ) { on_wakeup ( (blocking_lock &)this, v ); }
static inline void   on_notify( owner_lock & this, struct thread$ * t ) { on_notify( (blocking_lock &)this, t ); }

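// Usage sketch (illustrative): both wrappers forward to blocking_lock and differ only
// in the two boolean flags passed to its constructor (both false for
// single_acquisition_lock, both true for owner_lock).
//   owner_lock m;
//   lock( m );
//   ... critical section ...
//   unlock( m );
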
//-----------------------------------------------------------------------------
// MCS Lock
struct mcs_node {
	mcs_node * volatile next;
	single_sem sem;
};

static inline void ?{}(mcs_node & this) { this.next = 0p; }

static inline mcs_node * volatile & ?`next ( mcs_node * node ) {
	return node->next;
}

struct mcs_lock {
	mcs_queue(mcs_node) queue;
};

static inline void lock(mcs_lock & l, mcs_node & n) {
	if(push(l.queue, &n))
		wait(n.sem);
}

static inline void unlock(mcs_lock & l, mcs_node & n) {
	mcs_node * next = advance(l.queue, &n);
	if(next) post(next->sem);
}

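// Usage sketch (illustrative): each acquisition supplies its own queue node, normally a
// stack local, and the same node must be passed to the matching unlock.
//   mcs_lock l;
//   ...
//   mcs_node n;
//   lock( l, n );
//   ... critical section ...
//   unlock( l, n );
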
//-----------------------------------------------------------------------------
// MCS Spin Lock
// - No recursive acquisition
// - Needs to be released by owner

struct mcs_spin_node {
	mcs_spin_node * volatile next;
	volatile bool locked;
};

struct mcs_spin_queue {
	mcs_spin_node * volatile tail;
};

static inline void ?{}(mcs_spin_node & this) { this.next = 0p; this.locked = true; }

static inline mcs_spin_node * volatile & ?`next ( mcs_spin_node * node ) {
	return node->next;
}

struct mcs_spin_lock {
	mcs_spin_queue queue;
};

static inline void lock(mcs_spin_lock & l, mcs_spin_node & n) {
	mcs_spin_node * prev = __atomic_exchange_n(&l.queue.tail, &n, __ATOMIC_SEQ_CST);
	n.locked = true;
	if(prev == 0p) return;
	prev->next = &n;
	while(__atomic_load_n(&n.locked, __ATOMIC_RELAXED)) Pause();
}

static inline void unlock(mcs_spin_lock & l, mcs_spin_node & n) {
	mcs_spin_node * n_ptr = &n;
	if (__atomic_compare_exchange_n(&l.queue.tail, &n_ptr, 0p, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) return;
	while (__atomic_load_n(&n.next, __ATOMIC_RELAXED) == 0p) {}
	n.next->locked = false;
}

//-----------------------------------------------------------------------------
// CLH Spinlock
// - No recursive acquisition
// - Needs to be released by owner

struct clh_lock {
	volatile bool * volatile tail;
};

static inline void  ?{}( clh_lock & this ) { this.tail = malloc(); *this.tail = true; }
static inline void ^?{}( clh_lock & this ) { free(this.tail); }

static inline void lock(clh_lock & l) {
	thread$ * curr_thd = active_thread();
	*(curr_thd->clh_node) = false;
	volatile bool * prev = __atomic_exchange_n((bool **)(&l.tail), (bool *)(curr_thd->clh_node), __ATOMIC_SEQ_CST);
	while(!__atomic_load_n(prev, __ATOMIC_ACQUIRE)) Pause();
	curr_thd->clh_prev = prev;
}

static inline void unlock(clh_lock & l) {
	thread$ * curr_thd = active_thread();
	__atomic_store_n(curr_thd->clh_node, true, __ATOMIC_RELEASE);
	curr_thd->clh_node = curr_thd->clh_prev;
}

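// Usage sketch (illustrative): unlike the MCS locks, the CLH queue nodes live in the
// thread descriptor (clh_node/clh_prev), so callers pass only the lock itself.
//   clh_lock l;
//   lock( l );       // spins until the previous holder releases its node
//   ... critical section ...
//   unlock( l );
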
//-----------------------------------------------------------------------------
// Linear backoff Spinlock
struct linear_backoff_then_block_lock {
	// Spin lock used for mutual exclusion
	__spinlock_t spinlock;

	// Current thread owning the lock
	struct thread$ * owner;

	// List of blocked threads
	dlist( thread$ ) blocked_threads;

	// Used for comparing and exchanging
	volatile size_t lock_value;

	// used for linear backoff spinning
	int spin_start;
	int spin_end;
	int spin_count;

	// after unsuccessful linear backoff yield this many times
	int yield_count;
};

static inline void  ?{}( linear_backoff_then_block_lock & this, int spin_start, int spin_end, int spin_count, int yield_count ) {
	this.spinlock{};
	this.blocked_threads{};
	this.lock_value = 0;
	this.spin_start = spin_start;
	this.spin_end = spin_end;
	this.spin_count = spin_count;
	this.yield_count = yield_count;
}
static inline void  ?{}( linear_backoff_then_block_lock & this ) { this{4, 1024, 16, 0}; }
static inline void ^?{}( linear_backoff_then_block_lock & this ) {}
static inline void ?{}( linear_backoff_then_block_lock & this, linear_backoff_then_block_lock this2 ) = void;
static inline void ?=?( linear_backoff_then_block_lock & this, linear_backoff_then_block_lock this2 ) = void;

static inline bool internal_try_lock(linear_backoff_then_block_lock & this, size_t & compare_val) with(this) {
	if (__atomic_compare_exchange_n(&lock_value, &compare_val, 1, false, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)) {
		owner = active_thread();
		return true;
	}
	return false;
}

static inline bool try_lock(linear_backoff_then_block_lock & this) { size_t compare_val = 0; return internal_try_lock(this, compare_val); }

static inline bool try_lock_contention(linear_backoff_then_block_lock & this) with(this) {
	if (__atomic_exchange_n(&lock_value, 2, __ATOMIC_ACQUIRE) == 0) {
		owner = active_thread();
		return true;
	}
	return false;
}

static inline bool block(linear_backoff_then_block_lock & this) with(this) {
	lock( spinlock __cfaabi_dbg_ctx2 );
	if (lock_value != 2) {
		unlock( spinlock );
		return true;
	}
	insert_last( blocked_threads, *active_thread() );
	unlock( spinlock );
	park( );
	return true;
}

static inline void lock(linear_backoff_then_block_lock & this) with(this) {
	// if owner just return
	if (active_thread() == owner) return;
	size_t compare_val = 0;
	int spin = spin_start;
	// linear backoff
	for( ;; ) {
		compare_val = 0;
		if (internal_try_lock(this, compare_val)) return;
		if (2 == compare_val) break;
		for (int i = 0; i < spin; i++) Pause();
		if (spin >= spin_end) break;
		spin += spin;
	}

	if(2 != compare_val && try_lock_contention(this)) return;
	// block until signalled
	while (block(this)) if(try_lock_contention(this)) return;
}

static inline void unlock(linear_backoff_then_block_lock & this) with(this) {
	verify(lock_value > 0);
	owner = 0p;
	if (__atomic_exchange_n(&lock_value, 0, __ATOMIC_RELEASE) == 1) return;
	lock( spinlock __cfaabi_dbg_ctx2 );
	thread$ * t = &try_pop_front( blocked_threads );
	unlock( spinlock );
	unpark( t );
}

static inline void on_notify(linear_backoff_then_block_lock & this, struct thread$ * t ) { unpark(t); }
static inline size_t on_wait(linear_backoff_then_block_lock & this) { unlock(this); return 0; }
static inline void on_wakeup(linear_backoff_then_block_lock & this, size_t recursion ) { lock(this); }

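// Usage sketch (illustrative): the default constructor uses {4, 1024, 16, 0}; the backoff
// can be tuned by passing spin_start, spin_end, spin_count and yield_count explicitly
// (the values below are arbitrary examples, not recommendations).
//   linear_backoff_then_block_lock l;                          // default backoff
//   linear_backoff_then_block_lock tuned = { 8, 2048, 16, 0 };
//   lock( tuned );
//   ... critical section ...
//   unlock( tuned );
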
//-----------------------------------------------------------------------------
// Fast Block Lock

// minimal blocking lock
// - No reacquire for cond var
// - No recursive acquisition
// - No ownership
struct fast_block_lock {
	// List of blocked threads
	dlist( thread$ ) blocked_threads;

	// Spin lock used for mutual exclusion
	__spinlock_t lock;

	// flag showing if lock is held
	bool held:1;

	#ifdef __CFA_DEBUG__
	// for deadlock detection
	struct thread$ * owner;
	#endif
};

static inline void  ?{}( fast_block_lock & this ) with(this) {
	lock{};
	blocked_threads{};
	held = false;
}
static inline void ^?{}( fast_block_lock & this ) {}
static inline void ?{}( fast_block_lock & this, fast_block_lock this2 ) = void;
static inline void ?=?( fast_block_lock & this, fast_block_lock this2 ) = void;

// if this is called recursively IT WILL DEADLOCK!!!!!
static inline void lock(fast_block_lock & this) with(this) {
	lock( lock __cfaabi_dbg_ctx2 );

	#ifdef __CFA_DEBUG__
	assert(!(held && owner == active_thread()));
	#endif
	if (held) {
		insert_last( blocked_threads, *active_thread() );
		unlock( lock );
		park( );
		return;
	}
	held = true;
	#ifdef __CFA_DEBUG__
	owner = active_thread();
	#endif
	unlock( lock );
}

static inline void unlock(fast_block_lock & this) with(this) {
	lock( lock __cfaabi_dbg_ctx2 );
	/* paranoid */ verifyf( held != false, "Attempt to release lock %p that isn't held", &this );
	thread$ * t = &try_pop_front( blocked_threads );
	held = ( t ? true : false );
	#ifdef __CFA_DEBUG__
	owner = ( t ? t : 0p );
	#endif
	unpark( t );
	unlock( lock );
}

static inline void on_notify(fast_block_lock & this, struct thread$ * t ) { unpark(t); }
static inline size_t on_wait(fast_block_lock & this) { unlock(this); return 0; }
static inline void on_wakeup(fast_block_lock & this, size_t recursion ) { }

//-----------------------------------------------------------------------------
// simple_owner_lock

// pthread owner lock
// - reacquire for cond var
// - recursive acquisition
// - ownership
struct simple_owner_lock {
	// List of blocked threads
	dlist( thread$ ) blocked_threads;

	// Spin lock used for mutual exclusion
	__spinlock_t lock;

	// owner showing if lock is held
	struct thread$ * owner;

	size_t recursion_count;
};

static inline void  ?{}( simple_owner_lock & this ) with(this) {
	lock{};
	blocked_threads{};
	owner = 0p;
	recursion_count = 0;
}
static inline void ^?{}( simple_owner_lock & this ) {}
static inline void ?{}( simple_owner_lock & this, simple_owner_lock this2 ) = void;
static inline void ?=?( simple_owner_lock & this, simple_owner_lock this2 ) = void;

static inline void lock(simple_owner_lock & this) with(this) {
	if (owner == active_thread()) {
		recursion_count++;
		return;
	}
	lock( lock __cfaabi_dbg_ctx2 );

	if (owner != 0p) {
		insert_last( blocked_threads, *active_thread() );
		unlock( lock );
		park( );
		return;
	}
	owner = active_thread();
	recursion_count = 1;
	unlock( lock );
}

// TODO: fix duplicate def issue and bring this back
// void pop_and_set_new_owner( simple_owner_lock & this ) with( this ) {
	// thread$ * t = &try_pop_front( blocked_threads );
	// owner = t;
	// recursion_count = ( t ? 1 : 0 );
	// unpark( t );
// }

static inline void unlock(simple_owner_lock & this) with(this) {
	lock( lock __cfaabi_dbg_ctx2 );
	/* paranoid */ verifyf( owner != 0p, "Attempt to release lock %p that isn't held", &this );
	/* paranoid */ verifyf( owner == active_thread(), "Thread %p other than the owner %p attempted to release owner lock %p", owner, active_thread(), &this );
	// decrement the recursion count; when it reaches zero, release the lock and set a new owner if one is waiting
	recursion_count--;
	if ( recursion_count == 0 ) {
		// pop_and_set_new_owner( this );
		thread$ * t = &try_pop_front( blocked_threads );
		owner = t;
		recursion_count = ( t ? 1 : 0 );
		unpark( t );
	}
	unlock( lock );
}

static inline void on_notify(simple_owner_lock & this, struct thread$ * t ) with(this) {
	lock( lock __cfaabi_dbg_ctx2 );
	// lock held
	if ( owner != 0p ) {
		insert_last( blocked_threads, *t );
		unlock( lock );
	}
	// lock not held
	else {
		owner = t;
		recursion_count = 1;
		unpark( t );
		unlock( lock );
	}
}

static inline size_t on_wait(simple_owner_lock & this) with(this) {
	lock( lock __cfaabi_dbg_ctx2 );
	/* paranoid */ verifyf( owner != 0p, "Attempt to release lock %p that isn't held", &this );
	/* paranoid */ verifyf( owner == active_thread(), "Thread %p other than the owner %p attempted to release owner lock %p", owner, active_thread(), &this );

	size_t ret = recursion_count;

	// pop_and_set_new_owner( this );

	thread$ * t = &try_pop_front( blocked_threads );
	owner = t;
	recursion_count = ( t ? 1 : 0 );
	unpark( t );

	unlock( lock );
	return ret;
}

static inline void on_wakeup(simple_owner_lock & this, size_t recursion ) with(this) { recursion_count = recursion; }

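// Usage sketch (illustrative): acquisition is recursive for the owning thread, so
// lock/unlock pairs may nest; the lock is only released when the recursion count
// returns to zero.
//   simple_owner_lock m;
//   lock( m );
//   lock( m );        // same thread: only increments recursion_count
//   unlock( m );      // still held
//   unlock( m );      // released; next waiter (if any) becomes the owner
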
//-----------------------------------------------------------------------------
// Spin Queue Lock

// - No reacquire for cond var
// - No recursive acquisition
// - No ownership
// - spin lock with no locking/atomics in unlock
struct spin_queue_lock {
	// Spin lock used for mutual exclusion
	mcs_spin_lock lock;

	// flag showing if lock is held
	volatile bool held;

	#ifdef __CFA_DEBUG__
	// for deadlock detection
	struct thread$ * owner;
	#endif
};

static inline void  ?{}( spin_queue_lock & this ) with(this) {
	lock{};
	held = false;
}
static inline void ^?{}( spin_queue_lock & this ) {}
static inline void ?{}( spin_queue_lock & this, spin_queue_lock this2 ) = void;
static inline void ?=?( spin_queue_lock & this, spin_queue_lock this2 ) = void;

// if this is called recursively IT WILL DEADLOCK!!!!!
static inline void lock(spin_queue_lock & this) with(this) {
	mcs_spin_node node;
	#ifdef __CFA_DEBUG__
	assert(!(held && owner == active_thread()));
	#endif
	lock( lock, node );
	while(__atomic_load_n(&held, __ATOMIC_SEQ_CST)) Pause();
	__atomic_store_n(&held, true, __ATOMIC_SEQ_CST);
	unlock( lock, node );
	#ifdef __CFA_DEBUG__
	owner = active_thread();
	#endif
}

static inline void unlock(spin_queue_lock & this) with(this) {
	#ifdef __CFA_DEBUG__
	owner = 0p;
	#endif
	__atomic_store_n(&held, false, __ATOMIC_RELEASE);
}

static inline void on_notify(spin_queue_lock & this, struct thread$ * t ) { unpark(t); }
static inline size_t on_wait(spin_queue_lock & this) { unlock(this); return 0; }
static inline void on_wakeup(spin_queue_lock & this, size_t recursion ) { }


//-----------------------------------------------------------------------------
// MCS Block Spin Lock

// - No reacquire for cond var
// - No recursive acquisition
// - No ownership
// - Blocks, but the head of the queue spins (like spin_queue_lock, except threads other than the first block instead of spinning)
struct mcs_block_spin_lock {
	// Spin lock used for mutual exclusion
	mcs_lock lock;

	// flag showing if lock is held
	volatile bool held;

	#ifdef __CFA_DEBUG__
	// for deadlock detection
	struct thread$ * owner;
	#endif
};

static inline void  ?{}( mcs_block_spin_lock & this ) with(this) {
	lock{};
	held = false;
}
static inline void ^?{}( mcs_block_spin_lock & this ) {}
static inline void ?{}( mcs_block_spin_lock & this, mcs_block_spin_lock this2 ) = void;
static inline void ?=?( mcs_block_spin_lock & this, mcs_block_spin_lock this2 ) = void;

// if this is called recursively IT WILL DEADLOCK!!!!!
static inline void lock(mcs_block_spin_lock & this) with(this) {
	mcs_node node;
	#ifdef __CFA_DEBUG__
	assert(!(held && owner == active_thread()));
	#endif
	lock( lock, node );
	while(__atomic_load_n(&held, __ATOMIC_SEQ_CST)) Pause();
	__atomic_store_n(&held, true, __ATOMIC_SEQ_CST);
	unlock( lock, node );
	#ifdef __CFA_DEBUG__
	owner = active_thread();
	#endif
}

static inline void unlock(mcs_block_spin_lock & this) with(this) {
	#ifdef __CFA_DEBUG__
	owner = 0p;
	#endif
	__atomic_store_n(&held, false, __ATOMIC_SEQ_CST);
}

static inline void on_notify(mcs_block_spin_lock & this, struct thread$ * t ) { unpark(t); }
static inline size_t on_wait(mcs_block_spin_lock & this) { unlock(this); return 0; }
static inline void on_wakeup(mcs_block_spin_lock & this, size_t recursion ) { }

//-----------------------------------------------------------------------------
// Block Spin Lock

// - No reacquire for cond var
// - No recursive acquisition
// - No ownership
// - Blocks, but the head of the queue spins (like spin_queue_lock, except threads other than the first block instead of spinning)
struct block_spin_lock {
	// Spin lock used for mutual exclusion
	fast_block_lock lock;

	// flag showing if lock is held
	volatile bool held;

	#ifdef __CFA_DEBUG__
	// for deadlock detection
	struct thread$ * owner;
	#endif
};

static inline void  ?{}( block_spin_lock & this ) with(this) {
	lock{};
	held = false;
}
static inline void ^?{}( block_spin_lock & this ) {}
static inline void ?{}( block_spin_lock & this, block_spin_lock this2 ) = void;
static inline void ?=?( block_spin_lock & this, block_spin_lock this2 ) = void;

// if this is called recursively IT WILL DEADLOCK!!!!!
static inline void lock(block_spin_lock & this) with(this) {
	#ifdef __CFA_DEBUG__
	assert(!(held && owner == active_thread()));
	#endif
	lock( lock );
	while(__atomic_load_n(&held, __ATOMIC_SEQ_CST)) Pause();
	__atomic_store_n(&held, true, __ATOMIC_RELEASE);
	unlock( lock );
	#ifdef __CFA_DEBUG__
	owner = active_thread();
	#endif
}

static inline void unlock(block_spin_lock & this) with(this) {
	#ifdef __CFA_DEBUG__
	owner = 0p;
	#endif
	__atomic_store_n(&held, false, __ATOMIC_RELEASE);
}

static inline void on_notify(block_spin_lock & this, struct thread$ * t ) { unpark(t); }
static inline size_t on_wait(block_spin_lock & this) { unlock(this); return 0; }
static inline void on_wakeup(block_spin_lock & this, size_t recursion ) { }

//-----------------------------------------------------------------------------
// is_blocking_lock
trait is_blocking_lock(L & | sized(L)) {
	// For synchronization locks to use when acquiring
	void on_notify( L &, struct thread$ * );

	// For synchronization locks to use when releasing
	size_t on_wait( L & );

	// To set the recursion count after being signalled
	void on_wakeup( L &, size_t recursion );
};

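// Any lock above that defines on_notify/on_wait/on_wakeup (e.g. single_acquisition_lock,
// owner_lock, simple_owner_lock, fast_block_lock) satisfies this trait and can be used as
// the L parameter of the condition variables below. For example (illustrative only):
//   condition_variable( fast_block_lock ) cv;
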
//-----------------------------------------------------------------------------
// // info_thread
// // the info thread is a wrapper around a thread used
// // to store extra data for use in the condition variable
forall(L & | is_blocking_lock(L)) {
	struct info_thread;

	// // for use by sequence
	// info_thread(L) *& Back( info_thread(L) * this );
	// info_thread(L) *& Next( info_thread(L) * this );
}

//-----------------------------------------------------------------------------
// Synchronization Locks
forall(L & | is_blocking_lock(L)) {

	//-----------------------------------------------------------------------------
	// condition_variable

	// The multi-tool condition variable
	// - can wait with a timeout, returning on either a signal or the timeout
	// - can wait without passing a lock
	// - can have waiters reacquire different locks while waiting on the same cond var
	// - has shadow queue
	// - can be signalled outside of critical sections with no locks held
	struct condition_variable {
		// Spin lock used for mutual exclusion
		__spinlock_t lock;

		// List of blocked threads
		dlist( info_thread(L) ) blocked_threads;

		// Count of current blocked threads
		int count;
	};


	void  ?{}( condition_variable(L) & this );
	void ^?{}( condition_variable(L) & this );

	bool notify_one( condition_variable(L) & this );
	bool notify_all( condition_variable(L) & this );

	uintptr_t front( condition_variable(L) & this );

	bool empty  ( condition_variable(L) & this );
	int  counter( condition_variable(L) & this );

	void wait( condition_variable(L) & this );
	void wait( condition_variable(L) & this, uintptr_t info );
	bool wait( condition_variable(L) & this, Duration duration );
	bool wait( condition_variable(L) & this, uintptr_t info, Duration duration );

	void wait( condition_variable(L) & this, L & l );
	void wait( condition_variable(L) & this, L & l, uintptr_t info );
	bool wait( condition_variable(L) & this, L & l, Duration duration );
	bool wait( condition_variable(L) & this, L & l, uintptr_t info, Duration duration );

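	// Usage sketch (illustrative, not part of the header); "m", "c" and "ready" are
	// hypothetical names and L is instantiated with single_acquisition_lock:
	//   single_acquisition_lock m;
	//   condition_variable( single_acquisition_lock ) c;
	//   ... waiter:    lock( m ); while( !ready ) wait( c, m ); unlock( m );
	//   ... signaller: lock( m ); ready = true; unlock( m ); notify_one( c );
	// The lock passed to wait is released while the thread blocks; whether it is
	// reacquired before wait returns depends on the lock's on_notify/on_wakeup
	// behaviour (see the per-lock comments above).
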
	//-----------------------------------------------------------------------------
	// fast_cond_var

	// The trimmed and slim condition variable
	// - no internal lock so you must hold a lock while using this cond var
	// - signalling without holding branded lock is UNSAFE!
	// - only allows usage of one lock, cond var is branded after usage

	struct fast_cond_var {
		// List of blocked threads
		dlist( info_thread(L) ) blocked_threads;
		#ifdef __CFA_DEBUG__
		L * lock_used;
		#endif
	};

	void  ?{}( fast_cond_var(L) & this );
	void ^?{}( fast_cond_var(L) & this );

	bool notify_one( fast_cond_var(L) & this );
	bool notify_all( fast_cond_var(L) & this );

	uintptr_t front( fast_cond_var(L) & this );
	bool empty  ( fast_cond_var(L) & this );

	void wait( fast_cond_var(L) & this, L & l );
	void wait( fast_cond_var(L) & this, L & l, uintptr_t info );


	//-----------------------------------------------------------------------------
	// pthread_cond_var
	//
	// - cond var with minimal footprint
	// - supports operations needed for pthread cond

	struct pthread_cond_var {
		dlist( info_thread(L) ) blocked_threads;
		__spinlock_t lock;
	};

	void  ?{}( pthread_cond_var(L) & this );
	void ^?{}( pthread_cond_var(L) & this );

	bool notify_one( pthread_cond_var(L) & this );
	bool notify_all( pthread_cond_var(L) & this );

	uintptr_t front( pthread_cond_var(L) & this );
	bool empty ( pthread_cond_var(L) & this );

	void wait( pthread_cond_var(L) & this, L & l );
	void wait( pthread_cond_var(L) & this, L & l, uintptr_t info );
	bool wait( pthread_cond_var(L) & this, L & l, timespec t );
	bool wait( pthread_cond_var(L) & this, L & l, uintptr_t info, timespec t );
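
	// Usage sketch (illustrative): the timespec overloads mirror pthread-style timed
	// waits; the bool result presumably distinguishes a signal from a timeout
	// (assumption, not stated in this header), and "abstime" is a hypothetical timespec.
	//   simple_owner_lock m;
	//   pthread_cond_var( simple_owner_lock ) c;
	//   lock( m );
	//   bool signalled = wait( c, m, abstime );
	//   unlock( m );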
}