source: libcfa/src/concurrency/locks.hfa @ f835806

Last change on this file was f835806, checked in by caparsons <caparson@…>, 6 months ago

added some locks and cleaned up unused seqable field in thread block

//
// Cforall Version 1.0.0 Copyright (C) 2021 University of Waterloo
//
// The contents of this file are covered under the licence agreement in the
// file "LICENCE" distributed with Cforall.
//
// locks.hfa -- PUBLIC
// Runtime locks that are used with the runtime thread system.
//
// Author           : Colby Alexander Parsons
// Created On       : Thu Jan 21 19:46:50 2021
// Last Modified By :
// Last Modified On :
// Update Count     :
//

#pragma once

#include <stdbool.h>
#include <stdio.h>

#include "bits/weakso_locks.hfa"
#include "containers/queueLockFree.hfa"
#include "containers/list.hfa"

#include "limits.hfa"
#include "thread.hfa"

#include "time_t.hfa"
#include "time.hfa"
//-----------------------------------------------------------------------------
// Semaphore
struct semaphore {
        __spinlock_t lock;
        int count;
        __queue_t(thread$) waiting;
};

void  ?{}(semaphore & this, int count = 1);
void ^?{}(semaphore & this);
bool   P (semaphore & this);
bool   V (semaphore & this);
bool   V (semaphore & this, unsigned count);
thread$ * V (semaphore & this, bool );
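
// Usage sketch (illustrative; `jobs` and `Worker` are hypothetical names):
//      semaphore jobs{ 0 };            // start with no permits
//      thread Worker {};
//      void main( Worker & ) {
//              P( jobs );              // block until a permit is posted
//              // ... consume one job ...
//      }
//      // producer side: V( jobs );    // wake one waiting Worker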

//----------
struct single_acquisition_lock {
        inline blocking_lock;
};

static inline void  ?{}( single_acquisition_lock & this ) {((blocking_lock &)this){ false, false };}
static inline void ^?{}( single_acquisition_lock & this ) {}
static inline void   lock     ( single_acquisition_lock & this ) { lock    ( (blocking_lock &)this ); }
static inline bool   try_lock ( single_acquisition_lock & this ) { return try_lock( (blocking_lock &)this ); }
static inline void   unlock   ( single_acquisition_lock & this ) { unlock  ( (blocking_lock &)this ); }
static inline size_t on_wait  ( single_acquisition_lock & this ) { return on_wait ( (blocking_lock &)this ); }
static inline void   on_wakeup( single_acquisition_lock & this, size_t v ) { on_wakeup ( (blocking_lock &)this, v ); }
static inline void   on_notify( single_acquisition_lock & this, struct thread$ * t ) { on_notify( (blocking_lock &)this, t ); }

//----------
struct owner_lock {
        inline blocking_lock;
};

static inline void  ?{}( owner_lock & this ) {((blocking_lock &)this){ true, true };}
static inline void ^?{}( owner_lock & this ) {}
static inline void   lock     ( owner_lock & this ) { lock    ( (blocking_lock &)this ); }
static inline bool   try_lock ( owner_lock & this ) { return try_lock( (blocking_lock &)this ); }
static inline void   unlock   ( owner_lock & this ) { unlock  ( (blocking_lock &)this ); }
static inline size_t on_wait  ( owner_lock & this ) { return on_wait ( (blocking_lock &)this ); }
static inline void   on_wakeup( owner_lock & this, size_t v ) { on_wakeup ( (blocking_lock &)this, v ); }
static inline void   on_notify( owner_lock & this, struct thread$ * t ) { on_notify( (blocking_lock &)this, t ); }
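
// Usage sketch (illustrative): owner_lock permits recursive acquisition.
//      owner_lock l;
//      lock( l );
//      lock( l );      // legal: the same owner reacquires
//      // ... critical section ...
//      unlock( l );
//      unlock( l );    // released once the recursion unwinds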

//-----------------------------------------------------------------------------
// MCS Lock
struct mcs_node {
        mcs_node * volatile next;
        single_sem sem;
};

static inline void ?{}(mcs_node & this) { this.next = 0p; }

static inline mcs_node * volatile & ?`next ( mcs_node * node ) {
        return node->next;
}

struct mcs_lock {
        mcs_queue(mcs_node) queue;
};

static inline void lock(mcs_lock & l, mcs_node & n) {
        if(push(l.queue, &n))
                wait(n.sem);
}

static inline void unlock(mcs_lock & l, mcs_node & n) {
        mcs_node * next = advance(l.queue, &n);
        if(next) post(next->sem);
}
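
// Usage sketch (illustrative): each acquisition supplies its own node.
//      mcs_lock l;
//      mcs_node n;     // typically stack-allocated, one per acquisition
//      lock( l, n );   // blocks on n.sem if the queue was non-empty
//      // ... critical section ...
//      unlock( l, n ); // posts the successor's semaphore, if any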

//-----------------------------------------------------------------------------
// MCS Spin Lock
// - No recursive acquisition
// - Needs to be released by owner

struct mcs_spin_node {
        mcs_spin_node * volatile next;
        bool locked:1;
};

struct mcs_spin_queue {
        mcs_spin_node * volatile tail;
};

static inline void ?{}(mcs_spin_node & this) { this.next = 0p; this.locked = true; }

static inline mcs_spin_node * volatile & ?`next ( mcs_spin_node * node ) {
        return node->next;
}

struct mcs_spin_lock {
        mcs_spin_queue queue;
};

static inline void lock(mcs_spin_lock & l, mcs_spin_node & n) {
        mcs_spin_node * prev = __atomic_exchange_n(&l.queue.tail, &n, __ATOMIC_SEQ_CST);
        if(prev != 0p) {
                prev->next = &n;
                while(n.locked) Pause();
        }
}

static inline void unlock(mcs_spin_lock & l, mcs_spin_node & n) {
        mcs_spin_node * n_ptr = &n;
        if (!__atomic_compare_exchange_n(&l.queue.tail, &n_ptr, 0p, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST) ) {
                while (n.next == 0p) {}
                n.next->locked = false;
        }
}
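
// Unlock sketch: the CAS succeeds only if this node is still the tail
// (no successor queued), clearing the lock in one step; otherwise the
// owner waits for the successor link to appear and clears its `locked` flag.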

//-----------------------------------------------------------------------------
// CLH Spinlock
// - No recursive acquisition
// - Needs to be released by owner

struct clh_lock {
        volatile bool * volatile tail;
};

static inline void  ?{}( clh_lock & this ) { this.tail = malloc(); *this.tail = true; }
static inline void ^?{}( clh_lock & this ) { free(this.tail); }

static inline void lock(clh_lock & l) {
        thread$ * curr_thd = active_thread();
        *(curr_thd->clh_node) = false;
        volatile bool * prev = __atomic_exchange_n((bool **)(&l.tail), (bool *)(curr_thd->clh_node), __ATOMIC_SEQ_CST);
        while(!__atomic_load_n(prev, __ATOMIC_ACQUIRE)) Pause();
        curr_thd->clh_prev = prev;
}

static inline void unlock(clh_lock & l) {
        thread$ * curr_thd = active_thread();
        __atomic_store_n(curr_thd->clh_node, true, __ATOMIC_RELEASE);
        curr_thd->clh_node = curr_thd->clh_prev;
}
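
// Usage sketch (illustrative): nodes live in the thread descriptor
// (clh_node/clh_prev) and are recycled across acquisitions, so callers
// pass no node.
//      clh_lock l;
//      lock( l );      // spin until the predecessor's flag reads true
//      // ... critical section ...
//      unlock( l );    // publish true, then adopt the predecessor's node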

//-----------------------------------------------------------------------------
// Linear backoff Spinlock
struct linear_backoff_then_block_lock {
        // Spin lock used for mutual exclusion
        __spinlock_t spinlock;

        // Current thread owning the lock
        struct thread$ * owner;

        // List of blocked threads
        dlist( thread$ ) blocked_threads;

        // Used for comparing and exchanging
        volatile size_t lock_value;

        // Used for linear backoff spinning
        int spin_start;
        int spin_end;
        int spin_count;

        // After unsuccessful linear backoff, yield this many times
        int yield_count;
};

static inline void  ?{}( linear_backoff_then_block_lock & this, int spin_start, int spin_end, int spin_count, int yield_count ) {
        this.spinlock{};
        this.blocked_threads{};
        this.lock_value = 0;
        this.owner = 0p; // initialize owner so the fast-path owner check in lock() is well-defined
        this.spin_start = spin_start;
        this.spin_end = spin_end;
        this.spin_count = spin_count;
        this.yield_count = yield_count;
}
static inline void  ?{}( linear_backoff_then_block_lock & this ) { this{4, 1024, 16, 0}; }
static inline void ^?{}( linear_backoff_then_block_lock & this ) {}
static inline void ?{}( linear_backoff_then_block_lock & this, linear_backoff_then_block_lock this2 ) = void;
static inline void ?=?( linear_backoff_then_block_lock & this, linear_backoff_then_block_lock this2 ) = void;

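// lock_value encoding used below: 0 => free; 1 => held, uncontended;
// 2 => held, possibly contended. An unlock that does not read back 1
// must check the blocked list and wake a waiter.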
static inline bool internal_try_lock(linear_backoff_then_block_lock & this, size_t & compare_val) with(this) {
        if (__atomic_compare_exchange_n(&lock_value, &compare_val, 1, false, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)) {
                owner = active_thread();
                return true;
        }
        return false;
}

static inline bool try_lock(linear_backoff_then_block_lock & this) { size_t compare_val = 0; return internal_try_lock(this, compare_val); }

static inline bool try_lock_contention(linear_backoff_then_block_lock & this) with(this) {
        if (__atomic_exchange_n(&lock_value, 2, __ATOMIC_ACQUIRE) == 0) {
                owner = active_thread();
                return true;
        }
        return false;
}

static inline bool block(linear_backoff_then_block_lock & this) with(this) {
        lock( spinlock __cfaabi_dbg_ctx2 );
        if (lock_value != 2) {
                unlock( spinlock );
                return true;
        }
        insert_last( blocked_threads, *active_thread() );
        unlock( spinlock );
        park( );
        return true;
}

static inline void lock(linear_backoff_then_block_lock & this) with(this) {
        // if already the owner, return immediately
        if (active_thread() == owner) return;
        size_t compare_val = 0;
        int spin = spin_start;
        // linear backoff
        for( ;; ) {
                compare_val = 0;
                if (internal_try_lock(this, compare_val)) return;
                if (2 == compare_val) break;
                for (int i = 0; i < spin; i++) Pause();
                if (spin >= spin_end) break;
                spin += spin;
        }

        if(2 != compare_val && try_lock_contention(this)) return;
        // block until signalled
        while (block(this)) if(try_lock_contention(this)) return;
}
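
// Schedule sketch with the default {4, 1024, 16, 0}: the spin count
// doubles each round (4, 8, 16, ... capped at 1024); if spinning fails,
// the thread marks contention (lock_value 2) and blocks until woken.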

static inline void unlock(linear_backoff_then_block_lock & this) with(this) {
        verify(lock_value > 0);
        owner = 0p;
        if (__atomic_exchange_n(&lock_value, 0, __ATOMIC_RELEASE) == 1) return;
        lock( spinlock __cfaabi_dbg_ctx2 );
        thread$ * t = &try_pop_front( blocked_threads );
        unlock( spinlock );
        unpark( t );
}

static inline void on_notify(linear_backoff_then_block_lock & this, struct thread$ * t ) { unpark(t); }
static inline size_t on_wait(linear_backoff_then_block_lock & this) { unlock(this); return 0; }
static inline void on_wakeup(linear_backoff_then_block_lock & this, size_t recursion ) { lock(this); }

//-----------------------------------------------------------------------------
// Fast Block Lock

// minimal blocking lock
// - No reacquire for cond var
// - No recursive acquisition
// - No ownership
struct fast_block_lock {
        // List of blocked threads
        dlist( thread$ ) blocked_threads;

        // Spin lock used for mutual exclusion
        __spinlock_t lock;

        // flag showing if lock is held
        bool held:1;

        #ifdef __CFA_DEBUG__
        // for deadlock detection
        struct thread$ * owner;
        #endif
};

static inline void  ?{}( fast_block_lock & this ) with(this) {
        lock{};
        blocked_threads{};
        held = false;
}
static inline void ^?{}( fast_block_lock & this ) {}
static inline void ?{}( fast_block_lock & this, fast_block_lock this2 ) = void;
static inline void ?=?( fast_block_lock & this, fast_block_lock this2 ) = void;

// if this is called recursively IT WILL DEADLOCK!!!!!
static inline void lock(fast_block_lock & this) with(this) {
        lock( lock __cfaabi_dbg_ctx2 );

        #ifdef __CFA_DEBUG__
        assert(!(held && owner == active_thread()));
        #endif
        if (held) {
                insert_last( blocked_threads, *active_thread() );
                unlock( lock );
                park( );
                return;
        }
        held = true;
        #ifdef __CFA_DEBUG__
        owner = active_thread();
        #endif
        unlock( lock );
}

static inline void unlock(fast_block_lock & this) with(this) {
        lock( lock __cfaabi_dbg_ctx2 );
        /* paranoid */ verifyf( held != false, "Attempt to release lock %p that isn't held", &this );
        thread$ * t = &try_pop_front( blocked_threads );
        held = ( t ? true : false );
        #ifdef __CFA_DEBUG__
        owner = ( t ? t : 0p );
        #endif
        unpark( t );
        unlock( lock );
}

static inline void on_notify(fast_block_lock & this, struct thread$ * t ) { unpark(t); }
static inline size_t on_wait(fast_block_lock & this) { unlock(this); return 0; }
static inline void on_wakeup(fast_block_lock & this, size_t recursion ) { }

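// Usage sketch (illustrative): a minimal, non-reentrant blocking mutex.
//      fast_block_lock l;
//      lock( l );      // deadlocks if the caller already holds l
//      // ... critical section ...
//      unlock( l );    // ownership passes directly to the next blocked thread
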
//-----------------------------------------------------------------------------
// simple_owner_lock

// pthread owner lock
// - reacquire for cond var
// - recursive acquisition
// - ownership
struct simple_owner_lock {
        // List of blocked threads
        dlist( thread$ ) blocked_threads;

        // Spin lock used for mutual exclusion
        __spinlock_t lock;

        // owner showing if lock is held
        struct thread$ * owner;

        size_t recursion_count;
};

static inline void  ?{}( simple_owner_lock & this ) with(this) {
        lock{};
        blocked_threads{};
        owner = 0p;
        recursion_count = 0;
}
static inline void ^?{}( simple_owner_lock & this ) {}
static inline void ?{}( simple_owner_lock & this, simple_owner_lock this2 ) = void;
static inline void ?=?( simple_owner_lock & this, simple_owner_lock this2 ) = void;

static inline void lock(simple_owner_lock & this) with(this) {
        if (owner == active_thread()) {
                recursion_count++;
                return;
        }
        lock( lock __cfaabi_dbg_ctx2 );

        if (owner != 0p) {
                insert_last( blocked_threads, *active_thread() );
                unlock( lock );
                park( );
                return;
        }
        owner = active_thread();
        recursion_count = 1;
        unlock( lock );
}

// static inline added so this header-defined helper does not violate the one-definition rule
static inline void pop_and_set_new_owner( simple_owner_lock & this ) with( this ) {
        thread$ * t = &try_pop_front( blocked_threads );
        owner = t;
        recursion_count = ( t ? 1 : 0 );
        unpark( t );
}

static inline void unlock(simple_owner_lock & this) with(this) {
        lock( lock __cfaabi_dbg_ctx2 );
        /* paranoid */ verifyf( owner != 0p, "Attempt to release lock %p that isn't held", &this );
        /* paranoid */ verifyf( owner == active_thread(), "Thread %p other than the owner %p attempted to release owner lock %p", owner, active_thread(), &this );
        // when the recursion count reaches zero, release the lock and set a new owner if one is waiting
        recursion_count--;
        if ( recursion_count == 0 ) {
                pop_and_set_new_owner( this );
        }
        unlock( lock );
}

static inline void on_notify(simple_owner_lock & this, struct thread$ * t ) with(this) {
        lock( lock __cfaabi_dbg_ctx2 );
        // lock held
        if ( owner != 0p ) {
                insert_last( blocked_threads, *t );
                unlock( lock );
        }
        // lock not held
        else {
                owner = t;
                recursion_count = 1;
                unpark( t );
                unlock( lock );
        }
}

static inline size_t on_wait(simple_owner_lock & this) with(this) {
        lock( lock __cfaabi_dbg_ctx2 );
        /* paranoid */ verifyf( owner != 0p, "Attempt to release lock %p that isn't held", &this );
        /* paranoid */ verifyf( owner == active_thread(), "Thread %p other than the owner %p attempted to release owner lock %p", owner, active_thread(), &this );

        size_t ret = recursion_count;

        pop_and_set_new_owner( this );

        unlock( lock );
        return ret;
}

static inline void on_wakeup(simple_owner_lock & this, size_t recursion ) with(this) { recursion_count = recursion; }

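// Usage sketch (illustrative): pthread-style owner lock with recursion;
// the condition variables below reacquire it on wakeup.
//      simple_owner_lock l;
//      lock( l );
//      lock( l );      // recursion_count == 2
//      unlock( l );
//      unlock( l );    // released; a waiter (if any) becomes the new owner
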
//-----------------------------------------------------------------------------
// Spin Queue Lock

// - No reacquire for cond var
// - No recursive acquisition
// - No ownership
// - spin lock with no locking/atomics in unlock
struct spin_queue_lock {
        // Spin lock used for mutual exclusion
        mcs_spin_lock lock;

        // flag showing if lock is held
        bool held:1;

        #ifdef __CFA_DEBUG__
        // for deadlock detection
        struct thread$ * owner;
        #endif
};

static inline void  ?{}( spin_queue_lock & this ) with(this) {
        lock{};
        held = false;
}
static inline void ^?{}( spin_queue_lock & this ) {}
static inline void ?{}( spin_queue_lock & this, spin_queue_lock this2 ) = void;
static inline void ?=?( spin_queue_lock & this, spin_queue_lock this2 ) = void;

// if this is called recursively IT WILL DEADLOCK!!!!!
static inline void lock(spin_queue_lock & this) with(this) {
        mcs_spin_node node;
        #ifdef __CFA_DEBUG__
        assert(!(held && owner == active_thread()));
        #endif
        lock( lock, node );
        while(held) Pause();
        held = true;
        unlock( lock, node );
        #ifdef __CFA_DEBUG__
        owner = active_thread();
        #endif
}

static inline void unlock(spin_queue_lock & this) with(this) {
        #ifdef __CFA_DEBUG__
        owner = 0p;
        #endif
        held = false;
}

static inline void on_notify(spin_queue_lock & this, struct thread$ * t ) { unpark(t); }
static inline size_t on_wait(spin_queue_lock & this) { unlock(this); return 0; }
static inline void on_wakeup(spin_queue_lock & this, size_t recursion ) { }

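// Design note (sketch): acquisition serializes through the internal MCS
// queue and then spins on `held`; release is a single plain store, so the
// unlock path uses no atomics.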

//-----------------------------------------------------------------------------
// MCS Block Spin Lock

// - No reacquire for cond var
// - No recursive acquisition
// - No ownership
// - Blocks, but the first waiting thread spins (like spin_queue_lock, except non-first threads block)
struct mcs_block_spin_lock {
        // Spin lock used for mutual exclusion
        mcs_lock lock;

        // flag showing if lock is held
        bool held:1;

        #ifdef __CFA_DEBUG__
        // for deadlock detection
        struct thread$ * owner;
        #endif
};

static inline void  ?{}( mcs_block_spin_lock & this ) with(this) {
        lock{};
        held = false;
}
static inline void ^?{}( mcs_block_spin_lock & this ) {}
static inline void ?{}( mcs_block_spin_lock & this, mcs_block_spin_lock this2 ) = void;
static inline void ?=?( mcs_block_spin_lock & this, mcs_block_spin_lock this2 ) = void;

// if this is called recursively IT WILL DEADLOCK!!!!!
static inline void lock(mcs_block_spin_lock & this) with(this) {
        mcs_node node;
        #ifdef __CFA_DEBUG__
        assert(!(held && owner == active_thread()));
        #endif
        lock( lock, node );
        while(held) Pause();
        held = true;
        unlock( lock, node );
        #ifdef __CFA_DEBUG__
        owner = active_thread();
        #endif
}

static inline void unlock(mcs_block_spin_lock & this) with(this) {
        #ifdef __CFA_DEBUG__
        owner = 0p;
        #endif
        held = false;
}

static inline void on_notify(mcs_block_spin_lock & this, struct thread$ * t ) { unpark(t); }
static inline size_t on_wait(mcs_block_spin_lock & this) { unlock(this); return 0; }
static inline void on_wakeup(mcs_block_spin_lock & this, size_t recursion ) { }

//-----------------------------------------------------------------------------
// Block Spin Lock

// - No reacquire for cond var
// - No recursive acquisition
// - No ownership
// - Blocks, but the first waiting thread spins (like spin_queue_lock, except non-first threads block)
struct block_spin_lock {
        // Spin lock used for mutual exclusion
        fast_block_lock lock;

        // flag showing if lock is held
        bool held:1;

        #ifdef __CFA_DEBUG__
        // for deadlock detection
        struct thread$ * owner;
        #endif
};

static inline void  ?{}( block_spin_lock & this ) with(this) {
        lock{};
        held = false;
}
static inline void ^?{}( block_spin_lock & this ) {}
static inline void ?{}( block_spin_lock & this, block_spin_lock this2 ) = void;
static inline void ?=?( block_spin_lock & this, block_spin_lock this2 ) = void;

// if this is called recursively IT WILL DEADLOCK!!!!!
static inline void lock(block_spin_lock & this) with(this) {
        #ifdef __CFA_DEBUG__
        assert(!(held && owner == active_thread()));
        #endif
        lock( lock );
        while(held) Pause();
        held = true;
        unlock( lock );
        #ifdef __CFA_DEBUG__
        owner = active_thread();
        #endif
}

static inline void unlock(block_spin_lock & this) with(this) {
        #ifdef __CFA_DEBUG__
        owner = 0p;
        #endif
        held = false;
}

static inline void on_notify(block_spin_lock & this, struct thread$ * t ) { unpark(t); }
static inline size_t on_wait(block_spin_lock & this) { unlock(this); return 0; }
static inline void on_wakeup(block_spin_lock & this, size_t recursion ) { }

//-----------------------------------------------------------------------------
// is_blocking_lock
trait is_blocking_lock(L & | sized(L)) {
        // For synchronization locks to use when acquiring
        void on_notify( L &, struct thread$ * );

        // For synchronization locks to use when releasing
        size_t on_wait( L & );

        // To set the recursion count after being signalled
        void on_wakeup( L &, size_t recursion );
};

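// Note (sketch): the blocking locks above (e.g. fast_block_lock,
// simple_owner_lock) satisfy this trait through their on_notify/on_wait/
// on_wakeup routines, so they can parameterize the condition variables below.
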
//-----------------------------------------------------------------------------
// info_thread
// the info thread is a wrapper around a thread used
// to store extra data for use in the condition variable
forall(L & | is_blocking_lock(L)) {
        struct info_thread;

        // for use by sequence
        // info_thread(L) *& Back( info_thread(L) * this );
        // info_thread(L) *& Next( info_thread(L) * this );
}

//-----------------------------------------------------------------------------
// Synchronization Locks
forall(L & | is_blocking_lock(L)) {

        //-----------------------------------------------------------------------------
        // condition_variable

        // The multi-tool condition variable
        // - can pass timeouts to wait for either a signal or timeout
        // - can wait without passing a lock
        // - can have waiters reacquire different locks while waiting on the same cond var
        // - has shadow queue
        // - can be signalled outside of critical sections with no locks held
        struct condition_variable {
                // Spin lock used for mutual exclusion
                __spinlock_t lock;

                // List of blocked threads
                dlist( info_thread(L) ) blocked_threads;

                // Count of current blocked threads
                int count;
        };


        void  ?{}( condition_variable(L) & this );
        void ^?{}( condition_variable(L) & this );

        bool notify_one( condition_variable(L) & this );
        bool notify_all( condition_variable(L) & this );

        uintptr_t front( condition_variable(L) & this );

        bool empty  ( condition_variable(L) & this );
        int  counter( condition_variable(L) & this );

        void wait( condition_variable(L) & this );
        void wait( condition_variable(L) & this, uintptr_t info );
        bool wait( condition_variable(L) & this, Duration duration );
        bool wait( condition_variable(L) & this, uintptr_t info, Duration duration );

        void wait( condition_variable(L) & this, L & l );
        void wait( condition_variable(L) & this, L & l, uintptr_t info );
        bool wait( condition_variable(L) & this, L & l, Duration duration );
        bool wait( condition_variable(L) & this, L & l, uintptr_t info, Duration duration );

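        // Usage sketch (illustrative; `l` and `c` are hypothetical):
        //      owner_lock l;
        //      condition_variable( owner_lock ) c;
        //      lock( l );
        //      wait( c, l );   // releases l while blocked, reacquires it before returning
        //      unlock( l );
        //      // signaller: notify_one( c );  // legal with no locks held
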
        //-----------------------------------------------------------------------------
        // fast_cond_var

        // The trimmed and slim condition variable
        // - no internal lock, so you must hold a lock while using this cond var
        // - signalling without holding the branded lock is UNSAFE!
        // - only allows usage of one lock; the cond var is branded after usage
        struct fast_cond_var {
                // List of blocked threads
                dlist( info_thread(L) ) blocked_threads;

                #ifdef __CFA_DEBUG__
                L * lock_used;
                #endif
        };


        void  ?{}( fast_cond_var(L) & this );
        void ^?{}( fast_cond_var(L) & this );

        bool notify_one( fast_cond_var(L) & this );
        bool notify_all( fast_cond_var(L) & this );

        uintptr_t front( fast_cond_var(L) & this );

        bool empty  ( fast_cond_var(L) & this );

        void wait( fast_cond_var(L) & this, L & l );
        void wait( fast_cond_var(L) & this, L & l, uintptr_t info );
}
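
// Usage sketch (illustrative): fast_cond_var must always be used while
// holding its single, branded lock.
//      simple_owner_lock l;
//      fast_cond_var( simple_owner_lock ) c;
//      lock( l );
//      wait( c, l );   // c becomes branded to l on first use
//      unlock( l );
//      // signaller: lock( l ); notify_one( c ); unlock( l );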