source: libcfa/src/concurrency/locks.hfa @ 9e3d123

//
// Cforall Version 1.0.0 Copyright (C) 2021 University of Waterloo
//
// The contents of this file are covered under the licence agreement in the
// file "LICENCE" distributed with Cforall.
//
// locks.hfa -- PUBLIC
// Runtime locks that are used with the runtime thread system.
//
// Author           : Colby Alexander Parsons
// Created On       : Thu Jan 21 19:46:50 2021
// Last Modified By :
// Last Modified On :
// Update Count     :
//

#pragma once

#include <stdbool.h>
#include <stdio.h>

#include "bits/weakso_locks.hfa"
#include "containers/queueLockFree.hfa"
#include "containers/list.hfa"

#include "limits.hfa"
#include "thread.hfa"

#include "time_t.hfa"
#include "time.hfa"

//-----------------------------------------------------------------------------
// Semaphore
struct semaphore {
    __spinlock_t lock;
    int count;
    __queue_t(thread$) waiting;
};

void  ?{}(semaphore & this, int count = 1);
void ^?{}(semaphore & this);
bool   P (semaphore & this);
bool   V (semaphore & this);
bool   V (semaphore & this, unsigned count);
thread$ * V (semaphore & this, bool );

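// Example usage (illustrative sketch, not part of the original header):
//   semaphore s = { 0 };       // start closed, for signalling
//   // waiter:    P( s );      // block until a permit is available
//   // signaller: V( s );      // release one waiter (or bank a permit)
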
//----------
struct single_acquisition_lock {
    inline blocking_lock;
};

static inline void  ?{}( single_acquisition_lock & this ) {((blocking_lock &)this){ false, false };}
static inline void ^?{}( single_acquisition_lock & this ) {}
static inline void   lock     ( single_acquisition_lock & this ) { lock    ( (blocking_lock &)this ); }
static inline bool   try_lock ( single_acquisition_lock & this ) { return try_lock( (blocking_lock &)this ); }
static inline void   unlock   ( single_acquisition_lock & this ) { unlock  ( (blocking_lock &)this ); }
static inline size_t on_wait  ( single_acquisition_lock & this ) { return on_wait ( (blocking_lock &)this ); }
static inline void   on_wakeup( single_acquisition_lock & this, size_t v ) { on_wakeup ( (blocking_lock &)this, v ); }
static inline void   on_notify( single_acquisition_lock & this, struct thread$ * t ) { on_notify( (blocking_lock &)this, t ); }

//----------
struct owner_lock {
    inline blocking_lock;
};

static inline void  ?{}( owner_lock & this ) {((blocking_lock &)this){ true, true };}
static inline void ^?{}( owner_lock & this ) {}
static inline void   lock     ( owner_lock & this ) { lock    ( (blocking_lock &)this ); }
static inline bool   try_lock ( owner_lock & this ) { return try_lock( (blocking_lock &)this ); }
static inline void   unlock   ( owner_lock & this ) { unlock  ( (blocking_lock &)this ); }
static inline size_t on_wait  ( owner_lock & this ) { return on_wait ( (blocking_lock &)this ); }
static inline void   on_wakeup( owner_lock & this, size_t v ) { on_wakeup ( (blocking_lock &)this, v ); }
static inline void   on_notify( owner_lock & this, struct thread$ * t ) { on_notify( (blocking_lock &)this, t ); }

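// Both locks wrap blocking_lock; the constructor flags select the policy.
// Example usage (illustrative sketch; owner_lock is assumed to permit
// recursive acquisition by its owner, single_acquisition_lock is not):
//   owner_lock l;
//   lock( l );
//   lock( l );        // same thread may reacquire
//   unlock( l );
//   unlock( l );
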
//-----------------------------------------------------------------------------
// MCS Lock
struct mcs_node {
    mcs_node * volatile next;
    single_sem sem;
};

static inline void ?{}(mcs_node & this) { this.next = 0p; }

static inline mcs_node * volatile & ?`next ( mcs_node * node ) {
    return node->next;
}

struct mcs_lock {
    mcs_queue(mcs_node) queue;
};

static inline void lock(mcs_lock & l, mcs_node & n) {
    if(push(l.queue, &n))
        wait(n.sem);
}

static inline void unlock(mcs_lock & l, mcs_node & n) {
    mcs_node * next = advance(l.queue, &n);
    if(next) post(next->sem);
}

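// Example usage (illustrative sketch): each acquisition supplies its own
// queue node, which must stay alive until the matching unlock.
//   mcs_lock l;
//   void critical() {
//       mcs_node n;              // per-acquisition node, typically on the stack
//       lock( l, n );
//       // ... critical section ...
//       unlock( l, n );
//   }
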
//-----------------------------------------------------------------------------
// MCS Spin Lock
// - No recursive acquisition
// - Needs to be released by owner

struct mcs_spin_node {
    mcs_spin_node * volatile next;
    volatile bool locked;
};

struct mcs_spin_queue {
    mcs_spin_node * volatile tail;
};

static inline void ?{}(mcs_spin_node & this) { this.next = 0p; this.locked = true; }

static inline mcs_spin_node * volatile & ?`next ( mcs_spin_node * node ) {
    return node->next;
}

struct mcs_spin_lock {
    mcs_spin_queue queue;
};

static inline void lock(mcs_spin_lock & l, mcs_spin_node & n) {
    mcs_spin_node * prev = __atomic_exchange_n(&l.queue.tail, &n, __ATOMIC_SEQ_CST);
    if(prev == 0p) return;
    prev->next = &n;
    while(__atomic_load_n(&n.locked, __ATOMIC_SEQ_CST)) Pause();
}

static inline void unlock(mcs_spin_lock & l, mcs_spin_node & n) {
    mcs_spin_node * n_ptr = &n;
    if (__atomic_compare_exchange_n(&l.queue.tail, &n_ptr, 0p, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) return;
    while (__atomic_load_n(&n.next, __ATOMIC_SEQ_CST) == 0p) {}
    n.next->locked = false;
}

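// Example usage (illustrative sketch): same node-passing protocol as
// mcs_lock, but waiters spin on their node's locked flag instead of blocking.
//   mcs_spin_lock l;
//   mcs_spin_node n;
//   lock( l, n );
//   // ... critical section ...
//   unlock( l, n );
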
//-----------------------------------------------------------------------------
// CLH Spinlock
// - No recursive acquisition
// - Needs to be released by owner

struct clh_lock {
    volatile bool * volatile tail;
};

static inline void  ?{}( clh_lock & this ) { this.tail = malloc(); *this.tail = true; }
static inline void ^?{}( clh_lock & this ) { free(this.tail); }

static inline void lock(clh_lock & l) {
    thread$ * curr_thd = active_thread();
    *(curr_thd->clh_node) = false;
    volatile bool * prev = __atomic_exchange_n((bool **)(&l.tail), (bool *)(curr_thd->clh_node), __ATOMIC_SEQ_CST);
    while(!__atomic_load_n(prev, __ATOMIC_ACQUIRE)) Pause();
    curr_thd->clh_prev = prev;
}

static inline void unlock(clh_lock & l) {
    thread$ * curr_thd = active_thread();
    __atomic_store_n(curr_thd->clh_node, true, __ATOMIC_RELEASE);
    curr_thd->clh_node = curr_thd->clh_prev;
}

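// Example usage (illustrative sketch): the queue nodes live on the thread
// descriptor (active_thread()->clh_node / clh_prev), so callers pass only
// the lock itself.
//   clh_lock l;
//   lock( l );
//   // ... critical section ...
//   unlock( l );
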
//-----------------------------------------------------------------------------
// Linear backoff Spinlock
struct linear_backoff_then_block_lock {
    // Spin lock used for mutual exclusion
    __spinlock_t spinlock;

    // Current thread owning the lock
    struct thread$ * owner;

    // List of blocked threads
    dlist( thread$ ) blocked_threads;

    // Lock state used for compare-and-swap:
    // 0 = unlocked, 1 = locked (uncontended), 2 = locked with possible waiters
    volatile size_t lock_value;

    // used for linear backoff spinning
    int spin_start;
    int spin_end;
    int spin_count;

    // after unsuccessful linear backoff yield this many times
    int yield_count;
};

static inline void  ?{}( linear_backoff_then_block_lock & this, int spin_start, int spin_end, int spin_count, int yield_count ) {
    this.spinlock{};
    this.blocked_threads{};
    this.lock_value = 0;
    this.spin_start = spin_start;
    this.spin_end = spin_end;
    this.spin_count = spin_count;
    this.yield_count = yield_count;
}
static inline void  ?{}( linear_backoff_then_block_lock & this ) { this{4, 1024, 16, 0}; }
static inline void ^?{}( linear_backoff_then_block_lock & this ) {}
static inline void ?{}( linear_backoff_then_block_lock & this, linear_backoff_then_block_lock this2 ) = void;
static inline void ?=?( linear_backoff_then_block_lock & this, linear_backoff_then_block_lock this2 ) = void;

static inline bool internal_try_lock(linear_backoff_then_block_lock & this, size_t & compare_val) with(this) {
    if (__atomic_compare_exchange_n(&lock_value, &compare_val, 1, false, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)) {
        owner = active_thread();
        return true;
    }
    return false;
}

static inline bool try_lock(linear_backoff_then_block_lock & this) { size_t compare_val = 0; return internal_try_lock(this, compare_val); }

static inline bool try_lock_contention(linear_backoff_then_block_lock & this) with(this) {
    if (__atomic_exchange_n(&lock_value, 2, __ATOMIC_ACQUIRE) == 0) {
        owner = active_thread();
        return true;
    }
    return false;
}

static inline bool block(linear_backoff_then_block_lock & this) with(this) {
    lock( spinlock __cfaabi_dbg_ctx2 );
    if (lock_value != 2) {
        unlock( spinlock );
        return true;
    }
    insert_last( blocked_threads, *active_thread() );
    unlock( spinlock );
    park( );
    return true;
}

static inline void lock(linear_backoff_then_block_lock & this) with(this) {
    // if we already own the lock, return
    if (active_thread() == owner) return;
    size_t compare_val = 0;
    int spin = spin_start;
    // linear backoff
    for( ;; ) {
        compare_val = 0;
        if (internal_try_lock(this, compare_val)) return;
        if (2 == compare_val) break;
        for (int i = 0; i < spin; i++) Pause();
        if (spin >= spin_end) break;
        spin += spin;
    }

    if(2 != compare_val && try_lock_contention(this)) return;
    // block until signalled
    while (block(this)) if(try_lock_contention(this)) return;
}

static inline void unlock(linear_backoff_then_block_lock & this) with(this) {
    verify(lock_value > 0);
    owner = 0p;
    if (__atomic_exchange_n(&lock_value, 0, __ATOMIC_RELEASE) == 1) return;
    lock( spinlock __cfaabi_dbg_ctx2 );
    thread$ * t = &try_pop_front( blocked_threads );
    unlock( spinlock );
    unpark( t );
}

static inline void on_notify(linear_backoff_then_block_lock & this, struct thread$ * t ) { unpark(t); }
static inline size_t on_wait(linear_backoff_then_block_lock & this) { unlock(this); return 0; }
static inline void on_wakeup(linear_backoff_then_block_lock & this, size_t recursion ) { lock(this); }

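// Example usage (illustrative sketch): spins with growing backoff first,
// then blocks until signalled; backoff parameters may be tuned at construction.
//   linear_backoff_then_block_lock l;                        // defaults {4, 1024, 16, 0}
//   linear_backoff_then_block_lock l2 = { 8, 2048, 16, 0 };  // custom backoff bounds
//   lock( l );
//   // ... critical section ...
//   unlock( l );
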
//-----------------------------------------------------------------------------
// Fast Block Lock

// minimal blocking lock
// - No reacquire for cond var
// - No recursive acquisition
// - No ownership
struct fast_block_lock {
    // List of blocked threads
    dlist( thread$ ) blocked_threads;

    // Spin lock used for mutual exclusion
    __spinlock_t lock;

    // flag showing if lock is held
    bool held:1;

    #ifdef __CFA_DEBUG__
    // for deadlock detection
    struct thread$ * owner;
    #endif
};

static inline void  ?{}( fast_block_lock & this ) with(this) {
    lock{};
    blocked_threads{};
    held = false;
}
static inline void ^?{}( fast_block_lock & this ) {}
static inline void ?{}( fast_block_lock & this, fast_block_lock this2 ) = void;
static inline void ?=?( fast_block_lock & this, fast_block_lock this2 ) = void;

// if this is called recursively IT WILL DEADLOCK!!!!!
static inline void lock(fast_block_lock & this) with(this) {
    lock( lock __cfaabi_dbg_ctx2 );

    #ifdef __CFA_DEBUG__
    assert(!(held && owner == active_thread()));
    #endif
    if (held) {
        insert_last( blocked_threads, *active_thread() );
        unlock( lock );
        park( );
        return;
    }
    held = true;
    #ifdef __CFA_DEBUG__
    owner = active_thread();
    #endif
    unlock( lock );
}

static inline void unlock(fast_block_lock & this) with(this) {
    lock( lock __cfaabi_dbg_ctx2 );
    /* paranoid */ verifyf( held != false, "Attempt to release lock %p that isn't held", &this );
    thread$ * t = &try_pop_front( blocked_threads );
    held = ( t ? true : false );
    #ifdef __CFA_DEBUG__
    owner = ( t ? t : 0p );
    #endif
    unpark( t );
    unlock( lock );
}

static inline void on_notify(fast_block_lock & this, struct thread$ * t ) { unpark(t); }
static inline size_t on_wait(fast_block_lock & this) { unlock(this); return 0; }
static inline void on_wakeup(fast_block_lock & this, size_t recursion ) { }

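// Example usage (illustrative sketch): a minimal blocking lock with no
// ownership; it must never be acquired recursively (debug builds assert).
//   fast_block_lock l;
//   lock( l );
//   // ... critical section ...
//   unlock( l );
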
//-----------------------------------------------------------------------------
// simple_owner_lock

// pthread owner lock
// - reacquire for cond var
// - recursive acquisition
// - ownership
struct simple_owner_lock {
    // List of blocked threads
    dlist( thread$ ) blocked_threads;

    // Spin lock used for mutual exclusion
    __spinlock_t lock;

    // thread owning the lock; 0p when the lock is not held
    struct thread$ * owner;

    size_t recursion_count;
};

static inline void  ?{}( simple_owner_lock & this ) with(this) {
    lock{};
    blocked_threads{};
    owner = 0p;
    recursion_count = 0;
}
static inline void ^?{}( simple_owner_lock & this ) {}
static inline void ?{}( simple_owner_lock & this, simple_owner_lock this2 ) = void;
static inline void ?=?( simple_owner_lock & this, simple_owner_lock this2 ) = void;

static inline void lock(simple_owner_lock & this) with(this) {
    if (owner == active_thread()) {
        recursion_count++;
        return;
    }
    lock( lock __cfaabi_dbg_ctx2 );

    if (owner != 0p) {
        insert_last( blocked_threads, *active_thread() );
        unlock( lock );
        park( );
        return;
    }
    owner = active_thread();
    recursion_count = 1;
    unlock( lock );
}

// TODO: fix duplicate def issue and bring this back
// void pop_and_set_new_owner( simple_owner_lock & this ) with( this ) {
    // thread$ * t = &try_pop_front( blocked_threads );
    // owner = t;
    // recursion_count = ( t ? 1 : 0 );
    // unpark( t );
// }

static inline void unlock(simple_owner_lock & this) with(this) {
    lock( lock __cfaabi_dbg_ctx2 );
    /* paranoid */ verifyf( owner != 0p, "Attempt to release lock %p that isn't held", &this );
    /* paranoid */ verifyf( owner == active_thread(), "Thread %p other than the owner %p attempted to release owner lock %p", active_thread(), owner, &this );
    // decrement recursion count; when it reaches zero, release the lock and set a new owner if one is waiting
    recursion_count--;
    if ( recursion_count == 0 ) {
        // pop_and_set_new_owner( this );
        thread$ * t = &try_pop_front( blocked_threads );
        owner = t;
        recursion_count = ( t ? 1 : 0 );
        unpark( t );
    }
    unlock( lock );
}

static inline void on_notify(simple_owner_lock & this, struct thread$ * t ) with(this) {
    lock( lock __cfaabi_dbg_ctx2 );
    // lock held
    if ( owner != 0p ) {
        insert_last( blocked_threads, *t );
        unlock( lock );
    }
    // lock not held
    else {
        owner = t;
        recursion_count = 1;
        unpark( t );
        unlock( lock );
    }
}

static inline size_t on_wait(simple_owner_lock & this) with(this) {
    lock( lock __cfaabi_dbg_ctx2 );
    /* paranoid */ verifyf( owner != 0p, "Attempt to release lock %p that isn't held", &this );
    /* paranoid */ verifyf( owner == active_thread(), "Thread %p other than the owner %p attempted to release owner lock %p", active_thread(), owner, &this );

    size_t ret = recursion_count;

    // pop_and_set_new_owner( this );

    thread$ * t = &try_pop_front( blocked_threads );
    owner = t;
    recursion_count = ( t ? 1 : 0 );
    unpark( t );

    unlock( lock );
    return ret;
}

static inline void on_wakeup(simple_owner_lock & this, size_t recursion ) with(this) { recursion_count = recursion; }

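// Example usage (illustrative sketch): recursive acquisition by the owner;
// each lock must be matched by an unlock before the lock is released.
//   simple_owner_lock l;
//   lock( l );
//   lock( l );        // recursion_count == 2
//   unlock( l );
//   unlock( l );      // releases and hands off to a waiter, if any
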
//-----------------------------------------------------------------------------
// Spin Queue Lock

// - No reacquire for cond var
// - No recursive acquisition
// - No ownership
// - spin lock with no locking/atomics in unlock
struct spin_queue_lock {
    // Spin lock used for mutual exclusion
    mcs_spin_lock lock;

    // flag showing if lock is held
    volatile bool held;

    #ifdef __CFA_DEBUG__
    // for deadlock detection
    struct thread$ * owner;
    #endif
};

static inline void  ?{}( spin_queue_lock & this ) with(this) {
    lock{};
    held = false;
}
static inline void ^?{}( spin_queue_lock & this ) {}
static inline void ?{}( spin_queue_lock & this, spin_queue_lock this2 ) = void;
static inline void ?=?( spin_queue_lock & this, spin_queue_lock this2 ) = void;

// if this is called recursively IT WILL DEADLOCK!!!!!
static inline void lock(spin_queue_lock & this) with(this) {
    mcs_spin_node node;
    #ifdef __CFA_DEBUG__
    assert(!(held && owner == active_thread()));
    #endif
    lock( lock, node );
    while(held) Pause();
    held = true;
    // printf("locked\n");
    unlock( lock, node );
    #ifdef __CFA_DEBUG__
    owner = active_thread();
    #endif
}

static inline void unlock(spin_queue_lock & this) with(this) {
    // printf("unlocked\n");
    #ifdef __CFA_DEBUG__
    owner = 0p;
    #endif
    held = false;
}

static inline void on_notify(spin_queue_lock & this, struct thread$ * t ) { unpark(t); }
static inline size_t on_wait(spin_queue_lock & this) { unlock(this); return 0; }
static inline void on_wakeup(spin_queue_lock & this, size_t recursion ) { }

//-----------------------------------------------------------------------------
// MCS Block Spin Lock

// - No reacquire for cond var
// - No recursive acquisition
// - No ownership
// - Blocks, but the first node spins (like spin_queue_lock, except threads other than the first block instead of spinning)
struct mcs_block_spin_lock {
    // Spin lock used for mutual exclusion
    mcs_lock lock;

    // flag showing if lock is held
    volatile bool held;

    #ifdef __CFA_DEBUG__
    // for deadlock detection
    struct thread$ * owner;
    #endif
};

static inline void  ?{}( mcs_block_spin_lock & this ) with(this) {
    lock{};
    held = false;
}
static inline void ^?{}( mcs_block_spin_lock & this ) {}
static inline void ?{}( mcs_block_spin_lock & this, mcs_block_spin_lock this2 ) = void;
static inline void ?=?( mcs_block_spin_lock & this, mcs_block_spin_lock this2 ) = void;

// if this is called recursively IT WILL DEADLOCK!!!!!
static inline void lock(mcs_block_spin_lock & this) with(this) {
    mcs_node node;
    #ifdef __CFA_DEBUG__
    assert(!(held && owner == active_thread()));
    #endif
    lock( lock, node );
    while(held) Pause();
    held = true;
    unlock( lock, node );
    #ifdef __CFA_DEBUG__
    owner = active_thread();
    #endif
}

static inline void unlock(mcs_block_spin_lock & this) with(this) {
    #ifdef __CFA_DEBUG__
    owner = 0p;
    #endif
    held = false;
}

static inline void on_notify(mcs_block_spin_lock & this, struct thread$ * t ) { unpark(t); }
static inline size_t on_wait(mcs_block_spin_lock & this) { unlock(this); return 0; }
static inline void on_wakeup(mcs_block_spin_lock & this, size_t recursion ) { }

//-----------------------------------------------------------------------------
// Block Spin Lock

// - No reacquire for cond var
// - No recursive acquisition
// - No ownership
// - Blocks, but the first node spins (like spin_queue_lock, except threads other than the first block instead of spinning)
struct block_spin_lock {
    // Spin lock used for mutual exclusion
    fast_block_lock lock;

    // flag showing if lock is held
    volatile bool held;

    #ifdef __CFA_DEBUG__
    // for deadlock detection
    struct thread$ * owner;
    #endif
};

static inline void  ?{}( block_spin_lock & this ) with(this) {
    lock{};
    held = false;
}
static inline void ^?{}( block_spin_lock & this ) {}
static inline void ?{}( block_spin_lock & this, block_spin_lock this2 ) = void;
static inline void ?=?( block_spin_lock & this, block_spin_lock this2 ) = void;

// if this is called recursively IT WILL DEADLOCK!!!!!
static inline void lock(block_spin_lock & this) with(this) {
    #ifdef __CFA_DEBUG__
    assert(!(held && owner == active_thread()));
    #endif
    lock( lock );
    while(held) Pause();
    held = true;
    unlock( lock );
    #ifdef __CFA_DEBUG__
    owner = active_thread();
    #endif
}

static inline void unlock(block_spin_lock & this) with(this) {
    #ifdef __CFA_DEBUG__
    owner = 0p;
    #endif
    held = false;
}

static inline void on_notify(block_spin_lock & this, struct thread$ * t ) { unpark(t); }
static inline size_t on_wait(block_spin_lock & this) { unlock(this); return 0; }
static inline void on_wakeup(block_spin_lock & this, size_t recursion ) { }

//-----------------------------------------------------------------------------
// is_blocking_lock
trait is_blocking_lock(L & | sized(L)) {
    // For synchronization locks to use when acquiring
    void on_notify( L &, struct thread$ * );

    // For synchronization locks to use when releasing
    size_t on_wait( L & );

    // to set recursion count after getting signalled
    void on_wakeup( L &, size_t recursion );
};

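// Any lock implementing on_notify/on_wait/on_wakeup satisfies this trait and
// can be paired with the condition variables below (illustrative sketch):
//   single_acquisition_lock l;
//   condition_variable( single_acquisition_lock ) c;
//   // waiter:    lock( l ); wait( c, l ); unlock( l );
//   // signaller: notify_one( c );
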
//-----------------------------------------------------------------------------
// info_thread
// the info thread is a wrapper around a thread used
// to store extra data for use in the condition variable
forall(L & | is_blocking_lock(L)) {
    struct info_thread;

    // for use by sequence
    // info_thread(L) *& Back( info_thread(L) * this );
    // info_thread(L) *& Next( info_thread(L) * this );
}

//-----------------------------------------------------------------------------
// Synchronization Locks
forall(L & | is_blocking_lock(L)) {

    //-----------------------------------------------------------------------------
    // condition_variable

    // The multi-tool condition variable
    // - can wait with a timeout, returning on either a signal or the timeout
    // - can wait without passing a lock
    // - can have waiters reacquire different locks while waiting on the same cond var
    // - has shadow queue
    // - can be signalled outside of critical sections with no locks held
    struct condition_variable {
        // Spin lock used for mutual exclusion
        __spinlock_t lock;

        // List of blocked threads
        dlist( info_thread(L) ) blocked_threads;

        // Count of current blocked threads
        int count;
    };

    void  ?{}( condition_variable(L) & this );
    void ^?{}( condition_variable(L) & this );

    bool notify_one( condition_variable(L) & this );
    bool notify_all( condition_variable(L) & this );

    uintptr_t front( condition_variable(L) & this );

    bool empty  ( condition_variable(L) & this );
    int  counter( condition_variable(L) & this );

    void wait( condition_variable(L) & this );
    void wait( condition_variable(L) & this, uintptr_t info );
    bool wait( condition_variable(L) & this, Duration duration );
    bool wait( condition_variable(L) & this, uintptr_t info, Duration duration );

    void wait( condition_variable(L) & this, L & l );
    void wait( condition_variable(L) & this, L & l, uintptr_t info );
    bool wait( condition_variable(L) & this, L & l, Duration duration );
    bool wait( condition_variable(L) & this, L & l, uintptr_t info, Duration duration );

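    // Example usage (illustrative sketch): timed wait while holding a lock;
    // the lock is released for the wait and reacquired before returning
    // (assumed: the bool result is true when signalled before the timeout).
    //   owner_lock l;
    //   condition_variable( owner_lock ) c;
    //   lock( l );
    //   bool signalled = wait( c, l, 1`s );
    //   unlock( l );
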
    //-----------------------------------------------------------------------------
    // fast_cond_var

    // The trimmed and slim condition variable
    // - no internal lock, so a lock must be held while using this cond var
    // - signalling without holding the branded lock is UNSAFE!
    // - only one lock can ever be used with it; the cond var is branded by its first use

    struct fast_cond_var {
        // List of blocked threads
        dlist( info_thread(L) ) blocked_threads;
        #ifdef __CFA_DEBUG__
        L * lock_used;
        #endif
    };

    void  ?{}( fast_cond_var(L) & this );
    void ^?{}( fast_cond_var(L) & this );

    bool notify_one( fast_cond_var(L) & this );
    bool notify_all( fast_cond_var(L) & this );

    uintptr_t front( fast_cond_var(L) & this );
    bool empty  ( fast_cond_var(L) & this );

    void wait( fast_cond_var(L) & this, L & l );
    void wait( fast_cond_var(L) & this, L & l, uintptr_t info );


    //-----------------------------------------------------------------------------
    // pthread_cond_var
    //
    // - cond var with minimal footprint
    // - supports operations needed for pthread cond

    struct pthread_cond_var {
        dlist( info_thread(L) ) blocked_threads;
        __spinlock_t lock;
    };

    void  ?{}( pthread_cond_var(L) & this );
    void ^?{}( pthread_cond_var(L) & this );

    bool notify_one( pthread_cond_var(L) & this );
    bool notify_all( pthread_cond_var(L) & this );

    uintptr_t front( pthread_cond_var(L) & this );
    bool empty ( pthread_cond_var(L) & this );

    void wait( pthread_cond_var(L) & this, L & l );
    void wait( pthread_cond_var(L) & this, L & l, uintptr_t info );
    bool wait( pthread_cond_var(L) & this, L & l, timespec t );
    bool wait( pthread_cond_var(L) & this, L & l, uintptr_t info, timespec t );
}
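
// Example usage (illustrative sketch): timespec-based timed wait, mirroring
// pthread_cond_timedwait (assumed: the bool result is true when signalled
// before the timeout; the timespec interpretation follows the runtime's convention).
//   simple_owner_lock l;
//   pthread_cond_var( simple_owner_lock ) c;
//   lock( l );
//   timespec t = { 1, 0 };           // hypothetical one-second timeout value
//   bool woken = wait( c, l, t );
//   unlock( l );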