source: libcfa/src/concurrency/locks.hfa @ 50f3f3a

Last change on this file since 50f3f3a was 8a97248, checked in by Peter A. Buhr <pabuhr@…>, 21 months ago

switch from old trait syntax to new trait syntax using forall clause

//
// Cforall Version 1.0.0 Copyright (C) 2021 University of Waterloo
//
// The contents of this file are covered under the licence agreement in the
// file "LICENCE" distributed with Cforall.
//
// locks.hfa -- PUBLIC
// Runtime locks that are used with the runtime thread system.
//
// Author           : Colby Alexander Parsons
// Created On       : Thu Jan 21 19:46:50 2021
// Last Modified By :
// Last Modified On :
// Update Count     :
//

#pragma once

#include <stdbool.h>
#include <stdio.h>

#include "bits/weakso_locks.hfa"
#include "containers/lockfree.hfa"
#include "containers/list.hfa"

#include "limits.hfa"
#include "thread.hfa"

#include "time_t.hfa"
#include "time.hfa"

#include <fstream.hfa>


// futex headers
#include <linux/futex.h>      /* Definition of FUTEX_* constants */
#include <sys/syscall.h>      /* Definition of SYS_* constants */
#include <unistd.h>

// C_TODO: clean up this file and locks.cfa
// - appropriate separation of interface and impl
// - clean up unused/unneeded locks
// - change messy big blocking lock from inheritance to composition to remove need for flags

//-----------------------------------------------------------------------------
// Semaphore
struct semaphore {
        __spinlock_t lock;
        int count;
        __queue_t(thread$) waiting;
};

void  ?{}(semaphore & this, int count = 1);
void ^?{}(semaphore & this);
bool   P (semaphore & this);
bool   V (semaphore & this);
bool   V (semaphore & this, unsigned count);
thread$ * V (semaphore & this, bool );

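// Usage sketch (illustrative addition, not part of the original header): a
// counting semaphore used for simple producer/consumer signalling; `avail` is a
// hypothetical name.
//   semaphore avail{ 0 };          // start with nothing available
//   ... producer:  V( avail );     // publish one item, possibly unblocking a waiter
//   ... consumer:  P( avail );     // block until an item has been published
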
//----------
struct single_acquisition_lock {
        inline blocking_lock;
};

static inline void  ?{}( single_acquisition_lock & this ) {((blocking_lock &)this){ false, false };}
static inline void ^?{}( single_acquisition_lock & this ) {}
static inline void   lock     ( single_acquisition_lock & this ) { lock    ( (blocking_lock &)this ); }
static inline bool   try_lock ( single_acquisition_lock & this ) { return try_lock( (blocking_lock &)this ); }
static inline void   unlock   ( single_acquisition_lock & this ) { unlock  ( (blocking_lock &)this ); }
static inline size_t on_wait  ( single_acquisition_lock & this ) { return on_wait ( (blocking_lock &)this ); }
static inline void   on_wakeup( single_acquisition_lock & this, size_t v ) { on_wakeup ( (blocking_lock &)this, v ); }
static inline void   on_notify( single_acquisition_lock & this, struct thread$ * t ) { on_notify( (blocking_lock &)this, t ); }

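// Usage sketch (illustrative addition, not part of the original header): plain
// mutual exclusion around a critical section; `shared_counter` is a hypothetical name.
//   single_acquisition_lock m;
//   lock( m );
//   shared_counter += 1;           // critical section
//   unlock( m );
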
//----------
struct owner_lock {
        inline blocking_lock;
};

static inline void  ?{}( owner_lock & this ) {((blocking_lock &)this){ true, true };}
static inline void ^?{}( owner_lock & this ) {}
static inline void   lock     ( owner_lock & this ) { lock    ( (blocking_lock &)this ); }
static inline bool   try_lock ( owner_lock & this ) { return try_lock( (blocking_lock &)this ); }
static inline void   unlock   ( owner_lock & this ) { unlock  ( (blocking_lock &)this ); }
static inline size_t on_wait  ( owner_lock & this ) { return on_wait ( (blocking_lock &)this ); }
static inline void   on_wakeup( owner_lock & this, size_t v ) { on_wakeup ( (blocking_lock &)this, v ); }
static inline void   on_notify( owner_lock & this, struct thread$ * t ) { on_notify( (blocking_lock &)this, t ); }

//-----------------------------------------------------------------------------
// MCS Lock
struct mcs_node {
        mcs_node * volatile next;
        single_sem sem;
};

static inline void ?{}(mcs_node & this) { this.next = 0p; }

static inline mcs_node * volatile & ?`next ( mcs_node * node ) {
        return node->next;
}

struct mcs_lock {
        mcs_queue(mcs_node) queue;
};

static inline void lock(mcs_lock & l, mcs_node & n) {
        if(push(l.queue, &n))
                wait(n.sem);
}

static inline void unlock(mcs_lock & l, mcs_node & n) {
        mcs_node * next = advance(l.queue, &n);
        if(next) post(next->sem);
}

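// Usage sketch (illustrative addition, not part of the original header): each
// acquisition supplies its own queue node, which must stay alive until the
// matching unlock, so a stack-allocated node is the usual choice.
//   mcs_lock l;
//   mcs_node n;
//   lock( l, n );
//   ... critical section ...
//   unlock( l, n );
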
//-----------------------------------------------------------------------------
// MCS Spin Lock
// - No recursive acquisition
// - Needs to be released by owner

struct mcs_spin_node {
        mcs_spin_node * volatile next;
        volatile bool locked;
};

struct mcs_spin_queue {
        mcs_spin_node * volatile tail;
};

static inline void ?{}(mcs_spin_node & this) { this.next = 0p; this.locked = true; }

static inline mcs_spin_node * volatile & ?`next ( mcs_spin_node * node ) {
        return node->next;
}

struct mcs_spin_lock {
        mcs_spin_queue queue;
};

static inline void lock(mcs_spin_lock & l, mcs_spin_node & n) {
        mcs_spin_node * prev = __atomic_exchange_n(&l.queue.tail, &n, __ATOMIC_SEQ_CST);
        n.locked = true;
        if(prev == 0p) return;
        prev->next = &n;
        while(__atomic_load_n(&n.locked, __ATOMIC_RELAXED)) Pause();
}

static inline void unlock(mcs_spin_lock & l, mcs_spin_node & n) {
        mcs_spin_node * n_ptr = &n;
        if (__atomic_compare_exchange_n(&l.queue.tail, &n_ptr, 0p, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) return;
        while (__atomic_load_n(&n.next, __ATOMIC_RELAXED) == 0p) {}
        n.next->locked = false;
}

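// Usage sketch (illustrative addition, not part of the original header): same
// node discipline as mcs_lock, but waiting threads spin instead of blocking.
//   mcs_spin_lock l;
//   mcs_spin_node n;
//   lock( l, n );
//   ... short critical section ...
//   unlock( l, n );
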
//-----------------------------------------------------------------------------
// futex_mutex

// - No cond var support
// - Kernel thd blocking alternative to the spinlock
// - No ownership (will deadlock on reacq)
struct futex_mutex {
        // lock state: any state other than UNLOCKED is locked
        // enum LockState { UNLOCKED = 0, UNCONTENDED = 1, CONTENDED = 2 };

        // stores a lock state
        int val;
};

// to use for FUTEX_WAKE and FUTEX_WAIT (other futex calls will need more params)
static inline int futex(int *uaddr, int futex_op, int val) {
        return syscall(SYS_futex, uaddr, futex_op, val, NULL, NULL, 0);
}

static inline void  ?{}( futex_mutex & this ) with(this) { val = 0; }

static inline bool internal_try_lock(futex_mutex & this, int & compare_val) with(this) {
        return __atomic_compare_exchange_n((int*)&val, (int*)&compare_val, 1, false, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
}

static inline int internal_exchange(futex_mutex & this) with(this) {
        return __atomic_exchange_n((int*)&val, 2, __ATOMIC_ACQUIRE);
}

// if this is called recursively IT WILL DEADLOCK!!!!!
static inline void lock(futex_mutex & this) with(this) {
        int state = 0; // expect the unlocked state (0) for the initial try_lock

        // // linear backoff omitted for now
        // for( int spin = 4; spin < 1024; spin += spin) {
        //      state = 0;
        //      // if unlocked, lock and return
        //      if (internal_try_lock(this, state)) return;
        //      if (2 == state) break;
        //      for (int i = 0; i < spin; i++) Pause();
        // }

        // no contention, try to acquire
        if (internal_try_lock(this, state)) return;

        // if not in contended state, set to be in contended state
        if (state != 2) state = internal_exchange(this);

        // block and spin until we win the lock
        while (state != 0) {
                futex((int*)&val, FUTEX_WAIT, 2); // if val is not 2 this returns with EWOULDBLOCK
                state = internal_exchange(this);
        }
}

static inline void unlock(futex_mutex & this) with(this) {
        // if uncontended do atomic unlock and then return
        if (__atomic_fetch_sub(&val, 1, __ATOMIC_RELEASE) == 1) return; // TODO: try acq/rel

        // otherwise threads are blocked so we must wake one
        __atomic_store_n((int *)&val, 0, __ATOMIC_RELEASE);
        futex((int *)&val, FUTEX_WAKE, 1);
}

static inline void on_notify( futex_mutex & f, thread$ * t){ unpark(t); }
static inline size_t on_wait( futex_mutex & f ) {unlock(f); return 0;}

// to set recursion count after getting signalled
static inline void on_wakeup( futex_mutex & f, size_t recursion ) {}

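// Usage sketch (illustrative addition, not part of the original header): a
// non-recursive mutex that parks the kernel thread on contention via futex(2).
//   futex_mutex m;
//   lock( m );                     // must not already be held by this thread
//   ... critical section ...
//   unlock( m );
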
225//-----------------------------------------------------------------------------
226// CLH Spinlock
227// - No recursive acquisition
228// - Needs to be released by owner
229
230struct clh_lock {
231        volatile bool * volatile tail;
232    volatile bool * volatile head;
233};
234
235static inline void  ?{}( clh_lock & this ) { this.tail = malloc(); *this.tail = true; }
236static inline void ^?{}( clh_lock & this ) { free(this.tail); }
237
238static inline void lock(clh_lock & l) {
239        thread$ * curr_thd = active_thread();
240        *(curr_thd->clh_node) = false;
241        volatile bool * prev = __atomic_exchange_n((bool **)(&l.tail), (bool *)(curr_thd->clh_node), __ATOMIC_SEQ_CST);
242        while(!__atomic_load_n(prev, __ATOMIC_SEQ_CST)) Pause();
243    __atomic_store_n((bool **)(&l.head), (bool *)curr_thd->clh_node, __ATOMIC_SEQ_CST);
244    curr_thd->clh_node = prev;
245}
246
247static inline void unlock(clh_lock & l) {
248        __atomic_store_n((bool *)(l.head), true, __ATOMIC_SEQ_CST);
249}
250
251static inline void on_notify(clh_lock & this, struct thread$ * t ) { unpark(t); }
252static inline size_t on_wait(clh_lock & this) { unlock(this); return 0; }
253static inline void on_wakeup(clh_lock & this, size_t recursion ) { lock(this); }
254
255
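// Usage sketch (illustrative addition, not part of the original header): the
// queue node lives in the thread descriptor (clh_node), so no per-acquisition
// node is passed.
//   clh_lock l;
//   lock( l );
//   ... short critical section ...
//   unlock( l );
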
//-----------------------------------------------------------------------------
// Exponential backoff then block lock
struct exp_backoff_then_block_lock {
        // Spin lock used for mutual exclusion
        __spinlock_t spinlock;

        // List of blocked threads
        dlist( thread$ ) blocked_threads;

        // Used for comparing and exchanging
        volatile size_t lock_value;
};

static inline void  ?{}( exp_backoff_then_block_lock & this ) {
        this.spinlock{};
        this.blocked_threads{};
        this.lock_value = 0;
}
static inline void ^?{}( exp_backoff_then_block_lock & this ) {}
// static inline void ?{}( exp_backoff_then_block_lock & this, exp_backoff_then_block_lock this2 ) = void;
// static inline void ?=?( exp_backoff_then_block_lock & this, exp_backoff_then_block_lock this2 ) = void;

static inline bool internal_try_lock(exp_backoff_then_block_lock & this, size_t & compare_val) with(this) {
        if (__atomic_compare_exchange_n(&lock_value, &compare_val, 1, false, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)) {
                return true;
        }
        return false;
}

static inline bool try_lock(exp_backoff_then_block_lock & this) { size_t compare_val = 0; return internal_try_lock(this, compare_val); }

static inline bool try_lock_contention(exp_backoff_then_block_lock & this) with(this) {
        if (__atomic_exchange_n(&lock_value, 2, __ATOMIC_ACQUIRE) == 0) {
                return true;
        }
        return false;
}

static inline bool block(exp_backoff_then_block_lock & this) with(this) {
        lock( spinlock __cfaabi_dbg_ctx2 ); // TODO change to lockfree queue (MPSC)
        if (lock_value != 2) {
                unlock( spinlock );
                return true;
        }
        insert_last( blocked_threads, *active_thread() );
        unlock( spinlock );
        park( );
        return true;
}

static inline void lock(exp_backoff_then_block_lock & this) with(this) {
        size_t compare_val = 0;
        int spin = 4;
        // exponential backoff (spin count doubles each round)
        for( ;; ) {
                compare_val = 0;
                if (internal_try_lock(this, compare_val)) return;
                if (2 == compare_val) break;
                for (int i = 0; i < spin; i++) Pause();
                if (spin >= 1024) break;
                spin += spin;
        }

        if(2 != compare_val && try_lock_contention(this)) return;
        // block until signalled
        while (block(this)) if(try_lock_contention(this)) return;
}

static inline void unlock(exp_backoff_then_block_lock & this) with(this) {
        if (__atomic_exchange_n(&lock_value, 0, __ATOMIC_RELEASE) == 1) return;
        lock( spinlock __cfaabi_dbg_ctx2 );
        thread$ * t = &try_pop_front( blocked_threads );
        unlock( spinlock );
        unpark( t );
}

static inline void on_notify(exp_backoff_then_block_lock & this, struct thread$ * t ) { unpark(t); }
static inline size_t on_wait(exp_backoff_then_block_lock & this) { unlock(this); return 0; }
static inline void on_wakeup(exp_backoff_then_block_lock & this, size_t recursion ) { lock(this); }

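// Usage sketch (illustrative addition, not part of the original header): spins
// with growing backoff first, then parks the thread if contention persists.
//   exp_backoff_then_block_lock l;
//   if ( try_lock( l ) ) { /* fast path */ unlock( l ); }
//   else { lock( l ); /* critical section */ unlock( l ); }
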
//-----------------------------------------------------------------------------
// Fast Block Lock

// minimal blocking lock
// - No reacquire for cond var
// - No recursive acquisition
// - No ownership
struct fast_block_lock {
        // List of blocked threads
        dlist( thread$ ) blocked_threads;

        // Spin lock used for mutual exclusion
        __spinlock_t lock;

        // flag showing if lock is held
        bool held:1;
};

static inline void  ?{}( fast_block_lock & this ) with(this) {
        lock{};
        blocked_threads{};
        held = false;
}
static inline void ^?{}( fast_block_lock & this ) {}
static inline void ?{}( fast_block_lock & this, fast_block_lock this2 ) = void;
static inline void ?=?( fast_block_lock & this, fast_block_lock this2 ) = void;

// if this is called recursively IT WILL DEADLOCK!!!!!
static inline void lock(fast_block_lock & this) with(this) {
        lock( lock __cfaabi_dbg_ctx2 );
        if ( held ) {
                insert_last( blocked_threads, *active_thread() );
                unlock( lock );
                park( );
                return;
        }
        held = true;
        unlock( lock );
}

static inline void unlock(fast_block_lock & this) with(this) {
        lock( lock __cfaabi_dbg_ctx2 );
        /* paranoid */ verifyf( held != false, "Attempt to release lock %p that isn't held", &this );
        thread$ * t = &try_pop_front( blocked_threads );
        held = ( t ? true : false );
        unpark( t );
        unlock( lock );
}

static inline void on_notify(fast_block_lock & this, struct thread$ * t ) with(this) {
        lock( lock __cfaabi_dbg_ctx2 );
        insert_last( blocked_threads, *t );
        unlock( lock );
}
static inline size_t on_wait(fast_block_lock & this) { unlock(this); return 0; }
static inline void on_wakeup(fast_block_lock & this, size_t recursion ) { }

//-----------------------------------------------------------------------------
// simple_owner_lock

// pthread owner lock
// - reacquire for cond var
// - recursive acquisition
// - ownership
struct simple_owner_lock {
        // List of blocked threads
        dlist( thread$ ) blocked_threads;

        // Spin lock used for mutual exclusion
        __spinlock_t lock;

        // owner of the lock; 0p when the lock is not held
        struct thread$ * owner;

        size_t recursion_count;
};

static inline void  ?{}( simple_owner_lock & this ) with(this) {
        lock{};
        blocked_threads{};
        owner = 0p;
        recursion_count = 0;
}
static inline void ^?{}( simple_owner_lock & this ) {}
static inline void ?{}( simple_owner_lock & this, simple_owner_lock this2 ) = void;
static inline void ?=?( simple_owner_lock & this, simple_owner_lock this2 ) = void;

static inline void lock(simple_owner_lock & this) with(this) {
        if (owner == active_thread()) {
                recursion_count++;
                return;
        }
        lock( lock __cfaabi_dbg_ctx2 );

        if (owner != 0p) {
                insert_last( blocked_threads, *active_thread() );
                unlock( lock );
                park( );
                return;
        }
        owner = active_thread();
        recursion_count = 1;
        unlock( lock );
}

// TODO: fix duplicate def issue and bring this back
// void pop_and_set_new_owner( simple_owner_lock & this ) with( this ) {
        // thread$ * t = &try_pop_front( blocked_threads );
        // owner = t;
        // recursion_count = ( t ? 1 : 0 );
        // unpark( t );
// }

static inline void unlock(simple_owner_lock & this) with(this) {
        lock( lock __cfaabi_dbg_ctx2 );
        /* paranoid */ verifyf( owner != 0p, "Attempt to release lock %p that isn't held", &this );
        /* paranoid */ verifyf( owner == active_thread(), "Thread %p other than the owner %p attempted to release owner lock %p", owner, active_thread(), &this );
        // when the recursion count reaches zero, release the lock and set a new owner if one is waiting
        recursion_count--;
        if ( recursion_count == 0 ) {
                // pop_and_set_new_owner( this );
                thread$ * t = &try_pop_front( blocked_threads );
                owner = t;
                recursion_count = ( t ? 1 : 0 );
                unpark( t );
        }
        unlock( lock );
}

static inline void on_notify(simple_owner_lock & this, struct thread$ * t ) with(this) {
        lock( lock __cfaabi_dbg_ctx2 );
        // lock held
        if ( owner != 0p ) {
                insert_last( blocked_threads, *t );
        }
        // lock not held
        else {
                owner = t;
                recursion_count = 1;
                unpark( t );
        }
        unlock( lock );
}

static inline size_t on_wait(simple_owner_lock & this) with(this) {
        lock( lock __cfaabi_dbg_ctx2 );
        /* paranoid */ verifyf( owner != 0p, "Attempt to release lock %p that isn't held", &this );
        /* paranoid */ verifyf( owner == active_thread(), "Thread %p other than the owner %p attempted to release owner lock %p", owner, active_thread(), &this );

        size_t ret = recursion_count;

        // pop_and_set_new_owner( this );

        thread$ * t = &try_pop_front( blocked_threads );
        owner = t;
        recursion_count = ( t ? 1 : 0 );
        unpark( t );

        unlock( lock );
        return ret;
}

static inline void on_wakeup(simple_owner_lock & this, size_t recursion ) with(this) { recursion_count = recursion; }

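// Usage sketch (illustrative addition, not part of the original header):
// ownership allows re-acquisition by the owning thread; every lock must be
// matched by an unlock.
//   simple_owner_lock l;
//   lock( l );
//   lock( l );                     // recursive acquire by the owner, count becomes 2
//   unlock( l );
//   unlock( l );                   // count reaches 0, lock released or handed to a waiter
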
//-----------------------------------------------------------------------------
// Spin Queue Lock

// - No reacquire for cond var
// - No recursive acquisition
// - No ownership
// - spin lock with no queueing or read-modify-write atomics in unlock (just an atomic store)
struct spin_queue_lock {
        // Spin lock used for mutual exclusion
        mcs_spin_lock lock;

        // flag showing if lock is held
        volatile bool held;
};

static inline void  ?{}( spin_queue_lock & this ) with(this) {
        lock{};
        held = false;
}
static inline void ^?{}( spin_queue_lock & this ) {}
static inline void ?{}( spin_queue_lock & this, spin_queue_lock this2 ) = void;
static inline void ?=?( spin_queue_lock & this, spin_queue_lock this2 ) = void;

// if this is called recursively IT WILL DEADLOCK!
static inline void lock(spin_queue_lock & this) with(this) {
        mcs_spin_node node;
        lock( lock, node );
        while(__atomic_load_n(&held, __ATOMIC_SEQ_CST)) Pause();
        __atomic_store_n(&held, true, __ATOMIC_SEQ_CST);
        unlock( lock, node );
}

static inline void unlock(spin_queue_lock & this) with(this) {
        __atomic_store_n(&held, false, __ATOMIC_RELEASE);
}

static inline void on_notify(spin_queue_lock & this, struct thread$ * t ) {
        unpark(t);
}
static inline size_t on_wait(spin_queue_lock & this) { unlock(this); return 0; }
static inline void on_wakeup(spin_queue_lock & this, size_t recursion ) { lock(this); }


//-----------------------------------------------------------------------------
// MCS Block Spin Lock

// - No reacquire for cond var
// - No recursive acquisition
// - No ownership
// - First node spins, the rest block (like spin_queue_lock, except threads other than the first block instead of spinning)
struct mcs_block_spin_lock {
        // Spin lock used for mutual exclusion
        mcs_lock lock;

        // flag showing if lock is held
        volatile bool held;
};

static inline void  ?{}( mcs_block_spin_lock & this ) with(this) {
        lock{};
        held = false;
}
static inline void ^?{}( mcs_block_spin_lock & this ) {}
static inline void ?{}( mcs_block_spin_lock & this, mcs_block_spin_lock this2 ) = void;
static inline void ?=?( mcs_block_spin_lock & this, mcs_block_spin_lock this2 ) = void;

// if this is called recursively IT WILL DEADLOCK!!!!!
static inline void lock(mcs_block_spin_lock & this) with(this) {
        mcs_node node;
        lock( lock, node );
        while(__atomic_load_n(&held, __ATOMIC_SEQ_CST)) Pause();
        __atomic_store_n(&held, true, __ATOMIC_SEQ_CST);
        unlock( lock, node );
}

static inline void unlock(mcs_block_spin_lock & this) with(this) {
        __atomic_store_n(&held, false, __ATOMIC_SEQ_CST);
}

static inline void on_notify(mcs_block_spin_lock & this, struct thread$ * t ) { unpark(t); }
static inline size_t on_wait(mcs_block_spin_lock & this) { unlock(this); return 0; }
static inline void on_wakeup(mcs_block_spin_lock & this, size_t recursion ) { lock(this); }

//-----------------------------------------------------------------------------
// Block Spin Lock

// - No reacquire for cond var
// - No recursive acquisition
// - No ownership
// - First node spins, the rest block (like spin_queue_lock, except threads other than the first block instead of spinning)
struct block_spin_lock {
        // Spin lock used for mutual exclusion
        fast_block_lock lock;

        // flag showing if lock is held
        volatile bool held;
};

static inline void  ?{}( block_spin_lock & this ) with(this) {
        lock{};
        held = false;
}
static inline void ^?{}( block_spin_lock & this ) {}
static inline void ?{}( block_spin_lock & this, block_spin_lock this2 ) = void;
static inline void ?=?( block_spin_lock & this, block_spin_lock this2 ) = void;

// if this is called recursively IT WILL DEADLOCK!!!!!
static inline void lock(block_spin_lock & this) with(this) {
        lock( lock );
        while(__atomic_load_n(&held, __ATOMIC_SEQ_CST)) Pause();
        __atomic_store_n(&held, true, __ATOMIC_RELEASE);
        unlock( lock );
}

static inline void unlock(block_spin_lock & this) with(this) {
        __atomic_store_n(&held, false, __ATOMIC_RELEASE);
}

static inline void on_notify(block_spin_lock & this, struct thread$ * t ) with(this.lock) {
        // first we acquire internal fast_block_lock
        lock( lock __cfaabi_dbg_ctx2 );
        if ( held ) { // if internal fast_block_lock is held
                insert_last( blocked_threads, *t );
                unlock( lock );
                return;
        }
        // if internal fast_block_lock is not held
        held = true;
        unlock( lock );

        unpark(t);
}
static inline size_t on_wait(block_spin_lock & this) { unlock(this); return 0; }
static inline void on_wakeup(block_spin_lock & this, size_t recursion ) with(this) {
        // now we acquire the entire block_spin_lock upon waking up
        while(__atomic_load_n(&held, __ATOMIC_SEQ_CST)) Pause();
        __atomic_store_n(&held, true, __ATOMIC_RELEASE);
        unlock( lock ); // now we release the internal fast_block_lock
}

//-----------------------------------------------------------------------------
// is_blocking_lock
forall( L & | sized(L) )
trait is_blocking_lock {
        // For synchronization locks to use when acquiring
        void on_notify( L &, struct thread$ * );

        // For synchronization locks to use when releasing
        size_t on_wait( L & );

        // to set recursion count after getting signalled
        void on_wakeup( L &, size_t recursion );
};

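// Illustrative note (not part of the original header): a lock satisfies this
// trait by providing the three routines above; for example, single_acquisition_lock
// defines
//   void   on_notify( single_acquisition_lock &, struct thread$ * );
//   size_t on_wait  ( single_acquisition_lock & );
//   void   on_wakeup( single_acquisition_lock &, size_t );
// so condition_variable( single_acquisition_lock ) below is well formed.
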
//-----------------------------------------------------------------------------
// info_thread
// the info thread is a wrapper around a thread, used
// to store extra data for use in the condition variable
forall(L & | is_blocking_lock(L)) {
        struct info_thread;

        // // for use by sequence
        // info_thread(L) *& Back( info_thread(L) * this );
        // info_thread(L) *& Next( info_thread(L) * this );
}

//-----------------------------------------------------------------------------
// Synchronization Locks
forall(L & | is_blocking_lock(L)) {

        //-----------------------------------------------------------------------------
        // condition_variable

        // The multi-tool condition variable
        // - wait can take a timeout and returns on either a signal or the timeout
        // - can wait without passing a lock
        // - can have waiters reacquire different locks while waiting on the same cond var
        // - has shadow queue
        // - can be signalled outside of critical sections with no locks held
        struct condition_variable {
                // Spin lock used for mutual exclusion
                __spinlock_t lock;

                // List of blocked threads
                dlist( info_thread(L) ) blocked_threads;

                // Count of current blocked threads
                int count;
        };


        void  ?{}( condition_variable(L) & this );
        void ^?{}( condition_variable(L) & this );

        bool notify_one( condition_variable(L) & this );
        bool notify_all( condition_variable(L) & this );

        uintptr_t front( condition_variable(L) & this );

        bool empty  ( condition_variable(L) & this );
        int  counter( condition_variable(L) & this );

        void wait( condition_variable(L) & this );
        void wait( condition_variable(L) & this, uintptr_t info );
        bool wait( condition_variable(L) & this, Duration duration );
        bool wait( condition_variable(L) & this, uintptr_t info, Duration duration );

        void wait( condition_variable(L) & this, L & l );
        void wait( condition_variable(L) & this, L & l, uintptr_t info );
        bool wait( condition_variable(L) & this, L & l, Duration duration );
        bool wait( condition_variable(L) & this, L & l, uintptr_t info, Duration duration );

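        // Usage sketch (illustrative addition, not part of the original header;
        // `ready` is a hypothetical shared flag): waiting with a lock releases it
        // atomically and reacquires it before wait returns.
        //   single_acquisition_lock m;
        //   condition_variable( single_acquisition_lock ) c;
        //   // waiter:                               // signaller:
        //   lock( m );                               notify_one( c );
        //   while ( ! ready ) wait( c, m );
        //   unlock( m );
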
        //-----------------------------------------------------------------------------
        // fast_cond_var

        // The trimmed and slim condition variable
        // - no internal lock, so you must hold a lock while using this cond var
        // - signalling without holding the branded lock is UNSAFE!
        // - only allows usage of one lock; the cond var is branded after first usage

        struct fast_cond_var {
                // List of blocked threads
                dlist( info_thread(L) ) blocked_threads;
                #ifdef __CFA_DEBUG__
                L * lock_used;
                #endif
        };

        void  ?{}( fast_cond_var(L) & this );
        void ^?{}( fast_cond_var(L) & this );

        bool notify_one( fast_cond_var(L) & this );
        bool notify_all( fast_cond_var(L) & this );

        uintptr_t front( fast_cond_var(L) & this );
        bool empty  ( fast_cond_var(L) & this );

        void wait( fast_cond_var(L) & this, L & l );
        void wait( fast_cond_var(L) & this, L & l, uintptr_t info );


        //-----------------------------------------------------------------------------
        // pthread_cond_var
        //
        // - cond var with minimal footprint
        // - supports operations needed for pthread cond

        struct pthread_cond_var {
                dlist( info_thread(L) ) blocked_threads;
                __spinlock_t lock;
        };

        void  ?{}( pthread_cond_var(L) & this );
        void ^?{}( pthread_cond_var(L) & this );

        bool notify_one( pthread_cond_var(L) & this );
        bool notify_all( pthread_cond_var(L) & this );

        uintptr_t front( pthread_cond_var(L) & this );
        bool empty ( pthread_cond_var(L) & this );

        void wait( pthread_cond_var(L) & this, L & l );
        void wait( pthread_cond_var(L) & this, L & l, uintptr_t info );
        bool wait( pthread_cond_var(L) & this, L & l, timespec t );
        bool wait( pthread_cond_var(L) & this, L & l, uintptr_t info, timespec t );
}