source: libcfa/src/concurrency/locks.hfa @ 63be3387

Last change on this file since 63be3387 was 63be3387, checked in by caparson <caparson@…>, 19 months ago

Merge branch 'master' of plg.uwaterloo.ca:software/cfa/cfa-cc

File size: 24.3 KB
1//
2// Cforall Version 1.0.0 Copyright (C) 2021 University of Waterloo
3//
4// The contents of this file are covered under the licence agreement in the
5// file "LICENCE" distributed with Cforall.
6//
7// locks.hfa -- PUBLIC
8// Runtime locks that are used with the runtime thread system.
9//
10// Author           : Colby Alexander Parsons
11// Created On       : Thu Jan 21 19:46:50 2021
12// Last Modified By :
13// Last Modified On :
14// Update Count     :
15//
16
17#pragma once
18
19#include <stdbool.h>
20#include <stdio.h>
21
22#include "bits/weakso_locks.hfa"
23#include "containers/lockfree.hfa"
24#include "containers/list.hfa"
25
26#include "limits.hfa"
27#include "thread.hfa"
28
29#include "time_t.hfa"
30#include "time.hfa"
31
32#include <fstream.hfa>
33
34
35// futex headers
36#include <linux/futex.h>      /* Definition of FUTEX_* constants */
37#include <sys/syscall.h>      /* Definition of SYS_* constants */
38#include <unistd.h>
39
40//-----------------------------------------------------------------------------
41// Semaphore
42struct semaphore {
43        __spinlock_t lock;
44        int count;
45        __queue_t(thread$) waiting;
46};
47
48void  ?{}(semaphore & this, int count = 1);
49void ^?{}(semaphore & this);
50bool   P (semaphore & this);
51bool   V (semaphore & this);
52bool   V (semaphore & this, unsigned count);
53thread$ * V (semaphore & this, bool );
54
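// Usage sketch (editorial addition, not part of the original header): a counting
// semaphore handing items from a producer to a consumer.  P blocks while the count
// is exhausted; V wakes a waiter or credits the count.  The two routines are
// assumed to run on different threads and their names are illustrative.
semaphore example_items{ 0 };                           // nothing available initially

void example_produce() {
        // ... publish an item ...
        V( example_items );                             // credit one item / wake a consumer
}

void example_consume() {
        P( example_items );                             // block until an item is available
        // ... take the item ...
}
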
55//----------
56struct single_acquisition_lock {
57        inline blocking_lock;
58};
59
60static inline void  ?{}( single_acquisition_lock & this ) {((blocking_lock &)this){ false, false };}
61static inline void ^?{}( single_acquisition_lock & this ) {}
62static inline void   lock     ( single_acquisition_lock & this ) { lock    ( (blocking_lock &)this ); }
63static inline bool   try_lock ( single_acquisition_lock & this ) { return try_lock( (blocking_lock &)this ); }
64static inline void   unlock   ( single_acquisition_lock & this ) { unlock  ( (blocking_lock &)this ); }
65static inline size_t on_wait  ( single_acquisition_lock & this ) { return on_wait ( (blocking_lock &)this ); }
66static inline void   on_wakeup( single_acquisition_lock & this, size_t v ) { on_wakeup ( (blocking_lock &)this, v ); }
67static inline void   on_notify( single_acquisition_lock & this, struct thread$ * t ) { on_notify( (blocking_lock &)this, t ); }
68
69//----------
70struct owner_lock {
71        inline blocking_lock;
72};
73
74static inline void  ?{}( owner_lock & this ) {((blocking_lock &)this){ true, true };}
75static inline void ^?{}( owner_lock & this ) {}
76static inline void   lock     ( owner_lock & this ) { lock    ( (blocking_lock &)this ); }
77static inline bool   try_lock ( owner_lock & this ) { return try_lock( (blocking_lock &)this ); }
78static inline void   unlock   ( owner_lock & this ) { unlock  ( (blocking_lock &)this ); }
79static inline size_t on_wait  ( owner_lock & this ) { return on_wait ( (blocking_lock &)this ); }
80static inline void   on_wakeup( owner_lock & this, size_t v ) { on_wakeup ( (blocking_lock &)this, v ); }
81static inline void   on_notify( owner_lock & this, struct thread$ * t ) { on_notify( (blocking_lock &)this, t ); }
82
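// Usage sketch (editorial addition): single_acquisition_lock and owner_lock are
// thin wrappers around blocking_lock that differ only in their constructor flags;
// owner_lock tracks an owner and tolerates repeated acquisition by that owner,
// which single_acquisition_lock (as its name suggests) does not.  Names are
// illustrative.
owner_lock example_olock;

void example_inner() {
        lock( example_olock );                          // same owner: acquisition nests
        // ... critical section ...
        unlock( example_olock );
}

void example_outer() {
        lock( example_olock );
        example_inner();                                // nested acquisition is permitted
        unlock( example_olock );
}
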
83//-----------------------------------------------------------------------------
84// MCS Lock
85struct mcs_node {
86        mcs_node * volatile next;
87        single_sem sem;
88};
89
90static inline void ?{}(mcs_node & this) { this.next = 0p; }
91
92static inline mcs_node * volatile & ?`next ( mcs_node * node ) {
93        return node->next;
94}
95
96struct mcs_lock {
97        mcs_queue(mcs_node) queue;
98};
99
100static inline void lock(mcs_lock & l, mcs_node & n) {
101        if(push(l.queue, &n))
102                wait(n.sem);
103}
104
105static inline void unlock(mcs_lock & l, mcs_node & n) {
106        mcs_node * next = advance(l.queue, &n);
107        if(next) post(next->sem);
108}
109
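// Usage sketch (editorial addition): each acquiring thread supplies its own
// mcs_node, typically on its stack; the node links the caller into the MCS queue
// and must remain valid until the matching unlock.
mcs_lock example_mcs;

void example_mcs_critical() {
        mcs_node node;                                  // per-acquisition queue node
        lock( example_mcs, node );
        // ... critical section ...
        unlock( example_mcs, node );
}
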
110//-----------------------------------------------------------------------------
111// MCS Spin Lock
112// - No recursive acquisition
113// - Needs to be released by owner
114
115struct mcs_spin_node {
116        mcs_spin_node * volatile next;
117        volatile bool locked;
118};
119
120struct mcs_spin_queue {
121        mcs_spin_node * volatile tail;
122};
123
124static inline void ?{}(mcs_spin_node & this) { this.next = 0p; this.locked = true; }
125
126static inline mcs_spin_node * volatile & ?`next ( mcs_spin_node * node ) {
127        return node->next;
128}
129
130struct mcs_spin_lock {
131        mcs_spin_queue queue;
132};
133
134static inline void lock(mcs_spin_lock & l, mcs_spin_node & n) {
135        mcs_spin_node * prev = __atomic_exchange_n(&l.queue.tail, &n, __ATOMIC_SEQ_CST);
136        n.locked = true;
137        if(prev == 0p) return;
138        prev->next = &n;
139        while(__atomic_load_n(&n.locked, __ATOMIC_RELAXED)) Pause();
140}
141
142static inline void unlock(mcs_spin_lock & l, mcs_spin_node & n) {
143        mcs_spin_node * n_ptr = &n;
144        if (__atomic_compare_exchange_n(&l.queue.tail, &n_ptr, 0p, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) return;
145        while (__atomic_load_n(&n.next, __ATOMIC_RELAXED) == 0p) {}
146        n.next->locked = false;
147}
148
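// Usage sketch (editorial addition): the calling convention matches mcs_lock, but
// waiters busy-wait on their node's locked flag instead of blocking on a
// semaphore, so this variant suits short critical sections.
mcs_spin_lock example_mcs_spin;

void example_mcs_spin_critical() {
        mcs_spin_node node;                             // must remain valid until unlock
        lock( example_mcs_spin, node );
        // ... short critical section ...
        unlock( example_mcs_spin, node );
}
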
149//-----------------------------------------------------------------------------
150// futex_mutex
151
152// - No cond var support
153// - Kernel-thread blocking alternative to the spinlock
154// - No ownership (recursive acquisition will deadlock)
155struct futex_mutex {
156        // lock state: any state other than UNLOCKED is locked
157        // enum LockState { UNLOCKED = 0, UNCONTENDED = 1, CONTENDED = 2 };
158       
159        // stores a lock state
160        int val;
161};
162
163// to use for FUTEX_WAKE and FUTEX_WAIT (other futex calls will need more params)
164static int futex(int *uaddr, int futex_op, int val) {
165    return syscall(SYS_futex, uaddr, futex_op, val, NULL, NULL, 0);
166}
167
168static inline void  ?{}( futex_mutex & this ) with(this) { val = 0; }
169
170static inline bool internal_try_lock(futex_mutex & this, int & compare_val) with(this) {
171        return __atomic_compare_exchange_n((int*)&val, (int*)&compare_val, 1, false, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
172}
173
174static inline int internal_exchange(futex_mutex & this) with(this) {
175        return __atomic_exchange_n((int*)&val, 2, __ATOMIC_ACQUIRE);
176}
177
178// if this is called recursively IT WILL DEADLOCK!!!!!
179static inline void lock(futex_mutex & this) with(this) {
180        int state;
181
182       
183        // linear backoff
184        for( int spin = 4; spin < 1024; spin += spin) {
185                state = 0;
186                // if unlocked, lock and return
187                if (internal_try_lock(this, state)) return;
188                if (2 == state) break;
189                for (int i = 0; i < spin; i++) Pause();
190        }
191        // if (internal_try_lock(this, state)) return;
192       
193        // if not in contended state, set to be in contended state
194        if (state != 2) state = internal_exchange(this);
195
196        // block and spin until we win the lock
197        while (state != 0) {
198                futex((int*)&val, FUTEX_WAIT, 2); // if val is not 2 this returns with EWOULDBLOCK
199                state = internal_exchange(this);
200        }
201}
202
203static inline void unlock(futex_mutex & this) with(this) {
204        // if uncontended, do an atomic unlock and then return
205        if (__atomic_fetch_sub(&val, 1, __ATOMIC_RELEASE) == 1) return; // TODO: try acq/rel
206       
207        // otherwise threads are blocked so we must wake one
208        __atomic_store_n((int *)&val, 0, __ATOMIC_RELEASE);
209        futex((int *)&val, FUTEX_WAKE, 1);
210}
211
212static inline void on_notify( futex_mutex & f, thread$ * t){ unpark(t); }
213static inline size_t on_wait( futex_mutex & f ) {unlock(f); return 0;}
214
215// to set recursion count after getting signalled;
216static inline void on_wakeup( futex_mutex & f, size_t recursion ) {}
217
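// Usage sketch (editorial addition): futex_mutex encodes 0 = unlocked,
// 1 = locked/uncontended and 2 = locked/contended (see the commented enum above),
// spinning with linear backoff before falling back to a futex wait.  It is not
// recursive: relocking from the owning thread deadlocks.
futex_mutex example_fmutex;
int example_counter;

void example_futex_increment() {
        lock( example_fmutex );
        example_counter += 1;                           // protected update
        unlock( example_fmutex );
}
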
218//-----------------------------------------------------------------------------
219// CLH Spinlock
220// - No recursive acquisition
221// - Needs to be released by owner
222
223struct clh_lock {
224        volatile bool * volatile tail;
225};
226
227static inline void  ?{}( clh_lock & this ) { this.tail = malloc(); *this.tail = true; }
228static inline void ^?{}( clh_lock & this ) { free(this.tail); }
229
230static inline void lock(clh_lock & l) {
231        thread$ * curr_thd = active_thread();
232        *(curr_thd->clh_node) = false;
233        volatile bool * prev = __atomic_exchange_n((bool **)(&l.tail), (bool *)(curr_thd->clh_node), __ATOMIC_SEQ_CST);
234        while(!__atomic_load_n(prev, __ATOMIC_ACQUIRE)) Pause();
235        curr_thd->clh_prev = prev;
236}
237
238static inline void unlock(clh_lock & l) {
239        thread$ * curr_thd = active_thread();
240        __atomic_store_n(curr_thd->clh_node, true, __ATOMIC_RELEASE);
241        curr_thd->clh_node = curr_thd->clh_prev;
242}
243
244static inline void on_notify(clh_lock & this, struct thread$ * t ) { unpark(t); }
245static inline size_t on_wait(clh_lock & this) { unlock(this); return 0; }
246static inline void on_wakeup(clh_lock & this, size_t recursion ) {
247        #ifdef REACQ
248        lock(this);
249        #endif
250}
251
252
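// Usage sketch (editorial addition): unlike the MCS variants, the CLH lock keeps
// its queue node in the acquiring thread$ (clh_node / clh_prev), so callers pass
// only the lock.  It is not recursive and must be released by its acquirer.
clh_lock example_clh;

void example_clh_critical() {
        lock( example_clh );
        // ... short critical section ...
        unlock( example_clh );
}
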
253//-----------------------------------------------------------------------------
254// Linear backoff Spinlock
255struct linear_backoff_then_block_lock {
256        // Spin lock used for mutual exclusion
257        __spinlock_t spinlock;
258
259        // List of blocked threads
260        dlist( thread$ ) blocked_threads;
261
262        // Used for comparing and exchanging
263        volatile size_t lock_value;
264};
265
266static inline void  ?{}( linear_backoff_then_block_lock & this ) {
267        this.spinlock{};
268        this.blocked_threads{};
269        this.lock_value = 0;
270}
271static inline void ^?{}( linear_backoff_then_block_lock & this ) {}
272// static inline void ?{}( linear_backoff_then_block_lock & this, linear_backoff_then_block_lock this2 ) = void;
273// static inline void ?=?( linear_backoff_then_block_lock & this, linear_backoff_then_block_lock this2 ) = void;
274
275static inline bool internal_try_lock(linear_backoff_then_block_lock & this, size_t & compare_val) with(this) {
276        if (__atomic_compare_exchange_n(&lock_value, &compare_val, 1, false, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)) {
277                return true;
278        }
279        return false;
280}
281
282static inline bool try_lock(linear_backoff_then_block_lock & this) { size_t compare_val = 0; return internal_try_lock(this, compare_val); }
283
284static inline bool try_lock_contention(linear_backoff_then_block_lock & this) with(this) {
285        if (__atomic_exchange_n(&lock_value, 2, __ATOMIC_ACQUIRE) == 0) {
286                return true;
287        }
288        return false;
289}
290
291static inline bool block(linear_backoff_then_block_lock & this) with(this) {
292        lock( spinlock __cfaabi_dbg_ctx2 );
293        if (lock_value != 2) {
294                unlock( spinlock );
295                return true;
296        }
297        insert_last( blocked_threads, *active_thread() );
298        unlock( spinlock );
299        park( );
300        return true;
301}
302
303static inline void lock(linear_backoff_then_block_lock & this) with(this) {
304        size_t compare_val = 0;
305        int spin = 4;
306        // linear backoff
307        for( ;; ) {
308                compare_val = 0;
309                if (internal_try_lock(this, compare_val)) return;
310                if (2 == compare_val) break;
311                for (int i = 0; i < spin; i++) Pause();
312                if (spin >= 1024) break;
313                spin += spin;
314        }
315
316        if(2 != compare_val && try_lock_contention(this)) return;
317        // block until signalled
318        while (block(this)) if(try_lock_contention(this)) return;
319}
320
321static inline void unlock(linear_backoff_then_block_lock & this) with(this) {
322    if (__atomic_exchange_n(&lock_value, 0, __ATOMIC_RELEASE) == 1) return;
323        lock( spinlock __cfaabi_dbg_ctx2 );
324        thread$ * t = &try_pop_front( blocked_threads );
325        unlock( spinlock );
326        unpark( t );
327}
328
329static inline void on_notify(linear_backoff_then_block_lock & this, struct thread$ * t ) { unpark(t); }
330static inline size_t on_wait(linear_backoff_then_block_lock & this) { unlock(this); return 0; }
331static inline void on_wakeup(linear_backoff_then_block_lock & this, size_t recursion ) {
332        #ifdef REACQ
333        lock(this);
334        #endif
335}
336
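// Usage sketch (editorial addition): acquisition spins with linearly growing
// backoff and parks on the internal queue once the lock is observed in the
// contended state; try_lock offers a non-blocking fast path.  Names are
// illustrative.
linear_backoff_then_block_lock example_lbl;

void example_backoff_critical() {
        if ( ! try_lock( example_lbl ) ) {              // fast path: attempt without waiting
                lock( example_lbl );                    // slow path: backoff, then block
        }
        // ... critical section ...
        unlock( example_lbl );
}
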
337//-----------------------------------------------------------------------------
338// Fast Block Lock
339
340// minimal blocking lock
341// - No reacquire for cond var
342// - No recursive acquisition
343// - No ownership
344struct fast_block_lock {
345        // List of blocked threads
346        dlist( thread$ ) blocked_threads;
347
348        // Spin lock used for mutual exclusion
349        __spinlock_t lock;
350
351        // flag showing if lock is held
352        bool held:1;
353
354        #ifdef __CFA_DEBUG__
355        // for deadlock detection
356        struct thread$ * owner;
357        #endif
358};
359
360static inline void  ?{}( fast_block_lock & this ) with(this) {
361        lock{};
362        blocked_threads{};
363        held = false;
364}
365static inline void ^?{}( fast_block_lock & this ) {}
366static inline void ?{}( fast_block_lock & this, fast_block_lock this2 ) = void;
367static inline void ?=?( fast_block_lock & this, fast_block_lock this2 ) = void;
368
369// if this is called recursively IT WILL DEADLOCK!!!!!
370static inline void lock(fast_block_lock & this) with(this) {
371        lock( lock __cfaabi_dbg_ctx2 );
372
373        #ifdef __CFA_DEBUG__
374        assert(!(held && owner == active_thread()));
375        #endif
376        if ( held ) {
377                insert_last( blocked_threads, *active_thread() );
378                unlock( lock );
379                park( );
380                return;
381        }
382        held = true;
383        #ifdef __CFA_DEBUG__
384        owner = active_thread();
385        #endif
386        unlock( lock );
387}
388
389static inline void unlock(fast_block_lock & this) with(this) {
390        lock( lock __cfaabi_dbg_ctx2 );
391        /* paranoid */ verifyf( held != false, "Attempt to release lock %p that isn't held", &this );
392        thread$ * t = &try_pop_front( blocked_threads );
393        held = ( t ? true : false );
394        #ifdef __CFA_DEBUG__
395        owner = ( t ? t : 0p );
396        #endif
397        unpark( t );
398        unlock( lock );
399}
400
401static inline void on_notify(fast_block_lock & this, struct thread$ * t ) with(this) {
402        #ifdef REACQ
403                lock( lock __cfaabi_dbg_ctx2 );
404                insert_last( blocked_threads, *t );
405                unlock( lock );
406        #else
407                unpark(t);
408        #endif
409}
410static inline size_t on_wait(fast_block_lock & this) { unlock(this); return 0; }
411static inline void on_wakeup(fast_block_lock & this, size_t recursion ) { }
412
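// Usage sketch (editorial addition): fast_block_lock is a minimal FIFO blocking
// lock; it is not recursive (the __CFA_DEBUG__ assert above catches self-deadlock)
// and is reused below as the internal lock of block_spin_lock.
fast_block_lock example_fbl;

void example_fast_block_critical() {
        lock( example_fbl );
        // ... critical section ...
        unlock( example_fbl );
}
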
413//-----------------------------------------------------------------------------
414// simple_owner_lock
415
416// pthread owner lock
417// - reacquire for cond var
418// - recursive acquisition
419// - ownership
420struct simple_owner_lock {
421        // List of blocked threads
422        dlist( thread$ ) blocked_threads;
423
424        // Spin lock used for mutual exclusion
425        __spinlock_t lock;
426
427        // owner showing if lock is held
428        struct thread$ * owner;
429
430        size_t recursion_count;
431};
432
433static inline void  ?{}( simple_owner_lock & this ) with(this) {
434        lock{};
435        blocked_threads{};
436        owner = 0p;
437        recursion_count = 0;
438}
439static inline void ^?{}( simple_owner_lock & this ) {}
440static inline void ?{}( simple_owner_lock & this, simple_owner_lock this2 ) = void;
441static inline void ?=?( simple_owner_lock & this, simple_owner_lock this2 ) = void;
442
443static inline void lock(simple_owner_lock & this) with(this) {
444        if (owner == active_thread()) {
445                recursion_count++;
446                return;
447        }
448        lock( lock __cfaabi_dbg_ctx2 );
449
450        if (owner != 0p) {
451                insert_last( blocked_threads, *active_thread() );
452                unlock( lock );
453                park( );
454                return;
455        }
456        owner = active_thread();
457        recursion_count = 1;
458        unlock( lock );
459}
460
461// TODO: fix duplicate def issue and bring this back
462// void pop_and_set_new_owner( simple_owner_lock & this ) with( this ) {
463        // thread$ * t = &try_pop_front( blocked_threads );
464        // owner = t;
465        // recursion_count = ( t ? 1 : 0 );
466        // unpark( t );
467// }
468
469static inline void unlock(simple_owner_lock & this) with(this) {
470        lock( lock __cfaabi_dbg_ctx2 );
471        /* paranoid */ verifyf( owner != 0p, "Attempt to release lock %p that isn't held", &this );
472        /* paranoid */ verifyf( owner == active_thread(), "Thread %p other than the owner %p attempted to release owner lock %p", owner, active_thread(), &this );
473        // if the recursion count reaches zero, release the lock and set a new owner if one is waiting
474        recursion_count--;
475        if ( recursion_count == 0 ) {
476                // pop_and_set_new_owner( this );
477                thread$ * t = &try_pop_front( blocked_threads );
478                owner = t;
479                recursion_count = ( t ? 1 : 0 );
480                unpark( t );
481        }
482        unlock( lock );
483}
484
485static inline void on_notify(simple_owner_lock & this, struct thread$ * t ) with(this) {
486        lock( lock __cfaabi_dbg_ctx2 );
487        // lock held
488        if ( owner != 0p ) {
489                insert_last( blocked_threads, *t );
490        }
491        // lock not held
492        else {
493                owner = t;
494                recursion_count = 1;
495                unpark( t );
496        }
497        unlock( lock );
498}
499
500static inline size_t on_wait(simple_owner_lock & this) with(this) {
501        lock( lock __cfaabi_dbg_ctx2 );
502        /* paranoid */ verifyf( owner != 0p, "Attempt to release lock %p that isn't held", &this );
503        /* paranoid */ verifyf( owner == active_thread(), "Thread %p other than the owner %p attempted to release owner lock %p", owner, active_thread(), &this );
504
505        size_t ret = recursion_count;
506
507        // pop_and_set_new_owner( this );
508
509        thread$ * t = &try_pop_front( blocked_threads );
510        owner = t;
511        recursion_count = ( t ? 1 : 0 );
512        unpark( t );
513
514        unlock( lock );
515        return ret;
516}
517
518static inline void on_wakeup(simple_owner_lock & this, size_t recursion ) with(this) { recursion_count = recursion; }
519
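// Usage sketch (editorial addition): simple_owner_lock counts acquisitions by the
// owning thread, so each lock must be balanced by an unlock before the lock is
// handed to a waiter.
simple_owner_lock example_sol;

void example_owner_recursive() {
        lock( example_sol );                            // recursion_count == 1
        lock( example_sol );                            // same owner: recursion_count == 2
        // ... critical section ...
        unlock( example_sol );                          // recursion_count == 1, still held
        unlock( example_sol );                          // released; a waiter may become owner
}
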
520//-----------------------------------------------------------------------------
521// Spin Queue Lock
522
523// - No reacquire for cond var
524// - No recursive acquisition
525// - No ownership
526// - spin lock with no locking/atomics in unlock
527struct spin_queue_lock {
528        // Spin lock used for mutual exclusion
529        mcs_spin_lock lock;
530
531        // flag showing if lock is held
532        volatile bool held;
533
534        #ifdef __CFA_DEBUG__
535        // for deadlock detection
536        struct thread$ * owner;
537        #endif
538};
539
540static inline void  ?{}( spin_queue_lock & this ) with(this) {
541        lock{};
542        held = false;
543}
544static inline void ^?{}( spin_queue_lock & this ) {}
545static inline void ?{}( spin_queue_lock & this, spin_queue_lock this2 ) = void;
546static inline void ?=?( spin_queue_lock & this, spin_queue_lock this2 ) = void;
547
548// if this is called recursively IT WILL DEADLOCK!!!!!
549static inline void lock(spin_queue_lock & this) with(this) {
550        mcs_spin_node node;
551        lock( lock, node );
552        while(__atomic_load_n(&held, __ATOMIC_SEQ_CST)) Pause();
553        __atomic_store_n(&held, true, __ATOMIC_SEQ_CST);
554        unlock( lock, node );
555}
556
557static inline void unlock(spin_queue_lock & this) with(this) {
558        __atomic_store_n(&held, false, __ATOMIC_RELEASE);
559}
560
561static inline void on_notify(spin_queue_lock & this, struct thread$ * t ) {
562        unpark(t);
563}
564static inline size_t on_wait(spin_queue_lock & this) { unlock(this); return 0; }
565static inline void on_wakeup(spin_queue_lock & this, size_t recursion ) {
566        #ifdef REACQ
567        lock(this);
568        #endif
569}
570
571
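// Usage sketch (editorial addition): spin_queue_lock (and the two variants below)
// use their internal queue lock only to order acquirers; the critical section is
// guarded by the held flag, which is why unlock is a single store with no queue
// manipulation.
spin_queue_lock example_sql;

void example_spin_queue_critical() {
        lock( example_sql );                            // enqueue, then spin until held clears
        // ... short critical section ...
        unlock( example_sql );                          // store held = false
}
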
572//-----------------------------------------------------------------------------
573// MCS Block Spin Lock
574
575// - No reacquire for cond var
576// - No recursive acquisition
577// - No ownership
578// - Blocks, but the first node spins (like spin_queue_lock, except threads that are not first block instead of spinning)
579struct mcs_block_spin_lock {
580        // Spin lock used for mutual exclusion
581        mcs_lock lock;
582
583        // flag showing if lock is held
584        volatile bool held;
585};
586
587static inline void  ?{}( mcs_block_spin_lock & this ) with(this) {
588        lock{};
589        held = false;
590}
591static inline void ^?{}( mcs_block_spin_lock & this ) {}
592static inline void ?{}( mcs_block_spin_lock & this, mcs_block_spin_lock this2 ) = void;
593static inline void ?=?( mcs_block_spin_lock & this, mcs_block_spin_lock this2 ) = void;
594
595// if this is called recursively IT WILL DEADLOCK!!!!!
596static inline void lock(mcs_block_spin_lock & this) with(this) {
597        mcs_node node;
598        lock( lock, node );
599        while(__atomic_load_n(&held, __ATOMIC_SEQ_CST)) Pause();
600        __atomic_store_n(&held, true, __ATOMIC_SEQ_CST);
601        unlock( lock, node );
602}
603
604static inline void unlock(mcs_block_spin_lock & this) with(this) {
605        __atomic_store_n(&held, false, __ATOMIC_SEQ_CST);
606}
607
608static inline void on_notify(mcs_block_spin_lock & this, struct thread$ * t ) { unpark(t); }
609static inline size_t on_wait(mcs_block_spin_lock & this) { unlock(this); return 0; }
610static inline void on_wakeup(mcs_block_spin_lock & this, size_t recursion ) {
611        #ifdef REACQ
612        lock(this);
613        #endif
614}
615
616//-----------------------------------------------------------------------------
617// Block Spin Lock
618
619// - No reacquire for cond var
620// - No recursive acquisition
621// - No ownership
622// - Blocks, but the first node spins (like spin_queue_lock, except threads that are not first block instead of spinning)
623struct block_spin_lock {
624        // Spin lock used for mutual exclusion
625        fast_block_lock lock;
626
627        // flag showing if lock is held
628        volatile bool held;
629};
630
631static inline void  ?{}( block_spin_lock & this ) with(this) {
632        lock{};
633        held = false;
634}
635static inline void ^?{}( block_spin_lock & this ) {}
636static inline void ?{}( block_spin_lock & this, block_spin_lock this2 ) = void;
637static inline void ?=?( block_spin_lock & this, block_spin_lock this2 ) = void;
638
639// if this is called recursively IT WILL DEADLOCK!!!!!
640static inline void lock(block_spin_lock & this) with(this) {
641        lock( lock );
642        while(__atomic_load_n(&held, __ATOMIC_SEQ_CST)) Pause();
643        __atomic_store_n(&held, true, __ATOMIC_RELEASE);
644        unlock( lock );
645}
646
647static inline void unlock(block_spin_lock & this) with(this) {
648        __atomic_store_n(&held, false, __ATOMIC_RELEASE);
649}
650
651static inline void on_notify(block_spin_lock & this, struct thread$ * t ) with(this.lock) {
652  #ifdef REACQ
653        // first we acquire internal fast_block_lock
654        lock( lock __cfaabi_dbg_ctx2 );
655        if ( held ) { // if internal fast_block_lock is held
656                insert_last( blocked_threads, *t );
657                unlock( lock );
658                return;
659        }
660        // if internal fast_block_lock is not held
661        held = true;
662        #ifdef __CFA_DEBUG__
663        owner = t;
664        #endif
665        unlock( lock );
666
667  #endif
668        unpark(t);
669       
670}
671static inline size_t on_wait(block_spin_lock & this) { unlock(this); return 0; }
672static inline void on_wakeup(block_spin_lock & this, size_t recursion ) with(this) {
673  #ifdef REACQ
674        // now we acquire the entire block_spin_lock upon waking up
675        while(__atomic_load_n(&held, __ATOMIC_SEQ_CST)) Pause();
676        __atomic_store_n(&held, true, __ATOMIC_RELEASE);
677        unlock( lock ); // now we release the internal fast_block_lock
678  #endif
679}
680
681//-----------------------------------------------------------------------------
682// is_blocking_lock
683trait is_blocking_lock(L & | sized(L)) {
684        // For synchronization locks to use when acquiring
685        void on_notify( L &, struct thread$ * );
686
687        // For synchronization locks to use when releasing
688        size_t on_wait( L & );
689
690        // to set recursion count after getting signalled;
691        void on_wakeup( L &, size_t recursion );
692};
693
694//-----------------------------------------------------------------------------
695// info_thread
696// the info thread is a wrapper around a thread used
697// to store extra data for use in the condition variable
698forall(L & | is_blocking_lock(L)) {
699        struct info_thread;
700
701        // // for use by sequence
702        // info_thread(L) *& Back( info_thread(L) * this );
703        // info_thread(L) *& Next( info_thread(L) * this );
704}
705
706//-----------------------------------------------------------------------------
707// Synchronization Locks
708forall(L & | is_blocking_lock(L)) {
709
710        //-----------------------------------------------------------------------------
711        // condition_variable
712
713        // The multi-tool condition variable
714        // - can pass timeouts to wait for either a signal or timeout
715        // - can wait without passing a lock
716        // - can have waiters reacquire different locks while waiting on the same cond var
717        // - has shadow queue
718        // - can be signalled outside of critical sections with no locks held
719        struct condition_variable {
720                // Spin lock used for mutual exclusion
721                __spinlock_t lock;
722
723                // List of blocked threads
724                dlist( info_thread(L) ) blocked_threads;
725
726                // Count of current blocked threads
727                int count;
728        };
729
730
731        void  ?{}( condition_variable(L) & this );
732        void ^?{}( condition_variable(L) & this );
733
734        bool notify_one( condition_variable(L) & this );
735        bool notify_all( condition_variable(L) & this );
736
737        uintptr_t front( condition_variable(L) & this );
738
739        bool empty  ( condition_variable(L) & this );
740        int  counter( condition_variable(L) & this );
741
742        void wait( condition_variable(L) & this );
743        void wait( condition_variable(L) & this, uintptr_t info );
744        bool wait( condition_variable(L) & this, Duration duration );
745        bool wait( condition_variable(L) & this, uintptr_t info, Duration duration );
746
747        void wait( condition_variable(L) & this, L & l );
748        void wait( condition_variable(L) & this, L & l, uintptr_t info );
749        bool wait( condition_variable(L) & this, L & l, Duration duration );
750        bool wait( condition_variable(L) & this, L & l, uintptr_t info, Duration duration );
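        // (an illustrative usage sketch for condition_variable appears after the close of this forall block, at the end of the file)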
751
752        //-----------------------------------------------------------------------------
753        // fast_cond_var
754
755        // The trimmed and slim condition variable
756        // - no internal lock so you must hold a lock while using this cond var
757        // - signalling without holding the branded lock is UNSAFE!
758        // - only allows usage of one lock; the cond var is branded by its first use
759
760        struct fast_cond_var {
761                // List of blocked threads
762                dlist( info_thread(L) ) blocked_threads;
763                #ifdef __CFA_DEBUG__
764                L * lock_used;
765                #endif
766        };
767
768        void  ?{}( fast_cond_var(L) & this );
769        void ^?{}( fast_cond_var(L) & this );
770
771        bool notify_one( fast_cond_var(L) & this );
772        bool notify_all( fast_cond_var(L) & this );
773
774        uintptr_t front( fast_cond_var(L) & this );
775        bool empty  ( fast_cond_var(L) & this );
776
777        void wait( fast_cond_var(L) & this, L & l );
778        void wait( fast_cond_var(L) & this, L & l, uintptr_t info );
779
780
781        //-----------------------------------------------------------------------------
782        // pthread_cond_var
783        //
784        // - cond var with minimal footprint
786// - supports operations needed for pthread cond
786
787        struct pthread_cond_var {
788                dlist( info_thread(L) ) blocked_threads;
789                __spinlock_t lock;
790        };
791
792        void  ?{}( pthread_cond_var(L) & this );
793        void ^?{}( pthread_cond_var(L) & this );
794
795        bool notify_one( pthread_cond_var(L) & this );
796        bool notify_all( pthread_cond_var(L) & this );
797
798        uintptr_t front( pthread_cond_var(L) & this );
799        bool empty ( pthread_cond_var(L) & this );
800
801        void wait( pthread_cond_var(L) & this, L & l );
802        void wait( pthread_cond_var(L) & this, L & l, uintptr_t info );
803        bool wait( pthread_cond_var(L) & this, L & l, timespec t );
804        bool wait( pthread_cond_var(L) & this, L & l, uintptr_t info, timespec t );
805}
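
// Usage sketch (editorial addition): the classic monitor pattern using
// condition_variable with a single_acquisition_lock.  wait() releases the lock
// while the caller is blocked and the caller holds it again once wait returns;
// the waiter and the signaller are assumed to run on different threads, and all
// names are illustrative.
single_acquisition_lock example_cv_lock;
condition_variable( single_acquisition_lock ) example_cv;
volatile bool example_ready = false;

void example_waiter() {
        lock( example_cv_lock );
        while ( ! example_ready ) wait( example_cv, example_cv_lock );
        // ... consume state protected by example_cv_lock ...
        unlock( example_cv_lock );
}

void example_signaller() {
        lock( example_cv_lock );
        example_ready = true;
        notify_one( example_cv );
        unlock( example_cv_lock );
}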