source: libcfa/src/concurrency/locks.hfa @ bf55f32

Last change on this file since bf55f32 was bf55f32, checked in by caparsons <caparson@…>, 10 months ago

added support for general channel operators

1//
2// Cforall Version 1.0.0 Copyright (C) 2021 University of Waterloo
3//
4// The contents of this file are covered under the licence agreement in the
5// file "LICENCE" distributed with Cforall.
6//
7// locks.hfa -- PUBLIC
8// Runtime locks that are used with the runtime thread system.
9//
10// Author           : Colby Alexander Parsons
11// Created On       : Thu Jan 21 19:46:50 2021
12// Last Modified By :
13// Last Modified On :
14// Update Count     :
15//
16
17#pragma once
18
19#include <stdbool.h>
20#include <stdio.h>
21
22#include "bits/weakso_locks.hfa"
23#include "containers/lockfree.hfa"
24#include "containers/list.hfa"
25
26#include "limits.hfa"
27#include "thread.hfa"
28
29#include "time_t.hfa"
30#include "time.hfa"
31
32#include "select.hfa"
33
34// futex headers
35#include <linux/futex.h>      /* Definition of FUTEX_* constants */
36#include <sys/syscall.h>      /* Definition of SYS_* constants */
37#include <unistd.h>           /* Definition of syscall routine */
38
39typedef void (*__cfa_pre_park)( void * );
40
41static inline void pre_park_noop( void * ) {}
42
43//-----------------------------------------------------------------------------
44// is_blocking_lock
45forall( L & | sized(L) )
46trait is_blocking_lock {
47        // For synchronization locks to use when acquiring
48        void on_notify( L &, struct thread$ * );
49
50        // For synchronization locks to use when releasing
51        size_t on_wait( L &, __cfa_pre_park pp_fn, void * pp_datum );
52
53        // To set the recursion count after being signalled
54        void on_wakeup( L &, size_t recursion );
55};
56
57static inline void pre_park_then_park( __cfa_pre_park pp_fn, void * pp_datum ) {
58    pp_fn( pp_datum );
59    park();
60}
61
62// Macros providing default implementations of the is_blocking_lock trait routines for locks that do not wait-morph
63
64#define DEFAULT_ON_NOTIFY( lock_type ) \
65    static inline void on_notify( lock_type & this, thread$ * t ){ unpark(t); }
66
67#define DEFAULT_ON_WAIT( lock_type ) \
68    static inline size_t on_wait( lock_type & this, __cfa_pre_park pp_fn, void * pp_datum ) { \
69        unlock( this ); \
70        pre_park_then_park( pp_fn, pp_datum ); \
71        return 0; \
72    }
73
74// on_wakeup impl if lock should be reacquired after waking up
75#define DEFAULT_ON_WAKEUP_REACQ( lock_type ) \
76    static inline void on_wakeup( lock_type & this, size_t recursion ) { lock( this ); }
77
78// on_wakeup impl if lock will not be reacquired after waking up
79#define DEFAULT_ON_WAKEUP_NO_REACQ( lock_type ) \
80    static inline void on_wakeup( lock_type & this, size_t recursion ) {}
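// The trait splits a blocking acquire across three hooks: on_wait() releases the
// lock and parks the caller, on_notify() hands a signalled thread back to the
// lock (here simply by unparking it), and on_wakeup() restores any recursion
// count once the thread resumes.  A minimal sketch of wiring a hypothetical
// spin-style lock (not part of this header) into the trait with these macros:
//
//      struct my_spin_lock { volatile bool held; };
//      static inline void ?{}( my_spin_lock & this ) { this.held = false; }
//      static inline void lock( my_spin_lock & this ) {
//              while ( __atomic_exchange_n( &this.held, true, __ATOMIC_ACQUIRE ) ) Pause();
//      }
//      static inline void unlock( my_spin_lock & this ) {
//              __atomic_store_n( &this.held, false, __ATOMIC_RELEASE );
//      }
//      DEFAULT_ON_NOTIFY( my_spin_lock )       // notify: unpark the waiter
//      DEFAULT_ON_WAIT( my_spin_lock )         // wait: unlock, then park
//      DEFAULT_ON_WAKEUP_REACQ( my_spin_lock ) // wakeup: reacquire the lock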
81
82
83
84//-----------------------------------------------------------------------------
85// Semaphore
86struct semaphore {
87        __spinlock_t lock;
88        int count;
89        __queue_t(thread$) waiting;
90};
91
92void  ?{}(semaphore & this, int count = 1);
93void ^?{}(semaphore & this);
94bool   P (semaphore & this);
95bool   V (semaphore & this);
96bool   V (semaphore & this, unsigned count);
97thread$ * V (semaphore & this, bool );
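// P() acquires a unit of the count, blocking the caller when none is available;
// V() releases a unit and may unpark a waiting thread.  With the default count
// of 1 the semaphore behaves as a simple mutual-exclusion lock.  An illustrative
// sketch (the function name is hypothetical):
//
//      semaphore bin;                  // count defaults to 1
//      void critical_section() {
//              P( bin );               // blocks while another thread holds the unit
//              // ... exclusive work ...
//              V( bin );               // return the unit, waking one waiter if any
//      }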
98
99//----------
100struct single_acquisition_lock {
101        inline blocking_lock;
102};
103
104static inline void  ?{}( single_acquisition_lock & this ) {((blocking_lock &)this){ false, false };}
105static inline void ^?{}( single_acquisition_lock & this ) {}
106static inline void   lock     ( single_acquisition_lock & this ) { lock    ( (blocking_lock &)this ); }
107static inline bool   try_lock ( single_acquisition_lock & this ) { return try_lock( (blocking_lock &)this ); }
108static inline void   unlock   ( single_acquisition_lock & this ) { unlock  ( (blocking_lock &)this ); }
109static inline size_t on_wait  ( single_acquisition_lock & this, __cfa_pre_park pp_fn, void * pp_datum ) { return on_wait ( (blocking_lock &)this, pp_fn, pp_datum ); }
110static inline void   on_wakeup( single_acquisition_lock & this, size_t v ) { on_wakeup ( (blocking_lock &)this, v ); }
111static inline void   on_notify( single_acquisition_lock & this, struct thread$ * t ) { on_notify( (blocking_lock &)this, t ); }
112static inline bool   register_select( single_acquisition_lock & this, select_node & node ) { return register_select( (blocking_lock &)this, node ); }
113static inline bool   unregister_select( single_acquisition_lock & this, select_node & node ) { return unregister_select( (blocking_lock &)this, node ); }
114static inline bool   on_selected( single_acquisition_lock & this, select_node & node ) { return on_selected( (blocking_lock &)this, node ); }
115__CFA_SELECT_GET_TYPE( single_acquisition_lock );
116
117//----------
118struct owner_lock {
119        inline blocking_lock;
120};
121
122static inline void  ?{}( owner_lock & this ) {((blocking_lock &)this){ true, true };}
123static inline void ^?{}( owner_lock & this ) {}
124static inline void   lock     ( owner_lock & this ) { lock    ( (blocking_lock &)this ); }
125static inline bool   try_lock ( owner_lock & this ) { return try_lock( (blocking_lock &)this ); }
126static inline void   unlock   ( owner_lock & this ) { unlock  ( (blocking_lock &)this ); }
127static inline size_t on_wait  ( owner_lock & this, __cfa_pre_park pp_fn, void * pp_datum ) { return on_wait ( (blocking_lock &)this, pp_fn, pp_datum ); }
128static inline void   on_wakeup( owner_lock & this, size_t v ) { on_wakeup ( (blocking_lock &)this, v ); }
129static inline void   on_notify( owner_lock & this, struct thread$ * t ) { on_notify( (blocking_lock &)this, t ); }
130static inline bool   register_select( owner_lock & this, select_node & node ) { return register_select( (blocking_lock &)this, node ); }
131static inline bool   unregister_select( owner_lock & this, select_node & node ) { return unregister_select( (blocking_lock &)this, node ); }
132static inline bool   on_selected( owner_lock & this, select_node & node ) { return on_selected( (blocking_lock &)this, node ); }
133__CFA_SELECT_GET_TYPE( owner_lock );
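// Both wrappers share blocking_lock; the two boolean constructor arguments select
// the acquisition policy.  Under the usual semantics implied by the names, an
// owner_lock tracks its owner and counts nested acquisitions, while a
// single_acquisition_lock must not be re-locked by the thread that already holds
// it.  An illustrative sketch (hypothetical function names):
//
//      owner_lock m;
//      void inner() { lock( m ); /* ... */ unlock( m ); }   // nested acquire is counted
//      void outer() { lock( m ); inner(); unlock( m ); }    // released at the outermost unlock
//
// The same nesting on a single_acquisition_lock is an error.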
134
135//-----------------------------------------------------------------------------
136// MCS Lock
137struct mcs_node {
138        mcs_node * volatile next;
139        single_sem sem;
140};
141
142static inline void ?{}(mcs_node & this) { this.next = 0p; }
143
144static inline mcs_node * volatile & ?`next ( mcs_node * node ) {
145        return node->next;
146}
147
148struct mcs_lock {
149        mcs_queue(mcs_node) queue;
150};
151
152static inline void lock(mcs_lock & l, mcs_node & n) {
153        if(push(l.queue, &n))
154                wait(n.sem);
155}
156
157static inline void unlock(mcs_lock & l, mcs_node & n) {
158        mcs_node * next = advance(l.queue, &n);
159        if(next) post(next->sem);
160}
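// Unlike the locks above, the MCS lock is not self-contained: each acquiring
// thread supplies its own mcs_node, which is both its entry in the queue and the
// semaphore it blocks on when it is not at the head.  The node must stay alive
// (e.g. on the caller's stack) until the matching unlock.  A minimal usage
// sketch (the function name is hypothetical):
//
//      mcs_lock l;
//      void critical_section() {
//              mcs_node n;             // per-acquire queue node on this stack frame
//              lock( l, n );           // enqueue n; wait on n.sem unless first in line
//              // ... critical section ...
//              unlock( l, n );         // hand the lock to the next queued node, if any
//      }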
161
162//-----------------------------------------------------------------------------
163// MCS Spin Lock
164// - No recursive acquisition
165// - Needs to be released by owner
166
167struct mcs_spin_node {
168        mcs_spin_node * volatile next;
169        volatile bool locked;
170};
171
172struct mcs_spin_queue {
173        mcs_spin_node * volatile tail;
174};
175
176static inline void ?{}(mcs_spin_node & this) { this.next = 0p; this.locked = true; }
177
178struct mcs_spin_lock {
179        mcs_spin_queue queue;
180};
181
182static inline void lock(mcs_spin_lock & l, mcs_spin_node & n) {
183    n.locked = true;
184        mcs_spin_node * prev = __atomic_exchange_n(&l.queue.tail, &n, __ATOMIC_SEQ_CST);
185        if( prev == 0p ) return;
186        prev->next = &n;
187        while( __atomic_load_n(&n.locked, __ATOMIC_RELAXED) ) Pause();
188}
189
190static inline void unlock(mcs_spin_lock & l, mcs_spin_node & n) {
191        mcs_spin_node * n_ptr = &n;
192        if (__atomic_compare_exchange_n(&l.queue.tail, &n_ptr, 0p, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) return;
193        while (__atomic_load_n(&n.next, __ATOMIC_RELAXED) == 0p) Pause();
194        n.next->locked = false;
195}
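// Same caller-supplied-node protocol as mcs_lock above, except waiters spin on
// their own node's `locked` flag rather than blocking on a semaphore, so each
// waiter spins on its own cache line.  Unlock first tries to swing the queue
// tail back to null (no successor); only if that fails does it wait for the
// successor link to appear and then clear that node's flag.  Usage mirrors
// mcs_lock (hypothetical caller):
//
//      mcs_spin_lock l;
//      void critical_section() {
//              mcs_spin_node n;
//              lock( l, n );
//              // ... critical section ...
//              unlock( l, n );
//      }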
196
197//-----------------------------------------------------------------------------
198// futex_mutex
199
200// - Kernel-thread-blocking alternative to the spinlock
201// - No ownership (deadlocks on recursive acquisition)
202// - No reacquire on wakeup
203struct futex_mutex {
204        // lock state: any state other than UNLOCKED is locked
205        // enum LockState { UNLOCKED = 0, UNCONTENDED = 1, CONTENDED = 2 };
206       
207        // stores a lock state
208        int val;
209};
210
211// to use for FUTEX_WAKE and FUTEX_WAIT (other futex calls will need more params)
212static inline int futex(int *uaddr, int futex_op, int val) {
213    return syscall(SYS_futex, uaddr, futex_op, val, NULL, NULL, 0);
214}
215
216static inline void ?{}( futex_mutex & this ) with(this) { val = 0; }
217
218static inline bool internal_try_lock( futex_mutex & this, int & compare_val) with(this) {
219        return __atomic_compare_exchange_n((int*)&val, (int*)&compare_val, 1, false, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
220}
221
222static inline int internal_exchange( futex_mutex & this ) with(this) {
223        return __atomic_exchange_n((int*)&val, 2, __ATOMIC_ACQUIRE);
224}
225
226// if this is called recursively IT WILL DEADLOCK!!!!!
227static inline void lock( futex_mutex & this ) with(this) {
228        int state;
229
230        for( int spin = 4; spin < 1024; spin += spin) {
231                state = 0;
232                // if unlocked, lock and return
233                if (internal_try_lock(this, state)) return;
234                if (2 == state) break;
235                for (int i = 0; i < spin; i++) Pause();
236        }
237       
238        // if not in contended state, set to be in contended state
239        if (state != 2) state = internal_exchange(this);
240
241        // block and spin until we win the lock
242        while (state != 0) {
243                futex((int*)&val, FUTEX_WAIT, 2); // if val is not 2 this returns with EWOULDBLOCK
244                state = internal_exchange(this);
245        }
246}
247
248static inline void unlock(futex_mutex & this) with(this) {
249        // if uncontended do atomic unlock and then return
250    if (__atomic_exchange_n(&val, 0, __ATOMIC_RELEASE) == 1) return;
251       
252        // otherwise threads are blocked so we must wake one
253        futex((int *)&val, FUTEX_WAKE, 1);
254}
255
256DEFAULT_ON_NOTIFY( futex_mutex )
257DEFAULT_ON_WAIT( futex_mutex )
258DEFAULT_ON_WAKEUP_NO_REACQ( futex_mutex )
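// An illustrative trace of the 0/1/2 state machine under contention, following
// the code above:
//
//      T1: lock()      internal_try_lock: val 0 -> 1                       (uncontended hold)
//      T2: lock()      try-lock and spinning fail; internal_exchange: 1 -> 2 (marked contended)
//                      futex(FUTEX_WAIT, 2) blocks while val == 2
//      T1: unlock()    exchange val -> 0; old value was 2, so futex(FUTEX_WAKE, 1)
//      T2:             wakes; internal_exchange: 0 -> 2 returns 0, so T2 now holds the lock
//
// Note the woken thread re-marks the lock as contended even if it was the only
// waiter, so the next unlock may issue a spurious FUTEX_WAKE; that is the cost
// of not tracking an exact waiter count.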
259
260//-----------------------------------------------------------------------------
261// go_mutex
262
263// - Kernel-thread-blocking alternative to the spinlock
264// - No ownership (deadlocks on recursive acquisition)
265// - Golang's flavour of mutex
266// - Impl taken from Golang: src/runtime/lock_futex.go
267struct go_mutex {
268        // lock state: any state other than UNLOCKED is locked
269        // enum LockState { UNLOCKED = 0, LOCKED = 1, SLEEPING = 2 };
270       
271        // stores a lock state
272        int val;
273};
274static inline void  ?{}( go_mutex & this ) with(this) { val = 0; }
275// static inline void ?{}( go_mutex & this, go_mutex this2 ) = void; // these don't compile correctly at the moment so they should be omitted
276// static inline void ?=?( go_mutex & this, go_mutex this2 ) = void;
277
278static inline bool internal_try_lock(go_mutex & this, int & compare_val, int new_val ) with(this) {
279        return __atomic_compare_exchange_n((int*)&val, (int*)&compare_val, new_val, false, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
280}
281
282static inline int internal_exchange(go_mutex & this, int swap ) with(this) {
283        return __atomic_exchange_n((int*)&val, swap, __ATOMIC_ACQUIRE);
284}
285
286// if this is called recursively IT WILL DEADLOCK!!!!!
287static inline void lock( go_mutex & this ) with( this ) {
288        int state, init_state;
289
290    // speculative grab
291    state = internal_exchange(this, 1);
292    if ( !state ) return; // state == 0
293    init_state = state;
294    for (;;) {
295        for( int i = 0; i < 4; i++ ) {
296            while( !val ) { // lock unlocked
297                state = 0;
298                if ( internal_try_lock( this, state, init_state ) ) return;
299            }
300            for (int i = 0; i < 30; i++) Pause();
301        }
302
303        while( !val ) { // lock unlocked
304            state = 0;
305            if ( internal_try_lock( this, state, init_state ) ) return;
306        }
307        sched_yield();
308       
309        // if not in contended state, set to be in contended state
310        state = internal_exchange( this, 2 );
311        if ( !state ) return; // state == 0
312        init_state = 2;
313        futex( (int*)&val, FUTEX_WAIT, 2 ); // if val is not 2 this returns with EWOULDBLOCK
314    }
315}
316
317static inline void unlock( go_mutex & this ) with(this) {
318        // if uncontended do atomic unlock and then return
319    if ( __atomic_exchange_n(&val, 0, __ATOMIC_RELEASE) == 1 ) return;
320       
321        // otherwise threads are blocked so we must wake one
322        futex( (int *)&val, FUTEX_WAKE, 1 );
323}
324
325DEFAULT_ON_NOTIFY( go_mutex )
326DEFAULT_ON_WAIT( go_mutex )
327DEFAULT_ON_WAKEUP_NO_REACQ( go_mutex )
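// go_mutex uses the same 0 (unlocked) / 1 (locked) / 2 (sleeping) states as
// futex_mutex, but remembers in init_state whether it has observed sleepers, so
// a spinning thread that wins the lock re-installs the "sleeping" value and a
// later unlock still wakes a waiter.  Each failed attempt escalates: four rounds
// of bounded spinning (30 Pauses each), one sched_yield(), then a futex wait in
// the contended state.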
328
329//-----------------------------------------------------------------------------
330// Exponential backoff then block lock
331struct exp_backoff_then_block_lock {
332        // Spin lock used for mutual exclusion
333        __spinlock_t spinlock;
334
335        // List of blocked threads
336        dlist( thread$ ) blocked_threads;
337
338        // Used for comparing and exchanging
339        volatile size_t lock_value;
340};
341
342static inline void  ?{}( exp_backoff_then_block_lock & this ) {
343        this.spinlock{};
344        this.blocked_threads{};
345        this.lock_value = 0;
346}
347static inline void ?{}( exp_backoff_then_block_lock & this, exp_backoff_then_block_lock this2 ) = void;
348static inline void ?=?( exp_backoff_then_block_lock & this, exp_backoff_then_block_lock this2 ) = void;
349
350static inline void  ^?{}( exp_backoff_then_block_lock & this ){}
351
352static inline bool internal_try_lock( exp_backoff_then_block_lock & this, size_t & compare_val ) with(this) {
353        return __atomic_compare_exchange_n(&lock_value, &compare_val, 1, false, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
354}
355
356static inline bool try_lock( exp_backoff_then_block_lock & this ) { size_t compare_val = 0; return internal_try_lock( this, compare_val ); }
357
358static inline bool try_lock_contention( exp_backoff_then_block_lock & this ) with(this) {
359        return !__atomic_exchange_n( &lock_value, 2, __ATOMIC_ACQUIRE );
360}
361
362static inline bool block( exp_backoff_then_block_lock & this ) with(this) {
363    lock( spinlock __cfaabi_dbg_ctx2 );
364    if (__atomic_load_n( &lock_value, __ATOMIC_SEQ_CST) != 2) {
365        unlock( spinlock );
366        return true;
367    }
368    insert_last( blocked_threads, *active_thread() );
369    unlock( spinlock );
370        park( );
371        return true;
372}
373
374static inline void lock( exp_backoff_then_block_lock & this ) with(this) {
375        size_t compare_val = 0;
376        int spin = 4;
377
378        // exponential backoff: spin budget doubles each round
379        for( ;; ) {
380                compare_val = 0;
381                if (internal_try_lock(this, compare_val)) return;
382                if (2 == compare_val) break;
383                for (int i = 0; i < spin; i++) Pause();
384                if (spin >= 1024) break;
385                spin += spin;
386        }
387
388        if(2 != compare_val && try_lock_contention(this)) return;
389        // block until signalled
390        while (block(this)) if(try_lock_contention(this)) return;
391}
392
393static inline void unlock( exp_backoff_then_block_lock & this ) with(this) {
394    if (__atomic_exchange_n(&lock_value, 0, __ATOMIC_RELEASE) == 1) return;
395    lock( spinlock __cfaabi_dbg_ctx2 );
396    thread$ * t = &try_pop_front( blocked_threads );
397    unlock( spinlock );
398    unpark( t );
399}
400
401DEFAULT_ON_NOTIFY( exp_backoff_then_block_lock )
402DEFAULT_ON_WAIT( exp_backoff_then_block_lock )
403DEFAULT_ON_WAKEUP_REACQ( exp_backoff_then_block_lock )
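// The acquire path escalates in three stages: (1) spin with a doubling Pause
// budget (4, 8, 16, ... up to 1024) while the lock appears uncontended, (2) move
// the lock into the contended state (lock_value == 2) with try_lock_contention(),
// and (3) enqueue on blocked_threads and park(), retrying the contended acquire
// each time it is unparked.  Unlock mirrors this: an exchange back to 0, with a
// wakeup only when the old value was 2.  Plain mutual-exclusion use looks the
// same as for the other locks (hypothetical caller):
//
//      exp_backoff_then_block_lock m;
//      void worker() {
//              lock( m );              // spins briefly, then blocks under contention
//              // ... critical section ...
//              unlock( m );
//      }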
404
405//-----------------------------------------------------------------------------
406// Fast Block Lock
407
408// minimal blocking lock
409// - No reacquire for cond var
410// - No recursive acquisition
411// - No ownership
412struct fast_block_lock {
413        // List of blocked threads
414        dlist( thread$ ) blocked_threads;
415
416        // Spin lock used for mutual exclusion
417        __spinlock_t lock;
418
419        // flag showing if lock is held
420        bool held:1;
421};
422
423static inline void  ?{}( fast_block_lock & this ) with(this) {
424        lock{};
425        blocked_threads{};
426        held = false;
427}
428static inline void ^?{}( fast_block_lock & this ) {}
429static inline void ?{}( fast_block_lock & this, fast_block_lock this2 ) = void;
430static inline void ?=?( fast_block_lock & this, fast_block_lock this2 ) = void;
431
432// if this is called recursively IT WILL DEADLOCK!!!!!
433static inline void lock( fast_block_lock & this ) with(this) {
434        lock( lock __cfaabi_dbg_ctx2 );
435        if ( held ) {
436                insert_last( blocked_threads, *active_thread() );
437                unlock( lock );
438                park( );
439                return;
440        }
441        held = true;
442        unlock( lock );
443}
444
445static inline void unlock( fast_block_lock & this ) with(this) {
446        lock( lock __cfaabi_dbg_ctx2 );
447        /* paranoid */ verifyf( held != false, "Attempt to release lock %p that isn't held", &this );
448        thread$ * t = &try_pop_front( blocked_threads );
449        held = ( t ? true : false );
450        unpark( t );
451        unlock( lock );
452}
453
454static inline void on_notify( fast_block_lock & this, struct thread$ * t ) with(this) {
455    lock( lock __cfaabi_dbg_ctx2 );
456    insert_last( blocked_threads, *t );
457    unlock( lock );
458}
459DEFAULT_ON_WAIT( fast_block_lock )
460DEFAULT_ON_WAKEUP_NO_REACQ( fast_block_lock )
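// Note the wait-morphing shape of the condition-variable hooks above: on_notify()
// does not unpark the signalled thread; it appends the thread to blocked_threads,
// and a later unlock() pops it, leaves `held` set on its behalf, and unparks it.
// Consequently on_wakeup() performs no reacquire: the woken thread already owns
// the lock when it resumes.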
461
462//-----------------------------------------------------------------------------
463// simple_owner_lock
464
465// pthread owner lock
466// - reacquire for cond var
467// - recursive acquisition
468// - ownership
469struct simple_owner_lock {
470        // List of blocked threads
471        dlist( select_node ) blocked_threads;
472
473        // Spin lock used for mutual exclusion
474        __spinlock_t lock;
475
476        // current owner; 0p when the lock is not held
477        struct thread$ * owner;
478
479        size_t recursion_count;
480};
481
482static inline void  ?{}( simple_owner_lock & this ) with(this) {
483        lock{};
484        blocked_threads{};
485        owner = 0p;
486        recursion_count = 0;
487}
488static inline void ^?{}( simple_owner_lock & this ) {}
489static inline void ?{}( simple_owner_lock & this, simple_owner_lock this2 ) = void;
490static inline void ?=?( simple_owner_lock & this, simple_owner_lock this2 ) = void;
491
492static inline void lock( simple_owner_lock & this ) with(this) {
493        if ( owner == active_thread() ) {
494                recursion_count++;
495                return;
496        }
497        lock( lock __cfaabi_dbg_ctx2 );
498
499        if ( owner != 0p ) {
500        select_node node;
501                insert_last( blocked_threads, node );
502                unlock( lock );
503                park( );
504                return;
505        }
506        owner = active_thread();
507        recursion_count = 1;
508        unlock( lock );
509}
510
511static inline void pop_node( simple_owner_lock & this ) with(this) {
512    __handle_waituntil_OR( blocked_threads );
513    select_node * node = &try_pop_front( blocked_threads );
514    if ( node ) {
515        owner = node->blocked_thread;
516        recursion_count = 1;
517        // if ( !node->clause_status || __make_select_node_available( *node ) ) unpark( node->blocked_thread );
518        wake_one( blocked_threads, *node );
519    } else {
520        owner = 0p;
521        recursion_count = 0;
522    }
523}
524
525static inline void unlock( simple_owner_lock & this ) with(this) {
526        lock( lock __cfaabi_dbg_ctx2 );
527        /* paranoid */ verifyf( owner != 0p, "Attempt to release lock %p that isn't held", &this );
528        /* paranoid */ verifyf( owner == active_thread(), "Thread %p other than the owner %p attempted to release owner lock %p", owner, active_thread(), &this );
529        // decrement recursion count; when it reaches zero, release the lock and set a new owner if one is waiting
530        recursion_count--;
531        if ( recursion_count == 0 ) {
532                pop_node( this );
533        }
534        unlock( lock );
535}
536
537static inline void on_notify( simple_owner_lock & this, thread$ * t ) with(this) {
538        lock( lock __cfaabi_dbg_ctx2 );
539        // lock held
540        if ( owner != 0p ) {
541                insert_last( blocked_threads, *(select_node *)t->link_node );
542        }
543        // lock not held
544        else {
545                owner = t;
546                recursion_count = 1;
547                unpark( t );
548        }
549        unlock( lock );
550}
551
552static inline size_t on_wait( simple_owner_lock & this, __cfa_pre_park pp_fn, void * pp_datum ) with(this) {
553        lock( lock __cfaabi_dbg_ctx2 );
554        /* paranoid */ verifyf( owner != 0p, "Attempt to release lock %p that isn't held", &this );
555        /* paranoid */ verifyf( owner == active_thread(), "Thread %p other than the owner %p attempted to release owner lock %p", owner, active_thread(), &this );
556
557        size_t ret = recursion_count;
558
559        pop_node( this );
560
561    select_node node;
562    active_thread()->link_node = (void *)&node;
563        unlock( lock );
564
565    pre_park_then_park( pp_fn, pp_datum );
566
567        return ret;
568}
569
570static inline void on_wakeup( simple_owner_lock & this, size_t recursion ) with(this) { recursion_count = recursion; }
571
572// waituntil() support
573static inline bool register_select( simple_owner_lock & this, select_node & node ) with(this) {
574    lock( lock __cfaabi_dbg_ctx2 );
575
576    // check if we can complete operation. If so race to establish winner in special OR case
577    if ( !node.park_counter && ( owner == active_thread() || owner == 0p ) ) {
578        if ( !__make_select_node_available( node ) ) { // we didn't win the race so give up on registering
579           unlock( lock );
580           return false;
581        }
582    }
583
584    if ( owner == active_thread() ) {
585                recursion_count++;
586        if ( node.park_counter ) __make_select_node_available( node );
587        unlock( lock );
588                return true;
589        }
590
591    if ( owner != 0p ) {
592                insert_last( blocked_threads, node );
593                unlock( lock );
594                return false;
595        }
596   
597        owner = active_thread();
598        recursion_count = 1;
599
600    if ( node.park_counter ) __make_select_node_available( node );
601    unlock( lock );
602    return true;
603}
604
605static inline bool unregister_select( simple_owner_lock & this, select_node & node ) with(this) {
606    lock( lock __cfaabi_dbg_ctx2 );
607    if ( node`isListed ) {
608        remove( node );
609        unlock( lock );
610        return false;
611    }
612
613    if ( owner == active_thread() ) {
614        recursion_count--;
615        if ( recursion_count == 0 ) {
616            pop_node( this );
617        }
618    }
619    unlock( lock );
620    return false;
621}
622
623static inline bool on_selected( simple_owner_lock & this, select_node & node ) { return true; }
624__CFA_SELECT_GET_TYPE( simple_owner_lock );
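// Because on_wait() returns the saved recursion_count and on_wakeup() restores
// it, a thread may block on a condition variable while holding this lock
// recursively and resume with its full nesting depth.  An illustrative sketch,
// assuming the condition_variable declared later in this header (function names
// are hypothetical):
//
//      simple_owner_lock m;
//      condition_variable( simple_owner_lock ) cv;
//
//      void consumer() {
//              lock( m ); lock( m );           // recursion_count == 2
//              wait( cv, m );                  // fully releases the lock, then parks
//              // resumed here with recursion_count restored to 2
//              unlock( m ); unlock( m );
//      }
//
//      void producer() {
//              lock( m );
//              notify_one( cv );               // waiter is re-queued on m via on_notify()
//              unlock( m );                    // ... and handed the lock by pop_node()
//      }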
625
626//-----------------------------------------------------------------------------
627// Spin Queue Lock
628
629// - No reacquire for cond var
630// - No recursive acquisition
631// - No ownership
632// - spin lock whose unlock is a single atomic store (no queueing or read-modify-write)
633struct spin_queue_lock {
634        // Spin lock used for mutual exclusion
635        mcs_spin_lock lock;
636
637        // flag showing if lock is held
638        volatile bool held;
639};
640
641static inline void  ?{}( spin_queue_lock & this ) with(this) {
642        lock{};
643        held = false;
644}
645static inline void ^?{}( spin_queue_lock & this ) {}
646static inline void ?{}( spin_queue_lock & this, spin_queue_lock this2 ) = void;
647static inline void ?=?( spin_queue_lock & this, spin_queue_lock this2 ) = void;
648
649// if this is called recursively IT WILL DEADLOCK!
650static inline void lock( spin_queue_lock & this ) with(this) {
651        mcs_spin_node node;
652        lock( lock, node );
653        while(__atomic_load_n(&held, __ATOMIC_SEQ_CST)) Pause();
654        __atomic_store_n(&held, true, __ATOMIC_SEQ_CST);
655        unlock( lock, node );
656}
657
658static inline void unlock( spin_queue_lock & this ) with(this) {
659        __atomic_store_n(&held, false, __ATOMIC_RELEASE);
660}
661
662DEFAULT_ON_NOTIFY( spin_queue_lock )
663DEFAULT_ON_WAIT( spin_queue_lock )
664DEFAULT_ON_WAKEUP_REACQ( spin_queue_lock )
665
666//-----------------------------------------------------------------------------
667// MCS Block Spin Lock
668
669// - No reacquire for cond var
670// - No recursive acquisition
671// - No ownership
672// - Blocks, but the first node spins (like spin_queue_lock, except threads other than the first block instead of spinning)
673struct mcs_block_spin_lock {
674        // Spin lock used for mutual exclusion
675        mcs_lock lock;
676
677        // flag showing if lock is held
678        volatile bool held;
679};
680
681static inline void  ?{}( mcs_block_spin_lock & this ) with(this) {
682        lock{};
683        held = false;
684}
685static inline void ^?{}( mcs_block_spin_lock & this ) {}
686static inline void ?{}( mcs_block_spin_lock & this, mcs_block_spin_lock this2 ) = void;
687static inline void ?=?( mcs_block_spin_lock & this, mcs_block_spin_lock this2 ) = void;
688
689// if this is called recursively IT WILL DEADLOCK!!!!!
690static inline void lock( mcs_block_spin_lock & this ) with(this) {
691        mcs_node node;
692        lock( lock, node );
693        while(__atomic_load_n(&held, __ATOMIC_SEQ_CST)) Pause();
694        __atomic_store_n(&held, true, __ATOMIC_SEQ_CST);
695        unlock( lock, node );
696}
697
698static inline void unlock(mcs_block_spin_lock & this) with(this) {
699        __atomic_store_n(&held, false, __ATOMIC_SEQ_CST);
700}
701
702DEFAULT_ON_NOTIFY( mcs_block_spin_lock )
703DEFAULT_ON_WAIT( mcs_block_spin_lock )
704DEFAULT_ON_WAKEUP_REACQ( mcs_block_spin_lock )
705
706//-----------------------------------------------------------------------------
707// Block Spin Lock
708
709// - No reacquire for cond var
710// - No recursive acquisition
711// - No ownership
712// - Blocks, but the first node spins (like spin_queue_lock, except threads other than the first block instead of spinning)
713struct block_spin_lock {
714        // Spin lock used for mutual exclusion
715        fast_block_lock lock;
716
717        // flag showing if lock is held
718        volatile bool held;
719};
720
721static inline void  ?{}( block_spin_lock & this ) with(this) {
722        lock{};
723        held = false;
724}
725static inline void ^?{}( block_spin_lock & this ) {}
726static inline void ?{}( block_spin_lock & this, block_spin_lock this2 ) = void;
727static inline void ?=?( block_spin_lock & this, block_spin_lock this2 ) = void;
728
729// if this is called recursively IT WILL DEADLOCK!!!!!
730static inline void lock( block_spin_lock & this ) with(this) {
731        lock( lock );
732        while(__atomic_load_n(&held, __ATOMIC_SEQ_CST)) Pause();
733        __atomic_store_n(&held, true, __ATOMIC_RELEASE);
734        unlock( lock );
735}
736
737static inline void unlock( block_spin_lock & this ) with(this) {
738        __atomic_store_n(&held, false, __ATOMIC_RELEASE);
739}
740
741static inline void on_notify( block_spin_lock & this, struct thread$ * t ) with(this.lock) {
742        // first we acquire internal fast_block_lock
743        lock( lock __cfaabi_dbg_ctx2 );
744        if ( held ) { // if internal fast_block_lock is held
745                insert_last( blocked_threads, *t );
746                unlock( lock );
747                return;
748        }
749        // if internal fast_block_lock is not held
750        held = true;
751        unlock( lock );
752
753        unpark(t);
754}
755DEFAULT_ON_WAIT( block_spin_lock )
756static inline void on_wakeup( block_spin_lock & this, size_t recursion ) with(this) {
757        // now we acquire the entire block_spin_lock upon waking up
758        while(__atomic_load_n(&held, __ATOMIC_SEQ_CST)) Pause();
759        __atomic_store_n(&held, true, __ATOMIC_RELEASE);
760        unlock( lock ); // Now we release the internal fast_block_lock
761}
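// The notify/wakeup pair above splits acquisition of the composite lock between
// the signaller and the signalled thread: on_notify() takes the internal
// fast_block_lock on behalf of the woken thread (or queues the thread on it when
// it is already held), and the woken thread finishes in on_wakeup() by spinning
// until the outer `held` flag clears, claiming it, and only then releasing the
// internal lock.  Holding the internal lock across the unpark is what prevents
// other threads from barging in between the wakeup and the spin.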
762
763//-----------------------------------------------------------------------------
764// info_thread
765// the info thread is a wrapper around a thread used
766// to store extra data for use in the condition variable
767forall(L & | is_blocking_lock(L)) {
768        struct info_thread;
769}
770
771//-----------------------------------------------------------------------------
772// Synchronization Locks
773forall(L & | is_blocking_lock(L)) {
774
775        //-----------------------------------------------------------------------------
776        // condition_variable
777
778        // The multi-tool condition variable (a usage sketch follows at the end of this header)
779        // - can wait with a timeout, returning on either a signal or the timeout
780        // - can wait without passing a lock
781        // - can have waiters reacquire different locks while waiting on the same cond var
782        // - has shadow queue
783        // - can be signalled outside of critical sections with no locks held
784        struct condition_variable {
785                // Spin lock used for mutual exclusion
786                __spinlock_t lock;
787
788                // List of blocked threads
789                dlist( info_thread(L) ) blocked_threads;
790
791                // Count of current blocked threads
792                int count;
793        };
794
795
796        void  ?{}( condition_variable(L) & this );
797        void ^?{}( condition_variable(L) & this );
798
799        bool notify_one( condition_variable(L) & this );
800        bool notify_all( condition_variable(L) & this );
801
802        uintptr_t front( condition_variable(L) & this );
803
804        bool empty  ( condition_variable(L) & this );
805        int  counter( condition_variable(L) & this );
806
807        void wait( condition_variable(L) & this );
808        void wait( condition_variable(L) & this, uintptr_t info );
809        bool wait( condition_variable(L) & this, Duration duration );
810        bool wait( condition_variable(L) & this, uintptr_t info, Duration duration );
811
812        void wait( condition_variable(L) & this, L & l );
813        void wait( condition_variable(L) & this, L & l, uintptr_t info );
814        bool wait( condition_variable(L) & this, L & l, Duration duration );
815        bool wait( condition_variable(L) & this, L & l, uintptr_t info, Duration duration );
816
817        //-----------------------------------------------------------------------------
818        // fast_cond_var
819
820        // The trimmed and slim condition variable
821        // - no internal lock so you must hold a lock while using this cond var
822        // - signalling without holding the branded lock is UNSAFE!
823        // - only one lock may be used with it; the cond var is branded by its first use
824
825        struct fast_cond_var {
826                // List of blocked threads
827                dlist( info_thread(L) ) blocked_threads;
828                #ifdef __CFA_DEBUG__
829                L * lock_used;
830                #endif
831        };
832
833        void  ?{}( fast_cond_var(L) & this );
834        void ^?{}( fast_cond_var(L) & this );
835
836        bool notify_one( fast_cond_var(L) & this );
837        bool notify_all( fast_cond_var(L) & this );
838
839        uintptr_t front( fast_cond_var(L) & this );
840        bool empty  ( fast_cond_var(L) & this );
841
842        void wait( fast_cond_var(L) & this, L & l );
843        void wait( fast_cond_var(L) & this, L & l, uintptr_t info );
844
845
846        //-----------------------------------------------------------------------------
847        // pthread_cond_var
848        //
849        // - cond var with minimal footprint
850// - supports the operations needed for pthread condition variables
851
852        struct pthread_cond_var {
853                dlist( info_thread(L) ) blocked_threads;
854                __spinlock_t lock;
855        };
856
857        void  ?{}( pthread_cond_var(L) & this );
858        void ^?{}( pthread_cond_var(L) & this );
859
860        bool notify_one( pthread_cond_var(L) & this );
861        bool notify_all( pthread_cond_var(L) & this );
862
863        uintptr_t front( pthread_cond_var(L) & this );
864        bool empty ( pthread_cond_var(L) & this );
865
866        void wait( pthread_cond_var(L) & this, L & l );
867        void wait( pthread_cond_var(L) & this, L & l, uintptr_t info );
868        bool wait( pthread_cond_var(L) & this, L & l, timespec t );
869        bool wait( pthread_cond_var(L) & this, L & l, uintptr_t info, timespec t );
870}
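// A minimal end-to-end sketch of the general condition_variable paired with one
// of the blocking locks above (illustrative only; the program structure and
// function names are hypothetical):
//
//      owner_lock m;
//      condition_variable( owner_lock ) cv;
//      bool ready = false;
//
//      void waiter() {
//              lock( m );
//              while ( ! ready ) wait( cv, m );  // releases m and blocks; m is held again on return
//              unlock( m );
//      }
//
//      void signaller() {
//              lock( m );
//              ready = true;
//              notify_one( cv );                 // may also be signalled with no locks held (see above)
//              unlock( m );
//      }
//
// The timed variants take a Duration (or, for pthread_cond_var, a timespec) and
// return a bool describing how the wait ended.  fast_cond_var offers the same
// wait/notify shape with a smaller footprint, but the branded lock must be held
// whenever it is signalled.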