source: libcfa/src/concurrency/locks.hfa @ cb69fba

Last change on this file since cb69fba was beeff61e, checked in by caparsons <caparson@…>, 20 months ago

some cleanup and a bunch of changes to support waituntil statement

//
// Cforall Version 1.0.0 Copyright (C) 2021 University of Waterloo
//
// The contents of this file are covered under the licence agreement in the
// file "LICENCE" distributed with Cforall.
//
// locks.hfa -- PUBLIC
// Runtime locks that are used with the runtime thread system.
//
// Author           : Colby Alexander Parsons
// Created On       : Thu Jan 21 19:46:50 2021
// Last Modified By :
// Last Modified On :
// Update Count     :
//

#pragma once

#include <stdbool.h>
#include <stdio.h>

#include "bits/weakso_locks.hfa"
#include "containers/lockfree.hfa"
#include "containers/list.hfa"

#include "limits.hfa"
#include "thread.hfa"

#include "time_t.hfa"
#include "time.hfa"

#include "select.hfa"

#include <fstream.hfa>

// futex headers
#include <linux/futex.h>      /* Definition of FUTEX_* constants */
#include <sys/syscall.h>      /* Definition of SYS_* constants */
#include <unistd.h>

// C_TODO: cleanup this and locks.cfa
// - appropriate separation of interface and impl
// - clean up unused/unneeded locks
// - change messy big blocking lock from inheritance to composition to remove need for flags

//-----------------------------------------------------------------------------
// Semaphore
struct semaphore {
        __spinlock_t lock;
        int count;
        __queue_t(thread$) waiting;
};

void  ?{}(semaphore & this, int count = 1);
void ^?{}(semaphore & this);
bool   P (semaphore & this);
bool   V (semaphore & this);
bool   V (semaphore & this, unsigned count);
thread$ * V (semaphore & this, bool );

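// Illustrative sketch (not part of the original header): one way the counting
// semaphore above could hand work from a producer to a consumer thread. The
// thread name, counts, and include paths below are hypothetical.
#if 0
#include <thread.hfa>
#include <locks.hfa>

semaphore items = { 0 };                        // start with no items available

thread Consumer {};
void main( Consumer & this ) {
        for ( i; 10 ) {
                P( items );                     // block until the producer posts an item
                // ... consume one item ...
        }
}

int main() {
        processor p;                            // add a kernel thread for parallelism
        Consumer cons;                          // thread starts when declared
        for ( i; 10 ) {
                // ... produce one item ...
                V( items );                     // wake a waiting consumer or bank the count
        }
}                                               // Consumer is joined when it goes out of scope
#endif
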
//----------
struct single_acquisition_lock {
        inline blocking_lock;
};

static inline void  ?{}( single_acquisition_lock & this ) {((blocking_lock &)this){ false, false };}
static inline void ^?{}( single_acquisition_lock & this ) {}
static inline void   lock     ( single_acquisition_lock & this ) { lock    ( (blocking_lock &)this ); }
static inline bool   try_lock ( single_acquisition_lock & this ) { return try_lock( (blocking_lock &)this ); }
static inline void   unlock   ( single_acquisition_lock & this ) { unlock  ( (blocking_lock &)this ); }
static inline size_t on_wait  ( single_acquisition_lock & this ) { return on_wait ( (blocking_lock &)this ); }
static inline void   on_wakeup( single_acquisition_lock & this, size_t v ) { on_wakeup ( (blocking_lock &)this, v ); }
static inline void   on_notify( single_acquisition_lock & this, struct thread$ * t ) { on_notify( (blocking_lock &)this, t ); }
static inline bool   register_select( single_acquisition_lock & this, select_node & node ) { return register_select( (blocking_lock &)this, node ); }
static inline bool   unregister_select( single_acquisition_lock & this, select_node & node ) { return unregister_select( (blocking_lock &)this, node ); }
static inline bool   on_selected( single_acquisition_lock & this, select_node & node ) { return on_selected( (blocking_lock &)this, node ); }

//----------
struct owner_lock {
        inline blocking_lock;
};

static inline void  ?{}( owner_lock & this ) {((blocking_lock &)this){ true, true };}
static inline void ^?{}( owner_lock & this ) {}
static inline void   lock     ( owner_lock & this ) { lock    ( (blocking_lock &)this ); }
static inline bool   try_lock ( owner_lock & this ) { return try_lock( (blocking_lock &)this ); }
static inline void   unlock   ( owner_lock & this ) { unlock  ( (blocking_lock &)this ); }
static inline size_t on_wait  ( owner_lock & this ) { return on_wait ( (blocking_lock &)this ); }
static inline void   on_wakeup( owner_lock & this, size_t v ) { on_wakeup ( (blocking_lock &)this, v ); }
static inline void   on_notify( owner_lock & this, struct thread$ * t ) { on_notify( (blocking_lock &)this, t ); }
static inline bool   register_select( owner_lock & this, select_node & node ) { return register_select( (blocking_lock &)this, node ); }
static inline bool   unregister_select( owner_lock & this, select_node & node ) { return unregister_select( (blocking_lock &)this, node ); }
static inline bool   on_selected( owner_lock & this, select_node & node ) { return on_selected( (blocking_lock &)this, node ); }

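// Illustrative sketch (not part of the original header): both wrappers expose the
// same interface over blocking_lock; single_acquisition_lock must not be relocked
// by its holder, while owner_lock records an owner and lets that thread acquire it
// recursively. The function and variable names below are hypothetical.
#if 0
#include <locks.hfa>

single_acquisition_lock sal;
owner_lock ol;

void update_once() {
        lock( sal );            // a second lock( sal ) by this thread would block forever
        // ... critical section ...
        unlock( sal );
}

void update_nested() {
        lock( ol );
        lock( ol );             // legal: the owner's acquisitions are counted
        // ... critical section ...
        unlock( ol );
        unlock( ol );           // released when the count returns to zero
}
#endif
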
//-----------------------------------------------------------------------------
// MCS Lock
struct mcs_node {
        mcs_node * volatile next;
        single_sem sem;
};

static inline void ?{}(mcs_node & this) { this.next = 0p; }

static inline mcs_node * volatile & ?`next ( mcs_node * node ) {
        return node->next;
}

struct mcs_lock {
        mcs_queue(mcs_node) queue;
};

static inline void lock(mcs_lock & l, mcs_node & n) {
        if(push(l.queue, &n))
                wait(n.sem);
}

static inline void unlock(mcs_lock & l, mcs_node & n) {
        mcs_node * next = advance(l.queue, &n);
        if(next) post(next->sem);
}

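// Illustrative sketch (not part of the original header): an MCS lock queues
// acquirers on caller-supplied nodes, so each thread passes its own (usually
// stack-allocated) mcs_node to both lock and unlock. Names are hypothetical.
#if 0
#include <locks.hfa>

mcs_lock m;

void critical() {
        mcs_node n;             // per-acquisition queue node on this thread's stack
        lock( m, n );           // enqueue n; wait on n.sem unless the queue was empty
        // ... critical section ...
        unlock( m, n );         // hand the lock to the next queued node, if any
}
#endif
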
//-----------------------------------------------------------------------------
// MCS Spin Lock
// - No recursive acquisition
// - Needs to be released by owner

struct mcs_spin_node {
        mcs_spin_node * volatile next;
        volatile bool locked;
};

struct mcs_spin_queue {
        mcs_spin_node * volatile tail;
};

static inline void ?{}(mcs_spin_node & this) { this.next = 0p; this.locked = true; }

static inline mcs_spin_node * volatile & ?`next ( mcs_spin_node * node ) {
        return node->next;
}

struct mcs_spin_lock {
        mcs_spin_queue queue;
};

static inline void lock(mcs_spin_lock & l, mcs_spin_node & n) {
        mcs_spin_node * prev = __atomic_exchange_n(&l.queue.tail, &n, __ATOMIC_SEQ_CST);
        n.locked = true;
        if(prev == 0p) return;
        prev->next = &n;
        while(__atomic_load_n(&n.locked, __ATOMIC_RELAXED)) Pause();
}

static inline void unlock(mcs_spin_lock & l, mcs_spin_node & n) {
        mcs_spin_node * n_ptr = &n;
        if (__atomic_compare_exchange_n(&l.queue.tail, &n_ptr, 0p, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) return;
        while (__atomic_load_n(&n.next, __ATOMIC_RELAXED) == 0p) {}
        n.next->locked = false;
}

//-----------------------------------------------------------------------------
// futex_mutex

// - Kernel-thread-blocking alternative to the spinlock
// - No ownership (will deadlock on reacquisition)
struct futex_mutex {
        // lock state: any state other than UNLOCKED is locked
        // enum LockState { UNLOCKED = 0, UNCONTENDED = 1, CONTENDED = 2 };

        // stores a lock state
        int val;
};

// to use for FUTEX_WAKE and FUTEX_WAIT (other futex calls will need more params)
static inline int futex(int *uaddr, int futex_op, int val) {
    return syscall(SYS_futex, uaddr, futex_op, val, NULL, NULL, 0);
}

static inline void  ?{}( futex_mutex & this ) with(this) { val = 0; }

static inline bool internal_try_lock(futex_mutex & this, int & compare_val) with(this) {
        return __atomic_compare_exchange_n((int*)&val, (int*)&compare_val, 1, false, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
}

static inline int internal_exchange(futex_mutex & this) with(this) {
        return __atomic_exchange_n((int*)&val, 2, __ATOMIC_ACQUIRE);
}

// if this is called recursively IT WILL DEADLOCK!!!!!
static inline void lock( futex_mutex & this ) with(this) {
        int state;

        for( int spin = 4; spin < 1024; spin += spin) {
                state = 0;
                // if unlocked, lock and return
                if (internal_try_lock(this, state)) return;
                if (2 == state) break;
                for (int i = 0; i < spin; i++) Pause();
        }

        // if not in contended state, set to be in contended state
        if (state != 2) state = internal_exchange(this);

        // block and spin until we win the lock
        while (state != 0) {
                futex((int*)&val, FUTEX_WAIT, 2); // if val is not 2 this returns with EWOULDBLOCK
                state = internal_exchange(this);
        }
}

static inline void unlock(futex_mutex & this) with(this) {
        // if uncontended do atomic unlock and then return
    if (__atomic_exchange_n(&val, 0, __ATOMIC_RELEASE) == 1) return;

        // otherwise threads are blocked so we must wake one
        futex((int *)&val, FUTEX_WAKE, 1);
}

static inline void on_notify( futex_mutex & f, thread$ * t){ unpark(t); }
static inline size_t on_wait( futex_mutex & f ) { unlock(f); park(); return 0; }

// to set recursion count after getting signalled;
static inline void on_wakeup( futex_mutex & f, size_t recursion ) {}

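// Illustrative sketch (not part of the original header): val is 0 (unlocked),
// 1 (locked, uncontended) or 2 (locked, contended); lock() spins briefly, then
// parks the kernel thread in futex(FUTEX_WAIT) until val changes, and unlock()
// only issues FUTEX_WAKE when its exchange shows the lock was contended.
// Usage is plain lock/unlock; the names below are hypothetical.
#if 0
#include <locks.hfa>

futex_mutex counter_mtx;
size_t counter = 0;

void bump() {
        lock( counter_mtx );    // must not already be held by this thread (no ownership)
        counter += 1;
        unlock( counter_mtx );
}
#endif
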
//-----------------------------------------------------------------------------
// go_mutex

// - Kernel-thread-blocking alternative to the spinlock
// - No ownership (will deadlock on reacquisition)
// - Golang's flavour of mutex
// - Implementation taken from Golang: src/runtime/lock_futex.go
struct go_mutex {
        // lock state: any state other than UNLOCKED is locked
        // enum LockState { UNLOCKED = 0, LOCKED = 1, SLEEPING = 2 };

        // stores a lock state
        int val;
};

static inline void  ?{}( go_mutex & this ) with(this) { val = 0; }

static inline bool internal_try_lock(go_mutex & this, int & compare_val, int new_val ) with(this) {
        return __atomic_compare_exchange_n((int*)&val, (int*)&compare_val, new_val, false, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
}

static inline int internal_exchange(go_mutex & this, int swap ) with(this) {
        return __atomic_exchange_n((int*)&val, swap, __ATOMIC_ACQUIRE);
}

// if this is called recursively IT WILL DEADLOCK!!!!!
static inline void lock( go_mutex & this ) with( this ) {
        int state, init_state;

    // speculative grab
    state = internal_exchange(this, 1);
    if ( !state ) return; // state == 0
    init_state = state;
    for (;;) {
        for( int i = 0; i < 4; i++ ) {
            while( !val ) { // lock unlocked
                state = 0;
                if ( internal_try_lock( this, state, init_state ) ) return;
            }
            for (int i = 0; i < 30; i++) Pause();
        }

        while( !val ) { // lock unlocked
            state = 0;
            if ( internal_try_lock( this, state, init_state ) ) return;
        }
        sched_yield();

        // if not in contended state, set to be in contended state
        state = internal_exchange( this, 2 );
        if ( !state ) return; // state == 0
        init_state = 2;
        futex( (int*)&val, FUTEX_WAIT, 2 ); // if val is not 2 this returns with EWOULDBLOCK
    }
}

static inline void unlock( go_mutex & this ) with(this) {
        // if uncontended do atomic unlock and then return
    if ( __atomic_exchange_n(&val, 0, __ATOMIC_RELEASE) == 1 ) return;

        // otherwise threads are blocked so we must wake one
        futex( (int *)&val, FUTEX_WAKE, 1 );
}

static inline void on_notify( go_mutex & f, thread$ * t){ unpark( t ); }
static inline size_t on_wait( go_mutex & f ) { unlock( f ); park(); return 0; }
static inline void on_wakeup( go_mutex & f, size_t recursion ) {}

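// Illustrative sketch (not part of the original header): go_mutex is used exactly
// like futex_mutex; the difference is the acquisition path, which interleaves short
// Pause() spins, a sched_yield(), and only then a futex wait, following Go's
// runtime/lock_futex.go. The names below are hypothetical.
#if 0
#include <locks.hfa>

go_mutex gm;

void with_go_mutex() {
        lock( gm );             // non-recursive: relocking from the holder deadlocks
        // ... critical section ...
        unlock( gm );
}
#endif
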
//-----------------------------------------------------------------------------
// Exponential backoff then block lock
struct exp_backoff_then_block_lock {
        // Spin lock used for mutual exclusion
        __spinlock_t spinlock;

        // List of blocked threads
        dlist( thread$ ) blocked_threads;

        // Used for comparing and exchanging
        volatile size_t lock_value;
};

static inline void  ?{}( exp_backoff_then_block_lock & this ) {
        this.spinlock{};
        this.blocked_threads{};
        this.lock_value = 0;
}

static inline void  ^?{}( exp_backoff_then_block_lock & this ){}

static inline bool internal_try_lock( exp_backoff_then_block_lock & this, size_t & compare_val ) with(this) {
        return __atomic_compare_exchange_n(&lock_value, &compare_val, 1, false, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
}

static inline bool try_lock( exp_backoff_then_block_lock & this ) { size_t compare_val = 0; return internal_try_lock( this, compare_val ); }

static inline bool try_lock_contention( exp_backoff_then_block_lock & this ) with(this) {
        return !__atomic_exchange_n( &lock_value, 2, __ATOMIC_ACQUIRE );
}

static inline bool block( exp_backoff_then_block_lock & this ) with(this) {
    lock( spinlock __cfaabi_dbg_ctx2 );
    if (__atomic_load_n( &lock_value, __ATOMIC_SEQ_CST) != 2) {
        unlock( spinlock );
        return true;
    }
    insert_last( blocked_threads, *active_thread() );
    unlock( spinlock );
        park( );
        return true;
}

static inline void lock( exp_backoff_then_block_lock & this ) with(this) {
        size_t compare_val = 0;
        int spin = 4;

        // exponential backoff: spin count doubles each round
        for( ;; ) {
                compare_val = 0;
                if (internal_try_lock(this, compare_val)) return;
                if (2 == compare_val) break;
                for (int i = 0; i < spin; i++) Pause();
                if (spin >= 1024) break;
                spin += spin;
        }

        if(2 != compare_val && try_lock_contention(this)) return;
        // block until signalled
        while (block(this)) if(try_lock_contention(this)) return;
}

static inline void unlock( exp_backoff_then_block_lock & this ) with(this) {
    if (__atomic_exchange_n(&lock_value, 0, __ATOMIC_RELEASE) == 1) return;
    lock( spinlock __cfaabi_dbg_ctx2 );
    thread$ * t = &try_pop_front( blocked_threads );
    unlock( spinlock );
    unpark( t );
}

static inline void on_notify( exp_backoff_then_block_lock & this, struct thread$ * t ) { unpark( t ); }
static inline size_t on_wait( exp_backoff_then_block_lock & this ) { unlock( this ); park(); return 0; }
static inline void on_wakeup( exp_backoff_then_block_lock & this, size_t recursion ) { lock( this ); }

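// Illustrative sketch (not part of the original header): the lock first spins with
// exponentially growing pause counts, then marks itself contended (value 2) and
// parks the thread until unlock() unparks it; try_lock() gives an opportunistic
// fast path. The names below are hypothetical.
#if 0
#include <locks.hfa>

exp_backoff_then_block_lock ebl;

void maybe_update() {
        if ( try_lock( ebl ) ) {                // succeeds only if currently unlocked
                // ... short critical section ...
                unlock( ebl );
        } else {
                lock( ebl );                    // spin, then block until signalled
                // ... critical section ...
                unlock( ebl );
        }
}
#endif
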
//-----------------------------------------------------------------------------
// Fast Block Lock

// minimal blocking lock
// - No reacquire for cond var
// - No recursive acquisition
// - No ownership
struct fast_block_lock {
        // List of blocked threads
        dlist( thread$ ) blocked_threads;

        // Spin lock used for mutual exclusion
        __spinlock_t lock;

        // flag showing if lock is held
        bool held:1;
};

static inline void  ?{}( fast_block_lock & this ) with(this) {
        lock{};
        blocked_threads{};
        held = false;
}
static inline void ^?{}( fast_block_lock & this ) {}
static inline void ?{}( fast_block_lock & this, fast_block_lock this2 ) = void;
static inline void ?=?( fast_block_lock & this, fast_block_lock this2 ) = void;

// if this is called recursively IT WILL DEADLOCK!!!!!
static inline void lock( fast_block_lock & this ) with(this) {
        lock( lock __cfaabi_dbg_ctx2 );
        if ( held ) {
                insert_last( blocked_threads, *active_thread() );
                unlock( lock );
                park( );
                return;
        }
        held = true;
        unlock( lock );
}

static inline void unlock( fast_block_lock & this ) with(this) {
        lock( lock __cfaabi_dbg_ctx2 );
        /* paranoid */ verifyf( held != false, "Attempt to release lock %p that isn't held", &this );
        thread$ * t = &try_pop_front( blocked_threads );
        held = ( t ? true : false );
        unpark( t );
        unlock( lock );
}

static inline void on_notify( fast_block_lock & this, struct thread$ * t ) with(this) {
    lock( lock __cfaabi_dbg_ctx2 );
    insert_last( blocked_threads, *t );
    unlock( lock );
}
static inline size_t on_wait( fast_block_lock & this) { unlock(this); park(); return 0; }
static inline void on_wakeup( fast_block_lock & this, size_t recursion ) { }

//-----------------------------------------------------------------------------
// simple_owner_lock

// pthread owner lock
// - reacquire for cond var
// - recursive acquisition
// - ownership
struct simple_owner_lock {
        // List of blocked threads
        dlist( select_node ) blocked_threads;

        // Spin lock used for mutual exclusion
        __spinlock_t lock;

        // owner showing if lock is held
        struct thread$ * owner;

        size_t recursion_count;
};

static inline void  ?{}( simple_owner_lock & this ) with(this) {
        lock{};
        blocked_threads{};
        owner = 0p;
        recursion_count = 0;
}
static inline void ^?{}( simple_owner_lock & this ) {}
static inline void ?{}( simple_owner_lock & this, simple_owner_lock this2 ) = void;
static inline void ?=?( simple_owner_lock & this, simple_owner_lock this2 ) = void;

static inline void lock( simple_owner_lock & this ) with(this) {
        if ( owner == active_thread() ) {
                recursion_count++;
                return;
        }
        lock( lock __cfaabi_dbg_ctx2 );

        if ( owner != 0p ) {
                select_node node;
                insert_last( blocked_threads, node );
                unlock( lock );
                park( );
                return;
        }
        owner = active_thread();
        recursion_count = 1;
        unlock( lock );
}

static inline void pop_node( simple_owner_lock & this ) with(this) {
    __handle_waituntil_OR( blocked_threads );
    select_node * node = &try_pop_front( blocked_threads );
    if ( node ) {
        owner = node->blocked_thread;
        recursion_count = 1;
        // if ( !node->clause_status || __make_select_node_available( *node ) ) unpark( node->blocked_thread );
        wake_one( blocked_threads, *node );
    } else {
        owner = 0p;
        recursion_count = 0;
    }
}

static inline void unlock( simple_owner_lock & this ) with(this) {
        lock( lock __cfaabi_dbg_ctx2 );
        /* paranoid */ verifyf( owner != 0p, "Attempt to release lock %p that isn't held", &this );
        /* paranoid */ verifyf( owner == active_thread(), "Thread %p other than the owner %p attempted to release owner lock %p", active_thread(), owner, &this );
        // once the recursion count reaches zero, release the lock and set a new owner if one is waiting
        recursion_count--;
        if ( recursion_count == 0 ) {
                pop_node( this );
        }
        unlock( lock );
}

static inline void on_notify(simple_owner_lock & this, thread$ * t ) with(this) {
        lock( lock __cfaabi_dbg_ctx2 );
        // lock held
        if ( owner != 0p ) {
                insert_last( blocked_threads, *(select_node *)t->link_node );
        }
        // lock not held
        else {
                owner = t;
                recursion_count = 1;
                unpark( t );
        }
        unlock( lock );
}

static inline size_t on_wait( simple_owner_lock & this ) with(this) {
        lock( lock __cfaabi_dbg_ctx2 );
        /* paranoid */ verifyf( owner != 0p, "Attempt to release lock %p that isn't held", &this );
        /* paranoid */ verifyf( owner == active_thread(), "Thread %p other than the owner %p attempted to release owner lock %p", active_thread(), owner, &this );

        size_t ret = recursion_count;

        pop_node( this );

    select_node node;
    active_thread()->link_node = (void *)&node;
        unlock( lock );
    park();

        return ret;
}

static inline void on_wakeup( simple_owner_lock & this, size_t recursion ) with(this) { recursion_count = recursion; }

// waituntil() support
static inline bool register_select( simple_owner_lock & this, select_node & node ) with(this) {
    lock( lock __cfaabi_dbg_ctx2 );

    // check if we can complete operation. If so race to establish winner in special OR case
    if ( !node.park_counter && ( owner == active_thread() || owner == 0p ) ) {
        if ( !__make_select_node_available( node ) ) { // we didn't win the race so give up on registering
           unlock( lock );
           return false;
        }
    }

    if ( owner == active_thread() ) {
                recursion_count++;
        if ( node.park_counter ) __make_select_node_available( node );
        unlock( lock );
                return true;
        }

    if ( owner != 0p ) {
                insert_last( blocked_threads, node );
                unlock( lock );
                return false;
        }

        owner = active_thread();
        recursion_count = 1;

    if ( node.park_counter ) __make_select_node_available( node );
    unlock( lock );
    return true;
}

static inline bool unregister_select( simple_owner_lock & this, select_node & node ) with(this) {
    lock( lock __cfaabi_dbg_ctx2 );
    if ( node`isListed ) {
        remove( node );
        unlock( lock );
        return false;
    }

    if ( owner == active_thread() ) {
        recursion_count--;
        if ( recursion_count == 0 ) {
            pop_node( this );
        }
    }
    unlock( lock );
    return false;
}

static inline bool on_selected( simple_owner_lock & this, select_node & node ) { return true; }


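// Illustrative sketch (not part of the original header): simple_owner_lock records
// its owner, so the owning thread can relock it without deadlocking, and its
// on_wait/on_wakeup save and restore the recursion count so condition variables can
// fully release and later reacquire it. The function names below are hypothetical.
#if 0
#include <locks.hfa>

simple_owner_lock sol;

void inner() {
        lock( sol );            // recursion_count: 1 -> 2
        // ... work that also needs the lock ...
        unlock( sol );          // back to 1, still owned by this thread
}

void outer() {
        lock( sol );            // recursion_count == 1, owner == this thread
        inner();
        unlock( sol );          // recursion_count == 0, lock released
}
#endif
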
//-----------------------------------------------------------------------------
// Spin Queue Lock

// - No reacquire for cond var
// - No recursive acquisition
// - No ownership
// - spin lock with no locking or atomic read-modify-write in unlock (just a release store)
struct spin_queue_lock {
        // Spin lock used for mutual exclusion
        mcs_spin_lock lock;

        // flag showing if lock is held
        volatile bool held;
};

static inline void  ?{}( spin_queue_lock & this ) with(this) {
        lock{};
        held = false;
}
static inline void ^?{}( spin_queue_lock & this ) {}
static inline void ?{}( spin_queue_lock & this, spin_queue_lock this2 ) = void;
static inline void ?=?( spin_queue_lock & this, spin_queue_lock this2 ) = void;

// if this is called recursively IT WILL DEADLOCK!
static inline void lock( spin_queue_lock & this ) with(this) {
        mcs_spin_node node;
        lock( lock, node );
        while(__atomic_load_n(&held, __ATOMIC_SEQ_CST)) Pause();
        __atomic_store_n(&held, true, __ATOMIC_SEQ_CST);
        unlock( lock, node );
}

static inline void unlock( spin_queue_lock & this ) with(this) {
        __atomic_store_n(&held, false, __ATOMIC_RELEASE);
}

static inline void on_notify( spin_queue_lock & this, struct thread$ * t ) {
        unpark(t);
}
static inline size_t on_wait( spin_queue_lock & this ) { unlock( this ); park(); return 0; }
static inline void on_wakeup( spin_queue_lock & this, size_t recursion ) { lock( this ); }


//-----------------------------------------------------------------------------
// MCS Block Spin Lock

// - No reacquire for cond var
// - No recursive acquisition
// - No ownership
// - Blocks, but the first node spins (like spin_queue_lock, except non-first threads block instead of spinning)
struct mcs_block_spin_lock {
        // Spin lock used for mutual exclusion
        mcs_lock lock;

        // flag showing if lock is held
        volatile bool held;
};

static inline void  ?{}( mcs_block_spin_lock & this ) with(this) {
        lock{};
        held = false;
}
static inline void ^?{}( mcs_block_spin_lock & this ) {}
static inline void ?{}( mcs_block_spin_lock & this, mcs_block_spin_lock this2 ) = void;
static inline void ?=?( mcs_block_spin_lock & this, mcs_block_spin_lock this2 ) = void;

// if this is called recursively IT WILL DEADLOCK!!!!!
static inline void lock( mcs_block_spin_lock & this ) with(this) {
        mcs_node node;
        lock( lock, node );
        while(__atomic_load_n(&held, __ATOMIC_SEQ_CST)) Pause();
        __atomic_store_n(&held, true, __ATOMIC_SEQ_CST);
        unlock( lock, node );
}

static inline void unlock(mcs_block_spin_lock & this) with(this) {
        __atomic_store_n(&held, false, __ATOMIC_SEQ_CST);
}

static inline void on_notify( mcs_block_spin_lock & this, struct thread$ * t ) { unpark( t ); }
static inline size_t on_wait( mcs_block_spin_lock & this) { unlock( this ); park(); return 0; }
static inline void on_wakeup( mcs_block_spin_lock & this, size_t recursion ) {lock( this ); }

//-----------------------------------------------------------------------------
// Block Spin Lock

// - No reacquire for cond var
// - No recursive acquisition
// - No ownership
// - Blocks, but the first node spins (like spin_queue_lock, except non-first threads block instead of spinning)
struct block_spin_lock {
        // Spin lock used for mutual exclusion
        fast_block_lock lock;

        // flag showing if lock is held
        volatile bool held;
};

static inline void  ?{}( block_spin_lock & this ) with(this) {
        lock{};
        held = false;
}
static inline void ^?{}( block_spin_lock & this ) {}
static inline void ?{}( block_spin_lock & this, block_spin_lock this2 ) = void;
static inline void ?=?( block_spin_lock & this, block_spin_lock this2 ) = void;

// if this is called recursively IT WILL DEADLOCK!!!!!
static inline void lock( block_spin_lock & this ) with(this) {
        lock( lock );
        while(__atomic_load_n(&held, __ATOMIC_SEQ_CST)) Pause();
        __atomic_store_n(&held, true, __ATOMIC_RELEASE);
        unlock( lock );
}

static inline void unlock( block_spin_lock & this ) with(this) {
        __atomic_store_n(&held, false, __ATOMIC_RELEASE);
}

static inline void on_notify( block_spin_lock & this, struct thread$ * t ) with(this.lock) {
        // first we acquire internal fast_block_lock
        lock( lock __cfaabi_dbg_ctx2 );
        if ( held ) { // if internal fast_block_lock is held
                insert_last( blocked_threads, *t );
                unlock( lock );
                return;
        }
        // if internal fast_block_lock is not held
        held = true;
        unlock( lock );

        unpark(t);
}
static inline size_t on_wait( block_spin_lock & this ) { unlock( this ); park(); return 0; }
static inline void on_wakeup( block_spin_lock & this, size_t recursion ) with(this) {
        // now we acquire the entire block_spin_lock upon waking up
        while(__atomic_load_n(&held, __ATOMIC_SEQ_CST)) Pause();
        __atomic_store_n(&held, true, __ATOMIC_RELEASE);
        unlock( lock ); // now we release the internal fast_block_lock
}

//-----------------------------------------------------------------------------
// is_blocking_lock
forall( L & | sized(L) )
trait is_blocking_lock {
        // For synchronization locks to use when acquiring
        void on_notify( L &, struct thread$ * );

        // For synchronization locks to use when releasing
        size_t on_wait( L & );

        // to set recursion count after getting signalled
        void on_wakeup( L &, size_t recursion );
};

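// Illustrative sketch (not part of the original header): any lock type supplying
// these three operations satisfies is_blocking_lock and can be used with the
// condition variables declared below. The toy wrapper here is hypothetical and
// simply delegates to owner_lock to show the required signatures.
#if 0
#include <thread.hfa>
#include <locks.hfa>

struct toy_lock { owner_lock impl; };

static inline void lock( toy_lock & this ) { lock( this.impl ); }
static inline void unlock( toy_lock & this ) { unlock( this.impl ); }

// called by a condition variable to hand the lock to a signalled waiter
static inline void on_notify( toy_lock & this, struct thread$ * t ) { on_notify( this.impl, t ); }
// called by a condition variable to release the lock before blocking; returns the recursion count
static inline size_t on_wait( toy_lock & this ) { return on_wait( this.impl ); }
// called by a condition variable to restore the recursion count after waking
static inline void on_wakeup( toy_lock & this, size_t recursion ) { on_wakeup( this.impl, recursion ); }
#endif
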
//-----------------------------------------------------------------------------
// info_thread
// the info thread is a wrapper around a thread used
// to store extra data for use in the condition variable
forall(L & | is_blocking_lock(L)) {
        struct info_thread;
}

//-----------------------------------------------------------------------------
// Synchronization Locks
forall(L & | is_blocking_lock(L)) {

        //-----------------------------------------------------------------------------
        // condition_variable

        // The multi-tool condition variable
        // - can wait with a timeout, waking on either a signal or the timeout
        // - can wait without passing a lock
        // - can have waiters reacquire different locks while waiting on the same cond var
        // - has shadow queue
        // - can be signalled outside of critical sections with no locks held
        struct condition_variable {
                // Spin lock used for mutual exclusion
                __spinlock_t lock;

                // List of blocked threads
                dlist( info_thread(L) ) blocked_threads;

                // Count of current blocked threads
                int count;
        };


        void  ?{}( condition_variable(L) & this );
        void ^?{}( condition_variable(L) & this );

        bool notify_one( condition_variable(L) & this );
        bool notify_all( condition_variable(L) & this );

        uintptr_t front( condition_variable(L) & this );

        bool empty  ( condition_variable(L) & this );
        int  counter( condition_variable(L) & this );

        void wait( condition_variable(L) & this );
        void wait( condition_variable(L) & this, uintptr_t info );
        bool wait( condition_variable(L) & this, Duration duration );
        bool wait( condition_variable(L) & this, uintptr_t info, Duration duration );

        void wait( condition_variable(L) & this, L & l );
        void wait( condition_variable(L) & this, L & l, uintptr_t info );
        bool wait( condition_variable(L) & this, L & l, Duration duration );
        bool wait( condition_variable(L) & this, L & l, uintptr_t info, Duration duration );

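        // Illustrative sketch (not part of the original header): a waiter holds the lock,
        // calls wait(), which releases the lock while blocked and reacquires it before
        // returning; a signaller may call notify_one/notify_all with or without the lock
        // held. The Duration overloads return a bool so callers can tell a signal from a
        // timeout. The names below are hypothetical.
        #if 0
        #include <thread.hfa>
        #include <locks.hfa>

        owner_lock m;
        condition_variable( owner_lock ) cv;
        bool ready = false;

        void consumer() {
                lock( m );
                while ( ! ready ) wait( cv, m );        // m is released while blocked, reacquired on wakeup
                // ... use the ready data ...
                unlock( m );
        }

        void producer() {
                lock( m );
                ready = true;
                unlock( m );
                notify_one( cv );                       // legal here even with no lock held
        }
        #endif
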
        //-----------------------------------------------------------------------------
        // fast_cond_var

        // The trimmed and slim condition variable
        // - no internal lock, so you must hold a lock while using this cond var
        // - signalling without holding the branded lock is UNSAFE!
        // - only allows usage of one lock; the cond var is branded after first usage

        struct fast_cond_var {
                // List of blocked threads
                dlist( info_thread(L) ) blocked_threads;
                #ifdef __CFA_DEBUG__
                L * lock_used;
                #endif
        };

        void  ?{}( fast_cond_var(L) & this );
        void ^?{}( fast_cond_var(L) & this );

        bool notify_one( fast_cond_var(L) & this );
        bool notify_all( fast_cond_var(L) & this );

        uintptr_t front( fast_cond_var(L) & this );
        bool empty  ( fast_cond_var(L) & this );

        void wait( fast_cond_var(L) & this, L & l );
        void wait( fast_cond_var(L) & this, L & l, uintptr_t info );


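        // Illustrative sketch (not part of the original header): fast_cond_var has no
        // internal lock, so waiting and signalling must both be done while holding the one
        // lock the condition variable becomes branded with. The names are hypothetical.
        #if 0
        #include <locks.hfa>

        owner_lock m;
        fast_cond_var( owner_lock ) cv;
        bool ready = false;

        void consumer() {
                lock( m );
                while ( ! ready ) wait( cv, m );        // first wait brands cv with m (checked in debug builds)
                unlock( m );
        }

        void producer() {
                lock( m );
                ready = true;
                notify_one( cv );                       // must be called while holding m
                unlock( m );
        }
        #endif
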
        //-----------------------------------------------------------------------------
        // pthread_cond_var
        //
        // - cond var with minimal footprint
        // - supports the operations needed for pthread cond

        struct pthread_cond_var {
                dlist( info_thread(L) ) blocked_threads;
                __spinlock_t lock;
        };

        void  ?{}( pthread_cond_var(L) & this );
        void ^?{}( pthread_cond_var(L) & this );

        bool notify_one( pthread_cond_var(L) & this );
        bool notify_all( pthread_cond_var(L) & this );

        uintptr_t front( pthread_cond_var(L) & this );
        bool empty ( pthread_cond_var(L) & this );

        void wait( pthread_cond_var(L) & this, L & l );
        void wait( pthread_cond_var(L) & this, L & l, uintptr_t info );
        bool wait( pthread_cond_var(L) & this, L & l, timespec t );
        bool wait( pthread_cond_var(L) & this, L & l, uintptr_t info, timespec t );
}
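
// Illustrative sketch (not part of the original header): pthread_cond_var provides
// the subset of operations needed to build pthread_cond_t on top of Cforall locks.
// The timespec overloads are assumed here to take an absolute wake-up time and to
// return true when signalled before that time, as pthread_cond_timedwait does;
// see locks.cfa for the authoritative semantics. Names below are hypothetical.
#if 0
#include <time.h>
#include <locks.hfa>

owner_lock m;
pthread_cond_var( owner_lock ) cv;

bool wait_up_to_one_second() {
        timespec deadline;
        clock_gettime( CLOCK_REALTIME, &deadline );
        deadline.tv_sec += 1;                   // absolute time roughly one second from now
        lock( m );
        bool signalled = wait( cv, m, deadline );
        unlock( m );
        return signalled;
}
#endif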