source: libcfa/src/concurrency/locks.hfa @ bd72c28

Last change: bd72c28, checked in by caparsons <caparson@…>, 13 months ago

fixed build issue caused by globals

1//
2// Cforall Version 1.0.0 Copyright (C) 2021 University of Waterloo
3//
4// The contents of this file are covered under the licence agreement in the
5// file "LICENCE" distributed with Cforall.
6//
7// locks.hfa -- PUBLIC
8// Runtime locks that are used with the runtime thread system.
9//
10// Author           : Colby Alexander Parsons
11// Created On       : Thu Jan 21 19:46:50 2021
12// Last Modified By :
13// Last Modified On :
14// Update Count     :
15//
16
17#pragma once
18
19#include <stdbool.h>
20#include <stdio.h>
21
22#include "bits/weakso_locks.hfa"
23#include "containers/lockfree.hfa"
24#include "containers/list.hfa"
25
26#include "limits.hfa"
27#include "thread.hfa"
28
29#include "time_t.hfa"
30#include "time.hfa"
31
32#include <fstream.hfa>
33
34// futex headers
35#include <linux/futex.h>      /* Definition of FUTEX_* constants */
36#include <sys/syscall.h>      /* Definition of SYS_* constants */
37#include <unistd.h>
38
39// C_TODO: clean up this file and locks.cfa
40// - appropriate separation of interface and impl
41// - clean up unused/unneeded locks
42// - change messy big blocking lock from inheritance to composition to remove need for flags
43
44//-----------------------------------------------------------------------------
45// Semaphore
46struct semaphore {
47        __spinlock_t lock;
48        int count;
49        __queue_t(thread$) waiting;
50};
51
52void  ?{}(semaphore & this, int count = 1);
53void ^?{}(semaphore & this);
54bool   P (semaphore & this);
55bool   V (semaphore & this);
56bool   V (semaphore & this, unsigned count);
57thread$ * V (semaphore & this, bool );
58
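// Usage sketch (added for illustration, not part of the original header), assuming the
// usual CFA thread conventions: one thread P's the semaphore and blocks until another
// thread V's it.  With the default count of 1 the semaphore acts as a mutex; pass 0 for
// a pure signalling semaphore, as below.
//
//   #include <thread.hfa>
//   #include <locks.hfa>
//
//   semaphore ready = { 0 };                 // no initial permits
//
//   thread Waiter {};
//   void main( Waiter & w ) { P( ready ); }  // blocks until V is called
//
//   int main() {
//       Waiter w;                            // thread starts here
//       V( ready );                          // releases the waiter
//   }                                        // w is joined at the end of the block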
59//----------
60struct single_acquisition_lock {
61        inline blocking_lock;
62};
63
64static inline void  ?{}( single_acquisition_lock & this ) {((blocking_lock &)this){ false, false };}
65static inline void ^?{}( single_acquisition_lock & this ) {}
66static inline void   lock     ( single_acquisition_lock & this ) { lock    ( (blocking_lock &)this ); }
67static inline bool   try_lock ( single_acquisition_lock & this ) { return try_lock( (blocking_lock &)this ); }
68static inline void   unlock   ( single_acquisition_lock & this ) { unlock  ( (blocking_lock &)this ); }
69static inline size_t on_wait  ( single_acquisition_lock & this ) { return on_wait ( (blocking_lock &)this ); }
70static inline void   on_wakeup( single_acquisition_lock & this, size_t v ) { on_wakeup ( (blocking_lock &)this, v ); }
71static inline void   on_notify( single_acquisition_lock & this, struct thread$ * t ) { on_notify( (blocking_lock &)this, t ); }
72
73//----------
74struct owner_lock {
75        inline blocking_lock;
76};
77
78static inline void  ?{}( owner_lock & this ) {((blocking_lock &)this){ true, true };}
79static inline void ^?{}( owner_lock & this ) {}
80static inline void   lock     ( owner_lock & this ) { lock    ( (blocking_lock &)this ); }
81static inline bool   try_lock ( owner_lock & this ) { return try_lock( (blocking_lock &)this ); }
82static inline void   unlock   ( owner_lock & this ) { unlock  ( (blocking_lock &)this ); }
83static inline size_t on_wait  ( owner_lock & this ) { return on_wait ( (blocking_lock &)this ); }
84static inline void   on_wakeup( owner_lock & this, size_t v ) { on_wakeup ( (blocking_lock &)this, v ); }
85static inline void   on_notify( owner_lock & this, struct thread$ * t ) { on_notify( (blocking_lock &)this, t ); }
86
87//-----------------------------------------------------------------------------
88// MCS Lock
89struct mcs_node {
90        mcs_node * volatile next;
91        single_sem sem;
92};
93
94static inline void ?{}(mcs_node & this) { this.next = 0p; }
95
96static inline mcs_node * volatile & ?`next ( mcs_node * node ) {
97        return node->next;
98}
99
100struct mcs_lock {
101        mcs_queue(mcs_node) queue;
102};
103
104static inline void lock(mcs_lock & l, mcs_node & n) {
105        if(push(l.queue, &n))
106                wait(n.sem);
107}
108
109static inline void unlock(mcs_lock & l, mcs_node & n) {
110        mcs_node * next = advance(l.queue, &n);
111        if(next) post(next->sem);
112}
113
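// Usage sketch (illustrative, not part of the original header): an MCS lock threads a
// per-acquirer queue node through the lock, so each acquisition supplies its own
// mcs_node (typically on the caller's stack) and the same node must be passed to the
// matching unlock.
//
//   mcs_lock l;
//
//   void critical_section() {
//       mcs_node n;                          // queue node owned by this acquisition
//       lock( l, n );
//       // ... critical section ...
//       unlock( l, n );                      // must be the node used to lock
//   }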
114//-----------------------------------------------------------------------------
115// MCS Spin Lock
116// - No recursive acquisition
117// - Needs to be released by owner
118
119struct mcs_spin_node {
120        mcs_spin_node * volatile next;
121        volatile bool locked;
122};
123
124struct mcs_spin_queue {
125        mcs_spin_node * volatile tail;
126};
127
128static inline void ?{}(mcs_spin_node & this) { this.next = 0p; this.locked = true; }
129
130static inline mcs_spin_node * volatile & ?`next ( mcs_spin_node * node ) {
131        return node->next;
132}
133
134struct mcs_spin_lock {
135        mcs_spin_queue queue;
136};
137
138static inline void lock(mcs_spin_lock & l, mcs_spin_node & n) {
139        mcs_spin_node * prev = __atomic_exchange_n(&l.queue.tail, &n, __ATOMIC_SEQ_CST);
140        n.locked = true;
141        if(prev == 0p) return;
142        prev->next = &n;
143        while(__atomic_load_n(&n.locked, __ATOMIC_RELAXED)) Pause();
144}
145
146static inline void unlock(mcs_spin_lock & l, mcs_spin_node & n) {
147        mcs_spin_node * n_ptr = &n;
148        if (__atomic_compare_exchange_n(&l.queue.tail, &n_ptr, 0p, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) return;
149        while (__atomic_load_n(&n.next, __ATOMIC_RELAXED) == 0p) {}
150        n.next->locked = false;
151}
152
153//-----------------------------------------------------------------------------
154// futex_mutex
155
156// - Kernel-thread-blocking alternative to the spinlock
157// - No ownership (will deadlock on reacquisition)
158struct futex_mutex {
159	// lock state: any state other than UNLOCKED is locked
160        // enum LockState { UNLOCKED = 0, UNCONTENDED = 1, CONTENDED = 2 };
161       
162        // stores a lock state
163        int val;
164};
165
166// wrapper for FUTEX_WAKE and FUTEX_WAIT (other futex operations need more parameters)
167static inline int futex(int *uaddr, int futex_op, int val) {
168    return syscall(SYS_futex, uaddr, futex_op, val, NULL, NULL, 0);
169}
170
171static inline void  ?{}( futex_mutex & this ) with(this) { val = 0; }
172
173static inline bool internal_try_lock(futex_mutex & this, int & compare_val) with(this) {
174        return __atomic_compare_exchange_n((int*)&val, (int*)&compare_val, 1, false, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
175}
176
177static inline int internal_exchange(futex_mutex & this) with(this) {
178        return __atomic_exchange_n((int*)&val, 2, __ATOMIC_ACQUIRE);
179}
180
181// if this is called recursively IT WILL DEADLOCK!!!!!
182static inline void lock(futex_mutex & this) with(this) {
183        int state;
184
185        for( int spin = 4; spin < 1024; spin += spin) {
186                state = 0;
187                // if unlocked, lock and return
188                if (internal_try_lock(this, state)) return;
189                if (2 == state) break;
190                for (int i = 0; i < spin; i++) Pause();
191        }
192
193        // // no contention try to acquire
194        // if (internal_try_lock(this, state)) return;
195       
196        // if not in contended state, set to be in contended state
197        if (state != 2) state = internal_exchange(this);
198
199        // block and spin until we win the lock
200        while (state != 0) {
201                futex((int*)&val, FUTEX_WAIT, 2); // if val is not 2 this returns with EWOULDBLOCK
202                state = internal_exchange(this);
203        }
204}
205
206static inline void unlock(futex_mutex & this) with(this) {
207        // if uncontended do atomic unlock and then return
208    if (__atomic_exchange_n(&val, 0, __ATOMIC_RELEASE) == 1) return;
209       
210        // otherwise threads are blocked so we must wake one
211        futex((int *)&val, FUTEX_WAKE, 1);
212}
213
214static inline void on_notify( futex_mutex & f, thread$ * t){ unpark(t); }
215static inline size_t on_wait( futex_mutex & f ) {unlock(f); return 0;}
216
217// to set recursion count after getting signalled;
218static inline void on_wakeup( futex_mutex & f, size_t recursion ) {}
219
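// Usage sketch (illustrative, not part of the original header): futex_mutex follows the
// classic three-state futex protocol (0 = unlocked, 1 = locked/uncontended,
// 2 = locked/contended).  An uncontended acquire and release are a single atomic each;
// the futex syscall is only made once contention has been observed.
//
//   futex_mutex m;
//   int shared = 0;
//
//   void update() {
//       lock( m );                           // spins briefly, then FUTEX_WAITs
//       shared += 1;
//       unlock( m );                         // FUTEX_WAKEs one waiter only if contended
//   }
//
// Reminder: the lock is not recursive; re-locking from the holder deadlocks (see above).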
220//-----------------------------------------------------------------------------
221// go_mutex
222
223// - Kernel-thread-blocking alternative to the spinlock
224// - No ownership (will deadlock on reacquisition)
225// - Golang's flavour of mutex
226// - Implementation taken from Golang: src/runtime/lock_futex.go
227struct go_mutex {
228	// lock state: any state other than UNLOCKED is locked
229        // enum LockState { UNLOCKED = 0, LOCKED = 1, SLEEPING = 2 };
230       
231        // stores a lock state
232        int val;
233};
234
235static inline void  ?{}( go_mutex & this ) with(this) { val = 0; }
236
237static inline bool internal_try_lock(go_mutex & this, int & compare_val, int new_val ) with(this) {
238        return __atomic_compare_exchange_n((int*)&val, (int*)&compare_val, new_val, false, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
239}
240
241static inline int internal_exchange(go_mutex & this, int swap ) with(this) {
242        return __atomic_exchange_n((int*)&val, swap, __ATOMIC_ACQUIRE);
243}
244
245// if this is called recursively IT WILL DEADLOCK!!!!!
246static inline void lock(go_mutex & this) with(this) {
247        int state, init_state;
248
249    // speculative grab
250    state = internal_exchange(this, 1);
251    if ( !state ) return; // state == 0
252    init_state = state;
253    for (;;) {
254        for( int i = 0; i < 4; i++ ) {
255            while( !val ) { // lock unlocked
256                state = 0;
257                if (internal_try_lock(this, state, init_state)) return;
258            }
259            for (int i = 0; i < 30; i++) Pause();
260        }
261
262        while( !val ) { // lock unlocked
263            state = 0;
264            if (internal_try_lock(this, state, init_state)) return;
265        }
266        sched_yield();
267       
268        // if not in contended state, set to be in contended state
269        state = internal_exchange(this, 2);
270        if ( !state ) return; // state == 0
271        init_state = 2;
272        futex((int*)&val, FUTEX_WAIT, 2); // if val is not 2 this returns with EWOULDBLOCK
273    }
274}
275
276static inline void unlock( go_mutex & this ) with(this) {
277        // if uncontended do atomic unlock and then return
278    if (__atomic_exchange_n(&val, 0, __ATOMIC_RELEASE) == 1) return;
279       
280        // otherwise threads are blocked so we must wake one
281        futex((int *)&val, FUTEX_WAKE, 1);
282}
283
284static inline void on_notify( go_mutex & f, thread$ * t){ unpark(t); }
285static inline size_t on_wait( go_mutex & f ) {unlock(f); return 0;}
286static inline void on_wakeup( go_mutex & f, size_t recursion ) {}
287
288//-----------------------------------------------------------------------------
289// CLH Spinlock
290// - No recursive acquisition
291// - Needs to be released by owner
292
293struct clh_lock {
294        volatile bool * volatile tail;
295    volatile bool * volatile head;
296};
297
298static inline void  ?{}( clh_lock & this ) { this.tail = malloc(); *this.tail = true; }
299static inline void ^?{}( clh_lock & this ) { free(this.tail); }
300
301static inline void lock(clh_lock & l) {
302        thread$ * curr_thd = active_thread();
303        *(curr_thd->clh_node) = false;
304        volatile bool * prev = __atomic_exchange_n((bool **)(&l.tail), (bool *)(curr_thd->clh_node), __ATOMIC_SEQ_CST);
305        while(!__atomic_load_n(prev, __ATOMIC_SEQ_CST)) Pause();
306    __atomic_store_n((bool **)(&l.head), (bool *)curr_thd->clh_node, __ATOMIC_SEQ_CST);
307    curr_thd->clh_node = prev;
308}
309
310static inline void unlock(clh_lock & l) {
311        __atomic_store_n((bool *)(l.head), true, __ATOMIC_SEQ_CST);
312}
313
314static inline void on_notify(clh_lock & this, struct thread$ * t ) { unpark(t); }
315static inline size_t on_wait(clh_lock & this) { unlock(this); return 0; }
316static inline void on_wakeup(clh_lock & this, size_t recursion ) { lock(this); }
317
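// Usage sketch (illustrative, not part of the original header): the CLH lock spins on
// the predecessor's queue node rather than on a shared flag.  Each CFA thread carries
// one clh_node (active_thread()->clh_node); lock() publishes that node as the new tail,
// spins until the previous tail turns true, and then adopts the predecessor's node for
// its next acquisition, so no per-call-site storage is needed.
//
//   clh_lock l;
//
//   void critical_section() {
//       lock( l );                           // uses the current thread's clh_node
//       // ... critical section ...
//       unlock( l );                         // must be released by the acquiring thread
//   }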
318//-----------------------------------------------------------------------------
319// Exponential backoff then block lock
320struct exp_backoff_then_block_lock {
321        // Spin lock used for mutual exclusion
322        __spinlock_t spinlock;
323
324        // List of blocked threads
325        dlist( thread$ ) blocked_threads;
326
327	// lock state, used for compare-and-swap (0 = unlocked, 1 = locked, 2 = contended)
328        volatile size_t lock_value;
329};
330
331static inline void  ?{}( exp_backoff_then_block_lock & this ) {
332        this.spinlock{};
333        this.blocked_threads{};
334        this.lock_value = 0;
335}
336
337static inline void  ^?{}( exp_backoff_then_block_lock & this ){}
338
339static inline bool internal_try_lock(exp_backoff_then_block_lock & this, size_t & compare_val) with(this) {
340        return __atomic_compare_exchange_n(&lock_value, &compare_val, 1, false, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
341}
342
343static inline bool try_lock(exp_backoff_then_block_lock & this) { size_t compare_val = 0; return internal_try_lock(this, compare_val); }
344
345static inline bool try_lock_contention(exp_backoff_then_block_lock & this) with(this) {
346        return !__atomic_exchange_n(&lock_value, 2, __ATOMIC_ACQUIRE);
347}
348
349static inline bool block(exp_backoff_then_block_lock & this) with(this) {
350    lock( spinlock __cfaabi_dbg_ctx2 );
351    if (__atomic_load_n( &lock_value, __ATOMIC_SEQ_CST) != 2) {
352        unlock( spinlock );
353        return true;
354    }
355    insert_last( blocked_threads, *active_thread() );
356    unlock( spinlock );
357        park( );
358        return true;
359}
360
361static inline void lock(exp_backoff_then_block_lock & this) with(this) {
362        size_t compare_val = 0;
363        int spin = 4;
364
365	// exponential backoff: spin count doubles each round (capped at 1024)
366        for( ;; ) {
367                compare_val = 0;
368                if (internal_try_lock(this, compare_val)) return;
369                if (2 == compare_val) break;
370                for (int i = 0; i < spin; i++) Pause();
371                if (spin >= 1024) break;
372                spin += spin;
373        }
374
375        if(2 != compare_val && try_lock_contention(this)) return;
376        // block until signalled
377        while (block(this)) if(try_lock_contention(this)) return;
378}
379
380static inline void unlock(exp_backoff_then_block_lock & this) with(this) {
381    if (__atomic_exchange_n(&lock_value, 0, __ATOMIC_RELEASE) == 1) return;
382    lock( spinlock __cfaabi_dbg_ctx2 );
383    thread$ * t = &try_pop_front( blocked_threads );
384    unlock( spinlock );
385    unpark( t );
386}
387
388static inline void on_notify(exp_backoff_then_block_lock & this, struct thread$ * t ) { unpark(t); }
389static inline size_t on_wait(exp_backoff_then_block_lock & this) { unlock(this); return 0; }
390static inline void on_wakeup(exp_backoff_then_block_lock & this, size_t recursion ) { lock(this); }
391
392//-----------------------------------------------------------------------------
393// Fast Block Lock
394
395// minimal blocking lock
396// - No reacquire for cond var
397// - No recursive acquisition
398// - No ownership
399struct fast_block_lock {
400        // List of blocked threads
401        dlist( thread$ ) blocked_threads;
402
403        // Spin lock used for mutual exclusion
404        __spinlock_t lock;
405
406        // flag showing if lock is held
407        bool held:1;
408};
409
410static inline void  ?{}( fast_block_lock & this ) with(this) {
411        lock{};
412        blocked_threads{};
413        held = false;
414}
415static inline void ^?{}( fast_block_lock & this ) {}
416static inline void ?{}( fast_block_lock & this, fast_block_lock this2 ) = void;
417static inline void ?=?( fast_block_lock & this, fast_block_lock this2 ) = void;
418
419// if this is called recursively IT WILL DEADLOCK!!!!!
420static inline void lock(fast_block_lock & this) with(this) {
421        lock( lock __cfaabi_dbg_ctx2 );
422        if ( held ) {
423                insert_last( blocked_threads, *active_thread() );
424                unlock( lock );
425                park( );
426                return;
427        }
428        held = true;
429        unlock( lock );
430}
431
432static inline void unlock(fast_block_lock & this) with(this) {
433        lock( lock __cfaabi_dbg_ctx2 );
434        /* paranoid */ verifyf( held != false, "Attempt to release lock %p that isn't held", &this );
435        thread$ * t = &try_pop_front( blocked_threads );
436        held = ( t ? true : false );
437        unpark( t );
438        unlock( lock );
439}
440
441static inline void on_notify(fast_block_lock & this, struct thread$ * t ) with(this) {
442    lock( lock __cfaabi_dbg_ctx2 );
443    insert_last( blocked_threads, *t );
444    unlock( lock );
445}
446static inline size_t on_wait(fast_block_lock & this) { unlock(this); return 0; }
447static inline void on_wakeup(fast_block_lock & this, size_t recursion ) { }
448
449//-----------------------------------------------------------------------------
450// simple_owner_lock
451
452// pthread owner lock
453// - reacquire for cond var
454// - recursive acquisition
455// - ownership
456struct simple_owner_lock {
457        // List of blocked threads
458        dlist( thread$ ) blocked_threads;
459
460        // Spin lock used for mutual exclusion
461        __spinlock_t lock;
462
463	// thread that currently owns the lock, 0p if the lock is not held
464        struct thread$ * owner;
465
466        size_t recursion_count;
467};
468
469static inline void  ?{}( simple_owner_lock & this ) with(this) {
470        lock{};
471        blocked_threads{};
472        owner = 0p;
473        recursion_count = 0;
474}
475static inline void ^?{}( simple_owner_lock & this ) {}
476static inline void ?{}( simple_owner_lock & this, simple_owner_lock this2 ) = void;
477static inline void ?=?( simple_owner_lock & this, simple_owner_lock this2 ) = void;
478
479static inline void lock(simple_owner_lock & this) with(this) {
480        if (owner == active_thread()) {
481                recursion_count++;
482                return;
483        }
484        lock( lock __cfaabi_dbg_ctx2 );
485
486        if (owner != 0p) {
487                insert_last( blocked_threads, *active_thread() );
488                unlock( lock );
489                park( );
490                return;
491        }
492        owner = active_thread();
493        recursion_count = 1;
494        unlock( lock );
495}
496
497// TODO: fix duplicate def issue and bring this back
498// void pop_and_set_new_owner( simple_owner_lock & this ) with( this ) {
499        // thread$ * t = &try_pop_front( blocked_threads );
500        // owner = t;
501        // recursion_count = ( t ? 1 : 0 );
502        // unpark( t );
503// }
504
505static inline void unlock(simple_owner_lock & this) with(this) {
506        lock( lock __cfaabi_dbg_ctx2 );
507        /* paranoid */ verifyf( owner != 0p, "Attempt to release lock %p that isn't held", &this );
508        /* paranoid */ verifyf( owner == active_thread(), "Thread %p other than the owner %p attempted to release owner lock %p", owner, active_thread(), &this );
509        // if recursion count is zero release lock and set new owner if one is waiting
510        recursion_count--;
511        if ( recursion_count == 0 ) {
512                // pop_and_set_new_owner( this );
513                thread$ * t = &try_pop_front( blocked_threads );
514                owner = t;
515                recursion_count = ( t ? 1 : 0 );
516                unpark( t );
517        }
518        unlock( lock );
519}
520
521static inline void on_notify(simple_owner_lock & this, struct thread$ * t ) with(this) {
522        lock( lock __cfaabi_dbg_ctx2 );
523        // lock held
524        if ( owner != 0p ) {
525                insert_last( blocked_threads, *t );
526        }
527        // lock not held
528        else {
529                owner = t;
530                recursion_count = 1;
531                unpark( t );
532        }
533        unlock( lock );
534}
535
536static inline size_t on_wait(simple_owner_lock & this) with(this) {
537        lock( lock __cfaabi_dbg_ctx2 );
538        /* paranoid */ verifyf( owner != 0p, "Attempt to release lock %p that isn't held", &this );
539        /* paranoid */ verifyf( owner == active_thread(), "Thread %p other than the owner %p attempted to release owner lock %p", owner, active_thread(), &this );
540
541        size_t ret = recursion_count;
542
543        // pop_and_set_new_owner( this );
544
545        thread$ * t = &try_pop_front( blocked_threads );
546        owner = t;
547        recursion_count = ( t ? 1 : 0 );
548        unpark( t );
549
550        unlock( lock );
551        return ret;
552}
553
554static inline void on_wakeup(simple_owner_lock & this, size_t recursion ) with(this) { recursion_count = recursion; }
555
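// Usage sketch (illustrative, not part of the original header): simple_owner_lock
// behaves like a recursive mutex -- the owning thread may re-lock without blocking, and
// the lock is only released once the unlock count matches the lock count.
//
//   simple_owner_lock l;
//
//   void inner() { lock( l ); /* ... */ unlock( l ); }
//
//   void outer() {
//       lock( l );                           // recursion_count becomes 1
//       inner();                             // 2 on entry, back to 1 on exit, no deadlock
//       unlock( l );                         // 0: released, next waiter becomes owner
//   }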
556//-----------------------------------------------------------------------------
557// Spin Queue Lock
558
559// - No reacquire for cond var
560// - No recursive acquisition
561// - No ownership
562// - spin lock whose unlock is a single store (no queue operations or atomic RMW)
563struct spin_queue_lock {
564        // Spin lock used for mutual exclusion
565        mcs_spin_lock lock;
566
567        // flag showing if lock is held
568        volatile bool held;
569};
570
571static inline void  ?{}( spin_queue_lock & this ) with(this) {
572        lock{};
573        held = false;
574}
575static inline void ^?{}( spin_queue_lock & this ) {}
576static inline void ?{}( spin_queue_lock & this, spin_queue_lock this2 ) = void;
577static inline void ?=?( spin_queue_lock & this, spin_queue_lock this2 ) = void;
578
579// if this is called recursively IT WILL DEADLOCK!
580static inline void lock(spin_queue_lock & this) with(this) {
581        mcs_spin_node node;
582        lock( lock, node );
583        while(__atomic_load_n(&held, __ATOMIC_SEQ_CST)) Pause();
584        __atomic_store_n(&held, true, __ATOMIC_SEQ_CST);
585        unlock( lock, node );
586}
587
588static inline void unlock(spin_queue_lock & this) with(this) {
589        __atomic_store_n(&held, false, __ATOMIC_RELEASE);
590}
591
592static inline void on_notify(spin_queue_lock & this, struct thread$ * t ) {
593        unpark(t);
594}
595static inline size_t on_wait(spin_queue_lock & this) { unlock(this); return 0; }
596static inline void on_wakeup(spin_queue_lock & this, size_t recursion ) { lock(this); }
597
598
599//-----------------------------------------------------------------------------
600// MCS Block Spin Lock
601
602// - No reacquire for cond var
603// - No recursive acquisition
604// - No ownership
605// - Blocks, but the first node spins (like spin_queue_lock, except threads other than the first block instead of spinning)
606struct mcs_block_spin_lock {
607        // Spin lock used for mutual exclusion
608        mcs_lock lock;
609
610        // flag showing if lock is held
611        volatile bool held;
612};
613
614static inline void  ?{}( mcs_block_spin_lock & this ) with(this) {
615        lock{};
616        held = false;
617}
618static inline void ^?{}( mcs_block_spin_lock & this ) {}
619static inline void ?{}( mcs_block_spin_lock & this, mcs_block_spin_lock this2 ) = void;
620static inline void ?=?( mcs_block_spin_lock & this, mcs_block_spin_lock this2 ) = void;
621
622// if this is called recursively IT WILL DEADLOCK!!!!!
623static inline void lock(mcs_block_spin_lock & this) with(this) {
624        mcs_node node;
625        lock( lock, node );
626        while(__atomic_load_n(&held, __ATOMIC_SEQ_CST)) Pause();
627        __atomic_store_n(&held, true, __ATOMIC_SEQ_CST);
628        unlock( lock, node );
629}
630
631static inline void unlock(mcs_block_spin_lock & this) with(this) {
632        __atomic_store_n(&held, false, __ATOMIC_SEQ_CST);
633}
634
635static inline void on_notify(mcs_block_spin_lock & this, struct thread$ * t ) { unpark(t); }
636static inline size_t on_wait(mcs_block_spin_lock & this) { unlock(this); return 0; }
637static inline void on_wakeup(mcs_block_spin_lock & this, size_t recursion ) {lock(this); }
638
639//-----------------------------------------------------------------------------
640// Block Spin Lock
641
642// - No reacquire for cond var
643// - No recursive acquisition
644// - No ownership
645// - Blocks, but the first node spins (like spin_queue_lock, except threads other than the first block instead of spinning)
646struct block_spin_lock {
647        // Spin lock used for mutual exclusion
648        fast_block_lock lock;
649
650        // flag showing if lock is held
651        volatile bool held;
652};
653
654static inline void  ?{}( block_spin_lock & this ) with(this) {
655        lock{};
656        held = false;
657}
658static inline void ^?{}( block_spin_lock & this ) {}
659static inline void ?{}( block_spin_lock & this, block_spin_lock this2 ) = void;
660static inline void ?=?( block_spin_lock & this, block_spin_lock this2 ) = void;
661
662// if this is called recursively IT WILL DEADLOCK!!!!!
663static inline void lock(block_spin_lock & this) with(this) {
664        lock( lock );
665        while(__atomic_load_n(&held, __ATOMIC_SEQ_CST)) Pause();
666        __atomic_store_n(&held, true, __ATOMIC_RELEASE);
667        unlock( lock );
668}
669
670static inline void unlock(block_spin_lock & this) with(this) {
671        __atomic_store_n(&held, false, __ATOMIC_RELEASE);
672}
673
674static inline void on_notify(block_spin_lock & this, struct thread$ * t ) with(this.lock) {
675        // first we acquire internal fast_block_lock
676        lock( lock __cfaabi_dbg_ctx2 );
677        if ( held ) { // if internal fast_block_lock is held
678                insert_last( blocked_threads, *t );
679                unlock( lock );
680                return;
681        }
682        // if internal fast_block_lock is not held
683        held = true;
684        unlock( lock );
685
686        unpark(t);
687}
688static inline size_t on_wait(block_spin_lock & this) { unlock(this); return 0; }
689static inline void on_wakeup(block_spin_lock & this, size_t recursion ) with(this) {
690        // now we acquire the entire block_spin_lock upon waking up
691        while(__atomic_load_n(&held, __ATOMIC_SEQ_CST)) Pause();
692        __atomic_store_n(&held, true, __ATOMIC_RELEASE);
693	unlock( lock ); // now we release the internal fast_block_lock
694}
695
696//-----------------------------------------------------------------------------
697// is_blocking_lock
698forall( L & | sized(L) )
699trait is_blocking_lock {
700        // For synchronization locks to use when acquiring
701        void on_notify( L &, struct thread$ * );
702
703        // For synchronization locks to use when releasing
704        size_t on_wait( L & );
705
706        // to set recursion count after getting signalled;
707        void on_wakeup( L &, size_t recursion );
708};
709
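// Illustrative sketch (not part of the original header): any lock type that supplies the
// three operations above (plus being sized) satisfies is_blocking_lock and can be used
// with the condition variables declared below.  A hypothetical user lock built by
// composition over fast_block_lock might forward the operations like this:
//
//   struct my_lock { fast_block_lock impl; };
//
//   static inline void   lock     ( my_lock & this ) { lock  ( this.impl ); }
//   static inline void   unlock   ( my_lock & this ) { unlock( this.impl ); }
//   static inline void   on_notify( my_lock & this, struct thread$ * t ) { on_notify( this.impl, t ); }
//   static inline size_t on_wait  ( my_lock & this ) { return on_wait( this.impl ); }
//   static inline void   on_wakeup( my_lock & this, size_t v ) { on_wakeup( this.impl, v ); }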
710//-----------------------------------------------------------------------------
711// // info_thread
712// // the info thread is a wrapper around a thread used
713// // to store extra data for use in the condition variable
714forall(L & | is_blocking_lock(L)) {
715        struct info_thread;
716
717        // // for use by sequence
718        // info_thread(L) *& Back( info_thread(L) * this );
719        // info_thread(L) *& Next( info_thread(L) * this );
720}
721
722//-----------------------------------------------------------------------------
723// Synchronization Locks
724forall(L & | is_blocking_lock(L)) {
725
726        //-----------------------------------------------------------------------------
727        // condition_variable
728
729        // The multi-tool condition variable
730		// - supports timed waits that return on either a signal or the timeout
731        // - can wait without passing a lock
732        // - can have waiters reacquire different locks while waiting on the same cond var
733        // - has shadow queue
734        // - can be signalled outside of critical sections with no locks held
735        struct condition_variable {
736                // Spin lock used for mutual exclusion
737                __spinlock_t lock;
738
739                // List of blocked threads
740                dlist( info_thread(L) ) blocked_threads;
741
742                // Count of current blocked threads
743                int count;
744        };
745
746
747        void  ?{}( condition_variable(L) & this );
748        void ^?{}( condition_variable(L) & this );
749
750        bool notify_one( condition_variable(L) & this );
751        bool notify_all( condition_variable(L) & this );
752
753        uintptr_t front( condition_variable(L) & this );
754
755        bool empty  ( condition_variable(L) & this );
756        int  counter( condition_variable(L) & this );
757
758        void wait( condition_variable(L) & this );
759        void wait( condition_variable(L) & this, uintptr_t info );
760        bool wait( condition_variable(L) & this, Duration duration );
761        bool wait( condition_variable(L) & this, uintptr_t info, Duration duration );
762
763        void wait( condition_variable(L) & this, L & l );
764        void wait( condition_variable(L) & this, L & l, uintptr_t info );
765        bool wait( condition_variable(L) & this, L & l, Duration duration );
766        bool wait( condition_variable(L) & this, L & l, uintptr_t info, Duration duration );
767
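	// Usage sketch (illustrative, not part of the original header): the lock-reacquiring
	// wait forms release the lock, block, and reacquire the lock before returning, giving
	// monitor-style waiting.  Assumes the single_acquisition_lock declared above.
	//
	//   single_acquisition_lock m;
	//   condition_variable( single_acquisition_lock ) cv;
	//   bool ready = false;
	//
	//   void consumer() {
	//       lock( m );
	//       while ( ! ready ) wait( cv, m );  // atomically releases m and blocks
	//       // ... ready is true and m is held ...
	//       unlock( m );
	//   }
	//
	//   void producer() {
	//       lock( m );
	//       ready = true;
	//       unlock( m );
	//       notify_one( cv );                 // legal with no locks held (see above)
	//   }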
768        //-----------------------------------------------------------------------------
769        // fast_cond_var
770
771        // The trimmed and slim condition variable
772        // - no internal lock so you must hold a lock while using this cond var
773        // - signalling without holding branded lock is UNSAFE!
774        // - only allows usage of one lock, cond var is branded after usage
775
776        struct fast_cond_var {
777                // List of blocked threads
778                dlist( info_thread(L) ) blocked_threads;
779                #ifdef __CFA_DEBUG__
780                L * lock_used;
781                #endif
782        };
783
784        void  ?{}( fast_cond_var(L) & this );
785        void ^?{}( fast_cond_var(L) & this );
786
787        bool notify_one( fast_cond_var(L) & this );
788        bool notify_all( fast_cond_var(L) & this );
789
790        uintptr_t front( fast_cond_var(L) & this );
791        bool empty  ( fast_cond_var(L) & this );
792
793        void wait( fast_cond_var(L) & this, L & l );
794        void wait( fast_cond_var(L) & this, L & l, uintptr_t info );
795
796
797        //-----------------------------------------------------------------------------
798        // pthread_cond_var
799        //
800        // - cond var with minimal footprint
801// - supports the operations needed for pthread cond
802
803        struct pthread_cond_var {
804                dlist( info_thread(L) ) blocked_threads;
805                __spinlock_t lock;
806        };
807
808        void  ?{}( pthread_cond_var(L) & this );
809        void ^?{}( pthread_cond_var(L) & this );
810
811        bool notify_one( pthread_cond_var(L) & this );
812        bool notify_all( pthread_cond_var(L) & this );
813
814        uintptr_t front( pthread_cond_var(L) & this );
815        bool empty ( pthread_cond_var(L) & this );
816
817        void wait( pthread_cond_var(L) & this, L & l );
818        void wait( pthread_cond_var(L) & this, L & l, uintptr_t info );
819        bool wait( pthread_cond_var(L) & this, L & l, timespec t );
820        bool wait( pthread_cond_var(L) & this, L & l, uintptr_t info, timespec t );
821}