source: libcfa/src/concurrency/locks.hfa @ d02e547

Last change on this file since d02e547 was 6ba6846, checked in by Thierry Delisle <tdelisle@…>, 3 years ago

Minor ready-queue fixes

//
// Cforall Version 1.0.0 Copyright (C) 2021 University of Waterloo
//
// The contents of this file are covered under the licence agreement in the
// file "LICENCE" distributed with Cforall.
//
// locks.hfa -- PUBLIC
// Runtime locks that are used with the runtime thread system.
//
// Author           : Colby Alexander Parsons
// Created On       : Thu Jan 21 19:46:50 2021
// Last Modified By :
// Last Modified On :
// Update Count     :
//

#pragma once

#include <stdbool.h>

#include "bits/weakso_locks.hfa"
#include "containers/queueLockFree.hfa"
#include "limits.hfa"
#include "thread.hfa"

#include "time_t.hfa"
#include "time.hfa"

//-----------------------------------------------------------------------------
// Semaphores

// '0-nary' semaphore
// Similar to a counting semaphore, except the value of one is never reached;
// as a consequence, a V() that would bring the value to 1 *spins* until
// a P() consumes it.
struct Semaphore0nary {
        __spinlock_t lock; // protects the consumer (pop) side of the queue
        mpsc_queue($thread) queue;
};

static inline bool P(Semaphore0nary & this, $thread * thrd) {
        /* paranoid */ verify(!(thrd->seqable.next));
        /* paranoid */ verify(!(thrd`next));

        push(this.queue, thrd);
        return true;
}

static inline bool P(Semaphore0nary & this) {
        $thread * thrd = active_thread();
        P(this, thrd);
        park();
        return true;
}

static inline $thread * V(Semaphore0nary & this, bool doUnpark = true) {
        $thread * next;
        lock(this.lock __cfaabi_dbg_ctx2);
                for (;;) {
                        next = pop(this.queue);
                        if (next) break;
                        Pause();
                }
        unlock(this.lock);

        if (doUnpark) unpark(next);
        return next;
}

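// Usage sketch (illustrative): the intended P()/V() pairing. One side enqueues
// itself and parks; the other side pops a waiter and unparks it. Because V()
// spins until a waiter is available, the value of one is never reached.
//
//      Semaphore0nary sem;
//      // blocking thread:             // signalling thread:
//      P( sem );                       $thread * woken = V( sem );
//
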
// Wrapper used on top of any semaphore to avoid potential locking
struct BinaryBenaphore {
        volatile ssize_t counter;
};

static inline {
        void ?{}(BinaryBenaphore & this) { this.counter = 0; }
        void ?{}(BinaryBenaphore & this, zero_t) { this.counter = 0; }
        void ?{}(BinaryBenaphore & this, one_t ) { this.counter = 1; }

        // returns true if no blocking needed
        bool P(BinaryBenaphore & this) {
                return __atomic_fetch_sub(&this.counter, 1, __ATOMIC_SEQ_CST) > 0;
        }

        bool tryP(BinaryBenaphore & this) {
                ssize_t c = this.counter;
                /* paranoid */ verify( c > MIN );
                return (c >= 1) && __atomic_compare_exchange_n(&this.counter, &c, c-1, false, __ATOMIC_SEQ_CST, __ATOMIC_RELAXED);
        }

        // returns true if no notify is needed, i.e. no waiter has to be woken
        bool V(BinaryBenaphore & this) {
                ssize_t c = 0;
                for () {
                        /* paranoid */ verify( this.counter < MAX );
                        if (__atomic_compare_exchange_n(&this.counter, &c, c+1, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) {
                                if (c == 0) return true;
                                /* paranoid */ verify(c < 0);
                                return false;
                        } else {
                                if (c == 1) return true;
                                /* paranoid */ verify(c < 1);
                                Pause();
                        }
                }
        }
}

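// Usage sketch (illustrative): the counter encodes the benaphore fast path.
// A positive value means a token is available; zero or below means callers
// must fall back to a backing semaphore.
//
//      BinaryBenaphore ben = { 1 };            // one token available
//      if ( P( ben ) ) { /* fast path: no blocking needed */ }
//      else            { /* slow path: block on the backing semaphore */ }
//      if ( V( ben ) ) { /* fast path: no waiter to wake */ }
//      else            { /* slow path: notify a waiter via the backing semaphore */ }
//
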
// Binary semaphore based on the BinaryBenaphore, layered on top of the 0-nary semaphore
struct ThreadBenaphore {
        BinaryBenaphore ben;
        Semaphore0nary  sem;
};

static inline void ?{}(ThreadBenaphore & this) {}
static inline void ?{}(ThreadBenaphore & this, zero_t) { (this.ben){ 0 }; }
static inline void ?{}(ThreadBenaphore & this, one_t ) { (this.ben){ 1 }; }

static inline bool P(ThreadBenaphore & this)              { return P(this.ben) ? false : P(this.sem); }
static inline bool tryP(ThreadBenaphore & this)           { return tryP(this.ben); }
static inline bool P(ThreadBenaphore & this, bool wait)   { return wait ? P(this) : tryP(this); }

static inline $thread * V(ThreadBenaphore & this, bool doUnpark = true) {
        if (V(this.ben)) return 0p;
        return V(this.sem, doUnpark);
}

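// Usage sketch (illustrative): the benaphore counter is the fast path and the
// 0-nary semaphore is the slow path that actually parks/unparks threads.
//
//      ThreadBenaphore tb = { 1 };             // start with one token
//      P( tb );                                // take the token, or park until a V()
//      $thread * woken = V( tb );              // 0p when no thread was waiting
//
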
//-----------------------------------------------------------------------------
// Semaphore
struct semaphore {
        __spinlock_t lock;
        int count;
        __queue_t($thread) waiting;
};

void  ?{}(semaphore & this, int count = 1);
void ^?{}(semaphore & this);
bool   P (semaphore & this);
bool   V (semaphore & this);
bool   V (semaphore & this, unsigned count);
$thread * V (semaphore & this, bool );

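// Usage sketch (illustrative): classic counting-semaphore protocol; the pool
// of four resources is hypothetical.
//
//      semaphore slots = { 4 };
//      P( slots );                             // acquire a slot, blocking if none are free
//      // ... use one of the four resources ...
//      V( slots );                             // release the slot, waking a blocked thread if any
//
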
//----------
struct single_acquisition_lock {
        inline blocking_lock;
};

static inline void  ?{}( single_acquisition_lock & this ) {((blocking_lock &)this){ false, false };}
static inline void ^?{}( single_acquisition_lock & this ) {}
static inline void   lock     ( single_acquisition_lock & this ) { lock    ( (blocking_lock &)this ); }
static inline bool   try_lock ( single_acquisition_lock & this ) { return try_lock( (blocking_lock &)this ); }
static inline void   unlock   ( single_acquisition_lock & this ) { unlock  ( (blocking_lock &)this ); }
static inline size_t on_wait  ( single_acquisition_lock & this ) { return on_wait ( (blocking_lock &)this ); }
static inline void   on_wakeup( single_acquisition_lock & this, size_t v ) { on_wakeup ( (blocking_lock &)this, v ); }
static inline void   on_notify( single_acquisition_lock & this, struct $thread * t ) { on_notify( (blocking_lock &)this, t ); }

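// Usage sketch (illustrative): single-acquisition (non-nesting) mutual
// exclusion; the shared counter is hypothetical.
//
//      single_acquisition_lock m;
//      lock( m );
//      shared_counter += 1;                    // critical section
//      unlock( m );
//
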
//----------
struct owner_lock {
        inline blocking_lock;
};

static inline void  ?{}( owner_lock & this ) {((blocking_lock &)this){ true, true };}
static inline void ^?{}( owner_lock & this ) {}
static inline void   lock     ( owner_lock & this ) { lock    ( (blocking_lock &)this ); }
static inline bool   try_lock ( owner_lock & this ) { return try_lock( (blocking_lock &)this ); }
static inline void   unlock   ( owner_lock & this ) { unlock  ( (blocking_lock &)this ); }
static inline size_t on_wait  ( owner_lock & this ) { return on_wait ( (blocking_lock &)this ); }
static inline void   on_wakeup( owner_lock & this, size_t v ) { on_wakeup ( (blocking_lock &)this, v ); }
static inline void   on_notify( owner_lock & this, struct $thread * t ) { on_notify( (blocking_lock &)this, t ); }

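// Usage sketch (illustrative): owner_lock forwards to the same blocking_lock
// but is constructed with { true, true }, which appears to select the
// owner-tracking, multiply-acquirable behaviour, so nested acquisition by the
// owning thread is the expected pattern:
//
//      owner_lock m;
//      lock( m );
//      lock( m );                              // owner may re-acquire
//      unlock( m );
//      unlock( m );                            // released after the matching unlock
//
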
struct fast_lock {
        $thread * volatile owner;
        ThreadBenaphore sem;
};

static inline void ?{}(fast_lock & this) { this.owner = 0p; }

static inline bool $try_lock(fast_lock & this, $thread * thrd) {
        $thread * exp = 0p;
        return __atomic_compare_exchange_n(&this.owner, &exp, thrd, false, __ATOMIC_SEQ_CST, __ATOMIC_RELAXED);
}

static inline void lock( fast_lock & this ) __attribute__((artificial));
static inline void lock( fast_lock & this ) {
        $thread * thrd = active_thread();
        /* paranoid */ verify(thrd != this.owner);

        for (;;) {
                if ($try_lock(this, thrd)) return;
                P(this.sem);
        }
}

static inline bool try_lock( fast_lock & this ) __attribute__((artificial));
static inline bool try_lock( fast_lock & this ) {
        $thread * thrd = active_thread();
        /* paranoid */ verify(thrd != this.owner);
        return $try_lock(this, thrd);
}

static inline $thread * unlock( fast_lock & this ) __attribute__((artificial));
static inline $thread * unlock( fast_lock & this ) {
        /* paranoid */ verify(active_thread() == this.owner);

        // clear 'owner' before unparking anyone
        // so new and newly-unblocked threads do not park incorrectly.
        // This may require additional fencing on ARM.
        this.owner = 0p;

        return V(this.sem);
}

static inline size_t on_wait( fast_lock & this ) { unlock(this); return 0; }
static inline void on_wakeup( fast_lock & this, size_t ) { lock(this); }
static inline void on_notify( fast_lock &, struct $thread * t ) { unpark(t); }

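// Usage sketch (illustrative): the uncontended path is a single
// compare-and-swap on 'owner'; contended callers park on the embedded
// ThreadBenaphore until unlock() wakes them.
//
//      fast_lock m;
//      lock( m );                              // CAS owner from 0p to this thread, else park and retry
//      // ... critical section ...
//      unlock( m );                            // clear owner, then wake one parked thread if any
//
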
struct mcs_node {
        mcs_node * volatile next;
        single_sem sem;
};

static inline void ?{}(mcs_node & this) { this.next = 0p; }

static inline mcs_node * volatile & ?`next ( mcs_node * node ) {
        return node->next;
}

struct mcs_lock {
        mcs_queue(mcs_node) queue;
};

static inline void lock(mcs_lock & l, mcs_node & n) {
        if(push(l.queue, &n))
                wait(n.sem);
}

static inline void unlock(mcs_lock & l, mcs_node & n) {
        mcs_node * next = advance(l.queue, &n);
        if(next) post(next->sem);
}

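// Usage sketch (illustrative): MCS-style locking, where every acquisition
// supplies its own queue node, typically allocated on the caller's stack.
//
//      mcs_lock l;
//      mcs_node n;                             // one node per acquisition
//      lock( l, n );                           // enqueue n; wait on n.sem unless the queue was empty
//      // ... critical section ...
//      unlock( l, n );                         // pass the lock to the next queued node, if any
//
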
//-----------------------------------------------------------------------------
// is_blocking_lock
trait is_blocking_lock(L & | sized(L)) {
        // For synchronization locks to use when acquiring
        void on_notify( L &, struct $thread * );

        // For synchronization locks to use when releasing
        size_t on_wait( L & );

        // to set recursion count after getting signalled
        void on_wakeup( L &, size_t recursion );
};

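// Note (illustrative): any lock that provides on_notify/on_wait/on_wakeup,
// such as single_acquisition_lock, owner_lock, or fast_lock above, satisfies
// this trait and can therefore parameterize the condition_variable declared
// below, e.g.
//
//      condition_variable( single_acquisition_lock ) cv;
//
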
//-----------------------------------------------------------------------------
// info_thread
// the info thread is a wrapper around a thread used
// to store extra data for use in the condition variable
forall(L & | is_blocking_lock(L)) {
        struct info_thread;

        // for use by sequence
        info_thread(L) *& Back( info_thread(L) * this );
        info_thread(L) *& Next( info_thread(L) * this );
}

//-----------------------------------------------------------------------------
// Synchronization Locks
forall(L & | is_blocking_lock(L)) {
        struct condition_variable {
                // Spin lock used for mutual exclusion
                __spinlock_t lock;

                // List of blocked threads
                Sequence( info_thread(L) ) blocked_threads;

                // Count of current blocked threads
                int count;
        };

        void  ?{}( condition_variable(L) & this );
        void ^?{}( condition_variable(L) & this );

        bool notify_one( condition_variable(L) & this );
        bool notify_all( condition_variable(L) & this );

        uintptr_t front( condition_variable(L) & this );

        bool empty  ( condition_variable(L) & this );
        int  counter( condition_variable(L) & this );

        void wait( condition_variable(L) & this );
        void wait( condition_variable(L) & this, uintptr_t info );
        bool wait( condition_variable(L) & this, Duration duration );
        bool wait( condition_variable(L) & this, uintptr_t info, Duration duration );

        void wait( condition_variable(L) & this, L & l );
        void wait( condition_variable(L) & this, L & l, uintptr_t info );
        bool wait( condition_variable(L) & this, L & l, Duration duration );
        bool wait( condition_variable(L) & this, L & l, uintptr_t info, Duration duration );
}
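
// Usage sketch (illustrative): the usual monitor-style pattern using the
// wait( cv, lock ) overloads above; the 'ready' predicate is hypothetical.
// Per the is_blocking_lock hooks, wait() releases the lock while blocked and
// reacquires it before returning.
//
//      single_acquisition_lock m;
//      condition_variable( single_acquisition_lock ) cv;
//
//      // waiting thread:                      // signalling thread:
//      lock( m );                              lock( m );
//      while ( ! ready ) wait( cv, m );        ready = true;
//      // ... consume ...                      notify_one( cv );
//      unlock( m );                            unlock( m );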