source: libcfa/src/concurrency/locks.hfa @ bd1d279

Last change on this file since bd1d279 was 7f958c4, checked in by caparsons <caparson@…>, 3 years ago: added fast lock/cond var

//
// Cforall Version 1.0.0 Copyright (C) 2021 University of Waterloo
//
// The contents of this file are covered under the licence agreement in the
// file "LICENCE" distributed with Cforall.
//
// locks.hfa -- PUBLIC
// Runtime locks that are used with the runtime thread system.
//
// Author           : Colby Alexander Parsons
// Created On       : Thu Jan 21 19:46:50 2021
// Last Modified By :
// Last Modified On :
// Update Count     :
//

#pragma once

#include <stdbool.h>
#include <stdio.h>

#include "bits/weakso_locks.hfa"
#include "containers/queueLockFree.hfa"
#include "containers/list.hfa"

#include "limits.hfa"
#include "thread.hfa"

#include "time_t.hfa"
#include "time.hfa"

//-----------------------------------------------------------------------------
// Semaphore
struct semaphore {
        __spinlock_t lock;
        int count;
        __queue_t(thread$) waiting;
};

void  ?{}(semaphore & this, int count = 1);
void ^?{}(semaphore & this);
bool   P (semaphore & this);
bool   V (semaphore & this);
bool   V (semaphore & this, unsigned count);
thread$ * V (semaphore & this, bool );

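// Usage sketch (illustrative, not part of the original header): a counting
// semaphore guarding two identical resources. P() blocks the calling thread
// while the count is zero; V() returns true if a waiting thread was unblocked.
// The names below are hypothetical.
static inline void semaphore_example() {
        semaphore slots = { 2 };        // two units of the resource
        P( slots );                     // acquire one unit, blocking if none remain
        // ... use the resource ...
        V( slots );                     // release the unit, possibly waking a waiter
}
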
//----------
// Simple blocking lock: may be acquired only once before being released,
// and ownership is not enforced on release.
struct single_acquisition_lock {
        inline blocking_lock;
};

static inline void  ?{}( single_acquisition_lock & this ) {((blocking_lock &)this){ false, false };}
static inline void ^?{}( single_acquisition_lock & this ) {}
static inline void   lock     ( single_acquisition_lock & this ) { lock    ( (blocking_lock &)this ); }
static inline bool   try_lock ( single_acquisition_lock & this ) { return try_lock( (blocking_lock &)this ); }
static inline void   unlock   ( single_acquisition_lock & this ) { unlock  ( (blocking_lock &)this ); }
static inline size_t on_wait  ( single_acquisition_lock & this ) { return on_wait ( (blocking_lock &)this ); }
static inline void   on_wakeup( single_acquisition_lock & this, size_t v ) { on_wakeup ( (blocking_lock &)this, v ); }
static inline void   on_notify( single_acquisition_lock & this, struct thread$ * t ) { on_notify( (blocking_lock &)this, t ); }

//----------
// Blocking lock with strict ownership: only the owner may release it, and
// the owner may acquire it recursively.
struct owner_lock {
        inline blocking_lock;
};

static inline void  ?{}( owner_lock & this ) {((blocking_lock &)this){ true, true };}
static inline void ^?{}( owner_lock & this ) {}
static inline void   lock     ( owner_lock & this ) { lock    ( (blocking_lock &)this ); }
static inline bool   try_lock ( owner_lock & this ) { return try_lock( (blocking_lock &)this ); }
static inline void   unlock   ( owner_lock & this ) { unlock  ( (blocking_lock &)this ); }
static inline size_t on_wait  ( owner_lock & this ) { return on_wait ( (blocking_lock &)this ); }
static inline void   on_wakeup( owner_lock & this, size_t v ) { on_wakeup ( (blocking_lock &)this, v ); }
static inline void   on_notify( owner_lock & this, struct thread$ * t ) { on_notify( (blocking_lock &)this, t ); }

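// Usage sketch (illustrative, not part of the original header): both locks
// share the blocking_lock interface; the difference is the constructor flags.
// owner_lock supports recursive acquisition by its owner, so nested acquires
// must be balanced by releases.
static inline void owner_lock_example( owner_lock & l ) {
        lock( l );              // first acquisition
        lock( l );              // recursive acquisition by the same thread
        unlock( l );
        unlock( l );            // released once acquisitions are balanced
}
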
//-----------------------------------------------------------------------------
// MCS Lock
struct mcs_node {
        mcs_node * volatile next;
        single_sem sem;
};

static inline void ?{}(mcs_node & this) { this.next = 0p; }

static inline mcs_node * volatile & ?`next ( mcs_node * node ) {
        return node->next;
}

struct mcs_lock {
        mcs_queue(mcs_node) queue;
};

// n must remain valid until the matching unlock, and the same node
// must be passed to both lock and unlock
static inline void lock(mcs_lock & l, mcs_node & n) {
        if(push(l.queue, &n))
                wait(n.sem);
}

static inline void unlock(mcs_lock & l, mcs_node & n) {
        mcs_node * next = advance(l.queue, &n);
        if(next) post(next->sem);
}

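// Usage sketch (illustrative, not part of the original header): each acquire
// supplies its own queue node, typically on the acquiring thread's stack.
static inline void mcs_example( mcs_lock & l ) {
        mcs_node n;             // this thread's place in the queue
        lock( l, n );           // blocks until the predecessor hands the lock off
        // ... critical section ...
        unlock( l, n );         // wakes the next queued thread, if any
}
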
//-----------------------------------------------------------------------------
// Linear backoff then block lock
struct linear_backoff_then_block_lock {
        // Spin lock used for mutual exclusion
        __spinlock_t spinlock;

        // Current thread owning the lock
        struct thread$ * owner;

        // List of blocked threads
        dlist( thread$ ) blocked_threads;

        // Used for comparing and exchanging:
        // 0 = unlocked, 1 = locked, 2 = locked with contention (possible waiters)
        volatile size_t lock_value;

        // used for linear backoff spinning
        int spin_start;
        int spin_end;
        int spin_count;

        // after unsuccessful linear backoff yield this many times
        int yield_count;
};

static inline void  ?{}( linear_backoff_then_block_lock & this, int spin_start, int spin_end, int spin_count, int yield_count ) {
        this.spinlock{};
        this.blocked_threads{};
        this.lock_value = 0;
        this.spin_start = spin_start;
        this.spin_end = spin_end;
        this.spin_count = spin_count;
        this.yield_count = yield_count;
}
static inline void  ?{}( linear_backoff_then_block_lock & this ) { this{4, 1024, 16, 0}; }
static inline void ^?{}( linear_backoff_then_block_lock & this ) {}
static inline void ?{}( linear_backoff_then_block_lock & this, linear_backoff_then_block_lock this2 ) = void;
static inline void ?=?( linear_backoff_then_block_lock & this, linear_backoff_then_block_lock this2 ) = void;

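// Construction sketch (illustrative, not part of the original header): the
// arguments mirror the field names, and the default constructor is equivalent
// to the explicit call below. Note that the locking path in this file consults
// only spin_start and spin_end; spin_count and yield_count are stored but not
// used by the code shown here.
static inline void backoff_lock_example() {
        linear_backoff_then_block_lock l = { 4, 1024, 16, 0 };
        lock( l );      // spins with growing backoff, then blocks if still contended
        // ... critical section ...
        unlock( l );
}
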
static inline bool internal_try_lock(linear_backoff_then_block_lock & this, size_t & compare_val) with(this) {
        if (__atomic_compare_exchange_n(&lock_value, &compare_val, 1, false, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)) {
                owner = active_thread();
                return true;
        }
        return false;
}

static inline bool try_lock(linear_backoff_then_block_lock & this) { size_t compare_val = 0; return internal_try_lock(this, compare_val); }

static inline bool try_lock_contention(linear_backoff_then_block_lock & this) with(this) {
        if (__atomic_exchange_n(&lock_value, 2, __ATOMIC_ACQUIRE) == 0) {
                owner = active_thread();
                return true;
        }
        return false;
}

static inline bool block(linear_backoff_then_block_lock & this) with(this) {
        lock( spinlock __cfaabi_dbg_ctx2 );
        // lock was released in the meantime; retry instead of blocking
        if (lock_value != 2) {
                unlock( spinlock );
                return true;
        }
        insert_last( blocked_threads, *active_thread() );
        unlock( spinlock );
        park( );
        return true;
}

static inline void lock(linear_backoff_then_block_lock & this) with(this) {
        // if this thread is already the owner, just return
        if (active_thread() == owner) return;
        size_t compare_val = 0;
        int spin = spin_start;
        // linear backoff
        for( ;; ) {
                compare_val = 0;
                if (internal_try_lock(this, compare_val)) return;
                if (2 == compare_val) break;    // already contended, skip to blocking
                for (int i = 0; i < spin; i++) Pause();
                if (spin >= spin_end) break;
                spin += spin;
        }

        if(2 != compare_val && try_lock_contention(this)) return;
        // block until signalled
        while (block(this)) if(try_lock_contention(this)) return;
}

static inline void unlock(linear_backoff_then_block_lock & this) with(this) {
        verify(lock_value > 0);
        owner = 0p;
        if (__atomic_exchange_n(&lock_value, 0, __ATOMIC_RELEASE) == 1) return;  // uncontended: no waiters
        lock( spinlock __cfaabi_dbg_ctx2 );
        thread$ * t = &try_pop_front( blocked_threads );
        unlock( spinlock );
        unpark( t );
}

static inline void on_notify(linear_backoff_then_block_lock & this, struct thread$ * t ) { unpark(t); }
static inline size_t on_wait(linear_backoff_then_block_lock & this) { unlock(this); return 0; }
static inline void on_wakeup(linear_backoff_then_block_lock & this, size_t recursion ) { lock(this); }

//-----------------------------------------------------------------------------
// Fast Block Lock

// High efficiency minimal blocking lock
// - No reacquire for cond var
// - No recursive acquisition
// - No ownership
struct fast_block_lock {
        // Spin lock used for mutual exclusion
        __spinlock_t lock;

        // List of blocked threads
        dlist( thread$ ) blocked_threads;

        bool held:1;
};

static inline void  ?{}( fast_block_lock & this ) with(this) {
        lock{};
        blocked_threads{};
        held = false;
}
static inline void ^?{}( fast_block_lock & this ) {}
static inline void ?{}( fast_block_lock & this, fast_block_lock this2 ) = void;
static inline void ?=?( fast_block_lock & this, fast_block_lock this2 ) = void;

// WARNING: if this is called recursively IT WILL DEADLOCK
static inline void lock(fast_block_lock & this) with(this) {
        lock( lock __cfaabi_dbg_ctx2 );
        if (held) {
                insert_last( blocked_threads, *active_thread() );
                unlock( lock );
                park( );
                return;
        }
        held = true;
        unlock( lock );
}

static inline void unlock(fast_block_lock & this) with(this) {
        lock( lock __cfaabi_dbg_ctx2 );
        /* paranoid */ verifyf( held != false, "Attempt to release lock %p that isn't held", &this );
        thread$ * t = &try_pop_front( blocked_threads );
        held = ( t ? true : false );    // lock passes directly to the woken thread, if any
        unpark( t );
        unlock( lock );
}

static inline void on_notify(fast_block_lock & this, struct thread$ * t ) { unpark(t); }
static inline size_t on_wait(fast_block_lock & this) { unlock(this); return 0; }
static inline void on_wakeup(fast_block_lock & this, size_t recursion ) { }

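// Usage sketch (illustrative, not part of the original header): plain,
// non-reentrant lock/unlock only; never acquire while already holding
// (see the warning above).
static inline void fast_block_lock_example( fast_block_lock & l ) {
        lock( l );      // parks the thread if the lock is held
        // ... short critical section ...
        unlock( l );    // hands the lock to the oldest waiter, if any
}
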
//-----------------------------------------------------------------------------
// is_blocking_lock
trait is_blocking_lock(L & | sized(L)) {
        // For synchronization locks to use when acquiring
        void on_notify( L &, struct thread$ * );

        // For synchronization locks to use when releasing
        size_t on_wait( L & );

        // to set the recursion count after being signalled
        void on_wakeup( L &, size_t recursion );
};

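// Illustrative note (not part of the original header): the locks above define
// on_notify/on_wait/on_wakeup and so satisfy this trait, allowing them to
// parameterize the condition variables below, e.g.
//   condition_variable( single_acquisition_lock ) cv;
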
//-----------------------------------------------------------------------------
// info_thread
// info_thread is a wrapper around a thread used to store extra data
// for use in the condition variable
forall(L & | is_blocking_lock(L)) {
        struct info_thread;

        // // for use by sequence
        // info_thread(L) *& Back( info_thread(L) * this );
        // info_thread(L) *& Next( info_thread(L) * this );
}

//-----------------------------------------------------------------------------
// Synchronization Locks
forall(L & | is_blocking_lock(L)) {

        //-----------------------------------------------------------------------------
        // condition_variable

        // The multi-tool condition variable
        // - supports timed waits that return on either a signal or a timeout
        // - can wait without passing a lock
        // - waiters can reacquire different locks while waiting on the same cond var
        // - keeps a shadow queue of per-waiter information (see front())
        // - can be signalled outside of critical sections with no locks held
        // (a usage sketch appears at the end of this file)
        struct condition_variable {
                // Spin lock used for mutual exclusion
                __spinlock_t lock;

                // List of blocked threads
                dlist( info_thread(L) ) blocked_threads;

                // Count of current blocked threads
                int count;
        };

        void  ?{}( condition_variable(L) & this );
        void ^?{}( condition_variable(L) & this );

        bool notify_one( condition_variable(L) & this );
        bool notify_all( condition_variable(L) & this );

        uintptr_t front( condition_variable(L) & this );

        bool empty  ( condition_variable(L) & this );
        int  counter( condition_variable(L) & this );

        void wait( condition_variable(L) & this );
        void wait( condition_variable(L) & this, uintptr_t info );
        bool wait( condition_variable(L) & this, Duration duration );
        bool wait( condition_variable(L) & this, uintptr_t info, Duration duration );

        void wait( condition_variable(L) & this, L & l );
        void wait( condition_variable(L) & this, L & l, uintptr_t info );
        bool wait( condition_variable(L) & this, L & l, Duration duration );
        bool wait( condition_variable(L) & this, L & l, uintptr_t info, Duration duration );

        //-----------------------------------------------------------------------------
        // fast_cond_var

        // The trimmed and slimmed-down condition variable
        // - has no internal lock, so a lock must be held while using this cond var
        // - signalling without holding the branded lock is UNSAFE!
        // - works with only one lock; the cond var is branded by the first lock used
        struct fast_cond_var {
                // List of blocked threads
                dlist( info_thread(L) ) blocked_threads;

                #ifdef __CFA_DEBUG__
                L * lock_used;
                #endif
        };

        void  ?{}( fast_cond_var(L) & this );
        void ^?{}( fast_cond_var(L) & this );

        bool notify_one( fast_cond_var(L) & this );
        bool notify_all( fast_cond_var(L) & this );

        uintptr_t front( fast_cond_var(L) & this );

        bool empty  ( fast_cond_var(L) & this );

        void wait( fast_cond_var(L) & this, L & l );
        void wait( fast_cond_var(L) & this, L & l, uintptr_t info );
}
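
// Usage sketch (illustrative, not part of the original header): the classic
// guarded wait/notify pattern with condition_variable. The type and names
// below are hypothetical.
struct cv_example_state {
        single_acquisition_lock m;
        condition_variable( single_acquisition_lock ) cv;
        bool ready;
};

static inline void cv_example_consumer( cv_example_state & s ) {
        lock( s.m );
        // wait() releases s.m while blocked and reacquires it before returning
        while ( ! s.ready ) wait( s.cv, s.m );
        unlock( s.m );
}

static inline void cv_example_producer( cv_example_state & s ) {
        lock( s.m );
        s.ready = true;
        unlock( s.m );
        notify_one( s.cv );     // legal here: no locks need be held to signal
}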