source: libcfa/src/concurrency/locks.hfa @ b7763da

ADTast-experimentalenumforall-pointer-decayjacob/cs343-translationnew-ast-unique-exprpthread-emulationqualifiedEnum
Last change on this file since b7763da was b7763da, checked in by caparsons <caparson@…>, 3 years ago

added martin lock and improvement

  • Property mode set to 100644
File size: 14.1 KB
RevLine 
[ab1b971]1//
2// Cforall Version 1.0.0 Copyright (C) 2021 University of Waterloo
3//
4// The contents of this file are covered under the licence agreement in the
5// file "LICENCE" distributed with Cforall.
6//
7// locks.hfa -- PUBLIC
8// Runtime locks that are used with the runtime thread system.
9//
10// Author           : Colby Alexander Parsons
11// Created On       : Thu Jan 21 19:46:50 2021
12// Last Modified By :
13// Last Modified On :
14// Update Count     :
15//
16
[f4e35326]17#pragma once
18
[848439f]19#include <stdbool.h>
[5a46e09]20#include <stdio.h>
[848439f]21
[ab1b971]22#include "bits/weakso_locks.hfa"
[f4ec5e45]23#include "containers/queueLockFree.hfa"
[82f4063]24#include "containers/list.hfa"
[f4ec5e45]25
[07033ce]26#include "limits.hfa"
[f4ec5e45]27#include "thread.hfa"
[848439f]28
29#include "time_t.hfa"
30#include "time.hfa"
31
//-----------------------------------------------------------------------------
// Semaphores

// '0-nary' semaphore
// Similar to a counting semaphore except the value of one is never reached
// as a consequence, a V() that would bring the value to 1 *spins* until
// a P consumes it
struct Semaphore0nary {
        __spinlock_t lock; // serializes the consumer side: V() pops under this lock
        mpsc_queue($thread) queue; // multi-producer/single-consumer queue of parked threads
};
43
// Enqueue 'thrd' as a waiter on the semaphore.  Does NOT park; the caller
// is responsible for parking the thread afterwards.  Always reports success.
static inline bool P(Semaphore0nary & this, $thread * thrd) {
        /* paranoid */ verify(!thrd`next);
        /* paranoid */ verify(!(&(*thrd)`next));

        push(this.queue, thrd);
        return true;
}
51
// Blocking P: enqueue the calling thread on the semaphore, then park until
// a matching V() unparks it.  Always reports success.
static inline bool P(Semaphore0nary & this) {
    $thread * self = active_thread();
    P(this, self);
    park();
    return true;
}
58
// Wake one waiter.  Spins until a thread becomes visible in the queue:
// per the 0-nary contract a matching P() is already in flight, its push
// may simply not have landed yet.  Returns the dequeued thread; unparks
// it unless doUnpark is false (caller will unpark later).
static inline $thread * V(Semaphore0nary & this, bool doUnpark = true) {
        $thread * next;
        lock(this.lock __cfaabi_dbg_ctx2);
                for (;;) {
                        next = pop(this.queue);
                        if (next) break;
                        Pause();
                }
        unlock(this.lock);

        if (doUnpark) unpark(next);
        return next;
}
72
// Wrapper used on top of any semaphore to avoid potential locking.
// counter semantics: 1 = permit available, 0 = taken/no waiters,
// negative = number of threads that must block on the backing semaphore.
struct BinaryBenaphore {
        volatile ssize_t counter;
};
77
static inline {
        void ?{}(BinaryBenaphore & this) { this.counter = 0; }
        void ?{}(BinaryBenaphore & this, zero_t) { this.counter = 0; }
        void ?{}(BinaryBenaphore & this, one_t ) { this.counter = 1; }

        // returns true if no blocking needed
        bool P(BinaryBenaphore & this) {
                return __atomic_fetch_sub(&this.counter, 1, __ATOMIC_SEQ_CST) > 0;
        }

        // Non-blocking P: only succeeds while a permit (counter >= 1) is present.
        bool tryP(BinaryBenaphore & this) {
                ssize_t c = this.counter;
                /* paranoid */ verify( c > MIN );
                return (c >= 1) && __atomic_compare_exchange_n(&this.counter, &c, c-1, false, __ATOMIC_SEQ_CST, __ATOMIC_RELAXED);
        }

        // Returns true if NO notify is needed (previous comment had the polarity
        // inverted: V(ThreadBenaphore) short-circuits the semaphore wakeup on true):
        //   true  - counter was 0 (no waiters) or already 1 (redundant V)
        //   false - counter was negative, i.e. a waiter was accounted for and
        //           the caller must wake one through the backing semaphore
        bool V(BinaryBenaphore & this) {
                ssize_t c = 0;
                for () {
                        /* paranoid */ verify( this.counter < MAX );
                        if (__atomic_compare_exchange_n(&this.counter, &c, c+1, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) {
                                if (c == 0) return true;
                                /* paranoid */ verify(c < 0);
                                return false;
                        } else {
                                if (c == 1) return true;
                                /* paranoid */ verify(c < 1);
                                Pause();
                        }
                }
        }
}
111
// Binary Semaphore based on the BinaryBenaphore on top of the 0-nary Semaphore:
// the benaphore counter provides the uncontended fast path, the 0-nary
// semaphore provides the park/unpark slow path.
struct ThreadBenaphore {
        BinaryBenaphore ben;
        Semaphore0nary  sem;
};
117
// Constructors: default leaves members default-constructed; zero_t/one_t
// forward the initial permit count to the benaphore.
static inline void ?{}(ThreadBenaphore & this) {}
static inline void ?{}(ThreadBenaphore & this, zero_t) { (this.ben){ 0 }; }
static inline void ?{}(ThreadBenaphore & this, one_t ) { (this.ben){ 1 }; }

// P: fast path through the benaphore counter; fall through to the 0-nary
// semaphore (which parks) only when the counter says blocking is required.
// Returns false when no blocking was needed.
static inline bool P(ThreadBenaphore & this) {
        if (P(this.ben)) return false;
        return P(this.sem);
}

// Non-blocking attempt: only consults the benaphore counter.
static inline bool tryP(ThreadBenaphore & this) { return tryP(this.ben); }

// wait == true selects the blocking P, wait == false the non-blocking tryP.
static inline bool P(ThreadBenaphore & this, bool wait) {
        if (wait) return P(this);
        return tryP(this);
}
125
// V: if the benaphore reports no waiter to wake, return 0p; otherwise wake
// (or, with doUnpark == false, just dequeue) one thread from the semaphore.
static inline $thread * V(ThreadBenaphore & this, bool doUnpark = true) {
        if (V(this.ben)) return 0p;
        return V(this.sem, doUnpark);
}
130
//-----------------------------------------------------------------------------
// Semaphore
// Classic counting semaphore; operations are defined in the corresponding
// .cfa file.  NOTE(review): exact count semantics (e.g. negative while
// threads wait) are not visible here - confirm against the implementation.
struct semaphore {
        __spinlock_t lock;         // protects count and waiting
        int count;                 // semaphore value
        __queue_t($thread) waiting; // threads blocked in P()
};

void  ?{}(semaphore & this, int count = 1);
void ^?{}(semaphore & this);
bool   P (semaphore & this);
bool   V (semaphore & this);
bool   V (semaphore & this, unsigned count);
$thread * V (semaphore & this, bool );
[f4ec5e45]145
//----------
// Thin wrapper over blocking_lock; all operations delegate to it.
// Constructed with { false, false } - NOTE(review): argument meaning is
// defined by blocking_lock in bits/weakso_locks.hfa, confirm there.
struct single_acquisition_lock {
        inline blocking_lock;
};

static inline void  ?{}( single_acquisition_lock & this ) {((blocking_lock &)this){ false, false };}
static inline void ^?{}( single_acquisition_lock & this ) {}
static inline void   lock     ( single_acquisition_lock & this ) { lock    ( (blocking_lock &)this ); }
static inline bool   try_lock ( single_acquisition_lock & this ) { return try_lock( (blocking_lock &)this ); }
static inline void   unlock   ( single_acquisition_lock & this ) { unlock  ( (blocking_lock &)this ); }
static inline size_t on_wait  ( single_acquisition_lock & this ) { return on_wait ( (blocking_lock &)this ); }
static inline void   on_wakeup( single_acquisition_lock & this, size_t v ) { on_wakeup ( (blocking_lock &)this, v ); }
static inline void   on_notify( single_acquisition_lock & this, struct $thread * t ) { on_notify( (blocking_lock &)this, t ); }
[ab1b971]159
//----------
// Thin wrapper over blocking_lock, constructed with { true, true } (compare
// single_acquisition_lock's { false, false }); all operations delegate.
struct owner_lock {
        inline blocking_lock;
};

static inline void  ?{}( owner_lock & this ) {((blocking_lock &)this){ true, true };}
static inline void ^?{}( owner_lock & this ) {}
static inline void   lock     ( owner_lock & this ) { lock    ( (blocking_lock &)this ); }
static inline bool   try_lock ( owner_lock & this ) { return try_lock( (blocking_lock &)this ); }
static inline void   unlock   ( owner_lock & this ) { unlock  ( (blocking_lock &)this ); }
static inline size_t on_wait  ( owner_lock & this ) { return on_wait ( (blocking_lock &)this ); }
static inline void   on_wakeup( owner_lock & this, size_t v ) { on_wakeup ( (blocking_lock &)this, v ); }
static inline void   on_notify( owner_lock & this, struct $thread * t ) { on_notify( (blocking_lock &)this, t ); }
173
// Mutex built from a CAS on 'owner' plus a ThreadBenaphore for blocking.
struct fast_lock {
        $thread * volatile owner; // 0p when free, otherwise the holding thread
        ThreadBenaphore sem;      // threads waiting for the lock
};

// 'sem' is left to default construction.
static inline void ?{}(fast_lock & this) { this.owner = 0p; }
180
// Internal: attempt to install 'thrd' as owner with a single CAS (0p -> thrd).
static inline bool $try_lock(fast_lock & this, $thread * thrd) {
    $thread * exp = 0p;
    return __atomic_compare_exchange_n(&this.owner, &exp, thrd, false, __ATOMIC_SEQ_CST, __ATOMIC_RELAXED);
}
185
static inline void lock( fast_lock & this ) __attribute__((artificial));
// Acquire: retry the owner CAS, blocking on the benaphore between attempts.
// Not recursive - asserts the caller does not already hold the lock.
static inline void lock( fast_lock & this ) {
        $thread * thrd = active_thread();
        /* paranoid */verify(thrd != this.owner);

        for (;;) {
                if ($try_lock(this, thrd)) return;
                P(this.sem);
        }
}
196
static inline bool try_lock( fast_lock & this ) __attribute__((artificial));
// Non-blocking acquire: a single CAS attempt on 'owner'.
static inline bool try_lock ( fast_lock & this ) {
        $thread * me = active_thread();
        /* paranoid */ verify(me != this.owner);
        return $try_lock(this, me);
}
203
static inline $thread * unlock( fast_lock & this ) __attribute__((artificial));
// Release: clear 'owner' first, then V the benaphore; returns the thread
// that was (or will be) woken, or 0p if none was waiting.
static inline $thread * unlock( fast_lock & this ) {
        /* paranoid */ verify(active_thread() == this.owner);

        // open 'owner' before unlocking anyone
        // so new and unlocked threads don't park incorrectly.
        // This may require additional fencing on ARM.
        this.owner = 0p;

        return V(this.sem);
}
215
// is_blocking_lock hooks: release while waiting on a condition (recursion
// depth is always 0), re-acquire on wakeup, and unpark the signalled thread.
static inline size_t on_wait( fast_lock & this ) { unlock(this); return 0; }
static inline void on_wakeup( fast_lock & this, size_t ) { lock(this); }
static inline void on_notify( fast_lock &, struct $thread * t ) { unpark(t); }
[f4ec5e45]219
// Per-thread queue node for the MCS lock: each waiter spins/blocks on its
// own semaphore, avoiding cache-line contention on a shared flag.
struct mcs_node {
        mcs_node * volatile next; // successor in the queue, 0p at the tail
        single_sem sem;           // signalled when this node acquires the lock
};

static inline void ?{}(mcs_node & this) { this.next = 0p; }

// `next accessor required by mcs_queue.
static inline mcs_node * volatile & ?`next ( mcs_node * node ) {
        return node->next;
}
230
// MCS-style queue lock: the lock is just the queue of waiting nodes.
struct mcs_lock {
        mcs_queue(mcs_node) queue;
};
234
// Acquire: append our node; if a predecessor exists (push reports the queue
// was non-empty), wait on our own semaphore until the predecessor posts it.
static inline void lock(mcs_lock & l, mcs_node & n) {
        if(push(l.queue, &n))
                wait(n.sem);
}

// Release: hand the lock to our successor (if any) by posting its semaphore.
static inline void unlock(mcs_lock & l, mcs_node & n) {
        mcs_node * next = advance(l.queue, &n);
        if(next) post(next->sem);
}
244
// Lock that spins with growing backoff, then yields, then blocks.
struct linear_backoff_then_block_lock {
        // Spin lock used for mutual exclusion
        __spinlock_t spinlock;

        // Current thread owning the lock
        struct $thread * owner;

        // List of blocked threads
        dlist( $thread ) blocked_threads;

        // Used for comparing and exchanging:
        // 0 = unlocked, 1 = locked/uncontended, 2 = locked/contended
        // (unlock skips the wakeup path when it observes 1)
        volatile size_t lock_value;

        // used for linear backoff spinning
        int spin_start; // initial spin count, doubled each round
        int spin_end;   // cap on the per-round spin count
        int spin_count; // rounds of spinning at the cap before yielding
        int yield_count;

        // after unsuccessful linear backoff yield this many times
        // (field order above kept; yield_count is the yield bound)
};
266
// Construct with explicit backoff tuning parameters (see field comments).
static inline void  ?{}( linear_backoff_then_block_lock & this, int spin_start, int spin_end, int spin_count, int yield_count ) {
        this.spinlock{};
        this.blocked_threads{};
        this.lock_value = 0;
        this.spin_start = spin_start;
        this.spin_end = spin_end;
        this.spin_count = spin_count;
        this.yield_count = yield_count;
}
// Default construction: spin 4..1024 (doubling), 16 capped rounds, no yields.
// (review) removed leftover debug printf that wrote the lock's address to
// stdout on every default construction.
static inline void  ?{}( linear_backoff_then_block_lock & this ) { this{4, 1024, 16, 0}; }
static inline void ^?{}( linear_backoff_then_block_lock & this ) {}
279
// CAS lock_value compare_val -> 1 (uncontended acquire); on failure
// compare_val is updated with the observed value for the caller to inspect.
static inline bool internal_try_lock(linear_backoff_then_block_lock & this, size_t & compare_val) with(this) {
        if (__atomic_compare_exchange_n(&lock_value, &compare_val, 1, false, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)) {
                owner = active_thread();
                return true;
        }
        return false;
}

// Public non-blocking acquire: single 0 -> 1 attempt.
static inline bool try_lock(linear_backoff_then_block_lock & this) { size_t compare_val = 0; return internal_try_lock(this, compare_val); }

// Acquire attempt that marks the lock contended (unconditionally writes 2);
// succeeds only if the previous value was 0 (unlocked).
static inline bool try_lock_contention(linear_backoff_then_block_lock & this) with(this) {
        if (__atomic_exchange_n(&lock_value, 2, __ATOMIC_ACQUIRE) == 0) {
                owner = active_thread();
                return true;
        }
        return false;
}
297
// Park on the blocked list.  Re-checks lock_value under the spinlock so a
// release that raced ahead of us is not missed; returns true either way so
// the caller retries the acquire.
static inline bool block(linear_backoff_then_block_lock & this) with(this) {
        lock( spinlock __cfaabi_dbg_ctx2 );
        if (lock_value != 2) {
                unlock( spinlock );
                return true;
        }
        insert_last( blocked_threads, *active_thread() );
        unlock( spinlock );
        park( );
        return true;
}
309
// Acquire: (1) spin with doubling backoff while uncontended, (2) spin/yield
// while marking the lock contended, (3) block until signalled.
// NOTE(review): the owner re-entry check returns true without any recursion
// accounting, and unlock() has no matching recursion logic - confirm the
// intended re-entrancy semantics.
static inline bool lock(linear_backoff_then_block_lock & this) with(this) {
        // if owner just return
        if (active_thread() == owner) return true;
        size_t compare_val = 0;
        int spin = spin_start;
        // linear backoff
        for( ;; ) {
                compare_val = 0;
                if (internal_try_lock(this, compare_val)) return true;
                if (2 == compare_val) break; // already contended, skip to phase 2
                for (int i = 0; i < spin; i++) Pause();
                if (spin >= spin_end) break;
                spin += spin; // double the spin each round
        }

        // linear backoff bounded by spin_count
        spin = spin_start;
        int spin_counter = 0;
        int yield_counter = 0;
        for ( ;; ) {
                if(try_lock_contention(this)) return true;
                if(spin_counter < spin_count) {
                        for (int i = 0; i < spin; i++) Pause();
                        if (spin < spin_end) spin += spin;
                        else spin_counter++;
                } else if (yield_counter < yield_count) {
                        // after linear backoff yield yield_count times
                        yield_counter++;
                        yield();
                } else { break; }
        }

        // block until signalled
        while (block(this)) if(try_lock_contention(this)) return true;
       
        // this should never be reached as block(this) always returns true
        return false;
}
348
// Variant of lock(): identical phase-1 backoff, but phase 2 retries the
// plain 0 -> 1 CAS (avoiding the unconditional contended marking) and only
// falls back to try_lock_contention once contention (value 2) is observed.
static inline bool lock_improved(linear_backoff_then_block_lock & this) with(this) {
        // if owner just return
        if (active_thread() == owner) return true;
        size_t compare_val = 0;
        int spin = spin_start;
        // linear backoff
        for( ;; ) {
                compare_val = 0;
                if (internal_try_lock(this, compare_val)) return true;
                if (2 == compare_val) break;
                for (int i = 0; i < spin; i++) Pause();
                if (spin >= spin_end) break;
                spin += spin;
        }

        // linear backoff bounded by spin_count
        spin = spin_start;
        int spin_counter = 0;
        int yield_counter = 0;
        for ( ;; ) {
                compare_val = 0;
                if(internal_try_lock(this, compare_val)) return true;
                if (2 == compare_val) break;
                if(spin_counter < spin_count) {
                        for (int i = 0; i < spin; i++) Pause();
                        if (spin < spin_end) spin += spin;
                        else spin_counter++;
                } else if (yield_counter < yield_count) {
                        // after linear backoff yield yield_count times
                        yield_counter++;
                        yield();
                } else { break; }
        }

        // one contended attempt before committing to blocking
        if(2 != compare_val && try_lock_contention(this)) return true;
        // block until signalled
        while (block(this)) if(try_lock_contention(this)) return true;
       
        // this should never be reached as block(this) always returns true
        return false;
}
390
// Release: swap lock_value to 0; if the old value was 1 (uncontended) we are
// done, otherwise wake one blocked thread.
// NOTE(review): on the contended path the list can be empty (a contender set
// 2 but has not parked yet) - behaviour then depends on try_pop_front/unpark
// handling an empty result; confirm against containers/list.hfa.
static inline void unlock(linear_backoff_then_block_lock & this) with(this) {
        verify(lock_value > 0);
    owner = 0p;
    if (__atomic_exchange_n(&lock_value, 0, __ATOMIC_RELEASE) == 1) return;
        lock( spinlock __cfaabi_dbg_ctx2 );
        $thread * t = &try_pop_front( blocked_threads );
        unlock( spinlock );
        unpark( t );
}
400
// is_blocking_lock hooks: unpark the signalled thread, fully release while
// waiting (recursion depth 0), and re-acquire (via lock_improved) on wakeup.
static inline void on_notify(linear_backoff_then_block_lock & this, struct $thread * t ) { unpark(t); }
static inline size_t on_wait(linear_backoff_then_block_lock & this) { unlock(this); return 0; }
static inline void on_wakeup(linear_backoff_then_block_lock & this, size_t recursion ) { lock_improved(this); }
[5a46e09]404
//-----------------------------------------------------------------------------
// is_blocking_lock
// Interface a lock must satisfy to be usable with condition_variable below.
trait is_blocking_lock(L & | sized(L)) {
        // For synchronization locks to use when acquiring
        void on_notify( L &, struct $thread * );

        // For synchronization locks to use when releasing;
        // returns the recursion depth to restore on wakeup
        size_t on_wait( L & );

        // to set recursion count after getting signalled;
        void on_wakeup( L &, size_t recursion );
};
[848439f]417
//-----------------------------------------------------------------------------
// // info_thread
// // the info thread is a wrapper around a thread used
// // to store extra data for use in the condition variable
forall(L & | is_blocking_lock(L)) {
        // opaque here; defined in the corresponding .cfa file
        struct info_thread;

        // // for use by sequence
        // info_thread(L) *& Back( info_thread(L) * this );
        // info_thread(L) *& Next( info_thread(L) * this );
}
429
//-----------------------------------------------------------------------------
// Synchronization Locks
// Condition variable generic over any lock satisfying is_blocking_lock.
forall(L & | is_blocking_lock(L)) {
        struct condition_variable {
                // Spin lock used for mutual exclusion
                __spinlock_t lock;

                // List of blocked threads
                dlist( info_thread(L) ) blocked_threads;

                // Count of current blocked threads
                int count;
        };
       

        void  ?{}( condition_variable(L) & this );
        void ^?{}( condition_variable(L) & this );

        // wake one / all waiters; boolean result presumably reports whether
        // anything was woken - NOTE(review): confirm in the implementation
        bool notify_one( condition_variable(L) & this );
        bool notify_all( condition_variable(L) & this );

        // user info stored by the front waiter (see wait with 'info' below)
        uintptr_t front( condition_variable(L) & this );

        bool empty  ( condition_variable(L) & this );
        int  counter( condition_variable(L) & this );

        // untimed / timed waits without an associated lock
        void wait( condition_variable(L) & this );
        void wait( condition_variable(L) & this, uintptr_t info );
        bool wait( condition_variable(L) & this, Duration duration );
        bool wait( condition_variable(L) & this, uintptr_t info, Duration duration );

        // untimed / timed waits that release and re-acquire lock 'l'
        void wait( condition_variable(L) & this, L & l );
        void wait( condition_variable(L) & this, L & l, uintptr_t info );
        bool wait( condition_variable(L) & this, L & l, Duration duration );
        bool wait( condition_variable(L) & this, L & l, uintptr_t info, Duration duration );
}
Note: See TracBrowser for help on using the repository browser.