source: libcfa/src/concurrency/locks.hfa @ 60c5b6d

Last change on this file since 60c5b6d was 2ad5e1d5, checked in by caparson <caparson@…>, 13 months ago

added missing semicolons

//
// Cforall Version 1.0.0 Copyright (C) 2021 University of Waterloo
//
// The contents of this file are covered under the licence agreement in the
// file "LICENCE" distributed with Cforall.
//
// locks.hfa -- PUBLIC
// Runtime locks that are used with the runtime thread system.
//
// Author           : Colby Alexander Parsons
// Created On       : Thu Jan 21 19:46:50 2021
// Last Modified By :
// Last Modified On :
// Update Count     :
//
16
[f4e35326]17#pragma once
18
[848439f]19#include <stdbool.h>
[5a46e09]20#include <stdio.h>
[848439f]21
[ab1b971]22#include "bits/weakso_locks.hfa"
[55b060d]23#include "collections/lockfree.hfa"
24#include "collections/list.hfa"
[f4ec5e45]25
[07033ce]26#include "limits.hfa"
[f4ec5e45]27#include "thread.hfa"
[848439f]28
29#include "time_t.hfa"
30#include "time.hfa"
31
[beeff61e]32#include "select.hfa"
33
[b77f0e1]34// futex headers
35#include <linux/futex.h>      /* Definition of FUTEX_* constants */
36#include <sys/syscall.h>      /* Definition of SYS_* constants */
[1e538fb]37#include <unistd.h>           /* Definition of syscall routine */
[b77f0e1]38
typedef void (*__cfa_pre_park)( void * );

static inline void pre_park_noop( void * ) {}

//-----------------------------------------------------------------------------
// is_blocking_lock
forall( L & | sized(L) )
trait is_blocking_lock {
        // For synchronization locks to use when acquiring
        void on_notify( L &, struct thread$ * );

        // For synchronization locks to use when releasing
        size_t on_wait( L &, __cfa_pre_park pp_fn, void * pp_datum );

        // to set recursion count after getting signalled
        void on_wakeup( L &, size_t recursion );
};

static inline void pre_park_then_park( __cfa_pre_park pp_fn, void * pp_datum ) {
    pp_fn( pp_datum );
    park();
}

// macros for default routine impls for the is_blocking_lock trait that do not wait-morph

#define DEFAULT_ON_NOTIFY( lock_type ) \
    static inline void on_notify( lock_type & this, thread$ * t ){ unpark(t); }

#define DEFAULT_ON_WAIT( lock_type ) \
    static inline size_t on_wait( lock_type & this, __cfa_pre_park pp_fn, void * pp_datum ) { \
        unlock( this ); \
        pre_park_then_park( pp_fn, pp_datum ); \
        return 0; \
    }

// on_wakeup impl if lock should be reacquired after waking up
#define DEFAULT_ON_WAKEUP_REACQ( lock_type ) \
    static inline void on_wakeup( lock_type & this, size_t recursion ) { lock( this ); }

// on_wakeup impl if lock will not be reacquired after waking up
#define DEFAULT_ON_WAKEUP_NO_REACQ( lock_type ) \
    static inline void on_wakeup( lock_type & this, size_t recursion ) {}

//-----------------------------------------------------------------------------
// Semaphore
struct semaphore {
        __spinlock_t lock;
        int count;
        __queue_t(thread$) waiting;
};

void  ?{}(semaphore & this, int count = 1);
void ^?{}(semaphore & this);
bool   P (semaphore & this);
bool   V (semaphore & this);
bool   V (semaphore & this, unsigned count);
thread$ * V (semaphore & this, bool );

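// Example (hypothetical usage sketch, not part of this header's API):
// a producer/consumer handoff with P/V, assuming the usual semantics that
// P blocks while the count is zero and V increments the count, waking a
// blocked waiter if one is present.
//
//   semaphore items = { 0 };            // count starts at zero
//   thread Producer {};
//   void main( Producer & ) { /* produce */ V( items ); }
//   thread Consumer {};
//   void main( Consumer & ) { P( items ); /* consume */ }
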
//----------
struct single_acquisition_lock {
        inline blocking_lock;
};

static inline void  ?{}( single_acquisition_lock & this ) {((blocking_lock &)this){ false, false };}
static inline void ^?{}( single_acquisition_lock & this ) {}
static inline void   lock     ( single_acquisition_lock & this ) { lock    ( (blocking_lock &)this ); }
static inline bool   try_lock ( single_acquisition_lock & this ) { return try_lock( (blocking_lock &)this ); }
static inline void   unlock   ( single_acquisition_lock & this ) { unlock  ( (blocking_lock &)this ); }
static inline size_t on_wait  ( single_acquisition_lock & this, __cfa_pre_park pp_fn, void * pp_datum ) { return on_wait ( (blocking_lock &)this, pp_fn, pp_datum ); }
static inline void   on_wakeup( single_acquisition_lock & this, size_t v ) { on_wakeup ( (blocking_lock &)this, v ); }
static inline void   on_notify( single_acquisition_lock & this, struct thread$ * t ) { on_notify( (blocking_lock &)this, t ); }
static inline bool   register_select( single_acquisition_lock & this, select_node & node ) { return register_select( (blocking_lock &)this, node ); }
static inline bool   unregister_select( single_acquisition_lock & this, select_node & node ) { return unregister_select( (blocking_lock &)this, node ); }
static inline bool   on_selected( single_acquisition_lock & this, select_node & node ) { return on_selected( (blocking_lock &)this, node ); }
__CFA_SELECT_GET_TYPE( single_acquisition_lock );

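// Example (sketch): single_acquisition_lock is the everyday blocking lock --
// no recursive acquisition and no owner checks (constructed { false, false }).
//
//   single_acquisition_lock l;
//   void work() {
//       lock( l );
//       // ... critical section ...
//       unlock( l );
//   }
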
//----------
struct owner_lock {
        inline blocking_lock;
};

static inline void  ?{}( owner_lock & this ) {((blocking_lock &)this){ true, true };}
static inline void ^?{}( owner_lock & this ) {}
static inline void   lock     ( owner_lock & this ) { lock    ( (blocking_lock &)this ); }
static inline bool   try_lock ( owner_lock & this ) { return try_lock( (blocking_lock &)this ); }
static inline void   unlock   ( owner_lock & this ) { unlock  ( (blocking_lock &)this ); }
static inline size_t on_wait  ( owner_lock & this, __cfa_pre_park pp_fn, void * pp_datum ) { return on_wait ( (blocking_lock &)this, pp_fn, pp_datum ); }
static inline void   on_wakeup( owner_lock & this, size_t v ) { on_wakeup ( (blocking_lock &)this, v ); }
static inline void   on_notify( owner_lock & this, struct thread$ * t ) { on_notify( (blocking_lock &)this, t ); }
static inline bool   register_select( owner_lock & this, select_node & node ) { return register_select( (blocking_lock &)this, node ); }
static inline bool   unregister_select( owner_lock & this, select_node & node ) { return unregister_select( (blocking_lock &)this, node ); }
static inline bool   on_selected( owner_lock & this, select_node & node ) { return on_selected( (blocking_lock &)this, node ); }
__CFA_SELECT_GET_TYPE( owner_lock );

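// Example (sketch): owner_lock is constructed { true, true }, enabling
// ownership tracking and recursive acquisition, so a thread already holding
// the lock may lock it again provided locks and unlocks are balanced.
//
//   owner_lock l;
//   void inner() { lock( l ); /* ... */ unlock( l ); }
//   void outer() { lock( l ); inner(); unlock( l ); } // safe: same owner
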
//-----------------------------------------------------------------------------
// MCS Lock
struct mcs_node {
        mcs_node * volatile next;
        single_sem sem;
};

static inline void ?{}( mcs_node & this ) { this.next = 0p; }

static inline mcs_node * volatile & ?`next ( mcs_node * node ) {
        return node->next;
}

struct mcs_lock {
        mcs_queue(mcs_node) queue;
};

static inline void lock( mcs_lock & l, mcs_node & n ) {
        if(push(l.queue, &n))
                wait(n.sem);
}

static inline void unlock(mcs_lock & l, mcs_node & n) {
        mcs_node * next = advance(l.queue, &n);
        if(next) post(next->sem);
}

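// Example (sketch): MCS locks queue waiters through a per-acquisition node,
// so each caller supplies its own node (typically stack-allocated) and must
// pass the same node to the matching unlock.
//
//   mcs_lock l;
//   void work() {
//       mcs_node n;      // node must live for the whole critical section
//       lock( l, n );
//       // ... critical section ...
//       unlock( l, n );
//   }
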
//-----------------------------------------------------------------------------
// MCS Spin Lock
// - No recursive acquisition
// - Needs to be released by owner

struct mcs_spin_node {
        mcs_spin_node * volatile next;
        volatile bool locked;
};

struct mcs_spin_queue {
        mcs_spin_node * volatile tail;
};

static inline void ?{}( mcs_spin_node & this ) { this.next = 0p; this.locked = true; }

struct mcs_spin_lock {
        mcs_spin_queue queue;
};

static inline void lock( mcs_spin_lock & l, mcs_spin_node & n ) {
    n.locked = true;

        // ARM's weaker memory model needs explicit fences around the queue handoff
        #if defined(__ARM_ARCH)
        __asm__ __volatile__ ( "DMB ISH" ::: );
        #endif

        mcs_spin_node * prev = __atomic_exchange_n(&l.queue.tail, &n, __ATOMIC_SEQ_CST);
        if( prev == 0p ) return;
        prev->next = &n;

        #if defined(__ARM_ARCH)
        __asm__ __volatile__ ( "DMB ISH" ::: );
        #endif

        while( __atomic_load_n(&n.locked, __ATOMIC_RELAXED) ) Pause();

        #if defined(__ARM_ARCH)
        __asm__ __volatile__ ( "DMB ISH" ::: );
        #endif
}

static inline void unlock(mcs_spin_lock & l, mcs_spin_node & n) {
        #if defined(__ARM_ARCH)
        __asm__ __volatile__ ( "DMB ISH" ::: );
        #endif

        mcs_spin_node * n_ptr = &n;
        if (__atomic_compare_exchange_n(&l.queue.tail, &n_ptr, 0p, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) return;
        while (__atomic_load_n(&n.next, __ATOMIC_RELAXED) == 0p) Pause();

        #if defined(__ARM_ARCH)
        __asm__ __volatile__ ( "DMB ISH" ::: );
        #endif

        n.next->locked = false;
}

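// Example (sketch): same node-passing protocol as mcs_lock, but each waiter
// spins on its own node's locked flag instead of blocking, so this variant
// suits short critical sections only.
//
//   mcs_spin_lock l;
//   void work() {
//       mcs_spin_node n;
//       lock( l, n );
//       // ... short critical section ...
//       unlock( l, n );
//   }
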
//-----------------------------------------------------------------------------
// futex_mutex

// - Kernel thread blocking alternative to the spinlock
// - No ownership (will deadlock on reacquisition)
// - No reacquisition on wakeup
struct futex_mutex {
        // lock state: any state other than UNLOCKED is locked
        // enum LockState { UNLOCKED = 0, UNCONTENDED = 1, CONTENDED = 2 };

        // stores a lock state
        int val;
};

// to use for FUTEX_WAKE and FUTEX_WAIT (other futex calls will need more params)
static inline int futex(int *uaddr, int futex_op, int val) {
    return syscall(SYS_futex, uaddr, futex_op, val, NULL, NULL, 0);
}

static inline void ?{}( futex_mutex & this ) with(this) { val = 0; }

static inline bool internal_try_lock( futex_mutex & this, int & compare_val) with(this) {
        return __atomic_compare_exchange_n((int*)&val, (int*)&compare_val, 1, false, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
}

static inline int internal_exchange( futex_mutex & this ) with(this) {
        return __atomic_exchange_n((int*)&val, 2, __ATOMIC_ACQUIRE);
}

// if this is called recursively IT WILL DEADLOCK!!!!!
static inline void lock( futex_mutex & this ) with(this) {
        int state;

        for( int spin = 4; spin < 1024; spin += spin) {
                state = 0;
                // if unlocked, lock and return
                if (internal_try_lock(this, state)) return;
                if (2 == state) break;
                for (int i = 0; i < spin; i++) Pause();
        }

        // if not in contended state, set to be in contended state
        if (state != 2) state = internal_exchange(this);

        // block and spin until we win the lock
        while (state != 0) {
                futex((int*)&val, FUTEX_WAIT, 2); // if val is not 2 this returns with EWOULDBLOCK
                state = internal_exchange(this);
        }
}

static inline void unlock(futex_mutex & this) with(this) {
        // if uncontended do atomic unlock and then return
        if (__atomic_exchange_n(&val, 0, __ATOMIC_RELEASE) == 1) return;

        // otherwise threads are blocked so we must wake one
        futex((int *)&val, FUTEX_WAKE, 1);
}

DEFAULT_ON_NOTIFY( futex_mutex )
DEFAULT_ON_WAIT( futex_mutex )
DEFAULT_ON_WAKEUP_NO_REACQ( futex_mutex )

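// Example (sketch): futex_mutex is used like the other mutual-exclusion
// locks, but under contention it parks the kernel thread via futex(2). The
// 0/1/2 state encoding lets unlock skip the FUTEX_WAKE syscall entirely in
// the uncontended (val == 1) case.
//
//   futex_mutex m;
//   void work() {
//       lock( m );   // must not already be held by this thread
//       // ... critical section ...
//       unlock( m );
//   }
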
//-----------------------------------------------------------------------------
// go_mutex

// - Kernel thread blocking alternative to the spinlock
// - No ownership (will deadlock on reacquisition)
// - Golang's flavour of mutex
// - Impl taken from Golang: src/runtime/lock_futex.go
struct go_mutex {
        // lock state: any state other than UNLOCKED is locked
        // enum LockState { UNLOCKED = 0, LOCKED = 1, SLEEPING = 2 };

        // stores a lock state
        int val;
};
static inline void  ?{}( go_mutex & this ) with(this) { val = 0; }
static inline void ?{}( go_mutex & this, go_mutex this2 ) = void;
static inline void ?=?( go_mutex & this, go_mutex this2 ) = void;

static inline bool internal_try_lock(go_mutex & this, int & compare_val, int new_val ) with(this) {
        return __atomic_compare_exchange_n((int*)&val, (int*)&compare_val, new_val, false, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
}

static inline int internal_exchange(go_mutex & this, int swap ) with(this) {
        return __atomic_exchange_n((int*)&val, swap, __ATOMIC_ACQUIRE);
}

// if this is called recursively IT WILL DEADLOCK!!!!!
static inline void lock( go_mutex & this ) with( this ) {
        int state, init_state;

    // speculative grab
    state = internal_exchange(this, 1);
    if ( !state ) return; // state == 0
    init_state = state;
    for (;;) {
        for( int i = 0; i < 4; i++ ) {
            while( !val ) { // lock unlocked
                state = 0;
                if ( internal_try_lock( this, state, init_state ) ) return;
            }
            for (int i = 0; i < 30; i++) Pause();
        }

        while( !val ) { // lock unlocked
            state = 0;
            if ( internal_try_lock( this, state, init_state ) ) return;
        }
        sched_yield();

        // if not in contended state, set to be in contended state
        state = internal_exchange( this, 2 );
        if ( !state ) return; // state == 0
        init_state = 2;
        futex( (int*)&val, FUTEX_WAIT, 2 ); // if val is not 2 this returns with EWOULDBLOCK
    }
}

static inline void unlock( go_mutex & this ) with(this) {
        // if uncontended do atomic unlock and then return
        if ( __atomic_exchange_n(&val, 0, __ATOMIC_RELEASE) == 1 ) return;

        // otherwise threads are blocked so we must wake one
        futex( (int *)&val, FUTEX_WAKE, 1 );
}

DEFAULT_ON_NOTIFY( go_mutex )
DEFAULT_ON_WAIT( go_mutex )
DEFAULT_ON_WAKEUP_NO_REACQ( go_mutex )

//-----------------------------------------------------------------------------
// Exponential backoff then block lock
struct exp_backoff_then_block_lock {
        // Spin lock used for mutual exclusion
        __spinlock_t spinlock;

        // List of blocked threads
        dlist( thread$ ) blocked_threads;

        // Used for comparing and exchanging
        volatile size_t lock_value;
};

static inline void  ?{}( exp_backoff_then_block_lock & this ) {
        this.spinlock{};
        this.blocked_threads{};
        this.lock_value = 0;
}
static inline void ?{}( exp_backoff_then_block_lock & this, exp_backoff_then_block_lock this2 ) = void;
static inline void ?=?( exp_backoff_then_block_lock & this, exp_backoff_then_block_lock this2 ) = void;

static inline void  ^?{}( exp_backoff_then_block_lock & this ){}

static inline bool internal_try_lock( exp_backoff_then_block_lock & this, size_t & compare_val ) with(this) {
        return __atomic_compare_exchange_n(&lock_value, &compare_val, 1, false, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
}

static inline bool try_lock( exp_backoff_then_block_lock & this ) { size_t compare_val = 0; return internal_try_lock( this, compare_val ); }

static inline bool try_lock_contention( exp_backoff_then_block_lock & this ) with(this) {
        return !__atomic_exchange_n( &lock_value, 2, __ATOMIC_ACQUIRE );
}

static inline bool block( exp_backoff_then_block_lock & this ) with(this) {
    lock( spinlock __cfaabi_dbg_ctx2 );
    if (__atomic_load_n( &lock_value, __ATOMIC_SEQ_CST) != 2) {
        unlock( spinlock );
        return true;
    }
    insert_last( blocked_threads, *active_thread() );
    unlock( spinlock );
    park( );
    return true;
}

static inline void lock( exp_backoff_then_block_lock & this ) with(this) {
        size_t compare_val = 0;
        int spin = 4;

        // exponential backoff: the spin count doubles each round
        for( ;; ) {
                compare_val = 0;
                if (internal_try_lock(this, compare_val)) return;
                if (2 == compare_val) break;
                for (int i = 0; i < spin; i++) Pause();
                if (spin >= 1024) break;
                spin += spin;
        }

        if(2 != compare_val && try_lock_contention(this)) return;
        // block until signalled
        while (block(this)) if(try_lock_contention(this)) return;
}

static inline void unlock( exp_backoff_then_block_lock & this ) with(this) {
    if (__atomic_exchange_n(&lock_value, 0, __ATOMIC_RELEASE) == 1) return;
    lock( spinlock __cfaabi_dbg_ctx2 );
    thread$ * t = &try_pop_front( blocked_threads );
    unlock( spinlock );
    unpark( t );
}

DEFAULT_ON_NOTIFY( exp_backoff_then_block_lock )
DEFAULT_ON_WAIT( exp_backoff_then_block_lock )
DEFAULT_ON_WAKEUP_REACQ( exp_backoff_then_block_lock )

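// Example (sketch): this lock additionally exposes try_lock, so callers can
// attempt the fast path without risking the backoff/block slow path.
//
//   exp_backoff_then_block_lock l;
//   void work() {
//       if ( try_lock( l ) ) {
//           // ... fast path: got the lock without waiting ...
//           unlock( l );
//       } else {
//           lock( l );   // slow path: spin with backoff, then block
//           // ...
//           unlock( l );
//       }
//   }
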
//-----------------------------------------------------------------------------
// Fast Block Lock

// minimal blocking lock
// - No reacquire for cond var
// - No recursive acquisition
// - No ownership
struct fast_block_lock {
        // List of blocked threads
        dlist( thread$ ) blocked_threads;

        // Spin lock used for mutual exclusion
        __spinlock_t lock;

        // flag showing if lock is held
        bool held:1;
};

static inline void  ?{}( fast_block_lock & this ) with(this) {
        lock{};
        blocked_threads{};
        held = false;
}
static inline void ^?{}( fast_block_lock & this ) {}
static inline void ?{}( fast_block_lock & this, fast_block_lock this2 ) = void;
static inline void ?=?( fast_block_lock & this, fast_block_lock this2 ) = void;

// if this is called recursively IT WILL DEADLOCK!!!!!
static inline void lock( fast_block_lock & this ) with(this) {
        lock( lock __cfaabi_dbg_ctx2 );
        if ( held ) {
                insert_last( blocked_threads, *active_thread() );
                unlock( lock );
                park( );
                return;
        }
        held = true;
        unlock( lock );
}

static inline void unlock( fast_block_lock & this ) with(this) {
        lock( lock __cfaabi_dbg_ctx2 );
        /* paranoid */ verifyf( held != false, "Attempt to release lock %p that isn't held", &this );
        thread$ * t = &try_pop_front( blocked_threads );
        held = ( t ? true : false );
        unpark( t );
        unlock( lock );
}

static inline void on_notify( fast_block_lock & this, struct thread$ * t ) with(this) {
    lock( lock __cfaabi_dbg_ctx2 );
    insert_last( blocked_threads, *t );
    unlock( lock );
}
DEFAULT_ON_WAIT( fast_block_lock )
DEFAULT_ON_WAKEUP_NO_REACQ( fast_block_lock )

//-----------------------------------------------------------------------------
// simple_owner_lock

// pthread owner lock
// - reacquire for cond var
// - recursive acquisition
// - ownership
struct simple_owner_lock {
        // List of blocked threads
        dlist( select_node ) blocked_threads;

        // Spin lock used for mutual exclusion
        __spinlock_t lock;

        // owner showing if lock is held
        struct thread$ * owner;

        size_t recursion_count;
};

static inline void  ?{}( simple_owner_lock & this ) with(this) {
        lock{};
        blocked_threads{};
        owner = 0p;
        recursion_count = 0;
}
static inline void ^?{}( simple_owner_lock & this ) {}
static inline void ?{}( simple_owner_lock & this, simple_owner_lock this2 ) = void;
static inline void ?=?( simple_owner_lock & this, simple_owner_lock this2 ) = void;

static inline void lock( simple_owner_lock & this ) with(this) {
        if ( owner == active_thread() ) {
                recursion_count++;
                return;
        }
        lock( lock __cfaabi_dbg_ctx2 );

        if ( owner != 0p ) {
                select_node node;
                insert_last( blocked_threads, node );
                unlock( lock );
                park( );
                return;
        }
        owner = active_thread();
        recursion_count = 1;
        unlock( lock );
}

static inline void pop_node( simple_owner_lock & this ) with(this) {
    __handle_waituntil_OR( blocked_threads );
    select_node * node = &try_pop_front( blocked_threads );
    if ( node ) {
        owner = node->blocked_thread;
        recursion_count = 1;
        // if ( !node->clause_status || __make_select_node_available( *node ) ) unpark( node->blocked_thread );
        wake_one( blocked_threads, *node );
    } else {
        owner = 0p;
        recursion_count = 0;
    }
}

static inline void unlock( simple_owner_lock & this ) with(this) {
        lock( lock __cfaabi_dbg_ctx2 );
        /* paranoid */ verifyf( owner != 0p, "Attempt to release lock %p that isn't held", &this );
        /* paranoid */ verifyf( owner == active_thread(), "Thread %p other than the owner %p attempted to release owner lock %p", owner, active_thread(), &this );
        // if recursion count is zero release lock and set new owner if one is waiting
        recursion_count--;
        if ( recursion_count == 0 ) {
                pop_node( this );
        }
        unlock( lock );
}

static inline void on_notify( simple_owner_lock & this, thread$ * t ) with(this) {
        lock( lock __cfaabi_dbg_ctx2 );
        // lock held
        if ( owner != 0p ) {
                insert_last( blocked_threads, *(select_node *)t->link_node );
        }
        // lock not held
        else {
                owner = t;
                recursion_count = 1;
                unpark( t );
        }
        unlock( lock );
}

static inline size_t on_wait( simple_owner_lock & this, __cfa_pre_park pp_fn, void * pp_datum ) with(this) {
        lock( lock __cfaabi_dbg_ctx2 );
        /* paranoid */ verifyf( owner != 0p, "Attempt to release lock %p that isn't held", &this );
        /* paranoid */ verifyf( owner == active_thread(), "Thread %p other than the owner %p attempted to release owner lock %p", owner, active_thread(), &this );

        size_t ret = recursion_count;

        pop_node( this );

    // stash this thread's node so a later on_notify can link it back onto the queue
    select_node node;
    active_thread()->link_node = (void *)&node;
        unlock( lock );

    pre_park_then_park( pp_fn, pp_datum );

        return ret;
}

static inline void on_wakeup( simple_owner_lock & this, size_t recursion ) with(this) { recursion_count = recursion; }

// waituntil() support
static inline bool register_select( simple_owner_lock & this, select_node & node ) with(this) {
    lock( lock __cfaabi_dbg_ctx2 );

    // check if we can complete operation. If so race to establish winner in special OR case
    if ( !node.park_counter && ( owner == active_thread() || owner == 0p ) ) {
        if ( !__make_select_node_available( node ) ) { // we didn't win the race so give up on registering
           unlock( lock );
           return false;
        }
    }

    if ( owner == active_thread() ) {
        recursion_count++;
        if ( node.park_counter ) __make_select_node_available( node );
        unlock( lock );
        return true;
    }

    if ( owner != 0p ) {
        insert_last( blocked_threads, node );
        unlock( lock );
        return false;
    }

    owner = active_thread();
    recursion_count = 1;

    if ( node.park_counter ) __make_select_node_available( node );
    unlock( lock );
    return true;
}

static inline bool unregister_select( simple_owner_lock & this, select_node & node ) with(this) {
    lock( lock __cfaabi_dbg_ctx2 );
    if ( node`isListed ) {
        remove( node );
        unlock( lock );
        return false;
    }

    if ( owner == active_thread() ) {
        recursion_count--;
        if ( recursion_count == 0 ) {
            pop_node( this );
        }
    }
    unlock( lock );
    return false;
}

static inline bool on_selected( simple_owner_lock & this, select_node & node ) { return true; }
__CFA_SELECT_GET_TYPE( simple_owner_lock );

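// Example (sketch): the register_select/unregister_select/on_selected hooks
// above exist so this lock can appear as a clause of CFA's waituntil
// statement (see select.hfa). The shape of such a use, shown as an assumption
// about the statement's syntax rather than a definitive reference:
//
//   simple_owner_lock a, b;
//   waituntil( a ) { /* selected: a has been acquired by this thread */ }
//   or waituntil( b ) { /* selected: b has been acquired by this thread */ }
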
//-----------------------------------------------------------------------------
// Spin Queue Lock

// - No reacquire for cond var
// - No recursive acquisition
// - No ownership
// - spin lock whose unlock is a single release store (no queue operations or atomic RMW)
struct spin_queue_lock {
        // Spin lock used for mutual exclusion
        mcs_spin_lock lock;

        // flag showing if lock is held
        volatile bool held;
};

static inline void  ?{}( spin_queue_lock & this ) with(this) {
        lock{};
        held = false;
}
static inline void ^?{}( spin_queue_lock & this ) {}
static inline void ?{}( spin_queue_lock & this, spin_queue_lock this2 ) = void;
static inline void ?=?( spin_queue_lock & this, spin_queue_lock this2 ) = void;

// if this is called recursively IT WILL DEADLOCK!
static inline void lock( spin_queue_lock & this ) with(this) {
        mcs_spin_node node;
        lock( lock, node );
        while(__atomic_load_n(&held, __ATOMIC_SEQ_CST)) Pause();
        __atomic_store_n(&held, true, __ATOMIC_SEQ_CST);
        unlock( lock, node );
}

static inline void unlock( spin_queue_lock & this ) with(this) {
        __atomic_store_n(&held, false, __ATOMIC_RELEASE);
}

DEFAULT_ON_NOTIFY( spin_queue_lock )
DEFAULT_ON_WAIT( spin_queue_lock )
DEFAULT_ON_WAKEUP_REACQ( spin_queue_lock )

//-----------------------------------------------------------------------------
// MCS Block Spin Lock

// - No reacquire for cond var
// - No recursive acquisition
// - No ownership
// - Waiters block in the MCS queue; only the queue head spins on the held flag
//   (like spin_queue_lock, but non-head threads block instead of spinning)
struct mcs_block_spin_lock {
        // Spin lock used for mutual exclusion
        mcs_lock lock;

        // flag showing if lock is held
        volatile bool held;
};

static inline void  ?{}( mcs_block_spin_lock & this ) with(this) {
        lock{};
        held = false;
}
static inline void ^?{}( mcs_block_spin_lock & this ) {}
static inline void ?{}( mcs_block_spin_lock & this, mcs_block_spin_lock this2 ) = void;
static inline void ?=?( mcs_block_spin_lock & this, mcs_block_spin_lock this2 ) = void;

// if this is called recursively IT WILL DEADLOCK!!!!!
static inline void lock( mcs_block_spin_lock & this ) with(this) {
        mcs_node node;
        lock( lock, node );
        while(__atomic_load_n(&held, __ATOMIC_SEQ_CST)) Pause();
        __atomic_store_n(&held, true, __ATOMIC_SEQ_CST);
        unlock( lock, node );
}

static inline void unlock(mcs_block_spin_lock & this) with(this) {
        __atomic_store_n(&held, false, __ATOMIC_SEQ_CST);
}

DEFAULT_ON_NOTIFY( mcs_block_spin_lock )
DEFAULT_ON_WAIT( mcs_block_spin_lock )
DEFAULT_ON_WAKEUP_REACQ( mcs_block_spin_lock )

//-----------------------------------------------------------------------------
// Block Spin Lock

// - No reacquire for cond var
// - No recursive acquisition
// - No ownership
// - Waiters block on the internal fast_block_lock; only the head thread spins on the held flag
struct block_spin_lock {
        // Spin lock used for mutual exclusion
        fast_block_lock lock;

        // flag showing if lock is held
        volatile bool held;
};

static inline void  ?{}( block_spin_lock & this ) with(this) {
        lock{};
        held = false;
}
static inline void ^?{}( block_spin_lock & this ) {}
static inline void ?{}( block_spin_lock & this, block_spin_lock this2 ) = void;
static inline void ?=?( block_spin_lock & this, block_spin_lock this2 ) = void;

// if this is called recursively IT WILL DEADLOCK!!!!!
static inline void lock( block_spin_lock & this ) with(this) {
        lock( lock );
        while(__atomic_load_n(&held, __ATOMIC_SEQ_CST)) Pause();
        __atomic_store_n(&held, true, __ATOMIC_RELEASE);
        unlock( lock );
}

static inline void unlock( block_spin_lock & this ) with(this) {
        __atomic_store_n(&held, false, __ATOMIC_RELEASE);
}

static inline void on_notify( block_spin_lock & this, struct thread$ * t ) with(this.lock) {
        // first we acquire internal fast_block_lock
        lock( lock __cfaabi_dbg_ctx2 );
        if ( held ) { // if internal fast_block_lock is held
                insert_last( blocked_threads, *t );
                unlock( lock );
                return;
        }
        // if internal fast_block_lock is not held
        held = true;
        unlock( lock );

        unpark(t);
}
DEFAULT_ON_WAIT( block_spin_lock )
static inline void on_wakeup( block_spin_lock & this, size_t recursion ) with(this) {
        // now we acquire the entire block_spin_lock upon waking up
        while(__atomic_load_n(&held, __ATOMIC_SEQ_CST)) Pause();
        __atomic_store_n(&held, true, __ATOMIC_RELEASE);
        unlock( lock ); // Now we release the internal fast_block_lock
}

//-----------------------------------------------------------------------------
// info_thread
// the info thread is a wrapper around a thread used
// to store extra data for use in the condition variable
forall(L & | is_blocking_lock(L)) {
        struct info_thread;
}

//-----------------------------------------------------------------------------
// Synchronization Locks
forall(L & | is_blocking_lock(L)) {

        //-----------------------------------------------------------------------------
        // condition_variable

        // The multi-tool condition variable
        // - can pass timeouts to wait for either a signal or timeout
        // - can wait without passing a lock
        // - can have waiters reacquire different locks while waiting on the same cond var
        // - has shadow queue
        // - can be signalled outside of critical sections with no locks held
        struct condition_variable {
                // Spin lock used for mutual exclusion
                __spinlock_t lock;

                // List of blocked threads
                dlist( info_thread(L) ) blocked_threads;

                // Count of current blocked threads
                int count;
        };

        void  ?{}( condition_variable(L) & this );
        void ^?{}( condition_variable(L) & this );

        bool notify_one( condition_variable(L) & this );
        bool notify_all( condition_variable(L) & this );

        uintptr_t front( condition_variable(L) & this );

        bool empty  ( condition_variable(L) & this );
        int  counter( condition_variable(L) & this );

        void wait( condition_variable(L) & this );
        void wait( condition_variable(L) & this, uintptr_t info );
        bool wait( condition_variable(L) & this, Duration duration );
        bool wait( condition_variable(L) & this, uintptr_t info, Duration duration );

        void wait( condition_variable(L) & this, L & l );
        void wait( condition_variable(L) & this, L & l, uintptr_t info );
        bool wait( condition_variable(L) & this, L & l, Duration duration );
        bool wait( condition_variable(L) & this, L & l, uintptr_t info, Duration duration );

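        // Example (sketch, assuming a user-defined `ready` flag): the common
        // pattern passes the held lock to wait so it is atomically released
        // while blocking and reacquired before wait returns.
        //
        //   single_acquisition_lock l;
        //   condition_variable( single_acquisition_lock ) cv;
        //   bool ready = false;
        //
        //   void consumer() {
        //       lock( l );
        //       while ( !ready ) wait( cv, l );   // releases l while blocked
        //       // ... consume ...
        //       unlock( l );
        //   }
        //   void producer() {
        //       lock( l );
        //       ready = true;
        //       unlock( l );
        //       notify_one( cv );   // legal with no locks held for this cond var
        //   }
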
        //-----------------------------------------------------------------------------
        // fast_cond_var

        // The trimmed and slim condition variable
        // - no internal lock so you must hold a lock while using this cond var
        // - signalling without holding branded lock is UNSAFE!
        // - only allows usage of one lock, cond var is branded after usage

        struct fast_cond_var {
                // List of blocked threads
                dlist( info_thread(L) ) blocked_threads;
                #ifdef __CFA_DEBUG__
                L * lock_used;
                #endif
        };

        void  ?{}( fast_cond_var(L) & this );
        void ^?{}( fast_cond_var(L) & this );

        bool notify_one( fast_cond_var(L) & this );
        bool notify_all( fast_cond_var(L) & this );

        uintptr_t front( fast_cond_var(L) & this );
        bool empty  ( fast_cond_var(L) & this );

        void wait( fast_cond_var(L) & this, L & l );
        void wait( fast_cond_var(L) & this, L & l, uintptr_t info );

        //-----------------------------------------------------------------------------
        // pthread_cond_var
        //
        // - cond var with minimal footprint
        // - supports operations needed for pthread cond

        struct pthread_cond_var {
                dlist( info_thread(L) ) blocked_threads;
                __spinlock_t lock;
        };

        void  ?{}( pthread_cond_var(L) & this );
        void ^?{}( pthread_cond_var(L) & this );

        bool notify_one( pthread_cond_var(L) & this );
        bool notify_all( pthread_cond_var(L) & this );

        uintptr_t front( pthread_cond_var(L) & this );
        bool empty ( pthread_cond_var(L) & this );

        void wait( pthread_cond_var(L) & this, L & l );
        void wait( pthread_cond_var(L) & this, L & l, uintptr_t info );
        bool wait( pthread_cond_var(L) & this, L & l, timespec t );
        bool wait( pthread_cond_var(L) & this, L & l, uintptr_t info, timespec t );
}