source: libcfa/src/concurrency/locks.hfa @ 7e4bd9b6

Last change on this file since 7e4bd9b6 was 1db6d70, checked in by caparsons <caparson@…>, 13 months ago

removed unneeded fstream include from locks.hfa

//
// Cforall Version 1.0.0 Copyright (C) 2021 University of Waterloo
//
// The contents of this file are covered under the licence agreement in the
// file "LICENCE" distributed with Cforall.
//
// locks.hfa -- PUBLIC
// Runtime locks that are used with the runtime thread system.
//
// Author           : Colby Alexander Parsons
// Created On       : Thu Jan 21 19:46:50 2021
// Last Modified By :
// Last Modified On :
// Update Count     :
//

#pragma once

#include <stdbool.h>
#include <stdio.h>

#include "bits/weakso_locks.hfa"
#include "containers/lockfree.hfa"
#include "containers/list.hfa"

#include "limits.hfa"
#include "thread.hfa"

#include "time_t.hfa"
#include "time.hfa"

#include "select.hfa"

// futex headers
#include <linux/futex.h>      /* Definition of FUTEX_* constants */
#include <sys/syscall.h>      /* Definition of SYS_* constants */
#include <unistd.h>

typedef void (*__cfa_pre_park)( void * );

static inline void pre_park_noop( void * ) {}

//-----------------------------------------------------------------------------
// is_blocking_lock
forall( L & | sized(L) )
trait is_blocking_lock {
        // For synchronization locks to use when acquiring
        void on_notify( L &, struct thread$ * );

        // For synchronization locks to use when releasing
        size_t on_wait( L &, __cfa_pre_park pp_fn, void * pp_datum );

        // to set recursion count after getting signalled
        void on_wakeup( L &, size_t recursion );
};

static inline void pre_park_then_park( __cfa_pre_park pp_fn, void * pp_datum ) {
    pp_fn( pp_datum );
    park();
}

// macros for default routine impls for is_blocking_lock trait that do not wait-morph

#define DEFAULT_ON_NOTIFY( lock_type ) \
    static inline void on_notify( lock_type & this, thread$ * t ){ unpark(t); }

#define DEFAULT_ON_WAIT( lock_type ) \
    static inline size_t on_wait( lock_type & this, __cfa_pre_park pp_fn, void * pp_datum ) { \
        unlock( this ); \
        pre_park_then_park( pp_fn, pp_datum ); \
        return 0; \
    }

// on_wakeup impl if lock should be reacquired after waking up
#define DEFAULT_ON_WAKEUP_REACQ( lock_type ) \
    static inline void on_wakeup( lock_type & this, size_t recursion ) { lock( this ); }

// on_wakeup impl if lock will not be reacquired after waking up
#define DEFAULT_ON_WAKEUP_NO_REACQ( lock_type ) \
    static inline void on_wakeup( lock_type & this, size_t recursion ) {}

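// Illustrative sketch (not part of the original header): for a hypothetical lock type
// "my_lock" that already provides lock()/unlock() and does not wait-morph, the trait above
// can be satisfied with the macros, e.g.
//
//   DEFAULT_ON_NOTIFY( my_lock )        // on_notify: unpark the signalled thread
//   DEFAULT_ON_WAIT( my_lock )          // on_wait: unlock, run the pre-park hook, then park
//   DEFAULT_ON_WAKEUP_REACQ( my_lock )  // on_wakeup: reacquire the lock after waking
//
// which is the pattern several of the locks below use.
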
//-----------------------------------------------------------------------------
// Semaphore
struct semaphore {
        __spinlock_t lock;
        int count;
        __queue_t(thread$) waiting;
};

void  ?{}(semaphore & this, int count = 1);
void ^?{}(semaphore & this);
bool   P (semaphore & this);
bool   V (semaphore & this);
bool   V (semaphore & this, unsigned count);
thread$ * V (semaphore & this, bool );

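// Usage sketch (illustrative only): a counting semaphore with the classic P/V interface.
//
//   semaphore s;          // default count of 1
//   P( s );               // acquire; may block the calling thread
//   /* ... */
//   V( s );               // release; may unpark a waiting thread
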
//----------
struct single_acquisition_lock {
        inline blocking_lock;
};

static inline void  ?{}( single_acquisition_lock & this ) {((blocking_lock &)this){ false, false };}
static inline void ^?{}( single_acquisition_lock & this ) {}
static inline void   lock     ( single_acquisition_lock & this ) { lock    ( (blocking_lock &)this ); }
static inline bool   try_lock ( single_acquisition_lock & this ) { return try_lock( (blocking_lock &)this ); }
static inline void   unlock   ( single_acquisition_lock & this ) { unlock  ( (blocking_lock &)this ); }
static inline size_t on_wait  ( single_acquisition_lock & this, __cfa_pre_park pp_fn, void * pp_datum ) { return on_wait ( (blocking_lock &)this, pp_fn, pp_datum ); }
static inline void   on_wakeup( single_acquisition_lock & this, size_t v ) { on_wakeup ( (blocking_lock &)this, v ); }
static inline void   on_notify( single_acquisition_lock & this, struct thread$ * t ) { on_notify( (blocking_lock &)this, t ); }
static inline bool   register_select( single_acquisition_lock & this, select_node & node ) { return register_select( (blocking_lock &)this, node ); }
static inline bool   unregister_select( single_acquisition_lock & this, select_node & node ) { return unregister_select( (blocking_lock &)this, node ); }
static inline void   on_selected( single_acquisition_lock & this, select_node & node ) { on_selected( (blocking_lock &)this, node ); }

//----------
struct owner_lock {
        inline blocking_lock;
};

static inline void  ?{}( owner_lock & this ) {((blocking_lock &)this){ true, true };}
static inline void ^?{}( owner_lock & this ) {}
static inline void   lock     ( owner_lock & this ) { lock    ( (blocking_lock &)this ); }
static inline bool   try_lock ( owner_lock & this ) { return try_lock( (blocking_lock &)this ); }
static inline void   unlock   ( owner_lock & this ) { unlock  ( (blocking_lock &)this ); }
static inline size_t on_wait  ( owner_lock & this, __cfa_pre_park pp_fn, void * pp_datum ) { return on_wait ( (blocking_lock &)this, pp_fn, pp_datum ); }
static inline void   on_wakeup( owner_lock & this, size_t v ) { on_wakeup ( (blocking_lock &)this, v ); }
static inline void   on_notify( owner_lock & this, struct thread$ * t ) { on_notify( (blocking_lock &)this, t ); }
static inline bool   register_select( owner_lock & this, select_node & node ) { return register_select( (blocking_lock &)this, node ); }
static inline bool   unregister_select( owner_lock & this, select_node & node ) { return unregister_select( (blocking_lock &)this, node ); }
static inline void   on_selected( owner_lock & this, select_node & node ) { on_selected( (blocking_lock &)this, node ); }

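// Usage sketch (illustrative only): both wrappers expose the usual lock/try_lock/unlock
// interface of the underlying blocking_lock. As suggested by its { true, true } construction,
// owner_lock also tracks its owner and tolerates repeated acquisition by that owner, while
// single_acquisition_lock does not.
//
//   owner_lock o;
//   lock( o );
//   lock( o );            // the owning thread may acquire again
//   unlock( o );
//   unlock( o );          // released once the acquire/release calls balance
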
//-----------------------------------------------------------------------------
// MCS Lock
struct mcs_node {
        mcs_node * volatile next;
        single_sem sem;
};

static inline void ?{}(mcs_node & this) { this.next = 0p; }

static inline mcs_node * volatile & ?`next ( mcs_node * node ) {
        return node->next;
}

struct mcs_lock {
        mcs_queue(mcs_node) queue;
};

static inline void lock(mcs_lock & l, mcs_node & n) {
        if(push(l.queue, &n))
                wait(n.sem);
}

static inline void unlock(mcs_lock & l, mcs_node & n) {
        mcs_node * next = advance(l.queue, &n);
        if(next) post(next->sem);
}

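// Usage sketch (illustrative only): MCS locks take a caller-supplied queue node that must stay
// alive from lock() to the matching unlock(), so it is typically stack-allocated.
//
//   mcs_lock l;
//   void critical_section( mcs_lock & l ) {
//       mcs_node n;       // one node per acquisition
//       lock( l, n );
//       /* ... */
//       unlock( l, n );
//   }
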
//-----------------------------------------------------------------------------
// MCS Spin Lock
// - No recursive acquisition
// - Needs to be released by owner

struct mcs_spin_node {
        mcs_spin_node * volatile next;
        volatile bool locked;
};

struct mcs_spin_queue {
        mcs_spin_node * volatile tail;
};

static inline void ?{}(mcs_spin_node & this) { this.next = 0p; this.locked = true; }

struct mcs_spin_lock {
        mcs_spin_queue queue;
};

static inline void lock(mcs_spin_lock & l, mcs_spin_node & n) {
        n.locked = true;
        mcs_spin_node * prev = __atomic_exchange_n(&l.queue.tail, &n, __ATOMIC_SEQ_CST);
        if( prev == 0p ) return;
        prev->next = &n;
        while( __atomic_load_n(&n.locked, __ATOMIC_RELAXED) ) Pause();
}

static inline void unlock(mcs_spin_lock & l, mcs_spin_node & n) {
        mcs_spin_node * n_ptr = &n;
        if (__atomic_compare_exchange_n(&l.queue.tail, &n_ptr, 0p, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) return;
        while (__atomic_load_n(&n.next, __ATOMIC_RELAXED) == 0p) Pause();
        n.next->locked = false;
}

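// Usage sketch (illustrative only): same calling convention as mcs_lock above, but waiters spin
// on their own node's "locked" flag instead of blocking on a semaphore.
//
//   mcs_spin_lock l;
//   mcs_spin_node n;
//   lock( l, n );
//   /* ... */
//   unlock( l, n );
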
//-----------------------------------------------------------------------------
// futex_mutex

// - Kernel-thread-blocking alternative to the spinlock
// - No ownership (will deadlock on reacq)
// - no reacq on wakeup
struct futex_mutex {
        // lock state: any state other than UNLOCKED is locked
        // enum LockState { UNLOCKED = 0, UNCONTENDED = 1, CONTENDED = 2 };

        // stores a lock state
        int val;
};

// to use for FUTEX_WAKE and FUTEX_WAIT (other futex calls will need more params)
static inline int futex(int *uaddr, int futex_op, int val) {
    return syscall(SYS_futex, uaddr, futex_op, val, NULL, NULL, 0);
}

static inline void ?{}( futex_mutex & this ) with(this) { val = 0; }

static inline bool internal_try_lock( futex_mutex & this, int & compare_val) with(this) {
        return __atomic_compare_exchange_n((int*)&val, (int*)&compare_val, 1, false, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
}

static inline int internal_exchange( futex_mutex & this ) with(this) {
        return __atomic_exchange_n((int*)&val, 2, __ATOMIC_ACQUIRE);
}

// if this is called recursively IT WILL DEADLOCK!!!!!
static inline void lock( futex_mutex & this ) with(this) {
        int state;

        for( int spin = 4; spin < 1024; spin += spin) {
                state = 0;
                // if unlocked, lock and return
                if (internal_try_lock(this, state)) return;
                if (2 == state) break;
                for (int i = 0; i < spin; i++) Pause();
        }

        // if not in contended state, set to be in contended state
        if (state != 2) state = internal_exchange(this);

        // block and spin until we win the lock
        while (state != 0) {
                futex((int*)&val, FUTEX_WAIT, 2); // if val is not 2 this returns with EWOULDBLOCK
                state = internal_exchange(this);
        }
}

static inline void unlock(futex_mutex & this) with(this) {
        // if uncontended do atomic unlock and then return
        if (__atomic_exchange_n(&val, 0, __ATOMIC_RELEASE) == 1) return;

        // otherwise threads are blocked so we must wake one
        futex((int *)&val, FUTEX_WAKE, 1);
}

DEFAULT_ON_NOTIFY( futex_mutex )
DEFAULT_ON_WAIT( futex_mutex )
DEFAULT_ON_WAKEUP_NO_REACQ( futex_mutex )

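// Behaviour sketch (illustrative only), following the commented-out LockState enum above: an
// uncontended lock()/unlock() pair only performs the 0 -> 1 -> 0 atomic transitions and never
// enters the kernel; once a waiter forces the state to CONTENDED (2), unlock() sees a value
// other than 1 and issues FUTEX_WAKE. The calling convention matches the other mutual-exclusion
// locks:
//
//   futex_mutex m;
//   lock( m );            // must not be called recursively
//   /* ... */
//   unlock( m );
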
//-----------------------------------------------------------------------------
// go_mutex

// - Kernel-thread-blocking alternative to the spinlock
// - No ownership (will deadlock on reacq)
// - Golang's flavour of mutex
// - Impl taken from Golang: src/runtime/lock_futex.go
struct go_mutex {
        // lock state: any state other than UNLOCKED is locked
        // enum LockState { UNLOCKED = 0, LOCKED = 1, SLEEPING = 2 };

        // stores a lock state
        int val;
};
static inline void  ?{}( go_mutex & this ) with(this) { val = 0; }
// static inline void ?{}( go_mutex & this, go_mutex this2 ) = void; // these don't compile correctly at the moment so they should be omitted
// static inline void ?=?( go_mutex & this, go_mutex this2 ) = void;

static inline bool internal_try_lock(go_mutex & this, int & compare_val, int new_val ) with(this) {
        return __atomic_compare_exchange_n((int*)&val, (int*)&compare_val, new_val, false, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
}

static inline int internal_exchange(go_mutex & this, int swap ) with(this) {
        return __atomic_exchange_n((int*)&val, swap, __ATOMIC_ACQUIRE);
}

// if this is called recursively IT WILL DEADLOCK!!!!!
static inline void lock( go_mutex & this ) with( this ) {
        int state, init_state;

    // speculative grab
    state = internal_exchange(this, 1);
    if ( !state ) return; // state == 0
    init_state = state;
    for (;;) {
        for( int i = 0; i < 4; i++ ) {
            while( !val ) { // lock unlocked
                state = 0;
                if ( internal_try_lock( this, state, init_state ) ) return;
            }
            for (int i = 0; i < 30; i++) Pause();
        }

        while( !val ) { // lock unlocked
            state = 0;
            if ( internal_try_lock( this, state, init_state ) ) return;
        }
        sched_yield();

        // if not in contended state, set to be in contended state
        state = internal_exchange( this, 2 );
        if ( !state ) return; // state == 0
        init_state = 2;
        futex( (int*)&val, FUTEX_WAIT, 2 ); // if val is not 2 this returns with EWOULDBLOCK
    }
}

static inline void unlock( go_mutex & this ) with(this) {
        // if uncontended do atomic unlock and then return
        if ( __atomic_exchange_n(&val, 0, __ATOMIC_RELEASE) == 1 ) return;

        // otherwise threads are blocked so we must wake one
        futex( (int *)&val, FUTEX_WAKE, 1 );
}

DEFAULT_ON_NOTIFY( go_mutex )
DEFAULT_ON_WAIT( go_mutex )
DEFAULT_ON_WAKEUP_NO_REACQ( go_mutex )

//-----------------------------------------------------------------------------
// Exponential backoff then block lock
struct exp_backoff_then_block_lock {
        // Spin lock used for mutual exclusion
        __spinlock_t spinlock;

        // List of blocked threads
        dlist( thread$ ) blocked_threads;

        // Used for comparing and exchanging
        volatile size_t lock_value;
};

static inline void  ?{}( exp_backoff_then_block_lock & this ) {
        this.spinlock{};
        this.blocked_threads{};
        this.lock_value = 0;
}
static inline void ?{}( exp_backoff_then_block_lock & this, exp_backoff_then_block_lock this2 ) = void;
static inline void ?=?( exp_backoff_then_block_lock & this, exp_backoff_then_block_lock this2 ) = void;

static inline void  ^?{}( exp_backoff_then_block_lock & this ){}

static inline bool internal_try_lock( exp_backoff_then_block_lock & this, size_t & compare_val ) with(this) {
        return __atomic_compare_exchange_n(&lock_value, &compare_val, 1, false, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
}

static inline bool try_lock( exp_backoff_then_block_lock & this ) { size_t compare_val = 0; return internal_try_lock( this, compare_val ); }

static inline bool try_lock_contention( exp_backoff_then_block_lock & this ) with(this) {
        return !__atomic_exchange_n( &lock_value, 2, __ATOMIC_ACQUIRE );
}

static inline bool block( exp_backoff_then_block_lock & this ) with(this) {
    lock( spinlock __cfaabi_dbg_ctx2 );
    if (__atomic_load_n( &lock_value, __ATOMIC_SEQ_CST) != 2) {
        unlock( spinlock );
        return true;
    }
    insert_last( blocked_threads, *active_thread() );
    unlock( spinlock );
    park( );
    return true;
}

static inline void lock( exp_backoff_then_block_lock & this ) with(this) {
        size_t compare_val = 0;
        int spin = 4;

        // exponential backoff
        for( ;; ) {
                compare_val = 0;
                if (internal_try_lock(this, compare_val)) return;
                if (2 == compare_val) break;
                for (int i = 0; i < spin; i++) Pause();
                if (spin >= 1024) break;
                spin += spin;
        }

        if(2 != compare_val && try_lock_contention(this)) return;
        // block until signalled
        while (block(this)) if(try_lock_contention(this)) return;
}

static inline void unlock( exp_backoff_then_block_lock & this ) with(this) {
    if (__atomic_exchange_n(&lock_value, 0, __ATOMIC_RELEASE) == 1) return;
    lock( spinlock __cfaabi_dbg_ctx2 );
    thread$ * t = &try_pop_front( blocked_threads );
    unlock( spinlock );
    unpark( t );
}

DEFAULT_ON_NOTIFY( exp_backoff_then_block_lock )
DEFAULT_ON_WAIT( exp_backoff_then_block_lock )
DEFAULT_ON_WAKEUP_REACQ( exp_backoff_then_block_lock )

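// Behaviour sketch (illustrative only): lock() retries internal_try_lock with pause loops of
// 4, 8, 16, ..., 1024 iterations (doubling each round) before falling back to parking on the
// blocked_threads list, and unlock() skips the internal spinlock entirely when the lock was
// uncontended (lock_value was 1). The external interface is the usual lock/try_lock/unlock.
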
//-----------------------------------------------------------------------------
// Fast Block Lock

// minimal blocking lock
// - No reacquire for cond var
// - No recursive acquisition
// - No ownership
struct fast_block_lock {
        // List of blocked threads
        dlist( thread$ ) blocked_threads;

        // Spin lock used for mutual exclusion
        __spinlock_t lock;

        // flag showing if lock is held
        bool held:1;
};

static inline void  ?{}( fast_block_lock & this ) with(this) {
        lock{};
        blocked_threads{};
        held = false;
}
static inline void ^?{}( fast_block_lock & this ) {}
static inline void ?{}( fast_block_lock & this, fast_block_lock this2 ) = void;
static inline void ?=?( fast_block_lock & this, fast_block_lock this2 ) = void;

// if this is called recursively IT WILL DEADLOCK!!!!!
static inline void lock( fast_block_lock & this ) with(this) {
        lock( lock __cfaabi_dbg_ctx2 );
        if ( held ) {
                insert_last( blocked_threads, *active_thread() );
                unlock( lock );
                park( );
                return;
        }
        held = true;
        unlock( lock );
}

static inline void unlock( fast_block_lock & this ) with(this) {
        lock( lock __cfaabi_dbg_ctx2 );
        /* paranoid */ verifyf( held != false, "Attempt to release lock %p that isn't held", &this );
        thread$ * t = &try_pop_front( blocked_threads );
        held = ( t ? true : false );
        unpark( t );
        unlock( lock );
}

static inline void on_notify( fast_block_lock & this, struct thread$ * t ) with(this) {
    lock( lock __cfaabi_dbg_ctx2 );
    insert_last( blocked_threads, *t );
    unlock( lock );
}
DEFAULT_ON_WAIT( fast_block_lock )
DEFAULT_ON_WAKEUP_NO_REACQ( fast_block_lock )

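// Usage sketch (illustrative only): the same lock()/unlock() interface as above, with no
// ownership tracking, so recursive acquisition deadlocks and any thread may release the lock.
//
//   fast_block_lock fbl;
//   lock( fbl );
//   /* ... */
//   unlock( fbl );
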
//-----------------------------------------------------------------------------
// simple_owner_lock

// pthread owner lock
// - reacquire for cond var
// - recursive acquisition
// - ownership
struct simple_owner_lock {
        // List of blocked threads
        dlist( select_node ) blocked_threads;

        // Spin lock used for mutual exclusion
        __spinlock_t lock;

        // owner showing if lock is held
        struct thread$ * owner;

        size_t recursion_count;
};

static inline void  ?{}( simple_owner_lock & this ) with(this) {
        lock{};
        blocked_threads{};
        owner = 0p;
        recursion_count = 0;
}
static inline void ^?{}( simple_owner_lock & this ) {}
static inline void ?{}( simple_owner_lock & this, simple_owner_lock this2 ) = void;
static inline void ?=?( simple_owner_lock & this, simple_owner_lock this2 ) = void;

static inline void lock( simple_owner_lock & this ) with(this) {
        if ( owner == active_thread() ) {
                recursion_count++;
                return;
        }
        lock( lock __cfaabi_dbg_ctx2 );

        if ( owner != 0p ) {
                select_node node;
                insert_last( blocked_threads, node );
                unlock( lock );
                park( );
                return;
        }
        owner = active_thread();
        recursion_count = 1;
        unlock( lock );
}

static inline void pop_node( simple_owner_lock & this ) with(this) {
    __handle_waituntil_OR( blocked_threads );
    select_node * node = &try_pop_front( blocked_threads );
    if ( node ) {
        owner = node->blocked_thread;
        recursion_count = 1;
        // if ( !node->clause_status || __make_select_node_available( *node ) ) unpark( node->blocked_thread );
        wake_one( blocked_threads, *node );
    } else {
        owner = 0p;
        recursion_count = 0;
    }
}

static inline void unlock( simple_owner_lock & this ) with(this) {
        lock( lock __cfaabi_dbg_ctx2 );
        /* paranoid */ verifyf( owner != 0p, "Attempt to release lock %p that isn't held", &this );
        /* paranoid */ verifyf( owner == active_thread(), "Thread %p other than the owner %p attempted to release owner lock %p", owner, active_thread(), &this );
        // if recursion count is zero release lock and set new owner if one is waiting
        recursion_count--;
        if ( recursion_count == 0 ) {
                pop_node( this );
        }
        unlock( lock );
}

static inline void on_notify( simple_owner_lock & this, thread$ * t ) with(this) {
        lock( lock __cfaabi_dbg_ctx2 );
        // lock held
        if ( owner != 0p ) {
                insert_last( blocked_threads, *(select_node *)t->link_node );
        }
        // lock not held
        else {
                owner = t;
                recursion_count = 1;
                unpark( t );
        }
        unlock( lock );
}

static inline size_t on_wait( simple_owner_lock & this, __cfa_pre_park pp_fn, void * pp_datum ) with(this) {
        lock( lock __cfaabi_dbg_ctx2 );
        /* paranoid */ verifyf( owner != 0p, "Attempt to release lock %p that isn't held", &this );
        /* paranoid */ verifyf( owner == active_thread(), "Thread %p other than the owner %p attempted to release owner lock %p", owner, active_thread(), &this );

        size_t ret = recursion_count;

        pop_node( this );

        select_node node;
        active_thread()->link_node = (void *)&node;
        unlock( lock );

        pre_park_then_park( pp_fn, pp_datum );

        return ret;
}

static inline void on_wakeup( simple_owner_lock & this, size_t recursion ) with(this) { recursion_count = recursion; }

// waituntil() support
static inline bool register_select( simple_owner_lock & this, select_node & node ) with(this) {
    lock( lock __cfaabi_dbg_ctx2 );

    // check if we can complete operation. If so race to establish winner in special OR case
    if ( !node.park_counter && ( owner == active_thread() || owner == 0p ) ) {
        if ( !__make_select_node_available( node ) ) { // we didn't win the race so give up on registering
           unlock( lock );
           return false;
        }
    }

    if ( owner == active_thread() ) {
        recursion_count++;
        if ( node.park_counter ) __make_select_node_available( node );
        unlock( lock );
        return true;
    }

    if ( owner != 0p ) {
        insert_last( blocked_threads, node );
        unlock( lock );
        return false;
    }

    owner = active_thread();
    recursion_count = 1;

    if ( node.park_counter ) __make_select_node_available( node );
    unlock( lock );
    return true;
}

static inline bool unregister_select( simple_owner_lock & this, select_node & node ) with(this) {
    lock( lock __cfaabi_dbg_ctx2 );
    if ( node`isListed ) {
        remove( node );
        unlock( lock );
        return false;
    }

    if ( owner == active_thread() ) {
        recursion_count--;
        if ( recursion_count == 0 ) {
            pop_node( this );
        }
    }
    unlock( lock );
    return false;
}

static inline void on_selected( simple_owner_lock & this, select_node & node ) {}

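// Usage sketch (illustrative only): acquisitions by the owning thread are counted and must be
// matched by the same number of releases before the lock is handed to the next waiter.
//
//   simple_owner_lock sol;
//   lock( sol );          // owner = this thread, recursion_count = 1
//   lock( sol );          // recursion_count = 2, no blocking
//   unlock( sol );        // recursion_count = 1
//   unlock( sol );        // lock released (or passed to a blocked thread)
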
//-----------------------------------------------------------------------------
// Spin Queue Lock

// - No reacquire for cond var
// - No recursive acquisition
// - No ownership
// - spin lock with no locking/atomics in unlock
struct spin_queue_lock {
        // Spin lock used for mutual exclusion
        mcs_spin_lock lock;

        // flag showing if lock is held
        volatile bool held;
};

static inline void  ?{}( spin_queue_lock & this ) with(this) {
        lock{};
        held = false;
}
static inline void ^?{}( spin_queue_lock & this ) {}
static inline void ?{}( spin_queue_lock & this, spin_queue_lock this2 ) = void;
static inline void ?=?( spin_queue_lock & this, spin_queue_lock this2 ) = void;

// if this is called recursively IT WILL DEADLOCK!
static inline void lock( spin_queue_lock & this ) with(this) {
        mcs_spin_node node;
        lock( lock, node );
        while(__atomic_load_n(&held, __ATOMIC_SEQ_CST)) Pause();
        __atomic_store_n(&held, true, __ATOMIC_SEQ_CST);
        unlock( lock, node );
}

static inline void unlock( spin_queue_lock & this ) with(this) {
        __atomic_store_n(&held, false, __ATOMIC_RELEASE);
}

DEFAULT_ON_NOTIFY( spin_queue_lock )
DEFAULT_ON_WAIT( spin_queue_lock )
DEFAULT_ON_WAKEUP_REACQ( spin_queue_lock )

//-----------------------------------------------------------------------------
// MCS Block Spin Lock

// - No reacquire for cond var
// - No recursive acquisition
// - No ownership
// - Blocks, but the first waiter spins (like spin_queue_lock, except non-head threads block instead of spinning)
struct mcs_block_spin_lock {
        // Spin lock used for mutual exclusion
        mcs_lock lock;

        // flag showing if lock is held
        volatile bool held;
};

static inline void  ?{}( mcs_block_spin_lock & this ) with(this) {
        lock{};
        held = false;
}
static inline void ^?{}( mcs_block_spin_lock & this ) {}
static inline void ?{}( mcs_block_spin_lock & this, mcs_block_spin_lock this2 ) = void;
static inline void ?=?( mcs_block_spin_lock & this, mcs_block_spin_lock this2 ) = void;

// if this is called recursively IT WILL DEADLOCK!!!!!
static inline void lock( mcs_block_spin_lock & this ) with(this) {
        mcs_node node;
        lock( lock, node );
        while(__atomic_load_n(&held, __ATOMIC_SEQ_CST)) Pause();
        __atomic_store_n(&held, true, __ATOMIC_SEQ_CST);
        unlock( lock, node );
}

static inline void unlock(mcs_block_spin_lock & this) with(this) {
        __atomic_store_n(&held, false, __ATOMIC_SEQ_CST);
}

DEFAULT_ON_NOTIFY( mcs_block_spin_lock )
DEFAULT_ON_WAIT( mcs_block_spin_lock )
DEFAULT_ON_WAKEUP_REACQ( mcs_block_spin_lock )

//-----------------------------------------------------------------------------
// Block Spin Lock

// - No reacquire for cond var
// - No recursive acquisition
// - No ownership
// - Blocks, but the first waiter spins (like spin_queue_lock, except non-head threads block instead of spinning)
struct block_spin_lock {
        // Spin lock used for mutual exclusion
        fast_block_lock lock;

        // flag showing if lock is held
        volatile bool held;
};

static inline void  ?{}( block_spin_lock & this ) with(this) {
        lock{};
        held = false;
}
static inline void ^?{}( block_spin_lock & this ) {}
static inline void ?{}( block_spin_lock & this, block_spin_lock this2 ) = void;
static inline void ?=?( block_spin_lock & this, block_spin_lock this2 ) = void;

// if this is called recursively IT WILL DEADLOCK!!!!!
static inline void lock( block_spin_lock & this ) with(this) {
        lock( lock );
        while(__atomic_load_n(&held, __ATOMIC_SEQ_CST)) Pause();
        __atomic_store_n(&held, true, __ATOMIC_RELEASE);
        unlock( lock );
}

static inline void unlock( block_spin_lock & this ) with(this) {
        __atomic_store_n(&held, false, __ATOMIC_RELEASE);
}

static inline void on_notify( block_spin_lock & this, struct thread$ * t ) with(this.lock) {
        // first we acquire internal fast_block_lock
        lock( lock __cfaabi_dbg_ctx2 );
        if ( held ) { // if internal fast_block_lock is held
                insert_last( blocked_threads, *t );
                unlock( lock );
                return;
        }
        // if internal fast_block_lock is not held
        held = true;
        unlock( lock );

        unpark(t);
}
DEFAULT_ON_WAIT( block_spin_lock )
static inline void on_wakeup( block_spin_lock & this, size_t recursion ) with(this) {
        // now we acquire the entire block_spin_lock upon waking up
        while(__atomic_load_n(&held, __ATOMIC_SEQ_CST)) Pause();
        __atomic_store_n(&held, true, __ATOMIC_RELEASE);
        unlock( lock ); // now we release the internal fast_block_lock
}

//-----------------------------------------------------------------------------
// info_thread
// the info thread is a wrapper around a thread used
// to store extra data for use in the condition variable
forall(L & | is_blocking_lock(L)) {
        struct info_thread;
}

//-----------------------------------------------------------------------------
// Synchronization Locks
forall(L & | is_blocking_lock(L)) {

        //-----------------------------------------------------------------------------
        // condition_variable

        // The multi-tool condition variable
        // - can wait with a timeout, returning on either a signal or the timeout
        // - can wait without passing a lock
        // - can have waiters reacquire different locks while waiting on the same cond var
        // - has shadow queue
        // - can be signalled outside of critical sections with no locks held
        struct condition_variable {
                // Spin lock used for mutual exclusion
                __spinlock_t lock;

                // List of blocked threads
                dlist( info_thread(L) ) blocked_threads;

                // Count of current blocked threads
                int count;
        };


        void  ?{}( condition_variable(L) & this );
        void ^?{}( condition_variable(L) & this );

        bool notify_one( condition_variable(L) & this );
        bool notify_all( condition_variable(L) & this );

        uintptr_t front( condition_variable(L) & this );

        bool empty  ( condition_variable(L) & this );
        int  counter( condition_variable(L) & this );

        void wait( condition_variable(L) & this );
        void wait( condition_variable(L) & this, uintptr_t info );
        bool wait( condition_variable(L) & this, Duration duration );
        bool wait( condition_variable(L) & this, uintptr_t info, Duration duration );

        void wait( condition_variable(L) & this, L & l );
        void wait( condition_variable(L) & this, L & l, uintptr_t info );
        bool wait( condition_variable(L) & this, L & l, Duration duration );
        bool wait( condition_variable(L) & this, L & l, uintptr_t info, Duration duration );

        //-----------------------------------------------------------------------------
        // fast_cond_var

        // The trimmed and slim condition variable
        // - no internal lock so you must hold a lock while using this cond var
        // - signalling without holding the branded lock is UNSAFE!
        // - only allows usage of one lock; the cond var is branded after first usage

        struct fast_cond_var {
                // List of blocked threads
                dlist( info_thread(L) ) blocked_threads;
                #ifdef __CFA_DEBUG__
                L * lock_used;
                #endif
        };

        void  ?{}( fast_cond_var(L) & this );
        void ^?{}( fast_cond_var(L) & this );

        bool notify_one( fast_cond_var(L) & this );
        bool notify_all( fast_cond_var(L) & this );

        uintptr_t front( fast_cond_var(L) & this );
        bool empty  ( fast_cond_var(L) & this );

        void wait( fast_cond_var(L) & this, L & l );
        void wait( fast_cond_var(L) & this, L & l, uintptr_t info );


        //-----------------------------------------------------------------------------
        // pthread_cond_var
        //
        // - cond var with minimal footprint
        // - supports operations needed for pthread cond

        struct pthread_cond_var {
                dlist( info_thread(L) ) blocked_threads;
                __spinlock_t lock;
        };

        void  ?{}( pthread_cond_var(L) & this );
        void ^?{}( pthread_cond_var(L) & this );

        bool notify_one( pthread_cond_var(L) & this );
        bool notify_all( pthread_cond_var(L) & this );

        uintptr_t front( pthread_cond_var(L) & this );
        bool empty ( pthread_cond_var(L) & this );

        void wait( pthread_cond_var(L) & this, L & l );
        void wait( pthread_cond_var(L) & this, L & l, uintptr_t info );
        bool wait( pthread_cond_var(L) & this, L & l, timespec t );
        bool wait( pthread_cond_var(L) & this, L & l, uintptr_t info, timespec t );
}
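
// Usage sketch (illustrative only; "ready" is a hypothetical flag shared by two threads):
//
//   owner_lock l;
//   condition_variable( owner_lock ) cv;
//
//   // waiting thread:
//   lock( l );
//   while ( ! ready ) wait( cv, l );   // wait() releases l while blocked and reacquires it before returning
//   unlock( l );
//
//   // signalling thread:
//   lock( l );
//   ready = true;
//   unlock( l );
//   notify_one( cv );                  // per the comments above, may be called with no locks held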