//
// Cforall Version 1.0.0 Copyright (C) 2021 University of Waterloo
//
// The contents of this file are covered under the licence agreement in the
// file "LICENCE" distributed with Cforall.
//
// locks.hfa -- PUBLIC
// Runtime locks used with the runtime thread system.
//
// Author           : Colby Alexander Parsons
// Created On       : Thu Jan 21 19:46:50 2021
// Last Modified By :
// Last Modified On :
// Update Count     :
//

#pragma once

#include
#include

#include "bits/weakso_locks.hfa"
#include "containers/lockfree.hfa"
#include "containers/list.hfa"

#include "limits.hfa"
#include "thread.hfa"

#include "time_t.hfa"
#include "time.hfa"

#include

// futex headers
#include <linux/futex.h>	/* Definition of FUTEX_* constants */
#include <sys/syscall.h>	/* Definition of SYS_* constants */
#include <unistd.h>		// syscall()

// C_TODO: cleanup this and locks.cfa
// - appropriate separation of interface and impl
// - clean up unused/unneeded locks
// - change messy big blocking lock from inheritance to composition to remove need for flags

//-----------------------------------------------------------------------------
// Semaphore
struct semaphore {
	__spinlock_t lock;
	int count;
	__queue_t(thread$) waiting;
};

void  ?{}(semaphore & this, int count = 1);
void ^?{}(semaphore & this);
bool P (semaphore & this);
bool V (semaphore & this);
bool V (semaphore & this, unsigned count);
thread$ * V (semaphore & this, bool );

//----------
struct single_acquisition_lock {
	inline blocking_lock;
};

static inline void  ?{}( single_acquisition_lock & this ) {((blocking_lock &)this){ false, false };}
static inline void ^?{}( single_acquisition_lock & this ) {}
static inline void   lock     ( single_acquisition_lock & this ) { lock    ( (blocking_lock &)this ); }
static inline bool   try_lock ( single_acquisition_lock & this ) { return try_lock( (blocking_lock &)this ); }
static inline void   unlock   ( single_acquisition_lock & this ) { unlock  ( (blocking_lock &)this ); }
static inline size_t on_wait  ( single_acquisition_lock & this ) { return on_wait ( (blocking_lock &)this ); }
static inline void   on_wakeup( single_acquisition_lock & this, size_t v ) { on_wakeup ( (blocking_lock &)this, v ); }
static inline void   on_notify( single_acquisition_lock & this, struct thread$ * t ) { on_notify( (blocking_lock &)this, t ); }

//----------
struct owner_lock {
	inline blocking_lock;
};

static inline void  ?{}( owner_lock & this ) {((blocking_lock &)this){ true, true };}
static inline void ^?{}( owner_lock & this ) {}
static inline void   lock     ( owner_lock & this ) { lock    ( (blocking_lock &)this ); }
static inline bool   try_lock ( owner_lock & this ) { return try_lock( (blocking_lock &)this ); }
static inline void   unlock   ( owner_lock & this ) { unlock  ( (blocking_lock &)this ); }
static inline size_t on_wait  ( owner_lock & this ) { return on_wait ( (blocking_lock &)this ); }
static inline void   on_wakeup( owner_lock & this, size_t v ) { on_wakeup ( (blocking_lock &)this, v ); }
static inline void   on_notify( owner_lock & this, struct thread$ * t ) { on_notify( (blocking_lock &)this, t ); }

//-----------------------------------------------------------------------------
// MCS Lock
struct mcs_node {
	mcs_node * volatile next;
	single_sem sem;
};

static inline void ?{}(mcs_node & this) { this.next = 0p; }

static inline mcs_node * volatile & ?`next ( mcs_node * node ) { return node->next; }

struct mcs_lock {
	mcs_queue(mcs_node) queue;
};

static inline void lock(mcs_lock & l, mcs_node & n) {
	if(push(l.queue, &n))
		wait(n.sem);
}

static inline void unlock(mcs_lock & l, mcs_node & n) {
	mcs_node * next = advance(l.queue, &n);
	if(next) post(next->sem);
}
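// Usage sketch (illustrative only, not part of this interface): unlike the plain
// lock/unlock locks above, the MCS lock threads its waiters through caller-supplied
// queue nodes, so the same node must be passed to lock and unlock, typically a
// stack-allocated local:
//
//   mcs_lock l;
//   void critical_section() {          // hypothetical caller
//       mcs_node n;                    // this acquisition's queue node
//       lock( l, n );
//       // ... critical section ...
//       unlock( l, n );                // must be the node passed to lock
//   }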
//-----------------------------------------------------------------------------
// MCS Spin Lock
// - No recursive acquisition
// - Needs to be released by owner
struct mcs_spin_node {
	mcs_spin_node * volatile next;
	volatile bool locked;
};

struct mcs_spin_queue {
	mcs_spin_node * volatile tail;
};

static inline void ?{}(mcs_spin_node & this) { this.next = 0p; this.locked = true; }

static inline mcs_spin_node * volatile & ?`next ( mcs_spin_node * node ) { return node->next; }

struct mcs_spin_lock {
	mcs_spin_queue queue;
};

static inline void lock(mcs_spin_lock & l, mcs_spin_node & n) {
	mcs_spin_node * prev = __atomic_exchange_n(&l.queue.tail, &n, __ATOMIC_SEQ_CST);
	n.locked = true;
	if(prev == 0p) return;
	prev->next = &n;
	while(__atomic_load_n(&n.locked, __ATOMIC_RELAXED)) Pause();
}

static inline void unlock(mcs_spin_lock & l, mcs_spin_node & n) {
	mcs_spin_node * n_ptr = &n;
	if (__atomic_compare_exchange_n(&l.queue.tail, &n_ptr, 0p, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) return;
	while (__atomic_load_n(&n.next, __ATOMIC_RELAXED) == 0p) {}
	n.next->locked = false;
}

//-----------------------------------------------------------------------------
// futex_mutex
// - No cond var support
// - Kernel thd blocking alternative to the spinlock
// - No ownership (will deadlock on reacq)
struct futex_mutex {
	// lock state: any state other than UNLOCKED is locked
	// enum LockState { UNLOCKED = 0, UNCONTENDED = 1, CONTENDED = 2 };

	// stores a lock state
	int val;
};

// to use for FUTEX_WAKE and FUTEX_WAIT (other futex calls will need more params)
static inline int futex(int *uaddr, int futex_op, int val) {
	return syscall(SYS_futex, uaddr, futex_op, val, NULL, NULL, 0);
}

static inline void ?{}( futex_mutex & this ) with(this) { val = 0; }

static inline bool internal_try_lock(futex_mutex & this, int & compare_val) with(this) {
	return __atomic_compare_exchange_n((int*)&val, (int*)&compare_val, 1, false, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
}

static inline int internal_exchange(futex_mutex & this) with(this) {
	return __atomic_exchange_n((int*)&val, 2, __ATOMIC_ACQUIRE);
}

// if this is called recursively IT WILL DEADLOCK!!!!!
static inline void lock(futex_mutex & this) with(this) {
	int state;

	// // linear backoff omitted for now
	// for( int spin = 4; spin < 1024; spin += spin) {
	// 	state = 0;
	// 	// if unlocked, lock and return
	// 	if (internal_try_lock(this, state)) return;
	// 	if (2 == state) break;
	// 	for (int i = 0; i < spin; i++) Pause();
	// }

	// no contention, try to acquire
	if (internal_try_lock(this, state)) return;

	// if not in contended state, set to be in contended state
	if (state != 2) state = internal_exchange(this);

	// block and spin until we win the lock
	while (state != 0) {
		futex((int*)&val, FUTEX_WAIT, 2); // if val is not 2 this returns with EWOULDBLOCK
		state = internal_exchange(this);
	}
}

static inline void unlock(futex_mutex & this) with(this) {
	// if uncontended, do atomic unlock and then return
	if (__atomic_fetch_sub(&val, 1, __ATOMIC_RELEASE) == 1) return; // TODO: try acq/rel

	// otherwise threads are blocked so we must wake one
	__atomic_store_n((int *)&val, 0, __ATOMIC_RELEASE);
	futex((int *)&val, FUTEX_WAKE, 1);
}

static inline void on_notify( futex_mutex & f, thread$ * t) { unpark(t); }
static inline size_t on_wait( futex_mutex & f ) { unlock(f); return 0; }

// to set recursion count after getting signalled;
static inline void on_wakeup( futex_mutex & f, size_t recursion ) {}
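// Usage sketch (illustrative only; Worker and shared_counter are hypothetical names):
// futex_mutex is a plain non-recursive mutual-exclusion lock, so it is used with
// bare lock/unlock pairs around the critical section:
//
//   futex_mutex m;
//   int shared_counter = 0;
//
//   thread Worker {};
//   void main( Worker & this ) {
//       for ( i; 1000 ) {
//           lock( m );                 // never reacquire while already held: it deadlocks
//           shared_counter += 1;
//           unlock( m );
//       }
//   }
//
//   // e.g. in a program: { Worker workers[4]; }   // threads join at end of block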
//-----------------------------------------------------------------------------
// CLH Spinlock
// - No recursive acquisition
// - Needs to be released by owner
struct clh_lock {
	volatile bool * volatile tail;
	volatile bool * volatile head;
};

static inline void  ?{}( clh_lock & this ) { this.tail = malloc(); *this.tail = true; }
static inline void ^?{}( clh_lock & this ) { free(this.tail); }

static inline void lock(clh_lock & l) {
	thread$ * curr_thd = active_thread();
	*(curr_thd->clh_node) = false;
	volatile bool * prev = __atomic_exchange_n((bool **)(&l.tail), (bool *)(curr_thd->clh_node), __ATOMIC_SEQ_CST);
	while(!__atomic_load_n(prev, __ATOMIC_SEQ_CST)) Pause();
	__atomic_store_n((bool **)(&l.head), (bool *)curr_thd->clh_node, __ATOMIC_SEQ_CST);
	curr_thd->clh_node = prev;
}

static inline void unlock(clh_lock & l) {
	__atomic_store_n((bool *)(l.head), true, __ATOMIC_SEQ_CST);
}

static inline void on_notify(clh_lock & this, struct thread$ * t ) { unpark(t); }
static inline size_t on_wait(clh_lock & this) { unlock(this); return 0; }
static inline void on_wakeup(clh_lock & this, size_t recursion ) { lock(this); }
//-----------------------------------------------------------------------------
// Exponential backoff then block lock
struct exp_backoff_then_block_lock {
	// Spin lock used for mutual exclusion
	__spinlock_t spinlock;

	// List of blocked threads
	dlist( thread$ ) blocked_threads;

	// Used for comparing and exchanging
	volatile size_t lock_value;
};

static inline void  ?{}( exp_backoff_then_block_lock & this ) {
	this.spinlock{};
	this.blocked_threads{};
	this.lock_value = 0;
}
static inline void ^?{}( exp_backoff_then_block_lock & this ) {}
// static inline void ?{}( exp_backoff_then_block_lock & this, exp_backoff_then_block_lock this2 ) = void;
// static inline void ?=?( exp_backoff_then_block_lock & this, exp_backoff_then_block_lock this2 ) = void;

static inline bool internal_try_lock(exp_backoff_then_block_lock & this, size_t & compare_val) with(this) {
	if (__atomic_compare_exchange_n(&lock_value, &compare_val, 1, false, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)) {
		return true;
	}
	return false;
}

static inline bool try_lock(exp_backoff_then_block_lock & this) { size_t compare_val = 0; return internal_try_lock(this, compare_val); }

static inline bool try_lock_contention(exp_backoff_then_block_lock & this) with(this) {
	if (__atomic_exchange_n(&lock_value, 2, __ATOMIC_ACQUIRE) == 0) {
		return true;
	}
	return false;
}

static inline bool block(exp_backoff_then_block_lock & this) with(this) {
	lock( spinlock __cfaabi_dbg_ctx2 ); // TODO change to lockfree queue (MPSC)
	if (lock_value != 2) {
		unlock( spinlock );
		return true;
	}
	insert_last( blocked_threads, *active_thread() );
	unlock( spinlock );
	park( );
	return true;
}

static inline void lock(exp_backoff_then_block_lock & this) with(this) {
	size_t compare_val = 0;
	int spin = 4;

	// exponential backoff
	for( ;; ) {
		compare_val = 0;
		if (internal_try_lock(this, compare_val)) return;
		if (2 == compare_val) break;
		for (int i = 0; i < spin; i++) Pause();
		if (spin >= 1024) break;
		spin += spin;
	}

	if(2 != compare_val && try_lock_contention(this)) return;
	// block until signalled
	while (block(this)) if(try_lock_contention(this)) return;
}

static inline void unlock(exp_backoff_then_block_lock & this) with(this) {
	if (__atomic_exchange_n(&lock_value, 0, __ATOMIC_RELEASE) == 1) return;
	lock( spinlock __cfaabi_dbg_ctx2 );
	thread$ * t = &try_pop_front( blocked_threads );
	unlock( spinlock );
	unpark( t );
}

static inline void on_notify(exp_backoff_then_block_lock & this, struct thread$ * t ) { unpark(t); }
static inline size_t on_wait(exp_backoff_then_block_lock & this) { unlock(this); return 0; }
static inline void on_wakeup(exp_backoff_then_block_lock & this, size_t recursion ) { lock(this); }
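// Usage sketch (illustrative only): this lock also exposes try_lock, so a caller
// can attempt a non-blocking acquire and fall back to the blocking path:
//
//   exp_backoff_then_block_lock l;
//   if ( ! try_lock( l ) ) {
//       // contended: either do other work and retry, or commit to blocking
//       lock( l );
//   }
//   // ... critical section ...
//   unlock( l );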
//-----------------------------------------------------------------------------
// Fast Block Lock
// minimal blocking lock
// - No reacquire for cond var
// - No recursive acquisition
// - No ownership
struct fast_block_lock {
	// List of blocked threads
	dlist( thread$ ) blocked_threads;

	// Spin lock used for mutual exclusion
	__spinlock_t lock;

	// flag showing if lock is held
	bool held:1;
};

static inline void  ?{}( fast_block_lock & this ) with(this) {
	lock{};
	blocked_threads{};
	held = false;
}
static inline void ^?{}( fast_block_lock & this ) {}
static inline void ?{}( fast_block_lock & this, fast_block_lock this2 ) = void;
static inline void ?=?( fast_block_lock & this, fast_block_lock this2 ) = void;

// if this is called recursively IT WILL DEADLOCK!!!!!
static inline void lock(fast_block_lock & this) with(this) {
	lock( lock __cfaabi_dbg_ctx2 );
	if ( held ) {
		insert_last( blocked_threads, *active_thread() );
		unlock( lock );
		park( );
		return;
	}
	held = true;
	unlock( lock );
}

static inline void unlock(fast_block_lock & this) with(this) {
	lock( lock __cfaabi_dbg_ctx2 );
	/* paranoid */ verifyf( held != false, "Attempt to release lock %p that isn't held", &this );
	thread$ * t = &try_pop_front( blocked_threads );
	held = ( t ? true : false );
	unpark( t );
	unlock( lock );
}

static inline void on_notify(fast_block_lock & this, struct thread$ * t ) with(this) {
	lock( lock __cfaabi_dbg_ctx2 );
	insert_last( blocked_threads, *t );
	unlock( lock );
}
static inline size_t on_wait(fast_block_lock & this) { unlock(this); return 0; }
static inline void on_wakeup(fast_block_lock & this, size_t recursion ) { }

//-----------------------------------------------------------------------------
// simple_owner_lock
// pthread owner lock
// - reacquire for cond var
// - recursive acquisition
// - ownership
struct simple_owner_lock {
	// List of blocked threads
	dlist( thread$ ) blocked_threads;

	// Spin lock used for mutual exclusion
	__spinlock_t lock;

	// owner showing if lock is held
	struct thread$ * owner;

	size_t recursion_count;
};

static inline void  ?{}( simple_owner_lock & this ) with(this) {
	lock{};
	blocked_threads{};
	owner = 0p;
	recursion_count = 0;
}
static inline void ^?{}( simple_owner_lock & this ) {}
static inline void ?{}( simple_owner_lock & this, simple_owner_lock this2 ) = void;
static inline void ?=?( simple_owner_lock & this, simple_owner_lock this2 ) = void;

static inline void lock(simple_owner_lock & this) with(this) {
	if (owner == active_thread()) {
		recursion_count++;
		return;
	}
	lock( lock __cfaabi_dbg_ctx2 );

	if (owner != 0p) {
		insert_last( blocked_threads, *active_thread() );
		unlock( lock );
		park( );
		return;
	}
	owner = active_thread();
	recursion_count = 1;
	unlock( lock );
}

// TODO: fix duplicate def issue and bring this back
// void pop_and_set_new_owner( simple_owner_lock & this ) with( this ) {
// 	thread$ * t = &try_pop_front( blocked_threads );
// 	owner = t;
// 	recursion_count = ( t ? 1 : 0 );
// 	unpark( t );
// }

static inline void unlock(simple_owner_lock & this) with(this) {
	lock( lock __cfaabi_dbg_ctx2 );
	/* paranoid */ verifyf( owner != 0p, "Attempt to release lock %p that isn't held", &this );
	/* paranoid */ verifyf( owner == active_thread(), "Thread %p other than the owner %p attempted to release owner lock %p", owner, active_thread(), &this );
	// if recursion count is zero release lock and set new owner if one is waiting
	recursion_count--;
	if ( recursion_count == 0 ) {
		// pop_and_set_new_owner( this );
		thread$ * t = &try_pop_front( blocked_threads );
		owner = t;
		recursion_count = ( t ? 1 : 0 );
		unpark( t );
	}
	unlock( lock );
}

static inline void on_notify(simple_owner_lock & this, struct thread$ * t ) with(this) {
	lock( lock __cfaabi_dbg_ctx2 );
	// lock held
	if ( owner != 0p ) {
		insert_last( blocked_threads, *t );
	}
	// lock not held
	else {
		owner = t;
		recursion_count = 1;
		unpark( t );
	}
	unlock( lock );
}

static inline size_t on_wait(simple_owner_lock & this) with(this) {
	lock( lock __cfaabi_dbg_ctx2 );
	/* paranoid */ verifyf( owner != 0p, "Attempt to release lock %p that isn't held", &this );
	/* paranoid */ verifyf( owner == active_thread(), "Thread %p other than the owner %p attempted to release owner lock %p", owner, active_thread(), &this );

	size_t ret = recursion_count;

	// pop_and_set_new_owner( this );
	thread$ * t = &try_pop_front( blocked_threads );
	owner = t;
	recursion_count = ( t ? 1 : 0 );
	unpark( t );

	unlock( lock );
	return ret;
}

static inline void on_wakeup(simple_owner_lock & this, size_t recursion ) with(this) { recursion_count = recursion; }
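// Usage sketch (illustrative only): simple_owner_lock tracks an owner and a
// recursion count, so the owning thread may nest acquires as long as every
// lock is matched by an unlock:
//
//   simple_owner_lock l;
//   lock( l );       // owner = this thread, recursion_count = 1
//   lock( l );       // same owner, recursion_count = 2
//   unlock( l );     // recursion_count = 1, still held
//   unlock( l );     // recursion_count = 0, released or handed to a waiter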
//-----------------------------------------------------------------------------
// Spin Queue Lock
// - No reacquire for cond var
// - No recursive acquisition
// - No ownership
// - spin lock with no locking/atomics in unlock
struct spin_queue_lock {
	// Spin lock used for mutual exclusion
	mcs_spin_lock lock;

	// flag showing if lock is held
	volatile bool held;

	#ifdef __CFA_DEBUG__
	// for deadlock detection
	struct thread$ * owner;
	#endif
};

static inline void  ?{}( spin_queue_lock & this ) with(this) { lock{}; held = false; }
static inline void ^?{}( spin_queue_lock & this ) {}
static inline void ?{}( spin_queue_lock & this, spin_queue_lock this2 ) = void;
static inline void ?=?( spin_queue_lock & this, spin_queue_lock this2 ) = void;

// if this is called recursively IT WILL DEADLOCK!!!!!
static inline void lock(spin_queue_lock & this) with(this) {
	mcs_spin_node node;
	lock( lock, node );
	while(__atomic_load_n(&held, __ATOMIC_SEQ_CST)) Pause();
	__atomic_store_n(&held, true, __ATOMIC_SEQ_CST);
	unlock( lock, node );
}

static inline void unlock(spin_queue_lock & this) with(this) {
	__atomic_store_n(&held, false, __ATOMIC_RELEASE);
}

static inline void on_notify(spin_queue_lock & this, struct thread$ * t ) { unpark(t); }
static inline size_t on_wait(spin_queue_lock & this) { unlock(this); return 0; }
static inline void on_wakeup(spin_queue_lock & this, size_t recursion ) { lock(this); }
//-----------------------------------------------------------------------------
// MCS Block Spin Lock
// - No reacquire for cond var
// - No recursive acquisition
// - No ownership
// - Blocks but first node spins (like spin_queue_lock, but non-head threads block instead of spinning)
struct mcs_block_spin_lock {
	// Spin lock used for mutual exclusion
	mcs_lock lock;

	// flag showing if lock is held
	volatile bool held;
};

static inline void  ?{}( mcs_block_spin_lock & this ) with(this) { lock{}; held = false; }
static inline void ^?{}( mcs_block_spin_lock & this ) {}
static inline void ?{}( mcs_block_spin_lock & this, mcs_block_spin_lock this2 ) = void;
static inline void ?=?( mcs_block_spin_lock & this, mcs_block_spin_lock this2 ) = void;

// if this is called recursively IT WILL DEADLOCK!!!!!
static inline void lock(mcs_block_spin_lock & this) with(this) {
	mcs_node node;
	lock( lock, node );
	while(__atomic_load_n(&held, __ATOMIC_SEQ_CST)) Pause();
	__atomic_store_n(&held, true, __ATOMIC_SEQ_CST);
	unlock( lock, node );
}

static inline void unlock(mcs_block_spin_lock & this) with(this) {
	__atomic_store_n(&held, false, __ATOMIC_SEQ_CST);
}

static inline void on_notify(mcs_block_spin_lock & this, struct thread$ * t ) { unpark(t); }
static inline size_t on_wait(mcs_block_spin_lock & this) { unlock(this); return 0; }
static inline void on_wakeup(mcs_block_spin_lock & this, size_t recursion ) { lock(this); }

//-----------------------------------------------------------------------------
// Block Spin Lock
// - No reacquire for cond var
// - No recursive acquisition
// - No ownership
// - Blocks but first node spins (like spin_queue_lock, but non-head threads block instead of spinning)
struct block_spin_lock {
	// Spin lock used for mutual exclusion
	fast_block_lock lock;

	// flag showing if lock is held
	volatile bool held;
};

static inline void  ?{}( block_spin_lock & this ) with(this) { lock{}; held = false; }
static inline void ^?{}( block_spin_lock & this ) {}
static inline void ?{}( block_spin_lock & this, block_spin_lock this2 ) = void;
static inline void ?=?( block_spin_lock & this, block_spin_lock this2 ) = void;

// if this is called recursively IT WILL DEADLOCK!!!!!
static inline void lock(block_spin_lock & this) with(this) {
	lock( lock );
	while(__atomic_load_n(&held, __ATOMIC_SEQ_CST)) Pause();
	__atomic_store_n(&held, true, __ATOMIC_RELEASE);
	unlock( lock );
}

static inline void unlock(block_spin_lock & this) with(this) {
	__atomic_store_n(&held, false, __ATOMIC_RELEASE);
}

static inline void on_notify(block_spin_lock & this, struct thread$ * t ) with(this.lock) {
	// first we acquire internal fast_block_lock
	lock( lock __cfaabi_dbg_ctx2 );
	if ( held ) { // if internal fast_block_lock is held
		insert_last( blocked_threads, *t );
		unlock( lock );
		return;
	}
	// if internal fast_block_lock is not held
	held = true;
	unlock( lock );

	unpark(t);
}
static inline size_t on_wait(block_spin_lock & this) { unlock(this); return 0; }
static inline void on_wakeup(block_spin_lock & this, size_t recursion ) with(this) {
	// now we acquire the entire block_spin_lock upon waking up
	while(__atomic_load_n(&held, __ATOMIC_SEQ_CST)) Pause();
	__atomic_store_n(&held, true, __ATOMIC_RELEASE);
	unlock( lock ); // Now we release the internal fast_block_lock
}

//-----------------------------------------------------------------------------
// is_blocking_lock
trait is_blocking_lock(L & | sized(L)) {
	// For synchronization locks to use when acquiring
	void on_notify( L &, struct thread$ * );

	// For synchronization locks to use when releasing
	size_t on_wait( L & );

	// to set recursion count after getting signalled;
	void on_wakeup( L &, size_t recursion );
};
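// Sketch (illustrative only; my_lock is a hypothetical type): a lock satisfies
// is_blocking_lock by providing the three hooks above, as every lock in this
// header does; e.g. a wrapper that reuses owner_lock via inline inheritance and
// forwards the hooks:
//
//   struct my_lock { inline owner_lock; };
//   static inline void   on_notify( my_lock & this, struct thread$ * t ) { on_notify( (owner_lock &)this, t ); }
//   static inline size_t on_wait  ( my_lock & this ) { return on_wait( (owner_lock &)this ); }
//   static inline void   on_wakeup( my_lock & this, size_t v ) { on_wakeup( (owner_lock &)this, v ); }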
//-----------------------------------------------------------------------------
// // info_thread
// // the info thread is a wrapper around a thread used
// // to store extra data for use in the condition variable
forall(L & | is_blocking_lock(L)) {
	struct info_thread;

	// // for use by sequence
	// info_thread(L) *& Back( info_thread(L) * this );
	// info_thread(L) *& Next( info_thread(L) * this );
}

//-----------------------------------------------------------------------------
// Synchronization Locks
forall(L & | is_blocking_lock(L)) {

	//-----------------------------------------------------------------------------
	// condition_variable
	// The multi-tool condition variable
	// - can pass timeouts to wait for either a signal or timeout
	// - can wait without passing a lock
	// - can have waiters reacquire different locks while waiting on the same cond var
	// - has shadow queue
	// - can be signalled outside of critical sections with no locks held
	struct condition_variable {
		// Spin lock used for mutual exclusion
		__spinlock_t lock;

		// List of blocked threads
		dlist( info_thread(L) ) blocked_threads;

		// Count of current blocked threads
		int count;
	};

	void  ?{}( condition_variable(L) & this );
	void ^?{}( condition_variable(L) & this );

	bool notify_one( condition_variable(L) & this );
	bool notify_all( condition_variable(L) & this );

	uintptr_t front( condition_variable(L) & this );

	bool empty  ( condition_variable(L) & this );
	int  counter( condition_variable(L) & this );

	void wait( condition_variable(L) & this );
	void wait( condition_variable(L) & this, uintptr_t info );
	bool wait( condition_variable(L) & this, Duration duration );
	bool wait( condition_variable(L) & this, uintptr_t info, Duration duration );

	void wait( condition_variable(L) & this, L & l );
	void wait( condition_variable(L) & this, L & l, uintptr_t info );
	bool wait( condition_variable(L) & this, L & l, Duration duration );
	bool wait( condition_variable(L) & this, L & l, uintptr_t info, Duration duration );

	//-----------------------------------------------------------------------------
	// fast_cond_var
	// The trimmed and slim condition variable
	// - no internal lock so you must hold a lock while using this cond var
	// - signalling without holding branded lock is UNSAFE!
	// - only allows usage of one lock, cond var is branded after usage
	struct fast_cond_var {
		// List of blocked threads
		dlist( info_thread(L) ) blocked_threads;
		#ifdef __CFA_DEBUG__
		L * lock_used;
		#endif
	};

	void  ?{}( fast_cond_var(L) & this );
	void ^?{}( fast_cond_var(L) & this );

	bool notify_one( fast_cond_var(L) & this );
	bool notify_all( fast_cond_var(L) & this );

	uintptr_t front( fast_cond_var(L) & this );
	bool empty  ( fast_cond_var(L) & this );

	void wait( fast_cond_var(L) & this, L & l );
	void wait( fast_cond_var(L) & this, L & l, uintptr_t info );

	//-----------------------------------------------------------------------------
	// pthread_cond_var
	//
	// - cond var with minimal footprint
	// - supports operations needed for pthread cond
	struct pthread_cond_var {
		dlist( info_thread(L) ) blocked_threads;
		__spinlock_t lock;
	};

	void  ?{}( pthread_cond_var(L) & this );
	void ^?{}( pthread_cond_var(L) & this );

	bool notify_one( pthread_cond_var(L) & this );
	bool notify_all( pthread_cond_var(L) & this );

	uintptr_t front( pthread_cond_var(L) & this );
	bool empty ( pthread_cond_var(L) & this );

	void wait( pthread_cond_var(L) & this, L & l );
	void wait( pthread_cond_var(L) & this, L & l, uintptr_t info );
	bool wait( pthread_cond_var(L) & this, L & l, timespec t );
	bool wait( pthread_cond_var(L) & this, L & l, uintptr_t info, timespec t );
}
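// Usage sketch (illustrative only; m, ready, and data_ready are hypothetical names):
// a condition_variable is parameterized by a lock satisfying is_blocking_lock;
// a waiter releases the lock while blocked and reacquires it before wait returns:
//
//   owner_lock m;
//   condition_variable( owner_lock ) ready;
//   bool data_ready = false;
//
//   // waiter
//   lock( m );
//   while ( ! data_ready ) wait( ready, m );   // m released while blocked, reacquired before returning
//   unlock( m );
//
//   // signaller
//   lock( m );
//   data_ready = true;
//   notify_one( ready );
//   unlock( m );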