//
// Cforall Version 1.0.0 Copyright (C) 2021 University of Waterloo
//
// The contents of this file are covered under the licence agreement in the
// file "LICENCE" distributed with Cforall.
//
// locks.hfa -- PUBLIC
// Runtime locks that used with the runtime thread system.
//
// Author           : Colby Alexander Parsons
// Created On       : Thu Jan 21 19:46:50 2021
// Last Modified By :
// Last Modified On :
// Update Count     :
//

#pragma once

// NOTE(review): the original #include directives lost their header names to
// angle-bracket stripping; <stdbool.h> (bool/true/false) and <stdio.h> are
// reconstructed here — confirm against the upstream libcfa source.
#include <stdbool.h>
#include <stdio.h>

#include "bits/weakso_locks.hfa"
#include "containers/queueLockFree.hfa"
#include "containers/list.hfa"

#include "limits.hfa"
#include "thread.hfa"

#include "time_t.hfa"
#include "time.hfa"

//-----------------------------------------------------------------------------
// Semaphore
struct semaphore {
	__spinlock_t lock;
	int count;
	__queue_t(thread$) waiting;
};

void  ?{}(semaphore & this, int count = 1);
void ^?{}(semaphore & this);
bool   P (semaphore & this);
bool   V (semaphore & this);
bool   V (semaphore & this, unsigned count);
thread$ * V (semaphore & this, bool );

//----------
// Non-owner, non-recursive blocking lock: thin wrappers that forward to the
// underlying blocking_lock with (multi_acquisition=false, strict_owner=false).
struct single_acquisition_lock {
	inline blocking_lock;
};

static inline void  ?{}( single_acquisition_lock & this ) {((blocking_lock &)this){ false, false };}
static inline void ^?{}( single_acquisition_lock & this ) {}
static inline void   lock     ( single_acquisition_lock & this ) { lock    ( (blocking_lock &)this ); }
static inline bool   try_lock ( single_acquisition_lock & this ) { return try_lock( (blocking_lock &)this ); }
static inline void   unlock   ( single_acquisition_lock & this ) { unlock  ( (blocking_lock &)this ); }
static inline size_t on_wait  ( single_acquisition_lock & this ) { return on_wait ( (blocking_lock &)this ); }
static inline void   on_wakeup( single_acquisition_lock & this, size_t v ) { on_wakeup ( (blocking_lock &)this, v ); }
static inline void   on_notify( single_acquisition_lock & this, struct thread$ * t ) { on_notify( (blocking_lock &)this, t ); }

//----------
// Owner lock: forwards to blocking_lock with (multi_acquisition=true,
// strict_owner=true), i.e. recursive acquisition by the owning thread.
struct owner_lock {
	inline blocking_lock;
};

static inline void  ?{}( owner_lock & this ) {((blocking_lock &)this){ true, true };}
static inline void ^?{}( owner_lock & this ) {}
static inline void   lock     ( owner_lock & this ) { lock    ( (blocking_lock &)this ); }
static inline bool   try_lock ( owner_lock & this ) { return try_lock( (blocking_lock &)this ); }
static inline void   unlock   ( owner_lock & this ) { unlock  ( (blocking_lock &)this ); }
static inline size_t on_wait  ( owner_lock & this ) { return on_wait ( (blocking_lock &)this ); }
static inline void   on_wakeup( owner_lock & this, size_t v ) { on_wakeup ( (blocking_lock &)this, v ); }
static inline void   on_notify( owner_lock & this, struct thread$ * t ) { on_notify( (blocking_lock &)this, t ); }

//-----------------------------------------------------------------------------
// MCS Lock
struct mcs_node {
	mcs_node * volatile next;
	single_sem sem;
};

static inline void ?{}(mcs_node & this) { this.next = 0p; }

static inline mcs_node * volatile & ?`next ( mcs_node * node ) {
	return node->next;
}

struct mcs_lock {
	mcs_queue(mcs_node) queue;
};

static inline void lock(mcs_lock & l, mcs_node & n) {
	// push returns whether the queue was non-empty, i.e. whether we must wait
	if(push(l.queue, &n))
		wait(n.sem);
}

static inline void unlock(mcs_lock & l, mcs_node & n) {
	mcs_node * next = advance(l.queue, &n);
	if(next) post(next->sem);
}

//-----------------------------------------------------------------------------
// Linear backoff Spinlock
struct linear_backoff_then_block_lock {
	// Spin lock used for mutual exclusion
	__spinlock_t spinlock;

	// Current thread owning the lock
	struct thread$ * owner;

	// List of blocked threads
	dlist( thread$ ) blocked_threads;

	// Used for comparing and exchanging
	// 0 = unlocked, 1 = locked uncontended, 2 = locked contended
	volatile size_t lock_value;

	// used for linear backoff spinning
	int spin_start;
	int spin_end;
	int spin_count;

	// after unsuccessful linear backoff yield this many times
	int yield_count;
};

static inline void  ?{}( linear_backoff_then_block_lock & this, int spin_start, int spin_end, int spin_count, int yield_count ) {
	this.spinlock{};
	this.blocked_threads{};
	this.lock_value = 0;
	this.spin_start = spin_start;
	this.spin_end = spin_end;
	this.spin_count = spin_count;
	this.yield_count = yield_count;
}
static inline void  ?{}( linear_backoff_then_block_lock & this ) { this{4, 1024, 16, 0}; }
static inline void ^?{}( linear_backoff_then_block_lock & this ) {}
// copying / assigning a lock is forbidden
static inline void ?{}( linear_backoff_then_block_lock & this, linear_backoff_then_block_lock this2 ) = void;
static inline void ?=?( linear_backoff_then_block_lock & this, linear_backoff_then_block_lock this2 ) = void;

// CAS 0 -> 1; on failure compare_val holds the observed value
static inline bool internal_try_lock(linear_backoff_then_block_lock & this, size_t & compare_val) with(this) {
	if (__atomic_compare_exchange_n(&lock_value, &compare_val, 1, false, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)) {
		owner = active_thread();
		return true;
	}
	return false;
}

static inline bool try_lock(linear_backoff_then_block_lock & this) { size_t compare_val = 0; return internal_try_lock(this, compare_val); }

// unconditionally mark the lock contended (2); acquired iff it was free (0)
static inline bool try_lock_contention(linear_backoff_then_block_lock & this) with(this) {
	if (__atomic_exchange_n(&lock_value, 2, __ATOMIC_ACQUIRE) == 0) {
		owner = active_thread();
		return true;
	}
	return false;
}

static inline bool block(linear_backoff_then_block_lock & this) with(this) {
	lock( spinlock __cfaabi_dbg_ctx2 );
	// recheck under the spinlock: the lock may have been released meanwhile
	if (lock_value != 2) {
		unlock( spinlock );
		return true;
	}
	insert_last( blocked_threads, *active_thread() );
	unlock( spinlock );
	park( );
	return true;
}

static inline void lock(linear_backoff_then_block_lock & this) with(this) {
	// if owner just return
	if (active_thread() == owner) return;
	size_t compare_val = 0;
	int spin = spin_start;
	// linear backoff
	for( ;; ) {
		compare_val = 0;
		if (internal_try_lock(this, compare_val)) return;
		if (2 == compare_val) break;
		for (int i = 0; i < spin; i++) Pause();
		if (spin >= spin_end) break;
		spin += spin;
	}

	if(2 != compare_val && try_lock_contention(this)) return;
	// block until signalled
	while (block(this)) if(try_lock_contention(this)) return;
}

static inline void unlock(linear_backoff_then_block_lock & this) with(this) {
	verify(lock_value > 0);
	owner = 0p;
	// fast path: value 1 means no waiter ever observed contention
	if (__atomic_exchange_n(&lock_value, 0, __ATOMIC_RELEASE) == 1) return;
	lock( spinlock __cfaabi_dbg_ctx2 );
	thread$ * t = &try_pop_front( blocked_threads );
	unlock( spinlock );
	unpark( t );
}

static inline void on_notify(linear_backoff_then_block_lock & this, struct thread$ * t ) { unpark(t); }
static inline size_t on_wait(linear_backoff_then_block_lock & this) { unlock(this); return 0; }
static inline void on_wakeup(linear_backoff_then_block_lock & this, size_t recursion ) { lock(this); }

//-----------------------------------------------------------------------------
// Fast Block Lock

// High efficiency minimal blocking lock
// - No reacquire for cond var
// - No recursive acquisition
// - No ownership
struct fast_block_lock {
	// Spin lock used for mutual exclusion
	__spinlock_t lock;

	// List of blocked threads
	dlist( thread$ ) blocked_threads;

	bool held:1;
};

static inline void  ?{}( fast_block_lock & this ) with(this) {
	lock{};
	blocked_threads{};
	held = false;
}
static inline void ^?{}( fast_block_lock & this ) {}
static inline void ?{}( fast_block_lock & this, fast_block_lock this2 ) = void;
static inline void ?=?( fast_block_lock & this, fast_block_lock this2 ) = void;

// if this is called recursively IT WILL DEADLOCK!!!!!
static inline void lock(fast_block_lock & this) with(this) {
	lock( lock __cfaabi_dbg_ctx2 );
	if (held) {
		insert_last( blocked_threads, *active_thread() );
		unlock( lock );
		park( );
		return;
	}
	held = true;
	unlock( lock );
}

static inline void unlock(fast_block_lock & this) with(this) {
	lock( lock __cfaabi_dbg_ctx2 );
	/* paranoid */ verifyf( held != false, "Attempt to release lock %p that isn't held", &this );
	// hand the lock directly to the first waiter, if any
	thread$ * t = &try_pop_front( blocked_threads );
	held = ( t ? true : false );
	unpark( t );
	unlock( lock );
}

static inline void on_notify(fast_block_lock & this, struct thread$ * t ) { unpark(t); }
static inline size_t on_wait(fast_block_lock & this) { unlock(this); return 0; }
static inline void on_wakeup(fast_block_lock & this, size_t recursion ) { }

//-----------------------------------------------------------------------------
// is_blocking_lock
trait is_blocking_lock(L & | sized(L)) {
	// For synchronization locks to use when acquiring
	void on_notify( L &, struct thread$ * );

	// For synchronization locks to use when releasing
	size_t on_wait( L & );

	// to set recursion count after getting signalled;
	void on_wakeup( L &, size_t recursion );
};

//-----------------------------------------------------------------------------
// info_thread
// the info thread is a wrapper around a thread used
// to store extra data for use in the condition variable
forall(L & | is_blocking_lock(L)) {
	struct info_thread;

	// for use by sequence
	// info_thread(L) *& Back( info_thread(L) * this );
	// info_thread(L) *& Next( info_thread(L) * this );
}

//-----------------------------------------------------------------------------
// Synchronization Locks
forall(L & | is_blocking_lock(L)) {

	//-----------------------------------------------------------------------------
	// condition_variable

	// The multi-tool condition variable
	// - can pass timeouts to wait for either a signal or timeout
	// - can wait without passing a lock
	// - can have waiters reacquire different locks while waiting on the same cond var
	// - has shadow queue
	// - can be signalled outside of critical sections with no locks held
	struct condition_variable {
		// Spin lock used for mutual exclusion
		__spinlock_t lock;

		// List of blocked threads
		dlist( info_thread(L) ) blocked_threads;

		// Count of current blocked threads
		int count;
	};

	void  ?{}( condition_variable(L) & this );
	void ^?{}( condition_variable(L) & this );

	bool notify_one( condition_variable(L) & this );
	bool notify_all( condition_variable(L) & this );

	uintptr_t front( condition_variable(L) & this );

	bool empty  ( condition_variable(L) & this );
	int  counter( condition_variable(L) & this );

	void wait( condition_variable(L) & this );
	void wait( condition_variable(L) & this, uintptr_t info );
	bool wait( condition_variable(L) & this, Duration duration );
	bool wait( condition_variable(L) & this, uintptr_t info, Duration duration );

	void wait( condition_variable(L) & this, L & l );
	void wait( condition_variable(L) & this, L & l, uintptr_t info );
	bool wait( condition_variable(L) & this, L & l, Duration duration );
	bool wait( condition_variable(L) & this, L & l, uintptr_t info, Duration duration );

	//-----------------------------------------------------------------------------
	// fast_cond_var

	// The trimmed and slim condition variable
	// - no internal lock so you must hold a lock while using this cond var
	// - signalling without holding branded lock is UNSAFE!
	// - only allows usage of one lock, cond var is branded after usage
	struct fast_cond_var {
		// List of blocked threads
		dlist( info_thread(L) ) blocked_threads;

		#ifdef __CFA_DEBUG__
		L * lock_used;
		#endif
	};

	void  ?{}( fast_cond_var(L) & this );
	void ^?{}( fast_cond_var(L) & this );

	bool notify_one( fast_cond_var(L) & this );
	bool notify_all( fast_cond_var(L) & this );

	uintptr_t front( fast_cond_var(L) & this );
	bool empty  ( fast_cond_var(L) & this );

	void wait( fast_cond_var(L) & this, L & l );
	void wait( fast_cond_var(L) & this, L & l, uintptr_t info );
}