//
// Cforall Version 1.0.0 Copyright (C) 2016 University of Waterloo
//
// The contents of this file are covered under the licence agreement in the
// file "LICENCE" distributed with Cforall.
//
// bits/locks.hfa -- Fast internal locks.
//
// Author           : Thierry Delisle
// Created On       : Tue Oct 31 15:14:38 2017
// Last Modified By : Peter A. Buhr
// Last Modified On : Tue Feb  4 13:03:19 2020
// Update Count     : 11
//

#pragma once

#include "bits/debug.hfa"
#include "bits/defs.hfa"
#include <assert.h>	// NOTE(review): header name reconstructed — original directive was truncated; verify against upstream

#ifdef __cforall
extern "C" {
	#include <pthread.h>	// pthread_mutex_t / pthread_cond_t used by __bin_sem_t below
}
#endif

// pause to prevent excess processor bus usage while spinning
#if defined( __sparc )
	#define Pause() __asm__ __volatile__ ( "rd %ccr,%g0" )
#elif defined( __i386 ) || defined( __x86_64 )
	#define Pause() __asm__ __volatile__ ( "pause" : : : )
#elif defined( __ARM_ARCH )
	#define Pause() __asm__ __volatile__ ( "nop" : : : )
#else
	#error unsupported architecture
#endif

struct __spinlock_t {
	// Wrap in struct to prevent false sharing with debug info
	volatile bool lock;
	#ifdef __CFA_DEBUG__
		// previous function to acquire the lock
		const char * prev_name;
		// previous thread to acquire the lock
		void* prev_thrd;
	#endif
};

#ifdef __cforall
	extern "C" {
		// interrupts are disabled for the duration of lock ownership so the
		// holder cannot be preempted while other processors spin on the lock
		extern void disable_interrupts() OPTIONAL_THREAD;
		extern void enable_interrupts_noPoll() OPTIONAL_THREAD;

		#ifdef __CFA_DEBUG__
			// record which function/thread last acquired the lock (debug builds only)
			void __cfaabi_dbg_record(__spinlock_t & this, const char prev_name[]);
		#else
			#define __cfaabi_dbg_record(x, y)
		#endif
	}

	// construct an unlocked spinlock
	static inline void ?{}( __spinlock_t & this ) {
		this.lock = 0;
	}

	// Lock the spinlock, return false if already acquired.
	// On success interrupts stay disabled until the matching unlock();
	// on failure they are re-enabled before returning.
	static inline bool try_lock  ( __spinlock_t & this __cfaabi_dbg_ctx_param2 ) {
		disable_interrupts();
		// read before test-and-set to avoid a bus-locking RMW when the lock is visibly held
		bool result = (this.lock == 0) && (__atomic_test_and_set( &this.lock, __ATOMIC_ACQUIRE ) == 0);
		if( result ) {
			__cfaabi_dbg_record( this, caller );
		} else {
			enable_interrupts_noPoll();
		}
		return result;
	}

	// Lock the spinlock, spin (with exponential backoff unless NOEXPBACK) if already acquired.
	// Interrupts remain disabled until the matching unlock().
	static inline void lock( __spinlock_t & this __cfaabi_dbg_ctx_param2 ) {
		#ifndef NOEXPBACK
			enum { SPIN_START = 4, SPIN_END = 64 * 1024, };
			unsigned int spin = SPIN_START;
		#endif

		disable_interrupts();
		for ( unsigned int i = 1;; i += 1 ) {
			// cheap read first; only attempt the atomic RMW when the lock looks free
			if ( (this.lock == 0) && (__atomic_test_and_set( &this.lock, __ATOMIC_ACQUIRE ) == 0) ) break;
			#ifndef NOEXPBACK
				// exponential spin
				for ( volatile unsigned int s = 0; s < spin; s += 1 ) Pause();

				// slowly increase by powers of 2
				if ( i % 64 == 0 ) spin += spin;

				// prevent overflow by restarting the backoff cycle
				if ( spin > SPIN_END ) spin = SPIN_START;
			#else
				Pause();
			#endif
		}
		__cfaabi_dbg_record( this, caller );
	}

	// release the spinlock and re-enable interrupts
	static inline void unlock( __spinlock_t & this ) {
		__atomic_clear( &this.lock, __ATOMIC_RELEASE );
		enable_interrupts_noPoll();
	}

	#ifdef __CFA_WITH_VERIFY__
		extern bool __cfaabi_dbg_in_kernel();
	#endif

	// binary semaphore built on a pthread mutex/condition pair;
	// post() before wait() is remembered via the 'signaled' flag
	struct __bin_sem_t {
		bool signaled;
		pthread_mutex_t lock;
		pthread_cond_t  cond;
	};

	// construct an initially-unsignaled semaphore
	static inline void ?{}(__bin_sem_t & this) with( this ) {
		signaled = false;
		pthread_mutex_init(&lock, NULL);
		pthread_cond_init (&cond, NULL);
	}

	static inline void ^?{}(__bin_sem_t & this) with( this ) {
		pthread_mutex_destroy(&lock);
		pthread_cond_destroy (&cond);
	}

	// block until the semaphore is signaled, then consume the signal
	static inline void wait(__bin_sem_t & this) with( this ) {
		verify(__cfaabi_dbg_in_kernel());
		pthread_mutex_lock(&lock);
			// must be a loop, not an if: pthread_cond_wait may wake spuriously,
			// so the predicate is re-checked after every wakeup
			while(!signaled) {
				pthread_cond_wait(&cond, &lock);
			}
			signaled = false;
		pthread_mutex_unlock(&lock);
	}

	// signal the semaphore, waking one waiter if any; idempotent while already signaled
	static inline void post(__bin_sem_t & this) with( this ) {
		verify(__cfaabi_dbg_in_kernel());
		pthread_mutex_lock(&lock);
			bool needs_signal = !signaled;
			signaled = true;
		pthread_mutex_unlock(&lock);

		// signal outside the mutex to avoid waking a thread that immediately blocks on it
		if (needs_signal) pthread_cond_signal(&cond);
	}
#endif