Changeset 13073be for src/libcfa
- Timestamp: May 25, 2018, 1:37:34 PM (7 years ago)
- Branches: ADT, aaron-thesis, arm-eh, ast-experimental, cleanup-dtors, deferred_resn, demangler, enum, forall-pointer-decay, jacob/cs343-translation, jenkins-sandbox, master, new-ast, new-ast-unique-expr, new-env, no_list, persistent-indexer, pthread-emulation, qualifiedEnum, with_gc
- Children: 8dbedfc
- Parents: a1a17a74
- Location: src/libcfa
- Files: 2 edited
src/libcfa/bits/locks.h
a1a17a74 → 13073be:

 #endif

-#if __SIZEOF_SIZE_T__ == 8
-	#define __lock_test_and_test_and_set( lock ) (lock) == 0 && __sync_lock_test_and_set_8( &(lock), 1 ) == 0
-	#define __lock_release( lock ) __sync_lock_release_8( &(lock) );
-#elif __SIZEOF_SIZE_T__ == 4
-	#define __lock_test_and_test_and_set( lock ) (lock) == 0 && __sync_lock_test_and_set_4( &(lock), 1 ) == 0
-	#define __lock_release( lock ) __sync_lock_release_4( &(lock) );
-#else
-	#error unsupported architecture
-#endif
-
 struct __spinlock_t {
-	__ALIGN__ volatile size_t lock;
+	// Wrap in struct to prevent false sharing with debug info
+	struct {
+		// Align lock on 128-bit boundary
+		__ALIGN__ volatile _Bool lock;
+	};
 #ifdef __CFA_DEBUG__
+	// previous function to acquire the lock
 	const char * prev_name;
+	// previous thread to acquire the lock
 	void* prev_thrd;
 #endif
…
 	// Lock the spinlock, return false if already acquired
 	static inline _Bool try_lock ( __spinlock_t & this __cfaabi_dbg_ctx_param2 ) {
-		_Bool result = __lock_test_and_test_and_set( this.lock );
+		_Bool result = (this.lock == 0) && (__atomic_test_and_set( &this.lock, __ATOMIC_ACQUIRE ) == 0);
 		if( result ) {
 			disable_interrupts();
…
 		for ( unsigned int i = 1;; i += 1 ) {
-			if ( __lock_test_and_test_and_set( this.lock ) ) break;
+			if ( (this.lock == 0) && (__atomic_test_and_set( &this.lock, __ATOMIC_ACQUIRE ) == 0) ) break;
 			#ifndef NOEXPBACK
 				// exponential spin
…
 	}

-	// // Lock the spinlock, yield if already acquired
-	// static inline void lock_yield( __spinlock_t & this __cfaabi_dbg_ctx_param2 ) {
-	// 	for ( unsigned int i = 1;; i += 1 ) {
-	// 		if ( __lock_test_and_test_and_set( this.lock ) ) break;
-	// 		yield( i );
-	// 	}
-	// 	disable_interrupts();
-	// 	__cfaabi_dbg_debug_do(
-	// 		this.prev_name = caller;
-	// 		this.prev_thrd = this_thread;
-	// 	)
-	// }
-
 	static inline void unlock( __spinlock_t & this ) {
 		enable_interrupts_noPoll();
-		__lock_release( this.lock );
+		__atomic_clear( &this.lock, __ATOMIC_RELEASE );
 	}
 #endif
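This change drops the word-sized `__sync` lock macros in favour of a `_Bool` flag driven by the GCC/Clang `__atomic` builtins, while keeping the test-and-test-and-set pattern: spin on a plain read and only attempt the atomic read-modify-write once the lock looks free. Below is a minimal standalone sketch of that pattern, not the libcfa code itself; the `toy_` names, the absence of exponential backoff, and the `main` driver are illustrative assumptions.

```c
// Sketch of a test-and-test-and-set spinlock using GCC/Clang __atomic builtins.
// Names are illustrative; this is not the libcfa __spinlock_t implementation.
#include <stdbool.h>
#include <stdio.h>

struct toy_spinlock {
	volatile _Bool lock;       // false = free, true = held
};

static inline _Bool toy_try_lock( struct toy_spinlock * this ) {
	// Plain read first: waiters spin on a shared cache line instead of
	// repeatedly issuing atomic read-modify-writes that bounce the line around.
	return (this->lock == 0)
	    && (__atomic_test_and_set( &this->lock, __ATOMIC_ACQUIRE ) == 0);
}

static inline void toy_lock( struct toy_spinlock * this ) {
	while( ! toy_try_lock( this ) ) {
		// busy-wait; a real implementation adds exponential backoff or yields
	}
}

static inline void toy_unlock( struct toy_spinlock * this ) {
	// Release store: writes inside the critical section become visible
	// before the lock is observed as free.
	__atomic_clear( &this->lock, __ATOMIC_RELEASE );
}

int main( void ) {
	struct toy_spinlock l = { 0 };
	toy_lock( &l );
	printf( "holding lock\n" );
	toy_unlock( &l );
	return 0;
}
```

The plain read before `__atomic_test_and_set` is what makes this "test-and-test-and-set" rather than plain test-and-set, and the acquire/release pairing replaces the full barriers implied by the old `__sync` builtins.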
src/libcfa/concurrency/preemption.c
a1a17a74 → 13073be:

 void disable_interrupts() {
 	with( kernelTLS.preemption_state ) {
-		enabled = false;
+		static_assert(__atomic_always_lock_free(sizeof(enabled), &enabled), "Must be lock-free");
+
+		// Set enabled flag to false
+		// should be atomic to avoid preemption in the middle of the operation.
+		// use memory order RELAXED since there is no inter-thread on this variable requirements
+		__atomic_store_n(&enabled, false, __ATOMIC_RELAXED);
+
+		// Signal the compiler that a fence is needed but only for signal handlers
+		__atomic_signal_fence(__ATOMIC_ACQUIRE);
+
 		__attribute__((unused)) unsigned short new_val = disable_count + 1;
 		disable_count = new_val;
…
 // If counter reaches 0, execute any pending CtxSwitch
 void enable_interrupts( __cfaabi_dbg_ctx_param ) {
-	processor   * proc = kernelTLS.this_processor; // Cache the processor now since interrupts can start happening after the atomic add
-	thread_desc * thrd = kernelTLS.this_thread;    // Cache the thread now since interrupts can start happening after the atomic add
+	processor   * proc = kernelTLS.this_processor; // Cache the processor now since interrupts can start happening after the atomic store
+	thread_desc * thrd = kernelTLS.this_thread;    // Cache the thread now since interrupts can start happening after the atomic store

 	with( kernelTLS.preemption_state ){
…
 		// Check if we need to prempt the thread because an interrupt was missed
 		if( prev == 1 ) {
-			enabled = true;
+			static_assert(__atomic_always_lock_free(sizeof(enabled), &enabled), "Must be lock-free");
+
+			// Set enabled flag to true
+			// should be atomic to avoid preemption in the middle of the operation.
+			// use memory order RELAXED since there is no inter-thread on this variable requirements
+			__atomic_store_n(&enabled, true, __ATOMIC_RELAXED);
+
+			// Signal the compiler that a fence is needed but only for signal handlers
+			__atomic_signal_fence(__ATOMIC_RELEASE);
 			if( proc->pending_preemption ) {
 				proc->pending_preemption = false;
…
 	verifyf( prev != 0u, "Incremented from %u\n", prev ); // If this triggers someone is enabled already enabled interrupts
 	if( prev == 1 ) {
-		kernelTLS.preemption_state.enabled = true;
+		static_assert(__atomic_always_lock_free(sizeof(kernelTLS.preemption_state.enabled), &kernelTLS.preemption_state.enabled), "Must be lock-free");
+		// Set enabled flag to true
+		// should be atomic to avoid preemption in the middle of the operation.
+		// use memory order RELAXED since there is no inter-thread on this variable requirements
+		__atomic_store_n(&kernelTLS.preemption_state.enabled, true, __ATOMIC_RELAXED);
+
+		// Signal the compiler that a fence is needed but only for signal handlers
+		__atomic_signal_fence(__ATOMIC_RELEASE);
 	}
 }
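The `enabled` flag is only contended between a thread and a signal handler running on that same thread, so the commit uses `__ATOMIC_RELAXED` stores paired with `__atomic_signal_fence`, which emits no hardware fence and only restricts compiler reordering. The following is a small self-contained sketch of that pattern under assumed names (`preemption_enabled`, `pending`, `alarm_handler`, `disable_preemption`, `enable_preemption` are illustrative, not the CFA runtime's identifiers).

```c
// Sketch: relaxed atomic stores ordered against a same-thread signal handler
// with a signal fence. Names are illustrative, not taken from libcfa.
#include <stdbool.h>
#include <signal.h>

static _Thread_local volatile bool preemption_enabled = true;
static _Thread_local volatile sig_atomic_t pending = 0;

void disable_preemption( void ) {
	// Store only needs to be indivisible; no other thread ever reads this flag.
	__atomic_store_n( &preemption_enabled, false, __ATOMIC_RELAXED );
	// Compiler-only barrier: keeps the store ordered with respect to a signal
	// handler that may run on this thread; no fence instruction is emitted.
	__atomic_signal_fence( __ATOMIC_ACQUIRE );
}

void enable_preemption( void ) {
	__atomic_store_n( &preemption_enabled, true, __ATOMIC_RELAXED );
	__atomic_signal_fence( __ATOMIC_RELEASE );
	if( pending ) {
		pending = 0;
		// ... perform the deferred context switch here ...
	}
}

// Signal handler: defer the preemption if it arrives while disabled.
void alarm_handler( int sig ) {
	(void)sig;
	if( ! preemption_enabled ) {
		pending = 1;           // remember the missed preemption
		return;
	}
	// ... safe to preempt immediately ...
}

int main( void ) {
	signal( SIGALRM, alarm_handler );
	disable_preemption();
	// ... region that must not be preempted ...
	enable_preemption();
	return 0;
}
```

Because the flag never crosses threads, a full `__atomic_thread_fence` (and the hardware barrier it can imply) would be unnecessary; the signal fence documents and enforces only the compiler-level ordering the handler relies on.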