Files: 1 edited
libcfa/src/concurrency/kernel_private.hfa (modified) (5 diffs)
Legend:
- Unmodified
- Added
- Removed
libcfa/src/concurrency/kernel_private.hfa
--- libcfa/src/concurrency/kernel_private.hfa (r1eb239e4)
+++ libcfa/src/concurrency/kernel_private.hfa (rfd9b524)
@@ -10,6 +10,6 @@
 // Created On       : Mon Feb 13 12:27:26 2017
 // Last Modified By : Peter A. Buhr
-// Last Modified On : Sat Nov 30 19:25:02 2019
-// Update Count     : 8
+// Last Modified On : Wed Aug 12 08:21:33 2020
+// Update Count     : 9
 //
 
@@ -121,22 +121,4 @@
 void unregister( struct __processor_id_t * proc );
 
-//-----------------------------------------------------------------------
-// Cluster idle lock/unlock
-static inline void lock(__cluster_idles & this) {
-	for() {
-		uint64_t l = this.lock;
-		if(
-			(0 == (l % 2))
-			&& __atomic_compare_exchange_n(&this.lock, &l, l + 1, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)
-		) return;
-		Pause();
-	}
-}
-
-static inline void unlock(__cluster_idles & this) {
-	/* paranoid */ verify( 1 == (this.lock % 2) );
-	__atomic_fetch_add( &this.lock, 1, __ATOMIC_SEQ_CST );
-}
-
 //=======================================================================
 // Reader-writer lock implementation
@@ -150,5 +132,5 @@
 	while( __builtin_expect(__atomic_exchange_n(ll, (bool)true, __ATOMIC_SEQ_CST), false) ) {
 		while(__atomic_load_n(ll, (int)__ATOMIC_RELAXED))
-			asm volatile("pause");
+			Pause();
 	}
 	/* paranoid */ verify(*ll);
@@ -204,5 +186,5 @@
 	// Step 1 : make sure no writer are in the middle of the critical section
 	while(__atomic_load_n(&lock, (int)__ATOMIC_RELAXED))
-		asm volatile("pause");
+		Pause();
 
 	// Fence needed because we don't want to start trying to acquire the lock
@@ -266,12 +248,5 @@
 // pop thread from the ready queue of a cluster
 // returns 0p if empty
-// May return 0p spuriously
 __attribute__((hot)) struct $thread * pop(struct cluster * cltr);
-
-//-----------------------------------------------------------------------
-// pop thread from the ready queue of a cluster
-// returns 0p if empty
-// guaranteed to find any threads added before this call
-__attribute__((hot)) struct $thread * pop_slow(struct cluster * cltr);
 
 //-----------------------------------------------------------------------
Note: See TracChangeset for help on using the changeset viewer.