Changeset rbbe3719 (diff against reba9d27): 1 file edited
libcfa/src/concurrency/locks.hfa
--- libcfa/src/concurrency/locks.hfa (reba9d27)
+++ libcfa/src/concurrency/locks.hfa (rbbe3719)
@@ -39,8 +39,8 @@
 struct Semaphore0nary {
     __spinlock_t lock; // needed to protect
-    mpsc_queue( $thread ) queue;
+    mpsc_queue( thread$ ) queue;
 };
 
-static inline bool P(Semaphore0nary & this, $thread * thrd) {
+static inline bool P(Semaphore0nary & this, thread$ * thrd) {
     /* paranoid */ verify(!thrd`next);
     /* paranoid */ verify(!(&(*thrd)`next));
@@ -51,5 +51,5 @@
 
 static inline bool P(Semaphore0nary & this) {
-    $thread * thrd = active_thread();
+    thread$ * thrd = active_thread();
     P(this, thrd);
     park();
@@ -57,6 +57,6 @@
 }
 
-static inline $thread * V(Semaphore0nary & this, bool doUnpark = true) {
-    $thread * next;
+static inline thread$ * V(Semaphore0nary & this, bool doUnpark = true) {
+    thread$ * next;
     lock(this.lock __cfaabi_dbg_ctx2);
     for (;;) {
@@ -124,5 +124,5 @@
 static inline bool P(ThreadBenaphore & this, bool wait) { return wait ? P(this) : tryP(this); }
 
-static inline $thread * V(ThreadBenaphore & this, bool doUnpark = true) {
+static inline thread$ * V(ThreadBenaphore & this, bool doUnpark = true) {
     if (V(this.ben)) return 0p;
     return V(this.sem, doUnpark);
@@ -134,5 +134,5 @@
     __spinlock_t lock;
     int count;
-    __queue_t( $thread ) waiting;
+    __queue_t( thread$ ) waiting;
 };
 
@@ -142,5 +142,5 @@
 bool V (semaphore & this);
 bool V (semaphore & this, unsigned count);
-$thread * V (semaphore & this, bool );
+thread$ * V (semaphore & this, bool );
 
 //----------
@@ -156,5 +156,5 @@
 static inline size_t on_wait ( single_acquisition_lock & this ) { return on_wait ( (blocking_lock &)this ); }
 static inline void on_wakeup( single_acquisition_lock & this, size_t v ) { on_wakeup ( (blocking_lock &)this, v ); }
-static inline void on_notify( single_acquisition_lock & this, struct $thread * t ) { on_notify( (blocking_lock &)this, t ); }
+static inline void on_notify( single_acquisition_lock & this, struct thread$ * t ) { on_notify( (blocking_lock &)this, t ); }
 
 //----------
@@ -170,8 +170,8 @@
 static inline size_t on_wait ( owner_lock & this ) { return on_wait ( (blocking_lock &)this ); }
 static inline void on_wakeup( owner_lock & this, size_t v ) { on_wakeup ( (blocking_lock &)this, v ); }
-static inline void on_notify( owner_lock & this, struct $thread * t ) { on_notify( (blocking_lock &)this, t ); }
+static inline void on_notify( owner_lock & this, struct thread$ * t ) { on_notify( (blocking_lock &)this, t ); }
 
 struct fast_lock {
-    $thread * volatile owner;
+    thread$ * volatile owner;
     ThreadBenaphore sem;
 };
@@ -179,6 +179,6 @@
 static inline void ?{}(fast_lock & this) { this.owner = 0p; }
 
-static inline bool $try_lock(fast_lock & this, $thread * thrd) {
-    $thread * exp = 0p;
+static inline bool $try_lock(fast_lock & this, thread$ * thrd) {
+    thread$ * exp = 0p;
     return __atomic_compare_exchange_n(&this.owner, &exp, thrd, false, __ATOMIC_SEQ_CST, __ATOMIC_RELAXED);
 }
@@ -186,5 +186,5 @@
 static inline void lock( fast_lock & this ) __attribute__((artificial));
 static inline void lock( fast_lock & this ) {
-    $thread * thrd = active_thread();
+    thread$ * thrd = active_thread();
     /* paranoid */ verify(thrd != this.owner);
 
@@ -197,10 +197,10 @@
 static inline bool try_lock( fast_lock & this ) __attribute__((artificial));
 static inline bool try_lock ( fast_lock & this ) {
-    $thread * thrd = active_thread();
+    thread$ * thrd = active_thread();
     /* paranoid */ verify(thrd != this.owner);
     return $try_lock(this, thrd);
 }
 
-static inline $thread * unlock( fast_lock & this ) __attribute__((artificial));
-static inline $thread * unlock( fast_lock & this ) {
+static inline thread$ * unlock( fast_lock & this ) __attribute__((artificial));
+static inline thread$ * unlock( fast_lock & this ) {
     /* paranoid */ verify(active_thread() == this.owner);
@@ -216,5 +216,5 @@
 static inline size_t on_wait( fast_lock & this ) { unlock(this); return 0; }
 static inline void on_wakeup( fast_lock & this, size_t ) { lock(this); }
-static inline void on_notify( fast_lock &, struct $thread * t ) { unpark(t); }
+static inline void on_notify( fast_lock &, struct thread$ * t ) { unpark(t); }
 
 struct mcs_node {
@@ -248,8 +248,8 @@
 
     // Current thread owning the lock
-    struct $thread * owner;
+    struct thread$ * owner;
 
     // List of blocked threads
-    dlist( $thread ) blocked_threads;
+    dlist( thread$ ) blocked_threads;
 
     // Used for comparing and exchanging
@@ -324,66 +324,8 @@
     }
 
-    // linear backoff bounded by spin_count
-    spin = spin_start;
-    int spin_counter = 0;
-    int yield_counter = 0;
-    for ( ;; ) {
-        if(try_lock_contention(this)) return true;
-        if(spin_counter < spin_count) {
-            for (int i = 0; i < spin; i++) Pause();
-            if (spin < spin_end) spin += spin;
-            else spin_counter++;
-        } else if (yield_counter < yield_count) {
-            // after linear backoff yield yield_count times
-            yield_counter++;
-            yield();
-        } else { break; }
-    }
-
-    // block until signalled
-    while (block(this)) if(try_lock_contention(this)) return true;
-
-    // this should never be reached as block(this) always returns true
-    return false;
-}
-
-static inline bool lock_improved(linear_backoff_then_block_lock & this) with(this) {
-    // if owner just return
-    if (active_thread() == owner) return true;
-    size_t compare_val = 0;
-    int spin = spin_start;
-    // linear backoff
-    for( ;; ) {
-        compare_val = 0;
-        if (internal_try_lock(this, compare_val)) return true;
-        if (2 == compare_val) break;
-        for (int i = 0; i < spin; i++) Pause();
-        if (spin >= spin_end) break;
-        spin += spin;
-    }
-
-    // linear backoff bounded by spin_count
-    spin = spin_start;
-    int spin_counter = 0;
-    int yield_counter = 0;
-    for ( ;; ) {
-        compare_val = 0;
-        if(internal_try_lock(this, compare_val)) return true;
-        if (2 == compare_val) break;
-        if(spin_counter < spin_count) {
-            for (int i = 0; i < spin; i++) Pause();
-            if (spin < spin_end) spin += spin;
-            else spin_counter++;
-        } else if (yield_counter < yield_count) {
-            // after linear backoff yield yield_count times
-            yield_counter++;
-            yield();
-        } else { break; }
-    }
-
-
+    if(2 != compare_val && try_lock_contention(this)) return true;
     // block until signalled
     while (block(this)) if(try_lock_contention(this)) return true;
 
     // this should never be reached as block(this) always returns true
     return false;
@@ -395,12 +337,12 @@
     if (__atomic_exchange_n(&lock_value, 0, __ATOMIC_RELEASE) == 1) return;
     lock( spinlock __cfaabi_dbg_ctx2 );
-    $thread * t = &try_pop_front( blocked_threads );
+    thread$ * t = &try_pop_front( blocked_threads );
     unlock( spinlock );
     unpark( t );
 }
 
-static inline void on_notify(linear_backoff_then_block_lock & this, struct $thread * t ) { unpark(t); }
+static inline void on_notify(linear_backoff_then_block_lock & this, struct thread$ * t ) { unpark(t); }
 static inline size_t on_wait(linear_backoff_then_block_lock & this) { unlock(this); return 0; }
-static inline void on_wakeup(linear_backoff_then_block_lock & this, size_t recursion ) { lock_improved(this); }
+static inline void on_wakeup(linear_backoff_then_block_lock & this, size_t recursion ) { lock(this); }
 
 //-----------------------------------------------------------------------------
@@ -408,5 +350,5 @@
 trait is_blocking_lock(L & | sized(L)) {
     // For synchronization locks to use when acquiring
-    void on_notify( L &, struct $thread * );
+    void on_notify( L &, struct thread$ * );
 
     // For synchronization locks to use when releasing
@@ -442,5 +384,5 @@
     int count;
 };
 
 
 void ?{}( condition_variable(L) & this );
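Reviewer note on the linear_backoff_then_block_lock hunks: after this changeset, lock() spins with a doubling pause window, makes one contended attempt that marks the lock word 2, and then blocks until unlock() wakes it. Below is a minimal plain-C sketch of that pattern, assuming C11 atomics and POSIX sched_yield() as stand-ins for the CFA runtime's Pause(), park(), and unpark(); the names, constants, and seq_cst orderings are illustrative, not the CFA API.

/* Sketch of the "linear backoff then block" pattern from locks.hfa. */
#include <sched.h>      /* sched_yield */
#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

enum { SPIN_START = 4, SPIN_END = 64 };   /* illustrative backoff bounds */

typedef struct {
    atomic_size_t lock_value;   /* 0 = free, 1 = held, 2 = held + contended */
} backoff_lock;

static inline void cpu_pause(void) {
    /* stand-in for CFA's Pause(); a compiler-only fence instead of the
       x86 PAUSE instruction, to keep the sketch portable */
    atomic_signal_fence(memory_order_seq_cst);
}

/* CAS 0 -> 1, reporting the value actually seen (cf. internal_try_lock) */
static bool try_acquire(backoff_lock * this, size_t * observed) {
    size_t expected = 0;
    bool ok = atomic_compare_exchange_strong(&this->lock_value, &expected, 1);
    *observed = expected;
    return ok;
}

static void acquire(backoff_lock * this) {
    size_t observed = 0;
    int spin = SPIN_START;

    /* linear backoff: spin, doubling the pause window, until the lock is
       ours, proves contended (2), or the window reaches SPIN_END */
    for ( ;; ) {
        if (try_acquire(this, &observed)) return;
        if (observed == 2) break;
        for (int i = 0; i < spin; i++) cpu_pause();
        if (spin >= SPIN_END) break;
        spin += spin;
    }

    /* one contended attempt: exchange in 2 ("held, waiters possible"),
       mirroring try_lock_contention() in the changeset */
    if (observed != 2 && atomic_exchange(&this->lock_value, 2) == 0) return;

    /* block until signalled: the CFA lock enqueues on blocked_threads and
       parks; this portable sketch can only yield and retry */
    for ( ;; ) {
        sched_yield();  /* placeholder for block()/park() */
        if (atomic_exchange(&this->lock_value, 2) == 0) return;
    }
}

static void release(backoff_lock * this) {
    /* fast path: value 1 means nobody ever contended, nothing to wake */
    if (atomic_exchange(&this->lock_value, 0) == 1) return;
    /* contended path: locks.hfa pops one thread off blocked_threads under
       the spinlock and unparks it; the yielding sketch needs nothing more */
}

With lock_improved() folded into lock(), on_wakeup() can simply call lock() again, which is the one-line change at the end of that hunk.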
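Since the changeset also touches every on_notify/on_wait/on_wakeup overload, here is a sketch of the call protocol the is_blocking_lock trait captures. This is hypothetical C scaffolding: the function-pointer table and park_current_thread() stand in for CFA's trait dispatch and park(); only the call order is taken from the header.

/* Sketch of the is_blocking_lock protocol: release, sleep, reacquire. */
#include <stddef.h>

typedef struct thread_t thread_t;       /* stands in for CFA's thread$ */

typedef struct {
    void   (*on_notify)(void * lock, thread_t * t); /* signaller side: wake t */
    size_t (*on_wait)  (void * lock);   /* waiter: release, return recursion depth */
    void   (*on_wakeup)(void * lock, size_t v);     /* waiter: reacquire to saved depth */
} blocking_lock_ops;

static void park_current_thread(void) { /* placeholder for CFA's park() */ }

/* A condition-variable style wait built on the trait's three hooks. */
static void wait_with(void * lock, const blocking_lock_ops * ops) {
    size_t recursion = ops->on_wait(lock);  /* drop the lock before sleeping */
    park_current_thread();                  /* woken via ops->on_notify(lock, t) */
    ops->on_wakeup(lock, recursion);        /* retake the lock after resuming */
}

The fast_lock overloads in the diff instantiate exactly this shape: on_wait() is unlock()-and-return-0, on_wakeup() is lock(), and on_notify() is unpark().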