Changeset a45e21c
- Timestamp: Mar 30, 2023, 3:52:00 PM (20 months ago)
- Branches: ADT, ast-experimental, master
- Children: 76a8400
- Parents: efdd18c
- Location: libcfa/src/concurrency
- Files: 2 edited
Legend:
- Unmodified (context lines, no prefix)
- Added (lines prefixed with +)
- Removed (lines prefixed with -)
- … marks elided unchanged lines
libcfa/src/concurrency/channel.hfa
--- channel.hfa (efdd18c)
+++ channel.hfa (a45e21c)
 #include <locks.hfa>
 #include <list.hfa>
-
-#define __COOP_CHANNEL
-#ifdef __PREVENTION_CHANNEL
-forall( T ) {
-    struct channel {
-        size_t size, count, front, back;
-        T * buffer;
-        thread$ * chair;
-        T * chair_elem;
-        exp_backoff_then_block_lock c_lock, p_lock;
-        __spinlock_t mutex_lock;
-        char __padding[64]; // avoid false sharing in arrays of channels
-    };
-
-    static inline void ?{}( channel(T) &c, size_t _size ) with(c) {
-        size = _size;
-        front = back = count = 0;
-        buffer = aalloc( size );
-        chair = 0p;
-        mutex_lock{};
-        c_lock{};
-        p_lock{};
-    }
-
-    static inline void ?{}( channel(T) &c ){ ((channel(T) &)c){ 0 }; }
-    static inline void ^?{}( channel(T) &c ) with(c) { delete( buffer ); }
-    static inline size_t get_count( channel(T) & chan ) with(chan) { return count; }
-    static inline size_t get_size( channel(T) & chan ) with(chan) { return size; }
-    static inline bool has_waiters( channel(T) & chan ) with(chan) { return chair != 0p; }
-
-    static inline void insert_( channel(T) & chan, T & elem ) with(chan) {
-        memcpy((void *)&buffer[back], (void *)&elem, sizeof(T));
-        count += 1;
-        back++;
-        if ( back == size ) back = 0;
-    }
-
-    static inline void insert( channel(T) & chan, T elem ) with( chan ) {
-        lock( p_lock );
-        lock( mutex_lock __cfaabi_dbg_ctx2 );
-
-        // have to check for the zero size channel case
-        if ( size == 0 && chair != 0p ) {
-            memcpy((void *)chair_elem, (void *)&elem, sizeof(T));
-            unpark( chair );
-            chair = 0p;
-            unlock( mutex_lock );
-            unlock( p_lock );
-            unlock( c_lock );
-            return;
-        }
-
-        // wait if buffer is full, work will be completed by someone else
-        if ( count == size ) {
-            chair = active_thread();
-            chair_elem = &elem;
-            unlock( mutex_lock );
-            park( );
-            return;
-        } // if
-
-        if ( chair != 0p ) {
-            memcpy((void *)chair_elem, (void *)&elem, sizeof(T));
-            unpark( chair );
-            chair = 0p;
-            unlock( mutex_lock );
-            unlock( p_lock );
-            unlock( c_lock );
-            return;
-        }
-        insert_( chan, elem );
-
-        unlock( mutex_lock );
-        unlock( p_lock );
-    }
-
-    static inline T remove( channel(T) & chan ) with(chan) {
-        lock( c_lock );
-        lock( mutex_lock __cfaabi_dbg_ctx2 );
-        T retval;
-
-        // have to check for the zero size channel case
-        if ( size == 0 && chair != 0p ) {
-            memcpy((void *)&retval, (void *)chair_elem, sizeof(T));
-            unpark( chair );
-            chair = 0p;
-            unlock( mutex_lock );
-            unlock( p_lock );
-            unlock( c_lock );
-            return retval;
-        }
-
-        // wait if buffer is empty, work will be completed by someone else
-        if ( count == 0 ) {
-            chair = active_thread();
-            chair_elem = &retval;
-            unlock( mutex_lock );
-            park( );
-            return retval;
-        }
-
-        // Remove from buffer
-        memcpy((void *)&retval, (void *)&buffer[front], sizeof(T));
-        count -= 1;
-        front++;
-        if ( front == size ) front = 0;
-
-        if ( chair != 0p ) {
-            insert_( chan, *chair_elem ); // do waiting producer work
-            unpark( chair );
-            chair = 0p;
-            unlock( mutex_lock );
-            unlock( p_lock );
-            unlock( c_lock );
-            return retval;
-        }
-
-        unlock( mutex_lock );
-        unlock( c_lock );
-        return retval;
-    }
-
-} // forall( T )
-#endif
-
-#ifdef __COOP_CHANNEL
+#include <mutex_stmt.hfa>
 
 // link field used for threads waiting on channel
…
 }
 
+// wake one thread from the list
+static inline void wake_one( dlist( wait_link ) & queue ) {
+    wait_link & popped = try_pop_front( queue );
+    unpark( popped.t );
+}
+
+// returns true if woken due to shutdown
+// blocks thread on list and releases passed lock
+static inline bool block( dlist( wait_link ) & queue, void * elem_ptr, go_mutex & lock ) {
+    wait_link w{ active_thread(), elem_ptr };
+    insert_last( queue, w );
+    unlock( lock );
+    park();
+    return w.elem == 0p;
+}
+
+// void * used for some fields since exceptions don't work with parametric polymorphism currently
+exception channel_closed {
+    // on failed insert elem is a ptr to the element attempting to be inserted
+    // on failed remove elem ptr is 0p
+    // on resumption of a failed insert this elem will be inserted
+    // so a user may modify it in the resumption handler
+    void * elem;
+
+    // pointer to chan that is closed
+    void * closed_chan;
+};
+vtable(channel_closed) channel_closed_vt;
+
+// #define CHAN_STATS // define this to get channel stats printed in dtor
+
 forall( T ) {
 
-    struct channel {
-        size_t size;
-        size_t front, back, count;
+    struct __attribute__((aligned(128))) channel {
+        size_t size, front, back, count;
         T * buffer;
-        dlist( wait_link ) prods, cons;
-        exp_backoff_then_block_lock mutex_lock;
+        dlist( wait_link ) prods, cons; // lists of blocked threads
+        go_mutex mutex_lock;            // MX lock
+        bool closed;                    // indicates channel close/open
+        #ifdef CHAN_STATS
+        size_t blocks, operations;      // counts total ops and ops resulting in a blocked thd
+        #endif
     };
 
…
         cons{};
         mutex_lock{};
+        closed = false;
+        #ifdef CHAN_STATS
+        blocks = 0;
+        operations = 0;
+        #endif
     }
 
     static inline void ?{}( channel(T) &c ){ ((channel(T) &)c){ 0 }; }
-    static inline void ^?{}( channel(T) &c ) with(c) { delete( buffer ); }
+    static inline void ^?{}( channel(T) &c ) with(c) {
+        #ifdef CHAN_STATS
+        printf("Channel %p Blocks: %lu, Operations: %lu, %.2f%% of ops blocked\n", &c, blocks, operations, ((double)blocks)/operations * 100);
+        #endif
+        verifyf( cons`isEmpty && prods`isEmpty, "Attempted to delete channel with waiting threads (Deadlock).\n" );
+        delete( buffer );
+    }
     static inline size_t get_count( channel(T) & chan ) with(chan) { return count; }
     static inline size_t get_size( channel(T) & chan ) with(chan) { return size; }
…
     static inline bool has_waiting_producers( channel(T) & chan ) with(chan) { return !prods`isEmpty; }
 
-    static inline void insert_( channel(T) & chan, T & elem ) with(chan) {
+    // closes the channel and notifies all blocked threads
+    static inline void close( channel(T) & chan ) with(chan) {
+        lock( mutex_lock );
+        closed = true;
+
+        // flush waiting consumers and producers
+        while ( has_waiting_consumers( chan ) ) {
+            cons`first.elem = 0p;
+            wake_one( cons );
+        }
+        while ( has_waiting_producers( chan ) ) {
+            prods`first.elem = 0p;
+            wake_one( prods );
+        }
+        unlock(mutex_lock);
+    }
+
+    static inline void is_closed( channel(T) & chan ) with(chan) { return closed; }
+
+    static inline void flush( channel(T) & chan, T elem ) with(chan) {
+        lock( mutex_lock );
+        while ( count == 0 && !cons`isEmpty ) {
+            memcpy(cons`first.elem, (void *)&elem, sizeof(T)); // do waiting consumer work
+            wake_one( cons );
+        }
+        unlock( mutex_lock );
+    }
+
+    // handles buffer insert
+    static inline void __buf_insert( channel(T) & chan, T & elem ) with(chan) {
         memcpy((void *)&buffer[back], (void *)&elem, sizeof(T));
         count += 1;
…
     }
 
-    static inline void wake_one( dlist( wait_link ) & queue ) {
-        wait_link & popped = try_pop_front( queue );
-        unpark( popped.t );
-    }
-
-    static inline void block( dlist( wait_link ) & queue, void * elem_ptr, exp_backoff_then_block_lock & lock ) {
-        wait_link w{ active_thread(), elem_ptr };
-        insert_last( queue, w );
-        unlock( lock );
-        park();
+    // does the buffer insert or hands elem directly to consumer if one is waiting
+    static inline void __do_insert( channel(T) & chan, T & elem ) with(chan) {
+        if ( count == 0 && !cons`isEmpty ) {
+            memcpy(cons`first.elem, (void *)&elem, sizeof(T)); // do waiting consumer work
+            wake_one( cons );
+        } else __buf_insert( chan, elem );
+    }
+
+    // needed to avoid an extra copy in closed case
+    static inline bool __internal_try_insert( channel(T) & chan, T & elem ) with(chan) {
+        lock( mutex_lock );
+        #ifdef CHAN_STATS
+        operations++;
+        #endif
+        if ( count == size ) { unlock( mutex_lock ); return false; }
+        __do_insert( chan, elem );
+        unlock( mutex_lock );
+        return true;
+    }
+
+    // attempts a nonblocking insert
+    // returns true if insert was successful, false otherwise
+    static inline bool try_insert( channel(T) & chan, T elem ) { return __internal_try_insert( chan, elem ); }
+
+    // handles closed case of insert routine
+    static inline void __closed_insert( channel(T) & chan, T & elem ) with(chan) {
+        channel_closed except{&channel_closed_vt, &elem, &chan };
+        throwResume except; // throw closed resumption
+        if ( !__internal_try_insert( chan, elem ) ) throw except; // if try to insert fails (would block), throw termination
     }
 
     static inline void insert( channel(T) & chan, T elem ) with(chan) {
-        lock( mutex_lock );
+        // check for close before acquire mx
+        if ( unlikely(closed) ) {
+            __closed_insert( chan, elem );
+            return;
+        }
+
+        lock( mutex_lock );
+
+        #ifdef CHAN_STATS
+        if ( !closed ) operations++;
+        #endif
+
+        // if closed handle
+        if ( unlikely(closed) ) {
+            unlock( mutex_lock );
+            __closed_insert( chan, elem );
+            return;
+        }
 
         // have to check for the zero size channel case
…
             wake_one( cons );
             unlock( mutex_lock );
-            return;
+            return true;
         }
 
         // wait if buffer is full, work will be completed by someone else
         if ( count == size ) {
-            block( prods, &elem, mutex_lock );
+            #ifdef CHAN_STATS
+            blocks++;
+            #endif
+
+            // check for if woken due to close
+            if ( unlikely( block( prods, &elem, mutex_lock ) ) )
+                __closed_insert( chan, elem );
             return;
         } // if
…
             memcpy(cons`first.elem, (void *)&elem, sizeof(T)); // do waiting consumer work
             wake_one( cons );
-        } else insert_( chan, elem );
+        } else __buf_insert( chan, elem );
 
         unlock( mutex_lock );
+        return;
+    }
+
+    // handles buffer remove
+    static inline void __buf_remove( channel(T) & chan, T & retval ) with(chan) {
+        memcpy((void *)&retval, (void *)&buffer[front], sizeof(T));
+        count -= 1;
+        front = (front + 1) % size;
+    }
+
+    // does the buffer remove and potentially does waiting producer work
+    static inline void __do_remove( channel(T) & chan, T & retval ) with(chan) {
+        __buf_remove( chan, retval );
+        if (count == size - 1 && !prods`isEmpty ) {
+            __buf_insert( chan, *(T *)prods`first.elem ); // do waiting producer work
+            wake_one( prods );
+        }
+    }
+
+    // needed to avoid an extra copy in closed case and single return val case
+    static inline bool __internal_try_remove( channel(T) & chan, T & retval ) with(chan) {
+        lock( mutex_lock );
+        #ifdef CHAN_STATS
+        operations++;
+        #endif
+        if ( count == 0 ) { unlock( mutex_lock ); return false; }
+        __do_remove( chan, retval );
+        unlock( mutex_lock );
+        return true;
+    }
+
+    // attempts a nonblocking remove
+    // returns [T, true] if remove was successful
+    // returns [T, false] if remove was unsuccessful (T uninit)
+    static inline [T, bool] try_remove( channel(T) & chan ) {
+        T retval;
+        return [ retval, __internal_try_remove( chan, retval ) ];
+    }
+
+    static inline T try_remove( channel(T) & chan, T elem ) {
+        T retval;
+        __internal_try_remove( chan, retval );
+        return retval;
+    }
+
+    // handles closed case of remove routine
+    static inline void __closed_remove( channel(T) & chan, T & retval ) with(chan) {
+        channel_closed except{&channel_closed_vt, 0p, &chan };
+        throwResume except; // throw resumption
+        if ( !__internal_try_remove( chan, retval ) ) throw except; // if try to remove fails (would block), throw termination
     }
 
     static inline T remove( channel(T) & chan ) with(chan) {
-        lock( mutex_lock );
         T retval;
+        if ( unlikely(closed) ) {
+            __closed_remove( chan, retval );
+            return retval;
+        }
+        lock( mutex_lock );
+
+        #ifdef CHAN_STATS
+        if ( !closed ) operations++;
+        #endif
+
+        if ( unlikely(closed) ) {
+            unlock( mutex_lock );
+            __closed_remove( chan, retval );
+            return retval;
+        }
 
         // have to check for the zero size channel case
…
         // wait if buffer is empty, work will be completed by someone else
         if (count == 0) {
-            block( cons, &retval, mutex_lock );
+            #ifdef CHAN_STATS
+            blocks++;
+            #endif
+            // check for if woken due to close
+            if ( unlikely( block( cons, &retval, mutex_lock ) ) )
+                __closed_remove( chan, retval );
             return retval;
         }
 
         // Remove from buffer
-        memcpy((void *)&retval, (void *)&buffer[front], sizeof(T));
-        count -= 1;
-        front = (front + 1) % size;
-
-        if (count == size - 1 && !prods`isEmpty ) {
-            insert_( chan, *(T *)prods`first.elem ); // do waiting producer work
-            wake_one( prods );
-        }
+        __do_remove( chan, retval );
 
         unlock( mutex_lock );
…
         return retval;
     }
 } // forall( T )
-#endif
-
-#ifdef __BARGE_CHANNEL
-forall( T ) {
-    struct channel {
-        size_t size;
-        size_t front, back, count;
-        T * buffer;
-        fast_cond_var( exp_backoff_then_block_lock ) prods, cons;
-        exp_backoff_then_block_lock mutex_lock;
-    };
-
-    static inline void ?{}( channel(T) &c, size_t _size ) with(c) {
-        size = _size;
-        front = back = count = 0;
-        buffer = aalloc( size );
-        prods{};
-        cons{};
-        mutex_lock{};
-    }
-
-    static inline void ?{}( channel(T) &c ){ ((channel(T) &)c){ 0 }; }
-    static inline void ^?{}( channel(T) &c ) with(c) { delete( buffer ); }
-    static inline size_t get_count( channel(T) & chan ) with(chan) { return count; }
-    static inline size_t get_size( channel(T) & chan ) with(chan) { return size; }
-    static inline bool has_waiters( channel(T) & chan ) with(chan) { return !empty( cons ) || !empty( prods ); }
-    static inline bool has_waiting_consumers( channel(T) & chan ) with(chan) { return !empty( cons ); }
-    static inline bool has_waiting_producers( channel(T) & chan ) with(chan) { return !empty( prods ); }
-
-    static inline void insert_( channel(T) & chan, T & elem ) with(chan) {
-        memcpy((void *)&buffer[back], (void *)&elem, sizeof(T));
-        count += 1;
-        back++;
-        if ( back == size ) back = 0;
-    }
-
-
-    static inline void insert( channel(T) & chan, T elem ) with(chan) {
-        lock( mutex_lock );
-
-        while ( count == size ) {
-            wait( prods, mutex_lock );
-        } // if
-
-        insert_( chan, elem );
-
-        if ( !notify_one( cons ) && count < size )
-            notify_one( prods );
-
-        unlock( mutex_lock );
-    }
-
-    static inline T remove( channel(T) & chan ) with(chan) {
-        lock( mutex_lock );
-        T retval;
-
-        while (count == 0) {
-            wait( cons, mutex_lock );
-        }
-
-        memcpy((void *)&retval, (void *)&buffer[front], sizeof(T));
-        count -= 1;
-        front = (front + 1) % size;
-
-        if ( !notify_one( prods ) && count > 0 )
-            notify_one( cons );
-
-        unlock( mutex_lock );
-        return retval;
-    }
-
-} // forall( T )
-#endif
-
-#ifdef __NO_WAIT_CHANNEL
-forall( T ) {
-    struct channel {
-        size_t size;
-        size_t front, back, count;
-        T * buffer;
-        thread$ * chair;
-        T * chair_elem;
-        exp_backoff_then_block_lock c_lock, p_lock;
-        __spinlock_t mutex_lock;
-    };
-
-    static inline void ?{}( channel(T) &c, size_t _size ) with(c) {
-        size = _size;
-        front = back = count = 0;
-        buffer = aalloc( size );
-        chair = 0p;
-        mutex_lock{};
-        c_lock{};
-        p_lock{};
-        lock( c_lock );
-    }
-
-    static inline void ?{}( channel(T) &c ){ ((channel(T) &)c){ 0 }; }
-    static inline void ^?{}( channel(T) &c ) with(c) { delete( buffer ); }
-    static inline size_t get_count( channel(T) & chan ) with(chan) { return count; }
-    static inline size_t get_size( channel(T) & chan ) with(chan) { return size; }
-    static inline bool has_waiters( channel(T) & chan ) with(chan) { return c_lock.lock_value != 0; }
-
-    static inline void insert_( channel(T) & chan, T & elem ) with(chan) {
-        memcpy((void *)&buffer[back], (void *)&elem, sizeof(T));
-        count += 1;
-        back++;
-        if ( back == size ) back = 0;
-    }
-
-    static inline void insert( channel(T) & chan, T elem ) with( chan ) {
-        lock( p_lock );
-        lock( mutex_lock __cfaabi_dbg_ctx2 );
-
-        insert_( chan, elem );
-
-        if ( count != size )
-            unlock( p_lock );
-
-        if ( count == 1 )
-            unlock( c_lock );
-
-        unlock( mutex_lock );
-    }
-
-    static inline T remove( channel(T) & chan ) with(chan) {
-        lock( c_lock );
-        lock( mutex_lock __cfaabi_dbg_ctx2 );
-        T retval;
-
-        // Remove from buffer
-        memcpy((void *)&retval, (void *)&buffer[front], sizeof(T));
-        count -= 1;
-        front = (front + 1) % size;
-
-        if ( count != 0 )
-            unlock( c_lock );
-
-        if ( count == size - 1 )
-            unlock( p_lock );
-
-        unlock( mutex_lock );
-        return retval;
-    }
-
-} // forall( T )
-#endif
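For orientation, here is a small usage sketch of the reworked channel API; it is not part of the changeset. It exercises insert, remove, close, and the channel_closed resumption/termination pair introduced above. Only channel, insert, remove, close, and channel_closed come from the diff; the Producer thread, the capacity, and the element counts are illustrative.

#include <channel.hfa>
#include <fstream.hfa>
#include <thread.hfa>

channel( int ) chan{ 64 };                  // bounded channel; capacity is illustrative

thread Producer {};
void main( Producer & p ) {
    try {
        for ( i; 1000 ) insert( chan, i );  // blocks while the buffer is full
    } catchResume ( channel_closed * e ) {
        // channel closed mid-insert: per the diff's comments, the element may be
        // modified here and is re-inserted when the handler resumes the raise
    } catch ( channel_closed * e ) {
        // termination: the insert definitively failed (would have blocked)
    }
}

int main() {
    Producer p;                             // thread starts at declaration
    int v = remove( chan );                 // blocks while the buffer is empty
    sout | v;
    close( chan );                          // wakes all blocked producers and consumers
}                                           // p joins at end of scope, seeing channel_closed

Note the ordering: close is called before p is joined, so a producer blocked on a full buffer is woken with w.elem == 0p and routed through __closed_insert rather than left parked forever.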
libcfa/src/concurrency/locks.hfa
--- locks.hfa (efdd18c)
+++ locks.hfa (a45e21c)
 #include <fstream.hfa>
 
-
 // futex headers
 #include <linux/futex.h>    /* Definition of FUTEX_* constants */
…
 // futex_mutex
 
-// - No cond var support
 // - Kernel thd blocking alternative to the spinlock
 // - No ownership (will deadlock on reacq)
…
     int state;
 
-
-    // // linear backoff omitted for now
-    // for( int spin = 4; spin < 1024; spin += spin) {
-    //     state = 0;
-    //     // if unlocked, lock and return
-    //     if (internal_try_lock(this, state)) return;
-    //     if (2 == state) break;
-    //     for (int i = 0; i < spin; i++) Pause();
-    // }
-
-    // no contention try to acquire
-    if (internal_try_lock(this, state)) return;
+    for( int spin = 4; spin < 1024; spin += spin) {
+        state = 0;
+        // if unlocked, lock and return
+        if (internal_try_lock(this, state)) return;
+        if (2 == state) break;
+        for (int i = 0; i < spin; i++) Pause();
+    }
+
+    // // no contention try to acquire
+    // if (internal_try_lock(this, state)) return;
 
     // if not in contended state, set to be in contended state
…
 
 static inline void unlock(futex_mutex & this) with(this) {
-    // if uncontended do atomic eunlock and then return
-    if (__atomic_fetch_sub(&val, 1, __ATOMIC_RELEASE) == 1) return; // TODO: try acq/rel
+    // if uncontended do atomic unlock and then return
+    if (__atomic_exchange_n(&val, 0, __ATOMIC_RELEASE) == 1) return;
 
     // otherwise threads are blocked so we must wake one
-    __atomic_store_n((int *)&val, 0, __ATOMIC_RELEASE);
     futex((int *)&val, FUTEX_WAKE, 1);
 }
…
 // to set recursion count after getting signalled;
 static inline void on_wakeup( futex_mutex & f, size_t recursion ) {}
+
+//-----------------------------------------------------------------------------
+// go_mutex
+
+// - Kernel thd blocking alternative to the spinlock
+// - No ownership (will deadlock on reacq)
+// - Golang's flavour of mutex
+// - Impl taken from Golang: src/runtime/lock_futex.go
+struct go_mutex {
+    // lock state any state other than UNLOCKED is locked
+    // enum LockState { UNLOCKED = 0, LOCKED = 1, SLEEPING = 2 };
+
+    // stores a lock state
+    int val;
+};
+
+static inline void ?{}( go_mutex & this ) with(this) { val = 0; }
+
+static inline bool internal_try_lock(go_mutex & this, int & compare_val, int new_val ) with(this) {
+    return __atomic_compare_exchange_n((int*)&val, (int*)&compare_val, new_val, false, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
+}
+
+static inline int internal_exchange(go_mutex & this, int swap ) with(this) {
+    return __atomic_exchange_n((int*)&val, swap, __ATOMIC_ACQUIRE);
+}
+
+const int __go_mtx_spins = 4;
+const int __go_mtx_pauses = 30;
+// if this is called recursively IT WILL DEADLOCK!!!!!
+static inline void lock(go_mutex & this) with(this) {
+    int state, init_state;
+
+    // speculative grab
+    state = internal_exchange(this, 1);
+    if ( !state ) return; // state == 0
+    init_state = state;
+    for (;;) {
+        for( int i = 0; i < __go_mtx_spins; i++ ) {
+            while( !val ) { // lock unlocked
+                state = 0;
+                if (internal_try_lock(this, state, init_state)) return;
+            }
+            for (int i = 0; i < __go_mtx_pauses; i++) Pause();
+        }
+
+        while( !val ) { // lock unlocked
+            state = 0;
+            if (internal_try_lock(this, state, init_state)) return;
+        }
+        sched_yield();
+
+        // if not in contended state, set to be in contended state
+        state = internal_exchange(this, 2);
+        if ( !state ) return; // state == 0
+        init_state = 2;
+        futex((int*)&val, FUTEX_WAIT, 2); // if val is not 2 this returns with EWOULDBLOCK
+    }
+}
+
+static inline void unlock( go_mutex & this ) with(this) {
+    // if uncontended do atomic unlock and then return
+    if (__atomic_exchange_n(&val, 0, __ATOMIC_RELEASE) == 1) return;
+
+    // otherwise threads are blocked so we must wake one
+    futex((int *)&val, FUTEX_WAKE, 1);
+}
+
+static inline void on_notify( go_mutex & f, thread$ * t){ unpark(t); }
+static inline size_t on_wait( go_mutex & f ) {unlock(f); return 0;}
+static inline void on_wakeup( go_mutex & f, size_t recursion ) {}
 
 //-----------------------------------------------------------------------------
…
     this.lock_value = 0;
 }
+
+static inline void ^?{}( exp_backoff_then_block_lock & this ){}
 
 static inline bool internal_try_lock(exp_backoff_then_block_lock & this, size_t & compare_val) with(this) {
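As a design note, the go_mutex added above mirrors Go's runtime lock (src/runtime/lock_futex.go): lock() first tries a speculative exchange to 1 (locked), then spins with Pause, then yields via sched_yield(), and only then parks on the futex after exchanging the state to 2 (sleeping), which is how unlock() knows whether a FUTEX_WAKE is required. Below is a minimal usage sketch, not part of the changeset; only go_mutex, lock, and unlock come from the diff, while the Worker thread and counter are illustrative.

#include <fstream.hfa>
#include <locks.hfa>
#include <thread.hfa>

go_mutex m;                 // zero-initialized: 0 == UNLOCKED
size_t counter = 0;

thread Worker {};
void main( Worker & w ) {
    for ( i; 100000 ) {
        lock( m );          // no ownership: relocking on the same thread deadlocks
        counter += 1;       // critical section
        unlock( m );        // FUTEX_WAKEs one waiter only if the lock was contended
    }
}

int main() {
    {
        Worker workers[4];  // threads start on construction ...
    }                       // ... and join on destruction
    sout | counter;         // prints 400000
}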