Changeset a6b48f6
- Timestamp: Dec 24, 2024, 10:52:13 AM
- Branches: master
- Children: 2853d6f, 5db580e
- Parents: 4f4ae60
- Location: libcfa/src
- Files: 3 edited
libcfa/src/collections/lockfree.hfa
--- r4f4ae60
+++ ra6b48f6
@@ -6 +6 @@
 #include <bits/defs.hfa>
 
-forall( T & ) {
+forall( T & ) {
 //------------------------------------------------------------
 // Queue based on the MCS lock
@@ -200 +200 @@
 forall( T & )
 struct LinkData {
-	T * volatile top;				// pointer to stack top
-	uintptr_t count;				// count each push
+	T * volatile top;				// pointer to stack top
+	uintptr_t count;				// count each push
 };
@@ -215 +215 @@
 }; // Link
 
-forall( T | sized(T)| { Link(T) * ?`next( T * ); } ) {
+forall( T /*| sized(T)*/ | { Link(T) * ?`next( T * ); } ) {
 struct StackLF {
 	Link(T) stack;
@@ -235 +235 @@
 	Link(T) t @= stack;				// atomic assignment unnecessary, or use CAA
 	for () {					// busy wait
-		if ( t.data.top == 0p ) return 0p;
+		if ( t.data.top == 0p ) return 0p;	// empty stack ?
 		Link(T) * next = ( t.data.top )`next;
 		if ( __atomic_compare_exchange_n( &stack.atom, &t.atom, (Link(T))@{ (LinkData(T))@{ next->data.top, t.data.count } }.atom, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST ) ) return t.data.top; // attempt to update top node
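Context for the StackLF pop shown above: it is a busy-wait compare-and-swap loop over a {top pointer, push counter} pair, where the counter defends against ABA. As a rough, non-authoritative illustration of the same CAS retry pattern only, here is a minimal C11 sketch; the names and the simplified layout (no ABA counter, nodes assumed not to be freed while in use) are assumptions for illustration, not the Cforall library API.

// Treiber-style lock-free stack sketch using C11 atomics (illustrative only).
#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

struct node { struct node * next; int value; };
struct stack { _Atomic(struct node *) top; };

static void push( struct stack * s, struct node * n ) {
	struct node * old = atomic_load( &s->top );
	do {
		n->next = old;				// link new node in front of current top
	} while ( ! atomic_compare_exchange_weak( &s->top, &old, n ) );
}

static struct node * pop( struct stack * s ) {
	struct node * old = atomic_load( &s->top );
	for ( ;; ) {					// busy wait, like the CFA loop
		if ( old == NULL ) return NULL;		// empty stack ?
		// attempt to swing top to the successor; on failure, old is refreshed
		if ( atomic_compare_exchange_weak( &s->top, &old, old->next ) ) return old;
	}
}

int main( void ) {
	struct stack s = { NULL };
	struct node a = { NULL, 1 }, b = { NULL, 2 };
	push( &s, &a ); push( &s, &b );
	printf( "%d %d\n", pop( &s )->value, pop( &s )->value );	// prints 2 1
}

The CFA version swaps the counter and the pointer together in one wide CAS precisely so that a recycled node cannot make a stale CAS succeed; the sketch omits that refinement for brevity.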
libcfa/src/concurrency/locks.hfa
--- r4f4ae60
+++ ra6b48f6
@@ -10 +10 @@
 // Author           : Colby Alexander Parsons
 // Created On       : Thu Jan 21 19:46:50 2021
-// Last Modified By :
-// Last Modified On :
-// Update Count     :
+// Last Modified By : Peter A. Buhr
+// Last Modified On : Tue Dec 24 09:36:52 2024
+// Update Count     : 16
 //
 
@@ -33 +33 @@
 
 // futex headers
-#include <linux/futex.h>	/* Definition of FUTEX_* constants */
-#include <sys/syscall.h>	/* Definition of SYS_* constants */
-#include <unistd.h>		/* Definition of syscall routine */
+#include <linux/futex.h>	// Definition of FUTEX_* constants
+#include <sys/syscall.h>	// Definition of SYS_* constants
+#include <unistd.h>		// Definition of syscall routine
 
 typedef void (*__cfa_pre_park)( void * );
@@ -43 +43 @@
 //-----------------------------------------------------------------------------
 // is_blocking_lock
-forall( L & | sized(L))
+forall( L & /*| sized( L )*/ )
 trait is_blocking_lock {
 	// For synchronization locks to use when acquiring
@@ -63 +63 @@
 
 #define DEFAULT_ON_NOTIFY( lock_type ) \
-	static inline void on_notify( lock_type & this, thread$ * t ){ unpark(t); }
+	static inline void on_notify( lock_type & /*this*/, thread$ * t ){ unpark( t ); }
 
 #define DEFAULT_ON_WAIT( lock_type ) \
@@ -74 +74 @@
 // on_wakeup impl if lock should be reacquired after waking up
 #define DEFAULT_ON_WAKEUP_REACQ( lock_type ) \
-	static inline void on_wakeup( lock_type & this, size_t recursion) { lock( this ); }
+	static inline void on_wakeup( lock_type & this, size_t /*recursion*/ ) { lock( this ); }
 
 // on_wakeup impl if lock will not be reacquired after waking up
 #define DEFAULT_ON_WAKEUP_NO_REACQ( lock_type ) \
-	static inline void on_wakeup( lock_type & this, size_t recursion) {}
+	static inline void on_wakeup( lock_type & /*this*/, size_t /*recursion*/ ) {}
 
 
@@ -87 +87 @@
 	__spinlock_t lock;
 	int count;
-	__queue_t( thread$) waiting;
-};
-
-void  ?{}(semaphore & this, int count = 1);
-void ^?{}(semaphore & this);
-bool   P (semaphore & this);
-bool   V (semaphore & this);
-bool   V (semaphore & this, unsigned count);
-thread$ * V (semaphore & this, bool );
+	__queue_t( thread$) waiting;
+};
+
+void ?{}( semaphore & this, int count = 1 );
+void ^?{}( semaphore & this );
+bool P( semaphore & this );
+bool V( semaphore & this );
+bool V( semaphore & this, unsigned count );
+thread$ * V( semaphore & this, bool );
 
 //----------
@@ -102 +102 @@
 };
 
-static inline void ?{}( single_acquisition_lock & this ) {((blocking_lock &)this){ false, false };}
+static inline void ?{}( single_acquisition_lock & this ) { ((blocking_lock &)this){ false, false }; }
 static inline void ^?{}( single_acquisition_lock & this ) {}
-static inline void lock     ( single_acquisition_lock & this ) { lock( (blocking_lock &)this ); }
-static inline bool try_lock ( single_acquisition_lock & this ) { return try_lock( (blocking_lock &)this ); }
-static inline void unlock   ( single_acquisition_lock & this ) { unlock( (blocking_lock &)this ); }
-static inline size_t on_wait …
-static inline void …
-static inline void …
-static inline bool …
-static inline bool …
-static inline bool …
+static inline void lock( single_acquisition_lock & this ) { lock( (blocking_lock &)this ); }
+static inline bool try_lock( single_acquisition_lock & this ) { return try_lock( (blocking_lock &)this ); }
+static inline void unlock( single_acquisition_lock & this ) { unlock( (blocking_lock &)this ); }
+static inline size_t on_wait( single_acquisition_lock & this, __cfa_pre_park pp_fn, void * pp_datum ) { return on_wait ( (blocking_lock &)this, pp_fn, pp_datum ); }
+static inline void on_wakeup( single_acquisition_lock & this, size_t v ) { on_wakeup ( (blocking_lock &)this, v ); }
+static inline void on_notify( single_acquisition_lock & this, struct thread$ * t ) { on_notify( (blocking_lock &)this, t ); }
+static inline bool register_select( single_acquisition_lock & this, select_node & node ) { return register_select( (blocking_lock &)this, node ); }
+static inline bool unregister_select( single_acquisition_lock & this, select_node & node ) { return unregister_select( (blocking_lock &)this, node ); }
+static inline bool on_selected( single_acquisition_lock & this, select_node & node ) { return on_selected( (blocking_lock &)this, node ); }
 __CFA_SELECT_GET_TYPE( single_acquisition_lock );
@@ -120 +120 @@
 };
 
-static inline void ?{}( owner_lock & this ) {((blocking_lock &)this){ true, true };}
+static inline void ?{}( owner_lock & this ) { ((blocking_lock &)this){ true, true }; }
 static inline void ^?{}( owner_lock & this ) {}
-static inline void lock     ( owner_lock & this ) { lock( (blocking_lock &)this ); }
-static inline bool try_lock ( owner_lock & this ) { return try_lock( (blocking_lock &)this ); }
-static inline void unlock   ( owner_lock & this ) { unlock( (blocking_lock &)this ); }
-static inline size_t on_wait …
-static inline void …
-static inline void …
-static inline bool …
-static inline bool …
-static inline bool …
+static inline void lock( owner_lock & this ) { lock( (blocking_lock &)this ); }
+static inline bool try_lock( owner_lock & this ) { return try_lock( (blocking_lock &)this ); }
+static inline void unlock( owner_lock & this ) { unlock( (blocking_lock &)this ); }
+static inline size_t on_wait( owner_lock & this, __cfa_pre_park pp_fn, void * pp_datum ) { return on_wait ( (blocking_lock &)this, pp_fn, pp_datum ); }
+static inline void on_wakeup( owner_lock & this, size_t v ) { on_wakeup ( (blocking_lock &)this, v ); }
+static inline void on_notify( owner_lock & this, struct thread$ * t ) { on_notify( (blocking_lock &)this, t ); }
+static inline bool register_select( owner_lock & this, select_node & node ) { return register_select( (blocking_lock &)this, node ); }
+static inline bool unregister_select( owner_lock & this, select_node & node ) { return unregister_select( (blocking_lock &)this, node ); }
+static inline bool on_selected( owner_lock & this, select_node & node ) { return on_selected( (blocking_lock &)this, node ); }
 __CFA_SELECT_GET_TYPE( owner_lock );
@@ -147 +147 @@
 
 struct mcs_lock {
-	mcs_queue( mcs_node) queue;
+	mcs_queue( mcs_node ) queue;
 };
 
 static inline void lock( mcs_lock & l, mcs_node & n ) {
-	if (push(l.queue, &n))
-		wait( n.sem);
-}
-
-static inline void unlock( mcs_lock & l, mcs_node & n) {
-	mcs_node * next = advance( l.queue, &n);
-	if (next) post(next->sem);
+	if ( push( l.queue, &n ) )
+		wait( n.sem );
+}
+
+static inline void unlock( mcs_lock & l, mcs_node & n ) {
+	mcs_node * next = advance( l.queue, &n );
+	if ( next ) post( next->sem );
 }
 
@@ -183 +183 @@
 	n.locked = true;
 
-#if defined( __ARM_ARCH)
+#if defined( __ARM_ARCH )
 	__asm__ __volatile__ ( "DMB ISH" ::: );
 #endif
 
-	mcs_spin_node * prev = __atomic_exchange_n( &l.queue.tail, &n, __ATOMIC_SEQ_CST);
-	if ( prev == 0p ) return;
+	mcs_spin_node * prev = __atomic_exchange_n( &l.queue.tail, &n, __ATOMIC_SEQ_CST );
+	if ( prev == 0p ) return;
 	prev->next = &n;
 
-#if defined( __ARM_ARCH)
+#if defined( __ARM_ARCH )
 	__asm__ __volatile__ ( "DMB ISH" ::: );
 #endif
 
-	while ( __atomic_load_n(&n.locked, __ATOMIC_RELAXED) ) Pause();
+	while ( __atomic_load_n( &n.locked, __ATOMIC_RELAXED ) ) Pause();
 
-#if defined( __ARM_ARCH)
+#if defined( __ARM_ARCH )
 	__asm__ __volatile__ ( "DMB ISH" ::: );
 #endif
 }
 
-static inline void unlock( mcs_spin_lock & l, mcs_spin_node & n) {
-#if defined( __ARM_ARCH)
+static inline void unlock( mcs_spin_lock & l, mcs_spin_node & n ) {
+#if defined( __ARM_ARCH )
 	__asm__ __volatile__ ( "DMB ISH" ::: );
 #endif
 
 	mcs_spin_node * n_ptr = &n;
-	if ( __atomic_compare_exchange_n(&l.queue.tail, &n_ptr, 0p, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) return;
-	while ( __atomic_load_n(&n.next, __ATOMIC_RELAXED) == 0p) Pause();
-
-#if defined( __ARM_ARCH)
+	if ( __atomic_compare_exchange_n( &l.queue.tail, &n_ptr, 0p, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST ) ) return;
+	while ( __atomic_load_n( &n.next, __ATOMIC_RELAXED ) == 0p ) Pause();
+
+#if defined( __ARM_ARCH )
 	__asm__ __volatile__ ( "DMB ISH" ::: );
 #endif
@@ -233 +233 @@
 
 // to use for FUTEX_WAKE and FUTEX_WAIT (other futex calls will need more params)
-static inline int futex( int *uaddr, int futex_op, int val) {
-	return syscall( SYS_futex, uaddr, futex_op, val, NULL, NULL, 0);
-}
-
-static inline void ?{}( futex_mutex & this ) with( this) { val = 0; }
-
-static inline bool internal_try_lock( futex_mutex & this, int & compare_val ) with(this) {
-	return __atomic_compare_exchange_n( (int*)&val, (int*)&compare_val, 1, false, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
-}
-
-static inline int internal_exchange( futex_mutex & this ) with( this) {
-	return __atomic_exchange_n(( int*)&val, 2, __ATOMIC_ACQUIRE);
+static inline int futex( int *uaddr, int futex_op, int val ) {
+	return syscall( SYS_futex, uaddr, futex_op, val, NULL, NULL, 0 );
+}
+
+static inline void ?{}( futex_mutex & this ) with( this ) { val = 0; }
+
+static inline bool internal_try_lock( futex_mutex & this, int & compare_val ) with( this ) {
+	return __atomic_compare_exchange_n( (int*)&val, (int*)&compare_val, 1, false, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE );
+}
+
+static inline int internal_exchange( futex_mutex & this ) with( this ) {
+	return __atomic_exchange_n(( int*)&val, 2, __ATOMIC_ACQUIRE );
 }
 
 // if this is called recursively IT WILL DEADLOCK!!!!!
-static inline void lock( futex_mutex & this ) with( this) {
+static inline void lock( futex_mutex & this ) with( this ) {
 	int state;
 
-	for ( int spin = 4; spin < 1024; spin += spin) {
+	for ( spin; 4 ~ 1024 ~ spin ) {
 		state = 0;
 		// if unlocked, lock and return
-		if ( internal_try_lock(this, state)) return;
-		if ( 2 == state) break;
-		for ( int i = 0; i < spin; i++) Pause();
+		if ( internal_try_lock( this, state ) ) return;
+		if ( state == 2 ) break;
+		for ( spin ) Pause();
 	}
 
 	// if not in contended state, set to be in contended state
-	if ( state != 2) state = internal_exchange(this);
+	if ( state != 2 ) state = internal_exchange( this );
 
 	// block and spin until we win the lock
-	while ( state != 0) {
-		futex( (int*)&val, FUTEX_WAIT, 2);	// if val is not 2 this returns with EWOULDBLOCK
-		state = internal_exchange( this);
-	}
-}
-
-static inline void unlock( futex_mutex & this) with(this) {
+	while ( state != 0 ) {
+		futex( (int*)&val, FUTEX_WAIT, 2 );	// if val is not 2 this returns with EWOULDBLOCK
+		state = internal_exchange( this );
+	}
+}
+
+static inline void unlock( futex_mutex & this ) with( this ) {
 	// if uncontended do atomic unlock and then return
-	if ( __atomic_exchange_n(&val, 0, __ATOMIC_RELEASE) == 1) return;
+	if ( __atomic_exchange_n( &val, 0, __ATOMIC_RELEASE ) == 1 ) return;
 
 	// otherwise threads are blocked so we must wake one
-	futex(( int *)&val, FUTEX_WAKE, 1);
+	futex(( int *)&val, FUTEX_WAKE, 1 );
 
@@ -295 +295 @@
 	int val;
 };
-static inline void ?{}( go_mutex & this ) with( this) { val = 0; }
+static inline void ?{}( go_mutex & this ) with( this ) { val = 0; }
 static inline void ?{}( go_mutex & this, go_mutex this2 ) = void;
 static inline void ?=?( go_mutex & this, go_mutex this2 ) = void;
 
-static inline bool internal_try_lock( go_mutex & this, int & compare_val, int new_val ) with(this) {
-	return __atomic_compare_exchange_n( (int*)&val, (int*)&compare_val, new_val, false, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
-}
-
-static inline int internal_exchange( go_mutex & this, int swap ) with(this) {
-	return __atomic_exchange_n( (int*)&val, swap, __ATOMIC_ACQUIRE);
+static inline bool internal_try_lock( go_mutex & this, int & compare_val, int new_val ) with( this ) {
+	return __atomic_compare_exchange_n( (int*)&val, (int*)&compare_val, new_val, false, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE );
+}
+
+static inline int internal_exchange( go_mutex & this, int swap ) with( this ) {
+	return __atomic_exchange_n( (int*)&val, swap, __ATOMIC_ACQUIRE );
 }
 
@@ -312 +312 @@
 
 	// speculative grab
-	state = internal_exchange( this, 1);
-	if ( ! state ) return;			// state == 0
+	state = internal_exchange( this, 1 );
+	if ( ! state ) return;			// state == 0
 	init_state = state;
-	for ( ;;) {
-		for ( int i = 0; i < 4; i++) {
-			while ( !val ) {	// lock unlocked
+	for () {
+		for ( 4 ) {
+			while ( ! val ) {	// lock unlocked
 				state = 0;
 				if ( internal_try_lock( this, state, init_state ) ) return;
 			}
-			for ( int i = 0; i < 30; i++) Pause();
+			for ( 30 ) Pause();
 		}
 
-		while ( !val ) {		// lock unlocked
+		while ( ! val ) {		// lock unlocked
 			state = 0;
 			if ( internal_try_lock( this, state, init_state ) ) return;
@@ -332 +332 @@
 		// if not in contended state, set to be in contended state
 		state = internal_exchange( this, 2 );
-		if ( ! state ) return;		// state == 0
+		if ( ! state ) return;		// state == 0
 		init_state = 2;
-		futex( (int*)&val, FUTEX_WAIT, 2 );
+		futex( (int*)&val, FUTEX_WAIT, 2 );	// if val is not 2 this returns with EWOULDBLOCK
 	}
 }
 
-static inline void unlock( go_mutex & this ) with( this) {
+static inline void unlock( go_mutex & this ) with( this ) {
 	// if uncontended do atomic unlock and then return
-	if ( __atomic_exchange_n( &val, 0, __ATOMIC_RELEASE) == 1 ) return;
+	if ( __atomic_exchange_n( &val, 0, __ATOMIC_RELEASE ) == 1 ) return;
 
 	// otherwise threads are blocked so we must wake one
@@ -373 +373 @@
 static inline void ^?{}( exp_backoff_then_block_lock & this ){}
 
-static inline bool internal_try_lock( exp_backoff_then_block_lock & this, size_t & compare_val ) with( this) {
-	return __atomic_compare_exchange_n( &lock_value, &compare_val, 1, false, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
+static inline bool internal_try_lock( exp_backoff_then_block_lock & this, size_t & compare_val ) with( this ) {
+	return __atomic_compare_exchange_n( &lock_value, &compare_val, 1, false, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED );
 }
 
 static inline bool try_lock( exp_backoff_then_block_lock & this ) { size_t compare_val = 0; return internal_try_lock( this, compare_val ); }
 
-static inline bool try_lock_contention( exp_backoff_then_block_lock & this ) with( this) {
-	return !__atomic_exchange_n( &lock_value, 2, __ATOMIC_ACQUIRE );
-}
-
-static inline bool block( exp_backoff_then_block_lock & this ) with( this) {
+static inline bool try_lock_contention( exp_backoff_then_block_lock & this ) with( this ) {
+	return ! __atomic_exchange_n( &lock_value, 2, __ATOMIC_ACQUIRE );
+}
+
+static inline bool block( exp_backoff_then_block_lock & this ) with( this ) {
 	lock( spinlock __cfaabi_dbg_ctx2 );
-	if ( __atomic_load_n( &lock_value, __ATOMIC_SEQ_CST) != 2) {
+	if ( __atomic_load_n( &lock_value, __ATOMIC_SEQ_CST ) != 2 ) {
 		unlock( spinlock );
 		return true;
@@ -395 +395 @@
 }
 
-static inline void lock( exp_backoff_then_block_lock & this ) with( this) {
+static inline void lock( exp_backoff_then_block_lock & this ) with( this ) {
 	size_t compare_val = 0;
 	int spin = 4;
 
 	// linear backoff
-	for ( ;;) {
+	for () {
 		compare_val = 0;
-		if ( internal_try_lock(this, compare_val)) return;
-		if ( 2 == compare_val) break;
-		for ( int i = 0; i < spin; i++) Pause();
-		if ( spin >= 1024) break;
+		if ( internal_try_lock( this, compare_val ) ) return;
+		if ( compare_val == 2 ) break;
+		for ( spin ) Pause();
+		if ( spin >= 1024 ) break;
 		spin += spin;
 	}
 
-	if (2 != compare_val && try_lock_contention(this)) return;
+	if ( 2 != compare_val && try_lock_contention( this ) ) return;
 	// block until signalled
-	while ( block(this)) if(try_lock_contention(this)) return;
+	while ( block( this ) ) if ( try_lock_contention( this ) ) return;
 }
 
-static inline void unlock( exp_backoff_then_block_lock & this ) with( this) {
-	if ( __atomic_exchange_n(&lock_value, 0, __ATOMIC_RELEASE) == 1) return;
+static inline void unlock( exp_backoff_then_block_lock & this ) with( this ) {
+	if ( __atomic_exchange_n( &lock_value, 0, __ATOMIC_RELEASE ) == 1 ) return;
 	lock( spinlock __cfaabi_dbg_ctx2 );
 	thread$ * t = &try_pop_front( blocked_threads );
@@ -444 +444 @@
 };
 
-static inline void ?{}( fast_block_lock & this ) with(this) {
+static inline void ?{}( fast_block_lock & this ) with( this ) {
 	lock{};
 	blocked_threads{};
@@ -454 +454 @@
 
 // if this is called recursively IT WILL DEADLOCK!!!!!
-static inline void lock( fast_block_lock & this ) with(this) {
+static inline void lock( fast_block_lock & this ) with( this ) {
 	lock( lock __cfaabi_dbg_ctx2 );
 	if ( held ) {
@@ -466 +466 @@
 }
 
-static inline void unlock( fast_block_lock & this ) with(this) {
+static inline void unlock( fast_block_lock & this ) with( this ) {
 	lock( lock __cfaabi_dbg_ctx2 );
 	/* paranoid */ verifyf( held != false, "Attempt to release lock %p that isn't held", &this );
@@ -475 +475 @@
 }
 
-static inline void on_notify( fast_block_lock & this, struct thread$ * t ) with(this) {
+static inline void on_notify( fast_block_lock & this, struct thread$ * t ) with( this ) {
 	lock( lock __cfaabi_dbg_ctx2 );
 	insert_last( blocked_threads, *t );
@@ -503 +503 @@
 };
 
-static inline void ?{}( simple_owner_lock & this ) with(this) {
+static inline void ?{}( simple_owner_lock & this ) with( this ) {
 	lock{};
 	blocked_threads{};
@@ -513 +513 @@
 static inline void ?=?( simple_owner_lock & this, simple_owner_lock this2 ) = void;
 
-static inline void lock( simple_owner_lock & this ) with(this) {
+static inline void lock( simple_owner_lock & this ) with( this ) {
 	if ( owner == active_thread() ) {
 		recursion_count++;
@@ -532 +532 @@
 }
 
-static inline void pop_node( simple_owner_lock & this ) with(this) {
+static inline void pop_node( simple_owner_lock & this ) with( this ) {
 	__handle_waituntil_OR( blocked_threads );
 	select_node * node = &try_pop_front( blocked_threads );
@@ -538 +538 @@
 		owner = node->blocked_thread;
 		recursion_count = 1;
-		// if ( !node->clause_status || __make_select_node_available( *node ) ) unpark( node->blocked_thread );
+		// if ( ! node->clause_status || __make_select_node_available( *node ) ) unpark( node->blocked_thread );
 		wake_one( blocked_threads, *node );
 	} else {
@@ -546 +546 @@
 }
 
-static inline void unlock( simple_owner_lock & this ) with(this) {
+static inline void unlock( simple_owner_lock & this ) with( this ) {
 	lock( lock __cfaabi_dbg_ctx2 );
 	/* paranoid */ verifyf( owner != 0p, "Attempt to release lock %p that isn't held", &this );
@@ -558 +558 @@
 }
 
-static inline void on_notify( simple_owner_lock & this, thread$ * t ) with(this) {
+static inline void on_notify( simple_owner_lock & this, thread$ * t ) with( this ) {
 	lock( lock __cfaabi_dbg_ctx2 );
 	// lock held
@@ -573 +573 @@
 }
 
-static inline size_t on_wait( simple_owner_lock & this, __cfa_pre_park pp_fn, void * pp_datum ) with(this) {
+static inline size_t on_wait( simple_owner_lock & this, __cfa_pre_park pp_fn, void * pp_datum ) with( this ) {
 	lock( lock __cfaabi_dbg_ctx2 );
 	/* paranoid */ verifyf( owner != 0p, "Attempt to release lock %p that isn't held", &this );
@@ -591 +591 @@
 }
 
-static inline void on_wakeup( simple_owner_lock & this, size_t recursion ) with( this) { recursion_count = recursion; }
+static inline void on_wakeup( simple_owner_lock & this, size_t recursion ) with( this ) { recursion_count = recursion; }
 
 // waituntil() support
-static inline bool register_select( simple_owner_lock & this, select_node & node ) with( this) {
+static inline bool register_select( simple_owner_lock & this, select_node & node ) with( this ) {
 	lock( lock __cfaabi_dbg_ctx2 );
 
 	// check if we can complete operation. If so race to establish winner in special OR case
-	if ( !node.park_counter && ( owner == active_thread() || owner == 0p ) ) {
-		if ( !__make_select_node_available( node ) ) { // we didn't win the race so give up on registering
+	if ( ! node.park_counter && ( owner == active_thread() || owner == 0p ) ) {
+		if ( ! __make_select_node_available( node ) ) {	// we didn't win the race so give up on registering
 			unlock( lock );
 			return false;
@@ -626 +626 @@
 }
 
-static inline bool unregister_select( simple_owner_lock & this, select_node & node ) with( this) {
+static inline bool unregister_select( simple_owner_lock & this, select_node & node ) with( this ) {
 	lock( lock __cfaabi_dbg_ctx2 );
 	if ( node`isListed ) {
@@ -644 +644 @@
 }
 
-static inline bool on_selected( simple_owner_lock & this, select_node & node) { return true; }
+static inline bool on_selected( simple_owner_lock & /*this*/, select_node & /*node*/ ) { return true; }
 __CFA_SELECT_GET_TYPE( simple_owner_lock );
@@ -662 +662 @@
 };
 
-static inline void ?{}( spin_queue_lock & this ) with(this) {
+static inline void ?{}( spin_queue_lock & this ) with( this ) {
 	lock{};
 	held = false;
@@ -671 +671 @@
 
 // if this is called recursively IT WILL DEADLOCK!
-static inline void lock( spin_queue_lock & this ) with(this) {
+static inline void lock( spin_queue_lock & this ) with( this ) {
 	mcs_spin_node node;
 	lock( lock, node );
-	while (__atomic_load_n(&held, __ATOMIC_SEQ_CST)) Pause();
-	__atomic_store_n(&held, true, __ATOMIC_SEQ_CST);
+	while ( __atomic_load_n( &held, __ATOMIC_SEQ_CST ) ) Pause();
+	__atomic_store_n( &held, true, __ATOMIC_SEQ_CST );
 	unlock( lock, node );
 }
 
-static inline void unlock( spin_queue_lock & this ) with(this) {
-	__atomic_store_n(&held, false, __ATOMIC_RELEASE);
+static inline void unlock( spin_queue_lock & this ) with( this ) {
+	__atomic_store_n( &held, false, __ATOMIC_RELEASE );
 }
 
@@ -702 +702 @@
 };
 
-static inline void ?{}( mcs_block_spin_lock & this ) with(this) {
+static inline void ?{}( mcs_block_spin_lock & this ) with( this ) {
 	lock{};
 	held = false;
@@ -711 +711 @@
 
 // if this is called recursively IT WILL DEADLOCK!!!!!
-static inline void lock( mcs_block_spin_lock & this ) with(this) {
+static inline void lock( mcs_block_spin_lock & this ) with( this ) {
 	mcs_node node;
 	lock( lock, node );
-	while (__atomic_load_n(&held, __ATOMIC_SEQ_CST)) Pause();
-	__atomic_store_n(&held, true, __ATOMIC_SEQ_CST);
+	while ( __atomic_load_n( &held, __ATOMIC_SEQ_CST ) ) Pause();
+	__atomic_store_n( &held, true, __ATOMIC_SEQ_CST );
 	unlock( lock, node );
 }
 
-static inline void unlock( mcs_block_spin_lock & this) with(this) {
-	__atomic_store_n(&held, false, __ATOMIC_SEQ_CST);
+static inline void unlock( mcs_block_spin_lock & this ) with( this ) {
+	__atomic_store_n( &held, false, __ATOMIC_SEQ_CST );
 }
 
@@ -742 +742 @@
 };
 
-static inline void ?{}( block_spin_lock & this ) with(this) {
+static inline void ?{}( block_spin_lock & this ) with( this ) {
 	lock{};
 	held = false;
@@ -751 +751 @@
 
 // if this is called recursively IT WILL DEADLOCK!!!!!
-static inline void lock( block_spin_lock & this ) with( this) {
+static inline void lock( block_spin_lock & this ) with( this ) {
 	lock( lock );
-	while (__atomic_load_n(&held, __ATOMIC_SEQ_CST)) Pause();
-	__atomic_store_n(&held, true, __ATOMIC_RELEASE);
+	while ( __atomic_load_n( &held, __ATOMIC_SEQ_CST ) ) Pause();
+	__atomic_store_n( &held, true, __ATOMIC_RELEASE );
 	unlock( lock );
 }
 
-static inline void unlock( block_spin_lock & this ) with( this) {
-	__atomic_store_n(&held, false, __ATOMIC_RELEASE);
-}
-
-static inline void on_notify( block_spin_lock & this, struct thread$ * t ) with( this.lock) {
+static inline void unlock( block_spin_lock & this ) with( this ) {
+	__atomic_store_n( &held, false, __ATOMIC_RELEASE );
+}
+
+static inline void on_notify( block_spin_lock & this, struct thread$ * t ) with( this.lock ) {
 	// first we acquire internal fast_block_lock
 	lock( lock __cfaabi_dbg_ctx2 );
@@ -774 +774 @@
 	unlock( lock );
 
-	unpark( t);
+	unpark( t );
 }
 
 DEFAULT_ON_WAIT( block_spin_lock )
-static inline void on_wakeup( block_spin_lock & this, size_t recursion ) with(this) {
+static inline void on_wakeup( block_spin_lock & this, size_t /*recursion*/ ) with( this ) {
 	// now we acquire the entire block_spin_lock upon waking up
-	while (__atomic_load_n(&held, __ATOMIC_SEQ_CST)) Pause();
-	__atomic_store_n(&held, true, __ATOMIC_RELEASE);
+	while ( __atomic_load_n( &held, __ATOMIC_SEQ_CST ) ) Pause();
+	__atomic_store_n( &held, true, __ATOMIC_RELEASE );
 	unlock( lock );	// Now we release the internal fast_spin_lock
 }
@@ -788 +788 @@
 // // the info thread is a wrapper around a thread used
 // // to store extra data for use in the condition variable
-forall(L & | is_blocking_lock(L)) {
+forall( L & | is_blocking_lock( L ) ) {
 	struct info_thread;
 }
@@ -794 +794 @@
 //-----------------------------------------------------------------------------
 // Synchronization Locks
-forall(L & | is_blocking_lock(L)) {
+forall( L & | is_blocking_lock( L ) ) {
 
 	//-----------------------------------------------------------------------------
@@ -810 +810 @@
 
 		// List of blocked threads
-		dlist( info_thread( L) ) blocked_threads;
+		dlist( info_thread( L ) ) blocked_threads;
 
 		// Count of current blocked threads
@@ -816 +816 @@
 	};
 
-
-	void ?{}( condition_variable(L) & this );
-	void ^?{}( condition_variable(L) & this );
-
-	bool notify_one( condition_variable(L) & this );
-	bool notify_all( condition_variable(L) & this );
-
-	uintptr_t front( condition_variable(L) & this );
-
-	bool empty  ( condition_variable(L) & this );
-	int counter( condition_variable(L) & this );
-
-	void wait( condition_variable(L) & this );
-	void wait( condition_variable(L) & this, uintptr_t info );
-	bool wait( condition_variable(L) & this, Duration duration );
-	bool wait( condition_variable(L) & this, uintptr_t info, Duration duration );
-
-	void wait( condition_variable(L) & this, L & l );
-	void wait( condition_variable(L) & this, L & l, uintptr_t info );
-	bool wait( condition_variable(L) & this, L & l, Duration duration );
-	bool wait( condition_variable(L) & this, L & l, uintptr_t info, Duration duration );
+	void ?{}( condition_variable( L ) & this );
+	void ^?{}( condition_variable( L ) & this );
+
+	bool notify_one( condition_variable( L ) & this );
+	bool notify_all( condition_variable( L ) & this );
+
+	uintptr_t front( condition_variable( L ) & this );
+
+	bool empty  ( condition_variable( L ) & this );
+	int counter( condition_variable( L ) & this );
+
+	void wait( condition_variable( L ) & this );
+	void wait( condition_variable( L ) & this, uintptr_t info );
+	bool wait( condition_variable( L ) & this, Duration duration );
+	bool wait( condition_variable( L ) & this, uintptr_t info, Duration duration );
+
+	void wait( condition_variable( L ) & this, L & l );
+	void wait( condition_variable( L ) & this, L & l, uintptr_t info );
+	bool wait( condition_variable( L ) & this, L & l, Duration duration );
+	bool wait( condition_variable( L ) & this, L & l, uintptr_t info, Duration duration );
 
 	//-----------------------------------------------------------------------------
@@ -848 +847 @@
 	struct fast_cond_var {
 		// List of blocked threads
-		dlist( info_thread( L) ) blocked_threads;
+		dlist( info_thread( L ) ) blocked_threads;
 		#ifdef __CFA_DEBUG__
 		L * lock_used;
@@ -854 +853 @@
 	};
 
-	void ?{}( fast_cond_var(L) & this );
-	void ^?{}( fast_cond_var(L) & this );
-
-	bool notify_one( fast_cond_var(L) & this );
-	bool notify_all( fast_cond_var(L) & this );
-
-	uintptr_t front( fast_cond_var(L) & this );
-	bool empty  ( fast_cond_var(L) & this );
-
-	void wait( fast_cond_var(L) & this, L & l );
-	void wait( fast_cond_var(L) & this, L & l, uintptr_t info );
+	void ?{}( fast_cond_var( L ) & this );
+	void ^?{}( fast_cond_var( L ) & this );
+
+	bool notify_one( fast_cond_var( L ) & this );
+	bool notify_all( fast_cond_var( L ) & this );
+
+	uintptr_t front( fast_cond_var( L ) & this );
+	bool empty  ( fast_cond_var( L ) & this );
+
+	void wait( fast_cond_var( L ) & this, L & l );
+	void wait( fast_cond_var( L ) & this, L & l, uintptr_t info );
 
 
@@ -874 +873 @@
 
 	struct pthread_cond_var {
-		dlist( info_thread( L) ) blocked_threads;
+		dlist( info_thread( L ) ) blocked_threads;
 		__spinlock_t lock;
 	};
 
-	void ?{}( pthread_cond_var(L) & this );
-	void ^?{}( pthread_cond_var(L) & this );
-
-	bool notify_one( pthread_cond_var(L) & this );
-	bool notify_all( pthread_cond_var(L) & this );
-
-	uintptr_t front( pthread_cond_var(L) & this );
-	bool empty ( pthread_cond_var(L) & this );
-
-	void wait( pthread_cond_var(L) & this, L & l );
-	void wait( pthread_cond_var(L) & this, L & l, uintptr_t info );
-	bool wait( pthread_cond_var(L) & this, L & l, timespec t );
-	bool wait( pthread_cond_var(L) & this, L & l, uintptr_t info, timespec t );
-}
+	void ?{}( pthread_cond_var( L ) & this );
+	void ^?{}( pthread_cond_var( L ) & this );
+
+	bool notify_one( pthread_cond_var( L ) & this );
+	bool notify_all( pthread_cond_var( L ) & this );
+
+	uintptr_t front( pthread_cond_var( L ) & this );
+	bool empty ( pthread_cond_var( L ) & this );
+
+	void wait( pthread_cond_var( L ) & this, L & l );
+	void wait( pthread_cond_var( L ) & this, L & l, uintptr_t info );
+	bool wait( pthread_cond_var( L ) & this, L & l, timespec t );
+	bool wait( pthread_cond_var( L ) & this, L & l, uintptr_t info, timespec t );
+}
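Context for the futex_mutex and go_mutex code touched above: both follow the common three-state futex protocol (0 = unlocked, 1 = locked, 2 = locked with waiters), spinning briefly before publishing contention and sleeping in the kernel. As a minimal, hedged C sketch of that protocol only (illustrative names, Linux-specific, not the Cforall API):

// Standalone sketch of a 0/1/2 futex mutex (illustrative only, Linux).
#define _GNU_SOURCE
#include <linux/futex.h>	// FUTEX_WAIT, FUTEX_WAKE
#include <sys/syscall.h>	// SYS_futex
#include <unistd.h>		// syscall
#include <stdatomic.h>

static _Atomic int futex_word;	// 0 = unlocked, 1 = locked, 2 = locked with waiters

static long futex( int * uaddr, int op, int val ) {
	return syscall( SYS_futex, uaddr, op, val, NULL, NULL, 0 );
}

static void mutex_lock( void ) {
	int expected = 0;
	// fast path: 0 -> 1 means we own the lock and nobody is waiting
	if ( atomic_compare_exchange_strong( &futex_word, &expected, 1 ) ) return;
	// slow path: advertise contention (2) and sleep until the word changes
	while ( atomic_exchange( &futex_word, 2 ) != 0 )
		futex( (int *)&futex_word, FUTEX_WAIT, 2 );	// kernel sleeps only while the word is still 2
}

static void mutex_unlock( void ) {
	// 1 -> 0: nobody was waiting, nothing to wake
	if ( atomic_exchange( &futex_word, 0 ) == 1 ) return;
	futex( (int *)&futex_word, FUTEX_WAKE, 1 );		// otherwise wake one sleeper
}

int main( void ) {
	mutex_lock();
	mutex_unlock();		// single-threaded smoke test of the fast paths
}

The design point visible in the diff is the same: the unlocker only issues the (comparatively expensive) FUTEX_WAKE system call when the exchanged value indicates that some thread actually published contention.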
libcfa/src/concurrency/select.hfa
--- r4f4ae60
+++ ra6b48f6
@@ -184 +184 @@
 
 // wake one thread from the list
-static inline void wake_one( dlist( select_node ) & queue, select_node & popped ) {
+static inline void wake_one( dlist( select_node ) & /*queue*/, select_node & popped ) {
 	if ( !popped.clause_status	// normal case, node is not a select node
 		|| ( popped.clause_status && !popped.park_counter )	// If popped link is special case OR selecting unpark but don't call __make_select_node_available