Changeset f835806 for libcfa/src/concurrency
- Timestamp: Jun 2, 2022, 2:39:11 PM
- Branches: ADT, ast-experimental, master, pthread-emulation, qualifiedEnum
- Children: fb63c70
- Parents: e5628db
- Location: libcfa/src/concurrency
- Files: 3 edited
Legend:
- Unmodified: unmarked context lines
- Added: lines prefixed with +
- Removed: lines prefixed with -
libcfa/src/concurrency/invoke.h
 	struct __monitor_group_t monitors;
 
-	// used to put threads on user data structures
-	struct {
-		struct thread$ * next;
-		struct thread$ * back;
-	} seqable;
-
 	// used to put threads on dlist data structure
 	__cfa_dlink(thread$);
…
 		struct thread$ * prev;
 	} node;
+
+	// used to store state between clh lock/unlock
+	volatile bool * clh_prev;
+
+	// used to point to this thd's current clh node
+	volatile bool * clh_node;
 
 	struct processor * last_proc;
…
 	}
 
-	static inline thread$ * volatile & ?`next ( thread$ * this ) __attribute__((const)) {
-		return this->seqable.next;
-	}
-
-	static inline thread$ *& Back( thread$ * this ) __attribute__((const)) {
-		return this->seqable.back;
-	}
-
-	static inline thread$ *& Next( thread$ * this ) __attribute__((const)) {
-		return this->seqable.next;
-	}
-
-	static inline bool listed( thread$ * this ) {
-		return this->seqable.next != 0p;
-	}
-
 	static inline void ?{}(__monitor_group_t & this) {
 		(this.data){0p};
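The two new fields carry the state of the CLH queue lock added to locks.hfa below: a thread spins on its predecessor's node, and on release it donates its own node to its successor and adopts the predecessor's. A minimal sketch of that hand-off in plain C, assuming GCC atomic builtins; my_node and my_prev are hypothetical thread-local stand-ins for thread$.clh_node and thread$.clh_prev:

    #include <stdbool.h>
    #include <stdlib.h>

    typedef struct { bool * volatile tail; } clh_lock_t;

    static __thread bool * my_node;  // node this thread will enqueue next
    static __thread bool * my_prev;  // predecessor's node, adopted on release

    void clh_init(clh_lock_t * l) {  // lock starts with one released sentinel node
        l->tail = malloc(sizeof(bool));
        *l->tail = true;
    }

    void clh_thread_init(void) {     // every thread owns exactly one spare node
        my_node = malloc(sizeof(bool));
    }

    void clh_acquire(clh_lock_t * l) {
        *my_node = false;            // our node now reads "not yet released"
        // swing the tail to our node; the old tail is our predecessor
        bool * prev = __atomic_exchange_n(&l->tail, my_node, __ATOMIC_SEQ_CST);
        while (!__atomic_load_n(prev, __ATOMIC_ACQUIRE)) {}  // spin on predecessor
        my_prev = prev;
    }

    void clh_release(clh_lock_t * l) {
        (void)l;
        __atomic_store_n(my_node, true, __ATOMIC_RELEASE);  // wake our successor
        my_node = my_prev;  // our old node now belongs to the successor, so we
                            // recycle the predecessor's node for the next acquire
    }

The node-per-thread invariant is why thread.cfa (below) allocates clh_node in the thread constructor and deletes it in the destructor.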
libcfa/src/concurrency/locks.hfa
 
 //-----------------------------------------------------------------------------
+// MCS Spin Lock
+// - No recursive acquisition
+// - Needs to be released by owner
+
+struct mcs_spin_node {
+	mcs_spin_node * volatile next;
+	bool locked:1;
+};
+
+struct mcs_spin_queue {
+	mcs_spin_node * volatile tail;
+};
+
+static inline void ?{}(mcs_spin_node & this) { this.next = 0p; this.locked = true; }
+
+static inline mcs_spin_node * volatile & ?`next ( mcs_spin_node * node ) {
+	return node->next;
+}
+
+struct mcs_spin_lock {
+	mcs_spin_queue queue;
+};
+
+static inline void lock(mcs_spin_lock & l, mcs_spin_node & n) {
+	mcs_spin_node * prev = __atomic_exchange_n(&l.queue.tail, &n, __ATOMIC_SEQ_CST);
+	if(prev != 0p) {
+		prev->next = &n;
+		while(n.locked) Pause();
+	}
+}
+
+static inline void unlock(mcs_spin_lock & l, mcs_spin_node & n) {
+	mcs_spin_node * n_ptr = &n;
+	if (!__atomic_compare_exchange_n(&l.queue.tail, &n_ptr, 0p, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST) ) {
+		while (n.next == 0p) {}
+		n.next->locked = false;
+	}
+}
+
+//-----------------------------------------------------------------------------
+// CLH Spinlock
+// - No recursive acquisition
+// - Needs to be released by owner
+
+struct clh_lock {
+	volatile bool * volatile tail;
+};
+
+static inline void ?{}( clh_lock & this ) { this.tail = malloc(); *this.tail = true; }
+static inline void ^?{}( clh_lock & this ) { free(this.tail); }
+
+static inline void lock(clh_lock & l) {
+	thread$ * curr_thd = active_thread();
+	*(curr_thd->clh_node) = false;
+	volatile bool * prev = __atomic_exchange_n((bool **)(&l.tail), (bool *)(curr_thd->clh_node), __ATOMIC_SEQ_CST);
+	while(!__atomic_load_n(prev, __ATOMIC_ACQUIRE)) Pause();
+	curr_thd->clh_prev = prev;
+}
+
+static inline void unlock(clh_lock & l) {
+	thread$ * curr_thd = active_thread();
+	__atomic_store_n(curr_thd->clh_node, true, __ATOMIC_RELEASE);
+	curr_thd->clh_node = curr_thd->clh_prev;
+}
+
+//-----------------------------------------------------------------------------
 // Linear backoff Spinlock
 struct linear_backoff_then_block_lock {
…
 // Fast Block Lock
 
-// High efficiency minimal blocking lock
+// minimal blocking lock
 // - No reacquire for cond var
 // - No recursive acquisition
 // - No ownership
 struct fast_block_lock {
+	// List of blocked threads
+	dlist( thread$ ) blocked_threads;
+
 	// Spin lock used for mutual exclusion
 	__spinlock_t lock;
 
-	// List of blocked threads
-	dlist( thread$ ) blocked_threads;
-
+	// flag showing if lock is held
 	bool held:1;
+
+	#ifdef __CFA_DEBUG__
+	// for deadlock detection
+	struct thread$ * owner;
+	#endif
 };
…
 static inline void lock(fast_block_lock & this) with(this) {
 	lock( lock __cfaabi_dbg_ctx2 );
+
+	#ifdef __CFA_DEBUG__
+	assert(!(held && owner == active_thread()));
+	#endif
 	if (held) {
 		insert_last( blocked_threads, *active_thread() );
…
 	}
 	held = true;
+	#ifdef __CFA_DEBUG__
+	owner = active_thread();
+	#endif
 	unlock( lock );
 }
…
 	thread$ * t = &try_pop_front( blocked_threads );
 	held = ( t ? true : false );
+	#ifdef __CFA_DEBUG__
+	owner = ( t ? t : 0p );
+	#endif
 	unpark( t );
 	unlock( lock );
…
 static inline size_t on_wait(fast_block_lock & this) { unlock(this); return 0; }
 static inline void on_wakeup(fast_block_lock & this, size_t recursion ) { }
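One thing the diff does not show is how mcs_spin_lock is meant to be called: unlike the other locks, the caller supplies the queue node, typically on its own stack, so the lock itself is only a tail pointer. A plain-C transliteration of the same algorithm with a small pthread driver, offered as a sketch rather than the CfA API (Pause() becomes a bare spin):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    typedef struct mcs_node {
        struct mcs_node * volatile next;
        volatile bool locked;
    } mcs_node;

    typedef struct { mcs_node * volatile tail; } mcs_lock_t;

    void mcs_acquire(mcs_lock_t * l, mcs_node * n) {
        n->next = NULL;
        n->locked = true;
        mcs_node * prev = __atomic_exchange_n(&l->tail, n, __ATOMIC_SEQ_CST);
        if (prev != NULL) {       // queue non-empty: link in behind the old tail
            prev->next = n;
            while (n->locked) {}  // each waiter spins on its own node, not the lock
        }
    }

    void mcs_release(mcs_lock_t * l, mcs_node * n) {
        mcs_node * expected = n;
        // if we are still the tail, swing it back to empty and we are done
        if (!__atomic_compare_exchange_n(&l->tail, &expected, NULL, false,
                                         __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) {
            while (n->next == NULL) {}  // successor is mid-enqueue: wait for link
            n->next->locked = false;    // hand the lock directly to it
        }
    }

    static mcs_lock_t L;
    static long counter;

    static void * worker(void * unused) {
        for (int i = 0; i < 100000; i++) {
            mcs_node me;              // the queue node lives on this thread's stack
            mcs_acquire(&L, &me);
            counter++;                // critical section
            mcs_release(&L, &me);
        }
        return NULL;
    }

    int main(void) {
        pthread_t t[4];
        for (int i = 0; i < 4; i++) pthread_create(&t[i], NULL, worker, NULL);
        for (int i = 0; i < 4; i++) pthread_join(t[i], NULL);
        printf("%ld\n", counter);     // expect 400000
    }

As in the changeset's version, the hand-off writes are plain stores to volatile fields; a stricter variant would use a release store and acquire load on locked.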
+
+//-----------------------------------------------------------------------------
+// simple_owner_lock
+
+// pthread owner lock
+// - reacquire for cond var
+// - recursive acquisition
+// - ownership
+struct simple_owner_lock {
+	// List of blocked threads
+	dlist( thread$ ) blocked_threads;
+
+	// Spin lock used for mutual exclusion
+	__spinlock_t lock;
+
+	// owner showing if lock is held
+	struct thread$ * owner;
+
+	size_t recursion_count;
+};
+
+static inline void ?{}( simple_owner_lock & this ) with(this) {
+	lock{};
+	blocked_threads{};
+	owner = 0p;
+	recursion_count = 0;
+}
+static inline void ^?{}( simple_owner_lock & this ) {}
+static inline void ?{}( simple_owner_lock & this, simple_owner_lock this2 ) = void;
+static inline void ?=?( simple_owner_lock & this, simple_owner_lock this2 ) = void;
+
+static inline void lock(simple_owner_lock & this) with(this) {
+	if (owner == active_thread()) {
+		recursion_count++;
+		return;
+	}
+	lock( lock __cfaabi_dbg_ctx2 );
+
+	if (owner != 0p) {
+		insert_last( blocked_threads, *active_thread() );
+		unlock( lock );
+		park( );
+		return;
+	}
+	owner = active_thread();
+	recursion_count = 1;
+	unlock( lock );
+}
+
+void pop_and_set_new_owner( simple_owner_lock & this ) with( this ) {
+	thread$ * t = &try_pop_front( blocked_threads );
+	owner = t;
+	recursion_count = ( t ? 1 : 0 );
+	unpark( t );
+}
+
+static inline void unlock(simple_owner_lock & this) with(this) {
+	lock( lock __cfaabi_dbg_ctx2 );
+	/* paranoid */ verifyf( owner != 0p, "Attempt to release lock %p that isn't held", &this );
+	/* paranoid */ verifyf( owner == active_thread(), "Thread %p other than the owner %p attempted to release owner lock %p", owner, active_thread(), &this );
+	// if recursion count is zero release lock and set new owner if one is waiting
+	recursion_count--;
+	if ( recursion_count == 0 ) {
+		pop_and_set_new_owner( this );
+	}
+	unlock( lock );
+}
+
+static inline void on_notify(simple_owner_lock & this, struct thread$ * t ) with(this) {
+	lock( lock __cfaabi_dbg_ctx2 );
+	// lock held
+	if ( owner != 0p ) {
+		insert_last( blocked_threads, *t );
+		unlock( lock );
+	}
+	// lock not held
+	else {
+		owner = t;
+		recursion_count = 1;
+		unpark( t );
+		unlock( lock );
+	}
+}
+
+static inline size_t on_wait(simple_owner_lock & this) with(this) {
+	lock( lock __cfaabi_dbg_ctx2 );
+	/* paranoid */ verifyf( owner != 0p, "Attempt to release lock %p that isn't held", &this );
+	/* paranoid */ verifyf( owner == active_thread(), "Thread %p other than the owner %p attempted to release owner lock %p", owner, active_thread(), &this );
+
+	size_t ret = recursion_count;
+
+	pop_and_set_new_owner( this );
+
+	unlock( lock );
+	return ret;
+}
+
+static inline void on_wakeup(simple_owner_lock & this, size_t recursion ) with(this) { recursion_count = recursion; }
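The owner/recursion_count pairing is the classic recursive-mutex bookkeeping: only the count changes while the owner re-locks, and the lock is handed off only when the count unwinds to zero. The same bookkeeping sketched over pthreads for comparison, with the caveat that CfA's pop_and_set_new_owner transfers ownership directly to the woken thread, whereas a condition variable makes the waiter re-acquire:

    #include <pthread.h>

    typedef struct {
        pthread_mutex_t m;    // stands in for the internal __spinlock_t
        pthread_cond_t  cv;   // stands in for the blocked_threads list + park()
        pthread_t owner;
        int held;             // 0 when no thread owns the lock
        int recursion_count;
    } owner_lock;             // init m/cv with their static initializers, held = 0

    void owner_lock_acquire(owner_lock * l) {
        pthread_mutex_lock(&l->m);
        if (l->held && pthread_equal(l->owner, pthread_self())) {
            l->recursion_count++;              // recursive acquisition: count only
            pthread_mutex_unlock(&l->m);
            return;
        }
        while (l->held) pthread_cond_wait(&l->cv, &l->m);  // block, like park()
        l->owner = pthread_self();
        l->held = 1;
        l->recursion_count = 1;
        pthread_mutex_unlock(&l->m);
    }

    void owner_lock_release(owner_lock * l) {
        pthread_mutex_lock(&l->m);
        l->recursion_count--;
        if (l->recursion_count == 0) {         // fully unwound: give the lock up
            l->held = 0;
            pthread_cond_signal(&l->cv);       // cf. pop_and_set_new_owner
        }
        pthread_mutex_unlock(&l->m);
    }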
+
+//-----------------------------------------------------------------------------
+// Spin Queue Lock
+
+// - No reacquire for cond var
+// - No recursive acquisition
+// - No ownership
+// - spin lock with no locking/atomics in unlock
+struct spin_queue_lock {
+	// Spin lock used for mutual exclusion
+	mcs_spin_lock lock;
+
+	// flag showing if lock is held
+	bool held:1;
+
+	#ifdef __CFA_DEBUG__
+	// for deadlock detection
+	struct thread$ * owner;
+	#endif
+};
+
+static inline void ?{}( spin_queue_lock & this ) with(this) {
+	lock{};
+	held = false;
+}
+static inline void ^?{}( spin_queue_lock & this ) {}
+static inline void ?{}( spin_queue_lock & this, spin_queue_lock this2 ) = void;
+static inline void ?=?( spin_queue_lock & this, spin_queue_lock this2 ) = void;
+
+// if this is called recursively IT WILL DEADLOCK!!!!!
+static inline void lock(spin_queue_lock & this) with(this) {
+	mcs_spin_node node;
+	#ifdef __CFA_DEBUG__
+	assert(!(held && owner == active_thread()));
+	#endif
+	lock( lock, node );
+	while(held) Pause();
+	held = true;
+	unlock( lock, node );
+	#ifdef __CFA_DEBUG__
+	owner = active_thread();
+	#endif
+}
+
+static inline void unlock(spin_queue_lock & this) with(this) {
+	#ifdef __CFA_DEBUG__
+	owner = 0p;
+	#endif
+	held = false;
+}
+
+static inline void on_notify(spin_queue_lock & this, struct thread$ * t ) { unpark(t); }
+static inline size_t on_wait(spin_queue_lock & this) { unlock(this); return 0; }
+static inline void on_wakeup(spin_queue_lock & this, size_t recursion ) { }
+
+//-----------------------------------------------------------------------------
+// MCS Block Spin Lock
+
+// - No reacquire for cond var
+// - No recursive acquisition
+// - No ownership
+// - Blocks but first node spins (like spin queue but blocking for not first thd)
+struct mcs_block_spin_lock {
+	// Spin lock used for mutual exclusion
+	mcs_lock lock;
+
+	// flag showing if lock is held
+	bool held:1;
+
+	#ifdef __CFA_DEBUG__
+	// for deadlock detection
+	struct thread$ * owner;
+	#endif
+};
+
+static inline void ?{}( mcs_block_spin_lock & this ) with(this) {
+	lock{};
+	held = false;
+}
+static inline void ^?{}( mcs_block_spin_lock & this ) {}
+static inline void ?{}( mcs_block_spin_lock & this, mcs_block_spin_lock this2 ) = void;
+static inline void ?=?( mcs_block_spin_lock & this, mcs_block_spin_lock this2 ) = void;
+
+// if this is called recursively IT WILL DEADLOCK!!!!!
+static inline void lock(mcs_block_spin_lock & this) with(this) {
+	mcs_node node;
+	#ifdef __CFA_DEBUG__
+	assert(!(held && owner == active_thread()));
+	#endif
+	lock( lock, node );
+	while(held) Pause();
+	held = true;
+	unlock( lock, node );
+	#ifdef __CFA_DEBUG__
+	owner = active_thread();
+	#endif
+}
+
+static inline void unlock(mcs_block_spin_lock & this) with(this) {
+	#ifdef __CFA_DEBUG__
+	owner = 0p;
+	#endif
+	held = false;
+}
+
+static inline void on_notify(mcs_block_spin_lock & this, struct thread$ * t ) { unpark(t); }
+static inline size_t on_wait(mcs_block_spin_lock & this) { unlock(this); return 0; }
+static inline void on_wakeup(mcs_block_spin_lock & this, size_t recursion ) { }
+
+//-----------------------------------------------------------------------------
+// Block Spin Lock
+
+// - No reacquire for cond var
+// - No recursive acquisition
+// - No ownership
+// - Blocks but first node spins (like spin queue but blocking for not first thd)
+struct block_spin_lock {
+	// Spin lock used for mutual exclusion
+	fast_block_lock lock;
+
+	// flag showing if lock is held
+	bool held:1;
+
+	#ifdef __CFA_DEBUG__
+	// for deadlock detection
+	struct thread$ * owner;
+	#endif
+};
+
+static inline void ?{}( block_spin_lock & this ) with(this) {
+	lock{};
+	held = false;
+}
+static inline void ^?{}( block_spin_lock & this ) {}
+static inline void ?{}( block_spin_lock & this, block_spin_lock this2 ) = void;
+static inline void ?=?( block_spin_lock & this, block_spin_lock this2 ) = void;
+
+// if this is called recursively IT WILL DEADLOCK!!!!!
+static inline void lock(block_spin_lock & this) with(this) {
+	#ifdef __CFA_DEBUG__
+	assert(!(held && owner == active_thread()));
+	#endif
+	lock( lock );
+	while(held) Pause();
+	held = true;
+	unlock( lock );
+	#ifdef __CFA_DEBUG__
+	owner = active_thread();
+	#endif
+}
+
+static inline void unlock(block_spin_lock & this) with(this) {
+	#ifdef __CFA_DEBUG__
+	owner = 0p;
+	#endif
+	held = false;
+}
+
+static inline void on_notify(block_spin_lock & this, struct thread$ * t ) { unpark(t); }
+static inline size_t on_wait(block_spin_lock & this) { unlock(this); return 0; }
+static inline void on_wakeup(block_spin_lock & this, size_t recursion ) { }
 
 //-----------------------------------------------------------------------------
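spin_queue_lock, mcs_block_spin_lock, and block_spin_lock differ only in which inner lock queues the acquirers (mcs_spin_lock, mcs_lock, or fast_block_lock); the rest is one shared skeleton in which release is a single plain store, which is what "no locking/atomics in unlock" buys. That skeleton, reduced to C with a pthread mutex as a stand-in inner lock:

    #include <pthread.h>
    #include <stdbool.h>

    typedef struct {
        pthread_mutex_t inner;  // stand-in for the inner queue lock;
                                // only acquirers ever touch it
        volatile bool held;
    } flag_lock;

    void flag_lock_acquire(flag_lock * l) {
        pthread_mutex_lock(&l->inner);    // at most one thread reaches the flag
        while (l->held) {}                // wait for the current holder's store
        l->held = true;
        pthread_mutex_unlock(&l->inner);  // let the next acquirer start waiting
    }

    void flag_lock_release(flag_lock * l) {
        l->held = false;  // plain store: the releasing thread takes no lock
    }

As in the changeset, the flag hands off through plain volatile accesses; making the store/load a release/acquire pair would pin the ordering down explicitly.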
libcfa/src/concurrency/thread.cfa
 	#endif
 
-	seqable.next = 0p;
-	seqable.back = 0p;
-
 	node.next = 0p;
 	node.prev = 0p;
+
+	clh_node = new( false );
+
 	doregister(curr_cluster, this);
-
 	monitors{ &self_mon_p, 1, (fptr_t)0 };
…
 	canary = 0xDEADDEADDEADDEADp;
 	#endif
+	delete(clh_node);
 	unregister(curr_cluster, this);
 	^self_cor{};