Changeset d3ba775 for libcfa/src/concurrency/ready_queue.cfa
- Timestamp: May 3, 2021, 5:04:05 PM
- Branches: ADT, arm-eh, ast-experimental, enum, forall-pointer-decay, jacob/cs343-translation, master, new-ast-unique-expr, pthread-emulation, qualifiedEnum
- Children: 9fa538c
- Parents: eeb9f9f
- File: libcfa/src/concurrency/ready_queue.cfa (1 edited)
libcfa/src/concurrency/ready_queue.cfa
--- libcfa/src/concurrency/ready_queue.cfa (revision eeb9f9f)
+++ libcfa/src/concurrency/ready_queue.cfa (revision d3ba775)
@@ -17,5 +17,4 @@
 // #define __CFA_DEBUG_PRINT_READY_QUEUE__
 
-// #define USE_MPSC
 
 #define USE_RELAXED_FIFO
@@ -274,19 +273,12 @@
 	#endif
 
-	#if defined(USE_MPSC)
-		// mpsc always succeeds
-	} while( false );
-	#else
 		// If we can't lock it retry
 	} while( !__atomic_try_acquire( &lanes.data[i].lock ) );
-	#endif
 
 	// Actually push it
 	push(lanes.data[i], thrd);
 
-	#if !defined(USE_MPSC)
 		// Unlock and return
 		__atomic_unlock( &lanes.data[i].lock );
-	#endif
 
 	// Mark the current index in the tls rng instance as having an item
@@ -347,10 +339,15 @@
 	__cfadbg_print_safe(ready_queue, "Kernel : Pushing %p on cluster %p\n", thrd, cltr);
 
+	// #define USE_PREFERRED
+	#if !defined(USE_PREFERRED)
 	const bool external = (!kernelTLS().this_processor) || (cltr != kernelTLS().this_processor->cltr);
 	/* paranoid */ verify(external || kernelTLS().this_processor->rdq.id < lanes.count );
-
-	// write timestamp
-	#if !defined(USE_NEW_SUBQUEUE)
-		thrd->link.ts = rdtscl();
+	#else
+		unsigned preferred = thrd->preferred;
+		const bool external = (!kernelTLS().this_processor) || preferred == -1u || thrd->curr_cluster != cltr;
+		/* paranoid */ verifyf(external || preferred < lanes.count, "Invalid preferred queue %u for %u lanes", preferred, lanes.count );
+
+		unsigned r = preferred % READYQ_SHARD_FACTOR;
+		const unsigned start = preferred - r;
 	#endif
 
@@ -367,25 +364,20 @@
 	}
 	else {
+		#if !defined(USE_PREFERRED)
 		processor * proc = kernelTLS().this_processor;
 		unsigned r = proc->rdq.its++;
 		i = proc->rdq.id + (r % READYQ_SHARD_FACTOR);
+		#else
+		i = start + (r++ % READYQ_SHARD_FACTOR);
+		#endif
 	}
-
-
-	#if defined(USE_MPSC)
-		// mpsc always succeeds
-	} while( false );
-	#else
 		// If we can't lock it retry
 	} while( !__atomic_try_acquire( &lanes.data[i].lock ) );
-	#endif
 
 	// Actually push it
 	push(lanes.data[i], thrd);
 
-	#if !defined(USE_MPSC)
 		// Unlock and return
 		__atomic_unlock( &lanes.data[i].lock );
-	#endif
 
 	#if !defined(__CFA_NO_STATISTICS__)
@@ -491,4 +483,6 @@
 		lanes.tscs[w].tv = thrd->link.ts;
 	#endif
+
+	thrd->preferred = w;
 
 	// return the popped thread
@@ -518,5 +512,5 @@
 // Check that all the intrusive queues in the data structure are still consistent
 static void check( __ready_queue_t & q ) with (q) {
-	#if defined(__CFA_WITH_VERIFY__) && !defined(USE_MPSC)
+	#if defined(__CFA_WITH_VERIFY__)
 	{
 		for( idx ; lanes.count ) {
@@ -553,10 +547,8 @@
 // fixes the list so that the pointers back to anchors aren't left dangling
 static inline void fix(__intrusive_lane_t & ll) {
-	#if !defined(USE_MPSC)
 		if(is_empty(ll)) {
 			verify(ll.anchor.next == 0p);
 			ll.prev = mock_head(ll);
 		}
-	#endif
 }
 
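Read as a whole, the diff does two things: it deletes the experimental USE_MPSC option, so the try-lock/retry loop becomes the only push protocol for the subqueues, and it introduces a (still commented-out) USE_PREFERRED option under which a push targets the READYQ_SHARD_FACTOR-lane shard recorded in thrd->preferred, a hint the pop path now writes back (thrd->preferred = w). The plain-C11 sketch below models that push loop; every name in it (lane_t, ready_queue_t, proc_rdq_t, push_thread, NO_PREFERENCE) is a hypothetical stand-in rather than the Cforall runtime API, atomic_flag stands in for __atomic_try_acquire/__atomic_unlock, and the compile-time #if is folded into a runtime "hinted" flag for brevity.

#include <stdatomic.h>
#include <stdbool.h>

#define READYQ_SHARD_FACTOR 4     /* lanes per processor shard, as in the diff */
#define NO_PREFERENCE (-1u)       /* mirrors the diff's `preferred == -1u` check */

typedef struct {
	atomic_flag lock;             /* per-lane try-lock */
	/* intrusive queue of ready threads elided */
} lane_t;

typedef struct {
	lane_t * lanes;               /* lanes.data in the real code */
	unsigned count;               /* lanes.count */
} ready_queue_t;

typedef struct {                  /* analogue of proc->rdq.{id,its} */
	unsigned id;                  /* first lane of this processor's shard */
	unsigned its;                 /* round-robin insertion counter */
} proc_rdq_t;

static void push_thread(ready_queue_t * rq, proc_rdq_t * rdq,
                        unsigned preferred, void * thrd) {
	/* A valid hint selects the shard the thread last popped from;
	   otherwise fall back to the local processor's own shard. */
	bool hinted = (preferred != NO_PREFERENCE) && (preferred < rq->count);
	unsigned r     = hinted ? preferred % READYQ_SHARD_FACTOR : 0;
	unsigned start = hinted ? preferred - r : rdq->id;

	unsigned i;
	do {
		/* Each failed try-lock advances the counter, so retries
		   probe the other lanes of the same shard. */
		unsigned step = hinted ? r++ : rdq->its++;
		i = start + (step % READYQ_SHARD_FACTOR);
		/* If we can't lock it, retry (the MPSC always-succeeds
		   branch is gone; only this lock-based loop remains). */
	} while (atomic_flag_test_and_set_explicit(&rq->lanes[i].lock,
	                                           memory_order_acquire));

	/* Actually push it: intrusive-queue insertion elided. */
	(void)thrd;

	/* Unlock and return. */
	atomic_flag_clear_explicit(&rq->lanes[i].lock, memory_order_release);
}

The design point visible in the diff is that a failed try-lock never spins on one lane: the counter advances, so each retry probes the next lane of the shard, and because pop records where a thread was dequeued, a later hinted push tends to land back on lanes owned by the processor that last ran it.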