Changeset a017ee7 for libcfa/src
- Timestamp: Apr 15, 2021, 11:45:44 AM (4 years ago)
- Branches: ADT, arm-eh, ast-experimental, enum, forall-pointer-decay, jacob/cs343-translation, master, new-ast-unique-expr, pthread-emulation, qualifiedEnum
- Children: a4b0aa4
- Parents: fc59b580
- Location: libcfa/src/concurrency
- Files: 3 edited
libcfa/src/concurrency/kernel/startup.cfa
rfc59b580 → ra017ee7

 	this.name = name;
 	this.cltr = &_cltr;
+	this.cltr_id = -1u;
 	do_terminate = false;
 	preemption_alarm = 0p;
…
 	// Register and Lock the RWlock so no-one pushes/pops while we are changing the queue
 	uint_fast32_t last_size = ready_mutate_register((__processor_id_t*)&this);
-	int target = this.cltr->procs.total += 1u;
+	this.cltr->procs.total += 1u;
 	insert_last(this.cltr->procs.actives, this);

 	// Adjust the ready queue size
-	this.cltr_id = ready_queue_grow( cltr, target );
+	ready_queue_grow( cltr );

 	// Unlock the RWlock
…
 	// Lock the RWlock so no-one pushes/pops while we are changing the queue
 	uint_fast32_t last_size = ready_mutate_lock();
-	int target = this.cltr->procs.total -= 1u;
+	this.cltr->procs.total -= 1u;
 	remove(this);

 	// Adjust the ready queue size
-	ready_queue_shrink( this.cltr, target );
+	ready_queue_shrink( this.cltr );

 	// Unlock the RWlock and unregister: we don't need the read_lock any more
…
 	// Adjust the ready queue size
-	ready_queue_grow( &this, 0 );
+	ready_queue_grow( &this );

 	// Unlock the RWlock
…
 	// Adjust the ready queue size
-	ready_queue_shrink( &this, 0 );
+	ready_queue_shrink( &this );

 	// Unlock the RWlock
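The new `this.cltr_id = -1u;` seeds the preferred-lane id with a sentinel: as an unsigned value, -1u is the largest possible id, so the paranoid `cltr_id < lanes.count` checks added in ready_queue.cfa (below) fail loudly if a processor ever pushes or pops before reassign_cltr_id has run. A standalone sketch of that sentinel pattern, in plain C with names local to the example (not part of the changeset):

	#include <assert.h>
	#include <stdio.h>

	int main(void) {
		unsigned lanes_count = 12;                 // stand-in for lanes.count
		unsigned cltr_id = -1u;                    // sentinel set by the constructor
		printf("unassigned id = %u\n", cltr_id);   // -1u is UINT_MAX, never < lanes_count
		// assert(cltr_id < lanes_count);          // would fire before assignment
		cltr_id = 4;                               // value later handed out by reassign_cltr_id
		assert(cltr_id < lanes_count);             // now passes
		return 0;
	}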
libcfa/src/concurrency/kernel_private.hfa
rfc59b580 → ra017ee7

 	//-----------------------------------------------------------------------
 	// Increase the width of the ready queue (number of lanes) by 4
-	unsigned ready_queue_grow  (struct cluster * cltr, int target);
+	void ready_queue_grow  (struct cluster * cltr);

 	//-----------------------------------------------------------------------
 	// Decrease the width of the ready queue (number of lanes) by 4
-	void ready_queue_shrink(struct cluster * cltr, int target);
+	void ready_queue_shrink(struct cluster * cltr);
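With the `target` parameter dropped, callers no longer thread the processor count through; ready_queue_grow and ready_queue_shrink read `cltr->procs.total` themselves, as the ready_queue.cfa hunks below show. A rough sketch of how the lane count follows the processor count under the "4 lanes per processor" rule these comments describe (illustrative only; the real computation lives inside ready_queue_grow):

	#include <stdio.h>

	// 4 lanes per processor, collapsing to a single lane when the cluster has 0 or 1 processors.
	static unsigned lane_count(unsigned procs_total) {
		return procs_total >= 2 ? procs_total * 4 : 1;
	}

	int main(void) {
		for (unsigned p = 0; p <= 4; p++)
			printf("%u processor(s) -> %u lane(s)\n", p, lane_count(p));
		return 0;
	}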
libcfa/src/concurrency/ready_queue.cfa
rfc59b580 → ra017ee7

 	__attribute__((unused)) int preferred;
 	#if defined(BIAS)
+		/* paranoid */ verify(external || kernelTLS().this_processor->cltr_id < lanes.count );
 		preferred =
 		//*
…
 	int preferred;
 	#if defined(BIAS)
-		// Don't bother trying locally too much
+		/* paranoid */ verify(kernelTLS().this_processor->cltr_id < lanes.count );
 		preferred = kernelTLS().this_processor->cltr_id;
 	#endif
…
 }

+static void assign_list(unsigned & value, const int inc, dlist(processor, processor) & list, unsigned count) {
+	processor * it = &list`first;
+	for(unsigned i = 0; i < count; i++) {
+		/* paranoid */ verifyf( it, "Unexpected null iterator, at index %u of %u\n", i, count);
+		it->cltr_id = value;
+		value += inc;
+		it = &(*it)`next;
+	}
+}
+
+static void reassign_cltr_id(struct cluster * cltr, const int inc) {
+	unsigned preferred = 0;
+	assign_list(preferred, inc, cltr->procs.actives, cltr->procs.total - cltr->procs.idle);
+	assign_list(preferred, inc, cltr->procs.idles  , cltr->procs.idle);
+}
+
 // Grow the ready queue
-unsigned ready_queue_grow(struct cluster * cltr, int target) {
-	unsigned preferred;
+void ready_queue_grow(struct cluster * cltr) {
 	size_t ncount;
+	int target = cltr->procs.total;

 	/* paranoid */ verify( ready_mutate_islocked() );
…
 	if(target >= 2) {
 		ncount = target * 4;
-		preferred = ncount - 4;
 	} else {
 		ncount = 1;
-		preferred = 0;
 	}
…
 	}

+	reassign_cltr_id(cltr, 4);
+
 	// Make sure that everything is consistent
 	/* paranoid */ check( cltr->ready_queue );
…
 	/* paranoid */ verify( ready_mutate_islocked() );
-	return preferred;
 }

 // Shrink the ready queue
-void ready_queue_shrink(struct cluster * cltr, int target) {
+void ready_queue_shrink(struct cluster * cltr) {
 	/* paranoid */ verify( ready_mutate_islocked() );
 	__cfadbg_print_safe(ready_queue, "Kernel : Shrinking ready queue\n");
…
 	// Make sure that everything is consistent
 	/* paranoid */ check( cltr->ready_queue );
+
+	int target = cltr->procs.total;

 	with( cltr->ready_queue ) {
…
 	}

+	reassign_cltr_id(cltr, 4);
+
 	// Make sure that everything is consistent
 	/* paranoid */ check( cltr->ready_queue );
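reassign_cltr_id walks the active processors first, then the idle ones, handing each a preferred lane id spaced `inc` apart (4 here, matching the 4 lanes added per processor). A standalone illustration of that numbering, using plain arrays instead of CFA's dlist (names are local to this sketch, not part of the changeset):

	#include <stdio.h>

	// Give `count` processors consecutive preferred-lane ids, `inc` apart,
	// continuing from *value; the same walk assign_list does over a dlist.
	static void assign_ids(unsigned * ids, unsigned count, unsigned * value, unsigned inc) {
		for (unsigned i = 0; i < count; i++) {
			ids[i] = *value;
			*value += inc;
		}
	}

	int main(void) {
		unsigned actives[3], idles[1];
		unsigned preferred = 0;
		assign_ids(actives, 3, &preferred, 4);   // active processors: lanes 0, 4, 8
		assign_ids(idles,   1, &preferred, 4);   // idle processors continue: lane 12
		for (unsigned i = 0; i < 3; i++) printf("active %u -> lane %u\n", i, actives[i]);
		printf("idle   0 -> lane %u\n", idles[0]);
		return 0;
	}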