Changeset 13c5e19 for libcfa/src/concurrency/io.cfa
- Timestamp: Jun 23, 2020, 4:42:58 PM (4 years ago)
- Branches: ADT, arm-eh, ast-experimental, enum, forall-pointer-decay, jacob/cs343-translation, master, new-ast, new-ast-unique-expr, pthread-emulation, qualifiedEnum
- Children: de917da3
- Parents: b232745
- File: 1 edited
Legend:
- Unmodified lines have no prefix
- Added lines are prefixed with +
- Removed lines are prefixed with -
libcfa/src/concurrency/io.cfa
--- libcfa/src/concurrency/io.cfa (rb232745)
+++ libcfa/src/concurrency/io.cfa (r13c5e19)

@@ -167,4 +167,5 @@
 	struct {
 		struct {
+			__processor_id_t id;
 			void * stack;
 			pthread_t kthrd;

@@ -334,24 +335,30 @@
 	if( this.io->cltr_flags & CFA_CLUSTER_IO_POLLER_USER_THREAD ) {
 		with( this.io->poller.fast ) {
-			/* paranoid */ verify( this.procs.head == 0p || &this == mainCluster );
-			/* paranoid */ verify( this.idles.head == 0p || &this == mainCluster );
+			/* paranoid */ verify( this.nprocessors == 0 || &this == mainCluster );
+			/* paranoid */ verify( !ready_mutate_islocked() );

 			// We need to adjust the clean-up based on where the thread is
 			if( thrd.state == Ready || thrd.preempted != __NO_PREEMPTION ) {

-				// This is the tricky case
-				// The thread was preempted and now it is on the ready queue
-
-				/* paranoid */ verify( thrd.next != 0p );                // The thread should be the last on the list
-				/* paranoid */ verify( this.ready_queue.head == &thrd ); // The thread should be the only thing on the list
-
-				// Remove the thread from the ready queue of this cluster
-				this.ready_queue.head = 1p;
-				thrd.next = 0p;
-				__cfaabi_dbg_debug_do( thrd.unpark_stale = true );
-
-				// Fixup the thread state
-				thrd.state = Blocked;
-				thrd.preempted = __NO_PREEMPTION;
+				ready_schedule_lock( (struct __processor_id_t *)active_processor() );
+
+				// This is the tricky case
+				// The thread was preempted and now it is on the ready queue
+				// The thread should be the last on the list
+				/* paranoid */ verify( thrd.link.next != 0p );
+
+				// Remove the thread from the ready queue of this cluster
+				__attribute__((unused)) bool removed = remove_head( &this, &thrd );
+				/* paranoid */ verify( removed );
+				thrd.link.next = 0p;
+				thrd.link.prev = 0p;
+				__cfaabi_dbg_debug_do( thrd.unpark_stale = true );
+
+				// Fixup the thread state
+				thrd.state = Blocked;
+				thrd.ticket = 0;
+				thrd.preempted = __NO_PREEMPTION;
+
+				ready_schedule_unlock( (struct __processor_id_t *)active_processor() );

 				// Pretend like the thread was blocked all along

@@ -365,5 +372,5 @@
 			thrd.curr_cluster = active_cluster();

-			// unpark the fast io_poller
+			// unpark the fast io_poller
 			unpark( &thrd __cfaabi_dbg_ctx2 );
 		}

@@ -458,5 +465,6 @@
 		}

-		verify( (shead + ret) == *ring.submit_q.head );
+		uint32_t nhead = *ring.submit_q.head;
+		verifyf( (shead + ret) == nhead, "Expected %u got %u\n", (shead + ret), nhead );

 		// Release the consumed SQEs

@@ -474,7 +482,7 @@
 		// update statistics
 		#if !defined(__CFA_NO_STATISTICS__)
-			__tls_stats()->io.submit_q.stats.submit_avg.rdy += to_submit;
-			__tls_stats()->io.submit_q.stats.submit_avg.csm += ret;
-			__tls_stats()->io.submit_q.stats.submit_avg.avl += avail;
-			__tls_stats()->io.submit_q.stats.submit_avg.cnt += 1;
+			__tls_stats()->io.submit_q.submit_avg.rdy += to_submit;
+			__tls_stats()->io.submit_q.submit_avg.csm += ret;
+			__tls_stats()->io.submit_q.submit_avg.avl += avail;
+			__tls_stats()->io.submit_q.submit_avg.cnt += 1;
 		#endif

@@ -505,3 +513,3 @@
 			data->result = cqe.res;
 			if(!in_kernel) { unpark( data->thrd __cfaabi_dbg_ctx2 ); }
-			else           { __unpark( data->thrd __cfaabi_dbg_ctx2 ); }
+			else           { __unpark( &ring.poller.slow.id, data->thrd __cfaabi_dbg_ctx2 ); }
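An aside on the largest hunk above: the destructor no longer pokes ready_queue.head directly; it removes the preempted thread under the scheduler's ready lock. Below is a minimal plain-C sketch of that locking pattern. The stub types and simplified signatures are assumptions for illustration, not the real CFA API.

    #include <assert.h>
    #include <stdbool.h>

    struct cluster;      /* stand-in for the CFA cluster type */
    struct thread_desc;  /* stand-in for the CFA thread descriptor */

    /* Assumed runtime hooks; the real CFA signatures differ. */
    extern void ready_schedule_lock(void);
    extern void ready_schedule_unlock(void);
    extern bool remove_head(struct cluster * cltr, struct thread_desc * thrd);

    /* Take a preempted thread off the ready queue so it can be treated
     * as if it had blocked; the removal is only legal under the lock. */
    void fixup_preempted(struct cluster * cltr, struct thread_desc * thrd) {
        ready_schedule_lock();
        bool removed = remove_head(cltr, thrd);
        assert(removed);  /* the thread must have been on the queue */
        ready_schedule_unlock();
    }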
@@ -520,6 +528,14 @@

 	static void * __io_poller_slow( void * arg ) {
+		#if !defined( __CFA_NO_STATISTICS__ )
+			__stats_t local_stats;
+			__init_stats( &local_stats );
+			kernelTLS.this_stats = &local_stats;
+		#endif
+
 		cluster * cltr = (cluster *)arg;
 		struct __io_data & ring = *cltr->io;
+
+		ring.poller.slow.id.id = doregister( &ring.poller.slow.id );

 		sigset_t mask;

@@ -551,11 +567,11 @@
 				// Update statistics
 				#if !defined(__CFA_NO_STATISTICS__)
-					__tls_stats()->io.complete_q.stats.completed_avg.val += count;
-					__tls_stats()->io.complete_q.stats.completed_avg.slow_cnt += 1;
+					__tls_stats()->io.complete_q.completed_avg.val += count;
+					__tls_stats()->io.complete_q.completed_avg.slow_cnt += 1;
 				#endif

 				if(again) {
 					__cfadbg_print_safe(io_core, "Kernel I/O : Moving to ring %p to fast poller\n", &ring);
-					__unpark( &ring.poller.fast.thrd __cfaabi_dbg_ctx2 );
+					__unpark( &ring.poller.slow.id, &ring.poller.fast.thrd __cfaabi_dbg_ctx2 );
 					wait( ring.poller.sem );
 				}

@@ -571,6 +587,6 @@
 				// Update statistics
 				#if !defined(__CFA_NO_STATISTICS__)
-					__tls_stats()->io.complete_q.stats.completed_avg.val += count;
-					__tls_stats()->io.complete_q.stats.completed_avg.slow_cnt += 1;
+					__tls_stats()->io.complete_q.completed_avg.val += count;
+					__tls_stats()->io.complete_q.completed_avg.slow_cnt += 1;
 				#endif
 			}

@@ -578,4 +594,6 @@

 		__cfadbg_print_safe(io_core, "Kernel I/O : Slow poller for ring %p stopping\n", &ring);
+
+		unregister( &ring.poller.slow.id );

 		return 0p;

@@ -598,13 +616,15 @@
 			int count;
 			bool again;
-			[count, again] = __drain_io( *this.ring, 0p, 0, false );
-
-			if(!again) reset++;
-
-			// Update statistics
-			#if !defined(__CFA_NO_STATISTICS__)
-				__tls_stats()->io.complete_q.stats.completed_avg.val += count;
-				__tls_stats()->io.complete_q.stats.completed_avg.fast_cnt += 1;
-			#endif
+			disable_interrupts();
+				[count, again] = __drain_io( *this.ring, 0p, 0, false );
+
+				if(!again) reset++;
+
+				// Update statistics
+				#if !defined(__CFA_NO_STATISTICS__)
+					__tls_stats()->io.complete_q.completed_avg.val += count;
+					__tls_stats()->io.complete_q.completed_avg.fast_cnt += 1;
+				#endif
+			enable_interrupts( __cfaabi_dbg_ctx );

 			// If we got something, just yield and check again

@@ -667,4 +687,6 @@
 		verify( data != 0 );

+		disable_interrupts();
+
 		// Prepare the data we need
 		__attribute((unused)) int len = 0;

@@ -675,5 +697,5 @@

 		// Loop around looking for an available spot
-		LOOKING: for() {
+		for() {
 			// Look through the list starting at some offset
 			for(i; cnt) {

@@ -688,8 +710,10 @@
 				// update statistics
 				#if !defined(__CFA_NO_STATISTICS__)
-					__tls_stats()->io.submit_q.stats.alloc_avg.val += len;
-					__tls_stats()->io.submit_q.stats.alloc_avg.block += block;
-					__tls_stats()->io.submit_q.stats.alloc_avg.cnt += 1;
+					__tls_stats()->io.submit_q.alloc_avg.val += len;
+					__tls_stats()->io.submit_q.alloc_avg.block += block;
+					__tls_stats()->io.submit_q.alloc_avg.cnt += 1;
 				#endif
+
+				enable_interrupts( __cfaabi_dbg_ctx );

 				// Success return the data

@@ -710,4 +734,6 @@
 		uint32_t * const tail = ring.submit_q.tail;
 		const uint32_t mask = *ring.submit_q.mask;
+
+		disable_interrupts();

 		// There are 2 submission schemes, check which one we are using
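Several hunks above bracket the fast poller's drain loop and the submission paths with disable_interrupts()/enable_interrupts(), so a preemption cannot migrate the thread between the two halves of a per-processor statistics update. A small plain-C sketch of the bracketing, with assumed stub hooks standing in for the CFA runtime:

    #include <stdint.h>

    /* Assumed hooks modelling CFA's disable_interrupts() /
     * enable_interrupts(); names and signatures are simplified. */
    extern void disable_interrupts(void);
    extern void enable_interrupts(void);

    struct io_stats { uint64_t val; uint64_t cnt; };
    extern struct io_stats * tls_stats(void);  /* stand-in for __tls_stats() */

    /* Update per-processor counters without being preempted in between,
     * so both increments land in the same processor's TLS block. */
    void drain_and_count(int completed) {
        disable_interrupts();
        struct io_stats * s = tls_stats();
        s->val += completed;
        s->cnt += 1;
        enable_interrupts();
    }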
@@ -743,7 +769,7 @@
 			// update statistics
 			#if !defined(__CFA_NO_STATISTICS__)
-				__tls_stats()->io.submit_q.stats.look_avg.val += len;
-				__tls_stats()->io.submit_q.stats.look_avg.block += block;
-				__tls_stats()->io.submit_q.stats.look_avg.cnt += 1;
+				__tls_stats()->io.submit_q.look_avg.val += len;
+				__tls_stats()->io.submit_q.look_avg.block += block;
+				__tls_stats()->io.submit_q.look_avg.cnt += 1;
 			#endif

@@ -772,6 +798,6 @@
 			// update statistics
 			#if !defined(__CFA_NO_STATISTICS__)
-				__tls_stats()->io.submit_q.stats.submit_avg.csm += 1;
-				__tls_stats()->io.submit_q.stats.submit_avg.cnt += 1;
+				__tls_stats()->io.submit_q.submit_avg.csm += 1;
+				__tls_stats()->io.submit_q.submit_avg.cnt += 1;
 			#endif

@@ -780,4 +806,6 @@
 			__cfadbg_print_safe( io, "Kernel I/O : Performed io_submit for %p, returned %d\n", active_thread(), ret );
 		}
+
+		enable_interrupts( __cfaabi_dbg_ctx );
 	}
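Taken together, the slow-poller hunks give the raw pthread its own identity: it registers a __processor_id_t with the scheduler (doregister) before passing that id to __unpark, and it unregisters before exiting. A hedged plain-C sketch of that lifecycle; the stand-in names and signatures below are guesses for illustration, not the real CFA hooks:

    #include <stddef.h>

    struct processor_id { unsigned id; };  /* stand-in for __processor_id_t */

    /* Assumed runtime hooks; doregister/unregister mirror the names in
     * the diff, but these signatures are simplified. */
    extern unsigned doregister(struct processor_id * self);
    extern void unregister(struct processor_id * self);
    extern void runtime_unpark(struct processor_id * self, void * thrd);  /* models __unpark */

    void * poller_main(void * arg) {
        (void)arg;
        struct processor_id self;
        self.id = doregister(&self);  /* announce this kernel thread to the scheduler */

        /* ... poll, waking threads with runtime_unpark(&self, thrd) ... */

        unregister(&self);  /* deregister before the pthread exits */
        return NULL;
    }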