Changeset 1d5e4711 for libcfa/src/concurrency/io.cfa
- Timestamp: Jul 8, 2020, 4:30:14 PM (4 years ago)
- Branches: ADT, arm-eh, ast-experimental, enum, forall-pointer-decay, jacob/cs343-translation, master, new-ast, new-ast-unique-expr, pthread-emulation, qualifiedEnum
- Children: 34b61882
- Parents: 1c49dc5
- File: 1 edited (libcfa/src/concurrency/io.cfa)
Legend:
- Unmodified (context line, prefixed with a space)
- Added (prefixed with '+')
- Removed (prefixed with '-')
libcfa/src/concurrency/io.cfa
--- libcfa/src/concurrency/io.cfa (r1c49dc5)
+++ libcfa/src/concurrency/io.cfa (r1d5e4711)
@@ -429 +429 @@
 // I/O Polling
 //=============================================================================================
+static unsigned __collect_submitions( struct __io_data & ring );
+
 // Process a single completion message from the io_uring
 // This is NOT thread-safe
-static unsigned __collect_submitions( struct __io_data & ring );
 static [int, bool] __drain_io( & struct __io_data ring, * sigset_t mask, int waitcnt, bool in_kernel ) {
 	/* paranoid */ verify( !kernelTLS.preemption_state.enabled );
+	const uint32_t smask = *ring.submit_q.mask;
 
 	unsigned to_submit = 0;
 	…
 	}
 
-	const uint32_t smask = *ring.submit_q.mask;
-	uint32_t shead = *ring.submit_q.head;
-	int ret = syscall( __NR_io_uring_enter, ring.fd, to_submit, waitcnt, IORING_ENTER_GETEVENTS, mask, _NSIG / 8);
-	if( ret < 0 ) {
-		switch((int)errno) {
-			case EAGAIN:
-			case EINTR:
-				return [0, true];
-			default:
-				abort( "KERNEL ERROR: IO_URING WAIT - %s\n", strerror(errno) );
-		}
-	}
-
-	// Release the consumed SQEs
-	for( i; ret ) {
-		uint32_t idx = ring.submit_q.array[ (i + shead) & smask ];
-		ring.submit_q.sqes[ idx ].user_data = 0;
-	}
-
-	// update statistics
-	__STATS__( true,
-		if( to_submit > 0 ) {
-			io.submit_q.submit_avg.rdy += to_submit;
-			io.submit_q.submit_avg.csm += ret;
-			io.submit_q.submit_avg.cnt += 1;
-		}
-	)
+	if (to_submit > 0 || waitcnt > 0) {
+		uint32_t shead = *ring.submit_q.head;
+		int ret = syscall( __NR_io_uring_enter, ring.fd, to_submit, waitcnt, IORING_ENTER_GETEVENTS, mask, _NSIG / 8);
+		if( ret < 0 ) {
+			switch((int)errno) {
+				case EAGAIN:
+				case EINTR:
+					return [0, true];
+				default:
+					abort( "KERNEL ERROR: IO_URING WAIT - %s\n", strerror(errno) );
+			}
+		}
+
+		// Release the consumed SQEs
+		for( i; ret ) {
+			uint32_t idx = ring.submit_q.array[ (i + shead) & smask ];
+			ring.submit_q.sqes[ idx ].user_data = 0;
+		}
+
+		// update statistics
+		__STATS__( true,
+			if( to_submit > 0 ) {
+				io.submit_q.submit_avg.rdy += to_submit;
+				io.submit_q.submit_avg.csm += ret;
+				io.submit_q.submit_avg.cnt += 1;
+			}
+		)
+	}
+
+	// Memory barrier
+	__atomic_thread_fence( __ATOMIC_SEQ_CST );
 
 	// Drain the queue
 	…
 	const uint32_t mask = *ring.completion_q.mask;
 
-	// Memory barrier
-	__atomic_thread_fence( __ATOMIC_SEQ_CST );
-
 	// Nothing was new return 0
 	if (head == tail) {
 	…
 	}
 	uint32_t count = tail - head;
+	/* paranoid */ verify( count != 0 );
 	for(i; count) {
 		unsigned idx = (head + i) & mask;
@@ -790 +794 @@
 	// We got the lock
 	unsigned to_submit = __collect_submitions( ring );
-	const uint32_t smask = *ring.submit_q.mask;
 	uint32_t shead = *ring.submit_q.head;
 	int ret = syscall( __NR_io_uring_enter, ring.fd, to_submit, 0, 0, 0p, _NSIG / 8);
 	…
 
 	// Release the consumed SQEs
+	const uint32_t smask = *ring.submit_q.mask;
 	for( i; ret ) {
 		uint32_t idx = ring.submit_q.array[ (i + shead) & smask ];
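The core of this change is that __drain_io now skips the io_uring_enter syscall entirely when there is nothing to submit and nothing to wait for, and the sequentially-consistent fence is issued once before the completion-queue indices are read. Below is a minimal plain-C sketch of that control flow (not the CFA code itself); the ring_t layout, the drain_io name, and the elided SQE-release / CQE-processing steps are simplified placeholders, not part of io.cfa.

// Minimal plain-C sketch of the submit/drain flow after this changeset.
// `ring_t` is a hypothetical, simplified stand-in for __io_data.
#include <errno.h>
#include <signal.h>          // _NSIG
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/syscall.h>     // __NR_io_uring_enter
#include <unistd.h>
#include <linux/io_uring.h>  // IORING_ENTER_GETEVENTS

typedef struct {
	int fd;                                           // io_uring file descriptor
	volatile uint32_t *cq_head, *cq_tail, *cq_mask;   // mmap'ed completion-ring fields
} ring_t;

// Returns the number of completions available, or 0 if nothing happened.
static int drain_io( ring_t *ring, sigset_t *mask, unsigned to_submit, unsigned waitcnt ) {
	// Guard added by this changeset: only enter the kernel when there is
	// something to submit or a completion to wait for.
	if( to_submit > 0 || waitcnt > 0 ) {
		int ret = syscall( __NR_io_uring_enter, ring->fd, to_submit, waitcnt,
		                   IORING_ENTER_GETEVENTS, mask, _NSIG / 8 );
		if( ret < 0 ) {
			if( errno == EAGAIN || errno == EINTR ) return 0;   // benign, retry later
			fprintf( stderr, "io_uring_enter: %s\n", strerror(errno) );
			abort();
		}
		// ... release the `ret` consumed SQEs and update statistics here ...
	}

	// Full fence before reading the completion indices, so CQE contents
	// written by the kernel are visible to the processing below.
	atomic_thread_fence( memory_order_seq_cst );

	uint32_t head = *ring->cq_head;
	uint32_t tail = *ring->cq_tail;
	if( head == tail ) return 0;                      // nothing new completed

	uint32_t count = tail - head;
	// ... process `count` CQEs at indices (head + i) & *ring->cq_mask ...
	return (int)count;
}

Because the fence now sits outside the conditional, completions produced by earlier submissions are still observed even on iterations that skip the syscall.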