Changeset 5c581cc
- Timestamp: May 11, 2020, 3:33:21 PM
- Branches: ADT, arm-eh, ast-experimental, enum, forall-pointer-decay, jacob/cs343-translation, master, new-ast, new-ast-unique-expr, pthread-emulation, qualifiedEnum
- Children: 70ac8d0
- Parents: 6c12fd28
- File: 1 edited
Legend:
- Lines prefixed with + were added
- Lines prefixed with - were removed
- Unprefixed lines are unmodified context
libcfa/src/concurrency/io.cfa (r6c12fd28 → r5c581cc)

    #include "kernel.hfa"
+   #include "bitmanip.hfa"

    #if !defined(HAVE_LINUX_IO_URING_H)
…
        void * stack;
        pthread_t kthrd;
+       volatile bool blocked;
    } slow;
    __io_poller_fast fast;
…
    if( io_flags & CFA_CLUSTER_IO_POLLER_THREAD_SUBMITS ) {
+       /* paranoid */ verify( is_pow2( io_flags >> CFA_CLUSTER_IO_BUFFLEN_OFFSET ) || ((io_flags >> CFA_CLUSTER_IO_BUFFLEN_OFFSET) < 8) );
        sq.ready_cnt = max(io_flags >> CFA_CLUSTER_IO_BUFFLEN_OFFSET, 8);
        sq.ready = alloc_align( 64, sq.ready_cnt );
…
    // Create the poller thread
    __cfadbg_print_safe(io_core, "Kernel I/O : Creating slow poller for cluter %p\n", &this);
+   this.io->poller.slow.blocked = false;
    this.io->poller.slow.stack = __create_pthread( &this.io->poller.slow.kthrd, __io_poller_slow, &this );
    }
…
    while(!__atomic_load_n(&ring.done, __ATOMIC_SEQ_CST)) {

+       __atomic_store_n( &ring.poller.slow.blocked, true, __ATOMIC_SEQ_CST );
+
        // In the user-thread approach drain and if anything was drained,
        // batton pass to the user-thread
        int count;
        bool again;
-       [count, again] = __drain_io( ring, &mask, 0, true );
+       [count, again] = __drain_io( ring, &mask, 1, true );
+
+       __atomic_store_n( &ring.poller.slow.blocked, false, __ATOMIC_SEQ_CST );

        // Update statistics
…
    static inline void __wake_poller( struct __io_data & ring ) __attribute__((artificial));
    static inline void __wake_poller( struct __io_data & ring ) {
-       // sigval val = { 1 };
-       // pthread_sigqueue( ring.poller.slow.kthrd, SIGUSR1, val );
+       if(!__atomic_load_n( &ring.poller.slow.blocked, __ATOMIC_SEQ_CST)) return;
+
+       sigval val = { 1 };
+       pthread_sigqueue( ring.poller.slow.kthrd, SIGUSR1, val );
    }
…
    __attribute((unused)) int block = 0;
    uint32_t expected = -1ul32;
-   LOOKING: for(;;) {
+   uint32_t ready_mask = ring.submit_q.ready_cnt - 1;
+   uint32_t off = __tls_rand();
+   LOOKING: for() {
        for(i; ring.submit_q.ready_cnt) {
-           if( __atomic_compare_exchange_n( &ring.submit_q.ready[i], &expected, idx, true, __ATOMIC_SEQ_CST, __ATOMIC_RELAXED ) ) {
+           uint32_t ii = (i + off) & ready_mask;
+           if( __atomic_compare_exchange_n( &ring.submit_q.ready[ii], &expected, idx, true, __ATOMIC_SEQ_CST, __ATOMIC_RELAXED ) ) {
                break LOOKING;
            }
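The substance of this change is a handshake between I/O submitters and the slow poller thread: the poller publishes blocked = true before calling __drain_io( ring, &mask, 1, true ) (now asking the kernel to wait for at least one completion instead of returning immediately), clears the flag once it wakes, and __wake_poller only pays for pthread_sigqueue( ..., SIGUSR1, ... ) when the flag says the poller may actually be asleep. The following is a minimal sketch of that pattern in plain C, not the CFA runtime's code; poller_t, drain_io, poller_loop and wake_poller are illustrative names, and the sketch assumes the poller thread has a SIGUSR1 handler installed so the signal interrupts the blocking call rather than terminating the process.

#define _GNU_SOURCE            // pthread_sigqueue is a GNU extension
#include <pthread.h>
#include <signal.h>
#include <stdbool.h>
#include <unistd.h>

typedef struct {
    pthread_t     kthrd;       // kernel thread running the slow poller
    volatile bool blocked;     // true while the poller may be sleeping in the kernel
} poller_t;

// Stand-in for the blocking drain (e.g. io_uring_enter waiting for >= 1 completion).
static void drain_io( void ) {
    sleep( 1 );
}

// Poller loop: publish that we are about to block, block, then clear the flag.
static void poller_loop( poller_t * p, volatile bool * done ) {
    while( !__atomic_load_n( done, __ATOMIC_SEQ_CST ) ) {
        __atomic_store_n( &p->blocked, true, __ATOMIC_SEQ_CST );
        drain_io();
        __atomic_store_n( &p->blocked, false, __ATOMIC_SEQ_CST );
        // ... hand completions to user threads, update statistics ...
    }
}

// Waker: a plain atomic load in the common case, a signal only when the poller is blocked.
static void wake_poller( poller_t * p ) {
    if( !__atomic_load_n( &p->blocked, __ATOMIC_SEQ_CST ) ) return;

    union sigval val = { .sival_int = 1 };
    pthread_sigqueue( p->kthrd, SIGUSR1, val );
}

In the common case where the poller is already running, waking it costs a single atomic load instead of a queued signal; only a poller that has advertised it is about to block gets signalled.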
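The last hunk changes how a submitter claims a slot in the submit_q.ready array: the scan now starts at a per-thread random offset (__tls_rand()) and wraps with ready_mask = ready_cnt - 1, which is also why the new verify( is_pow2(...) || ... ) assertion and the minimum size of 8 matter, since the mask only replaces a modulo when the array length is a power of two. Below is a minimal sketch of that claim pattern in plain C; claim_ready_slot and tls_rand are hypothetical stand-ins for the CFA code and __tls_rand, and EMPTY plays the role of the -1ul32 sentinel.

#include <stdint.h>

#define EMPTY ((uint32_t)-1)           // sentinel value stored in unclaimed slots

// Stand-in for a cheap thread-local PRNG; a real one would be seeded per thread.
static uint32_t tls_rand( void ) {
    static __thread uint32_t state = 0x9e3779b9u;
    state ^= state << 13; state ^= state >> 17; state ^= state << 5;   // xorshift32
    return state;
}

// Scan every slot once, starting at a random position; CAS EMPTY -> idx to claim.
// Returns the claimed slot index, or -1 if all slots were taken on this pass.
static int claim_ready_slot( uint32_t * ready, uint32_t ready_cnt, uint32_t idx ) {
    uint32_t mask = ready_cnt - 1;     // valid only because ready_cnt is a power of two
    uint32_t off  = tls_rand();
    for( uint32_t i = 0; i < ready_cnt; i += 1 ) {
        uint32_t ii = (i + off) & mask;           // wrap around the array
        uint32_t expected = EMPTY;
        if( __atomic_compare_exchange_n( &ready[ii], &expected, idx, true,
                                         __ATOMIC_SEQ_CST, __ATOMIC_RELAXED ) ) {
            return (int)ii;                       // slot claimed
        }
        // on failure, expected holds the occupant; it is reinitialized next iteration
    }
    return -1;                                    // caller retries (the CFA code loops)
}

Randomizing the starting point spreads concurrent submitters across the array instead of having them all contend on ready[0], and the & mask keeps the wrap-around branch-free.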