Changeset a757ba1 for libcfa/src/concurrency
- Timestamp:
- Nov 10, 2022, 12:17:20 PM
- Branches:
- ADT, ast-experimental, master
- Children:
- 639e4fc
- Parents:
- 6a4ef0c
- Location:
- libcfa/src/concurrency
- Files:
- 6 edited
Legend:
- Unmodified (shown with a leading space)
- Added (shown with a leading +)
- Removed (shown with a leading -)
libcfa/src/concurrency/io.cfa
r6a4ef0c → ra757ba1

 	}
 }
-
-#if defined(CFA_WITH_IO_URING_IDLE)
-	bool __kernel_read(struct processor * proc, io_future_t & future, iovec & iov, int fd) {
-		io_context$ * ctx = proc->io.ctx;
-		/* paranoid */ verify( ! __preemption_enabled() );
-		/* paranoid */ verify( proc == __cfaabi_tls.this_processor );
-		/* paranoid */ verify( ctx );
-
-		__u32 idx;
-		struct io_uring_sqe * sqe;
-
-		// We can proceed to the fast path
-		if( !__alloc(ctx, &idx, 1) ) {
-			/* paranoid */ verify( false ); // for now check if this happens, next time just abort the sleep.
-			return false;
-		}
-
-		// Allocation was successful
-		__fill( &sqe, 1, &idx, ctx );
-
-		sqe->user_data = (uintptr_t)&future;
-		sqe->flags = 0;
-		sqe->fd = fd;
-		sqe->off = 0;
-		sqe->ioprio = 0;
-		sqe->fsync_flags = 0;
-		sqe->__pad2[0] = 0;
-		sqe->__pad2[1] = 0;
-		sqe->__pad2[2] = 0;
-
-		#if defined(CFA_HAVE_IORING_OP_READ)
-			sqe->opcode = IORING_OP_READ;
-			sqe->addr = (uint64_t)iov.iov_base;
-			sqe->len = iov.iov_len;
-		#elif defined(CFA_HAVE_READV) && defined(CFA_HAVE_IORING_OP_READV)
-			sqe->opcode = IORING_OP_READV;
-			sqe->addr = (uintptr_t)&iov;
-			sqe->len = 1;
-		#else
-			#error CFA_WITH_IO_URING_IDLE but none of CFA_HAVE_READV, CFA_HAVE_IORING_OP_READV or CFA_HAVE_IORING_OP_READ defined
-		#endif
-
-		asm volatile("": : :"memory");
-
-		/* paranoid */ verify( sqe->user_data == (uintptr_t)&future );
-		__submit_only( ctx, &idx, 1 );
-
-		/* paranoid */ verify( proc == __cfaabi_tls.this_processor );
-		/* paranoid */ verify( ! __preemption_enabled() );
-
-		return true;
-	}
-
-	void __cfa_io_idle( struct processor * proc ) {
-		iovec iov;
-		__atomic_acquire( &proc->io.ctx->cq.lock );
-
-		__attribute__((used)) volatile bool was_reset = false;
-
-		with( proc->idle_wctx) {
-
-			// Do we already have a pending read
-			if(available(*ftr)) {
-				// There is no pending read, we need to add one
-				reset(*ftr);
-
-				iov.iov_base = rdbuf;
-				iov.iov_len = sizeof(eventfd_t);
-				__kernel_read(proc, *ftr, iov, evfd );
-				ftr->result = 0xDEADDEAD;
-				*((eventfd_t *)rdbuf) = 0xDEADDEADDEADDEAD;
-				was_reset = true;
-			}
-		}
-
-		if( !__atomic_load_n( &proc->do_terminate, __ATOMIC_SEQ_CST ) ) {
-			__ioarbiter_flush( *proc->io.ctx );
-			proc->idle_wctx.sleep_time = rdtscl();
-			ioring_syscsll( *proc->io.ctx, 1, IORING_ENTER_GETEVENTS);
-		}
-
-		ready_schedule_lock();
-		__cfa_do_drain( proc->io.ctx, proc->cltr );
-		ready_schedule_unlock();
-
-		asm volatile ("" :: "m" (was_reset));
-	}
-#endif
 #endif
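Reviewer note (not part of the changeset): the removed __kernel_read/__cfa_io_idle pair implemented idle sleep by queuing a read of the processor's wake-up eventfd on its io_uring, so one io_uring_enter call doubled as both I/O submission and sleep. Below is a minimal sketch of that pattern written against liburing rather than the CFA runtime's raw-syscall helpers; the ring setup, the self-wake in a single thread, and all names outside the io_uring/eventfd APIs are illustrative assumptions (build with -luring; IORING_OP_READ needs kernel 5.6+).

// idle_sketch.c — gcc idle_sketch.c -luring
#include <liburing.h>
#include <sys/eventfd.h>
#include <stdio.h>

int main(void) {
	struct io_uring ring;
	if (io_uring_queue_init(8, &ring, 0) < 0) return 1;

	int evfd = eventfd(0, 0);
	eventfd_t buf;

	// Queue a read of the wake-up eventfd, analogous to __kernel_read().
	struct io_uring_sqe * sqe = io_uring_get_sqe(&ring);
	io_uring_prep_read(sqe, evfd, &buf, sizeof(buf), 0);

	// Simulate another processor waking us by posting to the eventfd.
	eventfd_write(evfd, 1);

	// Submit and block until a completion arrives: this one call is both the
	// "flush pending I/O" and the "idle sleep" of the removed code path.
	io_uring_submit_and_wait(&ring, 1);

	struct io_uring_cqe * cqe;
	io_uring_wait_cqe(&ring, &cqe);
	printf("woken, read returned %d\n", cqe->res);
	io_uring_cqe_seen(&ring, cqe);
	io_uring_queue_exit(&ring);
	return 0;
}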
libcfa/src/concurrency/io/setup.cfa
r6a4ef0c → ra757ba1

 bool __cfa_io_flush( processor * proc ) { return false; }
 bool __cfa_io_drain( processor * proc ) __attribute__((nonnull (1))) { return false; }
-void __cfa_io_idle ( processor * ) __attribute__((nonnull (1))) {}
 void __cfa_io_stop ( processor * proc ) {}
…
 }

-//=============================================================================================
-// I/O Context Sleep
-//=============================================================================================
-// static inline void __epoll_ctl(io_context$ & ctx, int op, const char * error) {
-// 	struct epoll_event ev;
-// 	ev.events = EPOLLIN | EPOLLONESHOT;
-// 	ev.data.u64 = (__u64)&ctx;
-// 	int ret = epoll_ctl(iopoll.epollfd, op, ctx.efd, &ev);
-// 	if (ret < 0) {
-// 		abort( "KERNEL ERROR: EPOLL %s - (%d) %s\n", error, (int)errno, strerror(errno) );
-// 	}
-// }
-
-// static void __epoll_register(io_context$ & ctx) {
-// 	__epoll_ctl(ctx, EPOLL_CTL_ADD, "ADD");
-// }
-
-// static void __epoll_unregister(io_context$ & ctx) {
-// 	// Read the current epoch so we know when to stop
-// 	size_t curr = __atomic_load_n(&iopoll.epoch, __ATOMIC_SEQ_CST);
-
-// 	// Remove the fd from the iopoller
-// 	__epoll_ctl(ctx, EPOLL_CTL_DEL, "REMOVE");
-
-// 	// Notify the io poller thread of the shutdown
-// 	iopoll.run = false;
-// 	sigval val = { 1 };
-// 	__cfaabi_pthread_sigqueue( iopoll.thrd, SIGUSR1, val );
-
-// 	// Make sure all this is done
-// 	__atomic_thread_fence(__ATOMIC_SEQ_CST);
-
-// 	// Wait for the next epoch
-// 	while(curr == iopoll.epoch && !iopoll.stopped) Pause();
-// }
-
-// void __ioctx_prepare_block(io_context$ & ctx) {
-// 	__cfadbg_print_safe(io_core, "Kernel I/O - epoll : Re-arming io poller %d (%p)\n", ctx.fd, &ctx);
-// 	__epoll_ctl(ctx, EPOLL_CTL_MOD, "REARM");
-// }
-
 //=============================================================================================
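Reviewer note (not part of the changeset): the deleted commented-out block was the old epoll fallback, built around EPOLLONESHOT so each context fires once and must be re-armed with EPOLL_CTL_MOD before it can block again. Here is a self-contained sketch of that register/wait/re-arm cycle; the ctx struct and helper names are stand-ins, not CFA APIs.

#include <sys/epoll.h>
#include <sys/eventfd.h>
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

// Hypothetical stand-in for io_context$; only the fd matters here.
struct ctx { int efd; };

static void ctl(int epollfd, int op, struct ctx * c, const char * what) {
	struct epoll_event ev;
	ev.events = EPOLLIN | EPOLLONESHOT;   // one wake-up per arming
	ev.data.ptr = c;
	if (epoll_ctl(epollfd, op, c->efd, &ev) < 0) {
		fprintf(stderr, "EPOLL %s: %s\n", what, strerror(errno));
		exit(1);
	}
}

int main(void) {
	int epollfd = epoll_create1(0);
	struct ctx c = { eventfd(0, 0) };

	ctl(epollfd, EPOLL_CTL_ADD, &c, "ADD");    // like __epoll_register
	eventfd_write(c.efd, 1);                   // someone signals the context

	struct epoll_event out;
	epoll_wait(epollfd, &out, 1, -1);          // fires once, then disarms
	ctl(epollfd, EPOLL_CTL_MOD, &c, "REARM");  // like __ioctx_prepare_block
	return 0;
}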
libcfa/src/concurrency/kernel.cfa
r6a4ef0c → ra757ba1

 extern bool __cfa_io_drain( processor * proc ) __attribute__((nonnull (1)));
 extern bool __cfa_io_flush( processor * ) __attribute__((nonnull (1)));
-extern void __cfa_io_idle( processor * ) __attribute__((nonnull (1)));
-
-#if defined(CFA_WITH_IO_URING_IDLE)
-	extern bool __kernel_read(processor * proc, io_future_t & future, iovec &, int fd);
-#endif

 extern void __disable_interrupts_hard();
…
 processor * this = runner.proc;
 verify(this);
-
-/* paranoid */ verify( this->idle_wctx.ftr != 0p );
-/* paranoid */ verify( this->idle_wctx.rdbuf != 0p );
-
-// used for idle sleep when io_uring is present
-// mark it as already fulfilled so we know if there is a pending request or not
-this->idle_wctx.ftr->self.ptr = 1p;

 __cfadbg_print_safe(runtime_core, "Kernel : core %p starting\n", this);
…
-#if !defined(CFA_WITH_IO_URING_IDLE)
-	#if !defined(__CFA_NO_STATISTICS__)
-		if(this->print_halts) {
-			__cfaabi_bits_print_safe( STDOUT_FILENO, "PH:%d - %lld 0\n", this->unique_id, rdtscl());
-		}
-	#endif
-
-	__cfadbg_print_safe(runtime_core, "Kernel : core %p waiting on eventfd %d\n", this, this->idle_fd);
-
-	{
-		eventfd_t val;
-		ssize_t ret = read( this->idle_wctx.evfd, &val, sizeof(val) );
-		if(ret < 0) {
-			switch((int)errno) {
-			case EAGAIN:
-			#if EAGAIN != EWOULDBLOCK
-			case EWOULDBLOCK:
-			#endif
-			case EINTR:
-				// No need to do anything special here, just assume it's a legitimate wake-up
-				break;
-			default:
-				abort( "KERNEL : internal error, read failure on idle eventfd, error(%d) %s.", (int)errno, strerror( (int)errno ) );
-			}
-		}
-	}
-
-	#if !defined(__CFA_NO_STATISTICS__)
-		if(this->print_halts) {
-			__cfaabi_bits_print_safe( STDOUT_FILENO, "PH:%d - %lld 1\n", this->unique_id, rdtscl());
-		}
-	#endif
-#else
-	__cfa_io_idle( this );
-#endif
+#if !defined(__CFA_NO_STATISTICS__)
+	if(this->print_halts) {
+		__cfaabi_bits_print_safe( STDOUT_FILENO, "PH:%d - %lld 0\n", this->unique_id, rdtscl());
+	}
+#endif
+
+__cfadbg_print_safe(runtime_core, "Kernel : core %p waiting on eventfd %d\n", this, this->idle_fd);
+
+{
+	eventfd_t val;
+	ssize_t ret = read( this->idle_wctx.evfd, &val, sizeof(val) );
+	if(ret < 0) {
+		switch((int)errno) {
+		case EAGAIN:
+		#if EAGAIN != EWOULDBLOCK
+		case EWOULDBLOCK:
+		#endif
+		case EINTR:
+			// No need to do anything special here, just assume it's a legitimate wake-up
+			break;
+		default:
+			abort( "KERNEL : internal error, read failure on idle eventfd, error(%d) %s.", (int)errno, strerror( (int)errno ) );
+		}
+	}
+}
+
+#if !defined(__CFA_NO_STATISTICS__)
+	if(this->print_halts) {
+		__cfaabi_bits_print_safe( STDOUT_FILENO, "PH:%d - %lld 1\n", this->unique_id, rdtscl());
+	}
+#endif
 }
…
 insert_first(this.idles, proc);

+// update the pointer to the head wait context, which should now point to this proc.
 __atomic_store_n(&this.fdw, &proc.idle_wctx, __ATOMIC_SEQ_CST);
 unlock( this );
…

 {
+	// update the pointer to the head wait context
 	struct __fd_waitctx * wctx = 0;
 	if(!this.idles`isEmpty) wctx = &this.idles`first.idle_wctx;
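Reviewer note (not part of the changeset): with the io_uring branch gone, idle sleep is now unconditionally a blocking read(2) on the processor's eventfd, and a peer wakes it with a write. A minimal two-thread sketch of that protocol using plain pthreads; the EAGAIN/EINTR handling is elided since, as the kernel code above notes, those are treated as legitimate wake-ups.

#include <pthread.h>
#include <sys/eventfd.h>
#include <unistd.h>
#include <stdio.h>

static int evfd;

static void * idle_proc(void * arg) {
	eventfd_t val;
	// Blocks until another thread posts to the eventfd, exactly like the
	// read( this->idle_wctx.evfd, ... ) call in the diff above.
	ssize_t ret = read(evfd, &val, sizeof(val));
	if (ret == sizeof(val))
		printf("woken with count %llu\n", (unsigned long long)val);
	return NULL;
}

int main(void) {
	evfd = eventfd(0, 0);
	pthread_t t;
	pthread_create(&t, NULL, idle_proc, NULL);
	eventfd_write(evfd, 1);   // the "wake this processor" side
	pthread_join(t, NULL);
	return 0;
}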
libcfa/src/concurrency/kernel.hfa
r6a4ef0c → ra757ba1

 // 1 - means the proc should wake-up immediately
 // FD - means the proc is going asleep and should be woken by writing to the FD.
+//      The FD value should always be the evfd field just below.
 volatile int sem;

 int evfd;

-// buffer into which the proc will read from evfd
-// unused if not using io_uring for idle sleep
-void * rdbuf;
-
-// future use to track the read of the eventfd
-// unused if not using io_uring for idle sleep
-io_future_t * ftr;
-
+// Used for debugging, should be removed eventually.
 volatile unsigned long long wake__time;
 volatile unsigned long long sleep_time;
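Reviewer note (not part of the changeset): the sem comments describe a small wake-up protocol: the sleeper publishes a value for wakers to act on, where 1 means wake immediately and a larger value is the eventfd to write to. An illustrative sketch of that convention; the helper names, the 0 "running" state, and the assumption that real fds are greater than 1 are mine, not the header's. It compiles as a translation unit and could be driven by a two-thread harness like the eventfd demo above.

#include <stdatomic.h>
#include <sys/eventfd.h>
#include <unistd.h>

struct fd_waitctx {
	atomic_int sem;   // 0 = running, 1 = wake immediately, >1 = eventfd to poke
	int evfd;         // published into sem before blocking
};

// Waker side: claim the context, then either rely on the sleeper seeing 1,
// or write to the eventfd it published before blocking.
void wake(struct fd_waitctx * wctx) {
	int prev = atomic_exchange(&wctx->sem, 1);
	if (prev > 1)                  // sleeper published its FD and is (about to be) blocked
		eventfd_write(prev, 1);
}

// Sleeper side: publish the FD, re-check for a racing wake, then block.
void sleep_on(struct fd_waitctx * wctx) {
	int expected = 0;
	if (!atomic_compare_exchange_strong(&wctx->sem, &expected, wctx->evfd))
		return;                    // a waker beat us: sem was already 1, skip the sleep
	eventfd_t val;
	read(wctx->evfd, &val, sizeof(val));  // blocks until wake() writes
	atomic_store(&wctx->sem, 0);          // back to the "running" state
}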
libcfa/src/concurrency/kernel/private.hfa
r6a4ef0c → ra757ba1

 }

-// Defines whether or not we *want* to use io_uring_enter as the idle_sleep blocking call
-// #define CFA_WANT_IO_URING_IDLE
-
-// Defines whether or not we *can* use io_uring_enter as the idle_sleep blocking call
-#if defined(CFA_WANT_IO_URING_IDLE) && defined(CFA_HAVE_LINUX_IO_URING_H)
-	#if defined(CFA_HAVE_IORING_OP_READ) || (defined(CFA_HAVE_READV) && defined(CFA_HAVE_IORING_OP_READV))
-		#define CFA_WITH_IO_URING_IDLE
-	#endif
-#endif
-
 // #define READYQ_USE_LINEAR_AVG
 #define READYQ_USE_LOGDBL_AVG
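Reviewer note (not part of the changeset): the removed block is the usual want/can/with preprocessor gate: a hand-set *want* switch only becomes the effective *with* switch when configure-time *have* probes confirm support. A generic sketch of the pattern with hypothetical DEMO_* names standing in for the CFA_* macros:

#include <stdio.h>

#define DEMO_WANT_FEATURE        /* the hand-set opt-in, like CFA_WANT_IO_URING_IDLE */
#define DEMO_HAVE_HEADER         /* pretend the configure probe found the header */
#define DEMO_HAVE_OP_A           /* pretend the opcode probe succeeded */

// Promote *want* to *with* only when the platform probes allow it.
#if defined(DEMO_WANT_FEATURE) && defined(DEMO_HAVE_HEADER)
	#if defined(DEMO_HAVE_OP_A) || defined(DEMO_HAVE_OP_B)
		#define DEMO_WITH_FEATURE
	#endif
#endif

int main(void) {
#if defined(DEMO_WITH_FEATURE)
	puts("feature compiled in");
#else
	puts("feature unavailable");
#endif
	return 0;
}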
libcfa/src/concurrency/kernel/startup.cfa
r6a4ef0c → ra757ba1

 KERNEL_STORAGE(thread$, mainThread);
 KERNEL_STORAGE(__stack_t, mainThreadCtx);
-// KERNEL_STORAGE(__scheduler_RWLock_t, __scheduler_lock);
-KERNEL_STORAGE(eventfd_t, mainIdleEventFd);
-KERNEL_STORAGE(io_future_t, mainIdleFuture);
 #if !defined(__CFA_NO_STATISTICS__)
 	KERNEL_STORAGE(__stats_t, mainProcStats);
…
 (*mainProcessor){};

-mainProcessor->idle_wctx.rdbuf = &storage_mainIdleEventFd;
-mainProcessor->idle_wctx.ftr = (io_future_t*)&storage_mainIdleFuture;
-/* paranoid */ verify( sizeof(storage_mainIdleEventFd) == sizeof(eventfd_t) );
-
 __cfa_io_start( mainProcessor );
 register_tls( mainProcessor );
…
 register_tls( proc );

-// used for idle sleep when io_uring is present
-io_future_t future;
-eventfd_t idle_buf;
-proc->idle_wctx.ftr = &future;
-proc->idle_wctx.rdbuf = &idle_buf;
+// io_future_t future;
+// eventfd_t idle_buf;
+// proc->idle_wctx.ftr = &future;
+// proc->idle_wctx.rdbuf = &idle_buf;
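Reviewer note (not part of the changeset): KERNEL_STORAGE reserves static, suitably aligned bytes for a kernel object that is constructed later during startup; the removed lines handed such storage (storage_mainIdleEventFd, storage_mainIdleFuture) to the main processor's wait context. A sketch of the idiom follows; the macro body shown is an assumption, since the real definition lives elsewhere in startup.cfa, and eventfd_like_t is a hypothetical stand-in type.

#include <stddef.h>

// Assumed shape of the idiom: raw aligned bytes named storage_<X>.
#define KERNEL_STORAGE(T, X) __attribute__((aligned(__alignof__(T)))) \
	static char storage_##X[sizeof(T)]

typedef struct { long count; } eventfd_like_t;    // hypothetical stand-in

KERNEL_STORAGE(eventfd_like_t, mainIdleEventFd);  // reserves storage_mainIdleEventFd

int main(void) {
	// Startup code later treats the raw bytes as the object, the same way the
	// removed line used &storage_mainIdleEventFd as the processor's read buffer.
	eventfd_like_t * e = (eventfd_like_t *)&storage_mainIdleEventFd;
	e->count = 0;
	return 0;
}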