Changeset 7ef162b2
- Timestamp:
- Nov 22, 2021, 1:07:05 PM (23 months ago)
- Branches:
- ADT, ast-experimental, enum, forall-pointer-decay, master, pthread-emulation, qualifiedEnum
- Children:
- 6ddef36, ddd2ec9
- Parents:
- 059ad16
- Location:
- libcfa/src/concurrency
- Files:
- 4 edited
Legend:
- Unmodified
- Added
- Removed
-
libcfa/src/concurrency/io.cfa
r059ad16 r7ef162b2 173 173 174 174 ctx.proc->io.pending = false; 175 176 return __cfa_io_drain( proc ); 175 ready_schedule_lock(); 176 bool ret = __cfa_io_drain( proc ); 177 ready_schedule_unlock(); 178 return ret; 177 179 } 178 180 … … 278 280 } 279 281 280 281 282 //============================================================================================= 282 283 // submission … … 301 302 ctx->proc->io.dirty = true; 302 303 if(sq.to_submit > 30 || !lazy) { 303 ready_schedule_lock();304 304 __cfa_io_flush( ctx->proc, false ); 305 ready_schedule_unlock();306 305 } 307 306 } … … 502 501 } 503 502 } 503 504 bool __kernel_read(processor * proc, io_future_t & future, char buf[], int fd) { 505 $io_context * ctx = proc->io.ctx; 506 /* paranoid */ verify( ! __preemption_enabled() ); 507 /* paranoid */ verify( proc == __cfaabi_tls.this_processor ); 508 /* paranoid */ verify( ctx ); 509 510 __u32 idx; 511 struct io_uring_sqe * sqe; 512 513 // We can proceed to the fast path 514 if( !__alloc(ctx, &idx, 1) ) return false; 515 516 // Allocation was successful 517 __fill( &sqe, 1, &idx, ctx ); 518 519 sqe->opcode = IORING_OP_READ; 520 sqe->user_data = (uintptr_t)&future; 521 sqe->flags = 0; 522 sqe->ioprio = 0; 523 sqe->fd = 0; 524 sqe->off = 0; 525 sqe->fsync_flags = 0; 526 sqe->__pad2[0] = 0; 527 sqe->__pad2[1] = 0; 528 sqe->__pad2[2] = 0; 529 sqe->addr = (uintptr_t)buf; 530 sqe->len = sizeof(uint64_t); 531 532 asm volatile("": : :"memory"); 533 534 /* paranoid */ verify( sqe->user_data == (uintptr_t)&future ); 535 __submit( ctx, &idx, 1, true ); 536 537 /* paranoid */ verify( proc == __cfaabi_tls.this_processor ); 538 /* paranoid */ verify( ! __preemption_enabled() ); 539 } 504 540 #endif -
libcfa/src/concurrency/io/setup.cfa
r059ad16 r7ef162b2 220 220 cq.cqes = (struct io_uring_cqe *)(((intptr_t)cq.ring_ptr) + params.cq_off.cqes); 221 221 222 // Step 4 : eventfd 223 // io_uring_register is so f*cking slow on some machine that it 224 // will never succeed if preemption isn't hard blocked 225 __cfadbg_print_safe(io_core, "Kernel I/O : registering %d for completion with ring %d\n", procfd, fd); 226 227 __disable_interrupts_hard(); 228 229 int ret = syscall( __NR_io_uring_register, fd, IORING_REGISTER_EVENTFD, &procfd, 1); 230 if (ret < 0) { 231 abort("KERNEL ERROR: IO_URING EVENTFD REGISTER - %s\n", strerror(errno)); 232 } 233 234 __enable_interrupts_hard(); 235 236 __cfadbg_print_safe(io_core, "Kernel I/O : registered %d for completion with ring %d\n", procfd, fd); 222 #if !defined(IO_URING_IDLE) 223 // Step 4 : eventfd 224 // io_uring_register is so f*cking slow on some machine that it 225 // will never succeed if preemption isn't hard blocked 226 __cfadbg_print_safe(io_core, "Kernel I/O : registering %d for completion with ring %d\n", procfd, fd); 227 228 __disable_interrupts_hard(); 229 230 int ret = syscall( __NR_io_uring_register, fd, IORING_REGISTER_EVENTFD, &procfd, 1); 231 if (ret < 0) { 232 abort("KERNEL ERROR: IO_URING EVENTFD REGISTER - %s\n", strerror(errno)); 233 } 234 235 __enable_interrupts_hard(); 236 237 __cfadbg_print_safe(io_core, "Kernel I/O : registered %d for completion with ring %d\n", procfd, fd); 238 #endif 237 239 238 240 // some paranoid checks -
libcfa/src/concurrency/io/types.hfa
r059ad16 r7ef162b2 185 185 186 186 // Wait for the future to be fulfilled 187 bool wait( io_future_t & this ) { 188 return wait(this.self); 189 } 190 191 void reset( io_future_t & this ) { 192 return reset(this.self); 193 } 187 bool wait ( io_future_t & this ) { return wait (this.self); } 188 void reset ( io_future_t & this ) { return reset (this.self); } 189 bool available( io_future_t & this ) { return available(this.self); } 194 190 } -
libcfa/src/concurrency/kernel.cfa
r059ad16 r7ef162b2 34 34 #include "strstream.hfa" 35 35 #include "device/cpu.hfa" 36 #include "io/types.hfa" 36 37 37 38 //Private includes … … 124 125 static void __wake_one(cluster * cltr); 125 126 126 static void idle_sleep(processor * proc );127 static void idle_sleep(processor * proc, io_future_t & future, char buf[]); 127 128 static bool mark_idle (__cluster_proc_list & idles, processor & proc); 128 129 static void mark_awake(__cluster_proc_list & idles, processor & proc); … … 134 135 static inline bool __maybe_io_drain( processor * ); 135 136 137 extern bool __kernel_read(processor * proc, io_future_t & future, char buf[], int fd); 138 136 139 extern void __disable_interrupts_hard(); 137 140 extern void __enable_interrupts_hard(); … … 148 151 /* paranoid */ verify( __preemption_enabled() ); 149 152 } 153 150 154 151 155 //============================================================================================= … … 163 167 verify(this); 164 168 169 io_future_t future; // used for idle sleep when io_uring is present 170 future.self.ptr = 1p; // mark it as already fulfilled so we know if there is a pending request or not 171 char buf[sizeof(uint64_t)]; 172 165 173 __cfa_io_start( this ); 166 174 … … 196 204 197 205 if( !readyThread ) { 198 ready_schedule_lock();199 206 __cfa_io_flush( this, false ); 200 ready_schedule_unlock();201 207 202 208 readyThread = __next_thread_slow( this->cltr ); … … 229 235 } 230 236 231 idle_sleep( this );237 idle_sleep( this, future, buf ); 232 238 233 239 // We were woken up, remove self from idle … … 250 256 251 257 if(this->io.pending && !this->io.dirty) { 252 ready_schedule_lock();253 258 __cfa_io_flush( this, false ); 254 ready_schedule_unlock();255 259 } 256 260 … … 773 777 } 774 778 775 static void idle_sleep(processor * this) { 776 #if !defined(__CFA_NO_STATISTICS__) 777 if(this->print_halts) { 778 __cfaabi_bits_print_safe( STDOUT_FILENO, "PH:%d - %lld 0\n", this->unique_id, rdtscl()); 779 } 780 #endif 781 782 
__cfadbg_print_safe(runtime_core, "Kernel : core %p waiting on eventfd %d\n", this, this->idle_fd); 783 784 { 785 eventfd_t val; 786 ssize_t ret = read( this->idle_fd, &val, sizeof(val) ); 787 if(ret < 0) { 788 switch((int)errno) { 789 case EAGAIN: 790 #if EAGAIN != EWOULDBLOCK 791 case EWOULDBLOCK: 792 #endif 793 case EINTR: 794 // No need to do anything special here, just assume it's a legitimate wake-up 795 break; 796 default: 797 abort( "KERNEL : internal error, read failure on idle eventfd, error(%d) %s.", (int)errno, strerror( (int)errno ) ); 779 static void idle_sleep(processor * this, io_future_t & future, char buf[]) { 780 #if !defined(IO_URING_IDLE) || !defined(CFA_HAVE_LINUX_IO_URING_H) 781 #if !defined(__CFA_NO_STATISTICS__) 782 if(this->print_halts) { 783 __cfaabi_bits_print_safe( STDOUT_FILENO, "PH:%d - %lld 0\n", this->unique_id, rdtscl()); 798 784 } 799 } 800 } 801 802 #if !defined(__CFA_NO_STATISTICS__) 803 if(this->print_halts) { 804 __cfaabi_bits_print_safe( STDOUT_FILENO, "PH:%d - %lld 1\n", this->unique_id, rdtscl()); 805 } 785 #endif 786 787 __cfadbg_print_safe(runtime_core, "Kernel : core %p waiting on eventfd %d\n", this, this->idle_fd); 788 789 { 790 eventfd_t val; 791 ssize_t ret = read( this->idle_fd, &val, sizeof(val) ); 792 if(ret < 0) { 793 switch((int)errno) { 794 case EAGAIN: 795 #if EAGAIN != EWOULDBLOCK 796 case EWOULDBLOCK: 797 #endif 798 case EINTR: 799 // No need to do anything special here, just assume it's a legitimate wake-up 800 break; 801 default: 802 abort( "KERNEL : internal error, read failure on idle eventfd, error(%d) %s.", (int)errno, strerror( (int)errno ) ); 803 } 804 } 805 } 806 807 #if !defined(__CFA_NO_STATISTICS__) 808 if(this->print_halts) { 809 __cfaabi_bits_print_safe( STDOUT_FILENO, "PH:%d - %lld 1\n", this->unique_id, rdtscl()); 810 } 811 #endif 812 #else 813 #if !defined(CFA_HAVE_IORING_OP_READ) 814 #error this is only implemented if the read is present 815 #endif 816 // Do we already have a pending read 
817 if(available(future)) { 818 // There is no pending read, we need to add one 819 reset(future); 820 821 __kernel_read(this, future, buf, this->idle_fd ); 822 } 823 824 __cfa_io_flush( this, true ); 806 825 #endif 807 826 }
Note: See TracChangeset for help on using the changeset viewer.