Changes in / [0351e9f:a1f3d93]
- Location: libcfa/src
- Files: 4 edited
Legend:
- ' ' Unmodified
- '+' Added
- '-' Removed
libcfa/src/concurrency/io.cfa
--- r0351e9f
+++ ra1f3d93

 }
 
-bool __cfa_io_flush( processor * proc, bool wait) {
+bool __cfa_io_flush( processor * proc, int min_comp ) {
 	/* paranoid */ verify( ! __preemption_enabled() );
 	/* paranoid */ verify( proc );
…
 
 	__STATS__( true, io.calls.flush++; )
-	int ret = syscall( __NR_io_uring_enter, ctx.fd, ctx.sq.to_submit, wait ? 1 : 0, 0, (sigset_t *)0p, _NSIG / 8);
+	int ret = syscall( __NR_io_uring_enter, ctx.fd, ctx.sq.to_submit, min_comp, min_comp > 0 ? IORING_ENTER_GETEVENTS : 0, (sigset_t *)0p, _NSIG / 8);
 	if( ret < 0 ) {
 		switch((int)errno) {
…
 	ctx->proc->io.dirty = true;
 	if(sq.to_submit > 30 || !lazy) {
-		__cfa_io_flush( ctx->proc, false);
+		__cfa_io_flush( ctx->proc, 0 );
 	}
 }
…
 	sqe->flags = 0;
 	sqe->ioprio = 0;
-	sqe->fd = 0;
+	sqe->fd = fd;
 	sqe->off = 0;
 	sqe->fsync_flags = 0;
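The substance of this change: __cfa_io_flush now takes a minimum-completion count instead of a boolean, and sets IORING_ENTER_GETEVENTS only when that count is positive. This matters because io_uring_enter ignores its min_complete argument unless that flag is set, so the old "wait ? 1 : 0" call with flags 0 never actually blocked. (The sqe->fd hunk is an unrelated fix: the sqe was being initialized with fd 0 instead of the requested file descriptor.) A minimal standalone sketch of the corrected call in plain C; the flush_ring, ring_fd, and to_submit names are illustrative, not from the changeset:

    #include <errno.h>
    #include <signal.h>            // _NSIG
    #include <sys/syscall.h>       // __NR_io_uring_enter
    #include <unistd.h>            // syscall
    #include <linux/io_uring.h>    // IORING_ENTER_GETEVENTS

    // Submit 'to_submit' queued SQEs; if min_comp > 0, also block until at
    // least min_comp completions are available.  With min_comp == 0 the
    // call only submits and returns immediately, matching the flush sites
    // above that pass 0.
    static int flush_ring( int ring_fd, unsigned to_submit, int min_comp ) {
        unsigned flags = (min_comp > 0) ? IORING_ENTER_GETEVENTS : 0;
        int ret = syscall( __NR_io_uring_enter, ring_fd, to_submit,
                           (unsigned)min_comp, flags, (sigset_t *)0, _NSIG / 8 );
        return ret < 0 ? -errno : ret;
    }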
libcfa/src/concurrency/io/setup.cfa
--- r0351e9f
+++ ra1f3d93

 
 	void __cfa_io_start( processor * proc ) {}
-	bool __cfa_io_flush( processor * proc, bool) {}
+	bool __cfa_io_flush( processor * proc, int ) {}
 	void __cfa_io_stop ( processor * proc ) {}
 
libcfa/src/concurrency/kernel.cfa
--- r0351e9f
+++ ra1f3d93

 static void __wake_one(cluster * cltr);
 
-static void idle_sleep(processor * proc, io_future_t & future, char buf[]);
+static void idle_sleep(processor * proc, io_future_t & future, eventfd_t & val);
 static bool mark_idle (__cluster_proc_list & idles, processor & proc);
 static void mark_awake(__cluster_proc_list & idles, processor & proc);
…
 extern void __cfa_io_start( processor * );
 extern bool __cfa_io_drain( processor * );
-extern bool __cfa_io_flush( processor *, bool wait);
+extern bool __cfa_io_flush( processor *, int min_comp );
 extern void __cfa_io_stop ( processor * );
 static inline bool __maybe_io_drain( processor * );
…
 	io_future_t future; // used for idle sleep when io_uring is present
 	future.self.ptr = 1p; // mark it as already fulfilled so we know if there is a pending request or not
-	char buf[sizeof(uint64_t)];
+	eventfd_t idle_val;
 
 	__cfa_io_start( this );
…
 
 	if( !readyThread ) {
-		__cfa_io_flush( this, false);
+		__cfa_io_flush( this, 0 );
 
 		readyThread = __next_thread_slow( this->cltr );
…
 	}
 
-	idle_sleep( this, future, buf);
+	idle_sleep( this, future, idle_val );
 
 	// We were woken up, remove self from idle
…
 
 	if(this->io.pending && !this->io.dirty) {
-		__cfa_io_flush( this, false);
+		__cfa_io_flush( this, 0 );
 	}
 
…
 
 	// If we can't find a thread, might as well flush any outstanding I/O
-	if(this->io.pending) { __cfa_io_flush( this, false); }
+	if(this->io.pending) { __cfa_io_flush( this, 0 ); }
 
 	// Spin a little on I/O, just in case
…
 
 	if(this->io.pending && !this->io.dirty) {
-		__cfa_io_flush( this, false);
+		__cfa_io_flush( this, 0 );
 	}
 
…
 }
 
-static void idle_sleep(processor * this, io_future_t & future, char buf[]) {
+static void idle_sleep(processor * this, io_future_t & future, eventfd_t & val) {
 	#if !defined(IO_URING_IDLE) || !defined(CFA_HAVE_LINUX_IO_URING_H)
 		#if !defined(__CFA_NO_STATISTICS__)
…
 		reset(future);
 
-		__kernel_read(this, future, buf, this->idle_fd );
+		__kernel_read(this, future, (char *)&val, this->idle_fd );
 	}
 
-	__cfa_io_flush( this, true);
+	__cfa_io_flush( this, 1 );
 	#endif
 }
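Here the idle path drops the untyped char buf[sizeof(uint64_t)] in favour of a proper eventfd_t (a 64-bit counter), and the final flush passes min_comp = 1, so a processor with nothing to run genuinely sleeps until an I/O completion or a wake-up arrives. For reference, a sketch of the plain eventfd handshake that __kernel_read performs asynchronously through the ring; idle_wait and wake_one are hypothetical names, not from the changeset:

    #include <sys/eventfd.h>   // eventfd_t, eventfd_write
    #include <unistd.h>        // read

    // An eventfd read transfers exactly sizeof(eventfd_t) == 8 bytes,
    // which is why the destination is now typed as eventfd_t rather than
    // a char buffer of the same size.
    static void idle_wait( int idle_fd ) {
        eventfd_t val;
        read( idle_fd, &val, sizeof(val) );   // blocks until a wake-up is posted
    }

    static void wake_one( int idle_fd ) {
        eventfd_write( idle_fd, 1 );          // bumps the counter, unblocking a reader
    }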
libcfa/src/device/cpu.cfa
--- r0351e9f
+++ ra1f3d93

 
 	const char * _;
-	int cnt = read_width(buff, r - 1, &_);
-	/* paranoid */ verify(cnt == count_prefix_dirs("/sys/devices/system/cpu", "cpu"));
-	return cnt;
+	return read_width(buff, r - 1, &_);;
 }
 
…
 {
 	raw_cache_instance ** raw = alloc(cpus);
+
+	// TODO: this loop is broken, it only works if the present cpu start at 0 and are contiguous which is not guaranteed.
 	for(i; cpus) {
 		raw[i] = alloc(cache_levels);
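The added TODO is worth spelling out: for(i; cpus) visits ids 0 through cpus-1, which is only correct when every present CPU is online and numbered contiguously from 0; on systems with offlined or hot-removed CPUs the ids have holes. One possible remedy, sketched in plain C, is to walk the kernel's own range list in /sys/devices/system/cpu/online; the for_each_online_cpu helper is hypothetical, not part of the changeset:

    #include <stdio.h>

    // Parse the comma-separated id/range list the kernel exposes, e.g.
    // "0-3,5,7-8", and call cb once per online cpu id.  This tolerates
    // holes in the numbering that a dense 0..cpus-1 loop does not.
    static int for_each_online_cpu( void (*cb)(unsigned cpu, void * arg), void * arg ) {
        FILE * f = fopen( "/sys/devices/system/cpu/online", "r" );
        if( !f ) return -1;
        unsigned lo, hi;
        while( fscanf( f, "%u", &lo ) == 1 ) {
            hi = lo;
            int c = fgetc( f );
            if( c == '-' ) {                  // an inclusive range "lo-hi"
                if( fscanf( f, "%u", &hi ) != 1 ) break;
                c = fgetc( f );
            }
            for( unsigned cpu = lo; cpu <= hi; cpu++ ) cb( cpu, arg );
            if( c != ',' ) break;             // end of list (newline or EOF)
        }
        fclose( f );
        return 0;
    }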