Index: libcfa/src/concurrency/io.cfa
===================================================================
--- libcfa/src/concurrency/io.cfa	(revision 059ad166c9e6faec1fbf213925fe3c23cffd787d)
+++ libcfa/src/concurrency/io.cfa	(revision 7ef162b296a22a5053fda4315076997f08c2f5cd)
@@ -173,6 +173,8 @@
 
 		ctx.proc->io.pending = false;
-
-		return __cfa_io_drain( proc );
+		ready_schedule_lock();
+		bool ret = __cfa_io_drain( proc );
+		ready_schedule_unlock();
+		return ret;
 	}
 
@@ -278,5 +280,4 @@
 	}
 
-
 	//=============================================================================================
 	// submission
@@ -301,7 +302,5 @@
 		ctx->proc->io.dirty   = true;
 		if(sq.to_submit > 30 || !lazy) {
-			ready_schedule_lock();
 			__cfa_io_flush( ctx->proc, false );
-			ready_schedule_unlock();
 		}
 	}
@@ -502,3 +501,41 @@
 		}
 	}
+
+	// Submit an io_uring READ on behalf of the kernel itself (used by idle sleep
+	// to wait on the processor's eventfd without blocking in read(2)).
+	// Returns false if no sqe could be allocated (caller may retry/flush),
+	// true once the request has been submitted. Completion fulfills 'future'.
+	bool __kernel_read(processor * proc, io_future_t & future, char buf[], int fd) {
+		$io_context * ctx = proc->io.ctx;
+		/* paranoid */ verify( ! __preemption_enabled() );
+		/* paranoid */ verify( proc == __cfaabi_tls.this_processor );
+		/* paranoid */ verify( ctx );
+
+		__u32 idx;
+		struct io_uring_sqe * sqe;
+
+		// We can proceed to the fast path
+		if( !__alloc(ctx, &idx, 1) ) return false;
+
+		// Allocation was successful
+		__fill( &sqe, 1, &idx, ctx );
+
+		sqe->opcode = IORING_OP_READ;
+		sqe->user_data = (uintptr_t)&future;
+		sqe->flags = 0;
+		sqe->ioprio = 0;
+		sqe->fd = fd;
+		sqe->off = 0;
+		sqe->fsync_flags = 0;
+		sqe->__pad2[0] = 0;
+		sqe->__pad2[1] = 0;
+		sqe->__pad2[2] = 0;
+		sqe->addr = (uintptr_t)buf;
+		sqe->len = sizeof(uint64_t);
+
+		// compiler barrier: sqe fields must be visible before publishing the index
+		asm volatile("": : :"memory");
+
+		/* paranoid */ verify( sqe->user_data == (uintptr_t)&future );
+		__submit( ctx, &idx, 1, true );
+
+		/* paranoid */ verify( proc == __cfaabi_tls.this_processor );
+		/* paranoid */ verify( ! __preemption_enabled() );
+		return true;
+	}
 #endif
Index: libcfa/src/concurrency/io/setup.cfa
===================================================================
--- libcfa/src/concurrency/io/setup.cfa	(revision 059ad166c9e6faec1fbf213925fe3c23cffd787d)
+++ libcfa/src/concurrency/io/setup.cfa	(revision 7ef162b296a22a5053fda4315076997f08c2f5cd)
@@ -220,19 +220,21 @@
 		cq.cqes = (struct io_uring_cqe *)(((intptr_t)cq.ring_ptr) + params.cq_off.cqes);
 
-		// Step 4 : eventfd
-		// io_uring_register is so f*cking slow on some machine that it
-		// will never succeed if preemption isn't hard blocked
-		__cfadbg_print_safe(io_core, "Kernel I/O : registering %d for completion with ring %d\n", procfd, fd);
-
-		__disable_interrupts_hard();
-
-		int ret = syscall( __NR_io_uring_register, fd, IORING_REGISTER_EVENTFD, &procfd, 1);
-		if (ret < 0) {
-			abort("KERNEL ERROR: IO_URING EVENTFD REGISTER - %s\n", strerror(errno));
-		}
-
-		__enable_interrupts_hard();
-
-		__cfadbg_print_safe(io_core, "Kernel I/O : registered %d for completion with ring %d\n", procfd, fd);
+		#if !defined(IO_URING_IDLE)
+			// Step 4 : eventfd
+			// io_uring_register is so f*cking slow on some machine that it
+			// will never succeed if preemption isn't hard blocked
+			__cfadbg_print_safe(io_core, "Kernel I/O : registering %d for completion with ring %d\n", procfd, fd);
+
+			__disable_interrupts_hard();
+
+			int ret = syscall( __NR_io_uring_register, fd, IORING_REGISTER_EVENTFD, &procfd, 1);
+			if (ret < 0) {
+				abort("KERNEL ERROR: IO_URING EVENTFD REGISTER - %s\n", strerror(errno));
+			}
+
+			__enable_interrupts_hard();
+
+			__cfadbg_print_safe(io_core, "Kernel I/O : registered %d for completion with ring %d\n", procfd, fd);
+		#endif
 
 		// some paranoid checks
Index: libcfa/src/concurrency/io/types.hfa
===================================================================
--- libcfa/src/concurrency/io/types.hfa	(revision 059ad166c9e6faec1fbf213925fe3c23cffd787d)
+++ libcfa/src/concurrency/io/types.hfa	(revision 7ef162b296a22a5053fda4315076997f08c2f5cd)
@@ -185,10 +185,6 @@
 
 	// Wait for the future to be fulfilled
-	bool wait( io_future_t & this ) {
-		return wait(this.self);
-	}
-
-	void reset( io_future_t & this ) {
-		return reset(this.self);
-	}
+	bool wait     ( io_future_t & this ) { return wait     (this.self); }
+	void reset    ( io_future_t & this ) { return reset    (this.self); }
+	bool available( io_future_t & this ) { return available(this.self); }
 }
Index: libcfa/src/concurrency/kernel.cfa
===================================================================
--- libcfa/src/concurrency/kernel.cfa	(revision 059ad166c9e6faec1fbf213925fe3c23cffd787d)
+++ libcfa/src/concurrency/kernel.cfa	(revision 7ef162b296a22a5053fda4315076997f08c2f5cd)
@@ -34,4 +34,5 @@
 #include "strstream.hfa"
 #include "device/cpu.hfa"
+#include "io/types.hfa"
 
 //Private includes
@@ -124,5 +125,5 @@
 static void __wake_one(cluster * cltr);
 
-static void idle_sleep(processor * proc);
+static void idle_sleep(processor * proc, io_future_t & future, char buf[]);
 static bool mark_idle (__cluster_proc_list & idles, processor & proc);
 static void mark_awake(__cluster_proc_list & idles, processor & proc);
@@ -134,4 +135,6 @@
 static inline bool __maybe_io_drain( processor * );
 
+extern bool __kernel_read(processor * proc, io_future_t & future, char buf[], int fd);
+
 extern void __disable_interrupts_hard();
 extern void __enable_interrupts_hard();
@@ -148,4 +151,5 @@
 	/* paranoid */ verify( __preemption_enabled() );
 }
+
 
 //=============================================================================================
@@ -163,4 +167,8 @@
 	verify(this);
 
+	io_future_t future; // used for idle sleep when io_uring is present
+	future.self.ptr = 1p;  // mark it as already fulfilled so we know if there is a pending request or not
+	char buf[sizeof(uint64_t)];
+
 	__cfa_io_start( this );
 
@@ -196,7 +204,5 @@
 
 			if( !readyThread ) {
-				ready_schedule_lock();
 				__cfa_io_flush( this, false );
-				ready_schedule_unlock();
 
 				readyThread = __next_thread_slow( this->cltr );
@@ -229,5 +235,5 @@
 				}
 
-				idle_sleep( this );
+				idle_sleep( this, future, buf );
 
 				// We were woken up, remove self from idle
@@ -250,7 +256,5 @@
 
 			if(this->io.pending && !this->io.dirty) {
-				ready_schedule_lock();
 				__cfa_io_flush( this, false );
-				ready_schedule_unlock();
 			}
 
@@ -773,35 +777,50 @@
 }
 
-static void idle_sleep(processor * this) {
-	#if !defined(__CFA_NO_STATISTICS__)
-		if(this->print_halts) {
-			__cfaabi_bits_print_safe( STDOUT_FILENO, "PH:%d - %lld 0\n", this->unique_id, rdtscl());
-		}
-	#endif
-
-	__cfadbg_print_safe(runtime_core, "Kernel : core %p waiting on eventfd %d\n", this, this->idle_fd);
-
-	{
-		eventfd_t val;
-		ssize_t ret = read( this->idle_fd, &val, sizeof(val) );
-		if(ret < 0) {
-			switch((int)errno) {
-			case EAGAIN:
-			#if EAGAIN != EWOULDBLOCK
-				case EWOULDBLOCK:
-			#endif
-			case EINTR:
-				// No need to do anything special here, just assume it's a legitimate wake-up
-				break;
-			default:
-				abort( "KERNEL : internal error, read failure on idle eventfd, error(%d) %s.", (int)errno, strerror( (int)errno ) );
+static void idle_sleep(processor * this, io_future_t & future, char buf[]) {
+	#if !defined(IO_URING_IDLE) || !defined(CFA_HAVE_LINUX_IO_URING_H)
+		#if !defined(__CFA_NO_STATISTICS__)
+			if(this->print_halts) {
+				__cfaabi_bits_print_safe( STDOUT_FILENO, "PH:%d - %lld 0\n", this->unique_id, rdtscl());
 			}
-		}
-	}
-
-	#if !defined(__CFA_NO_STATISTICS__)
-		if(this->print_halts) {
-			__cfaabi_bits_print_safe( STDOUT_FILENO, "PH:%d - %lld 1\n", this->unique_id, rdtscl());
-		}
+		#endif
+
+		__cfadbg_print_safe(runtime_core, "Kernel : core %p waiting on eventfd %d\n", this, this->idle_fd);
+
+		{
+			eventfd_t val;
+			ssize_t ret = read( this->idle_fd, &val, sizeof(val) );
+			if(ret < 0) {
+				switch((int)errno) {
+				case EAGAIN:
+				#if EAGAIN != EWOULDBLOCK
+					case EWOULDBLOCK:
+				#endif
+				case EINTR:
+					// No need to do anything special here, just assume it's a legitimate wake-up
+					break;
+				default:
+					abort( "KERNEL : internal error, read failure on idle eventfd, error(%d) %s.", (int)errno, strerror( (int)errno ) );
+				}
+			}
+		}
+
+		#if !defined(__CFA_NO_STATISTICS__)
+			if(this->print_halts) {
+				__cfaabi_bits_print_safe( STDOUT_FILENO, "PH:%d - %lld 1\n", this->unique_id, rdtscl());
+			}
+		#endif
+	#else
+		#if !defined(CFA_HAVE_IORING_OP_READ)
+			#error this is only implemented if the read is present
+		#endif
+		// Do we already have a pending read
+		if(available(future)) {
+			// There is no pending read, we need to add one
+			reset(future);
+
+			__kernel_read(this, future, buf, this->idle_fd );
+		}
+
+		__cfa_io_flush( this, true );
 	#endif
 }
