Index: libcfa/src/concurrency/io.cfa
===================================================================
--- libcfa/src/concurrency/io.cfa	(revision 6a4ef0c58dbf5d0381bd99ba85707bbd8e3b205a)
+++ libcfa/src/concurrency/io.cfa	(revision a757ba179bb7ccc193cf855ee3b82d2d47d342eb)
@@ -639,91 +639,3 @@
 		}
 	}
-
-	#if defined(CFA_WITH_IO_URING_IDLE)
-		bool __kernel_read(struct processor * proc, io_future_t & future, iovec & iov, int fd) {
-			io_context$ * ctx = proc->io.ctx;
-			/* paranoid */ verify( ! __preemption_enabled() );
-			/* paranoid */ verify( proc == __cfaabi_tls.this_processor );
-			/* paranoid */ verify( ctx );
-
-			__u32 idx;
-			struct io_uring_sqe * sqe;
-
-			// We can proceed to the fast path
-			if( !__alloc(ctx, &idx, 1) ) {
-				/* paranoid */ verify( false ); // for now check if this happens, next time just abort the sleep.
-				return false;
-			}
-
-			// Allocation was successful
-			__fill( &sqe, 1, &idx, ctx );
-
-			sqe->user_data = (uintptr_t)&future;
-			sqe->flags = 0;
-			sqe->fd = fd;
-			sqe->off = 0;
-			sqe->ioprio = 0;
-			sqe->fsync_flags = 0;
-			sqe->__pad2[0] = 0;
-			sqe->__pad2[1] = 0;
-			sqe->__pad2[2] = 0;
-
-			#if defined(CFA_HAVE_IORING_OP_READ)
-				sqe->opcode = IORING_OP_READ;
-				sqe->addr = (uint64_t)iov.iov_base;
-				sqe->len = iov.iov_len;
-			#elif defined(CFA_HAVE_READV) && defined(CFA_HAVE_IORING_OP_READV)
-				sqe->opcode = IORING_OP_READV;
-				sqe->addr = (uintptr_t)&iov;
-				sqe->len = 1;
-			#else
-				#error CFA_WITH_IO_URING_IDLE but none of CFA_HAVE_READV, CFA_HAVE_IORING_OP_READV or CFA_HAVE_IORING_OP_READ defined
-			#endif
-
-			asm volatile("": : :"memory");
-
-			/* paranoid */ verify( sqe->user_data == (uintptr_t)&future );
-			__submit_only( ctx, &idx, 1 );
-
-			/* paranoid */ verify( proc == __cfaabi_tls.this_processor );
-			/* paranoid */ verify( ! __preemption_enabled() );
-
-			return true;
-		}
-
-		void __cfa_io_idle( struct processor * proc ) {
-			iovec iov;
-			__atomic_acquire( &proc->io.ctx->cq.lock );
-
-			__attribute__((used)) volatile bool was_reset = false;
-
-			with( proc->idle_wctx) {
-
-				// Do we already have a pending read
-				if(available(*ftr)) {
-					// There is no pending read, we need to add one
-					reset(*ftr);
-
-					iov.iov_base = rdbuf;
-					iov.iov_len  = sizeof(eventfd_t);
-					__kernel_read(proc, *ftr, iov, evfd );
-					ftr->result = 0xDEADDEAD;
-					*((eventfd_t *)rdbuf) = 0xDEADDEADDEADDEAD;
-					was_reset = true;
-				}
-			}
-
-			if( !__atomic_load_n( &proc->do_terminate, __ATOMIC_SEQ_CST ) ) {
-				__ioarbiter_flush( *proc->io.ctx );
-				proc->idle_wctx.sleep_time = rdtscl();
-				ioring_syscsll( *proc->io.ctx, 1, IORING_ENTER_GETEVENTS);
-			}
-
-			ready_schedule_lock();
-			__cfa_do_drain( proc->io.ctx, proc->cltr );
-			ready_schedule_unlock();
-
-			asm volatile ("" :: "m" (was_reset));
-		}
-	#endif
 #endif
Index: libcfa/src/concurrency/io/setup.cfa
===================================================================
--- libcfa/src/concurrency/io/setup.cfa	(revision 6a4ef0c58dbf5d0381bd99ba85707bbd8e3b205a)
+++ libcfa/src/concurrency/io/setup.cfa	(revision a757ba179bb7ccc193cf855ee3b82d2d47d342eb)
@@ -34,5 +34,4 @@
 	bool __cfa_io_flush( processor * proc ) { return false; }
 	bool __cfa_io_drain( processor * proc ) __attribute__((nonnull (1))) { return false; }
-	void __cfa_io_idle ( processor * ) __attribute__((nonnull (1))) {}
 	void __cfa_io_stop ( processor * proc ) {}
 
@@ -317,45 +316,4 @@
 	}
 
-//=============================================================================================
-// I/O Context Sleep
-//=============================================================================================
-	// static inline void __epoll_ctl(io_context$ & ctx, int op, const char * error) {
-	// 	struct epoll_event ev;
-	// 	ev.events = EPOLLIN | EPOLLONESHOT;
-	// 	ev.data.u64 = (__u64)&ctx;
-	// 	int ret = epoll_ctl(iopoll.epollfd, op, ctx.efd, &ev);
-	// 	if (ret < 0) {
-	// 		abort( "KERNEL ERROR: EPOLL %s - (%d) %s\n", error, (int)errno, strerror(errno) );
-	// 	}
-	// }
-
-	// static void __epoll_register(io_context$ & ctx) {
-	// 	__epoll_ctl(ctx, EPOLL_CTL_ADD, "ADD");
-	// }
-
-	// static void __epoll_unregister(io_context$ & ctx) {
-	// 	// Read the current epoch so we know when to stop
-	// 	size_t curr = __atomic_load_n(&iopoll.epoch, __ATOMIC_SEQ_CST);
-
-	// 	// Remove the fd from the iopoller
-	// 	__epoll_ctl(ctx, EPOLL_CTL_DEL, "REMOVE");
-
-	// 	// Notify the io poller thread of the shutdown
-	// 	iopoll.run = false;
-	// 	sigval val = { 1 };
-	// 	__cfaabi_pthread_sigqueue( iopoll.thrd, SIGUSR1, val );
-
-	// 	// Make sure all this is done
-	// 	__atomic_thread_fence(__ATOMIC_SEQ_CST);
-
-	// 	// Wait for the next epoch
-	// 	while(curr == iopoll.epoch && !iopoll.stopped) Pause();
-	// }
-
-	// void __ioctx_prepare_block(io_context$ & ctx) {
-	// 	__cfadbg_print_safe(io_core, "Kernel I/O - epoll : Re-arming io poller %d (%p)\n", ctx.fd, &ctx);
-	// 	__epoll_ctl(ctx, EPOLL_CTL_MOD, "REARM");
-	// }
-
 
 //=============================================================================================
Index: libcfa/src/concurrency/kernel.cfa
===================================================================
--- libcfa/src/concurrency/kernel.cfa	(revision 6a4ef0c58dbf5d0381bd99ba85707bbd8e3b205a)
+++ libcfa/src/concurrency/kernel.cfa	(revision a757ba179bb7ccc193cf855ee3b82d2d47d342eb)
@@ -138,9 +138,5 @@
 extern bool __cfa_io_drain( processor * proc ) __attribute__((nonnull (1)));
 extern bool __cfa_io_flush( processor * ) __attribute__((nonnull (1)));
-extern void __cfa_io_idle( processor * ) __attribute__((nonnull (1)));
-
-#if defined(CFA_WITH_IO_URING_IDLE)
-	extern bool __kernel_read(processor * proc, io_future_t & future, iovec &, int fd);
-#endif
+
 
 extern void __disable_interrupts_hard();
@@ -161,11 +157,4 @@
 	processor * this = runner.proc;
 	verify(this);
-
-	/* paranoid */ verify( this->idle_wctx.ftr   != 0p );
-	/* paranoid */ verify( this->idle_wctx.rdbuf != 0p );
-
-	// used for idle sleep when io_uring is present
-	// mark it as already fulfilled so we know if there is a pending request or not
-	this->idle_wctx.ftr->self.ptr = 1p;
 
 	__cfadbg_print_safe(runtime_core, "Kernel : core %p starting\n", this);
@@ -730,38 +719,34 @@
 
 
-	#if !defined(CFA_WITH_IO_URING_IDLE)
-		#if !defined(__CFA_NO_STATISTICS__)
-			if(this->print_halts) {
-				__cfaabi_bits_print_safe( STDOUT_FILENO, "PH:%d - %lld 0\n", this->unique_id, rdtscl());
+	#if !defined(__CFA_NO_STATISTICS__)
+		if(this->print_halts) {
+			__cfaabi_bits_print_safe( STDOUT_FILENO, "PH:%d - %lld 0\n", this->unique_id, rdtscl());
+		}
+	#endif
+
+	__cfadbg_print_safe(runtime_core, "Kernel : core %p waiting on eventfd %d\n", this, this->idle_wctx.evfd);
+
+	{
+		eventfd_t val;
+		ssize_t ret = read( this->idle_wctx.evfd, &val, sizeof(val) );
+		if(ret < 0) {
+			switch((int)errno) {
+			case EAGAIN:
+			#if EAGAIN != EWOULDBLOCK
+				case EWOULDBLOCK:
+			#endif
+			case EINTR:
+				// No need to do anything special here, just assume it's a legitimate wake-up
+				break;
+			default:
+				abort( "KERNEL : internal error, read failure on idle eventfd, error(%d) %s.", (int)errno, strerror( (int)errno ) );
 			}
-		#endif
-
-		__cfadbg_print_safe(runtime_core, "Kernel : core %p waiting on eventfd %d\n", this, this->idle_fd);
-
-		{
-			eventfd_t val;
-			ssize_t ret = read( this->idle_wctx.evfd, &val, sizeof(val) );
-			if(ret < 0) {
-				switch((int)errno) {
-				case EAGAIN:
-				#if EAGAIN != EWOULDBLOCK
-					case EWOULDBLOCK:
-				#endif
-				case EINTR:
-					// No need to do anything special here, just assume it's a legitimate wake-up
-					break;
-				default:
-					abort( "KERNEL : internal error, read failure on idle eventfd, error(%d) %s.", (int)errno, strerror( (int)errno ) );
-				}
-			}
-		}
-
-		#if !defined(__CFA_NO_STATISTICS__)
-			if(this->print_halts) {
-				__cfaabi_bits_print_safe( STDOUT_FILENO, "PH:%d - %lld 1\n", this->unique_id, rdtscl());
-			}
-		#endif
-	#else
-		__cfa_io_idle( this );
+		}
+	}
+
+	#if !defined(__CFA_NO_STATISTICS__)
+		if(this->print_halts) {
+			__cfaabi_bits_print_safe( STDOUT_FILENO, "PH:%d - %lld 1\n", this->unique_id, rdtscl());
+		}
 	#endif
 }
@@ -779,4 +764,5 @@
 		insert_first(this.idles, proc);
 
+		// update the pointer to the head wait context, which should now point to this proc.
 		__atomic_store_n(&this.fdw, &proc.idle_wctx, __ATOMIC_SEQ_CST);
 	unlock( this );
@@ -795,4 +781,5 @@
 
 		{
+			// update the pointer to the head wait context
 			struct __fd_waitctx * wctx = 0;
 			if(!this.idles`isEmpty) wctx = &this.idles`first.idle_wctx;
Index: libcfa/src/concurrency/kernel.hfa
===================================================================
--- libcfa/src/concurrency/kernel.hfa	(revision 6a4ef0c58dbf5d0381bd99ba85707bbd8e3b205a)
+++ libcfa/src/concurrency/kernel.hfa	(revision a757ba179bb7ccc193cf855ee3b82d2d47d342eb)
@@ -64,4 +64,5 @@
 	// 1 - means the proc should wake-up immediately
 	// FD - means the proc is going asleep and should be woken by writing to the FD.
+	//      The FD value should always be the evfd field just below.
 	volatile int sem;
 
@@ -69,12 +70,5 @@
 	int evfd;
 
-	// buffer into which the proc will read from evfd
-	// unused if not using io_uring for idle sleep
-	void * rdbuf;
-
-	// future use to track the read of the eventfd
-	// unused if not using io_uring for idle sleep
-	io_future_t * ftr;
-
+	// Used for debugging, should be removed eventually.
 	volatile unsigned long long wake__time;
 	volatile unsigned long long sleep_time;
Index: libcfa/src/concurrency/kernel/private.hfa
===================================================================
--- libcfa/src/concurrency/kernel/private.hfa	(revision 6a4ef0c58dbf5d0381bd99ba85707bbd8e3b205a)
+++ libcfa/src/concurrency/kernel/private.hfa	(revision a757ba179bb7ccc193cf855ee3b82d2d47d342eb)
@@ -41,13 +41,4 @@
 }
 
-// Defines whether or not we *want* to use io_uring_enter as the idle_sleep blocking call
-// #define CFA_WANT_IO_URING_IDLE
-
-// Defines whether or not we *can* use io_uring_enter as the idle_sleep blocking call
-#if defined(CFA_WANT_IO_URING_IDLE) && defined(CFA_HAVE_LINUX_IO_URING_H)
-	#if defined(CFA_HAVE_IORING_OP_READ) || (defined(CFA_HAVE_READV) && defined(CFA_HAVE_IORING_OP_READV))
-		#define CFA_WITH_IO_URING_IDLE
-	#endif
-#endif
 // #define READYQ_USE_LINEAR_AVG
 #define READYQ_USE_LOGDBL_AVG
Index: libcfa/src/concurrency/kernel/startup.cfa
===================================================================
--- libcfa/src/concurrency/kernel/startup.cfa	(revision 6a4ef0c58dbf5d0381bd99ba85707bbd8e3b205a)
+++ libcfa/src/concurrency/kernel/startup.cfa	(revision a757ba179bb7ccc193cf855ee3b82d2d47d342eb)
@@ -115,7 +115,4 @@
 KERNEL_STORAGE(thread$,	             mainThread);
 KERNEL_STORAGE(__stack_t,            mainThreadCtx);
-// KERNEL_STORAGE(__scheduler_RWLock_t, __scheduler_lock);
-KERNEL_STORAGE(eventfd_t,            mainIdleEventFd);
-KERNEL_STORAGE(io_future_t,          mainIdleFuture);
 #if !defined(__CFA_NO_STATISTICS__)
 KERNEL_STORAGE(__stats_t, mainProcStats);
@@ -235,8 +232,4 @@
 	(*mainProcessor){};
 
-	mainProcessor->idle_wctx.rdbuf = &storage_mainIdleEventFd;
-	mainProcessor->idle_wctx.ftr   = (io_future_t*)&storage_mainIdleFuture;
-	/* paranoid */ verify( sizeof(storage_mainIdleEventFd) == sizeof(eventfd_t) );
-
 	__cfa_io_start( mainProcessor );
 	register_tls( mainProcessor );
@@ -384,9 +377,8 @@
 	register_tls( proc );
 
-	// used for idle sleep when io_uring is present
-	io_future_t future;
-	eventfd_t idle_buf;
-	proc->idle_wctx.ftr = &future;
-	proc->idle_wctx.rdbuf = &idle_buf;
+	// io_future_t future;
+	// eventfd_t idle_buf;
+	// proc->idle_wctx.ftr = &future;
+	// proc->idle_wctx.rdbuf = &idle_buf;
 
 
