Index: libcfa/src/concurrency/invoke.h
===================================================================
--- libcfa/src/concurrency/invoke.h	(revision e660761701e94bf2b2ae0130750e911d4122366f)
+++ libcfa/src/concurrency/invoke.h	(revision 3e2b9c93dff631bddb430d876a83bcc3bafe9d16)
@@ -17,4 +17,5 @@
 #include "bits/defs.hfa"
 #include "bits/locks.hfa"
+#include "kernel/fwd.hfa"
 
 #ifdef __cforall
@@ -25,44 +26,4 @@
 #ifndef _INVOKE_H_
 #define _INVOKE_H_
-
-#ifdef __ARM_ARCH
-	// function prototypes are only really used by these macros on ARM
-	void disable_global_interrupts();
-	void enable_global_interrupts();
-
-	#define TL_GET( member ) ( { __typeof__( kernelTLS.member ) target; \
-                disable_global_interrupts(); \
-                target = kernelTLS.member; \
-                enable_global_interrupts(); \
-                target; } )
-	#define TL_SET( member, value ) disable_global_interrupts(); \
-		kernelTLS.member = value; \
-		enable_global_interrupts();
-#else
-	#define TL_GET( member ) kernelTLS.member
-	#define TL_SET( member, value ) kernelTLS.member = value;
-#endif
-
-	#ifdef __cforall
-	extern "Cforall" {
-		extern __attribute__((aligned(128))) thread_local struct KernelThreadData {
-			struct $thread    * volatile this_thread;
-			struct processor  * volatile this_processor;
-			struct __stats_t  * volatile this_stats;
-
-			struct {
-				volatile unsigned short disable_count;
-				volatile bool enabled;
-				volatile bool in_progress;
-			} preemption_state;
-
-			#if defined(__SIZEOF_INT128__)
-				__uint128_t rand_seed;
-			#else
-				uint64_t rand_seed;
-			#endif
-		} kernelTLS __attribute__ ((tls_model ( "initial-exec" )));
-	}
-	#endif
 
 	struct __stack_context_t {
@@ -98,5 +59,4 @@
 
 	enum __Coroutine_State { Halted, Start, Primed, Blocked, Ready, Active };
-	enum __Preemption_Reason { __NO_PREEMPTION, __ALARM_PREEMPTION, __POLL_PREEMPTION, __MANUAL_PREEMPTION };
 
 	struct $coroutine {
Index: libcfa/src/concurrency/io.cfa
===================================================================
--- libcfa/src/concurrency/io.cfa	(revision e660761701e94bf2b2ae0130750e911d4122366f)
+++ libcfa/src/concurrency/io.cfa	(revision 3e2b9c93dff631bddb430d876a83bcc3bafe9d16)
@@ -14,4 +14,6 @@
 //
 
+#define __cforall_thread__
+
 #if defined(__CFA_DEBUG__)
 	// #define __CFA_DEBUG_PRINT_IO__
@@ -19,25 +21,9 @@
 #endif
 
-#include "kernel_private.hfa"
-#include "bitmanip.hfa"
-
-#if !defined(CFA_HAVE_LINUX_IO_URING_H)
-	void __kernel_io_startup() {
-		// Nothing to do without io_uring
-	}
-
-	void __kernel_io_shutdown() {
-		// Nothing to do without io_uring
-	}
-
-	void ?{}(io_context & this, struct cluster & cl) {}
-	void ?{}(io_context & this, struct cluster & cl, const io_context_params & params) {}
-
-	void ^?{}(io_context & this) {}
-	void ^?{}(io_context & this, bool cluster_context) {}
-
-#else
+
+#if defined(CFA_HAVE_LINUX_IO_URING_H)
 	#define _GNU_SOURCE         /* See feature_test_macros(7) */
 	#include <errno.h>
+	#include <signal.h>
 	#include <stdint.h>
 	#include <string.h>
@@ -46,5 +32,4 @@
 	extern "C" {
 		#include <sys/epoll.h>
-		#include <sys/mman.h>
 		#include <sys/syscall.h>
 
@@ -52,452 +37,13 @@
 	}
 
-	#include "bits/signal.hfa"
-	#include "kernel_private.hfa"
-	#include "thread.hfa"
-
-	void ?{}(io_context_params & this) {
-		this.num_entries = 256;
-		this.num_ready = 256;
-		this.submit_aff = -1;
-		this.eager_submits = false;
-		this.poller_submits = false;
-		this.poll_submit = false;
-		this.poll_complete = false;
-	}
-
-	static void * __io_poller_slow( void * arg );
-
-	// Weirdly, some systems that do support io_uring don't actually define these
-	#ifdef __alpha__
-		/*
-		* alpha is the only exception, all other architectures
-		* have common numbers for new system calls.
-		*/
-		#ifndef __NR_io_uring_setup
-			#define __NR_io_uring_setup           535
-		#endif
-		#ifndef __NR_io_uring_enter
-			#define __NR_io_uring_enter           536
-		#endif
-		#ifndef __NR_io_uring_register
-			#define __NR_io_uring_register        537
-		#endif
-	#else /* !__alpha__ */
-		#ifndef __NR_io_uring_setup
-			#define __NR_io_uring_setup           425
-		#endif
-		#ifndef __NR_io_uring_enter
-			#define __NR_io_uring_enter           426
-		#endif
-		#ifndef __NR_io_uring_register
-			#define __NR_io_uring_register        427
-		#endif
-	#endif
-
-	struct __submition_data {
-		// Head and tail of the ring (associated with array)
-		volatile uint32_t * head;
-		volatile uint32_t * tail;
-		volatile uint32_t prev_head;
-
-		// The actual kernel ring which uses head/tail
-		// indexes into the sqes arrays
-		uint32_t * array;
-
-		// number of entries and mask to go with it
-		const uint32_t * num;
-		const uint32_t * mask;
-
-		// Submission flags (Not sure what for)
-		uint32_t * flags;
-
-		// number of sqes not submitted (whatever that means)
-		uint32_t * dropped;
-
-		// Like head/tail but not seen by the kernel
-		volatile uint32_t * ready;
-		uint32_t ready_cnt;
-
-		__spinlock_t lock;
-		__spinlock_t release_lock;
-
-		// A buffer of sqes (not the actual ring)
-		struct io_uring_sqe * sqes;
-
-		// The location and size of the mmaped area
-		void * ring_ptr;
-		size_t ring_sz;
-	};
-
-	struct __completion_data {
-		// Head and tail of the ring
-		volatile uint32_t * head;
-		volatile uint32_t * tail;
-
-		// number of entries and mask to go with it
-		const uint32_t * mask;
-		const uint32_t * num;
-
-		// number of cqes not submitted (whatever that means)
-		uint32_t * overflow;
-
-		// the kernel ring
-		struct io_uring_cqe * cqes;
-
-		// The location and size of the mmaped area
-		void * ring_ptr;
-		size_t ring_sz;
-	};
-
-	struct __io_data {
-		struct __submition_data submit_q;
-		struct __completion_data completion_q;
-		uint32_t ring_flags;
-		int fd;
-		bool eager_submits:1;
-		bool poller_submits:1;
-	};
-
-//=============================================================================================
-// I/O Startup / Shutdown logic + Master Poller
-//=============================================================================================
-
-// IO Master poller loop forward
-static void * iopoll_loop( __attribute__((unused)) void * args );
-
-static struct {
-	pthread_t     thrd;    // pthread handle to io poller thread
-	void *        stack;   // pthread stack for io poller thread
-	int           epollfd; // file descriptor to the epoll instance
-	volatile bool run;     // Whether or not to continue
-} iopoll;
-
-void __kernel_io_startup(void) {
-	__cfaabi_dbg_print_safe( "Kernel : Creating EPOLL instance\n" );
-
-	iopoll.epollfd = epoll_create1(0);
-      if (iopoll.epollfd == -1) {
-            abort( "internal error, epoll_create1\n");
-      }
-
-	__cfaabi_dbg_print_safe( "Kernel : Starting io poller thread\n" );
-
-	iopoll.run = true;
-	iopoll.stack = __create_pthread( &iopoll.thrd, iopoll_loop, 0p );
-}
-
-void __kernel_io_shutdown(void) {
-	// Notify the io poller thread of the shutdown
-	iopoll.run = false;
-	sigval val = { 1 };
-	pthread_sigqueue( iopoll.thrd, SIGUSR1, val );
-
-	// Wait for the io poller thread to finish
-
-	pthread_join( iopoll.thrd, 0p );
-	free( iopoll.stack );
-
-	int ret = close(iopoll.epollfd);
-      if (ret == -1) {
-            abort( "internal error, close epoll\n");
-      }
-
-	// Io polling is now fully stopped
-
-	__cfaabi_dbg_print_safe( "Kernel : IO poller stopped\n" );
-}
-
-static void * iopoll_loop( __attribute__((unused)) void * args ) {
-	__processor_id_t id;
-	id.id = doregister(&id);
-	__cfaabi_dbg_print_safe( "Kernel : IO poller thread starting\n" );
-
-	// Block signals to control when they arrive
-	sigset_t mask;
-	sigfillset(&mask);
-	if ( pthread_sigmask( SIG_BLOCK, &mask, 0p ) == -1 ) {
-	    abort( "internal error, pthread_sigmask" );
-	}
-
-	sigdelset( &mask, SIGUSR1 );
-
-	// Create sufficient events
-	struct epoll_event events[10];
-	// Main loop
-	while( iopoll.run ) {
-		// Wait for events
-		int nfds = epoll_pwait( iopoll.epollfd, events, 10, -1, &mask );
-
-		// Check if an error occured
-            if (nfds == -1) {
-			if( errno == EINTR ) continue;
-                  abort( "internal error, pthread_sigmask" );
-            }
-
-		for(i; nfds) {
-			$io_ctx_thread * io_ctx = ($io_ctx_thread *)(uintptr_t)events[i].data.u64;
-			/* paranoid */ verify( io_ctx );
-			__cfadbg_print_safe(io_core, "Kernel I/O : Unparking io poller %p\n", io_ctx);
-			#if !defined( __CFA_NO_STATISTICS__ )
-				kernelTLS.this_stats = io_ctx->self.curr_cluster->stats;
-			#endif
-			__post( io_ctx->sem, &id );
-		}
-	}
-
-	__cfaabi_dbg_print_safe( "Kernel : IO poller thread stopping\n" );
-	unregister(&id);
-	return 0p;
-}
-
-//=============================================================================================
-// I/O Context Constrution/Destruction
-//=============================================================================================
-
-	void ?{}($io_ctx_thread & this, struct cluster & cl) { (this.self){ "IO Poller", cl }; }
-	void main( $io_ctx_thread & this );
-	static inline $thread * get_thread( $io_ctx_thread & this ) { return &this.self; }
-	void ^?{}( $io_ctx_thread & mutex this ) {}
-
-	static void __io_create ( __io_data & this, const io_context_params & params_in );
-	static void __io_destroy( __io_data & this );
-
-	void ?{}(io_context & this, struct cluster & cl, const io_context_params & params) {
-		(this.thrd){ cl };
-		this.thrd.ring = malloc();
-		__cfadbg_print_safe(io_core, "Kernel I/O : Creating ring for io_context %p\n", &this);
-		__io_create( *this.thrd.ring, params );
-
-		__cfadbg_print_safe(io_core, "Kernel I/O : Starting poller thread for io_context %p\n", &this);
-		this.thrd.done = false;
-		__thrd_start( this.thrd, main );
-
-		__cfadbg_print_safe(io_core, "Kernel I/O : io_context %p ready\n", &this);
-	}
-
-	void ?{}(io_context & this, struct cluster & cl) {
-		io_context_params params;
-		(this){ cl, params };
-	}
-
-	void ^?{}(io_context & this, bool cluster_context) {
-		__cfadbg_print_safe(io_core, "Kernel I/O : tearing down io_context %p\n", &this);
-
-		// Notify the thread of the shutdown
-		__atomic_store_n(&this.thrd.done, true, __ATOMIC_SEQ_CST);
-
-		// If this is an io_context within a cluster, things get trickier
-		$thread & thrd = this.thrd.self;
-		if( cluster_context ) {
-			cluster & cltr = *thrd.curr_cluster;
-			/* paranoid */ verify( cltr.nprocessors == 0 || &cltr == mainCluster );
-			/* paranoid */ verify( !ready_mutate_islocked() );
-
-			// We need to adjust the clean-up based on where the thread is
-			if( thrd.state == Ready || thrd.preempted != __NO_PREEMPTION ) {
-
-				ready_schedule_lock( (struct __processor_id_t *)active_processor() );
-
-					// This is the tricky case
-					// The thread was preempted and now it is on the ready queue
-					// The thread should be the last on the list
-					/* paranoid */ verify( thrd.link.next != 0p );
-
-					// Remove the thread from the ready queue of this cluster
-					__attribute__((unused)) bool removed = remove_head( &cltr, &thrd );
-					/* paranoid */ verify( removed );
-					thrd.link.next = 0p;
-					thrd.link.prev = 0p;
-					__cfaabi_dbg_debug_do( thrd.unpark_stale = true );
-
-					// Fixup the thread state
-					thrd.state = Blocked;
-					thrd.ticket = 0;
-					thrd.preempted = __NO_PREEMPTION;
-
-				ready_schedule_unlock( (struct __processor_id_t *)active_processor() );
-
-				// Pretend like the thread was blocked all along
-			}
-			// !!! This is not an else if !!!
-			if( thrd.state == Blocked ) {
-
-				// This is the "easy case"
-				// The thread is parked and can easily be moved to active cluster
-				verify( thrd.curr_cluster != active_cluster() || thrd.curr_cluster == mainCluster );
-				thrd.curr_cluster = active_cluster();
-
-				// unpark the fast io_poller
-				unpark( &thrd __cfaabi_dbg_ctx2 );
-			}
-			else {
-
-				// The thread is in a weird state
-				// I don't know what to do here
-				abort("io_context poller thread is in unexpected state, cannot clean-up correctly\n");
-			}
-		} else {
-			unpark( &thrd __cfaabi_dbg_ctx2 );
-		}
-
-		^(this.thrd){};
-		__cfadbg_print_safe(io_core, "Kernel I/O : Stopped poller thread for io_context %p\n", &this);
-
-		__io_destroy( *this.thrd.ring );
-		__cfadbg_print_safe(io_core, "Kernel I/O : Destroyed ring for io_context %p\n", &this);
-
-		free(this.thrd.ring);
-	}
-
-	void ^?{}(io_context & this) {
-		^(this){ false };
-	}
-
-	static void __io_create( __io_data & this, const io_context_params & params_in ) {
-		// Step 1 : call to setup
-		struct io_uring_params params;
-		memset(&params, 0, sizeof(params));
-		if( params_in.poll_submit   ) params.flags |= IORING_SETUP_SQPOLL;
-		if( params_in.poll_complete ) params.flags |= IORING_SETUP_IOPOLL;
-
-		uint32_t nentries = params_in.num_entries;
-
-		int fd = syscall(__NR_io_uring_setup, nentries, &params );
-		if(fd < 0) {
-			abort("KERNEL ERROR: IO_URING SETUP - %s\n", strerror(errno));
-		}
-
-		// Step 2 : mmap result
-		memset( &this, 0, sizeof(struct __io_data) );
-		struct __submition_data  & sq = this.submit_q;
-		struct __completion_data & cq = this.completion_q;
-
-		// calculate the right ring size
-		sq.ring_sz = params.sq_off.array + (params.sq_entries * sizeof(unsigned)           );
-		cq.ring_sz = params.cq_off.cqes  + (params.cq_entries * sizeof(struct io_uring_cqe));
-
-		// Requires features
-		#if defined(IORING_FEAT_SINGLE_MMAP)
-			// adjust the size according to the parameters
-			if ((params.features & IORING_FEAT_SINGLE_MMAP) != 0) {
-				cq.ring_sz = sq.ring_sz = max(cq.ring_sz, sq.ring_sz);
-			}
-		#endif
-
-		// mmap the Submit Queue into existence
-		sq.ring_ptr = mmap(0, sq.ring_sz, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, fd, IORING_OFF_SQ_RING);
-		if (sq.ring_ptr == (void*)MAP_FAILED) {
-			abort("KERNEL ERROR: IO_URING MMAP1 - %s\n", strerror(errno));
-		}
-
-		// Requires features
-		#if defined(IORING_FEAT_SINGLE_MMAP)
-			// mmap the Completion Queue into existence (may or may not be needed)
-			if ((params.features & IORING_FEAT_SINGLE_MMAP) != 0) {
-				cq.ring_ptr = sq.ring_ptr;
-			}
-			else
-		#endif
-		{
-			// We need multiple call to MMAP
-			cq.ring_ptr = mmap(0, cq.ring_sz, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, fd, IORING_OFF_CQ_RING);
-			if (cq.ring_ptr == (void*)MAP_FAILED) {
-				munmap(sq.ring_ptr, sq.ring_sz);
-				abort("KERNEL ERROR: IO_URING MMAP2 - %s\n", strerror(errno));
-			}
-		}
-
-		// mmap the submit queue entries
-		size_t size = params.sq_entries * sizeof(struct io_uring_sqe);
-		sq.sqes = (struct io_uring_sqe *)mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, fd, IORING_OFF_SQES);
-		if (sq.sqes == (struct io_uring_sqe *)MAP_FAILED) {
-			munmap(sq.ring_ptr, sq.ring_sz);
-			if (cq.ring_ptr != sq.ring_ptr) munmap(cq.ring_ptr, cq.ring_sz);
-			abort("KERNEL ERROR: IO_URING MMAP3 - %s\n", strerror(errno));
-		}
-
-		// Get the pointers from the kernel to fill the structure
-		// submit queue
-		sq.head    = (volatile uint32_t *)(((intptr_t)sq.ring_ptr) + params.sq_off.head);
-		sq.tail    = (volatile uint32_t *)(((intptr_t)sq.ring_ptr) + params.sq_off.tail);
-		sq.mask    = (   const uint32_t *)(((intptr_t)sq.ring_ptr) + params.sq_off.ring_mask);
-		sq.num     = (   const uint32_t *)(((intptr_t)sq.ring_ptr) + params.sq_off.ring_entries);
-		sq.flags   = (         uint32_t *)(((intptr_t)sq.ring_ptr) + params.sq_off.flags);
-		sq.dropped = (         uint32_t *)(((intptr_t)sq.ring_ptr) + params.sq_off.dropped);
-		sq.array   = (         uint32_t *)(((intptr_t)sq.ring_ptr) + params.sq_off.array);
-		sq.prev_head = *sq.head;
-
-		{
-			const uint32_t num = *sq.num;
-			for( i; num ) {
-				sq.sqes[i].user_data = 0ul64;
-			}
-		}
-
-		(sq.lock){};
-		(sq.release_lock){};
-
-		if( params_in.poller_submits || params_in.eager_submits ) {
-			/* paranoid */ verify( is_pow2( params_in.num_ready ) || (params_in.num_ready < 8) );
-			sq.ready_cnt = max( params_in.num_ready, 8 );
-			sq.ready = alloc_align( 64, sq.ready_cnt );
-			for(i; sq.ready_cnt) {
-				sq.ready[i] = -1ul32;
-			}
-		}
-		else {
-			sq.ready_cnt = 0;
-			sq.ready = 0p;
-		}
-
-		// completion queue
-		cq.head     = (volatile uint32_t *)(((intptr_t)cq.ring_ptr) + params.cq_off.head);
-		cq.tail     = (volatile uint32_t *)(((intptr_t)cq.ring_ptr) + params.cq_off.tail);
-		cq.mask     = (   const uint32_t *)(((intptr_t)cq.ring_ptr) + params.cq_off.ring_mask);
-		cq.num      = (   const uint32_t *)(((intptr_t)cq.ring_ptr) + params.cq_off.ring_entries);
-		cq.overflow = (         uint32_t *)(((intptr_t)cq.ring_ptr) + params.cq_off.overflow);
-		cq.cqes   = (struct io_uring_cqe *)(((intptr_t)cq.ring_ptr) + params.cq_off.cqes);
-
-		// some paranoid checks
-		/* paranoid */ verifyf( (*cq.mask) == ((*cq.num) - 1ul32), "IO_URING Expected mask to be %u (%u entries), was %u", (*cq.num) - 1ul32, *cq.num, *cq.mask  );
-		/* paranoid */ verifyf( (*cq.num)  >= nentries, "IO_URING Expected %u entries, got %u", nentries, *cq.num );
-		/* paranoid */ verifyf( (*cq.head) == 0, "IO_URING Expected head to be 0, got %u", *cq.head );
-		/* paranoid */ verifyf( (*cq.tail) == 0, "IO_URING Expected tail to be 0, got %u", *cq.tail );
-
-		/* paranoid */ verifyf( (*sq.mask) == ((*sq.num) - 1ul32), "IO_URING Expected mask to be %u (%u entries), was %u", (*sq.num) - 1ul32, *sq.num, *sq.mask );
-		/* paranoid */ verifyf( (*sq.num) >= nentries, "IO_URING Expected %u entries, got %u", nentries, *sq.num );
-		/* paranoid */ verifyf( (*sq.head) == 0, "IO_URING Expected head to be 0, got %u", *sq.head );
-		/* paranoid */ verifyf( (*sq.tail) == 0, "IO_URING Expected tail to be 0, got %u", *sq.tail );
-
-		// Update the global ring info
-		this.ring_flags = params.flags;
-		this.fd         = fd;
-		this.eager_submits  = params_in.eager_submits;
-		this.poller_submits = params_in.poller_submits;
-	}
-
-	void __io_destroy( __io_data & this ) {
-		// Shutdown the io rings
-		struct __submition_data  & sq = this.submit_q;
-		struct __completion_data & cq = this.completion_q;
-
-		// unmap the submit queue entries
-		munmap(sq.sqes, (*sq.num) * sizeof(struct io_uring_sqe));
-
-		// unmap the Submit Queue ring
-		munmap(sq.ring_ptr, sq.ring_sz);
-
-		// unmap the Completion Queue ring, if it is different
-		if (cq.ring_ptr != sq.ring_ptr) {
-			munmap(cq.ring_ptr, cq.ring_sz);
-		}
-
-		// close the file descriptor
-		close(this.fd);
-
-		free( this.submit_q.ready ); // Maybe null, doesn't matter
-	}
-
-	int __io_uring_enter( struct __io_data & ring, unsigned to_submit, bool get ) {
+	#include "stats.hfa"
+	#include "kernel.hfa"
+	#include "kernel/fwd.hfa"
+	#include "io/types.hfa"
+
+//=============================================================================================
+// I/O Syscall
+//=============================================================================================
+	static int __io_uring_enter( struct __io_data & ring, unsigned to_submit, bool get ) {
 		bool need_sys_to_submit = false;
 		bool need_sys_to_complete = false;
@@ -618,10 +164,5 @@
 	void main( $io_ctx_thread & this ) {
 		epoll_event ev;
-		ev.events = EPOLLIN | EPOLLONESHOT;
-		ev.data.u64 = (uint64_t)&this;
-		int ret = epoll_ctl(iopoll.epollfd, EPOLL_CTL_ADD, this.ring->fd, &ev);
-		if (ret < 0) {
-			abort( "KERNEL ERROR: EPOLL ADD - (%d) %s\n", (int)errno, strerror(errno) );
-		}
+		__ioctx_register( this, ev );
 
 		__cfadbg_print_safe(io_core, "Kernel I/O : IO poller %p for ring %p ready\n", &this, &this.ring);
@@ -654,11 +195,6 @@
 				reset = 0;
 
-				// wake up the slow poller
-				ret = epoll_ctl(iopoll.epollfd, EPOLL_CTL_MOD, this.ring->fd, &ev);
-				if (ret < 0) {
-					abort( "KERNEL ERROR: EPOLL REARM - (%d) %s\n", (int)errno, strerror(errno) );
-				}
-
-				// park this thread
+				// block this thread
+				__ioctx_prepare_block( this, ev );
 				wait( this.sem );
 			}
@@ -933,22 +469,3 @@
 		return count;
 	}
-
-//=============================================================================================
-// I/O Submissions
-//=============================================================================================
-
-	void register_fixed_files( io_context & ctx, int * files, unsigned count ) {
-		int ret = syscall( __NR_io_uring_register, ctx.thrd.ring->fd, IORING_REGISTER_FILES, files, count );
-		if( ret < 0 ) {
-			abort( "KERNEL ERROR: IO_URING SYSCALL - (%d) %s\n", (int)errno, strerror(errno) );
-		}
-
-		__cfadbg_print_safe( io_core, "Kernel I/O : Performed io_register for %p, returned %d\n", active_thread(), ret );
-	}
-
-	void register_fixed_files( cluster & cltr, int * files, unsigned count ) {
-		for(i; cltr.io.cnt) {
-			register_fixed_files( cltr.io.ctxs[i], files, count );
-		}
-	}
 #endif
Index: libcfa/src/concurrency/io/setup.cfa
===================================================================
--- libcfa/src/concurrency/io/setup.cfa	(revision 3e2b9c93dff631bddb430d876a83bcc3bafe9d16)
+++ libcfa/src/concurrency/io/setup.cfa	(revision 3e2b9c93dff631bddb430d876a83bcc3bafe9d16)
@@ -0,0 +1,472 @@
+//
+// Cforall Version 1.0.0 Copyright (C) 2020 University of Waterloo
+//
+// The contents of this file are covered under the licence agreement in the
+// file "LICENCE" distributed with Cforall.
+//
+// io/setup.cfa --
+//
+// Author           : Thierry Delisle
+// Created On       : Fri Jul 31 16:25:51 2020
+// Last Modified By :
+// Last Modified On :
+// Update Count     :
+//
+
+#define __cforall_thread__
+#define _GNU_SOURCE         /* See feature_test_macros(7) */
+
+#include "io/types.hfa"
+
+#if !defined(CFA_HAVE_LINUX_IO_URING_H)
+	void __kernel_io_startup() {
+		// Nothing to do without io_uring
+	}
+
+	void __kernel_io_shutdown() {
+		// Nothing to do without io_uring
+	}
+
+	void ?{}(io_context & this, struct cluster & cl) {}
+	void ?{}(io_context & this, struct cluster & cl, const io_context_params & params) {}
+
+	void ^?{}(io_context & this) {}
+	void ^?{}(io_context & this, bool cluster_context) {}
+
+#else
+	#include <errno.h>
+	#include <stdint.h>
+	#include <string.h>
+	#include <signal.h>
+	#include <unistd.h>
+
+	extern "C" {
+		#include <pthread.h>
+		#include <sys/epoll.h>
+		#include <sys/mman.h>
+		#include <sys/syscall.h>
+
+		#include <linux/io_uring.h>
+	}
+
+	#include "bitmanip.hfa"
+	#include "kernel_private.hfa"
+	#include "thread.hfa"
+
+	void ?{}(io_context_params & this) {
+		this.num_entries = 256;
+		this.num_ready = 256;
+		this.submit_aff = -1;
+		this.eager_submits = false;
+		this.poller_submits = false;
+		this.poll_submit = false;
+		this.poll_complete = false;
+	}
+
+	static void * __io_poller_slow( void * arg );
+
+	// Weirdly, some systems that do support io_uring don't actually define these
+	#ifdef __alpha__
+		/*
+		* alpha is the only exception, all other architectures
+		* have common numbers for new system calls.
+		*/
+		#ifndef __NR_io_uring_setup
+			#define __NR_io_uring_setup           535
+		#endif
+		#ifndef __NR_io_uring_enter
+			#define __NR_io_uring_enter           536
+		#endif
+		#ifndef __NR_io_uring_register
+			#define __NR_io_uring_register        537
+		#endif
+	#else /* !__alpha__ */
+		#ifndef __NR_io_uring_setup
+			#define __NR_io_uring_setup           425
+		#endif
+		#ifndef __NR_io_uring_enter
+			#define __NR_io_uring_enter           426
+		#endif
+		#ifndef __NR_io_uring_register
+			#define __NR_io_uring_register        427
+		#endif
+	#endif
+
+//=============================================================================================
+// I/O Startup / Shutdown logic + Master Poller
+//=============================================================================================
+
+	// IO Master poller loop forward
+	static void * iopoll_loop( __attribute__((unused)) void * args );
+
+	static struct {
+		pthread_t     thrd;    // pthread handle to io poller thread
+		void *        stack;   // pthread stack for io poller thread
+		int           epollfd; // file descriptor to the epoll instance
+		volatile bool run;     // Whether or not to continue
+	} iopoll;
+
+	void __kernel_io_startup(void) {
+		__cfaabi_dbg_print_safe( "Kernel : Creating EPOLL instance\n" );
+
+		iopoll.epollfd = epoll_create1(0);
+		if (iopoll.epollfd == -1) {
+			abort( "internal error, epoll_create1\n");
+		}
+
+		__cfaabi_dbg_print_safe( "Kernel : Starting io poller thread\n" );
+
+		iopoll.run = true;
+		iopoll.stack = __create_pthread( &iopoll.thrd, iopoll_loop, 0p );
+	}
+
+	void __kernel_io_shutdown(void) {
+		// Notify the io poller thread of the shutdown
+		iopoll.run = false;
+		sigval val = { 1 };
+		pthread_sigqueue( iopoll.thrd, SIGUSR1, val );
+
+		// Wait for the io poller thread to finish
+
+		pthread_join( iopoll.thrd, 0p );
+		free( iopoll.stack );
+
+		int ret = close(iopoll.epollfd);
+		if (ret == -1) {
+			abort( "internal error, close epoll\n");
+		}
+
+		// Io polling is now fully stopped
+
+		__cfaabi_dbg_print_safe( "Kernel : IO poller stopped\n" );
+	}
+
+	static void * iopoll_loop( __attribute__((unused)) void * args ) {
+		__processor_id_t id;
+		id.id = doregister(&id);
+		__cfaabi_dbg_print_safe( "Kernel : IO poller thread starting\n" );
+
+		// Block signals to control when they arrive
+		sigset_t mask;
+		sigfillset(&mask);
+		if ( pthread_sigmask( SIG_BLOCK, &mask, 0p ) == -1 ) {
+			abort( "internal error, pthread_sigmask" );
+		}
+
+		sigdelset( &mask, SIGUSR1 );
+
+		// Create sufficient events
+		struct epoll_event events[10];
+		// Main loop
+		while( iopoll.run ) {
+			// Wait for events
+			int nfds = epoll_pwait( iopoll.epollfd, events, 10, -1, &mask );
+
+			// Check if an error occurred
+			if (nfds == -1) {
+				if( errno == EINTR ) continue;
+				abort( "internal error, epoll_pwait" );
+			}
+
+			for(i; nfds) {
+				$io_ctx_thread * io_ctx = ($io_ctx_thread *)(uintptr_t)events[i].data.u64;
+				/* paranoid */ verify( io_ctx );
+				__cfadbg_print_safe(io_core, "Kernel I/O : Unparking io poller %p\n", io_ctx);
+				#if !defined( __CFA_NO_STATISTICS__ )
+					kernelTLS.this_stats = io_ctx->self.curr_cluster->stats;
+				#endif
+				__post( io_ctx->sem, &id );
+			}
+		}
+
+		__cfaabi_dbg_print_safe( "Kernel : IO poller thread stopping\n" );
+		unregister(&id);
+		return 0p;
+	}
+
+//=============================================================================================
+// I/O Context Construction/Destruction
+//=============================================================================================
+
+	void ?{}($io_ctx_thread & this, struct cluster & cl) { (this.self){ "IO Poller", cl }; }
+	void main( $io_ctx_thread & this );
+	static inline $thread * get_thread( $io_ctx_thread & this ) { return &this.self; }
+	void ^?{}( $io_ctx_thread & mutex this ) {}
+
+	static void __io_create ( __io_data & this, const io_context_params & params_in );
+	static void __io_destroy( __io_data & this );
+
+	void ?{}(io_context & this, struct cluster & cl, const io_context_params & params) {
+		(this.thrd){ cl };
+		this.thrd.ring = malloc();
+		__cfadbg_print_safe(io_core, "Kernel I/O : Creating ring for io_context %p\n", &this);
+		__io_create( *this.thrd.ring, params );
+
+		__cfadbg_print_safe(io_core, "Kernel I/O : Starting poller thread for io_context %p\n", &this);
+		this.thrd.done = false;
+		__thrd_start( this.thrd, main );
+
+		__cfadbg_print_safe(io_core, "Kernel I/O : io_context %p ready\n", &this);
+	}
+
+	void ?{}(io_context & this, struct cluster & cl) {
+		io_context_params params;
+		(this){ cl, params };
+	}
+
+	void ^?{}(io_context & this, bool cluster_context) {
+		__cfadbg_print_safe(io_core, "Kernel I/O : tearing down io_context %p\n", &this);
+
+		// Notify the thread of the shutdown
+		__atomic_store_n(&this.thrd.done, true, __ATOMIC_SEQ_CST);
+
+		// If this is an io_context within a cluster, things get trickier
+		$thread & thrd = this.thrd.self;
+		if( cluster_context ) {
+			cluster & cltr = *thrd.curr_cluster;
+			/* paranoid */ verify( cltr.nprocessors == 0 || &cltr == mainCluster );
+			/* paranoid */ verify( !ready_mutate_islocked() );
+
+			// We need to adjust the clean-up based on where the thread is
+			if( thrd.state == Ready || thrd.preempted != __NO_PREEMPTION ) {
+
+				ready_schedule_lock( (struct __processor_id_t *)active_processor() );
+
+					// This is the tricky case
+					// The thread was preempted and now it is on the ready queue
+					// The thread should be the last on the list
+					/* paranoid */ verify( thrd.link.next != 0p );
+
+					// Remove the thread from the ready queue of this cluster
+					__attribute__((unused)) bool removed = remove_head( &cltr, &thrd );
+					/* paranoid */ verify( removed );
+					thrd.link.next = 0p;
+					thrd.link.prev = 0p;
+					__cfaabi_dbg_debug_do( thrd.unpark_stale = true );
+
+					// Fixup the thread state
+					thrd.state = Blocked;
+					thrd.ticket = 0;
+					thrd.preempted = __NO_PREEMPTION;
+
+				ready_schedule_unlock( (struct __processor_id_t *)active_processor() );
+
+				// Pretend like the thread was blocked all along
+			}
+			// !!! This is not an else if !!!
+			if( thrd.state == Blocked ) {
+
+				// This is the "easy case"
+				// The thread is parked and can easily be moved to active cluster
+				verify( thrd.curr_cluster != active_cluster() || thrd.curr_cluster == mainCluster );
+				thrd.curr_cluster = active_cluster();
+
+				// unpark the fast io_poller
+				unpark( &thrd __cfaabi_dbg_ctx2 );
+			}
+			else {
+
+				// The thread is in a weird state
+				// I don't know what to do here
+				abort("io_context poller thread is in unexpected state, cannot clean-up correctly\n");
+			}
+		} else {
+			unpark( &thrd __cfaabi_dbg_ctx2 );
+		}
+
+		^(this.thrd){};
+		__cfadbg_print_safe(io_core, "Kernel I/O : Stopped poller thread for io_context %p\n", &this);
+
+		__io_destroy( *this.thrd.ring );
+		__cfadbg_print_safe(io_core, "Kernel I/O : Destroyed ring for io_context %p\n", &this);
+
+		free(this.thrd.ring);
+	}
+
+	void ^?{}(io_context & this) {
+		^(this){ false };
+	}
+
+	static void __io_create( __io_data & this, const io_context_params & params_in ) {
+		// Step 1 : call to setup
+		struct io_uring_params params;
+		memset(&params, 0, sizeof(params));
+		if( params_in.poll_submit   ) params.flags |= IORING_SETUP_SQPOLL;
+		if( params_in.poll_complete ) params.flags |= IORING_SETUP_IOPOLL;
+
+		uint32_t nentries = params_in.num_entries;
+
+		int fd = syscall(__NR_io_uring_setup, nentries, &params );
+		if(fd < 0) {
+			abort("KERNEL ERROR: IO_URING SETUP - %s\n", strerror(errno));
+		}
+
+		// Step 2 : mmap result
+		memset( &this, 0, sizeof(struct __io_data) );
+		struct __submition_data  & sq = this.submit_q;
+		struct __completion_data & cq = this.completion_q;
+
+		// calculate the right ring size
+		sq.ring_sz = params.sq_off.array + (params.sq_entries * sizeof(unsigned)           );
+		cq.ring_sz = params.cq_off.cqes  + (params.cq_entries * sizeof(struct io_uring_cqe));
+
+		// Requires features
+		#if defined(IORING_FEAT_SINGLE_MMAP)
+			// adjust the size according to the parameters
+			if ((params.features & IORING_FEAT_SINGLE_MMAP) != 0) {
+				cq.ring_sz = sq.ring_sz = max(cq.ring_sz, sq.ring_sz);
+			}
+		#endif
+
+		// mmap the Submit Queue into existence
+		sq.ring_ptr = mmap(0, sq.ring_sz, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, fd, IORING_OFF_SQ_RING);
+		if (sq.ring_ptr == (void*)MAP_FAILED) {
+			abort("KERNEL ERROR: IO_URING MMAP1 - %s\n", strerror(errno));
+		}
+
+		// Requires features
+		#if defined(IORING_FEAT_SINGLE_MMAP)
+			// mmap the Completion Queue into existence (may or may not be needed)
+			if ((params.features & IORING_FEAT_SINGLE_MMAP) != 0) {
+				cq.ring_ptr = sq.ring_ptr;
+			}
+			else
+		#endif
+		{
+			// We need multiple calls to mmap
+			cq.ring_ptr = mmap(0, cq.ring_sz, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, fd, IORING_OFF_CQ_RING);
+			if (cq.ring_ptr == (void*)MAP_FAILED) {
+				munmap(sq.ring_ptr, sq.ring_sz);
+				abort("KERNEL ERROR: IO_URING MMAP2 - %s\n", strerror(errno));
+			}
+		}
+
+		// mmap the submit queue entries
+		size_t size = params.sq_entries * sizeof(struct io_uring_sqe);
+		sq.sqes = (struct io_uring_sqe *)mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, fd, IORING_OFF_SQES);
+		if (sq.sqes == (struct io_uring_sqe *)MAP_FAILED) {
+			munmap(sq.ring_ptr, sq.ring_sz);
+			if (cq.ring_ptr != sq.ring_ptr) munmap(cq.ring_ptr, cq.ring_sz);
+			abort("KERNEL ERROR: IO_URING MMAP3 - %s\n", strerror(errno));
+		}
+
+		// Get the pointers from the kernel to fill the structure
+		// submit queue
+		sq.head    = (volatile uint32_t *)(((intptr_t)sq.ring_ptr) + params.sq_off.head);
+		sq.tail    = (volatile uint32_t *)(((intptr_t)sq.ring_ptr) + params.sq_off.tail);
+		sq.mask    = (   const uint32_t *)(((intptr_t)sq.ring_ptr) + params.sq_off.ring_mask);
+		sq.num     = (   const uint32_t *)(((intptr_t)sq.ring_ptr) + params.sq_off.ring_entries);
+		sq.flags   = (         uint32_t *)(((intptr_t)sq.ring_ptr) + params.sq_off.flags);
+		sq.dropped = (         uint32_t *)(((intptr_t)sq.ring_ptr) + params.sq_off.dropped);
+		sq.array   = (         uint32_t *)(((intptr_t)sq.ring_ptr) + params.sq_off.array);
+		sq.prev_head = *sq.head;
+
+		{
+			const uint32_t num = *sq.num;
+			for( i; num ) {
+				sq.sqes[i].user_data = 0ul64;
+			}
+		}
+
+		(sq.lock){};
+		(sq.release_lock){};
+
+		if( params_in.poller_submits || params_in.eager_submits ) {
+			/* paranoid */ verify( is_pow2( params_in.num_ready ) || (params_in.num_ready < 8) );
+			sq.ready_cnt = max( params_in.num_ready, 8 );
+			sq.ready = alloc_align( 64, sq.ready_cnt );
+			for(i; sq.ready_cnt) {
+				sq.ready[i] = -1ul32;
+			}
+		}
+		else {
+			sq.ready_cnt = 0;
+			sq.ready = 0p;
+		}
+
+		// completion queue
+		cq.head     = (volatile uint32_t *)(((intptr_t)cq.ring_ptr) + params.cq_off.head);
+		cq.tail     = (volatile uint32_t *)(((intptr_t)cq.ring_ptr) + params.cq_off.tail);
+		cq.mask     = (   const uint32_t *)(((intptr_t)cq.ring_ptr) + params.cq_off.ring_mask);
+		cq.num      = (   const uint32_t *)(((intptr_t)cq.ring_ptr) + params.cq_off.ring_entries);
+		cq.overflow = (         uint32_t *)(((intptr_t)cq.ring_ptr) + params.cq_off.overflow);
+		cq.cqes   = (struct io_uring_cqe *)(((intptr_t)cq.ring_ptr) + params.cq_off.cqes);
+
+		// some paranoid checks
+		/* paranoid */ verifyf( (*cq.mask) == ((*cq.num) - 1ul32), "IO_URING Expected mask to be %u (%u entries), was %u", (*cq.num) - 1ul32, *cq.num, *cq.mask  );
+		/* paranoid */ verifyf( (*cq.num)  >= nentries, "IO_URING Expected %u entries, got %u", nentries, *cq.num );
+		/* paranoid */ verifyf( (*cq.head) == 0, "IO_URING Expected head to be 0, got %u", *cq.head );
+		/* paranoid */ verifyf( (*cq.tail) == 0, "IO_URING Expected tail to be 0, got %u", *cq.tail );
+
+		/* paranoid */ verifyf( (*sq.mask) == ((*sq.num) - 1ul32), "IO_URING Expected mask to be %u (%u entries), was %u", (*sq.num) - 1ul32, *sq.num, *sq.mask );
+		/* paranoid */ verifyf( (*sq.num) >= nentries, "IO_URING Expected %u entries, got %u", nentries, *sq.num );
+		/* paranoid */ verifyf( (*sq.head) == 0, "IO_URING Expected head to be 0, got %u", *sq.head );
+		/* paranoid */ verifyf( (*sq.tail) == 0, "IO_URING Expected tail to be 0, got %u", *sq.tail );
+
+		// Update the global ring info
+		this.ring_flags = params.flags;
+		this.fd         = fd;
+		this.eager_submits  = params_in.eager_submits;
+		this.poller_submits = params_in.poller_submits;
+	}
+
+	static void __io_destroy( __io_data & this ) {
+		// Shutdown the io rings
+		struct __submition_data  & sq = this.submit_q;
+		struct __completion_data & cq = this.completion_q;
+
+		// unmap the submit queue entries
+		munmap(sq.sqes, (*sq.num) * sizeof(struct io_uring_sqe));
+
+		// unmap the Submit Queue ring
+		munmap(sq.ring_ptr, sq.ring_sz);
+
+		// unmap the Completion Queue ring, if it is different
+		if (cq.ring_ptr != sq.ring_ptr) {
+			munmap(cq.ring_ptr, cq.ring_sz);
+		}
+
+		// close the file descriptor
+		close(this.fd);
+
+		free( this.submit_q.ready ); // Maybe null, doesn't matter
+	}
+
+//=============================================================================================
+// I/O Context Sleep
+//=============================================================================================
+
+	void __ioctx_register($io_ctx_thread & ctx, struct epoll_event & ev) {
+		ev.events = EPOLLIN | EPOLLONESHOT;
+		ev.data.u64 = (uint64_t)&ctx;
+		int ret = epoll_ctl(iopoll.epollfd, EPOLL_CTL_ADD, ctx.ring->fd, &ev);
+		if (ret < 0) {
+			abort( "KERNEL ERROR: EPOLL ADD - (%d) %s\n", (int)errno, strerror(errno) );
+		}
+	}
+
+	void __ioctx_prepare_block($io_ctx_thread & ctx, struct epoll_event & ev) {
+		int ret = epoll_ctl(iopoll.epollfd, EPOLL_CTL_MOD, ctx.ring->fd, &ev);
+		if (ret < 0) {
+			abort( "KERNEL ERROR: EPOLL REARM - (%d) %s\n", (int)errno, strerror(errno) );
+		}
+	}
+
+//=============================================================================================
+// I/O Context Misc Setup
+//=============================================================================================
+	void register_fixed_files( io_context & ctx, int * files, unsigned count ) {
+		int ret = syscall( __NR_io_uring_register, ctx.thrd.ring->fd, IORING_REGISTER_FILES, files, count );
+		if( ret < 0 ) {
+			abort( "KERNEL ERROR: IO_URING SYSCALL - (%d) %s\n", (int)errno, strerror(errno) );
+		}
+
+		__cfadbg_print_safe( io_core, "Kernel I/O : Performed io_register for %p, returned %d\n", active_thread(), ret );
+	}
+
+	void register_fixed_files( cluster & cltr, int * files, unsigned count ) {
+		for(i; cltr.io.cnt) {
+			register_fixed_files( cltr.io.ctxs[i], files, count );
+		}
+	}
+#endif
Index: libcfa/src/concurrency/io/types.hfa
===================================================================
--- libcfa/src/concurrency/io/types.hfa	(revision 3e2b9c93dff631bddb430d876a83bcc3bafe9d16)
+++ libcfa/src/concurrency/io/types.hfa	(revision 3e2b9c93dff631bddb430d876a83bcc3bafe9d16)
@@ -0,0 +1,128 @@
+//
+// Cforall Version 1.0.0 Copyright (C) 2020 University of Waterloo
+//
+// The contents of this file are covered under the licence agreement in the
+// file "LICENCE" distributed with Cforall.
+//
+// io/types.hfa --
+//
+// Author           : Thierry Delisle
+// Created On       : Fri Jul 31 16:22:47 2020
+// Last Modified By :
+// Last Modified On :
+// Update Count     :
+//
+
+#pragma once
+
+#if defined(CFA_HAVE_LINUX_IO_URING_H)
+      #include "bits/locks.hfa"
+
+	//-----------------------------------------------------------------------
+	// Ring Data structure
+      struct __submition_data {
+		// Head and tail of the ring (associated with array)
+		volatile uint32_t * head;
+		volatile uint32_t * tail;
+		volatile uint32_t prev_head;
+
+		// The actual kernel ring which uses head/tail
+		// indexes into the sqes arrays
+		uint32_t * array;
+
+		// number of entries and mask to go with it
+		const uint32_t * num;
+		const uint32_t * mask;
+
+		// Submission-queue flags, written by the kernel (e.g. IORING_SQ_NEED_WAKEUP under SQPOLL)
+		uint32_t * flags;
+
+		// number of invalid sqes dropped by the kernel (io_uring "dropped" counter)
+		uint32_t * dropped;
+
+		// Like head/tail but not seen by the kernel
+		volatile uint32_t * ready;
+		uint32_t ready_cnt;
+
+		__spinlock_t lock;
+		__spinlock_t release_lock;
+
+		// A buffer of sqes (not the actual ring)
+		struct io_uring_sqe * sqes;
+
+		// The location and size of the mmaped area
+		void * ring_ptr;
+		size_t ring_sz;
+	};
+
+	struct __completion_data {
+		// Head and tail of the ring
+		volatile uint32_t * head;
+		volatile uint32_t * tail;
+
+		// number of entries and mask to go with it
+		const uint32_t * mask;
+		const uint32_t * num;
+
+		// number of completion events lost because the CQ ring was full (overflow counter)
+		uint32_t * overflow;
+
+		// the kernel ring
+		struct io_uring_cqe * cqes;
+
+		// The location and size of the mmaped area
+		void * ring_ptr;
+		size_t ring_sz;
+	};
+
+	struct __io_data {
+		struct __submition_data submit_q;
+		struct __completion_data completion_q;
+		uint32_t ring_flags;
+		int fd;
+		bool eager_submits:1;
+		bool poller_submits:1;
+	};
+
+
+	//-----------------------------------------------------------------------
+	// IO user data
+	struct __io_user_data_t {
+		int32_t result;
+		$thread * thrd;
+	};
+
+	//-----------------------------------------------------------------------
+	// Misc
+	// Weirdly, some systems that do support io_uring don't actually define these
+	#ifdef __alpha__
+		/*
+		* alpha is the only exception, all other architectures
+		* have common numbers for new system calls.
+		*/
+		#ifndef __NR_io_uring_setup
+			#define __NR_io_uring_setup           535
+		#endif
+		#ifndef __NR_io_uring_enter
+			#define __NR_io_uring_enter           536
+		#endif
+		#ifndef __NR_io_uring_register
+			#define __NR_io_uring_register        537
+		#endif
+	#else /* !__alpha__ */
+		#ifndef __NR_io_uring_setup
+			#define __NR_io_uring_setup           425
+		#endif
+		#ifndef __NR_io_uring_enter
+			#define __NR_io_uring_enter           426
+		#endif
+		#ifndef __NR_io_uring_register
+			#define __NR_io_uring_register        427
+		#endif
+	#endif
+
+	struct epoll_event;
+	struct $io_ctx_thread;
+	void __ioctx_register($io_ctx_thread & ctx, struct epoll_event & ev);
+	void __ioctx_prepare_block($io_ctx_thread & ctx, struct epoll_event & ev);
+#endif
Index: libcfa/src/concurrency/iocall.cfa
===================================================================
--- libcfa/src/concurrency/iocall.cfa	(revision e660761701e94bf2b2ae0130750e911d4122366f)
+++ libcfa/src/concurrency/iocall.cfa	(revision 3e2b9c93dff631bddb430d876a83bcc3bafe9d16)
@@ -14,4 +14,6 @@
 //
 
+#define __cforall_thread__
+
 #include "bits/defs.hfa"
 
@@ -21,9 +23,12 @@
 
 #if defined(CFA_HAVE_LINUX_IO_URING_H)
+	#include <assert.h>
 	#include <stdint.h>
 	#include <errno.h>
 	#include <linux/io_uring.h>
 
-	#include "kernel_private.hfa"
+	#include "kernel.hfa"
+	#include "kernel/fwd.hfa"
+	#include "io/types.hfa"
 
 	extern [* struct io_uring_sqe, uint32_t] __submit_alloc( struct __io_data & ring, uint64_t data );
@@ -53,4 +58,11 @@
 	}
 
+	static inline io_context * __get_io_context( void ) {
+		cluster * cltr = active_cluster();
+		/* paranoid */ verifyf( cltr, "No active cluster for io operation\n");
+		assertf( cltr->io.cnt > 0, "Cluster %p has no default io contexts and no context was specified\n", cltr );
+		/* paranoid */ verifyf( cltr->io.ctxs, "default io contexts for cluster %p are missing\n", cltr);
+		return &cltr->io.ctxs[ __tls_rand() % cltr->io.cnt ];
+	}
 
 
Index: libcfa/src/concurrency/kernel.hfa
===================================================================
--- libcfa/src/concurrency/kernel.hfa	(revision e660761701e94bf2b2ae0130750e911d4122366f)
+++ libcfa/src/concurrency/kernel.hfa	(revision 3e2b9c93dff631bddb430d876a83bcc3bafe9d16)
@@ -23,5 +23,5 @@
 
 extern "C" {
-#include <pthread.h>
+#include <bits/pthreadtypes.h>
 }
 
Index: libcfa/src/concurrency/kernel/fwd.hfa
===================================================================
--- libcfa/src/concurrency/kernel/fwd.hfa	(revision e660761701e94bf2b2ae0130750e911d4122366f)
+++ libcfa/src/concurrency/kernel/fwd.hfa	(revision 3e2b9c93dff631bddb430d876a83bcc3bafe9d16)
@@ -14,9 +14,11 @@
 //
 
+#pragma once
+
 #include "bits/defs.hfa"
 #include "bits/debug.hfa"
 
-#if !defined(__cforall_thread__)
-#error non-thread source file includes kernel/fwd.hfa
+#ifdef __cforall
+#include "bits/random.hfa"
 #endif
 
@@ -25,7 +27,11 @@
 struct cluster;
 
+enum __Preemption_Reason { __NO_PREEMPTION, __ALARM_PREEMPTION, __POLL_PREEMPTION, __MANUAL_PREEMPTION };
+
+#define KERNEL_STORAGE(T,X) __attribute((aligned(__alignof__(T)))) static char storage_##X[sizeof(T)]
+
 #ifdef __cforall
 extern "C" {
-      extern "Cforall" {
+	extern "Cforall" {
 		extern __attribute__((aligned(128))) thread_local struct KernelThreadData {
 			struct $thread    * volatile this_thread;
@@ -45,37 +51,74 @@
 			#endif
 		} kernelTLS __attribute__ ((tls_model ( "initial-exec" )));
+
+		static inline uint64_t __tls_rand() {
+			#if defined(__SIZEOF_INT128__)
+				return __lehmer64( kernelTLS.rand_seed );
+			#else
+				return __xorshift64( kernelTLS.rand_seed );
+			#endif
+		}
 	}
 
-      #ifdef __ARM_ARCH
-            // function prototypes are only really used by these macros on ARM
-            void disable_global_interrupts();
-            void enable_global_interrupts();
+	#ifdef __ARM_ARCH
+		// function prototypes are only really used by these macros on ARM
+		void disable_global_interrupts();
+		void enable_global_interrupts();
 
-            #define TL_GET( member ) ( { __typeof__( kernelTLS.member ) target; \
-                  disable_global_interrupts(); \
-                  target = kernelTLS.member; \
-                  enable_global_interrupts(); \
-                  target; } )
-            #define TL_SET( member, value ) disable_global_interrupts(); \
-                  kernelTLS.member = value; \
-                  enable_global_interrupts();
-      #else
-            #define TL_GET( member ) kernelTLS.member
-            #define TL_SET( member, value ) kernelTLS.member = value;
-      #endif
+		#define TL_GET( member ) ( { __typeof__( kernelTLS.member ) target; \
+			disable_global_interrupts(); \
+			target = kernelTLS.member; \
+			enable_global_interrupts(); \
+			target; } )
+		#define TL_SET( member, value ) disable_global_interrupts(); \
+			kernelTLS.member = value; \
+			enable_global_interrupts();
+	#else
+		#define TL_GET( member ) kernelTLS.member
+		#define TL_SET( member, value ) kernelTLS.member = value;
+	#endif
 
-      extern void disable_interrupts();
-      extern void enable_interrupts_noPoll();
+	extern void disable_interrupts();
+	extern void enable_interrupts_noPoll();
 	extern void enable_interrupts( __cfaabi_dbg_ctx_param );
 
-	enum __Preemption_Reason { __NO_PREEMPTION, __ALARM_PREEMPTION, __POLL_PREEMPTION, __MANUAL_PREEMPTION };
+	extern "Cforall" {
+		extern void park( __cfaabi_dbg_ctx_param );
+		extern void unpark( struct $thread * this __cfaabi_dbg_ctx_param2 );
+		static inline struct $thread * active_thread () { return TL_GET( this_thread ); }
 
-      extern "Cforall" {
-            extern void park( __cfaabi_dbg_ctx_param );
-            extern void unpark( struct $thread * this __cfaabi_dbg_ctx_param2 );
-            static inline struct $thread * active_thread () { return TL_GET( this_thread ); }
+		extern bool force_yield( enum __Preemption_Reason );
 
-            extern bool force_yield( enum __Preemption_Reason );
-      }
+		static inline void yield() {
+			force_yield(__MANUAL_PREEMPTION);
+		}
+
+		// Yield: yield N times
+		static inline void yield( unsigned times ) {
+			for( times ) {
+				yield();
+			}
+		}
+
+		//-----------------------------------------------------------------------
+		// Statistics: called at the end of each thread to register statistics
+		#if !defined(__CFA_NO_STATISTICS__)
+			static inline struct __stats_t * __tls_stats() {
+				/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
+				/* paranoid */ verify( kernelTLS.this_stats );
+				return kernelTLS.this_stats;
+			}
+
+			#define __STATS__(in_kernel, ...) { \
+				if( !(in_kernel) ) disable_interrupts(); \
+				with( *__tls_stats() ) { \
+					__VA_ARGS__ \
+				} \
+				if( !(in_kernel) ) enable_interrupts( __cfaabi_dbg_ctx ); \
+			}
+		#else
+			#define __STATS__(in_kernel, ...)
+		#endif
+	}
 }
 #endif
Index: libcfa/src/concurrency/kernel/startup.cfa
===================================================================
--- libcfa/src/concurrency/kernel/startup.cfa	(revision e660761701e94bf2b2ae0130750e911d4122366f)
+++ libcfa/src/concurrency/kernel/startup.cfa	(revision 3e2b9c93dff631bddb430d876a83bcc3bafe9d16)
@@ -91,6 +91,4 @@
 //-----------------------------------------------------------------------------
 // Kernel storage
-#warning duplicated in preemption.cfa
-#define KERNEL_STORAGE(T,X) __attribute((aligned(__alignof__(T)))) static char storage_##X[sizeof(T)]
 KERNEL_STORAGE(cluster,	             mainCluster);
 KERNEL_STORAGE(processor,            mainProcessor);
Index: libcfa/src/concurrency/kernel_private.hfa
===================================================================
--- libcfa/src/concurrency/kernel_private.hfa	(revision e660761701e94bf2b2ae0130750e911d4122366f)
+++ libcfa/src/concurrency/kernel_private.hfa	(revision 3e2b9c93dff631bddb430d876a83bcc3bafe9d16)
@@ -21,7 +21,4 @@
 #include "alarm.hfa"
 #include "stats.hfa"
-
-#include "bits/random.hfa"
-
 
 //-----------------------------------------------------------------------------
@@ -89,12 +86,4 @@
 //-----------------------------------------------------------------------------
 // Utils
-static inline uint64_t __tls_rand() {
-	#if defined(__SIZEOF_INT128__)
-		return __lehmer64( kernelTLS.rand_seed );
-	#else
-		return __xorshift64( kernelTLS.rand_seed );
-	#endif
-}
-
 void doregister( struct cluster * cltr, struct $thread & thrd );
 void unregister( struct cluster * cltr, struct $thread & thrd );
@@ -102,15 +91,4 @@
 //-----------------------------------------------------------------------------
 // I/O
-void __kernel_io_startup     ();
-void __kernel_io_shutdown    ();
-
-static inline io_context * __get_io_context( void ) {
-	cluster * cltr = active_cluster();
-	/* paranoid */ verifyf( cltr, "No active cluster for io operation\n");
-	assertf( cltr->io.cnt > 0, "Cluster %p has no default io contexts and no context was specified\n", cltr );
-	/* paranoid */ verifyf( cltr->io.ctxs, "default io contexts for cluster %p are missing\n", cltr);
-	return &cltr->io.ctxs[ __tls_rand() % cltr->io.cnt ];
-}
-
 void ^?{}(io_context & this, bool );
 
@@ -285,30 +263,4 @@
 void ready_queue_shrink(struct cluster * cltr, int target);
 
-//-----------------------------------------------------------------------
-// IO user data
-struct __io_user_data_t {
-	int32_t result;
-	$thread * thrd;
-};
-
-//-----------------------------------------------------------------------
-// Statics call at the end of each thread to register statistics
-#if !defined(__CFA_NO_STATISTICS__)
-	static inline struct __stats_t * __tls_stats() {
-		/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
-		/* paranoid */ verify( kernelTLS.this_stats );
-		return kernelTLS.this_stats;
-	}
-
-	#define __STATS__(in_kernel, ...) { \
-		if( !(in_kernel) ) disable_interrupts(); \
-		with( *__tls_stats() ) { \
-			__VA_ARGS__ \
-		} \
-		if( !(in_kernel) ) enable_interrupts( __cfaabi_dbg_ctx ); \
-	}
-#else
-	#define __STATS__(in_kernel, ...)
-#endif
 
 // Local Variables: //
Index: libcfa/src/concurrency/preemption.cfa
===================================================================
--- libcfa/src/concurrency/preemption.cfa	(revision e660761701e94bf2b2ae0130750e911d4122366f)
+++ libcfa/src/concurrency/preemption.cfa	(revision 3e2b9c93dff631bddb430d876a83bcc3bafe9d16)
@@ -61,7 +61,4 @@
 #error unknown hardware architecture
 #endif
-
-#warning duplicated in startup.cfa
-#define KERNEL_STORAGE(T,X) __attribute((aligned(__alignof__(T)))) static char storage_##X[sizeof(T)]
 
 KERNEL_STORAGE(event_kernel_t, event_kernel);         // private storage for event kernel
Index: libcfa/src/concurrency/thread.hfa
===================================================================
--- libcfa/src/concurrency/thread.hfa	(revision e660761701e94bf2b2ae0130750e911d4122366f)
+++ libcfa/src/concurrency/thread.hfa	(revision 3e2b9c93dff631bddb430d876a83bcc3bafe9d16)
@@ -84,8 +84,4 @@
 
 //-----------------------------------------------------------------------------
-// Thread getters
-static inline struct $thread * active_thread () { return TL_GET( this_thread ); }
-
-//-----------------------------------------------------------------------------
 // Scheduler API
 
@@ -106,15 +102,4 @@
 bool force_yield( enum __Preemption_Reason );
 
-static inline void yield() {
-	force_yield(__MANUAL_PREEMPTION);
-}
-
-// Yield: yield N times
-static inline void yield( unsigned times ) {
-	for( times ) {
-		yield();
-	}
-}
-
 //----------
 // sleep: force thread to block and be rescheduled after Duration duration
