//
// Cforall Version 1.0.0 Copyright (C) 2020 University of Waterloo
//
// The contents of this file are covered under the licence agreement in the
// file "LICENCE" distributed with Cforall.
//
// io/types.hfa -- PRIVATE
// Types used by the I/O subsystem
//
// Author           : Thierry Delisle
// Created On       : Fri Jul 31 16:22:47 2020
// Last Modified By :
// Last Modified On :
// Update Count     :
//

#pragma once

#include <limits.h>

extern "C" {
	#include <linux/types.h>
}

#include "bits/locks.hfa"
#include "bits/queue.hfa"
#include "iofwd.hfa"
#include "kernel/fwd.hfa"

#if defined(CFA_HAVE_LINUX_IO_URING_H)
	#include "bits/sequence.hfa"
	#include "monitor.hfa"

	struct processor;
	monitor io_arbiter$;

	//-----------------------------------------------------------------------
	// Ring Data structure
	struct __sub_ring_t {
		struct {
			// Head and tail of the ring (associated with array)
			volatile __u32 * head;   // one past the last index consumed by the kernel
			volatile __u32 * tail;   // one past the last index visible to the kernel
			volatile __u32 released; // one past the last index released back to the free list

			// The actual kernel ring which uses head/tail;
			// its entries are indices into the sqes array
			__u32 * array;
		} kring;

		struct {
			volatile __u32 head;
			volatile __u32 tail;

			// The ring which contains free allocations;
			// its entries are indices into the sqes array
			__u32 * array;
		} free_ring;

		// number of sqes to submit on the next system call
		__u32 to_submit;

		// number of entries and the mask to go with it
		const __u32 * num;
		const __u32 * mask;

		// Submission flags, currently only IORING_SETUP_SQPOLL
		__u32 * flags;

		// number of sqes not submitted
		// From the documentation: [dropped] is incremented for each invalid
		// submission queue entry encountered in the ring buffer.
		__u32 * dropped;

		// A buffer of sqes (not the actual ring)
		struct io_uring_sqe * sqes;

		// The location and size of the mmap()ed area
		void * ring_ptr;
		size_t ring_sz;
	};

	struct __cmp_ring_t {
		volatile bool lock;

		unsigned id;

		unsigned long long ts;

		// Head and tail of the ring
		volatile __u32 * head;
		volatile __u32 * tail;

		// number of entries and the mask to go with it
		const __u32 * mask;
		const __u32 * num;

		// From the documentation: [overflow] is incremented each time a
		// completion event is dropped because the CQ ring is full
		__u32 * overflow;

		// the kernel ring
		volatile struct io_uring_cqe * cqes;

		// The location and size of the mmap()ed area
		void * ring_ptr;
		size_t ring_sz;
	};

	struct __outstanding_io {
		inline Colable;
		single_sem sem;
	};
	static inline __outstanding_io *& Next( __outstanding_io * n ) { return (__outstanding_io *)Next( (Colable *)n ); }

	struct __outstanding_io_queue {
		__spinlock_t lock;
		Queue(__outstanding_io) queue;
		volatile bool empty;
	};

	struct __external_io {
		inline __outstanding_io;
		__u32 * idxs;
		__u32 have;
		bool lazy;
	};

	struct __attribute__((aligned(64))) io_context$ {
		io_arbiter$ * arbiter;
		processor * proc;

		__outstanding_io_queue ext_sq;

		struct __sub_ring_t sq;
		struct __cmp_ring_t cq;
		__u32 ring_flags;
		int fd;
	};

	// Timestamp of this context's completion queue, or ULLONG_MAX if it is empty.
	static inline unsigned long long ts(io_context$ *& this) {
		const __u32 head = *this->cq.head;
		const __u32 tail = *this->cq.tail;

		if(head == tail) return ULLONG_MAX;

		return this->cq.ts;
	}
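
	// A minimal usage sketch for ts() above (an assumption, not part of this
	// header's interface): a processor scanning several I/O contexts can use
	// the timestamp to pick the completion queue that has been waiting longest.
	// The helper name __stalest_ctx and its parameters are hypothetical.
	static inline io_context$ * __stalest_ctx( io_context$ * ctxs[], unsigned cnt ) {
		io_context$ * best = 0p;
		unsigned long long best_ts = ULLONG_MAX;
		for( i; cnt ) {
			const unsigned long long t = ts( ctxs[i] );
			if( t < best_ts ) {  // empty queues report ULLONG_MAX and are never selected
				best_ts = t;
				best = ctxs[i];
			}
		}
		return best;
	}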

	struct __pending_alloc {
		inline __outstanding_io;
		__u32 * idxs;
		__u32 want;
		io_context$ * ctx;
	};

	monitor __attribute__((aligned(64))) io_arbiter$ {
		__outstanding_io_queue pending;
	};

	//-----------------------------------------------------------------------
	// Misc
	// Weirdly, some systems that do support io_uring don't actually define these
	#ifdef __alpha__
		/*
		 * alpha is the only exception, all other architectures
		 * have common numbers for new system calls.
		 */
		#ifndef __NR_io_uring_setup
			#define __NR_io_uring_setup           535
		#endif
		#ifndef __NR_io_uring_enter
			#define __NR_io_uring_enter           536
		#endif
		#ifndef __NR_io_uring_register
			#define __NR_io_uring_register        537
		#endif
	#else /* !__alpha__ */
		#ifndef __NR_io_uring_setup
			#define __NR_io_uring_setup           425
		#endif
		#ifndef __NR_io_uring_enter
			#define __NR_io_uring_enter           426
		#endif
		#ifndef __NR_io_uring_register
			#define __NR_io_uring_register        427
		#endif
	#endif

	// void __ioctx_prepare_block(io_context$ & ctx);
#endif
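
// A minimal sketch (an assumption, not part of this header) of how the
// __NR_io_uring_* numbers above are consumed: libc may ship no io_uring
// wrappers, so the runtime invokes the raw system calls itself via syscall(2)
// from <unistd.h>. The wrapper name __io_uring_setup is illustrative.
//
//     static int __io_uring_setup( __u32 entries, struct io_uring_params * params ) {
//         return syscall( __NR_io_uring_setup, entries, params );
//     }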