File: 1 edited

  • libcfa/src/concurrency/io/types.hfa

--- libcfa/src/concurrency/io/types.hfa  (r26544f9)
+++ libcfa/src/concurrency/io/types.hfa  (ra55472cc)

@@ -37,9 +37,5 @@
 //-----------------------------------------------------------------------
 // Ring Data structure
-// represent the io_uring submission ring which contains operations that will be sent to io_uring for processing
-struct __sub_ring_t {
-    // lock needed because remote processors might need to flush the instance
-    __spinlock_t lock;
-
+struct __sub_ring_t {
     struct {
         // Head and tail of the ring (associated with array)
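The hunk above only shows the head and tail pair of the submission ring, so it is worth spelling out how a ring like __sub_ring_t is normally filled: the application writes an sqe index into the slot at tail & mask and then publishes it with a release store of the new tail, while the kernel consumes from head. The following is a minimal, hypothetical C sketch of that publish step (sq_view and sq_push are made-up names, not libcfa API); the per-instance __spinlock_t on the r26544f9 side would serialize exactly this step when a remote processor flushes the ring on the owner's behalf.

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>

    /* Hypothetical mirror of the mmap'ed io_uring submission ring:
     * the kernel consumes at head, the application produces at tail,
     * and array holds indices into the separately mapped sqe array. */
    struct sq_view {
        _Atomic uint32_t * head;   /* advanced by the kernel */
        _Atomic uint32_t * tail;   /* advanced by the application */
        uint32_t           mask;   /* ring_entries - 1 */
        uint32_t         * array;  /* slots holding sqe indices */
    };

    /* Publish one previously filled sqe, identified by idx.
     * Returns false if the ring is currently full. */
    static bool sq_push(struct sq_view * sq, uint32_t idx) {
        uint32_t head = atomic_load_explicit(sq->head, memory_order_acquire);
        uint32_t tail = atomic_load_explicit(sq->tail, memory_order_relaxed);
        if(tail - head > sq->mask) return false;      /* no free slot */
        sq->array[tail & sq->mask] = idx;             /* fill the slot */
        atomic_store_explicit(sq->tail, tail + 1, memory_order_release);
        return true;                                  /* now visible to the kernel */
    }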
@@ -62,5 +58,5 @@
 
     // number of sqes to submit on next system call.
-    volatile __u32 to_submit;
+    __u32 to_submit;
 
     // number of entries and mask to go with it
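For context on what to_submit feeds into: the counter is eventually spent on a single io_uring_enter system call, after which accumulation starts over. The helper below is a hypothetical illustration (flush_submissions is not a libcfa function); whether the counter needs the volatile qualifier that only the r26544f9 side has depends on whether other processors inspect it when deciding to flush, which this sketch does not model.

    #include <unistd.h>
    #include <stddef.h>
    #include <sys/syscall.h>

    /* Hand the accumulated submissions to the kernel in one system call.
     * fd and to_submit correspond to io_context$.fd and
     * __sub_ring_t.to_submit in the header above. */
    static long flush_submissions(int fd, unsigned to_submit) {
        if(to_submit == 0) return 0;                  /* nothing queued */
        return syscall(__NR_io_uring_enter, fd, to_submit,
                       0u /* min_complete */, 0u /* flags */,
                       (void *)0 /* sigset */, (size_t)0 /* sigset size */);
    }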
@@ -81,18 +77,11 @@
     void * ring_ptr;
     size_t ring_sz;
-
-    // for debug purposes, whether or not the last flush was due to a arbiter flush
-    bool last_external;
 };
 
-// represent the io_uring completion ring which contains operations that have completed
 struct __cmp_ring_t {
-    // needed because remote processors can help drain the buffer
-    volatile bool try_lock;
+    volatile bool lock;
 
-    // id of the ring, used for the helping/topology algorithms
     unsigned id;
 
-    // timestamp from last time it was drained
     unsigned long long ts;
 
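The try_lock flag and the ts timestamp go together: helping only works if a remote processor can opportunistically claim a completion ring, drain it, and record when that happened, without ever blocking on a ring its owner is already working on. A minimal sketch of that pattern, assuming a plain C11 atomic flag (cq_guard, try_help_drain and drain_cqes are illustrative names, not the actual CFA code):

    #include <stdatomic.h>
    #include <stdbool.h>

    /* Hypothetical guard around a completion ring. */
    struct cq_guard {
        _Atomic bool       lock;   /* the try_lock / lock field above */
        unsigned long long ts;     /* last time the ring was drained */
    };

    /* Try to help drain someone else's completion ring; never blocks.
     * drain_cqes stands in for whatever walks the cq from head to tail. */
    static bool try_help_drain(struct cq_guard * cq, void * ring,
                               void (*drain_cqes)(void *),
                               unsigned long long now) {
        /* cheap read first, then the actual test-and-set */
        if(atomic_load_explicit(&cq->lock, memory_order_relaxed)) return false;
        if(atomic_exchange_explicit(&cq->lock, true, memory_order_acquire)) return false;

        drain_cqes(ring);          /* consume completed operations */
        cq->ts = now;              /* remember when this ring was last drained */

        atomic_store_explicit(&cq->lock, false, memory_order_release);
        return true;
    }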
@@ -116,69 +105,40 @@
 };
 
-// struct representing an io operation that still needs processing
-// actual operations are expected to inherit from this
 struct __outstanding_io {
-    // intrusive link fields
     inline Colable;
-
-    // primitive on which to block until the io is processed
     oneshot waitctx;
 };
 static inline __outstanding_io *& Next( __outstanding_io * n ) { return (__outstanding_io *)Next( (Colable *)n ); }
 
-// queue of operations that are outstanding
 struct __outstanding_io_queue {
-    // spinlock for protection
-    // TODO: changing to a lock that blocks, I haven't examined whether it should be a kernel or user lock
     __spinlock_t lock;
-
-    // the actual queue
     Queue(__outstanding_io) queue;
-
-    // volatile used to avoid the need for taking the lock if it's empty
     volatile bool empty;
 };
 
-// struct representing an operation that was submitted
 struct __external_io {
-    // inherits from outstanding io
     inline __outstanding_io;
-
-    // pointer and count to an array of ids to be submitted
     __u32 * idxs;
     __u32 have;
-
-    // whether or not these can be accumulated before flushing the buffer
     bool lazy;
 };
 
-// complete io_context, contains all the data for io submission and completion
+
 struct __attribute__((aligned(64))) io_context$ {
-    // arbiter, used in cases where threads for migrated at unfortunate moments
     io_arbiter$ * arbiter;
-
-    // which prcessor the context is tied to
     struct processor * proc;
 
-    // queue of io submissions that haven't beeen processed.
     __outstanding_io_queue ext_sq;
 
-    // io_uring ring data structures
     struct __sub_ring_t sq;
     struct __cmp_ring_t cq;
-
-    // flag the io_uring rings where created with
     __u32 ring_flags;
-
-    // file descriptor that identifies the io_uring instance
     int fd;
 };
 
-// short hand to check when the io_context was last processed (io drained)
 static inline unsigned long long ts(io_context$ *& this) {
     const __u32 head = *this->cq.head;
     const __u32 tail = *this->cq.tail;
 
-    // if there is no pending completions, just pretend it's infinetely recent
     if(head == tail) return ULLONG_MAX;
 
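One detail from this hunk deserves a concrete shape: the r26544f9 comment on __outstanding_io_queue says the volatile empty flag exists to avoid taking the lock when the queue is empty. The double-check that implies looks roughly like the sketch below, written with pthreads and C11 atomics purely for illustration (ext_queue and drain_if_needed are not libcfa names). An enqueue would take the same lock, link the item, and clear empty; how a briefly stale reading is tolerated depends on the surrounding submission path, which this sketch does not model.

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <pthread.h>

    /* Hypothetical stand-in for __outstanding_io_queue: the hot path can
     * skip the lock entirely when nothing is queued. */
    struct ext_queue {
        pthread_mutex_t lock;
        void *          pending;   /* stand-in for Queue(__outstanding_io) */
        _Atomic bool    empty;     /* mirrors the queue, updated under the lock */
    };

    /* Process outstanding submissions only if someone actually queued one. */
    static void drain_if_needed(struct ext_queue * q, void (*process)(void *)) {
        /* unlocked fast path: usually true, so usually no lock traffic */
        if(atomic_load_explicit(&q->empty, memory_order_acquire)) return;

        pthread_mutex_lock(&q->lock);
        while(q->pending) {
            void * item = q->pending;
            q->pending = 0;        /* stand-in for unlinking from the queue */
            process(item);
        }
        atomic_store_explicit(&q->empty, true, memory_order_release);
        pthread_mutex_unlock(&q->lock);
    }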
@@ -186,20 +146,12 @@
 }
 
-// structure represeting allocations that couldn't succeed locally
 struct __pending_alloc {
-    // inherit from outstanding io
     inline __outstanding_io;
-
-    // array and size of the desired allocation
     __u32 * idxs;
     __u32 want;
-
-    // output param, the context the io was allocated from
     io_context$ * ctx;
 };
 
-// arbiter that handles cases where the context tied to the local processor is unable to satisfy the io
 monitor __attribute__((aligned(64))) io_arbiter$ {
-    // contains a queue of io for pending allocations
     __outstanding_io_queue pending;
 };
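Finally, the ULLONG_MAX case in ts() is what makes the shorthand usable as a priority: a completion ring with no pending completions looks infinitely recent, so any helping or arbiter logic that prefers the most stale ring will naturally skip it. The selection loop below is only a guess at how such a consumer might use the value (io_ctx, ctx_ts and pick_stalest are made-up stand-ins for io_context$ and the ts shorthand above):

    #include <limits.h>
    #include <stddef.h>

    struct io_ctx;                                      /* stand-in for io_context$ */
    extern unsigned long long ctx_ts(struct io_ctx *);  /* stand-in for ts() above */

    /* Pick the context whose completion ring has gone undrained the longest;
     * returns NULL when every ring is empty (all report ULLONG_MAX). */
    static struct io_ctx * pick_stalest(struct io_ctx ** ctxs, size_t count) {
        struct io_ctx * best = NULL;
        unsigned long long best_ts = ULLONG_MAX;
        for(size_t i = 0; i < count; i++) {
            unsigned long long t = ctx_ts(ctxs[i]);
            if(t < best_ts) {                           /* older timestamp = more stale */
                best_ts = t;
                best = ctxs[i];
            }
        }
        return best;
    }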