source: libcfa/src/concurrency/io/types.hfa @ 454f478

Last change on this file since 454f478 was 454f478, checked in by Thierry Delisle <tdelisle@…>, 3 years ago

Re-arranged and commented low-level headers.
Main goal was better support of the weakso locks that are coming.

//
// Cforall Version 1.0.0 Copyright (C) 2020 University of Waterloo
//
// The contents of this file are covered under the licence agreement in the
// file "LICENCE" distributed with Cforall.
//
// io/types.hfa -- PRIVATE
// Types used by the I/O subsystem
//
// Author           : Thierry Delisle
// Created On       : Fri Jul 31 16:22:47 2020
// Last Modified By :
// Last Modified On :
// Update Count     :
//

#pragma once

extern "C" {
	#include <linux/types.h>
}

#include "kernel/fwd.hfa"

#if defined(CFA_HAVE_LINUX_IO_URING_H)
	#define LEADER_LOCK
	struct __leaderlock_t {
		struct $thread * volatile value;	// ($thread) next_leader | (bool:1) is_locked
	};

	static inline void ?{}( __leaderlock_t & this ) { this.value = 0p; }

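	// Illustrative sketch, not part of the original header: the lock word packs the
	// next leader's $thread pointer and a 1-bit "is locked" flag by borrowing the
	// pointer's low bit, which assumes $thread objects are aligned to at least 2
	// bytes. The helper names are hypothetical and unsigned long is assumed to be
	// pointer-sized.
	static inline bool __sketch_leader_is_locked( __leaderlock_t & lock ) {
		return ((unsigned long)lock.value) & 1ul;
	}
	static inline struct $thread * __sketch_leader_next( __leaderlock_t & lock ) {
		return (struct $thread *)((unsigned long)lock.value & ~1ul);
	}
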
	//-----------------------------------------------------------------------
	// Ring Data structure
	struct __submition_data {
		// Head and tail of the ring (associated with array)
		volatile __u32 * head;
		volatile __u32 * tail;
		volatile __u32 prev_head;

		// The actual kernel ring, indexed by head/tail;
		// its entries are indexes into the sqes array
		__u32 * array;

		// number of entries and mask to go with it
		const __u32 * num;
		const __u32 * mask;

		// Submission-queue flags, written by the kernel (e.g. IORING_SQ_NEED_WAKEUP)
		__u32 * flags;

		// number of sqes dropped by the kernel because they were invalid
		__u32 * dropped;

		// Like head/tail but not seen by the kernel
		volatile __u32 * ready;
		__u32 ready_cnt;
		__u32 prev_ready;

		#if defined(LEADER_LOCK)
			__leaderlock_t submit_lock;
		#else
			__spinlock_t submit_lock;
		#endif
		__spinlock_t  release_lock;

		// A buffer of sqes (not the actual ring)
		volatile struct io_uring_sqe * sqes;

		// The location and size of the mmapped area
		void * ring_ptr;
		size_t ring_sz;
	};
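
	// Illustrative sketch, not part of the original header: how the submission ring
	// fields fit together. An sqe is filled in the sqes buffer, its index is published
	// in the kernel-visible slot selected by tail & mask, and tail is advanced so the
	// kernel can see it. The helper name and the plain (non-atomic) stores are
	// simplifications for illustration only.
	static inline void __sketch_publish_sqe( struct __submition_data & sq, __u32 idx ) {
		__u32 tail = *sq.tail;                  // current producer position
		sq.array[ tail & (*sq.mask) ] = idx;    // point the ring slot at sqes[idx]
		*sq.tail = tail + 1;                    // publish; the real code uses a release store
	}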

	struct __completion_data {
		// Head and tail of the ring
		volatile __u32 * head;
		volatile __u32 * tail;

		// number of entries and mask to go with it
		const __u32 * mask;
		const __u32 * num;

		// number of completions dropped because the CQ ring was full
		__u32 * overflow;

		// the kernel ring
		volatile struct io_uring_cqe * cqes;

		// The location and size of the mmapped area
		void * ring_ptr;
		size_t ring_sz;
	};
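
	// Illustrative sketch, not part of the original header: draining the completion
	// ring. Entries between head and tail are ready; each is read from
	// cqes[head & mask] and head is advanced to hand the slot back to the kernel.
	// The helper name and the plain (non-atomic) loads/stores are simplifications.
	static inline void __sketch_drain_cqes( struct __completion_data & cq ) {
		__u32 head = *cq.head;
		__u32 tail = *cq.tail;                  // the real code uses an acquire load here
		for ( ; head != tail; head++ ) {
			volatile struct io_uring_cqe * cqe = &cq.cqes[ head & (*cq.mask) ];
			(void)cqe;                          // a real consumer completes cqe->user_data with cqe->res
		}
		*cq.head = head;                        // release the consumed slots back to the kernel
	}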

	struct __io_data {
		struct __submition_data submit_q;
		struct __completion_data completion_q;
		__u32 ring_flags;
		int fd;
		int efd;
		bool eager_submits:1;
		bool poller_submits:1;
	};

	//-----------------------------------------------------------------------
	// Misc
	// Weirdly, some systems that do support io_uring don't actually define these
	#ifdef __alpha__
		/*
		* alpha is the only exception, all other architectures
		* have common numbers for new system calls.
		*/
		#ifndef __NR_io_uring_setup
			#define __NR_io_uring_setup           535
		#endif
		#ifndef __NR_io_uring_enter
			#define __NR_io_uring_enter           536
		#endif
		#ifndef __NR_io_uring_register
			#define __NR_io_uring_register        537
		#endif
	#else /* !__alpha__ */
		#ifndef __NR_io_uring_setup
			#define __NR_io_uring_setup           425
		#endif
		#ifndef __NR_io_uring_enter
			#define __NR_io_uring_enter           426
		#endif
		#ifndef __NR_io_uring_register
			#define __NR_io_uring_register        427
		#endif
	#endif

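	// Illustrative sketch, not part of the original header: these numbers exist so the
	// I/O subsystem can reach io_uring through the raw syscall(2) interface without
	// depending on liburing. The wrapper names below are hypothetical; the argument
	// lists follow io_uring_setup(2) and io_uring_enter(2), with the params pointer
	// left as void * to avoid pulling in <linux/io_uring.h> here.
	extern "C" {
		long syscall( long number, ... );       // raw syscall entry point, normally from <unistd.h>
	}
	static inline int __sketch_io_uring_setup( unsigned entries, void * params ) {
		return (int)syscall( __NR_io_uring_setup, entries, params );
	}
	static inline int __sketch_io_uring_enter( int fd, unsigned to_submit, unsigned min_complete, unsigned flags ) {
		return (int)syscall( __NR_io_uring_enter, fd, to_submit, min_complete, flags, (void*)0, (size_t)0 );
	}
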
	struct $io_ctx_thread;
	void __ioctx_register($io_ctx_thread & ctx);
	void __ioctx_unregister($io_ctx_thread & ctx);
	void __ioctx_prepare_block($io_ctx_thread & ctx);
	void __sqe_clean( volatile struct io_uring_sqe * sqe );
#endif

//-----------------------------------------------------------------------
// IO user data
struct io_future_t {
	future_t self;
	__s32 result;
};

static inline {
	// Fulfil the future, recording the I/O operation's result
	bool fulfil( io_future_t & this, __s32 result ) {
		this.result = result;
		return fulfil(this.self);
	}

	// Wait for the future to be fulfilled
	bool wait( io_future_t & this ) {
		return wait(this.self);
	}
}
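
//-----------------------------------------------------------------------
// Illustrative sketch, not part of the original header: intended use of io_future_t.
// The submitting side blocks on the future; the completion path (when the matching
// cqe is reaped) calls fulfil() with the operation's result, waking the waiter.
// The function name below is hypothetical.
static inline __s32 __sketch_await_io( io_future_t & f ) {
	wait( f );            // block until the operation has completed
	return f.result;      // the result recorded by fulfil(), e.g. the cqe's res field
}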