Changeset 7affcda for libcfa/src/concurrency
- Timestamp: Jun 3, 2022, 6:53:57 PM
- Branches: ADT, ast-experimental, master, pthread-emulation, qualifiedEnum
- Children: 153570d
- Parents: 90a8125
- File: 1 edited
libcfa/src/concurrency/io.cfa
--- libcfa/src/concurrency/io.cfa (r90a8125)
+++ libcfa/src/concurrency/io.cfa (r7affcda)
@@ -159,30 +159,39 @@
 
 const __u32 mask = *ctx->cq.mask;
+const __u32 num = *ctx->cq.num;
 unsigned long long ts_prev = ctx->cq.ts;
-
-// re-read the head and tail in case it already changed.
-const __u32 head = *ctx->cq.head;
-const __u32 tail = *ctx->cq.tail;
-const __u32 count = tail - head;
-__STATS__( false, io.calls.drain++; io.calls.completed += count; )
-
-for(i; count) {
-    unsigned idx = (head + i) & mask;
-    volatile struct io_uring_cqe & cqe = ctx->cq.cqes[idx];
-
-    /* paranoid */ verify(&cqe);
-
-    struct io_future_t * future = (struct io_future_t *)(uintptr_t)cqe.user_data;
-    // __cfadbg_print_safe( io, "Kernel I/O : Syscall completed : cqe %p, result %d for %p\n", &cqe, cqe.res, future );
-
-    __kernel_unpark( fulfil( *future, cqe.res, false ), UNPARK_LOCAL );
-}
-
-unsigned long long ts_next = ctx->cq.ts = rdtscl();
-
-// Mark to the kernel that the cqe has been seen
-// Ensure that the kernel only sees the new value of the head index after the CQEs have been read.
-__atomic_store_n( ctx->cq.head, head + count, __ATOMIC_SEQ_CST );
-ctx->proc->idle_wctx.drain_time = ts_next;
+unsigned long long ts_next;
+
+// We might need to do this multiple times if more events completed than can fit in the queue.
+for() {
+    // re-read the head and tail in case it already changed.
+    const __u32 head = *ctx->cq.head;
+    const __u32 tail = *ctx->cq.tail;
+    const __u32 count = tail - head;
+    __STATS__( false, io.calls.drain++; io.calls.completed += count; )
+
+    for(i; count) {
+        unsigned idx = (head + i) & mask;
+        volatile struct io_uring_cqe & cqe = ctx->cq.cqes[idx];
+
+        /* paranoid */ verify(&cqe);
+
+        struct io_future_t * future = (struct io_future_t *)(uintptr_t)cqe.user_data;
+        // __cfadbg_print_safe( io, "Kernel I/O : Syscall completed : cqe %p, result %d for %p\n", &cqe, cqe.res, future );
+
+        __kernel_unpark( fulfil( *future, cqe.res, false ), UNPARK_LOCAL );
+    }
+
+    ts_next = ctx->cq.ts = rdtscl();
+
+    // Mark to the kernel that the cqe has been seen
+    // Ensure that the kernel only sees the new value of the head index after the CQEs have been read.
+    __atomic_store_n( ctx->cq.head, head + count, __ATOMIC_SEQ_CST );
+    ctx->proc->idle_wctx.drain_time = ts_next;
+
+    if(likely(count < num)) break;
+
+    ioring_syscsll( *ctx, 0, IORING_ENTER_GETEVENTS);
+}
 
 __cfadbg_print_safe(io, "Kernel I/O : %u completed age %llu\n", count, ts_next);
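What the change does: draining the completion queue now runs in a loop. If one pass harvests as many CQEs as the ring can hold (count == num), the ring may have been full while further completions were still pending, so the code re-enters the kernel with IORING_ENTER_GETEVENTS and drains again. The standalone C sketch below models only that control flow with a mock completion ring; the names mock_cq, mock_flush, handle_completion and CQ_NUM are hypothetical and are not part of libcfa or io_uring.

/* Hedged, standalone sketch of the drain-in-a-loop pattern (not libcfa code). */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define CQ_NUM  4u                    /* mock ring capacity (power of two)      */
#define CQ_MASK (CQ_NUM - 1u)

struct mock_cqe { uint64_t user_data; int32_t res; };

struct mock_cq {
    _Atomic uint32_t head;            /* consumer index, published to the "kernel" */
    _Atomic uint32_t tail;            /* producer index, advanced by the "kernel"  */
    struct mock_cqe  cqes[CQ_NUM];
    unsigned         pending;         /* completions that did not fit in the ring  */
};

/* Stand-in for io_uring_enter(..., IORING_ENTER_GETEVENTS): the "kernel" moves
 * overflowed completions into the ring once slots are free. */
static void mock_flush(struct mock_cq * cq) {
    uint32_t head = atomic_load(&cq->head);
    uint32_t tail = atomic_load(&cq->tail);
    while (cq->pending > 0 && tail - head < CQ_NUM) {
        cq->cqes[tail & CQ_MASK] = (struct mock_cqe){ .user_data = tail, .res = 0 };
        tail++; cq->pending--;
    }
    atomic_store(&cq->tail, tail);
}

static void handle_completion(const struct mock_cqe * cqe) {
    printf("completed request %llu (res=%d)\n",
           (unsigned long long)cqe->user_data, cqe->res);
}

/* Drain the ring, looping as long as a pass harvested a completely full ring:
 * a full ring means more completions may still be waiting on the kernel side. */
static void drain(struct mock_cq * cq) {
    for (;;) {
        uint32_t head  = atomic_load(&cq->head);
        uint32_t tail  = atomic_load(&cq->tail);
        uint32_t count = tail - head;

        for (uint32_t i = 0; i < count; i++)
            handle_completion(&cq->cqes[(head + i) & CQ_MASK]);

        /* Publish the new head only after the CQEs have been read. */
        atomic_store_explicit(&cq->head, head + count, memory_order_seq_cst);

        if (count < CQ_NUM) break;    /* ring was not full: nothing left behind   */

        mock_flush(cq);               /* ask the "kernel" for the overflowed ones */
    }
}

int main(void) {
    struct mock_cq cq = { .pending = 6 };  /* 6 completions, ring holds only 4 */
    mock_flush(&cq);                       /* first batch fills the ring       */
    drain(&cq);                            /* drains 4, loops, drains 2 more   */
    return 0;
}

In the changeset itself, the role of mock_flush is played by ioring_syscsll( *ctx, 0, IORING_ENTER_GETEVENTS ), and the __ATOMIC_SEQ_CST store of the head index is what publishes the consumed entries to the kernel, freeing slots for any completions that had overflowed.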