Changeset 8bee858 for libcfa/src/concurrency/io.cfa
- Timestamp:
- Aug 15, 2022, 11:19:28 AM (23 months ago)
- Branches:
- ADT, ast-experimental, master, pthread-emulation
- Children:
- d93ea1d
- Parents:
- 41a6a78
- Files:
- 1 edited
Legend:
- Unmodified
- Added
- Removed
-
libcfa/src/concurrency/io.cfa
r41a6a78 r8bee858 83 83 }; 84 84 85 static $io_context * __ioarbiter_allocate( $io_arbiter& this, __u32 idxs[], __u32 want );86 static void __ioarbiter_submit( $io_context* , __u32 idxs[], __u32 have, bool lazy );87 static void __ioarbiter_flush ( $io_context& );88 static inline void __ioarbiter_notify( $io_context& ctx );85 static io_context$ * __ioarbiter_allocate( io_arbiter$ & this, __u32 idxs[], __u32 want ); 86 static void __ioarbiter_submit( io_context$ * , __u32 idxs[], __u32 have, bool lazy ); 87 static void __ioarbiter_flush ( io_context$ & ); 88 static inline void __ioarbiter_notify( io_context$ & ctx ); 89 89 //============================================================================================= 90 90 // I/O Polling 91 91 //============================================================================================= 92 static inline unsigned __flush( struct $io_context& );93 static inline __u32 __release_sqes( struct $io_context& );92 static inline unsigned __flush( struct io_context$ & ); 93 static inline __u32 __release_sqes( struct io_context$ & ); 94 94 extern void __kernel_unpark( thread$ * thrd, unpark_hint ); 95 95 96 static void ioring_syscsll( struct $io_context& ctx, unsigned int min_comp, unsigned int flags ) {96 static void ioring_syscsll( struct io_context$ & ctx, unsigned int min_comp, unsigned int flags ) { 97 97 __STATS__( true, io.calls.flush++; ) 98 98 int ret; … … 132 132 } 133 133 134 static bool try_acquire( $io_context* ctx ) __attribute__((nonnull(1))) {134 static bool try_acquire( io_context$ * ctx ) __attribute__((nonnull(1))) { 135 135 /* paranoid */ verify( ! __preemption_enabled() ); 136 136 /* paranoid */ verify( ready_schedule_islocked() ); … … 153 153 } 154 154 155 static bool __cfa_do_drain( $io_context* ctx, cluster * cltr ) __attribute__((nonnull(1, 2))) {155 static bool __cfa_do_drain( io_context$ * ctx, cluster * cltr ) __attribute__((nonnull(1, 2))) { 156 156 /* paranoid */ verify( ! 
__preemption_enabled() ); 157 157 /* paranoid */ verify( ready_schedule_islocked() ); … … 213 213 214 214 cluster * const cltr = proc->cltr; 215 $io_context* const ctx = proc->io.ctx;215 io_context$ * const ctx = proc->io.ctx; 216 216 /* paranoid */ verify( cltr ); 217 217 /* paranoid */ verify( ctx ); … … 278 278 /* paranoid */ verify( proc->io.ctx ); 279 279 280 $io_context& ctx = *proc->io.ctx;280 io_context$ & ctx = *proc->io.ctx; 281 281 282 282 __ioarbiter_flush( ctx ); … … 312 312 // Allocation 313 313 // for user's convenience fill the sqes from the indexes 314 static inline void __fill(struct io_uring_sqe * out_sqes[], __u32 want, __u32 idxs[], struct $io_context* ctx) {314 static inline void __fill(struct io_uring_sqe * out_sqes[], __u32 want, __u32 idxs[], struct io_context$ * ctx) { 315 315 struct io_uring_sqe * sqes = ctx->sq.sqes; 316 316 for(i; want) { … … 322 322 // Try to directly allocate from the a given context 323 323 // Not thread-safe 324 static inline bool __alloc(struct $io_context* ctx, __u32 idxs[], __u32 want) {324 static inline bool __alloc(struct io_context$ * ctx, __u32 idxs[], __u32 want) { 325 325 __sub_ring_t & sq = ctx->sq; 326 326 const __u32 mask = *sq.mask; … … 349 349 // for convenience, return both the index and the pointer to the sqe 350 350 // sqe == &sqes[idx] 351 struct $io_context* cfa_io_allocate(struct io_uring_sqe * sqes[], __u32 idxs[], __u32 want) libcfa_public {351 struct io_context$ * cfa_io_allocate(struct io_uring_sqe * sqes[], __u32 idxs[], __u32 want) libcfa_public { 352 352 // __cfadbg_print_safe(io, "Kernel I/O : attempting to allocate %u\n", want); 353 353 354 354 disable_interrupts(); 355 355 processor * proc = __cfaabi_tls.this_processor; 356 $io_context* ctx = proc->io.ctx;356 io_context$ * ctx = proc->io.ctx; 357 357 /* paranoid */ verify( __cfaabi_tls.this_processor ); 358 358 /* paranoid */ verify( ctx ); … … 378 378 enable_interrupts(); 379 379 380 $io_arbiter* ioarb = proc->cltr->io.arbiter;380 
io_arbiter$ * ioarb = proc->cltr->io.arbiter; 381 381 /* paranoid */ verify( ioarb ); 382 382 383 383 // __cfadbg_print_safe(io, "Kernel I/O : falling back on arbiter for allocation\n"); 384 384 385 struct $io_context* ret = __ioarbiter_allocate(*ioarb, idxs, want);385 struct io_context$ * ret = __ioarbiter_allocate(*ioarb, idxs, want); 386 386 387 387 // __cfadbg_print_safe(io, "Kernel I/O : slow allocation completed from ring %d\n", ret->fd); … … 393 393 //============================================================================================= 394 394 // submission 395 static inline void __submit_only( struct $io_context* ctx, __u32 idxs[], __u32 have) {395 static inline void __submit_only( struct io_context$ * ctx, __u32 idxs[], __u32 have) { 396 396 // We can proceed to the fast path 397 397 // Get the right objects … … 414 414 } 415 415 416 static inline void __submit( struct $io_context* ctx, __u32 idxs[], __u32 have, bool lazy) {416 static inline void __submit( struct io_context$ * ctx, __u32 idxs[], __u32 have, bool lazy) { 417 417 __sub_ring_t & sq = ctx->sq; 418 418 __submit_only(ctx, idxs, have); … … 428 428 } 429 429 430 void cfa_io_submit( struct $io_context* inctx, __u32 idxs[], __u32 have, bool lazy ) __attribute__((nonnull (1))) libcfa_public {430 void cfa_io_submit( struct io_context$ * inctx, __u32 idxs[], __u32 have, bool lazy ) __attribute__((nonnull (1))) libcfa_public { 431 431 // __cfadbg_print_safe(io, "Kernel I/O : attempting to submit %u (%s)\n", have, lazy ? 
"lazy" : "eager"); 432 432 … … 434 434 __STATS__( true, if(!lazy) io.submit.eagr += 1; ) 435 435 processor * proc = __cfaabi_tls.this_processor; 436 $io_context* ctx = proc->io.ctx;436 io_context$ * ctx = proc->io.ctx; 437 437 /* paranoid */ verify( __cfaabi_tls.this_processor ); 438 438 /* paranoid */ verify( ctx ); … … 465 465 // by io_uring 466 466 // This cannot be done by multiple threads 467 static __u32 __release_sqes( struct $io_context& ctx ) {467 static __u32 __release_sqes( struct io_context$ & ctx ) { 468 468 const __u32 mask = *ctx.sq.mask; 469 469 … … 538 538 } 539 539 540 static $io_context * __ioarbiter_allocate( $io_arbiter& this, __u32 idxs[], __u32 want ) {540 static io_context$ * __ioarbiter_allocate( io_arbiter$ & this, __u32 idxs[], __u32 want ) { 541 541 // __cfadbg_print_safe(io, "Kernel I/O : arbiter allocating\n"); 542 542 … … 557 557 } 558 558 559 static void __ioarbiter_notify( $io_arbiter & this, $io_context* ctx ) {559 static void __ioarbiter_notify( io_arbiter$ & this, io_context$ * ctx ) { 560 560 /* paranoid */ verify( !empty(this.pending.queue) ); 561 561 … … 587 587 } 588 588 589 static void __ioarbiter_notify( $io_context& ctx ) {589 static void __ioarbiter_notify( io_context$ & ctx ) { 590 590 if(!empty( ctx.arbiter->pending )) { 591 591 __ioarbiter_notify( *ctx.arbiter, &ctx ); … … 594 594 595 595 // Simply append to the pending 596 static void __ioarbiter_submit( $io_context* ctx, __u32 idxs[], __u32 have, bool lazy ) {596 static void __ioarbiter_submit( io_context$ * ctx, __u32 idxs[], __u32 have, bool lazy ) { 597 597 __cfadbg_print_safe(io, "Kernel I/O : submitting %u from the arbiter to context %u\n", have, ctx->fd); 598 598 … … 618 618 } 619 619 620 static void __ioarbiter_flush( $io_context& ctx ) {620 static void __ioarbiter_flush( io_context$ & ctx ) { 621 621 if(!empty( ctx.ext_sq )) { 622 622 __STATS__( false, io.flush.external += 1; ) … … 642 642 #if defined(CFA_WITH_IO_URING_IDLE) 643 643 bool 
__kernel_read(processor * proc, io_future_t & future, iovec & iov, int fd) { 644 $io_context* ctx = proc->io.ctx;644 io_context$ * ctx = proc->io.ctx; 645 645 /* paranoid */ verify( ! __preemption_enabled() ); 646 646 /* paranoid */ verify( proc == __cfaabi_tls.this_processor );
Note: See TracChangeset for help on using the changeset viewer.