Changeset d611995
- Timestamp:
- Jan 18, 2021, 12:42:41 PM (4 years ago)
- Branches:
- ADT, arm-eh, ast-experimental, enum, forall-pointer-decay, jacob/cs343-translation, master, new-ast-unique-expr, pthread-emulation, qualifiedEnum
- Children:
- 78a8440
- Parents:
- 6f94958
- Location:
- libcfa/src/concurrency
- Files:
-
- 3 edited
Legend:
- Unmodified
- Added
- Removed
-
libcfa/src/concurrency/io.cfa
r6f94958 rd611995 318 318 319 319 __cfadbg_print_safe(io_core, "Kernel I/O : Fast poller %d (%p) stopping\n", this.ring->fd, &this); 320 321 __ioctx_unregister( this ); 320 322 } 321 323 -
libcfa/src/concurrency/io/setup.cfa
r6f94958 rd611995 113 113 114 114 static struct { 115 pthread_t thrd; // pthread handle to io poller thread 116 void * stack; // pthread stack for io poller thread 117 int epollfd; // file descriptor to the epoll instance 118 volatile bool run; // Whether or not to continue 115 pthread_t thrd; // pthread handle to io poller thread 116 void * stack; // pthread stack for io poller thread 117 int epollfd; // file descriptor to the epoll instance 118 volatile bool run; // Whether or not to continue 119 volatile size_t epoch; // Epoch used for memory reclamation 119 120 } iopoll; 120 121 … … 131 132 iopoll.run = true; 132 133 iopoll.stack = __create_pthread( &iopoll.thrd, iopoll_loop, 0p ); 134 iopoll.epoch = 0; 133 135 } 134 136 … … 174 176 while( iopoll.run ) { 175 177 __cfadbg_print_safe(io_core, "Kernel I/O - epoll : waiting on io_uring contexts\n"); 178 179 // increment the epoch to notify any deleters we are starting a new cycle 180 __atomic_fetch_add(&iopoll.epoch, 1, __ATOMIC_SEQ_CST); 176 181 177 182 // Wait for events … … 496 501 // I/O Context Sleep 497 502 //============================================================================================= 498 #define IOEVENTS EPOLLIN | EPOLLONESHOT499 500 503 static inline void __ioctx_epoll_ctl($io_ctx_thread & ctx, int op, const char * error) { 501 504 struct epoll_event ev; 502 ev.events = IOEVENTS;505 ev.events = EPOLLIN | EPOLLONESHOT; 503 506 ev.data.u64 = (__u64)&ctx; 504 507 int ret = epoll_ctl(iopoll.epollfd, op, ctx.ring->efd, &ev); … … 517 520 } 518 521 522 void __ioctx_unregister($io_ctx_thread & ctx) { 523 // Read the current epoch so we know when to stop 524 size_t curr = __atomic_load_n(&iopoll.epoch, __ATOMIC_SEQ_CST); 525 526 // Remove the fd from the iopoller 527 __ioctx_epoll_ctl(ctx, EPOLL_CTL_DEL, "REMOVE"); 528 529 // Notify the io poller thread of the shutdown 530 iopoll.run = false; 531 sigval val = { 1 }; 532 pthread_sigqueue( iopoll.thrd, SIGUSR1, val ); 533 534 // Make sure all this is done 535 __atomic_thread_fence(__ATOMIC_SEQ_CST); 536 537 // Wait for the next epoch 538 while(curr == __atomic_load_n(&iopoll.epoch, __ATOMIC_RELAXED)) yield(); 539 } 540 519 541 //============================================================================================= 520 542 // I/O Context Misc Setup -
libcfa/src/concurrency/io/types.hfa
r6f94958 rd611995 133 133 struct $io_ctx_thread; 134 134 void __ioctx_register($io_ctx_thread & ctx); 135 void __ioctx_unregister($io_ctx_thread & ctx); 135 136 void __ioctx_prepare_block($io_ctx_thread & ctx); 136 137 void __sqe_clean( volatile struct io_uring_sqe * sqe );
Note: See TracChangeset
for help on using the changeset viewer.