Changeset 7f6a7c9 for libcfa/src/concurrency/io/setup.cfa
- Timestamp: Sep 21, 2022, 11:02:15 AM (2 years ago)
- Branches: ADT, ast-experimental, master, pthread-emulation
- Children: 95dab9e
- Parents: 428adbc (diff), 0bd46fd (diff)
Note: this is a merge changeset; the changes displayed below correspond to the merge itself. Use the (diff) links above to see all the changes relative to each parent.
- File: 1 edited
libcfa/src/concurrency/io/setup.cfa
--- setup.cfa (r428adbc)
+++ setup.cfa (r7f6a7c9)
@@ -28,6 +28,6 @@
 void ?{}(io_context_params & this) libcfa_public {}
 
-void ?{}( $io_context& this, struct cluster & cl) {}
-void ^?{}( $io_context& this) {}
+void ?{}(io_context$ & this, struct cluster & cl) {}
+void ^?{}(io_context$ & this) {}
 
 void __cfa_io_start( processor * proc ) {}
@@ -37,6 +37,6 @@
 void __cfa_io_stop ( processor * proc ) {}
 
-$io_arbiter* create(void) { return 0p; }
-void destroy( $io_arbiter*) {}
+io_arbiter$ * create(void) { return 0p; }
+void destroy(io_arbiter$ *) {}
 
 #else
@@ -105,12 +105,12 @@
 
 
-static void __io_uring_setup ( $io_context& this, const io_context_params & params_in, int procfd );
-static void __io_uring_teardown( $io_context& this );
-static void __epoll_register( $io_context& ctx);
-static void __epoll_unregister( $io_context& ctx);
-void __ioarbiter_register( $io_arbiter & mutex, $io_context& ctx );
-void __ioarbiter_unregister( $io_arbiter & mutex, $io_context& ctx );
+static void __io_uring_setup ( io_context$ & this, const io_context_params & params_in, int procfd );
+static void __io_uring_teardown( io_context$ & this );
+static void __epoll_register(io_context$ & ctx);
+static void __epoll_unregister(io_context$ & ctx);
+void __ioarbiter_register( io_arbiter$ & mutex, io_context$ & ctx );
+void __ioarbiter_unregister( io_arbiter$ & mutex, io_context$ & ctx );
 
-void ?{}( $io_context& this, processor * proc, struct cluster & cl) {
+void ?{}(io_context$ & this, processor * proc, struct cluster & cl) {
 	/* paranoid */ verify( cl.io.arbiter );
 	this.proc = proc;
@@ -122,4 +122,4 @@
 }
 
-void ^?{}( $io_context& this) {
+void ^?{}(io_context$ & this) {
 	__cfadbg_print_safe(io_core, "Kernel I/O : tearing down io_context %u\n", this.fd);
@@ -129,5 +129,5 @@
 }
 
-static void __io_uring_setup( $io_context& this, const io_context_params & params_in, int procfd ) {
+static void __io_uring_setup( io_context$ & this, const io_context_params & params_in, int procfd ) {
 	// Step 1 : call to setup
 	struct io_uring_params params;
@@ -228,4 +228,5 @@
 
 	#if !defined(CFA_WITH_IO_URING_IDLE)
+	{
 		// Step 4 : eventfd
 		__cfadbg_print_safe(io_core, "Kernel I/O : registering %d for completion with ring %d\n", procfd, fd);
@@ -237,7 +238,12 @@
 
 		__cfadbg_print_safe(io_core, "Kernel I/O : registered %d for completion with ring %d\n", procfd, fd);
-	#endif
+	}
+	#endif
 
+	// TODO: implement a proper version of this.
+	// I have not found a better maximum that works in general but users should be able to configure it
+	// the same way they configure other I/O options
 	// #if defined(CFA_HAVE_IORING_REGISTER_IOWQ_MAX_WORKERS)
+	// {
 	// 	// Step 5 : max worker count
 	// 	__cfadbg_print_safe(io_core, "Kernel I/O : lmiting max workers for ring %d\n", fd);
@@ -252,4 +258,5 @@
 
 	// 	__cfadbg_print_safe(io_core, "Kernel I/O : lmited max workers for ring %d\n", fd);
+	// }
 	// #endif
 
@@ -270,5 +277,5 @@
 }
 
-static void __io_uring_teardown( $io_context& this ) {
+static void __io_uring_teardown( io_context$ & this ) {
 	// Shutdown the io rings
 	struct __sub_ring_t & sq = this.sq;
@@ -313,5 +320,5 @@
 // I/O Context Sleep
 //=============================================================================================
-// static inline void __epoll_ctl( $io_context& ctx, int op, const char * error) {
+// static inline void __epoll_ctl(io_context$ & ctx, int op, const char * error) {
 // 	struct epoll_event ev;
 // 	ev.events = EPOLLIN | EPOLLONESHOT;
@@ -323,9 +330,9 @@
 // }
 
-// static void __epoll_register( $io_context& ctx) {
+// static void __epoll_register(io_context$ & ctx) {
 // 	__epoll_ctl(ctx, EPOLL_CTL_ADD, "ADD");
 // }
 
-// static void __epoll_unregister( $io_context& ctx) {
+// static void __epoll_unregister(io_context$ & ctx) {
 // 	// Read the current epoch so we know when to stop
 // 	size_t curr = __atomic_load_n(&iopoll.epoch, __ATOMIC_SEQ_CST);
@@ -346,5 +353,5 @@
 // }
 
-// void __ioctx_prepare_block( $io_context& ctx) {
+// void __ioctx_prepare_block(io_context$ & ctx) {
 // 	__cfadbg_print_safe(io_core, "Kernel I/O - epoll : Re-arming io poller %d (%p)\n", ctx.fd, &ctx);
 // 	__epoll_ctl(ctx, EPOLL_CTL_MOD, "REARM");
@@ -355,14 +362,14 @@
 // I/O Context Misc Setup
 //=============================================================================================
-void ?{}( $io_arbiter& this ) {
+void ?{}( io_arbiter$ & this ) {
 	this.pending.empty = true;
 }
 
-void ^?{}( $io_arbiter& mutex this ) {}
-
-$io_arbiter* create(void) {
+void ^?{}( io_arbiter$ & mutex this ) {}
+
+io_arbiter$ * create(void) {
 	return new();
 }
-void destroy( $io_arbiter* arbiter) {
+void destroy(io_arbiter$ * arbiter) {
 	delete(arbiter);
 }
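For context on "Step 4" in the diff above: registering an eventfd with the ring lets a processor block until the kernel posts a completion, which is how idle processors can sleep when CFA_WITH_IO_URING_IDLE is not in use. Below is a minimal standalone sketch of that registration using the raw Linux syscalls rather than CFA's wrappers; the ring size and error handling are illustrative assumptions, not code from this changeset.

#include <linux/io_uring.h>
#include <stdio.h>
#include <string.h>
#include <sys/eventfd.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void) {
	// Stand-in for Steps 1-3: create a bare 32-entry ring with no flags.
	struct io_uring_params params;
	memset(&params, 0, sizeof(params));
	int ring_fd = (int)syscall(__NR_io_uring_setup, 32, &params);
	if (ring_fd < 0) { perror("io_uring_setup"); return 1; }

	// Step 4: register an eventfd; the kernel signals it every time a
	// completion entry is posted to the ring's completion queue.
	int efd = eventfd(0, 0);
	if (efd < 0 || syscall(__NR_io_uring_register, ring_fd,
	                       IORING_REGISTER_EVENTFD, &efd, 1) < 0) {
		perror("IORING_REGISTER_EVENTFD"); return 1;
	}

	// A parked thread can now block until a CQE arrives, e.g.:
	//   uint64_t n; read(efd, &n, sizeof(n));

	close(efd);
	close(ring_fd);
	return 0;
}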
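The commented-out "Step 5" and its new TODO concern IORING_REGISTER_IOWQ_MAX_WORKERS (Linux 5.15+), which caps how many bounded and unbounded io-wq worker threads the kernel may spawn for a ring; the TODO is about exposing that cap as a user-configurable I/O option instead of hard-coding a maximum. A hedged sketch of the call, with a hypothetical helper name and placeholder values:

#include <linux/io_uring.h>
#include <sys/syscall.h>
#include <unistd.h>

// Hypothetical helper: cap the kernel's io-wq worker threads for ring_fd.
// values[0] caps bounded workers (e.g. regular-file I/O) and values[1]
// caps unbounded workers (e.g. sockets); a value of 0 leaves a cap as-is.
static int cap_iowq_workers(int ring_fd, unsigned bounded, unsigned unbounded) {
	unsigned values[2] = { bounded, unbounded };
	return (int)syscall(__NR_io_uring_register, ring_fd,
	                    IORING_REGISTER_IOWQ_MAX_WORKERS, values, 2);
}

On success the kernel writes the previous caps back into the array, so calling with both values zero is also a way to query the current limits before choosing new ones.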