Changeset 22226e4
- Timestamp:
- Mar 18, 2022, 12:42:39 PM (3 years ago)
- Branches:
- ADT, ast-experimental, enum, master, pthread-emulation, qualifiedEnum
- Children:
- 0b4ddb71, 51239d1b
- Parents:
- 3bc69f2
- Location:
- libcfa/src/concurrency
- Files:
- 4 edited
Legend:
- Unmodified (context lines, no prefix)
- Added (prefixed with +)
- Removed (prefixed with -)
libcfa/src/concurrency/io/setup.cfa
r3bc69f2 → r22226e4

 	this.ext_sq.empty = true;
 	(this.ext_sq.queue){};
-	__io_uring_setup( this, cl.io.params, proc->idle_fd );
+	__io_uring_setup( this, cl.io.params, proc->idle_wctx.evfd );
 	__cfadbg_print_safe(io_core, "Kernel I/O : Created ring for io_context %u (%p)\n", this.fd, &this);
 }
…
 	__cfadbg_print_safe(io_core, "Kernel I/O : Destroyed ring for io_context %u\n", this.fd);
 }
-
-extern void __disable_interrupts_hard();
-extern void __enable_interrupts_hard();
 
 static void __io_uring_setup( $io_context & this, const io_context_params & params_in, int procfd ) {
…
 	__cfadbg_print_safe(io_core, "Kernel I/O : registering %d for completion with ring %d\n", procfd, fd);
 
-	__disable_interrupts_hard();
-
 	int ret = syscall( __NR_io_uring_register, fd, IORING_REGISTER_EVENTFD, &procfd, 1);
 	if (ret < 0) {
 		abort("KERNEL ERROR: IO_URING EVENTFD REGISTER - %s\n", strerror(errno));
 	}
-
-	__enable_interrupts_hard();
 
 	__cfadbg_print_safe(io_core, "Kernel I/O : registered %d for completion with ring %d\n", procfd, fd);
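The edits above thread the per-processor eventfd through the new idle_wctx structure instead of the old raw idle_fd field, and drop the __disable_interrupts_hard()/__enable_interrupts_hard() guard that used to bracket the registration syscall. For readers unfamiliar with the mechanism, here is a minimal sketch in plain C (not CFA; ring_fd and the function name are illustrative) of what registering an eventfd with an io_uring instance involves, using the same raw syscall the patch keeps:

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/eventfd.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/io_uring.h>

/* Create an eventfd and attach it to an existing ring, so that every
 * completion posted to the ring also signals the eventfd.  ring_fd is
 * assumed to come from a prior io_uring_setup() call. */
static int register_idle_eventfd( int ring_fd ) {
    int evfd = eventfd( 0, 0 );              /* the per-processor wake-up fd */
    if (evfd < 0) return -1;

    int ret = syscall( __NR_io_uring_register, ring_fd,
                       IORING_REGISTER_EVENTFD, &evfd, 1 );
    if (ret < 0) {
        fprintf( stderr, "eventfd register failed: %s\n", strerror(errno) );
        close( evfd );
        return -1;
    }
    return evfd;                             /* caller stores it, cf. idle_wctx.evfd */
}

Once registered, an idle processor can block on this single eventfd and still be woken either by another processor or by I/O completions arriving on the ring.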
libcfa/src/concurrency/kernel.cfa
r3bc69f2 → r22226e4

 static void mark_awake(__cluster_proc_list & idles, processor & proc);
 
-extern void __cfa_io_start( processor * );
 extern bool __cfa_io_drain( processor * );
 extern bool __cfa_io_flush( processor *, int min_comp );
-extern void __cfa_io_stop ( processor * );
 static inline bool __maybe_io_drain( processor * );
…
 	verify(this);
 
-	io_future_t future; // used for idle sleep when io_uring is present
-	future.self.ptr = 1p; // mark it as already fulfilled so we know if there is a pending request or not
-	eventfd_t idle_val;
-	iovec idle_iovec = { &idle_val, sizeof(idle_val) };
-
-	__cfa_io_start( this );
+	/* paranoid */ verify( this->idle_wctx.ftr   != 0p );
+	/* paranoid */ verify( this->idle_wctx.rdbuf != 0p );
+
+	// used for idle sleep when io_uring is present
+	// mark it as already fulfilled so we know if there is a pending request or not
+	this->idle_wctx.ftr->self.ptr = 1p;
+	iovec idle_iovec = { this->idle_wctx.rdbuf, sizeof(eventfd_t) };
 
 	__cfadbg_print_safe(runtime_core, "Kernel : core %p starting\n", this);
…
 		}
 
-		idle_sleep( this, future, idle_iovec );
+		idle_sleep( this, *this->idle_wctx.ftr, idle_iovec );
 
 		// We were woken up, remove self from idle
…
 	__cfadbg_print_safe(runtime_core, "Kernel : core %p stopping\n", this);
 }
-
-	for(int i = 0; !available(future); i++) {
-		if(i > 1000) __cfaabi_dbg_write( "ERROR: kernel has bin spinning on a flush after exit loop.\n", 60);
-		__cfa_io_flush( this, 1 );
-	}
-
-	__cfa_io_stop( this );
 
 	post( this->terminated );
…
 
 	int fd = 1;
-	if( __atomic_load_n(&fdp->fd, __ATOMIC_SEQ_CST) != 1 ) {
-		fd = __atomic_exchange_n(&fdp->fd, 1, __ATOMIC_RELAXED);
+	if( __atomic_load_n(&fdp->sem, __ATOMIC_SEQ_CST) != 1 ) {
+		fd = __atomic_exchange_n(&fdp->sem, 1, __ATOMIC_RELAXED);
 	}
…
 	__cfadbg_print_safe(runtime_core, "Kernel : waking Processor %p\n", this);
 
-	this->idle_wctx.fd = 1;
+	this->idle_wctx.sem = 1;
 
 	eventfd_t val;
 	val = 1;
-	eventfd_write( this->idle_fd, val );
+	eventfd_write( this->idle_wctx.evfd, val );
 
 	/* paranoid */ verify( ! __preemption_enabled() );
…
 	// Tell everyone we are ready to go do sleep
 	for() {
-		int expected = this->idle_wctx.fd;
+		int expected = this->idle_wctx.sem;
 
 		// Someone already told us to wake-up! No time for a nap.
 
 		// Try to mark that we are going to sleep
-		if(__atomic_compare_exchange_n(&this->idle_wctx.fd, &expected, this->idle_fd, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST) ) {
+		if(__atomic_compare_exchange_n(&this->idle_wctx.sem, &expected, this->idle_wctx.evfd, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST) ) {
 			// Every one agreed, taking a nap
 			break;
…
 	{
 		eventfd_t val;
-		ssize_t ret = read( this->idle_fd, &val, sizeof(val) );
+		ssize_t ret = read( this->idle_wctx.evfd, &val, sizeof(val) );
 		if(ret < 0) {
 			switch((int)errno) {
…
 		reset(future);
 
-		__kernel_read(this, future, iov, this->idle_fd );
+		__kernel_read(this, future, iov, this->idle_wctx.evfd );
 	}
…
 	__STATS__(true, ready.sleep.halts++; )
 
-	proc.idle_wctx.fd = 0;
+	proc.idle_wctx.sem = 0;
 
 	/* paranoid */ verify( ! __preemption_enabled() );
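The rename from idle_wctx.fd to idle_wctx.sem in these hunks reflects that the field is a small state machine rather than a file-descriptor slot: 0 is the default (running) state, 1 means a wake-up is already pending, and any larger value is the eventfd the processor has committed to sleep on; the constructor's sentinel checks (evfd != 0, evfd != 1, see startup.cfa below) keep the three cases disjoint. The following is a condensed sketch of that handshake in plain C with GCC atomic builtins; the names are illustrative and it compresses logic the runtime spreads across several functions:

#include <sys/eventfd.h>
#include <unistd.h>

struct fd_waitctx {
    volatile int sem;   /* 0 = running, 1 = wake pending, >1 = evfd slept on */
    int evfd;           /* never 0 or 1, so it cannot collide with the states */
};

/* Waker side: publish the wake-up; if the sleeper had already parked
 * (sem held its evfd), kick the eventfd to unblock it. */
static void wake( struct fd_waitctx * wctx ) {
    int prev = __atomic_exchange_n( &wctx->sem, 1, __ATOMIC_SEQ_CST );
    if (prev > 1) {
        eventfd_t val = 1;
        eventfd_write( prev, val );          /* prev is the fd it sleeps on */
    }
}

/* Sleeper side: try to move sem from 0 to our evfd; a concurrent wake-up
 * (sem == 1) aborts the nap before we ever block. */
static int try_sleep( struct fd_waitctx * wctx ) {
    for (;;) {
        int expected = wctx->sem;
        if (expected == 1) return 0;         /* no time for a nap */
        if (__atomic_compare_exchange_n( &wctx->sem, &expected, wctx->evfd,
                                         0, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST )) {
            eventfd_t val;
            eventfd_read( wctx->evfd, &val ); /* block until a waker writes */
            return 1;
        }
    }
}

The sequentially consistent compare-and-swap closes the race: a waker that stores 1 before the sleeper's CAS makes the CAS fail and the nap is skipped, while a waker that runs after it observes the published evfd and writes to it.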
libcfa/src/concurrency/kernel.hfa
r3bc69f2 → r22226e4

 extern struct cluster * mainCluster;
 
-// Processor id, required for scheduling threads
-
 // Coroutine used py processors for the 2-step context switch
 coroutine processorCtx_t {
 	struct processor * proc;
 };
 
 struct io_future_t;
+
+// Information needed for idle sleep
 struct __fd_waitctx {
-	volatile int fd;
+	// semaphore/future like object
+	// values can be 0, 1 or some file descriptor.
+	// 0 - is the default state
+	// 1 - means the proc should wake-up immediately
+	// FD - means the proc is going asleep and should be woken by writing to the FD.
+	volatile int sem;
+
+	// The event FD that corresponds to this processor
+	int evfd;
+
+	// buffer into which the proc will read from evfd
+	// unused if not using io_uring for idle sleep
+	void * rdbuf;
+
+	// future use to track the read of the eventfd
+	// unused if not using io_uring for idle sleep
+	io_future_t * ftr;
 };
…
 	bool pending_preemption;
 
-	// Idle lock (kernel semaphore)
-	int idle_fd;
-
-	// Idle waitctx
+	// context for idle sleep
 	struct __fd_waitctx idle_wctx;
…
 	volatile unsigned id;
 };
-
-// //TODO adjust cache size to ARCHITECTURE
-// // Structure holding the ready queue
-// struct __ready_queue_t {
-// 	// Data tracking the actual lanes
-// 	// On a seperate cacheline from the used struct since
-// 	// used can change on each push/pop but this data
-// 	// only changes on shrink/grow
-// 	struct {
-// 		// Arary of lanes
-// 		__intrusive_lane_t * volatile data;
-
-// 		__cache_id_t * volatile caches;
-
-// 		// Number of lanes (empty or not)
-// 		volatile size_t count;
-// 	} lanes;
-// };
-
-// void ?{}(__ready_queue_t & this);
-// void ^?{}(__ready_queue_t & this);
 
 // Idle Sleep
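The two new pointer fields exist so that, when io_uring is available, the idle sleep can park by submitting an asynchronous read of the eventfd (tracked by ftr, with the value landing in rdbuf) instead of blocking in plain read(). A rough sketch of that pattern in C using liburing; the struct and function here are hypothetical stand-ins for the CFA runtime's internal io_future_t machinery, not its actual API:

#include <liburing.h>
#include <sys/eventfd.h>

struct idle_ctx {
    int evfd;        /* plays the role of __fd_waitctx.evfd */
    eventfd_t buf;   /* plays the role of the storage rdbuf points at */
};

/* Queue an async read of the eventfd; the wake-up then arrives as a
 * regular io_uring completion that the normal drain path can handle. */
static int submit_idle_read( struct io_uring * ring, struct idle_ctx * ctx ) {
    struct io_uring_sqe * sqe = io_uring_get_sqe( ring );
    if (!sqe) return -1;
    io_uring_prep_read( sqe, ctx->evfd, &ctx->buf, sizeof(ctx->buf), 0 );
    io_uring_sqe_set_data( sqe, ctx );       /* completion token, cf. ftr */
    return io_uring_submit( ring );
}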
libcfa/src/concurrency/kernel/startup.cfa
r3bc69f2 → r22226e4

 // CFA Includes
 #include "kernel/private.hfa"
+#include "iofwd.hfa"
 #include "startup.hfa"           // STARTUP_PRIORITY_XXX
 #include "limits.hfa"
…
 extern void __kernel_alarm_startup(void);
 extern void __kernel_alarm_shutdown(void);
+extern void __cfa_io_start( processor * );
+extern void __cfa_io_stop ( processor * );
…
 KERNEL_STORAGE(__stack_t, mainThreadCtx);
 KERNEL_STORAGE(__scheduler_RWLock_t, __scheduler_lock);
+KERNEL_STORAGE(eventfd_t, mainIdleEventFd);
+KERNEL_STORAGE(io_future_t, mainIdleFuture);
 #if !defined(__CFA_NO_STATISTICS__)
 KERNEL_STORAGE(__stats_t, mainProcStats);
…
 	(*mainProcessor){};
 
+	mainProcessor->idle_wctx.rdbuf = &storage_mainIdleEventFd;
+	mainProcessor->idle_wctx.ftr = (io_future_t*)&storage_mainIdleFuture;
+	/* paranoid */ verify( sizeof(storage_mainIdleEventFd) == sizeof(eventfd_t) );
+
 	register_tls( mainProcessor );
+	__cfa_io_start( mainProcessor );
 
 	// Start by initializing the main thread
…
 	mainProcessor->local_data = 0p;
 
+	__cfa_io_stop( mainProcessor );
 	unregister_tls( mainProcessor );
 
…
 	register_tls( proc );
 
+	__cfa_io_start( proc );
+
+	// used for idle sleep when io_uring is present
+	io_future_t future;
+	eventfd_t idle_buf;
+	proc->idle_wctx.ftr = &future;
+	proc->idle_wctx.rdbuf = &idle_buf;
+
 	// SKULLDUGGERY: We want to create a context for the processor coroutine
 	// which is needed for the 2-step context switch. However, there is no reason
…
 	// Main routine of the core returned, the core is now fully terminated
 	__cfadbg_print_safe(runtime_core, "Kernel : core %p main ended (%p)\n", proc, &proc->runner);
+
+	__cfa_io_stop( proc );
 
 	#if !defined(__CFA_NO_STATISTICS__)
…
 	this.local_data = 0p;
 
-	this.idle_fd = eventfd(0, 0);
-	if (idle_fd < 0) {
+	idle_wctx.evfd = eventfd(0, 0);
+	if (idle_wctx.evfd < 0) {
 		abort("KERNEL ERROR: PROCESSOR EVENTFD - %s\n", strerror(errno));
 	}
 
-	this.idle_wctx.fd = 0;
+	idle_wctx.sem = 0;
 
 	// I'm assuming these two are reserved for standard input and output
 	// so I'm using them as sentinels with idle_wctx.
-	/* paranoid */ verify( this.idle_fd != 0 );
-	/* paranoid */ verify( this.idle_fd != 1 );
+	/* paranoid */ verify( idle_wctx.evfd != 0 );
+	/* paranoid */ verify( idle_wctx.evfd != 1 );
…
 // Not a ctor, it just preps the destruction but should not destroy members
 static void deinit(processor & this) {
-	close(this.idle_fd);
+	close(this.idle_wctx.evfd);
 }
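Note the asymmetry in where the new idle-sleep storage lives: the main processor is constructed before any of this machinery can allocate, so its read buffer and future come from static KERNEL_STORAGE slots, while every other processor keeps them as locals in the stack frame that runs its main loop, which outlives the idle sleep. A minimal sketch of the static-storage half of that pattern in plain C; the names mirror the diff but io_future_t is a simplified stand-in:

#include <stdalign.h>
#include <sys/eventfd.h>

typedef struct { void * ptr; } io_future_t;  /* stand-in for CFA's io_future_t */

/* Raw, correctly aligned storage reserved at program start, usable before
 * the heap is up -- the moral equivalent of KERNEL_STORAGE(T, name). */
static alignas(eventfd_t)   char storage_mainIdleEventFd[sizeof(eventfd_t)];
static alignas(io_future_t) char storage_mainIdleFuture [sizeof(io_future_t)];

static void init_main_idle_ctx( void ** rdbuf, io_future_t ** ftr ) {
    *rdbuf = (void *)&storage_mainIdleEventFd;
    *ftr   = (io_future_t *)&storage_mainIdleFuture;
}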