Changeset 295dd61 for libcfa/src/concurrency/kernel.cfa
- Timestamp: Dec 6, 2021, 5:06:14 PM (2 years ago)
- Branches: ADT, ast-experimental, enum, forall-pointer-decay, master, pthread-emulation, qualifiedEnum
- Children: 75873cf
- Parents: 813dfd86 (diff), a83012bf (diff)
- File: 1 edited

Note: this is a merge changeset; the changes displayed below correspond to the merge itself. Use the (diff) links above to see all the changes relative to each parent.
Legend:
- Unmodified: lines prefixed with a space
- Added: lines prefixed with +
- Removed: lines prefixed with -
libcfa/src/concurrency/kernel.cfa
--- libcfa/src/concurrency/kernel.cfa (r813dfd86)
+++ libcfa/src/concurrency/kernel.cfa (r295dd61)

 extern "C" {
 	#include <sys/eventfd.h>
+	#include <sys/uio.h>
 }
…
 #include "strstream.hfa"
 #include "device/cpu.hfa"
+#include "io/types.hfa"

 //Private includes
…
 static void __wake_one(cluster * cltr);

+static void idle_sleep(processor * proc, io_future_t & future, iovec & iov);
 static bool mark_idle (__cluster_proc_list & idles, processor & proc);
 static void mark_awake(__cluster_proc_list & idles, processor & proc);
…
 extern void __cfa_io_start( processor * );
 extern bool __cfa_io_drain( processor * );
-extern void __cfa_io_flush( processor * );
+extern bool __cfa_io_flush( processor *, int min_comp );
 extern void __cfa_io_stop ( processor * );
 static inline bool __maybe_io_drain( processor * );
+
+#if defined(CFA_WITH_IO_URING_IDLE)
+	extern bool __kernel_read(processor * proc, io_future_t & future, iovec &, int fd);
+#endif

 extern void __disable_interrupts_hard();
…
 	/* paranoid */ verify( __preemption_enabled() );
 }
+

…
 	verify(this);

+	io_future_t future; // used for idle sleep when io_uring is present
+	future.self.ptr = 1p; // mark it as already fulfilled so we know if there is a pending request or not
+	eventfd_t idle_val;
+	iovec idle_iovec = { &idle_val, sizeof(idle_val) };
+
 	__cfa_io_start( this );

…
 		if( !readyThread ) {
-			ready_schedule_lock();
-			__cfa_io_flush( this );
-			ready_schedule_unlock();
+			__cfa_io_flush( this, 0 );

 			readyThread = __next_thread_slow( this->cltr );
…
 			}

-			#if !defined(__CFA_NO_STATISTICS__)
-				if(this->print_halts) {
-					__cfaabi_bits_print_safe( STDOUT_FILENO, "PH:%d - %lld 0\n", this->unique_id, rdtscl());
-				}
-			#endif
-
-			__cfadbg_print_safe(runtime_core, "Kernel : core %p waiting on eventfd %d\n", this, this->idle);
+			idle_sleep( this, future, idle_iovec );
+
+			// We were woken up, remove self from idle
+			mark_awake(this->cltr->procs, * this);
+
+			// DON'T just proceed, start looking again
+			continue MAIN_LOOP;
+		}
+
+		/* paranoid */ verify( readyThread );
+
+		// Reset io dirty bit
+		this->io.dirty = false;
+
+		// We found a thread run it
+		__run_thread(this, readyThread);
+
+		// Are we done?
+		if( __atomic_load_n(&this->do_terminate, __ATOMIC_SEQ_CST) ) break MAIN_LOOP;
+
+		if(this->io.pending && !this->io.dirty) {
+			__cfa_io_flush( this, 0 );
+		}
+
+	#else
+		#warning new kernel loop
+		SEARCH: {
+			/* paranoid */ verify( ! __preemption_enabled() );
+
+			// First, lock the scheduler since we are searching for a thread
+			ready_schedule_lock();
+
+			// Try to get the next thread
+			readyThread = pop_fast( this->cltr );
+			if(readyThread) { ready_schedule_unlock(); break SEARCH; }
+
+			// If we can't find a thread, might as well flush any outstanding I/O
+			if(this->io.pending) { __cfa_io_flush( this, 0 ); }
+
+			// Spin a little on I/O, just in case
+			for(5) {
+				__maybe_io_drain( this );
+				readyThread = pop_fast( this->cltr );
+				if(readyThread) { ready_schedule_unlock(); break SEARCH; }
+			}
+
+			// no luck, try stealing a few times
+			for(5) {
+				if( __maybe_io_drain( this ) ) {
+					readyThread = pop_fast( this->cltr );
+				} else {
+					readyThread = pop_slow( this->cltr );
+				}
+				if(readyThread) { ready_schedule_unlock(); break SEARCH; }
+			}
+
+			// still no luck, search for a thread
+			readyThread = pop_search( this->cltr );
+			if(readyThread) { ready_schedule_unlock(); break SEARCH; }
+
+			// Don't block if we are done
+			if( __atomic_load_n(&this->do_terminate, __ATOMIC_SEQ_CST) ) {
+				ready_schedule_unlock();
+				break MAIN_LOOP;
+			}
+
+			__STATS( __tls_stats()->ready.sleep.halts++; )
+
+			// Push self to idle stack
+			ready_schedule_unlock();
+			if(!mark_idle(this->cltr->procs, * this)) goto SEARCH;
+			ready_schedule_lock();
+
+			// Confirm the ready-queue is empty
+			__maybe_io_drain( this );
+			readyThread = pop_search( this->cltr );
+			ready_schedule_unlock();
+
+			if( readyThread ) {
+				// A thread was found, cancel the halt
+				mark_awake(this->cltr->procs, * this);
+
+				__STATS( __tls_stats()->ready.sleep.cancels++; )
+
+				// continue the main loop
+				break SEARCH;
+			}
+
+			__STATS( if(this->print_halts) __cfaabi_bits_print_safe( STDOUT_FILENO, "PH:%d - %lld 0\n", this->unique_id, rdtscl()); )
+			__cfadbg_print_safe(runtime_core, "Kernel : core %p waiting on eventfd %d\n", this, this->idle_fd);

 			{
 				eventfd_t val;
-				ssize_t ret = read( this->idle, &val, sizeof(val) );
+				ssize_t ret = read( this->idle_fd, &val, sizeof(val) );
 				if(ret < 0) {
 					switch((int)errno) {
…
 				}
 			}

-			#if !defined(__CFA_NO_STATISTICS__)
-				if(this->print_halts) {
-					__cfaabi_bits_print_safe( STDOUT_FILENO, "PH:%d - %lld 1\n", this->unique_id, rdtscl());
-				}
-			#endif
+			__STATS( if(this->print_halts) __cfaabi_bits_print_safe( STDOUT_FILENO, "PH:%d - %lld 1\n", this->unique_id, rdtscl()); )

 			// We were woken up, remove self from idle
…
 		}

-		/* paranoid */ verify( readyThread );
-
-		// Reset io dirty bit
-		this->io.dirty = false;
-
-		// We found a thread run it
-		__run_thread(this, readyThread);
-
-		// Are we done?
-		if( __atomic_load_n(&this->do_terminate, __ATOMIC_SEQ_CST) ) break MAIN_LOOP;
-
-		if(this->io.pending && !this->io.dirty) {
-			ready_schedule_lock();
-			__cfa_io_flush( this );
-			ready_schedule_unlock();
-		}
-
-	#else
-		#warning new kernel loop
-		SEARCH: {
-			/* paranoid */ verify( ! __preemption_enabled() );
-
-			// First, lock the scheduler since we are searching for a thread
-			ready_schedule_lock();
-
-			// Try to get the next thread
-			readyThread = pop_fast( this->cltr );
-			if(readyThread) { ready_schedule_unlock(); break SEARCH; }
-
-			// If we can't find a thread, might as well flush any outstanding I/O
-			if(this->io.pending) { __cfa_io_flush( this ); }
-
-			// Spin a little on I/O, just in case
-			for(5) {
-				__maybe_io_drain( this );
-				readyThread = pop_fast( this->cltr );
-				if(readyThread) { ready_schedule_unlock(); break SEARCH; }
-			}
-
-			// no luck, try stealing a few times
-			for(5) {
-				if( __maybe_io_drain( this ) ) {
-					readyThread = pop_fast( this->cltr );
-				} else {
-					readyThread = pop_slow( this->cltr );
-				}
-				if(readyThread) { ready_schedule_unlock(); break SEARCH; }
-			}
-
-			// still no luck, search for a thread
-			readyThread = pop_search( this->cltr );
-			if(readyThread) { ready_schedule_unlock(); break SEARCH; }
-
-			// Don't block if we are done
-			if( __atomic_load_n(&this->do_terminate, __ATOMIC_SEQ_CST) ) {
-				ready_schedule_unlock();
-				break MAIN_LOOP;
-			}
-
-			__STATS( __tls_stats()->ready.sleep.halts++; )
-
-			// Push self to idle stack
-			ready_schedule_unlock();
-			if(!mark_idle(this->cltr->procs, * this)) goto SEARCH;
-			ready_schedule_lock();
-
-			// Confirm the ready-queue is empty
-			__maybe_io_drain( this );
-			readyThread = pop_search( this->cltr );
-			ready_schedule_unlock();
-
-			if( readyThread ) {
-				// A thread was found, cancel the halt
-				mark_awake(this->cltr->procs, * this);
-
-				__STATS( __tls_stats()->ready.sleep.cancels++; )
-
-				// continue the main loop
-				break SEARCH;
-			}
-
-			__STATS( if(this->print_halts) __cfaabi_bits_print_safe( STDOUT_FILENO, "PH:%d - %lld 0\n", this->unique_id, rdtscl()); )
-			__cfadbg_print_safe(runtime_core, "Kernel : core %p waiting on eventfd %d\n", this, this->idle);
-
-			{
-				eventfd_t val;
-				ssize_t ret = read( this->idle, &val, sizeof(val) );
-				if(ret < 0) {
-					switch((int)errno) {
-						case EAGAIN:
-						#if EAGAIN != EWOULDBLOCK
-						case EWOULDBLOCK:
-						#endif
-						case EINTR:
-							// No need to do anything special here, just assume it's a legitimate wake-up
-							break;
-						default:
-							abort( "KERNEL : internal error, read failure on idle eventfd, error(%d) %s.", (int)errno, strerror( (int)errno ) );
-					}
-				}
-			}
-
-			__STATS( if(this->print_halts) __cfaabi_bits_print_safe( STDOUT_FILENO, "PH:%d - %lld 1\n", this->unique_id, rdtscl()); )
-
-			// We were woken up, remove self from idle
-			mark_awake(this->cltr->procs, * this);
-
-			// DON'T just proceed, start looking again
-			continue MAIN_LOOP;
-		}
-
 	RUN_THREAD:
 	/* paranoid */ verify( ! __preemption_enabled() );
…
 		if(this->io.pending && !this->io.dirty) {
-			__cfa_io_flush( this );
+			__cfa_io_flush( this, 0 );
 		}
…
 	__cfadbg_print_safe(runtime_core, "Kernel : core %p stopping\n", this);
+	}
+
+	for(int i = 0; !available(future); i++) {
+		if(i > 1000) __cfaabi_dbg_write( "ERROR: kernel has bin spinning on a flush after exit loop.\n", 60);
+		__cfa_io_flush( this, 1 );
 	}
…
 	eventfd_t val;
 	val = 1;
-	eventfd_write( this->idle, val );
+	eventfd_write( this->idle_fd, val );
 	__enable_interrupts_checked();
+}
+
+static void idle_sleep(processor * this, io_future_t & future, iovec & iov) {
+	#if !defined(CFA_WITH_IO_URING_IDLE)
+		#if !defined(__CFA_NO_STATISTICS__)
+			if(this->print_halts) {
+				__cfaabi_bits_print_safe( STDOUT_FILENO, "PH:%d - %lld 0\n", this->unique_id, rdtscl());
+			}
+		#endif
+
+		__cfadbg_print_safe(runtime_core, "Kernel : core %p waiting on eventfd %d\n", this, this->idle_fd);
+
+		{
+			eventfd_t val;
+			ssize_t ret = read( this->idle_fd, &val, sizeof(val) );
+			if(ret < 0) {
+				switch((int)errno) {
+					case EAGAIN:
+					#if EAGAIN != EWOULDBLOCK
+					case EWOULDBLOCK:
+					#endif
+					case EINTR:
+						// No need to do anything special here, just assume it's a legitimate wake-up
+						break;
+					default:
+						abort( "KERNEL : internal error, read failure on idle eventfd, error(%d) %s.", (int)errno, strerror( (int)errno ) );
+				}
+			}
+		}
+
+		#if !defined(__CFA_NO_STATISTICS__)
+			if(this->print_halts) {
+				__cfaabi_bits_print_safe( STDOUT_FILENO, "PH:%d - %lld 1\n", this->unique_id, rdtscl());
+			}
+		#endif
+	#else
+		// Do we already have a pending read
+		if(available(future)) {
+			// There is no pending read, we need to add one
+			reset(future);
+
+			__kernel_read(this, future, iov, this->idle_fd );
+		}
+
+		__cfa_io_flush( this, 1 );
+	#endif
 }
…
 	insert_first(this.idles, proc);

-	__atomic_store_n(&this.fd, proc.idle, __ATOMIC_SEQ_CST);
+	__atomic_store_n(&this.fd, proc.idle_fd, __ATOMIC_SEQ_CST);
 	unlock( this );
 	/* paranoid */ verify( ! __preemption_enabled() );
…
 	{
 		int fd = 0;
-		if(!this.idles`isEmpty) fd = this.idles`first.idle;
+		if(!this.idles`isEmpty) fd = this.idles`first.idle_fd;
 		__atomic_store_n(&this.fd, fd, __ATOMIC_SEQ_CST);
 	}
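In short, the merge moves the idle-sleep logic into a new idle_sleep() helper and renames processor.idle to processor.idle_fd. When the runtime is built with CFA_WITH_IO_URING_IDLE, an idle processor no longer blocks in read(2) on its eventfd: idle_sleep() arms an asynchronous read of the eventfd (__kernel_read into the io_future_t through the iovec prepared at the top of the main loop) and then calls __cfa_io_flush( this, 1 ), whose new min_comp argument (0 for ordinary flushes, 1 when sleeping) lets the processor park inside the io_uring flush until the read completes, e.g. when __wake_one() performs eventfd_write on idle_fd. The post-loop for(...; !available(future); ...) drain keeps flushing until that pending read has completed before the processor shuts down. The sketch below illustrates the same idea with plain liburing rather than the CFA runtime's helpers; the program, its names, and its structure are illustrative assumptions, not code from this changeset.

// Sketch only (not CFA runtime code): sleep on an eventfd through io_uring
// instead of a blocking read(2), so a wake-up is just another completion.
#include <liburing.h>
#include <sys/eventfd.h>
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static int idle_fd;              // plays the role of processor.idle_fd
static struct io_uring ring;     // plays the role of the processor's io_uring

static void * waker(void * arg) {
	(void)arg;
	sleep(1);                    // pretend work becomes ready later
	eventfd_t one = 1;
	eventfd_write(idle_fd, one); // analogous to the eventfd_write in __wake_one()
	return NULL;
}

int main(void) {
	idle_fd = eventfd(0, 0);
	if (idle_fd < 0 || io_uring_queue_init(8, &ring, 0) < 0) {
		perror("setup");
		return 1;
	}

	pthread_t t;
	pthread_create(&t, NULL, waker, NULL);

	// Arm the pending read of the eventfd (compare __kernel_read + idle_iovec).
	eventfd_t val;
	struct iovec iov = { .iov_base = &val, .iov_len = sizeof(val) };
	struct io_uring_sqe * sqe = io_uring_get_sqe(&ring);
	io_uring_prep_readv(sqe, idle_fd, &iov, 1, 0);

	// Submit and sleep until at least one completion arrives
	// (compare __cfa_io_flush( proc, /*min_comp=*/1 )).
	io_uring_submit_and_wait(&ring, 1);

	struct io_uring_cqe * cqe;
	io_uring_wait_cqe(&ring, &cqe);
	printf("woken: read %d bytes, counter=%llu\n", cqe->res, (unsigned long long)val);
	io_uring_cqe_seen(&ring, cqe);

	pthread_join(t, NULL);
	io_uring_queue_exit(&ring);
	close(idle_fd);
	return 0;
}

Built with -luring -lpthread, the main thread blocks in io_uring_submit_and_wait() until the waker writes the eventfd, which mirrors how the new idle path lets the processor sleep in the I/O flush and be woken either by __wake_one() or by I/O traffic, rather than being parked in a plain read(2) that I/O completions cannot interrupt.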
Note: See TracChangeset for help on using the changeset viewer.