File: 1 edited
libcfa/src/concurrency/kernel.cfa (modified) (15 diffs)
Legend:
  ' ' Unmodified
  '+' Added
  '-' Removed
  '…' Unmodified lines skipped
libcfa/src/concurrency/kernel.cfa
rd3605f8 → ra633f6f

 extern "C" {
     #include <sys/eventfd.h>
-    #include <sys/uio.h>
 }

…

 #include "strstream.hfa"
 #include "device/cpu.hfa"
-#include "io/types.hfa"

 //Private includes

…

 static void __wake_one(cluster * cltr);

-static void idle_sleep(processor * proc, io_future_t & future, iovec & iov);
 static bool mark_idle (__cluster_proc_list & idles, processor & proc);
 static void mark_awake(__cluster_proc_list & idles, processor & proc);

…

 extern void __cfa_io_start( processor * );
 extern bool __cfa_io_drain( processor * );
-extern bool __cfa_io_flush( processor *, int min_comp);
+extern void __cfa_io_flush( processor * );
 extern void __cfa_io_stop ( processor * );
 static inline bool __maybe_io_drain( processor * );
-
-#if defined(CFA_WITH_IO_URING_IDLE)
-    extern bool __kernel_read(processor * proc, io_future_t & future, iovec &, int fd);
-#endif

 extern void __disable_interrupts_hard();

…

     /* paranoid */ verify( __preemption_enabled() );
 }
-

 //=============================================================================================

…

     verify(this);

-    io_future_t future; // used for idle sleep when io_uring is present
-    future.self.ptr = 1p; // mark it as already fulfilled so we know if there is a pending request or not
-    eventfd_t idle_val;
-    iovec idle_iovec = { &idle_val, sizeof(idle_val) };
-
     __cfa_io_start( this );

…


         if( !readyThread ) {
-            __cfa_io_flush( this, 0 );
+            ready_schedule_lock();
+            __cfa_io_flush( this );
+            ready_schedule_unlock();

             readyThread = __next_thread_slow( this->cltr );

…

             }

-            idle_sleep( this, future, idle_iovec );
-
-            // We were woken up, remove self from idle
-            mark_awake(this->cltr->procs, * this);
-
-            // DON'T just proceed, start looking again
-            continue MAIN_LOOP;
-        }
-
-        /* paranoid */ verify( readyThread );
-
-        // Reset io dirty bit
-        this->io.dirty = false;
-
-        // We found a thread run it
-        __run_thread(this, readyThread);
-
-        // Are we done?
-        if( __atomic_load_n(&this->do_terminate, __ATOMIC_SEQ_CST) ) break MAIN_LOOP;
-
-        if(this->io.pending && !this->io.dirty) {
-            __cfa_io_flush( this, 0 );
-        }
-
-    #else
-        #warning new kernel loop
-        SEARCH: {
-            /* paranoid */ verify( ! __preemption_enabled() );
-
-            // First, lock the scheduler since we are searching for a thread
-            ready_schedule_lock();
-
-            // Try to get the next thread
-            readyThread = pop_fast( this->cltr );
-            if(readyThread) { ready_schedule_unlock(); break SEARCH; }
-
-            // If we can't find a thread, might as well flush any outstanding I/O
-            if(this->io.pending) { __cfa_io_flush( this, 0 ); }
-
-            // Spin a little on I/O, just in case
-            for(5) {
-                __maybe_io_drain( this );
-                readyThread = pop_fast( this->cltr );
-                if(readyThread) { ready_schedule_unlock(); break SEARCH; }
-            }
-
-            // no luck, try stealing a few times
-            for(5) {
-                if( __maybe_io_drain( this ) ) {
-                    readyThread = pop_fast( this->cltr );
-                } else {
-                    readyThread = pop_slow( this->cltr );
+            #if !defined(__CFA_NO_STATISTICS__)
+                if(this->print_halts) {
+                    __cfaabi_bits_print_safe( STDOUT_FILENO, "PH:%d - %lld 0\n", this->unique_id, rdtscl());
                 }
-                if(readyThread) { ready_schedule_unlock(); break SEARCH; }
-            }
-
-            // still no luck, search for a thread
-            readyThread = pop_search( this->cltr );
-            if(readyThread) { ready_schedule_unlock(); break SEARCH; }
-
-            // Don't block if we are done
-            if( __atomic_load_n(&this->do_terminate, __ATOMIC_SEQ_CST) ) {
-                ready_schedule_unlock();
-                break MAIN_LOOP;
-            }
-
-            __STATS( __tls_stats()->ready.sleep.halts++; )
-
-            // Push self to idle stack
-            ready_schedule_unlock();
-            if(!mark_idle(this->cltr->procs, * this)) goto SEARCH;
-            ready_schedule_lock();
-
-            // Confirm the ready-queue is empty
-            __maybe_io_drain( this );
-            readyThread = pop_search( this->cltr );
-            ready_schedule_unlock();
-
-            if( readyThread ) {
-                // A thread was found, cancel the halt
-                mark_awake(this->cltr->procs, * this);
-
-                __STATS( __tls_stats()->ready.sleep.cancels++; )
-
-                // continue the main loop
-                break SEARCH;
-            }
-
-            __STATS( if(this->print_halts) __cfaabi_bits_print_safe( STDOUT_FILENO, "PH:%d - %lld 0\n", this->unique_id, rdtscl()); )
-            __cfadbg_print_safe(runtime_core, "Kernel : core %p waiting on eventfd %d\n", this, this->idle_fd);
+            #endif
+
+            __cfadbg_print_safe(runtime_core, "Kernel : core %p waiting on eventfd %d\n", this, this->idle);

             {
                 eventfd_t val;
-                ssize_t ret = read( this->idle_fd, &val, sizeof(val) );
+                ssize_t ret = read( this->idle, &val, sizeof(val) );
                 if(ret < 0) {
                     switch((int)errno) {

…

                 }
             }

-            __STATS( if(this->print_halts) __cfaabi_bits_print_safe( STDOUT_FILENO, "PH:%d - %lld 1\n", this->unique_id, rdtscl()); )
+            #if !defined(__CFA_NO_STATISTICS__)
+                if(this->print_halts) {
+                    __cfaabi_bits_print_safe( STDOUT_FILENO, "PH:%d - %lld 1\n", this->unique_id, rdtscl());
+                }
+            #endif

             // We were woken up, remove self from idle

…

         }

+        /* paranoid */ verify( readyThread );
+
+        // Reset io dirty bit
+        this->io.dirty = false;
+
+        // We found a thread run it
+        __run_thread(this, readyThread);
+
+        // Are we done?
+        if( __atomic_load_n(&this->do_terminate, __ATOMIC_SEQ_CST) ) break MAIN_LOOP;
+
+        if(this->io.pending && !this->io.dirty) {
+            ready_schedule_lock();
+            __cfa_io_flush( this );
+            ready_schedule_unlock();
+        }
+
+    #else
+        #warning new kernel loop
+        SEARCH: {
+            /* paranoid */ verify( ! __preemption_enabled() );
+
+            // First, lock the scheduler since we are searching for a thread
+            ready_schedule_lock();
+
+            // Try to get the next thread
+            readyThread = pop_fast( this->cltr );
+            if(readyThread) { ready_schedule_unlock(); break SEARCH; }
+
+            // If we can't find a thread, might as well flush any outstanding I/O
+            if(this->io.pending) { __cfa_io_flush( this ); }
+
+            // Spin a little on I/O, just in case
+            for(5) {
+                __maybe_io_drain( this );
+                readyThread = pop_fast( this->cltr );
+                if(readyThread) { ready_schedule_unlock(); break SEARCH; }
+            }
+
+            // no luck, try stealing a few times
+            for(5) {
+                if( __maybe_io_drain( this ) ) {
+                    readyThread = pop_fast( this->cltr );
+                } else {
+                    readyThread = pop_slow( this->cltr );
+                }
+                if(readyThread) { ready_schedule_unlock(); break SEARCH; }
+            }
+
+            // still no luck, search for a thread
+            readyThread = pop_search( this->cltr );
+            if(readyThread) { ready_schedule_unlock(); break SEARCH; }
+
+            // Don't block if we are done
+            if( __atomic_load_n(&this->do_terminate, __ATOMIC_SEQ_CST) ) {
+                ready_schedule_unlock();
+                break MAIN_LOOP;
+            }
+
+            __STATS( __tls_stats()->ready.sleep.halts++; )
+
+            // Push self to idle stack
+            ready_schedule_unlock();
+            if(!mark_idle(this->cltr->procs, * this)) goto SEARCH;
+            ready_schedule_lock();
+
+            // Confirm the ready-queue is empty
+            __maybe_io_drain( this );
+            readyThread = pop_search( this->cltr );
+            ready_schedule_unlock();
+
+            if( readyThread ) {
+                // A thread was found, cancel the halt
+                mark_awake(this->cltr->procs, * this);
+
+                __STATS( __tls_stats()->ready.sleep.cancels++; )
+
+                // continue the main loop
+                break SEARCH;
+            }
+
+            __STATS( if(this->print_halts) __cfaabi_bits_print_safe( STDOUT_FILENO, "PH:%d - %lld 0\n", this->unique_id, rdtscl()); )
+            __cfadbg_print_safe(runtime_core, "Kernel : core %p waiting on eventfd %d\n", this, this->idle);
+
+            {
+                eventfd_t val;
+                ssize_t ret = read( this->idle, &val, sizeof(val) );
+                if(ret < 0) {
+                    switch((int)errno) {
+                    case EAGAIN:
+                    #if EAGAIN != EWOULDBLOCK
+                    case EWOULDBLOCK:
+                    #endif
+                    case EINTR:
+                        // No need to do anything special here, just assume it's a legitimate wake-up
+                        break;
+                    default:
+                        abort( "KERNEL : internal error, read failure on idle eventfd, error(%d) %s.", (int)errno, strerror( (int)errno ) );
+                    }
+                }
+            }
+
+            __STATS( if(this->print_halts) __cfaabi_bits_print_safe( STDOUT_FILENO, "PH:%d - %lld 1\n", this->unique_id, rdtscl()); )
+
+            // We were woken up, remove self from idle
+            mark_awake(this->cltr->procs, * this);
+
+            // DON'T just proceed, start looking again
+            continue MAIN_LOOP;
+        }
+
     RUN_THREAD:
         /* paranoid */ verify( ! __preemption_enabled() );

…


         if(this->io.pending && !this->io.dirty) {
-            __cfa_io_flush( this, 0 );
+            __cfa_io_flush( this );
         }


…


     __cfadbg_print_safe(runtime_core, "Kernel : core %p stopping\n", this);
-    }
-
-    for(int i = 0; !available(future); i++) {
-        if(i > 1000) __cfaabi_dbg_write( "ERROR: kernel has bin spinning on a flush after exit loop.\n", 60);
-        __cfa_io_flush( this, 1 );
     }


…

     eventfd_t val;
     val = 1;
-    eventfd_write( this->idle_fd, val );
+    eventfd_write( this->idle, val );
     __enable_interrupts_checked();
-}
-
-static void idle_sleep(processor * this, io_future_t & future, iovec & iov) {
-    #if !defined(CFA_WITH_IO_URING_IDLE)
-        #if !defined(__CFA_NO_STATISTICS__)
-            if(this->print_halts) {
-                __cfaabi_bits_print_safe( STDOUT_FILENO, "PH:%d - %lld 0\n", this->unique_id, rdtscl());
-            }
-        #endif
-
-        __cfadbg_print_safe(runtime_core, "Kernel : core %p waiting on eventfd %d\n", this, this->idle_fd);
-
-        {
-            eventfd_t val;
-            ssize_t ret = read( this->idle_fd, &val, sizeof(val) );
-            if(ret < 0) {
-                switch((int)errno) {
-                case EAGAIN:
-                #if EAGAIN != EWOULDBLOCK
-                case EWOULDBLOCK:
-                #endif
-                case EINTR:
-                    // No need to do anything special here, just assume it's a legitimate wake-up
-                    break;
-                default:
-                    abort( "KERNEL : internal error, read failure on idle eventfd, error(%d) %s.", (int)errno, strerror( (int)errno ) );
-                }
-            }
-        }
-
-        #if !defined(__CFA_NO_STATISTICS__)
-            if(this->print_halts) {
-                __cfaabi_bits_print_safe( STDOUT_FILENO, "PH:%d - %lld 1\n", this->unique_id, rdtscl());
-            }
-        #endif
-    #else
-        // Do we already have a pending read
-        if(available(future)) {
-            // There is no pending read, we need to add one
-            reset(future);
-
-            __kernel_read(this, future, iov, this->idle_fd );
-        }
-
-        __cfa_io_flush( this, 1 );
-    #endif
 }


…

     insert_first(this.idles, proc);

-    __atomic_store_n(&this.fd, proc.idle_fd, __ATOMIC_SEQ_CST);
+    __atomic_store_n(&this.fd, proc.idle, __ATOMIC_SEQ_CST);
     unlock( this );
     /* paranoid */ verify( ! __preemption_enabled() );

…

     {
         int fd = 0;
-        if(!this.idles`isEmpty) fd = this.idles`first.idle_fd;
+        if(!this.idles`isEmpty) fd = this.idles`first.idle;
         __atomic_store_n(&this.fd, fd, __ATOMIC_SEQ_CST);
     }
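The common thread through these hunks is the removal of the experimental CFA_WITH_IO_URING_IDLE idle-sleep path (idle_sleep(), __kernel_read(), the io_future_t/iovec plumbing, and the min_comp parameter of __cfa_io_flush()), returning to a plain blocking read() on each processor's idle eventfd, which __wake_one() releases with eventfd_write(). The following is a minimal plain-C sketch of that park/wake handshake, not code from kernel.cfa: proc_main, the global fd, and the sleep() timing are assumptions made up for the example, while eventfd(), read(), and eventfd_write() are the real Linux APIs the kernel loop uses.

    #include <errno.h>
    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <sys/eventfd.h>
    #include <unistd.h>

    static int idle; // stands in for the per-processor eventfd (processor.idle)

    // Stands in for the idle branch of the processor main loop: park by
    // reading the eventfd, which blocks while the counter is zero.
    static void * proc_main(void * arg) {
        (void)arg;
        eventfd_t val;
        ssize_t ret = read( idle, &val, sizeof(val) );
        if(ret < 0 && errno != EAGAIN && errno != EWOULDBLOCK && errno != EINTR) {
            fprintf( stderr, "read failure on idle eventfd: %s\n", strerror(errno) );
            exit( EXIT_FAILURE );
        }
        printf( "processor woken, counter was %llu\n", (unsigned long long)val );
        return NULL;
    }

    int main(void) {
        idle = eventfd( 0, 0 ); // counter starts at zero, so the first read() blocks
        if(idle < 0) { perror( "eventfd" ); return EXIT_FAILURE; }

        pthread_t proc;
        pthread_create( &proc, NULL, proc_main, NULL );
        sleep( 1 ); // give the "processor" time to park

        // Stands in for __wake_one(): bump the counter, releasing the reader.
        eventfd_t val = 1;
        eventfd_write( idle, val );

        pthread_join( proc, NULL );
        close( idle );
        return 0;
    }

Build with gcc -pthread. A counting eventfd fits this protocol because a wake posted before the processor actually parks is not lost: the counter stays non-zero and the next read() returns immediately instead of blocking.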