Changeset 34c32f0 for libcfa/src
- Timestamp: Feb 1, 2022, 12:06:24 PM
- Branches: ADT, ast-experimental, enum, forall-pointer-decay, master, pthread-emulation, qualifiedEnum, stuck-waitfor-destruct
- Children: ab1a9ea
- Parents: 3e5db5b4 (diff), 7b2c8c3c (diff)

Note: this is a merge changeset; the changes displayed below correspond to the merge itself. Use the (diff) links above to see all the changes relative to each parent.

- Location: libcfa/src/concurrency
- Files: 2 edited
  - kernel.cfa (modified) (13 diffs)
  - kernel/fwd.hfa (modified) (1 diff)
Legend:
- Unmodified (context lines, prefixed " " below)
- Added (prefixed "+" below)
- Removed (prefixed "-" below)
libcfa/src/concurrency/kernel.cfa
--- libcfa/src/concurrency/kernel.cfa (r3e5db5b4)
+++ libcfa/src/concurrency/kernel.cfa (r34c32f0)
@@ -42,7 +42,7 @@
 
 #if !defined(__CFA_NO_STATISTICS__)
-    #define __STATS( ...) __VA_ARGS__
+    #define __STATS_DEF( ...) __VA_ARGS__
 #else
-    #define __STATS( ...)
+    #define __STATS_DEF( ...)
 #endif
 
@@ -122,4 +122,5 @@
 static thread$ * __next_thread(cluster * this);
 static thread$ * __next_thread_slow(cluster * this);
+static thread$ * __next_thread_search(cluster * this);
 static inline bool __must_unpark( thread$ * thrd ) __attribute((nonnull(1)));
 static void __run_thread(processor * this, thread$ * dst);
@@ -187,6 +188,4 @@
     MAIN_LOOP:
     for() {
-        #define OLD_MAIN 1
-        #if OLD_MAIN
         // Check if there is pending io
         __maybe_io_drain( this );
@@ -196,7 +195,17 @@
 
         if( !readyThread ) {
-            __tls_stats()->io.flush.idle++;
+            __IO_STATS__(true, io.flush.idle++; )
             __cfa_io_flush( this, 0 );
 
+            readyThread = __next_thread( this->cltr );
+        }
+
+        if( !readyThread ) for(5) {
+            __IO_STATS__(true, io.flush.idle++; )
+
             readyThread = __next_thread_slow( this->cltr );
+
+            if( readyThread ) break;
+
+            __cfa_io_flush( this, 0 );
         }
@@ -211,12 +220,10 @@
 
             // Confirm the ready-queue is empty
-            readyThread = __next_thread_slow( this->cltr );
+            readyThread = __next_thread_search( this->cltr );
             if( readyThread ) {
                 // A thread was found, cancel the halt
                 mark_awake(this->cltr->procs, * this);
 
-                #if !defined(__CFA_NO_STATISTICS__)
-                    __tls_stats()->ready.sleep.cancels++;
-                #endif
+                __STATS__(true, ready.sleep.cancels++; )
 
                 // continue the mai loop
@@ -245,122 +252,6 @@
 
         if(this->io.pending && !this->io.dirty) {
-            __tls_stats()->io.flush.dirty++;
+            __IO_STATS__(true, io.flush.dirty++; )
             __cfa_io_flush( this, 0 );
         }
-
-        #else
-            #warning new kernel loop
-        SEARCH: {
-            /* paranoid */ verify( ! __preemption_enabled() );
-
-            // First, lock the scheduler since we are searching for a thread
-            ready_schedule_lock();
-
-            // Try to get the next thread
-            readyThread = pop_fast( this->cltr );
-            if(readyThread) { ready_schedule_unlock(); break SEARCH; }
-
-            // If we can't find a thread, might as well flush any outstanding I/O
-            if(this->io.pending) { __cfa_io_flush( this, 0 ); }
-
-            // Spin a little on I/O, just in case
-            for(5) {
-                __maybe_io_drain( this );
-                readyThread = pop_fast( this->cltr );
-                if(readyThread) { ready_schedule_unlock(); break SEARCH; }
-            }
-
-            // no luck, try stealing a few times
-            for(5) {
-                if( __maybe_io_drain( this ) ) {
-                    readyThread = pop_fast( this->cltr );
-                } else {
-                    readyThread = pop_slow( this->cltr );
-                }
-                if(readyThread) { ready_schedule_unlock(); break SEARCH; }
-            }
-
-            // still no luck, search for a thread
-            readyThread = pop_search( this->cltr );
-            if(readyThread) { ready_schedule_unlock(); break SEARCH; }
-
-            // Don't block if we are done
-            if( __atomic_load_n(&this->do_terminate, __ATOMIC_SEQ_CST) ) {
-                ready_schedule_unlock();
-                break MAIN_LOOP;
-            }
-
-            __STATS( __tls_stats()->ready.sleep.halts++; )
-
-            // Push self to idle stack
-            ready_schedule_unlock();
-            if(!mark_idle(this->cltr->procs, * this)) goto SEARCH;
-            ready_schedule_lock();
-
-            // Confirm the ready-queue is empty
-            __maybe_io_drain( this );
-            readyThread = pop_search( this->cltr );
-            ready_schedule_unlock();
-
-            if( readyThread ) {
-                // A thread was found, cancel the halt
-                mark_awake(this->cltr->procs, * this);
-
-                __STATS( __tls_stats()->ready.sleep.cancels++; )
-
-                // continue the main loop
-                break SEARCH;
-            }
-
-            __STATS( if(this->print_halts) __cfaabi_bits_print_safe( STDOUT_FILENO, "PH:%d - %lld 0\n", this->unique_id, rdtscl()); )
-            __cfadbg_print_safe(runtime_core, "Kernel : core %p waiting on eventfd %d\n", this, this->idle_fd);
-
-            {
-                eventfd_t val;
-                ssize_t ret = read( this->idle_fd, &val, sizeof(val) );
-                if(ret < 0) {
-                    switch((int)errno) {
-                    case EAGAIN:
-                    #if EAGAIN != EWOULDBLOCK
-                    case EWOULDBLOCK:
-                    #endif
-                    case EINTR:
-                        // No need to do anything special here, just assume it's a legitimate wake-up
-                        break;
-                    default:
-                        abort( "KERNEL : internal error, read failure on idle eventfd, error(%d) %s.", (int)errno, strerror( (int)errno ) );
-                    }
-                }
-            }
-
-            __STATS( if(this->print_halts) __cfaabi_bits_print_safe( STDOUT_FILENO, "PH:%d - %lld 1\n", this->unique_id, rdtscl()); )
-
-            // We were woken up, remove self from idle
-            mark_awake(this->cltr->procs, * this);
-
-            // DON'T just proceed, start looking again
-            continue MAIN_LOOP;
-        }
-
-        RUN_THREAD:
-        /* paranoid */ verify( ! __preemption_enabled() );
-        /* paranoid */ verify( readyThread );
-
-        // Reset io dirty bit
-        this->io.dirty = false;
-
-        // We found a thread run it
-        __run_thread(this, readyThread);
-
-        // Are we done?
-        if( __atomic_load_n(&this->do_terminate, __ATOMIC_SEQ_CST) ) break MAIN_LOOP;
-
-        if(this->io.pending && !this->io.dirty) {
-            __cfa_io_flush( this, 0 );
-        }
-
-        ready_schedule_lock();
-        __maybe_io_drain( this );
-        ready_schedule_unlock();
-        #endif
     }
@@ -474,7 +365,5 @@
                     break RUNNING;
                 case TICKET_UNBLOCK:
-                    #if !defined(__CFA_NO_STATISTICS__)
-                        __tls_stats()->ready.threads.threads++;
-                    #endif
+                    __STATS__(true, ready.threads.threads++; )
                     // This is case 2, the racy case, someone tried to run this thread before it finished blocking
                     // In this case, just run it again.
@@ -491,7 +380,5 @@
     __cfadbg_print_safe(runtime_core, "Kernel : core %p finished running thread %p\n", this, thrd_dst);
 
-    #if !defined(__CFA_NO_STATISTICS__)
-        __tls_stats()->ready.threads.threads--;
-    #endif
+    __STATS__(true, ready.threads.threads--; )
 
     /* paranoid */ verify( ! __preemption_enabled() );
@@ -504,5 +391,5 @@
     thread$ * thrd_src = kernelTLS().this_thread;
 
-    __STATS( thrd_src->last_proc = kernelTLS().this_processor; )
+    __STATS_DEF( thrd_src->last_proc = kernelTLS().this_processor; )
 
     // Run the thread on this processor
@@ -556,5 +443,5 @@
     // Dereference the thread now because once we push it, there is not guaranteed it's still valid.
     struct cluster * cl = thrd->curr_cluster;
-    __STATS(bool outside = hint == UNPARK_LOCAL && thrd->last_proc && thrd->last_proc != kernelTLS().this_processor; )
+    __STATS_DEF(bool outside = hint == UNPARK_LOCAL && thrd->last_proc && thrd->last_proc != kernelTLS().this_processor; )
 
     // push the thread to the cluster ready-queue
@@ -607,12 +494,17 @@
 
     ready_schedule_lock();
-    thread$ * thrd;
-    for(25) {
-        thrd = pop_slow( this );
-        if(thrd) goto RET;
-    }
-    thrd = pop_search( this );
-
-    RET:
+    thread$ * thrd = pop_slow( this );
+    ready_schedule_unlock();
+
+    /* paranoid */ verify( ! __preemption_enabled() );
+    return thrd;
+}
+
+// KERNEL ONLY
+static inline thread$ * __next_thread_search(cluster * this) with( *this ) {
+    /* paranoid */ verify( ! __preemption_enabled() );
+
+    ready_schedule_lock();
+    thread$ * thrd = pop_search( this );
     ready_schedule_unlock();
 
@@ -856,7 +748,5 @@
 
 static bool mark_idle(__cluster_proc_list & this, processor & proc) {
-    #if !defined(__CFA_NO_STATISTICS__)
-        __tls_stats()->ready.sleep.halts++;
-    #endif
+    __STATS__(true, ready.sleep.halts++; )
 
     proc.idle_wctx.fd = 0;
@@ -951,11 +841,7 @@
         unsigned tail = *ctx->cq.tail;
         if(head == tail) return false;
-        #if OLD_MAIN
-            ready_schedule_lock();
-            ret = __cfa_io_drain( proc );
-            ready_schedule_unlock();
-        #else
-            ret = __cfa_io_drain( proc );
-        #endif
+        ready_schedule_lock();
+        ret = __cfa_io_drain( proc );
+        ready_schedule_unlock();
     #endif
     return ret;
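Taken together, the kernel.cfa hunks make three changes: the experimental OLD_MAIN/"new kernel loop" split is collapsed back into a single main loop, the exhaustive ready-queue search is factored out of __next_thread_slow into the new __next_thread_search helper, and raw __tls_stats() updates are replaced by the __STATS__/__IO_STATS__ macros so they compile away when statistics are disabled. In outline, the idle path of the merged main loop now escalates as follows. This is a condensed sketch pieced together from the hunks above, not verbatim source; in particular, the initial __next_thread call is inferred from surrounding context that the extraction lost:

    // Condensed sketch (Cforall) of the merged scheduler idle path.
    readyThread = __next_thread( this->cltr );      // fast path: local ready-queue

    if( !readyThread ) {                            // flush pending I/O once, retry fast path
        __IO_STATS__(true, io.flush.idle++; )
        __cfa_io_flush( this, 0 );

        readyThread = __next_thread( this->cltr );
    }

    if( !readyThread ) for(5) {                     // bounded slow-path retries, flushing I/O between
        __IO_STATS__(true, io.flush.idle++; )

        readyThread = __next_thread_slow( this->cltr );
        if( readyThread ) break;

        __cfa_io_flush( this, 0 );
    }

    // Only when about to halt does the processor pay for the exhaustive search,
    // confirming the ready-queue really is empty before it sleeps:
    readyThread = __next_thread_search( this->cltr );

The rewrite of __next_thread_slow follows the same motive: the old 25-iteration pop_slow spin with a pop_search fallback becomes a single pop_slow, keeping the common path cheap and pushing the expensive pop_search into __next_thread_search at the halt boundary.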
libcfa/src/concurrency/kernel/fwd.hfa
--- libcfa/src/concurrency/kernel/fwd.hfa (r3e5db5b4)
+++ libcfa/src/concurrency/kernel/fwd.hfa (r34c32f0)
@@ -396,6 +396,18 @@
             if( !(in_kernel) ) enable_interrupts(); \
         }
+        #if defined(CFA_HAVE_LINUX_IO_URING_H)
+            #define __IO_STATS__(in_kernel, ...) { \
+                if( !(in_kernel) ) disable_interrupts(); \
+                with( *__tls_stats() ) { \
+                    __VA_ARGS__ \
+                } \
+                if( !(in_kernel) ) enable_interrupts(); \
+            }
+        #else
+            #define __IO_STATS__(in_kernel, ...)
+        #endif
     #else
         #define __STATS__(in_kernel, ...)
+        #define __IO_STATS__(in_kernel, ...)
     #endif
 }
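For reference, here is how a call site such as __IO_STATS__(true, io.flush.idle++; ) in kernel.cfa expands under the definition added above. This is a hand-expansion for illustration only, assuming CFA_HAVE_LINUX_IO_URING_H is defined and __CFA_NO_STATISTICS__ is not:

    // Hand-expansion of __IO_STATS__(true, io.flush.idle++; ):
    {
        if( !(true) ) disable_interrupts();  // constant-folded away: kernel code is already non-preemptible
        with( *__tls_stats() ) {             // Cforall 'with' opens the thread-local stats struct
            io.flush.idle++;                 // the actual counter update
        }
        if( !(true) ) enable_interrupts();   // constant-folded away
    }
    // With __CFA_NO_STATISTICS__ defined, or without io_uring support, the macro
    // expands to nothing, so the counter update vanishes from the build entirely.

Passing in_kernel as false at a user-level call site would keep the disable_interrupts()/enable_interrupts() pair, protecting the thread-local update from preemption.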