Changeset 8d66610 for libcfa/src/concurrency/kernel.cfa
- Timestamp: May 21, 2021, 4:48:10 PM
- Branches: ADT, arm-eh, ast-experimental, enum, forall-pointer-decay, jacob/cs343-translation, master, new-ast-unique-expr, pthread-emulation, qualifiedEnum
- Children: f1bce515
- Parents: 5407cdc (diff), 7404cdc (diff)
Note: this is a merge changeset; the changes displayed below correspond to the merge itself. Use the (diff) links above to see all the changes relative to each parent.
- File: 1 edited
  - libcfa/src/concurrency/kernel.cfa (modified) (16 diffs)
Legend:
- Unmodified: context lines (no prefix)
- Added: lines prefixed with "+"
- Removed: lines prefixed with "-"
libcfa/src/concurrency/kernel.cfa
--- libcfa/src/concurrency/kernel.cfa (r5407cdc)
+++ libcfa/src/concurrency/kernel.cfa (r8d66610)

@@ -163,5 +163,5 @@
     #if !defined(__CFA_NO_STATISTICS__)
         if( this->print_halts ) {
-            __cfaabi_bits_print_safe( STDOUT_FILENO, "Processor : %d - %s (%p)\n", this->id, this->name, (void*)this);
+            __cfaabi_bits_print_safe( STDOUT_FILENO, "Processor : %d - %s (%p)\n", this->unique_id, this->name, (void*)this);
         }
     #endif

@@ -170,6 +170,4 @@
     // Setup preemption data
     preemption_scope scope = { this };
-
-    __STATS( unsigned long long last_tally = rdtscl(); )

     // if we need to run some special setup, now is the time to do it.

@@ -184,4 +182,6 @@
     MAIN_LOOP:
     for() {
+        #define OLD_MAIN 1
+        #if OLD_MAIN
         // Check if there is pending io
         __maybe_io_drain( this );

@@ -223,5 +223,5 @@
         #if !defined(__CFA_NO_STATISTICS__)
             if(this->print_halts) {
-                __cfaabi_bits_print_safe( STDOUT_FILENO, "PH:%d - %lld 0\n", this->id, rdtscl());
+                __cfaabi_bits_print_safe( STDOUT_FILENO, "PH:%d - %lld 0\n", this->unique_id, rdtscl());
             }
         #endif

@@ -236,5 +236,5 @@
         #if !defined(__CFA_NO_STATISTICS__)
            if(this->print_halts) {
-                __cfaabi_bits_print_safe( STDOUT_FILENO, "PH:%d - %lld 1\n", this->id, rdtscl());
+                __cfaabi_bits_print_safe( STDOUT_FILENO, "PH:%d - %lld 1\n", this->unique_id, rdtscl());
             }
         #endif

@@ -258,128 +258,107 @@
         if( __atomic_load_n(&this->do_terminate, __ATOMIC_SEQ_CST) ) break MAIN_LOOP;

-        #if !defined(__CFA_NO_STATISTICS__)
-            unsigned long long curr = rdtscl();
-            if(curr > (last_tally + 500000000)) {
-                __tally_stats(this->cltr->stats, __cfaabi_tls.this_stats);
-                last_tally = curr;
-            }
-        #endif
-
         if(this->io.pending && !this->io.dirty) {
             __cfa_io_flush( this );
         }

-        // SEARCH: {
-        //     /* paranoid */ verify( ! __preemption_enabled() );
-        //     /* paranoid */ verify( kernelTLS().this_proc_id );
-
-        //     // First, lock the scheduler since we are searching for a thread
-
-        //     // Try to get the next thread
-        //     ready_schedule_lock();
-        //     readyThread = pop_fast( this->cltr );
-        //     ready_schedule_unlock();
-        //     if(readyThread) { break SEARCH; }
-
-        //     // If we can't find a thread, might as well flush any outstanding I/O
-        //     if(this->io.pending) { __cfa_io_flush( this ); }
-
-        //     // Spin a little on I/O, just in case
-        //     for(25) {
-        //         __maybe_io_drain( this );
-        //         ready_schedule_lock();
-        //         readyThread = pop_fast( this->cltr );
-        //         ready_schedule_unlock();
-        //         if(readyThread) { break SEARCH; }
-        //     }
-
-        //     // no luck, try stealing a few times
-        //     for(25) {
-        //         if( __maybe_io_drain( this ) ) {
-        //             ready_schedule_lock();
-        //             readyThread = pop_fast( this->cltr );
-        //         } else {
-        //             ready_schedule_lock();
-        //             readyThread = pop_slow( this->cltr );
-        //         }
-        //         ready_schedule_unlock();
-        //         if(readyThread) { break SEARCH; }
-        //     }
-
-        //     // still no luck, search for a thread
-        //     ready_schedule_lock();
-        //     readyThread = pop_search( this->cltr );
-        //     ready_schedule_unlock();
-        //     if(readyThread) { break SEARCH; }
-
-        //     // Don't block if we are done
-        //     if( __atomic_load_n(&this->do_terminate, __ATOMIC_SEQ_CST) ) break MAIN_LOOP;
-
-        //     __STATS( __tls_stats()->ready.sleep.halts++; )
-
-        //     // Push self to idle stack
-        //     mark_idle(this->cltr->procs, * this);
-
-        //     // Confirm the ready-queue is empty
-        //     __maybe_io_drain( this );
-        //     ready_schedule_lock();
-        //     readyThread = pop_search( this->cltr );
-        //     ready_schedule_unlock();
-
-        //     if( readyThread ) {
-        //         // A thread was found, cancel the halt
-        //         mark_awake(this->cltr->procs, * this);
-
-        //         __STATS( __tls_stats()->ready.sleep.cancels++; )
-
-        //         // continue the main loop
-        //         break SEARCH;
-        //     }
-
-        //     __STATS( if(this->print_halts) __cfaabi_bits_print_safe( STDOUT_FILENO, "PH:%d - %lld 0\n", this->id, rdtscl()); )
-        //     __cfadbg_print_safe(runtime_core, "Kernel : core %p waiting on eventfd %d\n", this, this->idle);
-
-        //     // __disable_interrupts_hard();
-        //     eventfd_t val;
-        //     eventfd_read( this->idle, &val );
-        //     // __enable_interrupts_hard();
-
-        //     __STATS( if(this->print_halts) __cfaabi_bits_print_safe( STDOUT_FILENO, "PH:%d - %lld 1\n", this->id, rdtscl()); )
-
-        //     // We were woken up, remove self from idle
-        //     mark_awake(this->cltr->procs, * this);
-
-        //     // DON'T just proceed, start looking again
-        //     continue MAIN_LOOP;
-        // }
-
-        // RUN_THREAD:
-        // /* paranoid */ verify( kernelTLS().this_proc_id );
-        // /* paranoid */ verify( ! __preemption_enabled() );
-        // /* paranoid */ verify( readyThread );
-
-        // // Reset io dirty bit
-        // this->io.dirty = false;
-
-        // // We found a thread run it
-        // __run_thread(this, readyThread);
-
-        // // Are we done?
-        // if( __atomic_load_n(&this->do_terminate, __ATOMIC_SEQ_CST) ) break MAIN_LOOP;
-
-        // #if !defined(__CFA_NO_STATISTICS__)
-        //     unsigned long long curr = rdtscl();
-        //     if(curr > (last_tally + 500000000)) {
-        //         __tally_stats(this->cltr->stats, __cfaabi_tls.this_stats);
-        //         last_tally = curr;
-        //     }
-        // #endif
-
-        // if(this->io.pending && !this->io.dirty) {
-        //     __cfa_io_flush( this );
-        // }
-
-        // // Check if there is pending io
-        // __maybe_io_drain( this );
+        #else
+            #warning new kernel loop
+            SEARCH: {
+                /* paranoid */ verify( ! __preemption_enabled() );
+
+                // First, lock the scheduler since we are searching for a thread
+                ready_schedule_lock();
+
+                // Try to get the next thread
+                readyThread = pop_fast( this->cltr );
+                if(readyThread) { ready_schedule_unlock(); break SEARCH; }
+
+                // If we can't find a thread, might as well flush any outstanding I/O
+                if(this->io.pending) { __cfa_io_flush( this ); }
+
+                // Spin a little on I/O, just in case
+                for(5) {
+                    __maybe_io_drain( this );
+                    readyThread = pop_fast( this->cltr );
+                    if(readyThread) { ready_schedule_unlock(); break SEARCH; }
+                }
+
+                // no luck, try stealing a few times
+                for(5) {
+                    if( __maybe_io_drain( this ) ) {
+                        readyThread = pop_fast( this->cltr );
+                    } else {
+                        readyThread = pop_slow( this->cltr );
+                    }
+                    if(readyThread) { ready_schedule_unlock(); break SEARCH; }
+                }
+
+                // still no luck, search for a thread
+                readyThread = pop_search( this->cltr );
+                if(readyThread) { ready_schedule_unlock(); break SEARCH; }
+
+                // Don't block if we are done
+                if( __atomic_load_n(&this->do_terminate, __ATOMIC_SEQ_CST) ) break MAIN_LOOP;
+
+                __STATS( __tls_stats()->ready.sleep.halts++; )
+
+                // Push self to idle stack
+                ready_schedule_unlock();
+                mark_idle(this->cltr->procs, * this);
+                ready_schedule_lock();
+
+                // Confirm the ready-queue is empty
+                __maybe_io_drain( this );
+                readyThread = pop_search( this->cltr );
+                ready_schedule_unlock();
+
+                if( readyThread ) {
+                    // A thread was found, cancel the halt
+                    mark_awake(this->cltr->procs, * this);
+
+                    __STATS( __tls_stats()->ready.sleep.cancels++; )
+
+                    // continue the main loop
+                    break SEARCH;
+                }
+
+                __STATS( if(this->print_halts) __cfaabi_bits_print_safe( STDOUT_FILENO, "PH:%d - %lld 0\n", this->unique_id, rdtscl()); )
+                __cfadbg_print_safe(runtime_core, "Kernel : core %p waiting on eventfd %d\n", this, this->idle);
+
+                // __disable_interrupts_hard();
+                eventfd_t val;
+                eventfd_read( this->idle, &val );
+                // __enable_interrupts_hard();
+
+                __STATS( if(this->print_halts) __cfaabi_bits_print_safe( STDOUT_FILENO, "PH:%d - %lld 1\n", this->unique_id, rdtscl()); )
+
+                // We were woken up, remove self from idle
+                mark_awake(this->cltr->procs, * this);
+
+                // DON'T just proceed, start looking again
+                continue MAIN_LOOP;
+            }
+
+            RUN_THREAD:
+            /* paranoid */ verify( ! __preemption_enabled() );
+            /* paranoid */ verify( readyThread );
+
+            // Reset io dirty bit
+            this->io.dirty = false;
+
+            // We found a thread run it
+            __run_thread(this, readyThread);
+
+            // Are we done?
+            if( __atomic_load_n(&this->do_terminate, __ATOMIC_SEQ_CST) ) break MAIN_LOOP;
+
+            if(this->io.pending && !this->io.dirty) {
+                __cfa_io_flush( this );
+            }
+
+            ready_schedule_lock();
+            __maybe_io_drain( this );
+            ready_schedule_unlock();
+        #endif
     }

@@ -390,5 +369,4 @@

     post( this->terminated );
-

     if(this == mainProcessor) {

@@ -553,5 +531,4 @@
 static void __schedule_thread( $thread * thrd ) {
     /* paranoid */ verify( ! __preemption_enabled() );
-    /* paranoid */ verify( kernelTLS().this_proc_id );
     /* paranoid */ verify( ready_schedule_islocked());
     /* paranoid */ verify( thrd );

@@ -567,5 +544,5 @@
     /* paranoid */ verify( 0x0D15EA5E0D15EA5Ep == thrd->canary );

-
+    const bool local = thrd->state != Start;
     if (thrd->preempted == __NO_PREEMPTION) thrd->state = Ready;

@@ -575,5 +552,5 @@

     // push the thread to the cluster ready-queue
-    push( cl, thrd );
+    push( cl, thrd, local );

     // variable thrd is no longer safe to use

@@ -611,5 +588,4 @@
 static inline $thread * __next_thread(cluster * this) with( *this ) {
     /* paranoid */ verify( ! __preemption_enabled() );
-    /* paranoid */ verify( kernelTLS().this_proc_id );

     ready_schedule_lock();

@@ -617,5 +593,4 @@
     ready_schedule_unlock();

-    /* paranoid */ verify( kernelTLS().this_proc_id );
     /* paranoid */ verify( ! __preemption_enabled() );
     return thrd;

@@ -625,5 +600,4 @@
 static inline $thread * __next_thread_slow(cluster * this) with( *this ) {
     /* paranoid */ verify( ! __preemption_enabled() );
-    /* paranoid */ verify( kernelTLS().this_proc_id );

     ready_schedule_lock();

@@ -638,5 +612,4 @@
     ready_schedule_unlock();

-    /* paranoid */ verify( kernelTLS().this_proc_id );
     /* paranoid */ verify( ! __preemption_enabled() );
     return thrd;

@@ -895,7 +868,11 @@
         unsigned tail = *ctx->cq.tail;
         if(head == tail) return false;
+        #if OLD_MAIN
         ready_schedule_lock();
         ret = __cfa_io_drain( proc );
         ready_schedule_unlock();
+        #else
+            ret = __cfa_io_drain( proc );
+        #endif
     #endif
     return ret;

@@ -926,5 +903,32 @@
 }

+static void crawl_list( cluster * cltr, dlist(processor) & list, unsigned count ) {
+    /* paranoid */ verify( cltr->stats );
+
+    processor * it = &list`first;
+    for(unsigned i = 0; i < count; i++) {
+        /* paranoid */ verifyf( it, "Unexpected null iterator, at index %u of %u\n", i, count);
+        /* paranoid */ verify( it->local_data->this_stats );
+        __tally_stats( cltr->stats, it->local_data->this_stats );
+        it = &(*it)`next;
+    }
+}
+
+void crawl_cluster_stats( cluster & this ) {
+    // Stop the world, otherwise stats could get really messed-up
+    // this doesn't solve all problems but does solve many
+    // so it's probably good enough
+    uint_fast32_t last_size = ready_mutate_lock();
+
+    crawl_list(&this, this.procs.actives, this.procs.total - this.procs.idle);
+    crawl_list(&this, this.procs.idles  , this.procs.idle );
+
+    // Unlock the RWlock
+    ready_mutate_unlock( last_size );
+}
+
+
 void print_stats_now( cluster & this, int flags ) {
+    crawl_cluster_stats( this );
     __print_stats( this.stats, this.print_stats, "Cluster", this.name, (void*)&this );
 }
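The substantive change in this merge is the alternative main loop guarded by OLD_MAIN: when the new SEARCH path finds no ready thread, the processor marks itself idle and parks by blocking on its per-processor eventfd (eventfd_read( this->idle, &val )); a processor that produces new work wakes it by writing to that fd. Below is a minimal, self-contained C sketch of that park/unpark mechanism. The names idle_fd and worker are hypothetical, and this shows only the generic Linux eventfd pattern, not the CFA runtime's actual code:

// Minimal park/unpark sketch using a Linux eventfd (hypothetical names).
// Build with: cc -pthread sketch.c
#include <stdio.h>
#include <pthread.h>
#include <sys/eventfd.h>

static int idle_fd;                      // one per processor in the real runtime

static void * worker(void * arg) {
    (void)arg;
    eventfd_t val;
    // Park: blocks until another thread writes to the fd; the read
    // atomically consumes (zeroes) the counter under default semantics.
    eventfd_read(idle_fd, &val);
    printf("worker woke up (counter was %llu)\n", (unsigned long long)val);
    // The real loop does not run a thread directly here; it re-enters the
    // search because the wakeup only means work *may* exist.
    return NULL;
}

int main(void) {
    idle_fd = eventfd(0, 0);             // counter starts at 0, so readers block
    pthread_t t;
    pthread_create(&t, NULL, worker, NULL);
    // Unpark: any non-zero write wakes a blocked reader.
    eventfd_write(idle_fd, 1);
    pthread_join(t, NULL);
    return 0;
}

Because a blocking eventfd_read consumes the whole counter, a write of 1 wakes a single parked reader; this is also why the merged loop does continue MAIN_LOOP after waking instead of assuming a thread is ready.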
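The other notable addition is crawl_cluster_stats, which takes the ready-queue writer lock (ready_mutate_lock) so that no processor can be scheduling while each processor's statistics are tallied. Here is a rough C sketch of that stop-the-world-then-tally pattern, substituting a pthread_rwlock_t for the runtime's ready-queue RW lock and an invented proc_stats intrusive list (both hypothetical):

// Stop-the-world stats crawl sketch (hypothetical types and names).
#include <stdio.h>
#include <pthread.h>

typedef struct proc_stats {
    unsigned long scheduled;             // per-processor counter
    struct proc_stats * next;            // intrusive list of processors
} proc_stats;

// Schedulers would hold this for reading; taking it for writing stops them all.
static pthread_rwlock_t ready_lock = PTHREAD_RWLOCK_INITIALIZER;

static unsigned long crawl(proc_stats * head) {
    unsigned long total = 0;
    pthread_rwlock_wrlock(&ready_lock);  // "stop the world"
    for (proc_stats * it = head; it; it = it->next)
        total += it->scheduled;          // counters cannot move mid-crawl
    pthread_rwlock_unlock(&ready_lock);
    return total;
}

int main(void) {
    proc_stats b = { 7, NULL }, a = { 35, &b };
    printf("total scheduled: %lu\n", crawl(&a));
    return 0;
}

As the merged comment itself says, this does not solve every race, but excluding all scheduler activity during the crawl keeps the totals consistent enough for printing statistics.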