Changeset 89eff25
- Timestamp:
- Apr 24, 2021, 7:44:52 PM (4 years ago)
- Branches:
- ADT, arm-eh, ast-experimental, enum, forall-pointer-decay, jacob/cs343-translation, master, new-ast-unique-expr, pthread-emulation, qualifiedEnum
- Children:
- cfff639
- Parents:
- fc59df78
- Location:
- libcfa/src/concurrency
- Files:
- 4 edited
Legend:
- Unmodified
- Added
- Removed
libcfa/src/concurrency/invoke.h
rfc59df78 r89eff25 148 148 struct $thread * prev; 149 149 volatile unsigned long long ts; 150 unsigned preferred; 150 151 }; 151 152 … … 199 200 } node; 200 201 202 struct processor * last_proc; 203 201 204 #if defined( __CFA_WITH_VERIFY__ ) 202 205 void * canary; -
libcfa/src/concurrency/kernel.cfa
rfc59df78 r89eff25 34 34 #include "invoke.h" 35 35 36 #if !defined(__CFA_NO_STATISTICS__) 37 #define __STATS( ...) __VA_ARGS__ 38 #else 39 #define __STATS( ...) 40 #endif 36 41 37 42 //----------------------------------------------------------------------------- … … 153 158 preemption_scope scope = { this }; 154 159 155 #if !defined(__CFA_NO_STATISTICS__) 156 unsigned long long last_tally = rdtscl(); 157 #endif 160 __STATS( unsigned long long last_tally = rdtscl(); ) 158 161 159 162 // if we need to run some special setup, now is the time to do it. … … 253 256 __cfa_io_flush( this ); 254 257 } 258 259 // SEARCH: { 260 // /* paranoid */ verify( ! __preemption_enabled() ); 261 // /* paranoid */ verify( kernelTLS().this_proc_id ); 262 263 // // First, lock the scheduler since we are searching for a thread 264 265 // // Try to get the next thread 266 // ready_schedule_lock(); 267 // readyThread = pop_fast( this->cltr ); 268 // ready_schedule_unlock(); 269 // if(readyThread) { break SEARCH; } 270 271 // // If we can't find a thread, might as well flush any outstanding I/O 272 // if(this->io.pending) { __cfa_io_flush( this ); } 273 274 // // Spin a little on I/O, just in case 275 // for(25) { 276 // __maybe_io_drain( this ); 277 // ready_schedule_lock(); 278 // readyThread = pop_fast( this->cltr ); 279 // ready_schedule_unlock(); 280 // if(readyThread) { break SEARCH; } 281 // } 282 283 // // no luck, try stealing a few times 284 // for(25) { 285 // if( __maybe_io_drain( this ) ) { 286 // ready_schedule_lock(); 287 // readyThread = pop_fast( this->cltr ); 288 // } else { 289 // ready_schedule_lock(); 290 // readyThread = pop_slow( this->cltr ); 291 // } 292 // ready_schedule_unlock(); 293 // if(readyThread) { break SEARCH; } 294 // } 295 296 // // still no luck, search for a thread 297 // ready_schedule_lock(); 298 // readyThread = pop_search( this->cltr ); 299 // ready_schedule_unlock(); 300 // if(readyThread) { break SEARCH; } 301 302 // // Don't block if we are done 
303 // if( __atomic_load_n(&this->do_terminate, __ATOMIC_SEQ_CST) ) break MAIN_LOOP; 304 305 // __STATS( __tls_stats()->ready.sleep.halts++; ) 306 307 // // Push self to idle stack 308 // mark_idle(this->cltr->procs, * this); 309 310 // // Confirm the ready-queue is empty 311 // __maybe_io_drain( this ); 312 // ready_schedule_lock(); 313 // readyThread = pop_search( this->cltr ); 314 // ready_schedule_unlock(); 315 316 // if( readyThread ) { 317 // // A thread was found, cancel the halt 318 // mark_awake(this->cltr->procs, * this); 319 320 // __STATS( __tls_stats()->ready.sleep.cancels++; ) 321 322 // // continue the main loop 323 // break SEARCH; 324 // } 325 326 // __STATS( if(this->print_halts) __cfaabi_bits_print_safe( STDOUT_FILENO, "PH:%d - %lld 0\n", this->id, rdtscl()); ) 327 // __cfadbg_print_safe(runtime_core, "Kernel : core %p waiting on eventfd %d\n", this, this->idle); 328 329 // // __disable_interrupts_hard(); 330 // eventfd_t val; 331 // eventfd_read( this->idle, &val ); 332 // // __enable_interrupts_hard(); 333 334 // __STATS( if(this->print_halts) __cfaabi_bits_print_safe( STDOUT_FILENO, "PH:%d - %lld 1\n", this->id, rdtscl()); ) 335 336 // // We were woken up, remove self from idle 337 // mark_awake(this->cltr->procs, * this); 338 339 // // DON'T just proceed, start looking again 340 // continue MAIN_LOOP; 341 // } 342 343 // RUN_THREAD: 344 // /* paranoid */ verify( kernelTLS().this_proc_id ); 345 // /* paranoid */ verify( ! __preemption_enabled() ); 346 // /* paranoid */ verify( readyThread ); 347 348 // // Reset io dirty bit 349 // this->io.dirty = false; 350 351 // // We found a thread run it 352 // __run_thread(this, readyThread); 353 354 // // Are we done? 
355 // if( __atomic_load_n(&this->do_terminate, __ATOMIC_SEQ_CST) ) break MAIN_LOOP; 356 357 // #if !defined(__CFA_NO_STATISTICS__) 358 // unsigned long long curr = rdtscl(); 359 // if(curr > (last_tally + 500000000)) { 360 // __tally_stats(this->cltr->stats, __cfaabi_tls.this_stats); 361 // last_tally = curr; 362 // } 363 // #endif 364 365 // if(this->io.pending && !this->io.dirty) { 366 // __cfa_io_flush( this ); 367 // } 368 369 // // Check if there is pending io 370 // __maybe_io_drain( this ); 255 371 } 256 372 … … 389 505 $thread * thrd_src = kernelTLS().this_thread; 390 506 391 #if !defined(__CFA_NO_STATISTICS__) 392 struct processor * last_proc = kernelTLS().this_processor; 393 #endif 507 __STATS( thrd_src->last_proc = kernelTLS().this_processor; ) 394 508 395 509 // Run the thread on this processor … … 410 524 411 525 #if !defined(__CFA_NO_STATISTICS__) 412 if(last_proc != kernelTLS().this_processor) { 526 /* paranoid */ verify( thrd_src->last_proc != 0p ); 527 if(thrd_src->last_proc != kernelTLS().this_processor) { 413 528 __tls_stats()->ready.threads.migration++; 414 529 } … … 443 558 // Dereference the thread now because once we push it, there is not guaranteed it's still valid. 444 559 struct cluster * cl = thrd->curr_cluster; 560 __STATS(bool outside = thrd->last_proc && thrd->last_proc != kernelTLS().this_processor; ) 445 561 446 562 ready_schedule_lock(); … … 457 573 if( kernelTLS().this_stats ) { 458 574 __tls_stats()->ready.threads.threads++; 575 if(outside) { 576 __tls_stats()->ready.threads.extunpark++; 577 } 459 578 __push_stat( __tls_stats(), __tls_stats()->ready.threads.threads, false, "Processor", kernelTLS().this_processor ); 460 579 } 461 580 else { 462 581 __atomic_fetch_add(&cl->stats->ready.threads.threads, 1, __ATOMIC_RELAXED); 582 __atomic_fetch_add(&cl->stats->ready.threads.extunpark, 1, __ATOMIC_RELAXED); 463 583 __push_stat( cl->stats, cl->stats->ready.threads.threads, true, "Cluster", cl ); 464 584 } -
libcfa/src/concurrency/kernel/startup.cfa
rfc59df78 r89eff25 447 447 link.next = 0p; 448 448 link.prev = 0p; 449 link.preferred = -1u; 450 last_proc = 0p; 449 451 #if defined( __CFA_WITH_VERIFY__ ) 450 452 canary = 0x0D15EA5E0D15EA5Ep; -
libcfa/src/concurrency/thread.cfa
rfc59df78 r89eff25 39 39 link.next = 0p; 40 40 link.prev = 0p; 41 link.preferred = -1u; 42 last_proc = 0p; 41 43 #if defined( __CFA_WITH_VERIFY__ ) 42 44 canary = 0x0D15EA5E0D15EA5Ep;
Note: See TracChangeset
for help on using the changeset viewer.