Changes in / [c86ee4c:b1a2c4a]
- Location:
- libcfa/src/concurrency
- Files:
-
- 6 edited
-
invoke.h (modified) (1 diff)
-
kernel.cfa (modified) (4 diffs)
-
kernel.hfa (modified) (1 diff)
-
kernel/startup.cfa (modified) (1 diff)
-
ready_queue.cfa (modified) (1 diff)
-
thread.cfa (modified) (1 diff)
Legend:
- Unmodified
- Added
- Removed
-
libcfa/src/concurrency/invoke.h
c86ee4c → b1a2c4a
@@ -170,6 +170,4 @@
 	bool corctx_flag;
 
-	int last_cpu;
-
 	//SKULLDUGGERY errno is not save in the thread data structure because returnToKernel appears to be the only function to require saving and restoring it
 
libcfa/src/concurrency/kernel.cfa
c86ee4c → b1a2c4a
@@ -394,13 +394,3 @@
 	__builtin_prefetch( thrd_dst->context.SP );
 
-	int curr = __kernel_getcpu();
-	if(thrd_dst->last_cpu != curr) {
-		int64_t l = thrd_dst->last_cpu;
-		int64_t c = curr;
-		int64_t v = (l << 32) | c;
-		__push_stat( __tls_stats(), v, false, "Processor", this );
-	}
-
-	thrd_dst->last_cpu = curr;
-
 	__cfadbg_print_safe(runtime_core, "Kernel : core %p running thread %p (%s)\n", this, thrd_dst, thrd_dst->self_cor.name);
@@ -480,4 +470,5 @@
 		#if !defined(__CFA_NO_STATISTICS__)
 			__tls_stats()->ready.threads.threads++;
+			__push_stat( __tls_stats(), __tls_stats()->ready.threads.threads, false, "Processor", this );
 		#endif
 		// This is case 2, the racy case, someone tried to run this thread before it finished blocking
@@ -497,4 +488,5 @@
 	#if !defined(__CFA_NO_STATISTICS__)
 		__tls_stats()->ready.threads.threads--;
+		__push_stat( __tls_stats(), __tls_stats()->ready.threads.threads, false, "Processor", this );
 	#endif
 
@@ -578,8 +570,10 @@
 			__tls_stats()->ready.threads.extunpark++;
 		}
+		__push_stat( __tls_stats(), __tls_stats()->ready.threads.threads, false, "Processor", kernelTLS().this_processor );
 	}
 	else {
 		__atomic_fetch_add(&cl->stats->ready.threads.threads, 1, __ATOMIC_RELAXED);
 		__atomic_fetch_add(&cl->stats->ready.threads.extunpark, 1, __ATOMIC_RELAXED);
+		__push_stat( cl->stats, cl->stats->ready.threads.threads, true, "Cluster", cl );
 	}
 	#endif
libcfa/src/concurrency/kernel.hfa
c86ee4c → b1a2c4a
@@ -67,5 +67,4 @@
 		unsigned target;
 		unsigned last;
-		unsigned cnt;
 		unsigned long long int cutoff;
 	} rdq;
libcfa/src/concurrency/kernel/startup.cfa
c86ee4c → b1a2c4a
@@ -478,5 +478,4 @@
 	state = Start;
 	self_cor{ info };
-	last_cpu = __kernel_getcpu();
 	curr_cor = &self_cor;
 	curr_cluster = mainCluster;
libcfa/src/concurrency/ready_queue.cfa
c86ee4c → b1a2c4a
@@ -345,10 +345,17 @@
 	/* paranoid */ verify(lanes.count < 65536); // The following code assumes max 65536 cores.
 	/* paranoid */ verify(map.count < 65536); // The following code assumes max 65536 cores.
 
-	if(0 == (__tls_rand() % 10_000)) {
-		proc->rdq.target = __tls_rand() % lanes.count;
+	uint64_t chaos = __tls_rand();
+	uint64_t high_chaos = (chaos >> 32);
+	uint64_t mid_chaos = (chaos >> 16) & 0xffff;
+	uint64_t low_chaos = chaos & 0xffff;
+
+	unsigned me = map.self;
+	unsigned cpu_chaos = map.start + (mid_chaos % map.count);
+	bool global = cpu_chaos == me;
+
+	if(global) {
+		proc->rdq.target = high_chaos % lanes.count;
 	} else {
-		unsigned cpu_chaos = map.start + (__tls_rand() % map.count);
-		proc->rdq.target = (cpu_chaos * READYQ_SHARD_FACTOR) + (__tls_rand() % READYQ_SHARD_FACTOR);
+		proc->rdq.target = (cpu_chaos * READYQ_SHARD_FACTOR) + (low_chaos % READYQ_SHARD_FACTOR);
 		/* paranoid */ verify(proc->rdq.target >= (map.start * READYQ_SHARD_FACTOR));
 		/* paranoid */ verify(proc->rdq.target < ((map.start + map.count) * READYQ_SHARD_FACTOR));
libcfa/src/concurrency/thread.cfa
c86ee4c → b1a2c4a
@@ -34,5 +34,4 @@
 	preempted = __NO_PREEMPTION;
 	corctx_flag = false;
-	last_cpu = __kernel_getcpu();
 	curr_cor = &self_cor;
 	self_mon.owner = &this;
Note:
See TracChangeset
for help on using the changeset viewer.