//
// Cforall Version 1.0.0 Copyright (C) 2019 University of Waterloo
//
// The contents of this file are covered under the licence agreement in the
// file "LICENCE" distributed with Cforall.
//
// ready_queue.cfa --
//
// Author           : Thierry Delisle
// Created On       : Mon Nov dd 16:29:18 2019
// Last Modified By :
// Last Modified On :
// Update Count     :
//

#define __cforall_thread__
#define _GNU_SOURCE

// #define __CFA_DEBUG_PRINT_READY_QUEUE__

#define USE_AWARE_STEALING

#include "bits/defs.hfa"
#include "device/cpu.hfa"
#include "kernel/cluster.hfa"
#include "kernel/private.hfa"

// #include
// #include

#include "ready_subqueue.hfa"

static const size_t cache_line_size = 64;

#if !defined(__CFA_NO_STATISTICS__)
	#define __STATS(...) __VA_ARGS__
#else
	#define __STATS(...)
#endif

static inline struct thread$ * try_pop(struct cluster * cltr, unsigned w __STATS(, __stats_readyQ_pop_t & stats));
static inline struct thread$ * try_pop(struct cluster * cltr, unsigned i, unsigned j __STATS(, __stats_readyQ_pop_t & stats));
static inline struct thread$ * search(struct cluster * cltr);

//=======================================================================
// Cforall Ready Queue used for scheduling
//=======================================================================
// void ?{}(__ready_queue_t & this) with (this) {
// 	lanes.data   = 0p;
// 	lanes.tscs   = 0p;
// 	lanes.caches = 0p;
// 	lanes.count  = 0;
// }
// void ^?{}(__ready_queue_t & this) with (this) {
// 	free(lanes.data);
// 	free(lanes.tscs);
// 	free(lanes.caches);
// }

//-----------------------------------------------------------------------
// Push a thread onto one of the cluster's ready lanes, picking the lane
// from the thread's preferred block (external/remote pushes) or from the
// current processor's block (local pushes).
__attribute__((hot)) void push(struct cluster * cltr, struct thread$ * thrd, unpark_hint hint) with (cltr->sched) {
	struct processor * const proc = kernelTLS().this_processor;
	const bool external = (!proc) || (cltr != proc->cltr);
	const bool remote   = hint == UNPARK_REMOTE;
	const size_t lanes_count = readyQ.count;

	/* paranoid */ verify( __shard_factor.readyq > 0 );
	/* paranoid */ verify( lanes_count > 0 );

	unsigned i;
	if( external || remote ) {
		// Figure out where the thread ran last time and make sure the lane is valid
		/* paranoid */ verify(thrd->preferred >= 0);
		unsigned start = thrd->preferred * __shard_factor.readyq;
		if(start < lanes_count) {
			// Pick a random lane within the thread's preferred block
			do {
				unsigned r = __tls_rand();
				i = start + (r % __shard_factor.readyq);
				/* paranoid */ verify( i < lanes_count );
				// If we can't lock it, retry
			} while( !__atomic_try_acquire( &readyQ.data[i].l.lock ) );
		} else {
			// No valid preference, pick any lane at random
			do {
				i = __tls_rand() % lanes_count;
			} while( !__atomic_try_acquire( &readyQ.data[i].l.lock ) );
		}
	} else {
		// Local push: round-robin within this processor's block of lanes
		do {
			unsigned r = proc->rdq.its++;
			i = proc->rdq.id + (r % __shard_factor.readyq);
			/* paranoid */ verify( i < lanes_count );
			// If we can't lock it, retry
		} while( !__atomic_try_acquire( &readyQ.data[i].l.lock ) );
	}

	// Actually push it
	push(readyQ.data[i], thrd);

	// Unlock and return
	__atomic_unlock( &readyQ.data[i].l.lock );

	#if !defined(__CFA_NO_STATISTICS__)
		if(unlikely(external || remote)) __atomic_fetch_add(&cltr->stats->ready.push.extrn.success, 1, __ATOMIC_RELAXED);
		else __tls_stats()->ready.push.local.success++;
	#endif
}
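//-----------------------------------------------------------------------
// Fast-path pop used by a processor on its own cluster. Each processor
// owns a block of __shard_factor.readyq consecutive lanes starting at
// proc->rdq.id. Before falling back to its own block, the processor
// periodically "helps" a randomly chosen target lane, but only once that
// lane's entries have aged past a cutoff; this bounds how long a lane
// without a dedicated processor can be ignored.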
__attribute__((hot)) struct thread$ * pop_fast(struct cluster * cltr) with (cltr->sched) {
	const size_t lanes_count = readyQ.count;

	/* paranoid */ verify( __shard_factor.readyq > 0 );
	/* paranoid */ verify( lanes_count > 0 );
	/* paranoid */ verify( kernelTLS().this_processor );
	/* paranoid */ verify( kernelTLS().this_processor->rdq.id < lanes_count );

	struct processor * const proc = kernelTLS().this_processor;
	unsigned this = proc->rdq.id;
	/* paranoid */ verify( this < lanes_count );
	__cfadbg_print_safe(ready_queue, "Kernel : pop from %u\n", this);

	// Figure out which cache this processor's block of lanes belongs to
	const unsigned this_cache = cache_id(cltr, this / __shard_factor.readyq);
	const unsigned long long ctsc = rdtscl();

	if(proc->rdq.target == UINT_MAX) {
		// No help target yet: pick a random lane, but only adopt it as a
		// target if it shares this cache (or, with small probability ~3/256,
		// regardless, to allow cross-cache helping)
		uint64_t chaos = __tls_rand();
		unsigned ext = chaos & 0xff;
		unsigned other = (chaos >> 8) % (lanes_count);

		if(ext < 3 || __atomic_load_n(&caches[other / __shard_factor.readyq].id, __ATOMIC_RELAXED) == this_cache) {
			proc->rdq.target = other;
		}
	}
	else {
		const unsigned target = proc->rdq.target;
		// Check the target is still valid (lanes may have shrunk) before
		// touching its timestamps
		if(target < lanes_count) {
			__cfadbg_print_safe(ready_queue, "Kernel : %u considering helping %u, tsc %llu\n", this, target, readyQ.tscs[target].t.tv);
			/* paranoid */ verify( readyQ.tscs[target].t.tv != ULLONG_MAX );
			const __readyQ_avg_t cutoff = calc_cutoff(ctsc, proc->rdq.id, lanes_count, cltr->sched.readyQ.data, cltr->sched.readyQ.tscs, __shard_factor.readyq, true);
			const __readyQ_avg_t age = moving_average(ctsc, readyQ.tscs[target].t.tv, readyQ.tscs[target].t.ma, false);
			__cfadbg_print_safe(ready_queue, "Kernel : Help attempt on %u from %u, age %'llu vs cutoff %'llu, %s\n", target, this, age, cutoff, age > cutoff ? "yes" : "no");
			// Help the target lane only if its entries have aged past the cutoff
			if(age > cutoff) {
				thread$ * t = try_pop(cltr, target __STATS(, __tls_stats()->ready.pop.help));
				if(t) return t;
			}
		}
		proc->rdq.target = UINT_MAX;
	}

	// Try each lane in this processor's block, round-robin
	for(__shard_factor.readyq) {
		unsigned i = this + (proc->rdq.itr++ % __shard_factor.readyq);
		if(thread$ * t = try_pop(cltr, i __STATS(, __tls_stats()->ready.pop.local))) return t;
	}

	// All lanes were empty, return 0p
	return 0p;
}

__attribute__((hot)) struct thread$ * pop_slow(struct cluster * cltr) {
	unsigned i = __tls_rand() % (cltr->sched.readyQ.count);
	return try_pop(cltr, i __STATS(, __tls_stats()->ready.pop.steal));
}

__attribute__((hot)) struct thread$ * pop_search(struct cluster * cltr) {
	return search(cltr);
}

//=======================================================================
// Various Ready Queue utilities
//=======================================================================
// these functions work the same, or almost the same,
// whether they are using work-stealing or relaxed-FIFO scheduling

//-----------------------------------------------------------------------
// try to pop from a lane given by index w
static inline struct thread$ * try_pop(struct cluster * cltr, unsigned w __STATS(, __stats_readyQ_pop_t & stats)) with (cltr->sched) {
	/* paranoid */ verify( w < readyQ.count );
	__STATS( stats.attempt++; )

	// Get relevant elements locally
	__intrusive_lane_t & lane = readyQ.data[w];

	// If list looks empty, bail out; the caller retries elsewhere
	if( is_empty(lane) ) {
		return 0p;
	}

	// If we can't get the lock, bail out; the caller retries elsewhere
	if( !__atomic_try_acquire(&lane.l.lock) ) {
		return 0p;
	}

	// If list is actually empty, unlock and bail out
	if( is_empty(lane) ) {
		__atomic_unlock(&lane.l.lock);
		return 0p;
	}

	// Actually pop the list
	struct thread$ * thrd;
	unsigned long long ts_prev = ts(lane);
	unsigned long long ts_next;
	[thrd, ts_next] = pop(lane);

	/* paranoid */ verify(thrd);
	/* paranoid */ verify(ts_next);
	/* paranoid */ verify(lane.l.lock);

	// Unlock and return
	__atomic_unlock(&lane.l.lock);

	// Update statistics
	__STATS( stats.success++; )
	touch_tsc(readyQ.tscs, w, ts_prev, ts_next, true);

	// Remember which block this thread came from
	thrd->preferred = w / __shard_factor.readyq;

	// return the popped thread
	return thrd;
}
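//-----------------------------------------------------------------------
// Note on aging: touch_tsc() (from kernel/cluster.hfa) folds the age of
// the popped element into the lane's moving average, which pop_fast()
// later compares against calc_cutoff() when deciding whether a lane
// deserves help. A minimal sketch of the idea, assuming a fixed-weight
// exponential average (the real weights live in kernel/cluster.hfa):
//
//     age    = ctsc - ts_prev;          // how stale the popped head was
//     new_ma = (3 * old_ma + age) / 4;  // hypothetical 3:1 weighting
//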
//-----------------------------------------------------------------------
// try to pop from any lane, making sure not to miss any thread pushed
// before the start of the function
static inline struct thread$ * search(struct cluster * cltr) {
	const size_t lanes_count = cltr->sched.readyQ.count;
	/* paranoid */ verify( lanes_count > 0 );
	const unsigned count = lanes_count;
	// Start from a random offset so concurrent searchers spread out
	unsigned offset = __tls_rand();
	for(i; count) {
		unsigned idx = (offset + i) % count;
		struct thread$ * thrd = try_pop(cltr, idx __STATS(, __tls_stats()->ready.pop.search));
		if(thrd) {
			return thrd;
		}
	}

	// All lanes were empty, return 0p
	return 0p;
}

//-----------------------------------------------------------------------
// get the preferred ready queue for a new thread
unsigned ready_queue_new_preferred() {
	unsigned pref = UINT_MAX;
	if(struct thread$ * thrd = publicTLS_get( this_thread )) {
		pref = thrd->preferred;
	}
	return pref;
}

//-----------------------------------------------------------------------
// Given 2 indexes, pick the list with the oldest push and try to pop from it
static inline struct thread$ * try_pop(struct cluster * cltr, unsigned i, unsigned j __STATS(, __stats_readyQ_pop_t & stats)) with (cltr->sched) {
	// Pick the best list: prefer the one whose head was pushed longest ago
	unsigned w = i;
	if( __builtin_expect(!is_empty(readyQ.data[j]), true) ) {
		w = (ts(readyQ.data[i]) < ts(readyQ.data[j])) ? i : j;
	}

	return try_pop(cltr, w __STATS(, stats));
}
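//-----------------------------------------------------------------------
// A rough sketch of how the kernel is expected to drive these entry points
// (the actual loop lives in kernel.cfa; the ordering below is illustrative):
//
//     thread$ * t = pop_fast(cltr);    // common case: own block + helping
//     if(!t) t = pop_slow(cltr);       // single random steal attempt
//     if(!t) t = pop_search(cltr);     // full sweep of all lanes before idling
//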