//
// Cforall Version 1.0.0 Copyright (C) 2019 University of Waterloo
//
// The contents of this file are covered under the licence agreement in the
// file "LICENCE" distributed with Cforall.
//
// ready_queue.cfa --
//
// Author           : Thierry Delisle
// Created On       : Mon Nov dd 16:29:18 2019
// Last Modified By :
// Last Modified On :
// Update Count     :
//

#define __cforall_thread__

// #define __CFA_DEBUG_PRINT_READY_QUEUE__


#define USE_AWARE_STEALING

#include "bits/defs.hfa"
#include "device/cpu.hfa"
#include "kernel/cluster.hfa"
#include "kernel/private.hfa"

// #include <errno.h>
// #include <unistd.h>

#include "ready_subqueue.hfa"

static const size_t cache_line_size = 64;

#if !defined(__CFA_NO_STATISTICS__)
	#define __STATS(...) __VA_ARGS__
#else
	#define __STATS(...)
#endif

static inline struct thread$ * try_pop(struct cluster * cltr, unsigned w __STATS(, __stats_readyQ_pop_t & stats));
static inline struct thread$ * try_pop(struct cluster * cltr, unsigned i, unsigned j __STATS(, __stats_readyQ_pop_t & stats));
static inline struct thread$ * search(struct cluster * cltr);

//=======================================================================
// Cforall Ready Queue used for scheduling
//=======================================================================
// void ?{}(__ready_queue_t & this) with (this) {
// 	lanes.data   = 0p;
// 	lanes.tscs   = 0p;
// 	lanes.caches = 0p;
// 	lanes.count  = 0;
// }

// void ^?{}(__ready_queue_t & this) with (this) {
// 	free(lanes.data);
// 	free(lanes.tscs);
// 	free(lanes.caches);
// }

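// Design overview: the ready queue is sharded into readyQ.count lanes, each an
// intrusive list protected by its own lock and stamped with the timestamp of
// its head element. Each processor works on a block of __shard_factor.readyq
// consecutive lanes starting at proc->rdq.id (e.g. with a shard factor of 2, a
// processor with rdq.id 4 uses lanes 4 and 5). Operations favour the local
// shard so lock contention stays rare, while the per-lane timestamps let
// processors detect and help lanes that have been neglected.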
//-----------------------------------------------------------------------
__attribute__((hot)) void push(struct cluster * cltr, struct thread$ * thrd, unpark_hint hint) with (cltr->sched) {
	struct processor * const proc = kernelTLS().this_processor;
	const bool external = (!proc) || (cltr != proc->cltr);
	const bool remote   = hint == UNPARK_REMOTE;
	const size_t lanes_count = readyQ.count;

	/* paranoid */ verify( __shard_factor.readyq > 0 );
	/* paranoid */ verify( lanes_count > 0 );

	unsigned i;
	if( external || remote ) {
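		// External and remote unparks bypass the processor-local shard and
		// instead target the lanes the thread last ran on, keeping its state
		// in a warm cache.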
		// Figure out where the thread was last time and make sure it's valid
		/* paranoid */ verify(thrd->preferred >= 0);
		unsigned start = thrd->preferred * __shard_factor.readyq;
		if(start < lanes_count) {
			do {
				unsigned r = __tls_rand();
				i = start + (r % __shard_factor.readyq);
				/* paranoid */ verify( i < lanes_count );
				// If we can't lock it retry
			} while( !__atomic_try_acquire( &readyQ.data[i].l.lock ) );
		} else {
			do {
				i = __tls_rand() % lanes_count;
			} while( !__atomic_try_acquire( &readyQ.data[i].l.lock ) );
		}
	} else {
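		// Local push: cycle round-robin through this processor's own shard of
		// lanes, so pushers only ever contend with thieves, not each other.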
		do {
			unsigned r = proc->rdq.its++;
			i = proc->rdq.id + (r % __shard_factor.readyq);
			/* paranoid */ verify( i < lanes_count );
			// If we can't lock it retry
		} while( !__atomic_try_acquire( &readyQ.data[i].l.lock ) );
	}

	// Actually push it
	push(readyQ.data[i], thrd);

	// Unlock and return
	__atomic_unlock( &readyQ.data[i].l.lock );

	#if !defined(__CFA_NO_STATISTICS__)
		if(unlikely(external || remote)) __atomic_fetch_add(&cltr->stats->ready.push.extrn.success, 1, __ATOMIC_RELAXED);
		else __tls_stats()->ready.push.local.success++;
	#endif
}

__attribute__((hot)) struct thread$ * pop_fast(struct cluster * cltr) with (cltr->sched) {
	const size_t lanes_count = readyQ.count;

	/* paranoid */ verify( __shard_factor.readyq > 0 );
	/* paranoid */ verify( lanes_count > 0 );
	/* paranoid */ verify( kernelTLS().this_processor );
	/* paranoid */ verify( kernelTLS().this_processor->rdq.id < lanes_count );

	struct processor * const proc = kernelTLS().this_processor;
	unsigned this = proc->rdq.id;
	/* paranoid */ verify( this < lanes_count );
	__cfadbg_print_safe(ready_queue, "Kernel : pop from %u\n", this);

	// Figure out which cache this processor's shard belongs to
	const unsigned this_cache = cache_id(cltr, this / __shard_factor.readyq);
	const unsigned long long ctsc = rdtscl();

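	// Helping heuristic: if no help target is armed, roll the dice. With
	// probability 3/256 (~1%) arm any lane; otherwise only arm a lane that
	// shares this processor's cache. The target is evaluated on a later pass.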
	if(proc->rdq.target == UINT_MAX) {
		uint64_t chaos = __tls_rand();
		unsigned ext = chaos & 0xff;
		unsigned other  = (chaos >> 8) % (lanes_count);

		if(ext < 3 || __atomic_load_n(&caches[other / __shard_factor.readyq].id, __ATOMIC_RELAXED) == this_cache) {
			proc->rdq.target = other;
		}
	}
	else {
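		// A target is armed: help it only if its head has been waiting longer
		// than the cutoff derived from the lanes' timestamp moving averages.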
		const unsigned target = proc->rdq.target;
		__cfadbg_print_safe(ready_queue, "Kernel : %u considering helping %u, tsc %llu\n", this, target, readyQ.tscs[target].t.tv);
		/* paranoid */ verify( readyQ.tscs[target].t.tv != ULLONG_MAX );
		if(target < lanes_count) {
			const __readyQ_avg_t cutoff = calc_cutoff(ctsc, proc->rdq.id, lanes_count, cltr->sched.readyQ.data, cltr->sched.readyQ.tscs, __shard_factor.readyq, true);
			const __readyQ_avg_t age = moving_average(ctsc, readyQ.tscs[target].t.tv, readyQ.tscs[target].t.ma, false);
			__cfadbg_print_safe(ready_queue, "Kernel : Help attempt on %u from %u, age %'llu vs cutoff %'llu, %s\n", target, this, age, cutoff, age > cutoff ? "yes" : "no");
			if(age > cutoff) {
				thread$ * t = try_pop(cltr, target __STATS(, __tls_stats()->ready.pop.help));
				if(t) return t;
			}
		}
		proc->rdq.target = UINT_MAX;
	}

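	// No helping needed (or it failed): pop from this processor's own shard,
	// cycling through its lanes with the rdq.itr counter.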
	for(__shard_factor.readyq) {
		unsigned i = this + (proc->rdq.itr++ % __shard_factor.readyq);
		if(thread$ * t = try_pop(cltr, i __STATS(, __tls_stats()->ready.pop.local))) return t;
	}

	// All lanes were empty; return 0p
	return 0p;
}
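// The three pop variants form a hierarchy of effort: pop_fast checks only the
// local shard (plus the occasional helping attempt), pop_slow steals from one
// random lane, and pop_search sweeps every lane as a last resort.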
__attribute__((hot)) struct thread$ * pop_slow(struct cluster * cltr) {
	unsigned i = __tls_rand() % (cltr->sched.readyQ.count);
	return try_pop(cltr, i __STATS(, __tls_stats()->ready.pop.steal));
}
__attribute__((hot)) struct thread$ * pop_search(struct cluster * cltr) {
	return search(cltr);
}

//=======================================================================
// Various Ready Queue utilities
//=======================================================================
// these functions work the same, or almost the same,
// whether the scheduler uses work-stealing or relaxed-FIFO scheduling

//-----------------------------------------------------------------------
// try to pop from a lane given by index w
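// The pattern is check, lock, re-check: the unlocked emptiness test keeps
// lock traffic off lanes that look empty, and the second test under the lock
// catches a pop that raced in between. On any failure the caller simply tries
// another lane.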
static inline struct thread$ * try_pop(struct cluster * cltr, unsigned w __STATS(, __stats_readyQ_pop_t & stats)) with (cltr->sched) {
	/* paranoid */ verify( w < readyQ.count );
	__STATS( stats.attempt++; )

	// Get relevant elements locally
	__intrusive_lane_t & lane = readyQ.data[w];

	// If the list looks empty, bail out; the caller will retry elsewhere
	if( is_empty(lane) ) {
		return 0p;
	}

	// If we can't get the lock, bail out; the caller will retry elsewhere
	if( !__atomic_try_acquire(&lane.l.lock) ) {
		return 0p;
	}

	// If the list is actually empty, unlock and bail out
	if( is_empty(lane) ) {
		__atomic_unlock(&lane.l.lock);
		return 0p;
	}

	// Actually pop the list
	struct thread$ * thrd;
	unsigned long long ts_prev = ts(lane);
	unsigned long long ts_next;
	[thrd, ts_next] = pop(lane);

	/* paranoid */ verify(thrd);
	/* paranoid */ verify(ts_next);
	/* paranoid */ verify(lane.l.lock);

	// Unlock and return
	__atomic_unlock(&lane.l.lock);

	// Update statistics
	__STATS( stats.success++; )

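	// Record the new head timestamp in the lane's moving average and remember
	// this shard as the thread's preferred home for future external pushes.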
	touch_tsc(readyQ.tscs, w, ts_prev, ts_next, true);

	thrd->preferred = w / __shard_factor.readyq;

	// return the popped thread
	return thrd;
}

//-----------------------------------------------------------------------
// try to pop from all lanes, making sure no thread pushed
// before the start of the function is missed
static inline struct thread$ * search(struct cluster * cltr) {
	const size_t lanes_count = cltr->sched.readyQ.count;
	/* paranoid */ verify( lanes_count > 0 );
	const unsigned count = lanes_count;
	unsigned offset = __tls_rand();
	for(i; count) {
		unsigned idx = (offset + i) % count;
		struct thread$ * thrd = try_pop(cltr, idx __STATS(, __tls_stats()->ready.pop.search));
		if(thrd) {
			return thrd;
		}
	}

	// All lanes were empty; return 0p
	return 0p;
}

//-----------------------------------------------------------------------
// get the preferred ready-queue shard for a new thread (inherited from the
// creating thread)
unsigned ready_queue_new_preferred() {
	unsigned pref = UINT_MAX;
	if(struct thread$ * thrd = publicTLS_get( this_thread )) {
		pref = thrd->preferred;
	}

	return pref;
}

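// A variant of the "power of two choices" heuristic: comparing the head
// timestamps of two candidate lanes and popping from the older one keeps
// lane ages far more balanced than a single random pick would.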
//-----------------------------------------------------------------------
// Given 2 indexes, pick the list with the oldest push and try to pop from it
static inline struct thread$ * try_pop(struct cluster * cltr, unsigned i, unsigned j __STATS(, __stats_readyQ_pop_t & stats)) with (cltr->sched) {
	// Pick the best list
	unsigned w = i;
	if( __builtin_expect(!is_empty(readyQ.data[j]), true) ) {
		w = (ts(readyQ.data[i]) < ts(readyQ.data[j])) ? i : j;
	}

	return try_pop(cltr, w __STATS(, stats));
}