//
// Cforall Version 1.0.0 Copyright (C) 2019 University of Waterloo
//
// The contents of this file are covered under the licence agreement in the
// file "LICENCE" distributed with Cforall.
//
// ready_queue.cfa --
//
// Author           : Thierry Delisle
// Created On       : Mon Nov dd 16:29:18 2019
// Last Modified By :
// Last Modified On :
// Update Count     :
//

#define __cforall_thread__
// #define __CFA_DEBUG_PRINT_READY_QUEUE__

// #define USE_MPSC

#define USE_RELAXED_FIFO
// #define USE_WORK_STEALING

#include "bits/defs.hfa"
#include "kernel_private.hfa"

#define _GNU_SOURCE
#include "stdlib.hfa"
#include "math.hfa"

#include <unistd.h>

#include "ready_subqueue.hfa"

static const size_t cache_line_size = 64;

#if !defined(__CFA_NO_STATISTICS__)
	#define __STATS(...) __VA_ARGS__
#else
	#define __STATS(...)
#endif

// No overridden function, no environment variable, no define :
// fall back to a magic number
#ifndef __CFA_MAX_PROCESSORS__
	#define __CFA_MAX_PROCESSORS__ 1024
#endif

#if defined(USE_RELAXED_FIFO)
	#define BIAS 4
	#define READYQ_SHARD_FACTOR 4
	#define SEQUENTIAL_SHARD 1
#elif defined(USE_WORK_STEALING)
	#define READYQ_SHARD_FACTOR 2
	#define SEQUENTIAL_SHARD 2
#else
	#error no scheduling strategy selected
#endif
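// READYQ_SHARD_FACTOR : number of ready-queue lanes allocated per processor.
// SEQUENTIAL_SHARD    : number of lanes used while the cluster has fewer than 2 processors.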

static inline struct $thread * try_pop(struct cluster * cltr, unsigned w __STATS(, __stats_readyQ_pop_t & stats));
static inline struct $thread * try_pop(struct cluster * cltr, unsigned i, unsigned j __STATS(, __stats_readyQ_pop_t & stats));
static inline struct $thread * search(struct cluster * cltr);
static inline [unsigned, bool] idx_from_r(unsigned r, unsigned preferred);


// returns the maximum number of processors the RWLock supports
__attribute__((weak)) unsigned __max_processors() {
	const char * max_cores_s = getenv("CFA_MAX_PROCESSORS");
	if(!max_cores_s) {
		__cfadbg_print_nolock(ready_queue, "No CFA_MAX_PROCESSORS in ENV\n");
		return __CFA_MAX_PROCESSORS__;
	}

	char * endptr = 0p;
	long int max_cores_l = strtol(max_cores_s, &endptr, 10);
	if(max_cores_l < 1 || max_cores_l > 65535) {
		__cfadbg_print_nolock(ready_queue, "CFA_MAX_PROCESSORS out of range : %ld\n", max_cores_l);
		return __CFA_MAX_PROCESSORS__;
	}
	if('\0' != *endptr) {
		__cfadbg_print_nolock(ready_queue, "CFA_MAX_PROCESSORS not a decimal number : %s\n", max_cores_s);
		return __CFA_MAX_PROCESSORS__;
	}

	return max_cores_l;
}

//=======================================================================
// Cluster wide reader-writer lock
//=======================================================================
void ?{}(__scheduler_RWLock_t & this) {
	this.max   = __max_processors();
	this.alloc = 0;
	this.ready = 0;
	this.lock  = false;
	this.data  = alloc(this.max);

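	// Sanity check : entries must be cache-line aligned so the per-processor lock words do not share cache lines.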
	/*paranoid*/ verify( 0 == (((uintptr_t)(this.data    )) % 64) );
	/*paranoid*/ verify( 0 == (((uintptr_t)(this.data + 1)) % 64) );
	/*paranoid*/ verify(__atomic_is_lock_free(sizeof(this.alloc), &this.alloc));
	/*paranoid*/ verify(__atomic_is_lock_free(sizeof(this.ready), &this.ready));

}
void ^?{}(__scheduler_RWLock_t & this) {
	free(this.data);
}

void ?{}( __scheduler_lock_id_t & this, __processor_id_t * proc ) {
	this.handle = proc;
	this.lock   = false;
	#ifdef __CFA_WITH_VERIFY__
		this.owned = false;
	#endif
}

//=======================================================================
// Lock-Free registering/unregistering of threads
void register_proc_id( struct __processor_id_t * proc ) with(*__scheduler_lock) {
	__cfadbg_print_safe(ready_queue, "Kernel : Registering proc %p for RW-Lock\n", proc);

	// Step - 1 : check if there is already space in the data
	uint_fast32_t s = ready;

	// Check among all the ready
	for(uint_fast32_t i = 0; i < s; i++) {
		__processor_id_t * null = 0p; // Re-write every loop since compare thrashes it
		if( __atomic_load_n(&data[i].handle, (int)__ATOMIC_RELAXED) == null
			&& __atomic_compare_exchange_n( &data[i].handle, &null, proc, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) {
			/*paranoid*/ verify(i < ready);
			/*paranoid*/ verify(0 == (__alignof__(data[i]) % cache_line_size));
			/*paranoid*/ verify((((uintptr_t)&data[i]) % cache_line_size) == 0);
			proc->id = i;
			return;
		}
	}

	if(max <= alloc) abort("Trying to create more than %ud processors", __scheduler_lock->max);

	// Step - 2 : F&A to get a new spot in the array.
	uint_fast32_t n = __atomic_fetch_add(&alloc, 1, __ATOMIC_SEQ_CST);
	if(max <= n) abort("Trying to create more than %ud processors", __scheduler_lock->max);

	// Step - 3 : Mark space as used and then publish it.
	__scheduler_lock_id_t * storage = (__scheduler_lock_id_t *)&data[n];
	(*storage){ proc };
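	// Publish in order : wait until all earlier slots are published (ready == n), then bump ready to n + 1.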
	while() {
		unsigned copy = n;
		if( __atomic_load_n(&ready, __ATOMIC_RELAXED) == n
			&& __atomic_compare_exchange_n(&ready, &copy, n + 1, true, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
			break;
		Pause();
	}

	__cfadbg_print_safe(ready_queue, "Kernel : Registering proc %p done, id %lu\n", proc, n);

	// Return new spot.
	/*paranoid*/ verify(n < ready);
	/*paranoid*/ verify(__alignof__(data[n]) == (2 * cache_line_size));
	/*paranoid*/ verify((((uintptr_t)&data[n]) % cache_line_size) == 0);
	proc->id = n;
}

void unregister_proc_id( struct __processor_id_t * proc ) with(*__scheduler_lock) {
	unsigned id = proc->id;
	/*paranoid*/ verify(id < ready);
	/*paranoid*/ verify(proc == __atomic_load_n(&data[id].handle, __ATOMIC_RELAXED));
	__atomic_store_n(&data[id].handle, 0p, __ATOMIC_RELEASE);

	__cfadbg_print_safe(ready_queue, "Kernel : Unregister proc %p\n", proc);
}

//-----------------------------------------------------------------------
// Writer side : acquire when changing the ready queue, e.g. adding more
// queues or removing them.
uint_fast32_t ready_mutate_lock( void ) with(*__scheduler_lock) {
	/* paranoid */ verify( ! __preemption_enabled() );

	// Step 1 : lock global lock
	// It is needed to prevent processors that register mid critical-section
	// from simply locking their own lock and entering.
	__atomic_acquire( &lock );

	// Step 2 : lock per-proc lock
	// Processors that are currently being registered aren't counted
	// but can't be in read_lock or in the critical section.
	// All other processors are counted
	uint_fast32_t s = ready;
	for(uint_fast32_t i = 0; i < s; i++) {
		__atomic_acquire( &data[i].lock );
	}

	/* paranoid */ verify( ! __preemption_enabled() );
	return s;
}

void ready_mutate_unlock( uint_fast32_t last_s ) with(*__scheduler_lock) {
	/* paranoid */ verify( ! __preemption_enabled() );

	// Step 1 : release local locks
	// This must be done while the global lock is held to avoid
	// threads that were created mid critical-section
	// racing to lock their local locks and having the writer
	// immediately unlock them.
	// Alternative solution : return s in write_lock and pass it to write_unlock
	for(uint_fast32_t i = 0; i < last_s; i++) {
		verify(data[i].lock);
		__atomic_store_n(&data[i].lock, (bool)false, __ATOMIC_RELEASE);
	}

	// Step 2 : release global lock
	/*paranoid*/ assert(true == lock);
	__atomic_store_n(&lock, (bool)false, __ATOMIC_RELEASE);

	/* paranoid */ verify( ! __preemption_enabled() );
}

//=======================================================================
// Cforall Ready Queue used for scheduling
//=======================================================================
void ?{}(__ready_queue_t & this) with (this) {
	lanes.data  = 0p;
	lanes.tscs  = 0p;
	lanes.count = 0;
}

void ^?{}(__ready_queue_t & this) with (this) {
	verify( SEQUENTIAL_SHARD == lanes.count );
	free(lanes.data);
	free(lanes.tscs);
}

//-----------------------------------------------------------------------
#if defined(USE_RELAXED_FIFO)
	//-----------------------------------------------------------------------
	// get index from random number with or without bias towards queues
	static inline [unsigned, bool] idx_from_r(unsigned r, unsigned preferred) {
		unsigned i;
		bool local;
		unsigned rlow  = r % BIAS;
		unsigned rhigh = r / BIAS;
		if((0 != rlow) && preferred >= 0) {
			// (BIAS - 1) out of BIAS chances
			// Use preferred queues
			i = preferred + (rhigh % READYQ_SHARD_FACTOR);
			local = true;
		}
		else {
			// 1 out of BIAS chances
			// Use all queues
			i = rhigh;
			local = false;
		}
		return [i, local];
	}

	__attribute__((hot)) void push(struct cluster * cltr, struct $thread * thrd) with (cltr->ready_queue) {
		__cfadbg_print_safe(ready_queue, "Kernel : Pushing %p on cluster %p\n", thrd, cltr);

		const bool external = (!kernelTLS().this_processor) || (cltr != kernelTLS().this_processor->cltr);
		/* paranoid */ verify(external || kernelTLS().this_processor->rdq.id < lanes.count );

		// write timestamp
		thrd->link.ts = rdtscl();

		bool local;
		int preferred = external ? -1 : kernelTLS().this_processor->rdq.id;

		// Try to pick a lane and lock it
		unsigned i;
		do {
			// Pick the index of a lane
			unsigned r = __tls_rand_fwd();
			[i, local] = idx_from_r(r, preferred);

			i %= __atomic_load_n( &lanes.count, __ATOMIC_RELAXED );

			#if !defined(__CFA_NO_STATISTICS__)
				if(unlikely(external)) __atomic_fetch_add(&cltr->stats->ready.push.extrn.attempt, 1, __ATOMIC_RELAXED);
				else if(local) __tls_stats()->ready.push.local.attempt++;
				else __tls_stats()->ready.push.share.attempt++;
			#endif

		#if defined(USE_MPSC)
			// mpsc always succeeds
		} while( false );
		#else
			// If we can't lock it retry
		} while( !__atomic_try_acquire( &lanes.data[i].lock ) );
		#endif

		// Actually push it
		push(lanes.data[i], thrd);

		#if !defined(USE_MPSC)
			// Unlock and return
			__atomic_unlock( &lanes.data[i].lock );
		#endif

		// Mark the current index in the tls rng instance as having an item
		__tls_rand_advance_bck();

		__cfadbg_print_safe(ready_queue, "Kernel : Pushed %p on cluster %p (idx: %u, mask %llu, first %d)\n", thrd, cltr, i, used.mask[0], lane_first);

		// Update statistics
		#if !defined(__CFA_NO_STATISTICS__)
			if(unlikely(external)) __atomic_fetch_add(&cltr->stats->ready.push.extrn.success, 1, __ATOMIC_RELAXED);
			else if(local) __tls_stats()->ready.push.local.success++;
			else __tls_stats()->ready.push.share.success++;
		#endif
	}

	// Pop from the ready queue of a given cluster
	__attribute__((hot)) $thread * pop_fast(struct cluster * cltr) with (cltr->ready_queue) {
		/* paranoid */ verify( lanes.count > 0 );
		/* paranoid */ verify( kernelTLS().this_processor );
		/* paranoid */ verify( kernelTLS().this_processor->rdq.id < lanes.count );

		unsigned count = __atomic_load_n( &lanes.count, __ATOMIC_RELAXED );
		int preferred = kernelTLS().this_processor->rdq.id;


		// As long as the list is not empty, try finding a lane that isn't empty and pop from it
		for(25) {
			// Pick two lists at random
			unsigned ri = __tls_rand_bck();
			unsigned rj = __tls_rand_bck();

			unsigned i, j;
			__attribute__((unused)) bool locali, localj;
			[i, locali] = idx_from_r(ri, preferred);
			[j, localj] = idx_from_r(rj, preferred);

			i %= count;
			j %= count;

			// try popping from the 2 picked lists
			struct $thread * thrd = try_pop(cltr, i, j __STATS(, *(locali || localj ? &__tls_stats()->ready.pop.local : &__tls_stats()->ready.pop.help)));
			if(thrd) {
				return thrd;
			}
		}

		// All lanes were empty, return 0p
		return 0p;
	}

	__attribute__((hot)) struct $thread * pop_slow(struct cluster * cltr) { return pop_fast(cltr); }
	__attribute__((hot)) struct $thread * pop_search(struct cluster * cltr) {
		return search(cltr);
	}
#endif
#if defined(USE_WORK_STEALING)
	__attribute__((hot)) void push(struct cluster * cltr, struct $thread * thrd) with (cltr->ready_queue) {
		__cfadbg_print_safe(ready_queue, "Kernel : Pushing %p on cluster %p\n", thrd, cltr);

		const bool external = (!kernelTLS().this_processor) || (cltr != kernelTLS().this_processor->cltr);
		/* paranoid */ verify(external || kernelTLS().this_processor->rdq.id < lanes.count );

		// write timestamp
		thrd->link.ts = rdtscl();

		// Try to pick a lane and lock it
		unsigned i;
		do {
			#if !defined(__CFA_NO_STATISTICS__)
				if(unlikely(external)) __atomic_fetch_add(&cltr->stats->ready.push.extrn.attempt, 1, __ATOMIC_RELAXED);
				else __tls_stats()->ready.push.local.attempt++;
			#endif

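			// External pushes pick a random lane; local pushes rotate through this processor's own READYQ_SHARD_FACTOR shards.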
			if(unlikely(external)) {
				i = __tls_rand() % lanes.count;
			}
			else {
				processor * proc = kernelTLS().this_processor;
				unsigned r = proc->rdq.its++;
				i = proc->rdq.id + (r % READYQ_SHARD_FACTOR);
			}


		#if defined(USE_MPSC)
			// mpsc always succeeds
		} while( false );
		#else
			// If we can't lock it retry
		} while( !__atomic_try_acquire( &lanes.data[i].lock ) );
		#endif

		// Actually push it
		push(lanes.data[i], thrd);

		#if !defined(USE_MPSC)
			// Unlock and return
			__atomic_unlock( &lanes.data[i].lock );
		#endif

		#if !defined(__CFA_NO_STATISTICS__)
			if(unlikely(external)) __atomic_fetch_add(&cltr->stats->ready.push.extrn.success, 1, __ATOMIC_RELAXED);
			else __tls_stats()->ready.push.local.success++;
		#endif

		__cfadbg_print_safe(ready_queue, "Kernel : Pushed %p on cluster %p (idx: %u, mask %llu, first %d)\n", thrd, cltr, i, used.mask[0], lane_first);
	}

	// Pop from the ready queue of a given cluster
	__attribute__((hot)) $thread * pop_fast(struct cluster * cltr) with (cltr->ready_queue) {
		/* paranoid */ verify( lanes.count > 0 );
		/* paranoid */ verify( kernelTLS().this_processor );
		/* paranoid */ verify( kernelTLS().this_processor->rdq.id < lanes.count );

		processor * proc = kernelTLS().this_processor;

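		// Stealing heuristic : one call picks a random victim lane and records a cutoff timestamp
		// taken from two of this processor's own shards; the next call tries the victim only if its
		// oldest element is older than that cutoff. Either way, the local shards are tried below.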
		if(proc->rdq.target == -1u) {
			proc->rdq.target = __tls_rand() % lanes.count;
			unsigned it1  = proc->rdq.itr;
			unsigned it2  = proc->rdq.itr + 1;
			unsigned idx1 = proc->rdq.id + (it1 % READYQ_SHARD_FACTOR);
			unsigned idx2 = proc->rdq.id + (it2 % READYQ_SHARD_FACTOR);
			unsigned long long tsc1 = ts(lanes.data[idx1]);
			unsigned long long tsc2 = ts(lanes.data[idx2]);
			proc->rdq.cutoff = min(tsc1, tsc2);
			if(proc->rdq.cutoff == 0) proc->rdq.cutoff = -1ull;
		}
		else {
			unsigned target = proc->rdq.target;
			proc->rdq.target = -1u;
			if(lanes.tscs[target].tv < proc->rdq.cutoff) {
				$thread * t = try_pop(cltr, target __STATS(, __tls_stats()->ready.pop.help));
				if(t) return t;
			}
		}

		for(READYQ_SHARD_FACTOR) {
			unsigned i = proc->rdq.id + (--proc->rdq.itr % READYQ_SHARD_FACTOR);
			if($thread * t = try_pop(cltr, i __STATS(, __tls_stats()->ready.pop.local))) return t;
		}
		return 0p;
	}

	__attribute__((hot)) struct $thread * pop_slow(struct cluster * cltr) with (cltr->ready_queue) {
		unsigned i = __tls_rand() % lanes.count;
		return try_pop(cltr, i __STATS(, __tls_stats()->ready.pop.steal));
	}

	__attribute__((hot)) struct $thread * pop_search(struct cluster * cltr) with (cltr->ready_queue) {
		return search(cltr);
	}
#endif

//=======================================================================
// Various Ready Queue utilities
//=======================================================================
// these functions work the same, or almost the same,
// whether they are using work-stealing or relaxed-fifo scheduling

//-----------------------------------------------------------------------
// try to pop from a lane given by index w
static inline struct $thread * try_pop(struct cluster * cltr, unsigned w __STATS(, __stats_readyQ_pop_t & stats)) with (cltr->ready_queue) {
	__STATS( stats.attempt++; )

	// Get relevant elements locally
	__intrusive_lane_t & lane = lanes.data[w];

	// If list looks empty retry
	if( is_empty(lane) ) {
		__STATS( stats.espec++; )
		return 0p;
	}

	// If we can't get the lock retry
	if( !__atomic_try_acquire(&lane.lock) ) {
		__STATS( stats.elock++; )
		return 0p;
	}

	// If list is empty, unlock and retry
	if( is_empty(lane) ) {
		__atomic_unlock(&lane.lock);
		__STATS( stats.eempty++; )
		return 0p;
	}

	// Actually pop the list
	struct $thread * thrd;
	thrd = pop(lane);

	/* paranoid */ verify(thrd);
	/* paranoid */ verify(lane.lock);

	// Unlock and return
	__atomic_unlock(&lane.lock);

	// Update statistics
	__STATS( stats.success++; )

	#if defined(USE_WORK_STEALING)
		lanes.tscs[w].tv = thrd->link.ts;
	#endif

	// return the popped thread
	return thrd;
}

//-----------------------------------------------------------------------
// try to pop from any lane, making sure you don't miss any threads pushed
// before the start of the function
static inline struct $thread * search(struct cluster * cltr) with (cltr->ready_queue) {
	/* paranoid */ verify( lanes.count > 0 );
	unsigned count = __atomic_load_n( &lanes.count, __ATOMIC_RELAXED );
	unsigned offset = __tls_rand();
	for(i; count) {
		unsigned idx = (offset + i) % count;
		struct $thread * thrd = try_pop(cltr, idx __STATS(, __tls_stats()->ready.pop.search));
		if(thrd) {
			return thrd;
		}
	}

	// All lanes were empty, return 0p
	return 0p;
}

//-----------------------------------------------------------------------
// Check that all the intrusive queues in the data structure are still consistent
static void check( __ready_queue_t & q ) with (q) {
	#if defined(__CFA_WITH_VERIFY__) && !defined(USE_MPSC)
		{
			for( idx ; lanes.count ) {
				__intrusive_lane_t & sl = lanes.data[idx];
				assert(!lanes.data[idx].lock);

				assert(head(sl)->link.prev == 0p );
				assert(head(sl)->link.next->link.prev == head(sl) );
				assert(tail(sl)->link.next == 0p );
				assert(tail(sl)->link.prev->link.next == tail(sl) );

				if(is_empty(sl)) {
					assert(tail(sl)->link.prev == head(sl));
					assert(head(sl)->link.next == tail(sl));
				} else {
					assert(tail(sl)->link.prev != head(sl));
					assert(head(sl)->link.next != tail(sl));
				}
			}
		}
	#endif
}

//-----------------------------------------------------------------------
// Given 2 indexes, pick the list with the oldest push and try to pop from it
static inline struct $thread * try_pop(struct cluster * cltr, unsigned i, unsigned j __STATS(, __stats_readyQ_pop_t & stats)) with (cltr->ready_queue) {
	// Pick the best list
	int w = i;
	if( __builtin_expect(!is_empty(lanes.data[j]), true) ) {
		w = (ts(lanes.data[i]) < ts(lanes.data[j])) ? i : j;
	}

	return try_pop(cltr, w __STATS(, stats));
}

// Call this function if the intrusive list was moved using memcpy;
// it fixes the list so that the pointers back to anchors aren't left dangling
static inline void fix(__intrusive_lane_t & ll) {
	#if !defined(USE_MPSC)
		// if the list is not empty then follow the pointer and fix its reverse
		if(!is_empty(ll)) {
			head(ll)->link.next->link.prev = head(ll);
			tail(ll)->link.prev->link.next = tail(ll);
		}
		// Otherwise just reset the list
		else {
			verify(tail(ll)->link.next == 0p);
			tail(ll)->link.prev = head(ll);
			head(ll)->link.next = tail(ll);
			verify(head(ll)->link.prev == 0p);
		}
	#endif
}

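// Walk the given processor list and hand each processor a contiguous block of READYQ_SHARD_FACTOR lanes as its preferred shards.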
static void assign_list(unsigned & value, dlist(processor, processor) & list, unsigned count) {
	processor * it = &list`first;
	for(unsigned i = 0; i < count; i++) {
		/* paranoid */ verifyf( it, "Unexpected null iterator, at index %u of %u\n", i, count);
		it->rdq.id = value;
		it->rdq.target = -1u;
		value += READYQ_SHARD_FACTOR;
		it = &(*it)`next;
	}
}

static void reassign_cltr_id(struct cluster * cltr) {
	unsigned preferred = 0;
	assign_list(preferred, cltr->procs.actives, cltr->procs.total - cltr->procs.idle);
	assign_list(preferred, cltr->procs.idles  , cltr->procs.idle );
}

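// Refresh the cached per-lane timestamps used by the work-stealing victim check; called after the lane array is resized.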
static void fix_times( struct cluster * cltr ) with( cltr->ready_queue ) {
	#if defined(USE_WORK_STEALING)
		lanes.tscs = alloc(lanes.count, lanes.tscs`realloc);
		for(i; lanes.count) {
			lanes.tscs[i].tv = ts(lanes.data[i]);
		}
	#endif
}

// Grow the ready queue
void ready_queue_grow(struct cluster * cltr) {
	size_t ncount;
	int target = cltr->procs.total;

	/* paranoid */ verify( ready_mutate_islocked() );
	__cfadbg_print_safe(ready_queue, "Kernel : Growing ready queue\n");

	// Make sure that everything is consistent
	/* paranoid */ check( cltr->ready_queue );

	// grow the ready queue
	with( cltr->ready_queue ) {
		// Find new count
		// Make sure we always have at least 1 list
		if(target >= 2) {
			ncount = target * READYQ_SHARD_FACTOR;
		} else {
			ncount = SEQUENTIAL_SHARD;
		}

		// Allocate new array (uses realloc and memcpies the data)
		lanes.data = alloc( ncount, lanes.data`realloc );

		// Fix the moved data
		for( idx; (size_t)lanes.count ) {
			fix(lanes.data[idx]);
		}

		// Construct new data
		for( idx; (size_t)lanes.count ~ ncount) {
			(lanes.data[idx]){};
		}

		// Update original
		lanes.count = ncount;
	}

	fix_times(cltr);

	reassign_cltr_id(cltr);

	// Make sure that everything is consistent
	/* paranoid */ check( cltr->ready_queue );

	__cfadbg_print_safe(ready_queue, "Kernel : Growing ready queue done\n");

	/* paranoid */ verify( ready_mutate_islocked() );
}

// Shrink the ready queue
void ready_queue_shrink(struct cluster * cltr) {
	/* paranoid */ verify( ready_mutate_islocked() );
	__cfadbg_print_safe(ready_queue, "Kernel : Shrinking ready queue\n");

	// Make sure that everything is consistent
	/* paranoid */ check( cltr->ready_queue );

	int target = cltr->procs.total;

	with( cltr->ready_queue ) {
		// Remember old count
		size_t ocount = lanes.count;

		// Find new count
		// Make sure we always have at least 1 list
		lanes.count = target >= 2 ? target * READYQ_SHARD_FACTOR : SEQUENTIAL_SHARD;
		/* paranoid */ verify( ocount >= lanes.count );
		/* paranoid */ verify( lanes.count == target * READYQ_SHARD_FACTOR || target < 2 );

		// for printing, count the number of displaced threads
		#if defined(__CFA_DEBUG_PRINT__) || defined(__CFA_DEBUG_PRINT_READY_QUEUE__)
			__attribute__((unused)) size_t displaced = 0;
		#endif

		// redistribute old data
		for( idx; (size_t)lanes.count ~ ocount) {
			// Lock is not strictly needed but makes checking invariants much easier
			__attribute__((unused)) bool locked = __atomic_try_acquire(&lanes.data[idx].lock);
			verify(locked);

			// As long as we can pop from this lane, push the threads somewhere else in the queue
			while(!is_empty(lanes.data[idx])) {
				struct $thread * thrd;
				thrd = pop(lanes.data[idx]);

				push(cltr, thrd);

				// for printing, count the number of displaced threads
				#if defined(__CFA_DEBUG_PRINT__) || defined(__CFA_DEBUG_PRINT_READY_QUEUE__)
					displaced++;
				#endif
			}

			// Unlock the lane
			__atomic_unlock(&lanes.data[idx].lock);

			// TODO print the queue statistics here

			^(lanes.data[idx]){};
		}

		__cfadbg_print_safe(ready_queue, "Kernel : Shrinking ready queue displaced %zu threads\n", displaced);

		// Allocate new array (uses realloc and memcpies the data)
		lanes.data = alloc( lanes.count, lanes.data`realloc );

		// Fix the moved data
		for( idx; (size_t)lanes.count ) {
			fix(lanes.data[idx]);
		}
	}

	fix_times(cltr);

	reassign_cltr_id(cltr);

	// Make sure that everything is consistent
	/* paranoid */ check( cltr->ready_queue );

	__cfadbg_print_safe(ready_queue, "Kernel : Shrinking ready queue done\n");
	/* paranoid */ verify( ready_mutate_islocked() );
}