//
// Cforall Version 1.0.0 Copyright (C) 2019 University of Waterloo
//
// The contents of this file are covered under the licence agreement in the
// file "LICENCE" distributed with Cforall.
//
// ready_queue.cfa --
//
// Author           : Thierry Delisle
// Created On       : Mon Nov dd 16:29:18 2019
// Last Modified By :
// Last Modified On :
// Update Count     :
//

#define __cforall_thread__
#define _GNU_SOURCE

// #define __CFA_DEBUG_PRINT_READY_QUEUE__


#define USE_RELAXED_FIFO
// #define USE_WORK_STEALING
// #define USE_CPU_WORK_STEALING

#include "bits/defs.hfa"
#include "device/cpu.hfa"
#include "kernel_private.hfa"

#include "stdlib.hfa"
#include "math.hfa"

#include <errno.h>
#include <unistd.h>

extern "C" {
	#include <sys/syscall.h>  // __NR_xxx
}

#include "ready_subqueue.hfa"

static const size_t cache_line_size = 64;

#if !defined(__CFA_NO_STATISTICS__)
	#define __STATS(...) __VA_ARGS__
#else
	#define __STATS(...)
#endif

// No overridden function, no environment variable, no define:
// fall back to a magic number
#ifndef __CFA_MAX_PROCESSORS__
	#define __CFA_MAX_PROCESSORS__ 1024
#endif

#if defined(USE_CPU_WORK_STEALING)
	#define READYQ_SHARD_FACTOR 2
#elif defined(USE_RELAXED_FIFO)
	#define BIAS 4
	#define READYQ_SHARD_FACTOR 4
	#define SEQUENTIAL_SHARD 1
#elif defined(USE_WORK_STEALING)
	#define READYQ_SHARD_FACTOR 2
	#define SEQUENTIAL_SHARD 2
#else
	#error no scheduling strategy selected
#endif

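// Sharding overview (restating the defines above): with P processors (or P
// hardware threads under USE_CPU_WORK_STEALING) the cluster keeps
// P * READYQ_SHARD_FACTOR sub-queues; e.g. under USE_RELAXED_FIFO a
// 4-processor cluster has 16 lanes and each processor prefers the
// READYQ_SHARD_FACTOR lanes starting at its rdq.id.
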
static inline struct thread$ * try_pop(struct cluster * cltr, unsigned w __STATS(, __stats_readyQ_pop_t & stats));
static inline struct thread$ * try_pop(struct cluster * cltr, unsigned i, unsigned j __STATS(, __stats_readyQ_pop_t & stats));
static inline struct thread$ * search(struct cluster * cltr);
static inline [unsigned, bool] idx_from_r(unsigned r, unsigned preferred);


// returns the maximum number of processors the RWLock supports
__attribute__((weak)) unsigned __max_processors() {
	const char * max_cores_s = getenv("CFA_MAX_PROCESSORS");
	if(!max_cores_s) {
		__cfadbg_print_nolock(ready_queue, "No CFA_MAX_PROCESSORS in ENV\n");
		return __CFA_MAX_PROCESSORS__;
	}

	char * endptr = 0p;
	long int max_cores_l = strtol(max_cores_s, &endptr, 10);
	if(max_cores_l < 1 || max_cores_l > 65535) {
		__cfadbg_print_nolock(ready_queue, "CFA_MAX_PROCESSORS out of range : %ld\n", max_cores_l);
		return __CFA_MAX_PROCESSORS__;
	}
	if('\0' != *endptr) {
		__cfadbg_print_nolock(ready_queue, "CFA_MAX_PROCESSORS not a decimal number : %s\n", max_cores_s);
		return __CFA_MAX_PROCESSORS__;
	}

	return max_cores_l;
}
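
// The cap can be adjusted without recompiling, e.g. (hypothetical binary name):
//   $ CFA_MAX_PROCESSORS=4096 ./my_cfa_program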

#if   defined(CFA_HAVE_LINUX_LIBRSEQ)
	// No forward declaration needed
	#define __kernel_rseq_register rseq_register_current_thread
	#define __kernel_rseq_unregister rseq_unregister_current_thread
#elif defined(CFA_HAVE_LINUX_RSEQ_H)
	void __kernel_raw_rseq_register  (void);
	void __kernel_raw_rseq_unregister(void);

	#define __kernel_rseq_register __kernel_raw_rseq_register
	#define __kernel_rseq_unregister __kernel_raw_rseq_unregister
#else
	// No forward declaration needed
	// No initialization needed
	static inline void noop(void) {}

	#define __kernel_rseq_register noop
	#define __kernel_rseq_unregister noop
#endif

//=======================================================================
// Cluster wide reader-writer lock
//=======================================================================
void  ?{}(__scheduler_RWLock_t & this) {
	this.max   = __max_processors();
	this.alloc = 0;
	this.ready = 0;
	this.data  = alloc(this.max);
	this.write_lock = false;

	/*paranoid*/ verify(__atomic_is_lock_free(sizeof(this.alloc), &this.alloc));
	/*paranoid*/ verify(__atomic_is_lock_free(sizeof(this.ready), &this.ready));

}
void ^?{}(__scheduler_RWLock_t & this) {
	free(this.data);
}


//=======================================================================
// Lock-Free registering/unregistering of threads
unsigned register_proc_id( void ) with(*__scheduler_lock) {
	__kernel_rseq_register();

	__cfadbg_print_safe(ready_queue, "Kernel : Registering proc %p for RW-Lock\n", proc);
	bool * handle = (bool *)&kernelTLS().sched_lock;

	// Step - 1 : check if there is already space in the data
	uint_fast32_t s = ready;

	// Check among all the ready
	for(uint_fast32_t i = 0; i < s; i++) {
		bool * volatile * cell = (bool * volatile *)&data[i]; // Cforall is bugged and the double volatile causes problems
		/* paranoid */ verify( handle != *cell );

		bool * null = 0p; // Re-write every loop since compare thrashes it
		if( __atomic_load_n(cell, (int)__ATOMIC_RELAXED) == null
			&& __atomic_compare_exchange_n( cell, &null, handle, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) {
			/* paranoid */ verify(i < ready);
			/* paranoid */ verify( (kernelTLS().sched_id = i, true) );
			return i;
		}
	}

	if(max <= alloc) abort("Trying to create more than %u processors", __scheduler_lock->max);

	// Step - 2 : F&A to get a new spot in the array.
	uint_fast32_t n = __atomic_fetch_add(&alloc, 1, __ATOMIC_SEQ_CST);
	if(max <= n) abort("Trying to create more than %u processors", __scheduler_lock->max);

	// Step - 3 : Mark space as used and then publish it.
	data[n] = handle;
	while() {
		unsigned copy = n;
		if( __atomic_load_n(&ready, __ATOMIC_RELAXED) == n
			&& __atomic_compare_exchange_n(&ready, &copy, n + 1, true, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
			break;
		Pause();
	}

	__cfadbg_print_safe(ready_queue, "Kernel : Registering proc %p done, id %lu\n", proc, n);

	// Return new spot.
	/* paranoid */ verify(n < ready);
	/* paranoid */ verify( (kernelTLS().sched_id = n, true) );
	return n;
}
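
// Rough lifecycle of the slot claimed above (a sketch, not code from the kernel):
//   unsigned id = register_proc_id();  // CASes a free cell in data[] to &kernelTLS().sched_lock
//   ...                                // the reader side can now go through data[id]
//   unregister_proc_id( id );          // stores 0p to publish the cell as free again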

void unregister_proc_id( unsigned id ) with(*__scheduler_lock) {
	/* paranoid */ verify(id < ready);
	/* paranoid */ verify(id == kernelTLS().sched_id);
	/* paranoid */ verify(data[id] == &kernelTLS().sched_lock);

	bool * volatile * cell = (bool * volatile *)&data[id]; // Cforall is bugged and the double volatile causes problems

	__atomic_store_n(cell, 0p, __ATOMIC_RELEASE);

	__cfadbg_print_safe(ready_queue, "Kernel : Unregister proc %p\n", proc);

	__kernel_rseq_unregister();
}

//-----------------------------------------------------------------------
// Writer side : acquire when changing the ready queue, e.g. adding more
// queues or removing them.
uint_fast32_t ready_mutate_lock( void ) with(*__scheduler_lock) {
	/* paranoid */ verify( ! __preemption_enabled() );
	/* paranoid */ verify( ! kernelTLS().sched_lock );

	// Step 1 : lock global lock
	// It is needed to avoid processors that register mid Critical-Section
	// to simply lock their own lock and enter.
	__atomic_acquire( &write_lock );

	// Step 2 : lock per-proc lock
	// Processors that are currently being registered aren't counted
	// but can't be in read_lock or in the critical section.
	// All other processors are counted
	uint_fast32_t s = ready;
	for(uint_fast32_t i = 0; i < s; i++) {
		volatile bool * llock = data[i];
		if(llock) __atomic_acquire( llock );
	}

	/* paranoid */ verify( ! __preemption_enabled() );
	return s;
}

void ready_mutate_unlock( uint_fast32_t last_s ) with(*__scheduler_lock) {
	/* paranoid */ verify( ! __preemption_enabled() );

	// Step 1 : release local locks
	// This must be done while the global lock is held to avoid
	// threads that were created mid critical section
	// racing to lock their local locks and having the writer
	// immediately unlock them
	// Alternative solution : return s in write_lock and pass it to write_unlock
	for(uint_fast32_t i = 0; i < last_s; i++) {
		volatile bool * llock = data[i];
		if(llock) __atomic_store_n(llock, (bool)false, __ATOMIC_RELEASE);
	}

	// Step 2 : release global lock
	/*paranoid*/ assert(true == write_lock);
	__atomic_store_n(&write_lock, (bool)false, __ATOMIC_RELEASE);

	/* paranoid */ verify( ! __preemption_enabled() );
}
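
// Typical writer-side pairing (a sketch; actual callers live elsewhere in the kernel):
//   uint_fast32_t last_s = ready_mutate_lock();   // blocks out every registered reader
//   ready_queue_grow( cltr );                     // now safe to resize the lanes
//   ready_mutate_unlock( last_s );                // must receive the value lock returned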

//=======================================================================
// Cforall Ready Queue used for scheduling
//=======================================================================
void ?{}(__ready_queue_t & this) with (this) {
	#if defined(USE_CPU_WORK_STEALING)
		lanes.count = cpu_info.hthrd_count * READYQ_SHARD_FACTOR;
		lanes.data = alloc( lanes.count );
		lanes.tscs = alloc( lanes.count );

		for( idx; (size_t)lanes.count ) {
			(lanes.data[idx]){};
			lanes.tscs[idx].tv = rdtscl();
		}
	#else
		lanes.data  = 0p;
		lanes.tscs  = 0p;
		lanes.count = 0;
	#endif
}

void ^?{}(__ready_queue_t & this) with (this) {
	#if !defined(USE_CPU_WORK_STEALING)
		verify( SEQUENTIAL_SHARD == lanes.count );
	#endif

	free(lanes.data);
	free(lanes.tscs);
}

//-----------------------------------------------------------------------
#if defined(USE_CPU_WORK_STEALING)
	__attribute__((hot)) void push(struct cluster * cltr, struct thread$ * thrd, bool push_local) with (cltr->ready_queue) {
		__cfadbg_print_safe(ready_queue, "Kernel : Pushing %p on cluster %p\n", thrd, cltr);

		processor * const proc = kernelTLS().this_processor;
		const bool external = !push_local || (!proc) || (cltr != proc->cltr);

		const int cpu = __kernel_getcpu();
		/* paranoid */ verify(cpu >= 0);
		/* paranoid */ verify(cpu < cpu_info.hthrd_count);
		/* paranoid */ verify(cpu * READYQ_SHARD_FACTOR < lanes.count);

		const cpu_map_entry_t & map = cpu_info.llc_map[cpu];
		/* paranoid */ verify(map.start * READYQ_SHARD_FACTOR < lanes.count);
		/* paranoid */ verify(map.self * READYQ_SHARD_FACTOR < lanes.count);
		/* paranoid */ verifyf((map.start + map.count) * READYQ_SHARD_FACTOR <= lanes.count, "have %zu lanes but map can go up to %u", lanes.count, (map.start + map.count) * READYQ_SHARD_FACTOR);

		const int start = map.self * READYQ_SHARD_FACTOR;
		unsigned i;
		do {
			unsigned r;
			if(unlikely(external)) { r = __tls_rand(); }
			else { r = proc->rdq.its++; }
			i = start + (r % READYQ_SHARD_FACTOR);
			// If we can't lock it retry
		} while( !__atomic_try_acquire( &lanes.data[i].lock ) );

		// Actually push it
		push(lanes.data[i], thrd);

		// Unlock and return
		__atomic_unlock( &lanes.data[i].lock );

		#if !defined(__CFA_NO_STATISTICS__)
			if(unlikely(external)) __atomic_fetch_add(&cltr->stats->ready.push.extrn.success, 1, __ATOMIC_RELAXED);
			else __tls_stats()->ready.push.local.success++;
		#endif

		__cfadbg_print_safe(ready_queue, "Kernel : Pushed %p on cluster %p (idx: %u, mask %llu, first %d)\n", thrd, cltr, i, used.mask[0], lane_first);

	}
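
	// Worked example of the lane choice above, assuming READYQ_SHARD_FACTOR 2
	// and map.self == 3: start == 6, so this push always lands in lane 6 or 7,
	// the two shards owned by that hardware thread.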

	// Pop from the ready queue from a given cluster
	__attribute__((hot)) thread$ * pop_fast(struct cluster * cltr) with (cltr->ready_queue) {
		/* paranoid */ verify( lanes.count > 0 );
		/* paranoid */ verify( kernelTLS().this_processor );

		const int cpu = __kernel_getcpu();
		/* paranoid */ verify(cpu >= 0);
		/* paranoid */ verify(cpu < cpu_info.hthrd_count);
		/* paranoid */ verify(cpu * READYQ_SHARD_FACTOR < lanes.count);

		const cpu_map_entry_t & map = cpu_info.llc_map[cpu];
		/* paranoid */ verify(map.start * READYQ_SHARD_FACTOR < lanes.count);
		/* paranoid */ verify(map.self * READYQ_SHARD_FACTOR < lanes.count);
		/* paranoid */ verifyf((map.start + map.count) * READYQ_SHARD_FACTOR <= lanes.count, "have %zu lanes but map can go up to %u", lanes.count, (map.start + map.count) * READYQ_SHARD_FACTOR);

		processor * const proc = kernelTLS().this_processor;
		const int start = map.self * READYQ_SHARD_FACTOR;

		// Did we already have a help target?
		if(proc->rdq.target == -1u) {
			// If we don't have a target yet, find the oldest timestamp among our lanes
			unsigned long long min = ts(lanes.data[start]);
			for(i; READYQ_SHARD_FACTOR) {
				unsigned long long tsc = ts(lanes.data[start + i]);
				if(tsc < min) min = tsc;
			}
			proc->rdq.cutoff = min;

			/* paranoid */ verify(lanes.count < 65536); // The following code assumes max 65536 cores.
			/* paranoid */ verify(map.count < 65536); // The following code assumes max 65536 cores.
			uint64_t chaos = __tls_rand();
			uint64_t high_chaos = (chaos >> 32);
			uint64_t  mid_chaos = (chaos >> 16) & 0xffff;
			uint64_t  low_chaos = chaos & 0xffff;

			unsigned me = map.self;
			unsigned cpu_chaos = map.start + (mid_chaos % map.count);
			bool global = cpu_chaos == me;

			if(global) {
				proc->rdq.target = high_chaos % lanes.count;
			} else {
				proc->rdq.target = (cpu_chaos * READYQ_SHARD_FACTOR) + (low_chaos % READYQ_SHARD_FACTOR);
				/* paranoid */ verify(proc->rdq.target >= (map.start * READYQ_SHARD_FACTOR));
				/* paranoid */ verify(proc->rdq.target <  ((map.start + map.count) * READYQ_SHARD_FACTOR));
			}

			/* paranoid */ verify(proc->rdq.target != -1u);
		}
		else {
			const unsigned long long bias = 0; //2_500_000_000;
			const unsigned long long cutoff = proc->rdq.cutoff > bias ? proc->rdq.cutoff - bias : proc->rdq.cutoff;
			{
				unsigned target = proc->rdq.target;
				proc->rdq.target = -1u;
				if(lanes.tscs[target].tv < cutoff && ts(lanes.data[target]) < cutoff) {
					thread$ * t = try_pop(cltr, target __STATS(, __tls_stats()->ready.pop.help));
					proc->rdq.last = target;
					if(t) return t;
				}
			}

			unsigned last = proc->rdq.last;
			if(last != -1u && lanes.tscs[last].tv < cutoff && ts(lanes.data[last]) < cutoff) {
				thread$ * t = try_pop(cltr, last __STATS(, __tls_stats()->ready.pop.help));
				if(t) return t;
			}
			else {
				proc->rdq.last = -1u;
			}
		}

		for(READYQ_SHARD_FACTOR) {
			unsigned i = start + (proc->rdq.itr++ % READYQ_SHARD_FACTOR);
			if(thread$ * t = try_pop(cltr, i __STATS(, __tls_stats()->ready.pop.local))) return t;
		}

		// All lanes were empty; return 0p
		return 0p;
	}
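
	// How the single __tls_rand() above is consumed (three slices of one draw):
	//   high_chaos (bits 63-32) picks a lane anywhere in the cluster,
	//   mid_chaos  (bits 31-16) picks a victim cpu inside this LLC domain,
	//   low_chaos  (bits 15- 0) picks one of that cpu's READYQ_SHARD_FACTOR shards;
	// the pick only goes cluster-wide when the victim cpu turns out to be ourselves.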

	__attribute__((hot)) struct thread$ * pop_slow(struct cluster * cltr) with (cltr->ready_queue) {
		processor * const proc = kernelTLS().this_processor;
		unsigned last = proc->rdq.last;
		if(last != -1u) {
			struct thread$ * t = try_pop(cltr, last __STATS(, __tls_stats()->ready.pop.steal));
			if(t) return t;
			proc->rdq.last = -1u;
		}

		unsigned i = __tls_rand() % lanes.count;
		return try_pop(cltr, i __STATS(, __tls_stats()->ready.pop.steal));
	}
	__attribute__((hot)) struct thread$ * pop_search(struct cluster * cltr) {
		return search(cltr);
	}
#endif
#if defined(USE_RELAXED_FIFO)
	//-----------------------------------------------------------------------
	// get index from random number with or without bias towards queues
	static inline [unsigned, bool] idx_from_r(unsigned r, unsigned preferred) {
		unsigned i;
		bool local;
		unsigned rlow  = r % BIAS;
		unsigned rhigh = r / BIAS;
		if((0 != rlow) && preferred >= 0) {
			// (BIAS - 1) out of BIAS chances
			// Use preferred queues
			i = preferred + (rhigh % READYQ_SHARD_FACTOR);
			local = true;
		}
		else {
			// 1 out of BIAS chances
			// Use all queues
			i = rhigh;
			local = false;
		}
		return [i, local];
	}
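	// Worked example of the split above, with BIAS 4 and READYQ_SHARD_FACTOR 4:
	// r == 13 gives rlow == 1 and rhigh == 3, so i == preferred + 3 with local == true;
	// r == 8 gives rlow == 0, so i falls through to rhigh == 2 (callers take it
	// modulo lanes.count) with local == false.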

	__attribute__((hot)) void push(struct cluster * cltr, struct thread$ * thrd, bool push_local) with (cltr->ready_queue) {
		__cfadbg_print_safe(ready_queue, "Kernel : Pushing %p on cluster %p\n", thrd, cltr);

		const bool external = !push_local || (!kernelTLS().this_processor) || (cltr != kernelTLS().this_processor->cltr);
		/* paranoid */ verify(external || kernelTLS().this_processor->rdq.id < lanes.count );

		bool local;
		int preferred = external ? -1 : kernelTLS().this_processor->rdq.id;

		// Try to pick a lane and lock it
		unsigned i;
		do {
			// Pick the index of a lane
			unsigned r = __tls_rand_fwd();
			[i, local] = idx_from_r(r, preferred);

			i %= __atomic_load_n( &lanes.count, __ATOMIC_RELAXED );

			#if !defined(__CFA_NO_STATISTICS__)
				if(unlikely(external)) __atomic_fetch_add(&cltr->stats->ready.push.extrn.attempt, 1, __ATOMIC_RELAXED);
				else if(local) __tls_stats()->ready.push.local.attempt++;
				else __tls_stats()->ready.push.share.attempt++;
			#endif

			// If we can't lock it retry
		} while( !__atomic_try_acquire( &lanes.data[i].lock ) );

		// Actually push it
		push(lanes.data[i], thrd);

		// Unlock and return
		__atomic_unlock( &lanes.data[i].lock );

		// Mark the current index in the tls rng instance as having an item
		__tls_rand_advance_bck();

		__cfadbg_print_safe(ready_queue, "Kernel : Pushed %p on cluster %p (idx: %u, mask %llu, first %d)\n", thrd, cltr, i, used.mask[0], lane_first);

		// Update statistics
		#if !defined(__CFA_NO_STATISTICS__)
			if(unlikely(external)) __atomic_fetch_add(&cltr->stats->ready.push.extrn.success, 1, __ATOMIC_RELAXED);
			else if(local) __tls_stats()->ready.push.local.success++;
			else __tls_stats()->ready.push.share.success++;
		#endif
	}
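
	// Note on the __tls_rand_fwd()/__tls_rand_advance_bck() pairing above: the
	// per-thread generator appears to be replayable in reverse, so pop_fast
	// below can re-draw (via __tls_rand_bck) the same lane indices recent
	// pushes used, biasing pops toward lanes that likely hold work.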

	// Pop from the ready queue from a given cluster
	__attribute__((hot)) thread$ * pop_fast(struct cluster * cltr) with (cltr->ready_queue) {
		/* paranoid */ verify( lanes.count > 0 );
		/* paranoid */ verify( kernelTLS().this_processor );
		/* paranoid */ verify( kernelTLS().this_processor->rdq.id < lanes.count );

		unsigned count = __atomic_load_n( &lanes.count, __ATOMIC_RELAXED );
		int preferred = kernelTLS().this_processor->rdq.id;


		// As long as the list is not empty, try finding a lane that isn't empty and pop from it
		for(25) {
			// Pick two lists at random
			unsigned ri = __tls_rand_bck();
			unsigned rj = __tls_rand_bck();

			unsigned i, j;
			__attribute__((unused)) bool locali, localj;
			[i, locali] = idx_from_r(ri, preferred);
			[j, localj] = idx_from_r(rj, preferred);

			i %= count;
			j %= count;

			// try popping from the 2 picked lists
			struct thread$ * thrd = try_pop(cltr, i, j __STATS(, *(locali || localj ? &__tls_stats()->ready.pop.local : &__tls_stats()->ready.pop.help)));
			if(thrd) {
				return thrd;
			}
		}

		// All lanes were empty; return 0p
		return 0p;
	}

	__attribute__((hot)) struct thread$ * pop_slow(struct cluster * cltr) { return pop_fast(cltr); }
	__attribute__((hot)) struct thread$ * pop_search(struct cluster * cltr) {
		return search(cltr);
	}
#endif
#if defined(USE_WORK_STEALING)
	__attribute__((hot)) void push(struct cluster * cltr, struct thread$ * thrd, bool push_local) with (cltr->ready_queue) {
		__cfadbg_print_safe(ready_queue, "Kernel : Pushing %p on cluster %p\n", thrd, cltr);

		// #define USE_PREFERRED
		#if !defined(USE_PREFERRED)
			const bool external = !push_local || (!kernelTLS().this_processor) || (cltr != kernelTLS().this_processor->cltr);
			/* paranoid */ verify(external || kernelTLS().this_processor->rdq.id < lanes.count );
		#else
			unsigned preferred = thrd->preferred;
			const bool external = !push_local || (!kernelTLS().this_processor) || preferred == -1u || thrd->curr_cluster != cltr;
			/* paranoid */ verifyf(external || preferred < lanes.count, "Invalid preferred queue %u for %u lanes", preferred, lanes.count );

			unsigned r = preferred % READYQ_SHARD_FACTOR;
			const unsigned start = preferred - r;
		#endif

		// Try to pick a lane and lock it
		unsigned i;
		do {
			#if !defined(__CFA_NO_STATISTICS__)
				if(unlikely(external)) __atomic_fetch_add(&cltr->stats->ready.push.extrn.attempt, 1, __ATOMIC_RELAXED);
				else __tls_stats()->ready.push.local.attempt++;
			#endif

			if(unlikely(external)) {
				i = __tls_rand() % lanes.count;
			}
			else {
				#if !defined(USE_PREFERRED)
					processor * proc = kernelTLS().this_processor;
					unsigned r = proc->rdq.its++;
					i = proc->rdq.id + (r % READYQ_SHARD_FACTOR);
				#else
					i = start + (r++ % READYQ_SHARD_FACTOR);
				#endif
			}
			// If we can't lock it retry
		} while( !__atomic_try_acquire( &lanes.data[i].lock ) );

		// Actually push it
		push(lanes.data[i], thrd);

		// Unlock and return
		__atomic_unlock( &lanes.data[i].lock );

		#if !defined(__CFA_NO_STATISTICS__)
			if(unlikely(external)) __atomic_fetch_add(&cltr->stats->ready.push.extrn.success, 1, __ATOMIC_RELAXED);
			else __tls_stats()->ready.push.local.success++;
		#endif

		__cfadbg_print_safe(ready_queue, "Kernel : Pushed %p on cluster %p (idx: %u, mask %llu, first %d)\n", thrd, cltr, i, used.mask[0], lane_first);
	}

	// Pop from the ready queue from a given cluster
	__attribute__((hot)) thread$ * pop_fast(struct cluster * cltr) with (cltr->ready_queue) {
		/* paranoid */ verify( lanes.count > 0 );
		/* paranoid */ verify( kernelTLS().this_processor );
		/* paranoid */ verify( kernelTLS().this_processor->rdq.id < lanes.count );

		processor * proc = kernelTLS().this_processor;

		if(proc->rdq.target == -1u) {
			unsigned long long min = ts(lanes.data[proc->rdq.id]);
			for(int i = 0; i < READYQ_SHARD_FACTOR; i++) {
				unsigned long long tsc = ts(lanes.data[proc->rdq.id + i]);
				if(tsc < min) min = tsc;
			}
			proc->rdq.cutoff = min;
			proc->rdq.target = __tls_rand() % lanes.count;
		}
		else {
			unsigned target = proc->rdq.target;
			proc->rdq.target = -1u;
			const unsigned long long bias = 0; //2_500_000_000;
			const unsigned long long cutoff = proc->rdq.cutoff > bias ? proc->rdq.cutoff - bias : proc->rdq.cutoff;
			if(lanes.tscs[target].tv < cutoff && ts(lanes.data[target]) < cutoff) {
				thread$ * t = try_pop(cltr, target __STATS(, __tls_stats()->ready.pop.help));
				if(t) return t;
			}
		}

		for(READYQ_SHARD_FACTOR) {
			unsigned i = proc->rdq.id + (proc->rdq.itr++ % READYQ_SHARD_FACTOR);
			if(thread$ * t = try_pop(cltr, i __STATS(, __tls_stats()->ready.pop.local))) return t;
		}
		return 0p;
	}

	__attribute__((hot)) struct thread$ * pop_slow(struct cluster * cltr) with (cltr->ready_queue) {
		unsigned i = __tls_rand() % lanes.count;
		return try_pop(cltr, i __STATS(, __tls_stats()->ready.pop.steal));
	}

	__attribute__((hot)) struct thread$ * pop_search(struct cluster * cltr) with (cltr->ready_queue) {
		return search(cltr);
	}
#endif
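
// In the stealing variants above (USE_CPU_WORK_STEALING and USE_WORK_STEALING),
// pop_fast alternates between two modes: one call snapshots the oldest
// timestamp of the processor's own shards as a cutoff and draws a random help
// target; the next call pops from that target only if its timestamp is older
// than the cutoff, and otherwise falls back to the processor's own
// READYQ_SHARD_FACTOR lanes.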

//=======================================================================
// Various Ready Queue utilities
//=======================================================================
// these functions work the same or almost the same
// whether they are using work-stealing or relaxed-fifo scheduling

//-----------------------------------------------------------------------
// try to pop from a lane given by index w
static inline struct thread$ * try_pop(struct cluster * cltr, unsigned w __STATS(, __stats_readyQ_pop_t & stats)) with (cltr->ready_queue) {
	__STATS( stats.attempt++; )

	// Get relevant elements locally
	__intrusive_lane_t & lane = lanes.data[w];

	// If list looks empty retry
	if( is_empty(lane) ) {
		return 0p;
	}

	// If we can't get the lock retry
	if( !__atomic_try_acquire(&lane.lock) ) {
		return 0p;
	}

	// If list is empty, unlock and retry
	if( is_empty(lane) ) {
		__atomic_unlock(&lane.lock);
		return 0p;
	}

	// Actually pop the list
	struct thread$ * thrd;
	unsigned long long tsv;
	[thrd, tsv] = pop(lane);

	/* paranoid */ verify(thrd);
	/* paranoid */ verify(tsv);
	/* paranoid */ verify(lane.lock);

	// Unlock and return
	__atomic_unlock(&lane.lock);

	// Update statistics
	__STATS( stats.success++; )

	#if defined(USE_WORK_STEALING)
		lanes.tscs[w].tv = tsv;
	#endif

	thrd->preferred = w;

	// return the popped thread
	return thrd;
}

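// Note the check / try-lock / re-check sequence in try_pop above: the first
// is_empty test is an unlocked fast path, and the test is repeated under the
// lane lock because a concurrent push can land between the two.
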
//-----------------------------------------------------------------------
// try to pop from any lane, making sure you don't miss any threads pushed
// before the start of the function
static inline struct thread$ * search(struct cluster * cltr) with (cltr->ready_queue) {
	/* paranoid */ verify( lanes.count > 0 );
	unsigned count = __atomic_load_n( &lanes.count, __ATOMIC_RELAXED );
	unsigned offset = __tls_rand();
	for(i; count) {
		unsigned idx = (offset + i) % count;
		struct thread$ * thrd = try_pop(cltr, idx __STATS(, __tls_stats()->ready.pop.search));
		if(thrd) {
			return thrd;
		}
	}

	// All lanes were empty; return 0p
	return 0p;
}

//-----------------------------------------------------------------------
// Check that all the intrusive queues in the data structure are still consistent
static void check( __ready_queue_t & q ) with (q) {
	#if defined(__CFA_WITH_VERIFY__)
		{
			for( idx ; lanes.count ) {
				__intrusive_lane_t & sl = lanes.data[idx];
				assert(!lanes.data[idx].lock);

				if(is_empty(sl)) {
					assert( sl.anchor.next == 0p );
					assert( sl.anchor.ts   == -1llu );
					assert( mock_head(sl)  == sl.prev );
				} else {
					assert( sl.anchor.next != 0p );
					assert( sl.anchor.ts   != -1llu );
					assert( mock_head(sl)  != sl.prev );
				}
			}
		}
	#endif
}

//-----------------------------------------------------------------------
// Given 2 indexes, pick the list with the oldest push and try to pop from it
static inline struct thread$ * try_pop(struct cluster * cltr, unsigned i, unsigned j __STATS(, __stats_readyQ_pop_t & stats)) with (cltr->ready_queue) {
	// Pick the best list
	int w = i;
	if( __builtin_expect(!is_empty(lanes.data[j]), true) ) {
		w = (ts(lanes.data[i]) < ts(lanes.data[j])) ? i : j;
	}

	return try_pop(cltr, w __STATS(, stats));
}

// Call this function if the intrusive list was moved using memcpy;
// fixes the list so that the pointers back to anchors aren't left dangling
static inline void fix(__intrusive_lane_t & ll) {
	if(is_empty(ll)) {
		verify(ll.anchor.next == 0p);
		ll.prev = mock_head(ll);
	}
}

static void assign_list(unsigned & value, dlist(processor) & list, unsigned count) {
	processor * it = &list`first;
	for(unsigned i = 0; i < count; i++) {
		/* paranoid */ verifyf( it, "Unexpected null iterator, at index %u of %u\n", i, count);
		it->rdq.id = value;
		it->rdq.target = -1u;
		value += READYQ_SHARD_FACTOR;
		it = &(*it)`next;
	}
}

static void reassign_cltr_id(struct cluster * cltr) {
	unsigned preferred = 0;
	assign_list(preferred, cltr->procs.actives, cltr->procs.total - cltr->procs.idle);
	assign_list(preferred, cltr->procs.idles  , cltr->procs.idle );
}

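// Example of the numbering produced above, assuming READYQ_SHARD_FACTOR 4 and
// three active processors: they receive rdq.id 0, 4 and 8, so each owns the
// disjoint lane range [rdq.id, rdq.id + READYQ_SHARD_FACTOR).
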
static void fix_times( struct cluster * cltr ) with( cltr->ready_queue ) {
	#if defined(USE_WORK_STEALING)
		lanes.tscs = alloc(lanes.count, lanes.tscs`realloc);
		for(i; lanes.count) {
			unsigned long long tsc1 = ts(lanes.data[i]);
			unsigned long long tsc2 = rdtscl();
			lanes.tscs[i].tv = min(tsc1, tsc2);
		}
	#endif
}

#if defined(USE_CPU_WORK_STEALING)
	// ready_queue size is fixed in this case
	void ready_queue_grow  (struct cluster * cltr) {}
	void ready_queue_shrink(struct cluster * cltr) {}
#else
	// Grow the ready queue
	void ready_queue_grow(struct cluster * cltr) {
		size_t ncount;
		int target = cltr->procs.total;

		/* paranoid */ verify( ready_mutate_islocked() );
		__cfadbg_print_safe(ready_queue, "Kernel : Growing ready queue\n");

		// Make sure that everything is consistent
		/* paranoid */ check( cltr->ready_queue );

		// grow the ready queue
		with( cltr->ready_queue ) {
			// Find new count
			// Make sure we always have at least 1 list
			if(target >= 2) {
				ncount = target * READYQ_SHARD_FACTOR;
			} else {
				ncount = SEQUENTIAL_SHARD;
			}

			// Allocate new array (uses realloc and memcpies the data)
			lanes.data = alloc( ncount, lanes.data`realloc );

			// Fix the moved data
			for( idx; (size_t)lanes.count ) {
				fix(lanes.data[idx]);
			}

			// Construct new data
			for( idx; (size_t)lanes.count ~ ncount) {
				(lanes.data[idx]){};
			}

			// Update original
			lanes.count = ncount;
		}

		fix_times(cltr);

		reassign_cltr_id(cltr);

		// Make sure that everything is consistent
		/* paranoid */ check( cltr->ready_queue );

		__cfadbg_print_safe(ready_queue, "Kernel : Growing ready queue done\n");

		/* paranoid */ verify( ready_mutate_islocked() );
	}

	// Shrink the ready queue
	void ready_queue_shrink(struct cluster * cltr) {
		/* paranoid */ verify( ready_mutate_islocked() );
		__cfadbg_print_safe(ready_queue, "Kernel : Shrinking ready queue\n");

		// Make sure that everything is consistent
		/* paranoid */ check( cltr->ready_queue );

		int target = cltr->procs.total;

		with( cltr->ready_queue ) {
			// Remember old count
			size_t ocount = lanes.count;

			// Find new count
			// Make sure we always have at least 1 list
			lanes.count = target >= 2 ? target * READYQ_SHARD_FACTOR: SEQUENTIAL_SHARD;
			/* paranoid */ verify( ocount >= lanes.count );
			/* paranoid */ verify( lanes.count == target * READYQ_SHARD_FACTOR || target < 2 );

			// for printing count the number of displaced threads
			#if defined(__CFA_DEBUG_PRINT__) || defined(__CFA_DEBUG_PRINT_READY_QUEUE__)
				__attribute__((unused)) size_t displaced = 0;
			#endif

			// redistribute old data
			for( idx; (size_t)lanes.count ~ ocount) {
				// Lock is not strictly needed but makes checking invariants much easier
				__attribute__((unused)) bool locked = __atomic_try_acquire(&lanes.data[idx].lock);
				verify(locked);

				// As long as we can pop from this lane to push the threads somewhere else in the queue
				while(!is_empty(lanes.data[idx])) {
					struct thread$ * thrd;
					unsigned long long _;
					[thrd, _] = pop(lanes.data[idx]);

					push(cltr, thrd, true);

					// for printing count the number of displaced threads
					#if defined(__CFA_DEBUG_PRINT__) || defined(__CFA_DEBUG_PRINT_READY_QUEUE__)
						displaced++;
					#endif
				}

				// Unlock the lane
				__atomic_unlock(&lanes.data[idx].lock);

				// TODO print the queue statistics here

				^(lanes.data[idx]){};
			}

			__cfadbg_print_safe(ready_queue, "Kernel : Shrinking ready queue displaced %zu threads\n", displaced);

			// Allocate new array (uses realloc and memcpies the data)
			lanes.data = alloc( lanes.count, lanes.data`realloc );

			// Fix the moved data
			for( idx; (size_t)lanes.count ) {
				fix(lanes.data[idx]);
			}
		}

		fix_times(cltr);

		reassign_cltr_id(cltr);

		// Make sure that everything is consistent
		/* paranoid */ check( cltr->ready_queue );

		__cfadbg_print_safe(ready_queue, "Kernel : Shrinking ready queue done\n");
		/* paranoid */ verify( ready_mutate_islocked() );
	}
#endif

#if !defined(__CFA_NO_STATISTICS__)
	unsigned cnt(const __ready_queue_t & this, unsigned idx) {
		/* paranoid */ verify(this.lanes.count > idx);
		return this.lanes.data[idx].cnt;
	}
#endif


#if   defined(CFA_HAVE_LINUX_LIBRSEQ)
	// No definition needed
#elif defined(CFA_HAVE_LINUX_RSEQ_H)

	#if defined( __x86_64 ) || defined( __i386 )
		#define RSEQ_SIG	0x53053053
	#elif defined( __ARM_ARCH )
		#ifdef __ARMEB__
			#define RSEQ_SIG    0xf3def5e7      /* udf    #24035    ; 0x5de3 (ARMv6+) */
		#else
			#define RSEQ_SIG    0xe7f5def3      /* udf    #24035    ; 0x5de3 */
		#endif
	#endif

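	// RSEQ_SIG is the signature the kernel expects to find just before each
	// rseq abort handler; the values above encode trapping instructions so the
	// bytes are unlikely to be reusable as a code gadget.
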
	extern void __disable_interrupts_hard();
	extern void __enable_interrupts_hard();

	void __kernel_raw_rseq_register  (void) {
		/* paranoid */ verify( __cfaabi_rseq.cpu_id == RSEQ_CPU_ID_UNINITIALIZED );

		// int ret = syscall(__NR_rseq, &__cfaabi_rseq, sizeof(struct rseq), 0, (sigset_t *)0p, _NSIG / 8);
		int ret = syscall(__NR_rseq, &__cfaabi_rseq, sizeof(struct rseq), 0, RSEQ_SIG);
		if(ret != 0) {
			int e = errno;
			switch(e) {
			case EINVAL: abort("KERNEL ERROR: rseq register invalid argument");
			case ENOSYS: abort("KERNEL ERROR: rseq register not supported");
			case EFAULT: abort("KERNEL ERROR: rseq register with invalid argument");
			case EBUSY : abort("KERNEL ERROR: rseq register already registered");
			case EPERM : abort("KERNEL ERROR: rseq register sig argument on unregistration does not match the signature received on registration");
			default: abort("KERNEL ERROR: rseq register unexpected return %d", e);
			}
		}
	}

	void __kernel_raw_rseq_unregister(void) {
		/* paranoid */ verify( __cfaabi_rseq.cpu_id >= 0 );

		// int ret = syscall(__NR_rseq, &__cfaabi_rseq, sizeof(struct rseq), RSEQ_FLAG_UNREGISTER, (sigset_t *)0p, _NSIG / 8);
		int ret = syscall(__NR_rseq, &__cfaabi_rseq, sizeof(struct rseq), RSEQ_FLAG_UNREGISTER, RSEQ_SIG);
		if(ret != 0) {
			int e = errno;
			switch(e) {
			case EINVAL: abort("KERNEL ERROR: rseq unregister invalid argument");
			case ENOSYS: abort("KERNEL ERROR: rseq unregister not supported");
			case EFAULT: abort("KERNEL ERROR: rseq unregister with invalid argument");
			case EBUSY : abort("KERNEL ERROR: rseq unregister already registered");
			case EPERM : abort("KERNEL ERROR: rseq unregister sig argument on unregistration does not match the signature received on registration");
			default: abort("KERNEL ERROR: rseq unregister unexpected return %d", e);
			}
		}
	}
#else
	// No definition needed
#endif