//
// Cforall Version 1.0.0 Copyright (C) 2019 University of Waterloo
//
// The contents of this file are covered under the licence agreement in the
// file "LICENCE" distributed with Cforall.
//
// ready_queue.cfa --
//
// Author           : Thierry Delisle
// Created On       : Mon Nov dd 16:29:18 2019
// Last Modified By :
// Last Modified On :
// Update Count     :
//

#define __cforall_thread__
// #define __CFA_DEBUG_PRINT_READY_QUEUE__

// #define USE_SNZI
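// When USE_SNZI is defined, the ready queue keeps a SNZI (Scalable Non-Zero
// Indicator, see snzi.hfa) summarising which lanes are non-empty, so emptiness
// can be tested without scanning every lane. It is left disabled here.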

#include "bits/defs.hfa"
#include "kernel_private.hfa"

#define _GNU_SOURCE
#include "stdlib.hfa"
#include "math.hfa"

#include <unistd.h>

#include "snzi.hfa"
#include "ready_subqueue.hfa"

static const size_t cache_line_size = 64;

// No overridden function, no environment variable, no define:
// fall back to a magic number
#ifndef __CFA_MAX_PROCESSORS__
	#define __CFA_MAX_PROCESSORS__ 1024
#endif

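// BIAS sets how strongly push/pop favour the current processor's own group of
// sub-queues: (BIAS - 1) out of BIAS random picks stay on the preferred
// sub-queues, 1 out of BIAS picks over all queues (see push/pop below).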
#define BIAS 16

// returns the maximum number of processors the RWLock supports
__attribute__((weak)) unsigned __max_processors() {
	const char * max_cores_s = getenv("CFA_MAX_PROCESSORS");
	if(!max_cores_s) {
		__cfadbg_print_nolock(ready_queue, "No CFA_MAX_PROCESSORS in ENV\n");
		return __CFA_MAX_PROCESSORS__;
	}

	char * endptr = 0p;
	long int max_cores_l = strtol(max_cores_s, &endptr, 10);
	if(max_cores_l < 1 || max_cores_l > 65535) {
		__cfadbg_print_nolock(ready_queue, "CFA_MAX_PROCESSORS out of range : %ld\n", max_cores_l);
		return __CFA_MAX_PROCESSORS__;
	}
	if('\0' != *endptr) {
		__cfadbg_print_nolock(ready_queue, "CFA_MAX_PROCESSORS not a decimal number : %s\n", max_cores_s);
		return __CFA_MAX_PROCESSORS__;
	}

	return max_cores_l;
}

//=======================================================================
// Cluster wide reader-writer lock
//=======================================================================
void ?{}(__scheduler_RWLock_t & this) {
	this.max   = __max_processors();
	this.alloc = 0;
	this.ready = 0;
	this.lock  = false;
	this.data  = alloc(this.max);

	/*paranoid*/ verify( 0 == (((uintptr_t)(this.data    )) % 64) );
	/*paranoid*/ verify( 0 == (((uintptr_t)(this.data + 1)) % 64) );
	/*paranoid*/ verify(__atomic_is_lock_free(sizeof(this.alloc), &this.alloc));
	/*paranoid*/ verify(__atomic_is_lock_free(sizeof(this.ready), &this.ready));

}
void ^?{}(__scheduler_RWLock_t & this) {
	free(this.data);
}

void ?{}( __scheduler_lock_id_t & this, __processor_id_t * proc ) {
	this.handle = proc;
	this.lock   = false;
	#ifdef __CFA_WITH_VERIFY__
		this.owned = false;
	#endif
}

//=======================================================================
// Lock-Free registering/unregistering of threads
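// Registration proceeds in two phases: first scan the already-published slots
// for one whose handle was freed (CAS it from NULL back to this proc); failing
// that, fetch-and-add 'alloc' to claim a fresh slot, construct it, then spin
// until 'ready' equals this slot's index so slots are published strictly in
// order. Unregistering simply clears the handle so the slot can be reused.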
unsigned doregister( struct __processor_id_t * proc ) with(*__scheduler_lock) {
	__cfadbg_print_safe(ready_queue, "Kernel : Registering proc %p for RW-Lock\n", proc);

	// Step - 1 : check if there is already space in the data
	uint_fast32_t s = ready;

	// Check among all the ready
	for(uint_fast32_t i = 0; i < s; i++) {
		__processor_id_t * null = 0p; // Re-write every loop since compare thrashes it
		if( __atomic_load_n(&data[i].handle, (int)__ATOMIC_RELAXED) == null
			&& __atomic_compare_exchange_n( &data[i].handle, &null, proc, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) {
			/*paranoid*/ verify(i < ready);
			/*paranoid*/ verify(0 == (__alignof__(data[i]) % cache_line_size));
			/*paranoid*/ verify((((uintptr_t)&data[i]) % cache_line_size) == 0);
			return i;
		}
	}

	if(max <= alloc) abort("Trying to create more than %u processors", __scheduler_lock->max);

	// Step - 2 : F&A to get a new spot in the array.
	uint_fast32_t n = __atomic_fetch_add(&alloc, 1, __ATOMIC_SEQ_CST);
	if(max <= n) abort("Trying to create more than %u processors", __scheduler_lock->max);

	// Step - 3 : Mark space as used and then publish it.
	__scheduler_lock_id_t * storage = (__scheduler_lock_id_t *)&data[n];
	(*storage){ proc };
	while(true) {
		unsigned copy = n;
		if( __atomic_load_n(&ready, __ATOMIC_RELAXED) == n
			&& __atomic_compare_exchange_n(&ready, &copy, n + 1, true, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
			break;
		asm volatile("pause");
	}

	__cfadbg_print_safe(ready_queue, "Kernel : Registering proc %p done, id %lu\n", proc, n);

	// Return new spot.
	/*paranoid*/ verify(n < ready);
	/*paranoid*/ verify(__alignof__(data[n]) == (2 * cache_line_size));
	/*paranoid*/ verify((((uintptr_t)&data[n]) % cache_line_size) == 0);
	return n;
}

void unregister( struct __processor_id_t * proc ) with(*__scheduler_lock) {
	unsigned id = proc->id;
	/*paranoid*/ verify(id < ready);
	/*paranoid*/ verify(proc == __atomic_load_n(&data[id].handle, __ATOMIC_RELAXED));
	__atomic_store_n(&data[id].handle, 0p, __ATOMIC_RELEASE);

	__cfadbg_print_safe(ready_queue, "Kernel : Unregister proc %p\n", proc);
}

//-----------------------------------------------------------------------
// Writer side : acquire when changing the ready queue, e.g. adding more
// queues or removing them.
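// The structure is reader-writer: each registered processor owns a per-slot
// lock that the read side (defined elsewhere) takes on its own slot only,
// while the writer below takes the global 'lock' flag and then every per-slot
// lock, making it mutually exclusive with all readers at once.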
uint_fast32_t ready_mutate_lock( void ) with(*__scheduler_lock) {
	// Step 1 : lock global lock
	// It is needed to stop processors that register mid critical-section
	// from simply locking their own lock and entering.
	__atomic_acquire( &lock );

	// Step 2 : lock per-proc lock
	// Processors that are currently being registered aren't counted
	// but can't be in read_lock or in the critical section.
	// All other processors are counted
	uint_fast32_t s = ready;
	for(uint_fast32_t i = 0; i < s; i++) {
		__atomic_acquire( &data[i].lock );
	}

	return s;
}

void ready_mutate_unlock( uint_fast32_t last_s ) with(*__scheduler_lock) {
	// Step 1 : release local locks
	// This must be done while the global lock is held to stop
	// threads that were created mid critical-section
	// from racing to lock their local locks and having the writer
	// immediately unlock them
	// Alternative solution : return s in write_lock and pass it to write_unlock
	for(uint_fast32_t i = 0; i < last_s; i++) {
		verify(data[i].lock);
		__atomic_store_n(&data[i].lock, (bool)false, __ATOMIC_RELEASE);
	}

	// Step 2 : release global lock
	/*paranoid*/ assert(true == lock);
	__atomic_store_n(&lock, (bool)false, __ATOMIC_RELEASE);
}

//=======================================================================
// Cforall Ready Queue used for scheduling
//=======================================================================
void ?{}(__ready_queue_t & this) with (this) {
	lanes.data = 0p;
	lanes.count = 0;
}

void ^?{}(__ready_queue_t & this) with (this) {
	verify( 1 == lanes.count );
	#ifdef USE_SNZI
		verify( !query( snzi ) );
	#endif
	free(lanes.data);
}

//-----------------------------------------------------------------------
__attribute__((hot)) bool query(struct cluster * cltr) {
	#ifdef USE_SNZI
		return query(cltr->ready_queue.snzi);
	#endif
	return true;
}

//-----------------------------------------------------------------------
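// Push picks a lane index (biased toward the current processor's own 4
// sub-queues when BIAS is enabled), spins with try-lock until it owns that
// lane, pushes the thread, updates the SNZI if the lane was empty, and
// reports whether the whole queue appeared empty before the push.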
__attribute__((hot)) bool push(struct cluster * cltr, struct $thread * thrd) with (cltr->ready_queue) {
	__cfadbg_print_safe(ready_queue, "Kernel : Pushing %p on cluster %p\n", thrd, cltr);

	// write timestamp
	thrd->link.ts = rdtscl();

	#if defined(BIAS) && !defined(__CFA_NO_STATISTICS__)
		bool local = false;
		int preferred =
			//*
			kernelTLS.this_processor ? kernelTLS.this_processor->id * 4 : -1;
			/*/
			thrd->link.preferred * 4;
			//*/


	#endif
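	// Note: the "//*" ... "/*/" ... "//*/" bracket above is a comment toggle:
	// removing the leading '/' of "//*" switches which of the two expressions
	// initialises 'preferred'. As written, 'preferred' is the first of the
	// calling processor's block of 4 sub-queues (id * 4), or -1 off-processor.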

	// Try to pick a lane and lock it
	unsigned i;
	do {
		// Pick the index of a lane
		#if defined(BIAS)
			unsigned r = __tls_rand();
			unsigned rlow  = r % BIAS;
			unsigned rhigh = r / BIAS;
			if((0 != rlow) && preferred >= 0) {
				// (BIAS - 1) out of BIAS chances
				// Use preferred queues
				i = preferred + (rhigh % 4);

				#if !defined(__CFA_NO_STATISTICS__)
					local = true;
					__tls_stats()->ready.pick.push.local++;
				#endif
			}
			else {
				// 1 out of BIAS chances
				// Use all queues
				i = rhigh;
				local = false;
			}
		#else
			i = __tls_rand();
		#endif

		i %= __atomic_load_n( &lanes.count, __ATOMIC_RELAXED );

		#if !defined(__CFA_NO_STATISTICS__)
			__tls_stats()->ready.pick.push.attempt++;
		#endif

	// If we can't lock it retry
	} while( !__atomic_try_acquire( &lanes.data[i].lock ) );

	bool first = false;

	// Actually push it
	bool lane_first = push(lanes.data[i], thrd);

	#ifdef USE_SNZI
		// If this lane used to be empty we need to do more
		if(lane_first) {
			// Check if the entire queue used to be empty
			first = !query(snzi);

			// Update the snzi
			arrive( snzi, i );
		}
	#endif

	// Unlock and return
	__atomic_unlock( &lanes.data[i].lock );

	__cfadbg_print_safe(ready_queue, "Kernel : Pushed %p on cluster %p (idx: %u, mask %llu, first %d)\n", thrd, cltr, i, used.mask[0], lane_first);

	// Update statistics
	#if !defined(__CFA_NO_STATISTICS__)
		#if defined(BIAS)
			if( local ) __tls_stats()->ready.pick.push.lsuccess++;
		#endif
		__tls_stats()->ready.pick.push.success++;
	#endif

	// return whether or not the list was empty before this push
	return first;
}

static struct $thread * try_pop(struct cluster * cltr, unsigned i, unsigned j);
static struct $thread * try_pop(struct cluster * cltr, unsigned i);

// Pop from the ready queue from a given cluster
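// The fast path uses a "power of two choices" style pick: choose two lanes at
// random (biased toward this processor's own sub-queues), then try_pop from
// the one whose head has the oldest timestamp. When the SNZI summary is
// disabled, the search is bounded to a fixed number of rounds instead of
// querying emptiness.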
__attribute__((hot)) $thread * pop(struct cluster * cltr) with (cltr->ready_queue) {
	/* paranoid */ verify( lanes.count > 0 );
	unsigned count = __atomic_load_n( &lanes.count, __ATOMIC_RELAXED );
	#if defined(BIAS)
		// Don't bother trying locally too much
		int local_tries = 8;
	#endif

	// As long as the list is not empty, try finding a lane that isn't empty and pop from it
	#ifdef USE_SNZI
		while( query(snzi) ) {
	#else
		for(25) {
	#endif
		// Pick two lists at random
		unsigned i,j;
		#if defined(BIAS)
			#if !defined(__CFA_NO_STATISTICS__)
				bool local = false;
			#endif
			uint64_t r = __tls_rand();
			unsigned rlow  = r % BIAS;
			uint64_t rhigh = r / BIAS;
			if(local_tries && 0 != rlow) {
				// (BIAS - 1) out of BIAS chances
				// Use preferred queues
				unsigned pid = kernelTLS.this_processor->id * 4;
				i = pid + (rhigh % 4);
				j = pid + ((rhigh >> 32ull) % 4);

				// count the tries
				local_tries--;

				#if !defined(__CFA_NO_STATISTICS__)
					local = true;
					__tls_stats()->ready.pick.pop.local++;
				#endif
			}
			else {
				// 1 out of BIAS chances
				// Use all queues
				i = rhigh;
				j = rhigh >> 32ull;
			}
		#else
			i = __tls_rand();
			j = __tls_rand();
		#endif

		i %= count;
		j %= count;

		// try popping from the 2 picked lists
		struct $thread * thrd = try_pop(cltr, i, j);
		if(thrd) {
			#if defined(BIAS) && !defined(__CFA_NO_STATISTICS__)
				if( local ) __tls_stats()->ready.pick.pop.lsuccess++;
			#endif
			return thrd;
		}
	}

	// All lanes were empty, return 0p
	return 0p;
}

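// Slow path: instead of random sampling, scan every lane exactly once,
// starting from a random offset, and return the first thread found.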
__attribute__((hot)) struct $thread * pop_slow(struct cluster * cltr) with (cltr->ready_queue) {
	/* paranoid */ verify( lanes.count > 0 );
	unsigned count = __atomic_load_n( &lanes.count, __ATOMIC_RELAXED );
	unsigned offset = __tls_rand();
	for(i; count) {
		unsigned idx = (offset + i) % count;
		struct $thread * thrd = try_pop(cltr, idx);
		if(thrd) {
			return thrd;
		}
	}

	// All lanes were empty, return 0p
	return 0p;
}


//-----------------------------------------------------------------------
// Given 2 indexes, pick the list with the oldest push and try to pop from it
static inline struct $thread * try_pop(struct cluster * cltr, unsigned i, unsigned j) with (cltr->ready_queue) {
	#if !defined(__CFA_NO_STATISTICS__)
		__tls_stats()->ready.pick.pop.attempt++;
	#endif

	// Pick the best list
	int w = i;
	if( __builtin_expect(!is_empty(lanes.data[j]), true) ) {
		w = (ts(lanes.data[i]) < ts(lanes.data[j])) ? i : j;
	}

	return try_pop(cltr, w);
}

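// Try to pop from a single lane: check emptiness, try-lock, then re-check
// emptiness under the lock (the first check is only a cheap hint), so an
// empty or contended lane is abandoned without blocking.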
static inline struct $thread * try_pop(struct cluster * cltr, unsigned w) with (cltr->ready_queue) {
	// Get relevant elements locally
	__intrusive_lane_t & lane = lanes.data[w];

	// If list looks empty retry
	if( is_empty(lane) ) return 0p;

	// If we can't get the lock retry
	if( !__atomic_try_acquire(&lane.lock) ) return 0p;


	// If list is empty, unlock and retry
	if( is_empty(lane) ) {
		__atomic_unlock(&lane.lock);
		return 0p;
	}

	// Actually pop the list
	struct $thread * thrd;
	bool emptied;
	[thrd, emptied] = pop(lane);

	/* paranoid */ verify(thrd);
	/* paranoid */ verify(lane.lock);

	#ifdef USE_SNZI
		// If this was the last element in the lane
		if(emptied) {
			depart( snzi, w );
		}
	#endif

	// Unlock and return
	__atomic_unlock(&lane.lock);

	// Update statistics
	#if !defined(__CFA_NO_STATISTICS__)
		__tls_stats()->ready.pick.pop.success++;
	#endif

	// Update the thread bias
	thrd->link.preferred = w / 4;

	// return the popped thread
	return thrd;
}
//-----------------------------------------------------------------------

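// Remove a specific thread, but only if it is currently the first element of
// one of the lanes; scans the lanes under their lock and reports whether the
// thread was found and removed.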
bool remove_head(struct cluster * cltr, struct $thread * thrd) with (cltr->ready_queue) {
	for(i; lanes.count) {
		__intrusive_lane_t & lane = lanes.data[i];

		bool removed = false;

		__atomic_acquire(&lane.lock);
		if(head(lane)->link.next == thrd) {
			$thread * pthrd;
			bool emptied;
			[pthrd, emptied] = pop(lane);

			/* paranoid */ verify( pthrd == thrd );

			removed = true;
			#ifdef USE_SNZI
				if(emptied) {
					depart( snzi, i );
				}
			#endif
		}
		__atomic_unlock(&lane.lock);

		if( removed ) return true;
	}
	return false;
}

//-----------------------------------------------------------------------

static void check( __ready_queue_t & q ) with (q) {
	#if defined(__CFA_WITH_VERIFY__)
	{
		for( idx ; lanes.count ) {
			__intrusive_lane_t & sl = lanes.data[idx];
			assert(!lanes.data[idx].lock);

			assert(head(sl)->link.prev == 0p );
			assert(head(sl)->link.next->link.prev == head(sl) );
			assert(tail(sl)->link.next == 0p );
			assert(tail(sl)->link.prev->link.next == tail(sl) );

			if(sl.before.link.ts == 0l) {
				assert(tail(sl)->link.prev == head(sl));
				assert(head(sl)->link.next == tail(sl));
			} else {
				assert(tail(sl)->link.prev != head(sl));
				assert(head(sl)->link.next != tail(sl));
			}
		}
	}
	#endif
}

// Call this function if the intrusive list was moved using memcpy;
// it fixes the list so that the pointers back to anchors aren't left dangling
static inline void fix(__intrusive_lane_t & ll) {
	// if the list is not empty then follow the pointers and fix their reverse links
	if(!is_empty(ll)) {
		head(ll)->link.next->link.prev = head(ll);
		tail(ll)->link.prev->link.next = tail(ll);
	}
	// Otherwise just reset the list
	else {
		verify(tail(ll)->link.next == 0p);
		tail(ll)->link.prev = head(ll);
		head(ll)->link.next = tail(ll);
		verify(head(ll)->link.prev == 0p);
	}
}

// Grow the ready queue
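// Growing allocates 4 sub-queues per processor (ncount = target * 4, minimum
// 1), reallocates the lane array, repairs the intrusive anchors of the moved
// lanes with fix(), constructs the new lanes, and rebuilds the SNZI if in use.
// Must be called with the cluster-wide writer lock held.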
void ready_queue_grow  (struct cluster * cltr, int target) {
	/* paranoid */ verify( ready_mutate_islocked() );
	__cfadbg_print_safe(ready_queue, "Kernel : Growing ready queue\n");

	// Make sure that everything is consistent
	/* paranoid */ check( cltr->ready_queue );

	// grow the ready queue
	with( cltr->ready_queue ) {
		#ifdef USE_SNZI
			^(snzi){};
		#endif

		// Find new count
		// Make sure we always have at least 1 list
		size_t ncount = target >= 2 ? target * 4 : 1;

		// Allocate new array (uses realloc and memcpies the data)
		lanes.data = alloc(lanes.data, ncount);

		// Fix the moved data
		for( idx; (size_t)lanes.count ) {
			fix(lanes.data[idx]);
		}

		// Construct new data
		for( idx; (size_t)lanes.count ~ ncount) {
			(lanes.data[idx]){};
		}

		// Update original
		lanes.count = ncount;

		#ifdef USE_SNZI
			// Re-create the snzi
			snzi{ log2( lanes.count / 8 ) };
			for( idx; (size_t)lanes.count ) {
				if( !is_empty(lanes.data[idx]) ) {
					arrive(snzi, idx);
				}
			}
		#endif
	}

	// Make sure that everything is consistent
	/* paranoid */ check( cltr->ready_queue );

	__cfadbg_print_safe(ready_queue, "Kernel : Growing ready queue done\n");

	/* paranoid */ verify( ready_mutate_islocked() );
}

// Shrink the ready queue
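// Shrinking keeps the first (target * 4) lanes (minimum 1), pops every thread
// left in the lanes being removed and re-pushes it through the normal push
// path so nothing is lost, destroys those lanes, shrinks the array, and
// repairs the moved anchors. Also requires the writer lock.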
void ready_queue_shrink(struct cluster * cltr, int target) {
	/* paranoid */ verify( ready_mutate_islocked() );
	__cfadbg_print_safe(ready_queue, "Kernel : Shrinking ready queue\n");

	// Make sure that everything is consistent
	/* paranoid */ check( cltr->ready_queue );

	with( cltr->ready_queue ) {
		#ifdef USE_SNZI
			^(snzi){};
		#endif

		// Remember old count
		size_t ocount = lanes.count;

		// Find new count
		// Make sure we always have at least 1 list
		lanes.count = target >= 2 ? target * 4 : 1;
		/* paranoid */ verify( ocount >= lanes.count );
		/* paranoid */ verify( lanes.count == target * 4 || target < 2 );

		// for printing count the number of displaced threads
		#if defined(__CFA_DEBUG_PRINT__) || defined(__CFA_DEBUG_PRINT_READY_QUEUE__)
			__attribute__((unused)) size_t displaced = 0;
		#endif

		// redistribute old data
		for( idx; (size_t)lanes.count ~ ocount) {
			// Lock is not strictly needed but makes checking invariants much easier
			__attribute__((unused)) bool locked = __atomic_try_acquire(&lanes.data[idx].lock);
			verify(locked);

			// As long as we can pop from this lane, push the threads somewhere else in the queue
			while(!is_empty(lanes.data[idx])) {
				struct $thread * thrd;
				__attribute__((unused)) bool _;
				[thrd, _] = pop(lanes.data[idx]);

				push(cltr, thrd);

				// for printing count the number of displaced threads
				#if defined(__CFA_DEBUG_PRINT__) || defined(__CFA_DEBUG_PRINT_READY_QUEUE__)
					displaced++;
				#endif
			}

			// Unlock the lane
			__atomic_unlock(&lanes.data[idx].lock);

			// TODO print the queue statistics here

			^(lanes.data[idx]){};
		}

		__cfadbg_print_safe(ready_queue, "Kernel : Shrinking ready queue displaced %zu threads\n", displaced);

		// Allocate new array (uses realloc and memcpies the data)
		lanes.data = alloc(lanes.data, lanes.count);

		// Fix the moved data
		for( idx; (size_t)lanes.count ) {
			fix(lanes.data[idx]);
		}

		#ifdef USE_SNZI
			// Re-create the snzi
			snzi{ log2( lanes.count / 8 ) };
			for( idx; (size_t)lanes.count ) {
				if( !is_empty(lanes.data[idx]) ) {
					arrive(snzi, idx);
				}
			}
		#endif
	}

	// Make sure that everything is consistent
	/* paranoid */ check( cltr->ready_queue );

	__cfadbg_print_safe(ready_queue, "Kernel : Shrinking ready queue done\n");
	/* paranoid */ verify( ready_mutate_islocked() );
}