//
// Cforall Version 1.0.0 Copyright (C) 2019 University of Waterloo
//
// The contents of this file are covered under the licence agreement in the
// file "LICENCE" distributed with Cforall.
//
// ready_queue.cfa --
//
// Author           : Thierry Delisle
// Created On       : Mon Nov dd 16:29:18 2019
// Last Modified By :
// Last Modified On :
// Update Count     :
//

#define __cforall_thread__
// #define __CFA_DEBUG_PRINT_READY_QUEUE__

// #define USE_SNZI

#include "bits/defs.hfa"
#include "kernel_private.hfa"

#define _GNU_SOURCE
#include "stdlib.hfa"
#include "math.hfa"

#include <unistd.h>

#include "snzi.hfa"
#include "ready_subqueue.hfa"

static const size_t cache_line_size = 64;

// No overridden function, no environment variable, no define:
// fall back to a magic number.
#ifndef __CFA_MAX_PROCESSORS__
	#define __CFA_MAX_PROCESSORS__ 1024
#endif

#define BIAS 16

// Returns the maximum number of processors the RWLock supports.
__attribute__((weak)) unsigned __max_processors() {
	const char * max_cores_s = getenv("CFA_MAX_PROCESSORS");
	if(!max_cores_s) {
		__cfadbg_print_nolock(ready_queue, "No CFA_MAX_PROCESSORS in ENV\n");
		return __CFA_MAX_PROCESSORS__;
	}

	char * endptr = 0p;
	long int max_cores_l = strtol(max_cores_s, &endptr, 10);
	if(max_cores_l < 1 || max_cores_l > 65535) {
		__cfadbg_print_nolock(ready_queue, "CFA_MAX_PROCESSORS out of range : %ld\n", max_cores_l);
		return __CFA_MAX_PROCESSORS__;
	}
	if('\0' != *endptr) {
		__cfadbg_print_nolock(ready_queue, "CFA_MAX_PROCESSORS not a decimal number : %s\n", max_cores_s);
		return __CFA_MAX_PROCESSORS__;
	}

	return max_cores_l;
}

//=======================================================================
// Cluster wide reader-writer lock
//=======================================================================
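// Design note (a summary of the code below): each registered processor owns
// one cache-line aligned slot in `data`, holding its handle and a private
// reader lock. A reader only takes its own slot's lock, so uncontended
// read-side acquires touch no shared cache line; the writer takes the global
// `lock` and then every slot's lock, excluding all readers at once.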
void ?{}(__scheduler_RWLock_t & this) {
	this.max   = __max_processors();
	this.alloc = 0;
	this.ready = 0;
	this.lock  = false;
	this.data  = alloc(this.max);

	/*paranoid*/ verify( 0 == (((uintptr_t)(this.data    )) % 64) );
	/*paranoid*/ verify( 0 == (((uintptr_t)(this.data + 1)) % 64) );
	/*paranoid*/ verify(__atomic_is_lock_free(sizeof(this.alloc), &this.alloc));
	/*paranoid*/ verify(__atomic_is_lock_free(sizeof(this.ready), &this.ready));
}

void ^?{}(__scheduler_RWLock_t & this) {
	free(this.data);
}

void ?{}( __scheduler_lock_id_t & this, __processor_id_t * proc ) {
	this.handle = proc;
	this.lock   = false;
	#ifdef __CFA_WITH_VERIFY__
		this.owned = false;
	#endif
}

//=======================================================================
// Lock-Free registering/unregistering of threads
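// Registration protocol (steps 1-3 below):
//  1. scan the published slots for a handle that was freed and CAS the new
//     processor into it;
//  2. otherwise, fetch-and-add `alloc` to claim a brand-new slot;
//  3. publish the slot by advancing `ready` to n + 1, waiting until all
//     earlier slots are published so readers only ever see initialised entries.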
unsigned doregister( struct __processor_id_t * proc ) with(*__scheduler_lock) {
	__cfadbg_print_safe(ready_queue, "Kernel : Registering proc %p for RW-Lock\n", proc);

	// Step - 1 : check if there is already space in the data
	uint_fast32_t s = ready;

	// Check among all the ready
	for(uint_fast32_t i = 0; i < s; i++) {
		__processor_id_t * null = 0p; // Reset each iteration; a failed compare-exchange overwrites it
		if( __atomic_load_n(&data[i].handle, (int)__ATOMIC_RELAXED) == null
			&& __atomic_compare_exchange_n( &data[i].handle, &null, proc, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) {
			/*paranoid*/ verify(i < ready);
			/*paranoid*/ verify(0 == (__alignof__(data[i]) % cache_line_size));
			/*paranoid*/ verify((((uintptr_t)&data[i]) % cache_line_size) == 0);
			return i;
		}
	}

	if(max <= alloc) abort("Trying to create more than %u processors", __scheduler_lock->max);

	// Step - 2 : F&A to get a new spot in the array.
	uint_fast32_t n = __atomic_fetch_add(&alloc, 1, __ATOMIC_SEQ_CST);
	if(max <= n) abort("Trying to create more than %u processors", __scheduler_lock->max);

	// Step - 3 : Mark space as used and then publish it.
	__scheduler_lock_id_t * storage = (__scheduler_lock_id_t *)&data[n];
	(*storage){ proc };
	while() {
		unsigned copy = n;
		if( __atomic_load_n(&ready, __ATOMIC_RELAXED) == n
			&& __atomic_compare_exchange_n(&ready, &copy, n + 1, true, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
			break;
		Pause();
	}

	__cfadbg_print_safe(ready_queue, "Kernel : Registering proc %p done, id %lu\n", proc, n);

	// Return new spot.
	/*paranoid*/ verify(n < ready);
	/*paranoid*/ verify(__alignof__(data[n]) == (2 * cache_line_size));
	/*paranoid*/ verify((((uintptr_t)&data[n]) % cache_line_size) == 0);
	return n;
}

void unregister( struct __processor_id_t * proc ) with(*__scheduler_lock) {
	unsigned id = proc->id;
	/*paranoid*/ verify(id < ready);
	/*paranoid*/ verify(proc == __atomic_load_n(&data[id].handle, __ATOMIC_RELAXED));
	__atomic_store_n(&data[id].handle, 0p, __ATOMIC_RELEASE);

	__cfadbg_print_safe(ready_queue, "Kernel : Unregister proc %p\n", proc);
}

//-----------------------------------------------------------------------
// Writer side : acquire when changing the ready queue, e.g. adding more
// queues or removing them.
uint_fast32_t ready_mutate_lock( void ) with(*__scheduler_lock) {
	/* paranoid */ verify( ! __preemption_enabled() );

	// Step 1 : lock global lock
	// Needed to prevent processors that register mid critical-section
	// from simply locking their own lock and entering.
	__atomic_acquire( &lock );

	// Step 2 : lock per-proc lock
	// Processors that are currently being registered aren't counted
	// but can't be in read_lock or in the critical section.
	// All other processors are counted.
	uint_fast32_t s = ready;
	for(uint_fast32_t i = 0; i < s; i++) {
		__atomic_acquire( &data[i].lock );
	}

	/* paranoid */ verify( ! __preemption_enabled() );
	return s;
}
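
// Typical write-side pairing (illustrative; the real call sites live in the
// kernel code that resizes the cluster):
//   uint_fast32_t last_size = ready_mutate_lock();
//     ready_queue_grow( cltr, target );   // or ready_queue_shrink
//   ready_mutate_unlock( last_size );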

void ready_mutate_unlock( uint_fast32_t last_s ) with(*__scheduler_lock) {
	/* paranoid */ verify( ! __preemption_enabled() );

	// Step 1 : release local locks
	// This must be done while the global lock is held to avoid
	// threads that were created mid critical-section
	// racing to lock their local locks and having the writer
	// immediately unlock them.
	// Alternative solution : return s in write_lock and pass it to write_unlock
	for(uint_fast32_t i = 0; i < last_s; i++) {
		verify(data[i].lock);
		__atomic_store_n(&data[i].lock, (bool)false, __ATOMIC_RELEASE);
	}

	// Step 2 : release global lock
	/*paranoid*/ assert(true == lock);
	__atomic_store_n(&lock, (bool)false, __ATOMIC_RELEASE);

	/* paranoid */ verify( ! __preemption_enabled() );
}

//=======================================================================
// Cforall Ready Queue used for scheduling
//=======================================================================
void ?{}(__ready_queue_t & this) with (this) {
	lanes.data  = 0p;
	lanes.count = 0;
}

void ^?{}(__ready_queue_t & this) with (this) {
	verify( 1 == lanes.count );
	#ifdef USE_SNZI
		verify( !query( snzi ) );
	#endif
	free(lanes.data);
}

//-----------------------------------------------------------------------
__attribute__((hot)) bool query(struct cluster * cltr) {
	#ifdef USE_SNZI
		return query(cltr->ready_queue.snzi);
	#endif
	return true;
}

static inline [unsigned, bool] idx_from_r(unsigned r, int preferred) {
	unsigned i;
	bool local;
	#if defined(BIAS)
		unsigned rlow  = r % BIAS;
		unsigned rhigh = r / BIAS;
		if((0 != rlow) && preferred >= 0) {
			// (BIAS - 1) out of BIAS chances
			// Use preferred queues
			i = preferred + (rhigh % 4);
			local = true;
		}
		else {
			// 1 out of BIAS chances
			// Use all queues
			i = rhigh;
			local = false;
		}
	#else
		i = r;
		local = false;
	#endif
	return [i, local];
}
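
// Worked example (illustrative numbers): with BIAS == 16, preferred == 8 and
// a draw r == 35, rlow == 35 % 16 == 3 and rhigh == 35 / 16 == 2. Since
// rlow != 0 the pick stays local: i == 8 + (2 % 4) == 10. Only the 1-in-16
// draws with rlow == 0 fall through and spread over all lanes.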

//-----------------------------------------------------------------------
__attribute__((hot)) bool push(struct cluster * cltr, struct $thread * thrd) with (cltr->ready_queue) {
	__cfadbg_print_safe(ready_queue, "Kernel : Pushing %p on cluster %p\n", thrd, cltr);

	// write timestamp
	thrd->link.ts = rdtscl();

	__attribute__((unused)) bool local;
	__attribute__((unused)) int preferred = -1;
	#if defined(BIAS)
		// The `//*` / `/*/` pair below is a comment toggle : as written the
		// processor-id bias is active; removing one `/` from `//*` switches
		// to the thread's saved preference instead.
		preferred =
			//*
			kernelTLS().this_processor ? kernelTLS().this_processor->id * 4 : -1;
			/*/
			thrd->link.preferred * 4;
			//*/
	#endif

	// Try to pick a lane and lock it
	unsigned i;
	do {
		// Pick the index of a lane
		// unsigned r = __tls_rand();
		unsigned r = __tls_rand_fwd();
		[i, local] = idx_from_r(r, preferred);

		#if !defined(__CFA_NO_STATISTICS__)
			if(local) {
				__tls_stats()->ready.pick.push.local++;
			}
		#endif

		i %= __atomic_load_n( &lanes.count, __ATOMIC_RELAXED );

		#if !defined(__CFA_NO_STATISTICS__)
			__tls_stats()->ready.pick.push.attempt++;
		#endif

		// If we can't lock it retry
	} while( !__atomic_try_acquire( &lanes.data[i].lock ) );

	bool first = false;

	// Actually push it
	#ifdef USE_SNZI
		bool lane_first =
	#endif

	push(lanes.data[i], thrd);

	#ifdef USE_SNZI
		// If this lane used to be empty we need to do more
		if(lane_first) {
			// Check if the entire queue used to be empty
			first = !query(snzi);

			// Update the snzi
			arrive( snzi, i );
		}
	#endif

	__tls_rand_advance_bck();

	// Unlock and return
	__atomic_unlock( &lanes.data[i].lock );

	__cfadbg_print_safe(ready_queue, "Kernel : Pushed %p on cluster %p (idx: %u)\n", thrd, cltr, i);

	// Update statistics
	#if !defined(__CFA_NO_STATISTICS__)
		#if defined(BIAS)
			if( local ) __tls_stats()->ready.pick.push.lsuccess++;
		#endif
		__tls_stats()->ready.pick.push.success++;
	#endif

	// return whether or not the list was empty before this push
	return first;
}
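
// Note on the paired random streams (an observation, not in the original
// comments): push() draws from __tls_rand_fwd() and then calls
// __tls_rand_advance_bck(), while pop() draws from __tls_rand_bck(); the
// plain __tls_rand() calls are left commented out as the unpaired
// alternative. The fwd/bck pairing appears intended to let pops replay the
// lane choices of recent pushes.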

static struct $thread * try_pop(struct cluster * cltr, unsigned i, unsigned j);
static struct $thread * try_pop(struct cluster * cltr, unsigned i);

// Pop from the ready queue from a given cluster
__attribute__((hot)) $thread * pop(struct cluster * cltr) with (cltr->ready_queue) {
	/* paranoid */ verify( lanes.count > 0 );
	unsigned count = __atomic_load_n( &lanes.count, __ATOMIC_RELAXED );
	int preferred = -1;
	#if defined(BIAS)
		// Don't bother trying locally too much
		__attribute__((unused)) int local_tries = 8;
		preferred = kernelTLS().this_processor->id * 4;
	#endif

	// As long as the list is not empty, try finding a lane that isn't empty and pop from it
	#ifdef USE_SNZI
		while( query(snzi) ) {
	#else
		for(25) {
	#endif
		// Pick two lists at random
		// unsigned ri = __tls_rand();
		// unsigned rj = __tls_rand();
		unsigned ri = __tls_rand_bck();
		unsigned rj = __tls_rand_bck();

		unsigned i, j;
		__attribute__((unused)) bool locali, localj;
		[i, locali] = idx_from_r(ri, preferred);
		[j, localj] = idx_from_r(rj, preferred);

		#if !defined(__CFA_NO_STATISTICS__)
			if(locali) {
				__tls_stats()->ready.pick.pop.local++;
			}
			if(localj) {
				__tls_stats()->ready.pick.pop.local++;
			}
		#endif

		i %= count;
		j %= count;

		// try popping from the 2 picked lists
		struct $thread * thrd = try_pop(cltr, i, j);
		if(thrd) {
			#if defined(BIAS) && !defined(__CFA_NO_STATISTICS__)
				if( locali || localj ) __tls_stats()->ready.pick.pop.lsuccess++;
			#endif
			return thrd;
		}
	}

	// All lanes were empty, return 0p
	return 0p;
}
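
// Note: when USE_SNZI is disabled there is no cheap way to detect that the
// whole queue is empty, so the loop above bounds the search at 25 random
// attempts before declaring the queue empty; with the SNZI it can instead
// loop for exactly as long as the SNZI reports remaining work.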

__attribute__((hot)) struct $thread * pop_slow(struct cluster * cltr) with (cltr->ready_queue) {
	/* paranoid */ verify( lanes.count > 0 );
	unsigned count = __atomic_load_n( &lanes.count, __ATOMIC_RELAXED );
	unsigned offset = __tls_rand();
	for(i; count) {
		unsigned idx = (offset + i) % count;
		struct $thread * thrd = try_pop(cltr, idx);
		if(thrd) {
			return thrd;
		}
	}

	// All lanes were empty, return 0p
	return 0p;
}

//-----------------------------------------------------------------------
// Given 2 indexes, pick the list with the oldest push and try to pop from it
static inline struct $thread * try_pop(struct cluster * cltr, unsigned i, unsigned j) with (cltr->ready_queue) {
	#if !defined(__CFA_NO_STATISTICS__)
		__tls_stats()->ready.pick.pop.attempt++;
	#endif

	// Pick the best list
	int w = i;
	if( __builtin_expect(!is_empty(lanes.data[j]), true) ) {
		w = (ts(lanes.data[i]) < ts(lanes.data[j])) ? i : j;
	}

	return try_pop(cltr, w);
}

static inline struct $thread * try_pop(struct cluster * cltr, unsigned w) with (cltr->ready_queue) {
	// Get relevant elements locally
	__intrusive_lane_t & lane = lanes.data[w];

	// If list looks empty retry
	if( is_empty(lane) ) return 0p;

	// If we can't get the lock retry
	if( !__atomic_try_acquire(&lane.lock) ) return 0p;

	// If list is empty, unlock and retry
	if( is_empty(lane) ) {
		__atomic_unlock(&lane.lock);
		return 0p;
	}

	// Actually pop the list
	struct $thread * thrd;
	thrd = pop(lane);

	/* paranoid */ verify(thrd);
	/* paranoid */ verify(lane.lock);

	#ifdef USE_SNZI
		// If this was the last element in the lane
		if(is_empty(lane)) {
			depart( snzi, w );
		}
	#endif

	// Unlock and return
	__atomic_unlock(&lane.lock);

	// Update statistics
	#if !defined(__CFA_NO_STATISTICS__)
		__tls_stats()->ready.pick.pop.success++;
	#endif

	// Update the thread bias
	thrd->link.preferred = w / 4;

	// return the popped thread
	return thrd;
}
//-----------------------------------------------------------------------

bool remove_head(struct cluster * cltr, struct $thread * thrd) with (cltr->ready_queue) {
	for(i; lanes.count) {
		__intrusive_lane_t & lane = lanes.data[i];

		bool removed = false;

		__atomic_acquire(&lane.lock);
		if(head(lane)->link.next == thrd) {
			$thread * pthrd;
			pthrd = pop(lane);

			/* paranoid */ verify( pthrd == thrd );

			removed = true;
			#ifdef USE_SNZI
				if(is_empty(lane)) {
					depart( snzi, i );
				}
			#endif
		}
		__atomic_unlock(&lane.lock);

		if( removed ) return true;
	}
	return false;
}

//-----------------------------------------------------------------------

static void check( __ready_queue_t & q ) with (q) {
	#if defined(__CFA_WITH_VERIFY__)
		{
			for( idx ; lanes.count ) {
				__intrusive_lane_t & sl = lanes.data[idx];
				assert(!lanes.data[idx].lock);

				assert(head(sl)->link.prev == 0p );
				assert(head(sl)->link.next->link.prev == head(sl) );
				assert(tail(sl)->link.next == 0p );
				assert(tail(sl)->link.prev->link.next == tail(sl) );

				if(sl.before.link.ts == 0l) {
					assert(tail(sl)->link.prev == head(sl));
					assert(head(sl)->link.next == tail(sl));
				} else {
					assert(tail(sl)->link.prev != head(sl));
					assert(head(sl)->link.next != tail(sl));
				}
			}
		}
	#endif
}

// Call this function if the intrusive list was moved using memcpy;
// it fixes the list so that the pointers back to the anchors aren't left dangling.
static inline void fix(__intrusive_lane_t & ll) {
	// if the list is not empty then follow the pointers and fix their reverse links
	if(!is_empty(ll)) {
		head(ll)->link.next->link.prev = head(ll);
		tail(ll)->link.prev->link.next = tail(ll);
	}
	// Otherwise just reset the list
	else {
		verify(tail(ll)->link.next == 0p);
		tail(ll)->link.prev = head(ll);
		head(ll)->link.next = tail(ll);
		verify(head(ll)->link.prev == 0p);
	}
}
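
// Example of the hazard fix() repairs (illustrative): growing calls
// `alloc( ncount, lanes.data`realloc )`, which may memcpy every lane to a
// new address. The head/tail anchors of a moved lane still point at their
// old addresses, so fix() re-aims these self-referential pointers before the
// lane is used again.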

// Grow the ready queue
void ready_queue_grow (struct cluster * cltr, int target) {
	/* paranoid */ verify( ready_mutate_islocked() );
	__cfadbg_print_safe(ready_queue, "Kernel : Growing ready queue\n");

	// Make sure that everything is consistent
	/* paranoid */ check( cltr->ready_queue );

	// grow the ready queue
	with( cltr->ready_queue ) {
		#ifdef USE_SNZI
			^(snzi){};
		#endif

		// Find new count
		// Make sure we always have at least 1 list
		size_t ncount = target >= 2 ? target * 4 : 1;

		// Allocate new array (uses realloc and memcpies the data)
		lanes.data = alloc( ncount, lanes.data`realloc );

		// Fix the moved data
		for( idx; (size_t)lanes.count ) {
			fix(lanes.data[idx]);
		}

		// Construct new data
		for( idx; (size_t)lanes.count ~ ncount) {
			(lanes.data[idx]){};
		}

		// Update original
		lanes.count = ncount;

		#ifdef USE_SNZI
			// Re-create the snzi
			snzi{ log2( lanes.count / 8 ) };
			for( idx; (size_t)lanes.count ) {
				if( !is_empty(lanes.data[idx]) ) {
					arrive(snzi, idx);
				}
			}
		#endif
	}

	// Make sure that everything is consistent
	/* paranoid */ check( cltr->ready_queue );

	__cfadbg_print_safe(ready_queue, "Kernel : Growing ready queue done\n");

	/* paranoid */ verify( ready_mutate_islocked() );
}
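
// Sizing note (a reading of the code above): each target processor gets a
// block of 4 lanes (`target * 4`), matching the `id * 4` preferred index and
// the `rhigh % 4` spread in idx_from_r; clusters targeting fewer than 2
// processors collapse to a single lane.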

// Shrink the ready queue
void ready_queue_shrink(struct cluster * cltr, int target) {
	/* paranoid */ verify( ready_mutate_islocked() );
	__cfadbg_print_safe(ready_queue, "Kernel : Shrinking ready queue\n");

	// Make sure that everything is consistent
	/* paranoid */ check( cltr->ready_queue );

	with( cltr->ready_queue ) {
		#ifdef USE_SNZI
			^(snzi){};
		#endif

		// Remember old count
		size_t ocount = lanes.count;

		// Find new count
		// Make sure we always have at least 1 list
		lanes.count = target >= 2 ? target * 4 : 1;
		/* paranoid */ verify( ocount >= lanes.count );
		/* paranoid */ verify( lanes.count == target * 4 || target < 2 );

		// for printing count the number of displaced threads
		#if defined(__CFA_DEBUG_PRINT__) || defined(__CFA_DEBUG_PRINT_READY_QUEUE__)
			__attribute__((unused)) size_t displaced = 0;
		#endif

		// redistribute old data
		for( idx; (size_t)lanes.count ~ ocount) {
			// Lock is not strictly needed but makes checking invariants much easier
			__attribute__((unused)) bool locked = __atomic_try_acquire(&lanes.data[idx].lock);
			verify(locked);

			// As long as we can pop from this lane, push the threads somewhere else in the queue
			while(!is_empty(lanes.data[idx])) {
				struct $thread * thrd;
				thrd = pop(lanes.data[idx]);

				push(cltr, thrd);

				// for printing count the number of displaced threads
				#if defined(__CFA_DEBUG_PRINT__) || defined(__CFA_DEBUG_PRINT_READY_QUEUE__)
					displaced++;
				#endif
			}

			// Unlock the lane
			__atomic_unlock(&lanes.data[idx].lock);

			// TODO print the queue statistics here

			^(lanes.data[idx]){};
		}

		__cfadbg_print_safe(ready_queue, "Kernel : Shrinking ready queue displaced %zu threads\n", displaced);

		// Allocate new array (uses realloc and memcpies the data)
		lanes.data = alloc( lanes.count, lanes.data`realloc );

		// Fix the moved data
		for( idx; (size_t)lanes.count ) {
			fix(lanes.data[idx]);
		}

		#ifdef USE_SNZI
			// Re-create the snzi
			snzi{ log2( lanes.count / 8 ) };
			for( idx; (size_t)lanes.count ) {
				if( !is_empty(lanes.data[idx]) ) {
					arrive(snzi, idx);
				}
			}
		#endif
	}

	// Make sure that everything is consistent
	/* paranoid */ check( cltr->ready_queue );

	__cfadbg_print_safe(ready_queue, "Kernel : Shrinking ready queue done\n");
	/* paranoid */ verify( ready_mutate_islocked() );
}
---|