//
// Cforall Version 1.0.0 Copyright (C) 2019 University of Waterloo
//
// The contents of this file are covered under the licence agreement in the
// file "LICENCE" distributed with Cforall.
//
// ready_queue.cfa --
//
// Author           : Thierry Delisle
// Created On       : Mon Nov dd 16:29:18 2019
// Last Modified By :
// Last Modified On :
// Update Count     :
//

#define __cforall_thread__
// #define __CFA_DEBUG_PRINT_READY_QUEUE__

#include "bits/defs.hfa"
#include "kernel_private.hfa"

#define _GNU_SOURCE
#include "stdlib.hfa"
#include "math.hfa"

#include <unistd.h>

#include "snzi.hfa"
#include "ready_subqueue.hfa"

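// Assumed cache-line size; 64 bytes matches current x86-64 and most ARM
// cores, but this is a baked-in guess rather than a value queried from
// the hardware.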
static const size_t cache_line_size = 64;

// No overridden function, no environment variable, no define:
// fall back to a magic number.
#ifndef __CFA_MAX_PROCESSORS__
	#define __CFA_MAX_PROCESSORS__ 1024
#endif

#define BIAS 16
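// A sketch of how BIAS is used below: a random draw r is split into
// rlow = r % BIAS and rhigh = r / BIAS. When rlow != 0, i.e. (BIAS - 1)
// draws out of BIAS, push/pop stay on the 4 lanes this processor prefers
// (subject to the extra guards in those functions); the remaining 1 draw
// in BIAS spreads over all lanes.
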
// Returns the maximum number of processors the RWLock supports.
__attribute__((weak)) unsigned __max_processors() {
	const char * max_cores_s = getenv("CFA_MAX_PROCESSORS");
	if(!max_cores_s) {
		__cfadbg_print_nolock(ready_queue, "No CFA_MAX_PROCESSORS in ENV\n");
		return __CFA_MAX_PROCESSORS__;
	}

	char * endptr = 0p;
	long int max_cores_l = strtol(max_cores_s, &endptr, 10);
	if(max_cores_l < 1 || max_cores_l > 65535) {
		__cfadbg_print_nolock(ready_queue, "CFA_MAX_PROCESSORS out of range : %ld\n", max_cores_l);
		return __CFA_MAX_PROCESSORS__;
	}
	if('\0' != *endptr) {
		__cfadbg_print_nolock(ready_queue, "CFA_MAX_PROCESSORS not a decimal number : %s\n", max_cores_s);
		return __CFA_MAX_PROCESSORS__;
	}

	return max_cores_l;
}
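
// For example, running a program with "CFA_MAX_PROCESSORS=128 ./a.out"
// caps the RW-lock at 128 registered processors; an unset or invalid
// value falls back to __CFA_MAX_PROCESSORS__ (1024 unless overridden at
// compile time).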

//=======================================================================
// Cluster wide reader-writer lock
//=======================================================================
void ?{}(__scheduler_RWLock_t & this) {
	this.max   = __max_processors();
	this.alloc = 0;
	this.ready = 0;
	this.lock  = false;
	this.data  = alloc(this.max);

	/*paranoid*/ verify( 0 == (((uintptr_t)(this.data    )) % 64) );
	/*paranoid*/ verify( 0 == (((uintptr_t)(this.data + 1)) % 64) );
	/*paranoid*/ verify(__atomic_is_lock_free(sizeof(this.alloc), &this.alloc));
	/*paranoid*/ verify(__atomic_is_lock_free(sizeof(this.ready), &this.ready));
}
void ^?{}(__scheduler_RWLock_t & this) {
	free(this.data);
}

void ?{}( __scheduler_lock_id_t & this, __processor_id_t * proc ) {
	this.handle = proc;
	this.lock   = false;
	#ifdef __CFA_WITH_VERIFY__
		this.owned = false;
	#endif
}

//=======================================================================
// Lock-Free registering/unregistering of threads
unsigned doregister( struct __processor_id_t * proc ) with(*__scheduler_lock) {
	__cfadbg_print_safe(ready_queue, "Kernel : Registering proc %p for RW-Lock\n", proc);

	// Step - 1 : check if there is already space in the data
	uint_fast32_t s = ready;

	// Check among all the ready
	for(uint_fast32_t i = 0; i < s; i++) {
		__processor_id_t * null = 0p; // Re-write every loop since compare thrashes it
		if( __atomic_load_n(&data[i].handle, (int)__ATOMIC_RELAXED) == null
			&& __atomic_compare_exchange_n( &data[i].handle, &null, proc, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) {
			/*paranoid*/ verify(i < ready);
			/*paranoid*/ verify(0 == (__alignof__(data[i]) % cache_line_size));
			/*paranoid*/ verify((((uintptr_t)&data[i]) % cache_line_size) == 0);
			return i;
		}
	}

	if(max <= alloc) abort("Trying to create more than %u processors", __scheduler_lock->max);

	// Step - 2 : F&A to get a new spot in the array.
	uint_fast32_t n = __atomic_fetch_add(&alloc, 1, __ATOMIC_SEQ_CST);
	if(max <= n) abort("Trying to create more than %u processors", __scheduler_lock->max);

	// Step - 3 : Mark space as used and then publish it.
	__scheduler_lock_id_t * storage = (__scheduler_lock_id_t *)&data[n];
	(*storage){ proc };
	while() {
		unsigned copy = n;
		if( __atomic_load_n(&ready, __ATOMIC_RELAXED) == n
			&& __atomic_compare_exchange_n(&ready, &copy, n + 1, true, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
			break;
		Pause();
	}

	__cfadbg_print_safe(ready_queue, "Kernel : Registering proc %p done, id %lu\n", proc, n);

	// Return new spot.
	/*paranoid*/ verify(n < ready);
	/*paranoid*/ verify(__alignof__(data[n]) == (2 * cache_line_size));
	/*paranoid*/ verify((((uintptr_t)&data[n]) % cache_line_size) == 0);
	return n;
}
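
// Note on the publication loop in Step 3: "ready" only advances from n to
// n + 1 once slot n is fully constructed, and slots are published in
// order. A processor that allocated slot n + 1 therefore spins in Pause()
// until the owner of slot n publishes first, preserving the invariant
// that slots 0 .. ready - 1 are always safe to scan.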

void unregister( struct __processor_id_t * proc ) with(*__scheduler_lock) {
	unsigned id = proc->id;
	/*paranoid*/ verify(id < ready);
	/*paranoid*/ verify(proc == __atomic_load_n(&data[id].handle, __ATOMIC_RELAXED));
	__atomic_store_n(&data[id].handle, 0p, __ATOMIC_RELEASE);

	__cfadbg_print_safe(ready_queue, "Kernel : Unregister proc %p\n", proc);
}

//-----------------------------------------------------------------------
// Writer side : acquire when changing the ready queue, e.g. adding more
// queues or removing them.
uint_fast32_t ready_mutate_lock( void ) with(*__scheduler_lock) {
	// Step 1 : lock global lock
	// It is needed to stop processors that register mid critical-section
	// from simply locking their own lock and entering.
	__atomic_acquire( &lock );

	// Step 2 : lock per-proc locks
	// Processors that are currently being registered aren't counted
	// but can't be in read_lock or in the critical section.
	// All other processors are counted.
	uint_fast32_t s = ready;
	for(uint_fast32_t i = 0; i < s; i++) {
		__atomic_acquire( &data[i].lock );
	}

	return s;
}

void ready_mutate_unlock( uint_fast32_t last_s ) with(*__scheduler_lock) {
	// Step 1 : release local locks
	// This must be done while the global lock is held to avoid
	// threads that were created mid critical-section
	// racing to lock their local locks and having the writer
	// immediately unlock them.
	// Alternative solution : return s in write_lock and pass it to write_unlock
	for(uint_fast32_t i = 0; i < last_s; i++) {
		verify(data[i].lock);
		__atomic_store_n(&data[i].lock, (bool)false, __ATOMIC_RELEASE);
	}

	// Step 2 : release global lock
	/*paranoid*/ assert(true == lock);
	__atomic_store_n(&lock, (bool)false, __ATOMIC_RELEASE);
}
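
// A sketch of the writer-side protocol, as used by ready_queue_grow and
// ready_queue_shrink below:
//	uint_fast32_t last_size = ready_mutate_lock();
//		... mutate the ready queue, e.g. resize the lanes ...
//	ready_mutate_unlock( last_size );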

//=======================================================================
// Cforall Ready Queue used for scheduling
//=======================================================================
void ?{}(__ready_queue_t & this) with (this) {
	lanes.data = 0p;
	lanes.count = 0;
}

void ^?{}(__ready_queue_t & this) with (this) {
	verify( 1 == lanes.count );
	verify( !query( snzi ) );
	free(lanes.data);
}

//-----------------------------------------------------------------------
__attribute__((hot)) bool query(struct cluster * cltr) {
	return query(cltr->ready_queue.snzi);
}
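
// The snzi field is a Scalable Non-Zero Indicator (see snzi.hfa; the name
// suggests the SNZI structure of Ellen et al., though that attribution is
// an assumption here): it answers "is any lane non-empty?" without a
// single shared counter that every push and pop would contend on. Lanes
// arrive() into it when they become non-empty and depart() when they
// drain.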

//-----------------------------------------------------------------------
__attribute__((hot)) bool push(struct cluster * cltr, struct $thread * thrd) with (cltr->ready_queue) {
	__cfadbg_print_safe(ready_queue, "Kernel : Pushing %p on cluster %p\n", thrd, cltr);

	// write timestamp
	thrd->link.ts = rdtscl();

	#if defined(BIAS)
		#if !defined(__CFA_NO_STATISTICS__)
			bool local = false;
		#endif
		// comment toggle : the first expression biases towards the current
		// processor's block of lanes, the second towards the block the
		// thread was last popped from (see try_pop)
		int preferred =
			//*
			kernelTLS.this_processor ? kernelTLS.this_processor->id * 4 : -1;
			/*/
			thrd->link.preferred * 4;
			//*/
	#endif

	// Try to pick a lane and lock it
	unsigned i;
	do {
		// Pick the index of a lane
		#if defined(BIAS)
			unsigned r = __tls_rand();
			unsigned rlow  = r % BIAS;
			unsigned rhigh = r / BIAS;
			if((0 != rlow) && preferred >= 0) {
				// (BIAS - 1) out of BIAS chances
				// Use preferred queues
				i = preferred + (rhigh % 4);

				#if !defined(__CFA_NO_STATISTICS__)
					local = true;
					__tls_stats()->ready.pick.push.local++;
				#endif
			}
			else {
				// 1 out of BIAS chances
				// Use all queues
				i = rhigh;
				#if !defined(__CFA_NO_STATISTICS__)
					local = false;
				#endif
			}
		#else
			i = __tls_rand();
		#endif

		i %= __atomic_load_n( &lanes.count, __ATOMIC_RELAXED );

		#if !defined(__CFA_NO_STATISTICS__)
			__tls_stats()->ready.pick.push.attempt++;
		#endif

		// If we can't lock it retry
	} while( !__atomic_try_acquire( &lanes.data[i].lock ) );

	bool first = false;

	// Actually push it
	bool lane_first = push(lanes.data[i], thrd);

	// If this lane used to be empty we need to do more
	if(lane_first) {
		// Check if the entire queue used to be empty
		first = !query(snzi);

		// Update the snzi
		arrive( snzi, i );
	}

	// Unlock and return
	__atomic_unlock( &lanes.data[i].lock );

	__cfadbg_print_safe(ready_queue, "Kernel : Pushed %p on cluster %p (idx: %u, mask %llu, first %d)\n", thrd, cltr, i, used.mask[0], lane_first);

	// Update statistics
	#if !defined(__CFA_NO_STATISTICS__)
		#if defined(BIAS)
			if( local ) __tls_stats()->ready.pick.push.lsuccess++;
		#endif
		__tls_stats()->ready.pick.push.success++;
	#endif

	// return whether or not the list was empty before this push
	return first;
}
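
// Note on the "* 4" / "% 4" arithmetic above: ready_queue_grow/shrink
// below size the queue at 4 lanes per target processor (lanes.count ==
// target * 4), so id * 4 is the base of a processor's block of lanes and
// (rhigh % 4) picks one lane within that block.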

static struct $thread * try_pop(struct cluster * cltr, unsigned i, unsigned j);
static struct $thread * try_pop(struct cluster * cltr, unsigned i);

// Pop from the ready queue from a given cluster
__attribute__((hot)) $thread * pop(struct cluster * cltr) with (cltr->ready_queue) {
	/* paranoid */ verify( lanes.count > 0 );
	#if defined(BIAS)
		// Don't bother trying locally too much
		int local_tries = 8;
	#endif

	// As long as the list is not empty, try finding a lane that isn't empty and pop from it
	while( query(snzi) ) {
		// Pick two lists at random
		unsigned i,j;
		#if defined(BIAS)
			#if !defined(__CFA_NO_STATISTICS__)
				bool local = false;
			#endif
			uint64_t r = __tls_rand();
			unsigned rlow  = r % BIAS;
			uint64_t rhigh = r / BIAS;
			if(local_tries && 0 != rlow) {
				// (BIAS - 1) out of BIAS chances
				// Use preferred queues
				unsigned pid = kernelTLS.this_processor->id * 4;
				i = pid + (rhigh % 4);
				j = pid + ((rhigh >> 32ull) % 4);

				// count the tries
				local_tries--;

				#if !defined(__CFA_NO_STATISTICS__)
					local = true;
					__tls_stats()->ready.pick.pop.local++;
				#endif
			}
			else {
				// 1 out of BIAS chances
				// Use all queues
				i = rhigh;
				j = rhigh >> 32ull;
			}
		#else
			i = __tls_rand();
			j = __tls_rand();
		#endif

		i %= __atomic_load_n( &lanes.count, __ATOMIC_RELAXED );
		j %= __atomic_load_n( &lanes.count, __ATOMIC_RELAXED );

		// try popping from the 2 picked lists
		struct $thread * thrd = try_pop(cltr, i, j);
		if(thrd) {
			#if defined(BIAS) && !defined(__CFA_NO_STATISTICS__)
				if( local ) __tls_stats()->ready.pick.pop.lsuccess++;
			#endif
			return thrd;
		}
	}

	// All lanes were empty, return 0p
	return 0p;
}

//-----------------------------------------------------------------------
// Given 2 indexes, pick the list with the oldest push and try to pop from it
static inline struct $thread * try_pop(struct cluster * cltr, unsigned i, unsigned j) with (cltr->ready_queue) {
	#if !defined(__CFA_NO_STATISTICS__)
		__tls_stats()->ready.pick.pop.attempt++;
	#endif

	// Pick the best list
	int w = i;
	if( __builtin_expect(!is_empty(lanes.data[j]), true) ) {
		w = (ts(lanes.data[i]) < ts(lanes.data[j])) ? i : j;
	}

	return try_pop(cltr, w);
}
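
// This two-sample pick is the classic "power of two random choices"
// load-balancing technique: popping from the lane whose head carries the
// older timestamp keeps lanes roughly balanced and approximates global
// FIFO ordering, at a fraction of the cost of scanning every lane.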

static inline struct $thread * try_pop(struct cluster * cltr, unsigned w) with (cltr->ready_queue) {
	// Get relevant elements locally
	__intrusive_lane_t & lane = lanes.data[w];

	// If list looks empty retry
	if( is_empty(lane) ) return 0p;

	// If we can't get the lock retry
	if( !__atomic_try_acquire(&lane.lock) ) return 0p;

	// If list is empty, unlock and retry
	if( is_empty(lane) ) {
		__atomic_unlock(&lane.lock);
		return 0p;
	}

	// Actually pop the list
	struct $thread * thrd;
	bool emptied;
	[thrd, emptied] = pop(lane);

	/* paranoid */ verify(thrd);
	/* paranoid */ verify(lane.lock);

	// If this was the last element in the lane
	if(emptied) {
		depart( snzi, w );
	}

	// Unlock and return
	__atomic_unlock(&lane.lock);

	// Update statistics
	#if !defined(__CFA_NO_STATISTICS__)
		__tls_stats()->ready.pick.pop.success++;
	#endif

	// Update the thread bias
	thrd->link.preferred = w / 4;

	// return the popped thread
	return thrd;
}
//-----------------------------------------------------------------------

bool remove_head(struct cluster * cltr, struct $thread * thrd) with (cltr->ready_queue) {
	for(i; lanes.count) {
		__intrusive_lane_t & lane = lanes.data[i];

		bool removed = false;

		__atomic_acquire(&lane.lock);
		if(head(lane)->link.next == thrd) {
			$thread * pthrd;
			bool emptied;
			[pthrd, emptied] = pop(lane);

			/* paranoid */ verify( pthrd == thrd );

			removed = true;
			if(emptied) {
				depart( snzi, i );
			}
		}
		__atomic_unlock(&lane.lock);

		if( removed ) return true;
	}
	return false;
}
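
// Note: removal is best-effort; head(lane)->link.next is a lane's first
// real element (head() being the sentinel anchor), so thrd is only
// removed when it sits at the front of whichever lane holds it. A thread
// buried mid-lane is left in place and false is returned.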

//-----------------------------------------------------------------------

static void check( __ready_queue_t & q ) with (q) {
	#if defined(__CFA_WITH_VERIFY__)
		{
			for( idx ; lanes.count ) {
				__intrusive_lane_t & sl = lanes.data[idx];
				assert(!lanes.data[idx].lock);

				assert(head(sl)->link.prev == 0p );
				assert(head(sl)->link.next->link.prev == head(sl) );
				assert(tail(sl)->link.next == 0p );
				assert(tail(sl)->link.prev->link.next == tail(sl) );

				if(sl.before.link.ts == 0l) {
					assert(tail(sl)->link.prev == head(sl));
					assert(head(sl)->link.next == tail(sl));
				} else {
					assert(tail(sl)->link.prev != head(sl));
					assert(head(sl)->link.next != tail(sl));
				}
			}
		}
	#endif
}

// Call this function if the intrusive list was moved using memcpy;
// fixes the list so that the pointers back to anchors aren't left dangling
static inline void fix(__intrusive_lane_t & ll) {
	// if the list is not empty then follow the pointer and fix its reverse
	if(!is_empty(ll)) {
		head(ll)->link.next->link.prev = head(ll);
		tail(ll)->link.prev->link.next = tail(ll);
	}
	// Otherwise just reset the list
	else {
		verify(tail(ll)->link.next == 0p);
		tail(ll)->link.prev = head(ll);
		head(ll)->link.next = tail(ll);
		verify(head(ll)->link.prev == 0p);
	}
}
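
// Why fix() is needed, in a sketch: the head and tail anchors live inside
// the lane object itself, so after realloc memcpies a lane, the first
// element's link.prev and the last element's link.next still point at the
// anchors of the *old* allocation. fix() re-aims those two pointers, or,
// for an empty lane, re-links the two anchors to each other.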

// Grow the ready queue
void ready_queue_grow  (struct cluster * cltr, int target) {
	/* paranoid */ verify( ready_mutate_islocked() );
	__cfadbg_print_safe(ready_queue, "Kernel : Growing ready queue\n");

	// Make sure that everything is consistent
	/* paranoid */ check( cltr->ready_queue );

	// grow the ready queue
	with( cltr->ready_queue ) {
		^(snzi){};

		// Find new count
		// Make sure we always have at least 1 list
		size_t ncount = target >= 2 ? target * 4 : 1;

		// Allocate new array (uses realloc and memcpies the data)
		lanes.data = alloc(lanes.data, ncount);

		// Fix the moved data
		for( idx; (size_t)lanes.count ) {
			fix(lanes.data[idx]);
		}

		// Construct new data
		for( idx; (size_t)lanes.count ~ ncount) {
			(lanes.data[idx]){};
		}

		// Update original
		lanes.count = ncount;

		// Re-create the snzi
		snzi{ log2( lanes.count / 8 ) };
		for( idx; (size_t)lanes.count ) {
			if( !is_empty(lanes.data[idx]) ) {
				arrive(snzi, idx);
			}
		}
	}

	// Make sure that everything is consistent
	/* paranoid */ check( cltr->ready_queue );

	__cfadbg_print_safe(ready_queue, "Kernel : Growing ready queue done\n");

	/* paranoid */ verify( ready_mutate_islocked() );
}
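
// Both resize operations follow the same protocol: tear down the snzi,
// realloc the lane array (then fix() the memcpy'd intrusive links), and
// rebuild the snzi from the lanes that are still non-empty. Holding the
// writer side of the cluster RW-lock (verified on entry and exit) means
// no processor can be reading the lane array mid-realloc.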

// Shrink the ready queue
void ready_queue_shrink(struct cluster * cltr, int target) {
	/* paranoid */ verify( ready_mutate_islocked() );
	__cfadbg_print_safe(ready_queue, "Kernel : Shrinking ready queue\n");

	// Make sure that everything is consistent
	/* paranoid */ check( cltr->ready_queue );

	with( cltr->ready_queue ) {
		^(snzi){};

		// Remember old count
		size_t ocount = lanes.count;

		// Find new count
		// Make sure we always have at least 1 list
		lanes.count = target >= 2 ? target * 4 : 1;
		/* paranoid */ verify( ocount >= lanes.count );
		/* paranoid */ verify( lanes.count == target * 4 || target < 2 );

		// for printing count the number of displaced threads
		#if defined(__CFA_DEBUG_PRINT__) || defined(__CFA_DEBUG_PRINT_READY_QUEUE__)
			__attribute__((unused)) size_t displaced = 0;
		#endif

		// redistribute old data
		for( idx; (size_t)lanes.count ~ ocount) {
			// Lock is not strictly needed but makes checking invariants much easier
			__attribute__((unused)) bool locked = __atomic_try_acquire(&lanes.data[idx].lock);
			verify(locked);

			// As long as this lane is non-empty, pop from it and push the threads somewhere else in the queue
			while(!is_empty(lanes.data[idx])) {
				struct $thread * thrd;
				__attribute__((unused)) bool _;
				[thrd, _] = pop(lanes.data[idx]);

				push(cltr, thrd);

				// for printing count the number of displaced threads
				#if defined(__CFA_DEBUG_PRINT__) || defined(__CFA_DEBUG_PRINT_READY_QUEUE__)
					displaced++;
				#endif
			}

			// Unlock the lane
			__atomic_unlock(&lanes.data[idx].lock);

			// TODO print the queue statistics here

			^(lanes.data[idx]){};
		}

		__cfadbg_print_safe(ready_queue, "Kernel : Shrinking ready queue displaced %zu threads\n", displaced);

		// Allocate new array (uses realloc and memcpies the data)
		lanes.data = alloc(lanes.data, lanes.count);

		// Fix the moved data
		for( idx; (size_t)lanes.count ) {
			fix(lanes.data[idx]);
		}

		// Re-create the snzi
		snzi{ log2( lanes.count / 8 ) };
		for( idx; (size_t)lanes.count ) {
			if( !is_empty(lanes.data[idx]) ) {
				arrive(snzi, idx);
			}
		}
	}

	// Make sure that everything is consistent
	/* paranoid */ check( cltr->ready_queue );

	__cfadbg_print_safe(ready_queue, "Kernel : Shrinking ready queue done\n");
	/* paranoid */ verify( ready_mutate_islocked() );
}