//
// Cforall Version 1.0.0 Copyright (C) 2019 University of Waterloo
//
// The contents of this file are covered under the licence agreement in the
// file "LICENCE" distributed with Cforall.
//
// ready_queue.cfa --
//
// Author           : Thierry Delisle
// Created On       : Mon Nov dd 16:29:18 2019
// Last Modified By :
// Last Modified On :
// Update Count     :
//

#define __cforall_thread__
// #define __CFA_DEBUG_PRINT_READY_QUEUE__

#include "bits/defs.hfa"
#include "kernel_private.hfa"

#define _GNU_SOURCE
#include "stdlib.hfa"
#include "math.hfa"

#include <unistd.h>

#include "snzi.hfa"
#include "ready_subqueue.hfa"

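// Assumed cache-line size, used below to verify the alignment of shared data.
// 64 bytes matches mainstream x86-64 and most ARM cores; this is a hard-coded
// assumption of this file, not a value queried from the OS at runtime.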
static const size_t cache_line_size = 64;

// No overridden function, no environment variable, no define:
// fall back to a magic number.
#ifndef __CFA_MAX_PROCESSORS__
	#define __CFA_MAX_PROCESSORS__ 1024
#endif

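// BIAS controls how strongly push/pop favour the current processor's own
// lanes: (BIAS - 1) times out of BIAS (63/64, about 98% here) an operation
// targets the preferred lanes, and 1 time out of BIAS it falls back to the
// whole array, which lets work circulate between processors.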
#define BIAS 64

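// The processor limit can be raised without recompiling the runtime by
// setting the environment variable checked below, e.g. (illustrative):
//   $ CFA_MAX_PROCESSORS=4096 ./a.out
// Alternatively, since __max_processors() is a weak symbol, an application
// may link its own definition, or the runtime can be built with
// -D__CFA_MAX_PROCESSORS__=<n>.
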
// Returns the maximum number of processors the RWLock supports.
__attribute__((weak)) unsigned __max_processors() {
	const char * max_cores_s = getenv("CFA_MAX_PROCESSORS");
	if(!max_cores_s) {
		__cfadbg_print_nolock(ready_queue, "No CFA_MAX_PROCESSORS in ENV\n");
		return __CFA_MAX_PROCESSORS__;
	}

	char * endptr = 0p;
	long int max_cores_l = strtol(max_cores_s, &endptr, 10);
	if(max_cores_l < 1 || max_cores_l > 65535) {
		__cfadbg_print_nolock(ready_queue, "CFA_MAX_PROCESSORS out of range : %ld\n", max_cores_l);
		return __CFA_MAX_PROCESSORS__;
	}
	if('\0' != *endptr) {
		__cfadbg_print_nolock(ready_queue, "CFA_MAX_PROCESSORS not a decimal number : %s\n", max_cores_s);
		return __CFA_MAX_PROCESSORS__;
	}

	return max_cores_l;
}

//=======================================================================
// Cluster wide reader-writer lock
//=======================================================================
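// This is an asymmetric reader-writer lock: each reader (processor) owns its
// own lock word in data[], so the read side (which lives outside this file)
// presumably touches only its own slot, while the writer (ready_mutate_lock
// below) pays the full cost of acquiring the global lock plus every
// per-processor lock. This trades writer latency for near-zero reader
// contention, which fits the usage: scheduling operations happen constantly,
// the queue is resized rarely.
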
void ?{}(__scheduler_RWLock_t & this) {
	this.max   = __max_processors();
	this.alloc = 0;
	this.ready = 0;
	this.lock  = false;
	this.data  = alloc(this.max);

	/*paranoid*/ verify( 0 == (((uintptr_t)(this.data    )) % 64) );
	/*paranoid*/ verify( 0 == (((uintptr_t)(this.data + 1)) % 64) );
	/*paranoid*/ verify(__atomic_is_lock_free(sizeof(this.alloc), &this.alloc));
	/*paranoid*/ verify(__atomic_is_lock_free(sizeof(this.ready), &this.ready));
}

void ^?{}(__scheduler_RWLock_t & this) {
	free(this.data);
}

void ?{}( __scheduler_lock_id_t & this, __processor_id_t * proc ) {
	this.handle = proc;
	this.lock   = false;
	#ifdef __CFA_WITH_VERIFY__
		this.owned  = false;
	#endif
}

//=======================================================================
// Lock-Free registering/unregistering of threads
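// Registration scheme: slots in data[] are claimed either by CAS-ing a
// previously vacated slot (handle == 0p) back to this proc, or by a
// fetch-and-add on 'alloc' to take a fresh slot. 'ready' is only ever
// published monotonically (slot n becomes visible once slots 0..n-1 are),
// so readers may scan data[0..ready) without holding any lock.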
unsigned doregister( struct __processor_id_t * proc ) with(*__scheduler_lock) {
	__cfadbg_print_safe(ready_queue, "Kernel : Registering proc %p for RW-Lock\n", proc);

	// Step - 1 : check if there is already space in the data
	uint_fast32_t s = ready;

	// Check among all the ready
	for(uint_fast32_t i = 0; i < s; i++) {
		__processor_id_t * null = 0p; // Re-set every iteration since a failed compare-exchange overwrites it
		if( __atomic_load_n(&data[i].handle, (int)__ATOMIC_RELAXED) == null
			&& __atomic_compare_exchange_n( &data[i].handle, &null, proc, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) {
			/*paranoid*/ verify(i < ready);
			/*paranoid*/ verify(0 == (__alignof__(data[i]) % cache_line_size));
			/*paranoid*/ verify((((uintptr_t)&data[i]) % cache_line_size) == 0);
			return i;
		}
	}

	if(max <= alloc) abort("Trying to create more than %u processors", __scheduler_lock->max);

	// Step - 2 : F&A to get a new spot in the array.
	uint_fast32_t n = __atomic_fetch_add(&alloc, 1, __ATOMIC_SEQ_CST);
	if(max <= n) abort("Trying to create more than %u processors", __scheduler_lock->max);

	// Step - 3 : Mark space as used and then publish it.
	__scheduler_lock_id_t * storage = (__scheduler_lock_id_t *)&data[n];
	(*storage){ proc };
	while(true) {
		unsigned copy = n;
		if( __atomic_load_n(&ready, __ATOMIC_RELAXED) == n
			&& __atomic_compare_exchange_n(&ready, &copy, n + 1, true, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
			break;
		// Spin until the slots before ours are published.
		asm volatile("pause");
	}

	__cfadbg_print_safe(ready_queue, "Kernel : Registering proc %p done, id %lu\n", proc, n);

	// Return new spot.
	/*paranoid*/ verify(n < ready);
	/*paranoid*/ verify(__alignof__(data[n]) == (2 * cache_line_size));
	/*paranoid*/ verify((((uintptr_t)&data[n]) % cache_line_size) == 0);
	return n;
}

void unregister( struct __processor_id_t * proc ) with(*__scheduler_lock) {
	unsigned id = proc->id;
	/*paranoid*/ verify(id < ready);
	/*paranoid*/ verify(proc == __atomic_load_n(&data[id].handle, __ATOMIC_RELAXED));
	__atomic_store_n(&data[id].handle, 0p, __ATOMIC_RELEASE);

	__cfadbg_print_safe(ready_queue, "Kernel : Unregister proc %p\n", proc);
}

//-----------------------------------------------------------------------
// Writer side : acquired when changing the ready queue, e.g. adding more
//  queues or removing them.
uint_fast32_t ready_mutate_lock( void ) with(*__scheduler_lock) {
	// Step 1 : lock global lock
	// It is needed to prevent processors that register mid critical-section
	//   from simply locking their own lock and entering.
	__atomic_acquire( &lock );

	// Step 2 : lock per-proc lock
	// Processors that are currently being registered aren't counted
	//   but can't be in read_lock or in the critical section.
	// All other processors are counted
	uint_fast32_t s = ready;
	for(uint_fast32_t i = 0; i < s; i++) {
		__atomic_acquire( &data[i].lock );
	}

	return s;
}

void ready_mutate_unlock( uint_fast32_t last_s ) with(*__scheduler_lock) {
	// Step 1 : release local locks
	// This must be done while the global lock is held to avoid
	//   threads that were created mid critical-section
	//   racing to lock their local locks and having the writer
	//   immediately unlock them
	// Alternative solution : return s in write_lock and pass it to write_unlock
	for(uint_fast32_t i = 0; i < last_s; i++) {
		verify(data[i].lock);
		__atomic_store_n(&data[i].lock, (bool)false, __ATOMIC_RELEASE);
	}

	// Step 2 : release global lock
	/*paranoid*/ assert(true == lock);
	__atomic_store_n(&lock, (bool)false, __ATOMIC_RELEASE);
}

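// Writer-side usage is a lock/unlock bracket around any structural change to
// the ready queue (a sketch; the real call sites live elsewhere in the kernel):
//   uint_fast32_t last_size = ready_mutate_lock();
//   ready_queue_grow( cltr );   // or ready_queue_shrink( cltr )
//   ready_mutate_unlock( last_size );
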
//=======================================================================
// Cforall Ready Queue used for scheduling
//=======================================================================
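// The ready queue is an array of independent "lanes" (intrusive subqueues,
// see ready_subqueue.hfa), each protected by its own spinlock, summarised by
// a SNZI (Scalable Non-Zero Indicator) tree that tracks which lanes are
// non-empty. Pushes and pops pick lanes at random, so contention on any one
// lock stays low, and the SNZI lets pop() terminate quickly when the whole
// queue is empty.
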
void ?{}(__ready_queue_t & this) with (this) {
	lanes.data = alloc(4);
	for( i; 4 ) {
		(lanes.data[i]){};
	}
	lanes.count = 4;
	snzi{ log2( lanes.count / 8 ) };
}

void ^?{}(__ready_queue_t & this) with (this) {
	verify( 4 == lanes.count );
	verify( !query( snzi ) );

	^(snzi){};

	for( i; 4 ) {
		^(lanes.data[i]){};
	}
	free(lanes.data);
}

//-----------------------------------------------------------------------
__attribute__((hot)) bool query(struct cluster * cltr) {
	return query(cltr->ready_queue.snzi);
}

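// Note: this emptiness test is read-only and lock-free; it consults only the
// SNZI summary, never the lanes themselves, so it can be polled frequently.
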
//-----------------------------------------------------------------------
__attribute__((hot)) bool push(struct cluster * cltr, struct $thread * thrd) with (cltr->ready_queue) {
	__cfadbg_print_safe(ready_queue, "Kernel : Pushing %p on cluster %p\n", thrd, cltr);

	// write timestamp
	thrd->link.ts = rdtscl();

	#if defined(BIAS) && !defined(__CFA_NO_STATISTICS__)
		bool local = false;
		// Comment toggle: flipping the first marker between //* and /* selects
		// which of the two expressions below computes the preferred lane group.
		int preferred =
			//*
			kernelTLS.this_processor ? kernelTLS.this_processor->id * 4 : -1;
			/*/
			thrd->link.preferred * 4;
			//*/
	#endif

	// Try to pick a lane and lock it
	unsigned i;
	do {
		// Pick the index of a lane
		#if defined(BIAS)
			unsigned r = __tls_rand();
			unsigned rlow  = r % BIAS;
			unsigned rhigh = r / BIAS;
			if((0 != rlow) && preferred >= 0) {
				// (BIAS - 1) out of BIAS chances
				// Use preferred queues
				i = preferred + (rhigh % 4);

				#if !defined(__CFA_NO_STATISTICS__)
					local = true;
					__tls_stats()->ready.pick.push.local++;
				#endif
			}
			else {
				// 1 out of BIAS chances
				// Use all queues
				i = rhigh;
				#if !defined(__CFA_NO_STATISTICS__)
					local = false;
				#endif
			}
		#else
			i = __tls_rand();
		#endif

		i %= __atomic_load_n( &lanes.count, __ATOMIC_RELAXED );

		#if !defined(__CFA_NO_STATISTICS__)
			__tls_stats()->ready.pick.push.attempt++;
		#endif

	// If we can't lock it retry
	} while( !__atomic_try_acquire( &lanes.data[i].lock ) );

	bool first = false;

	// Actually push it
	bool lane_first = push(lanes.data[i], thrd);

	// If this lane used to be empty we need to do more
	if(lane_first) {
		// Check if the entire queue used to be empty
		first = !query(snzi);

		// Update the snzi
		arrive( snzi, i );
	}

	// Unlock and return
	__atomic_unlock( &lanes.data[i].lock );

	__cfadbg_print_safe(ready_queue, "Kernel : Pushed %p on cluster %p (idx: %u, mask %llu, first %d)\n", thrd, cltr, i, used.mask[0], lane_first);

	// Update statistics
	#if !defined(__CFA_NO_STATISTICS__)
		#if defined(BIAS)
			if( local ) __tls_stats()->ready.pick.push.lsuccess++;
		#endif
		__tls_stats()->ready.pick.push.success++;
	#endif

	// return whether or not the list was empty before this push
	return first;
}

static struct $thread * try_pop(struct cluster * cltr, unsigned i, unsigned j);
static struct $thread * try_pop(struct cluster * cltr, unsigned i);

// Pop from the ready queue from a given cluster
__attribute__((hot)) $thread * pop(struct cluster * cltr) with (cltr->ready_queue) {
	/* paranoid */ verify( lanes.count > 0 );
	#if defined(BIAS)
		// Don't bother trying locally too much
		int local_tries = 8;
	#endif

	// As long as the list is not empty, try finding a lane that isn't empty and pop from it
	while( query(snzi) ) {
		// Pick two lists at random
		unsigned i,j;
		#if defined(BIAS)
			#if !defined(__CFA_NO_STATISTICS__)
				bool local = false;
			#endif
			uint64_t r = __tls_rand();
			unsigned rlow  = r % BIAS;
			uint64_t rhigh = r / BIAS;
			if(local_tries && 0 != rlow) {
				// (BIAS - 1) out of BIAS chances
				// Use preferred queues
				unsigned pid = kernelTLS.this_processor->id * 4;
				i = pid + (rhigh % 4);
				j = pid + ((rhigh >> 32ull) % 4);

				// count the tries
				local_tries--;

				#if !defined(__CFA_NO_STATISTICS__)
					local = true;
					__tls_stats()->ready.pick.pop.local++;
				#endif
			}
			else {
				// 1 out of BIAS chances
				// Use all queues
				i = rhigh;
				j = rhigh >> 32ull;
			}
		#else
			i = __tls_rand();
			j = __tls_rand();
		#endif

		i %= __atomic_load_n( &lanes.count, __ATOMIC_RELAXED );
		j %= __atomic_load_n( &lanes.count, __ATOMIC_RELAXED );

		// try popping from the 2 picked lists
		struct $thread * thrd = try_pop(cltr, i, j);
		if(thrd) {
			#if defined(BIAS) && !defined(__CFA_NO_STATISTICS__)
				if( local ) __tls_stats()->ready.pick.pop.lsuccess++;
			#endif
			return thrd;
		}
	}

	// All lanes were empty; return 0p
	return 0p;
}

//-----------------------------------------------------------------------
// Given 2 indexes, pick the list with the oldest push and try to pop from it
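// This is the "power of two random choices" load-balancing strategy: sampling
// two random lanes and taking the better one captures most of the benefit of
// a global scan at a fraction of the cost. Here "better" means the lane whose
// head carries the older timestamp, which also approximates global FIFO order.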
static inline struct $thread * try_pop(struct cluster * cltr, unsigned i, unsigned j) with (cltr->ready_queue) {
	#if !defined(__CFA_NO_STATISTICS__)
		__tls_stats()->ready.pick.pop.attempt++;
	#endif

	// Pick the best list
	int w = i;
	if( __builtin_expect(!is_empty(lanes.data[j]), true) ) {
		w = (ts(lanes.data[i]) < ts(lanes.data[j])) ? i : j;
	}

	return try_pop(cltr, w);
}

static inline struct $thread * try_pop(struct cluster * cltr, unsigned w) with (cltr->ready_queue) {
	// Get relevant elements locally
	__intrusive_lane_t & lane = lanes.data[w];

	// If the list looks empty, give up; the caller retries with new lanes
	if( is_empty(lane) ) return 0p;

	// If we can't get the lock, give up rather than wait
	if( !__atomic_try_acquire(&lane.lock) ) return 0p;

	// If the list is actually empty, unlock and give up
	if( is_empty(lane) ) {
		__atomic_unlock(&lane.lock);
		return 0p;
	}

	// Actually pop the list
	struct $thread * thrd;
	bool emptied;
	[thrd, emptied] = pop(lane);

	/* paranoid */ verify(thrd);
	/* paranoid */ verify(lane.lock);

	// If this was the last element in the lane
	if(emptied) {
		depart( snzi, w );
	}

	// Unlock and return
	__atomic_unlock(&lane.lock);

	// Update statistics
	#if !defined(__CFA_NO_STATISTICS__)
		__tls_stats()->ready.pick.pop.success++;
	#endif

	// Update the thread bias
	thrd->link.preferred = w / 4;

	// return the popped thread
	return thrd;
}
//-----------------------------------------------------------------------

// Remove 'thrd' if it is currently at the head of one of the lanes.
// Returns true if the thread was found and removed.
bool remove_head(struct cluster * cltr, struct $thread * thrd) with (cltr->ready_queue) {
	for(i; lanes.count) {
		__intrusive_lane_t & lane = lanes.data[i];

		bool removed = false;

		__atomic_acquire(&lane.lock);
		if(head(lane)->link.next == thrd) {
			$thread * pthrd;
			bool emptied;
			[pthrd, emptied] = pop(lane);

			/* paranoid */ verify( pthrd == thrd );

			removed = true;
			if(emptied) {
				depart( snzi, i );
			}
		}
		__atomic_unlock(&lane.lock);

		if( removed ) return true;
	}
	return false;
}

//-----------------------------------------------------------------------

// Debug-build consistency check: verifies every lane's sentinel links.
static void check( __ready_queue_t & q ) with (q) {
	#if defined(__CFA_WITH_VERIFY__)
	{
		for( idx ; lanes.count ) {
			__intrusive_lane_t & sl = lanes.data[idx];
			assert(!lanes.data[idx].lock);

			assert(head(sl)->link.prev == 0p );
			assert(head(sl)->link.next->link.prev == head(sl) );
			assert(tail(sl)->link.next == 0p );
			assert(tail(sl)->link.prev->link.next == tail(sl) );

			if(sl.before.link.ts == 0l) {
				assert(tail(sl)->link.prev == head(sl));
				assert(head(sl)->link.next == tail(sl));
			} else {
				assert(tail(sl)->link.prev != head(sl));
				assert(head(sl)->link.next != tail(sl));
			}
		}
	}
	#endif
}

// Call this function if the intrusive list was moved using memcpy;
// fixes the list so that the pointers back to the anchors aren't left dangling.
static inline void fix(__intrusive_lane_t & ll) {
	// if the list is not empty then follow the pointers and fix the reverse links
	if(!is_empty(ll)) {
		head(ll)->link.next->link.prev = head(ll);
		tail(ll)->link.prev->link.next = tail(ll);
	}
	// Otherwise just reset the list
	else {
		verify(tail(ll)->link.next == 0p);
		tail(ll)->link.prev = head(ll);
		head(ll)->link.next = tail(ll);
		verify(head(ll)->link.prev == 0p);
	}
}

// Grow the ready queue
void ready_queue_grow(struct cluster * cltr) {
	/* paranoid */ verify( ready_mutate_islocked() );
	__cfadbg_print_safe(ready_queue, "Kernel : Growing ready queue\n");

	// Make sure that everything is consistent
	/* paranoid */ check( cltr->ready_queue );

	// grow the ready queue
	with( cltr->ready_queue ) {
		^(snzi){};

		size_t ncount = lanes.count;

		// increase count
		ncount += 4;

		// Allocate new array (uses realloc and memcpies the data)
		lanes.data = alloc(lanes.data, ncount);

		// Fix the moved data
		for( idx; (size_t)lanes.count ) {
			fix(lanes.data[idx]);
		}

		// Construct new data
		for( idx; (size_t)lanes.count ~ ncount) {
			(lanes.data[idx]){};
		}

		// Update original
		lanes.count = ncount;

		// Re-create the snzi
		snzi{ log2( lanes.count / 8 ) };
		for( idx; (size_t)lanes.count ) {
			if( !is_empty(lanes.data[idx]) ) {
				arrive(snzi, idx);
			}
		}
	}

	// Make sure that everything is consistent
	/* paranoid */ check( cltr->ready_queue );

	__cfadbg_print_safe(ready_queue, "Kernel : Growing ready queue done\n");

	/* paranoid */ verify( ready_mutate_islocked() );
}

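// Both grow and shrink run with the writer side of the cluster RW-lock held
// (hence the ready_mutate_islocked() checks), and both destroy and re-create
// the SNZI: its tree depth is derived from the lane count (log2(count / 8)
// above), so it is rebuilt from the surviving non-empty lanes rather than
// resized in place.
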
// Shrink the ready queue
void ready_queue_shrink(struct cluster * cltr) {
	/* paranoid */ verify( ready_mutate_islocked() );
	__cfadbg_print_safe(ready_queue, "Kernel : Shrinking ready queue\n");

	// Make sure that everything is consistent
	/* paranoid */ check( cltr->ready_queue );

	with( cltr->ready_queue ) {
		^(snzi){};

		size_t ocount = lanes.count;
		// Check that we have some space left
		if(ocount < 8) abort("Program attempted to destroy more Ready Queues than were created");

		// reduce the actual count so push doesn't use the old queues
		lanes.count -= 4;
		verify(ocount > lanes.count);

		// for printing count the number of displaced threads
		#if defined(__CFA_DEBUG_PRINT__) || defined(__CFA_DEBUG_PRINT_READY_QUEUE__)
			__attribute__((unused)) size_t displaced = 0;
		#endif

		// redistribute old data
		for( idx; (size_t)lanes.count ~ ocount) {
			// Lock is not strictly needed but makes checking invariants much easier
			__attribute__((unused)) bool locked = __atomic_try_acquire(&lanes.data[idx].lock);
			verify(locked);

			// As long as we can pop from this lane, push the threads somewhere else in the queue
			while(!is_empty(lanes.data[idx])) {
				struct $thread * thrd;
				__attribute__((unused)) bool _;
				[thrd, _] = pop(lanes.data[idx]);

				push(cltr, thrd);

				// for printing count the number of displaced threads
				#if defined(__CFA_DEBUG_PRINT__) || defined(__CFA_DEBUG_PRINT_READY_QUEUE__)
					displaced++;
				#endif
			}

			// Unlock the lane
			__atomic_unlock(&lanes.data[idx].lock);

			// TODO print the queue statistics here

			^(lanes.data[idx]){};
		}

		__cfadbg_print_safe(ready_queue, "Kernel : Shrinking ready queue displaced %zu threads\n", displaced);

		// Allocate new array (uses realloc and memcpies the data)
		lanes.data = alloc(lanes.data, lanes.count);

		// Fix the moved data
		for( idx; (size_t)lanes.count ) {
			fix(lanes.data[idx]);
		}

		// Re-create the snzi
		snzi{ log2( lanes.count / 8 ) };
		for( idx; (size_t)lanes.count ) {
			if( !is_empty(lanes.data[idx]) ) {
				arrive(snzi, idx);
			}
		}
	}

	// Make sure that everything is consistent
	/* paranoid */ check( cltr->ready_queue );

	__cfadbg_print_safe(ready_queue, "Kernel : Shrinking ready queue done\n");
	/* paranoid */ verify( ready_mutate_islocked() );
}
|---|