//
// Cforall Version 1.0.0 Copyright (C) 2022 University of Waterloo
//
// The contents of this file are covered under the licence agreement in the
// file "LICENCE" distributed with Cforall.
//
// cluster.cfa -- file that includes helpers for subsystems that need cluster-wide support
//
// Author           : Thierry Delisle
// Created On       : Fri Mar 11 12:39:24 2022
// Last Modified By :
// Last Modified On :
// Update Count     :
//

#define __cforall_thread__
#define _GNU_SOURCE

#include "bits/defs.hfa"
#include "device/cpu.hfa"
#include "kernel/cluster.hfa"
#include "kernel/private.hfa"

#include "stdlib.hfa"
#include "limits.hfa"
#include "math.hfa"

#include "ready_subqueue.hfa"
#include "io/types.hfa"

#include <errno.h>
#include <unistd.h>

extern "C" {
	#include <sys/syscall.h>  // __NR_xxx
}

// No overridden function, no environment variable, no define:
// fall back to a magic number.
#ifndef __CFA_MAX_PROCESSORS__
	#define __CFA_MAX_PROCESSORS__ 1024
#endif

#if !defined(__CFA_NO_STATISTICS__)
	#define __STATS(...) __VA_ARGS__
#else
	#define __STATS(...)
#endif

// returns the maximum number of processors the RWLock supports
__attribute__((weak)) unsigned __max_processors() {
	const char * max_cores_s = getenv("CFA_MAX_PROCESSORS");
	if(!max_cores_s) {
		__cfadbg_print_nolock(ready_queue, "No CFA_MAX_PROCESSORS in ENV\n");
		return __CFA_MAX_PROCESSORS__;
	}

	char * endptr = 0p;
	long int max_cores_l = strtol(max_cores_s, &endptr, 10);
	if(max_cores_l < 1 || max_cores_l > 65535) {
		__cfadbg_print_nolock(ready_queue, "CFA_MAX_PROCESSORS out of range : %ld\n", max_cores_l);
		return __CFA_MAX_PROCESSORS__;
	}
	if('\0' != *endptr) {
		__cfadbg_print_nolock(ready_queue, "CFA_MAX_PROCESSORS not a decimal number : %s\n", max_cores_s);
		return __CFA_MAX_PROCESSORS__;
	}

	return max_cores_l;
}
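// Usage note (illustration only): the cap can be changed without recompiling,
// e.g. by running with `CFA_MAX_PROCESSORS=256` in the environment; values
// outside [1, 65535] or with trailing characters fall back to __CFA_MAX_PROCESSORS__.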

#if   defined(CFA_HAVE_LINUX_LIBRSEQ)
	// No forward declaration needed
	#define __kernel_rseq_register rseq_register_current_thread
	#define __kernel_rseq_unregister rseq_unregister_current_thread
#elif defined(CFA_HAVE_LINUX_RSEQ_H)
	static void __kernel_raw_rseq_register  (void);
	static void __kernel_raw_rseq_unregister(void);

	#define __kernel_rseq_register __kernel_raw_rseq_register
	#define __kernel_rseq_unregister __kernel_raw_rseq_unregister
#else
	// No forward declaration needed
	// No initialization needed
	static inline void noop(void) {}

	#define __kernel_rseq_register noop
	#define __kernel_rseq_unregister noop
#endif
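// Note: when Linux restartable sequences (rseq) are available, the
// __kernel_rseq_register/__kernel_rseq_unregister hooks selected above register
// the current kernel thread with the kernel so that __cfaabi_rseq.cpu_id is
// kept up to date; without rseq support they compile down to a no-op.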

//=======================================================================
// Cluster wide reader-writer lock
//=======================================================================
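// Descriptive note: `data` holds one pointer per registered processor to that
// processor's `sched_lock` flag, `ready` counts the published slots, `alloc` is
// a bump allocator for new slots, and `write_lock` is the single flag a writer
// takes before sweeping every per-processor flag (see ready_mutate_lock below).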
void  ?{}(__scheduler_RWLock_t & this) {
	this.max   = __max_processors();
	this.alloc = 0;
	this.ready = 0;
	this.data  = alloc(this.max);
	this.write_lock  = false;

	/*paranoid*/ verify(__atomic_is_lock_free(sizeof(this.alloc), &this.alloc));
	/*paranoid*/ verify(__atomic_is_lock_free(sizeof(this.ready), &this.ready));

}
void ^?{}(__scheduler_RWLock_t & this) {
	free(this.data);
}


//=======================================================================
// Lock-Free registering/unregistering of threads
unsigned register_proc_id( void ) with(*__scheduler_lock) {
	__kernel_rseq_register();

	bool * handle = (bool *)&kernelTLS().sched_lock;

	// Step - 1 : check if there is already space in the data
	uint_fast32_t s = ready;

	// Check among all the ready
	for(uint_fast32_t i = 0; i < s; i++) {
		bool * volatile * cell = (bool * volatile *)&data[i]; // Cforall is bugged and the double volatile causes problems
		/* paranoid */ verify( handle != *cell );

		bool * null = 0p; // Re-write every loop since compare thrashes it
		if( __atomic_load_n(cell, (int)__ATOMIC_RELAXED) == null
			&& __atomic_compare_exchange_n( cell, &null, handle, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) {
			/* paranoid */ verify(i < ready);
			/* paranoid */ verify( (kernelTLS().sched_id = i, true) );
			return i;
		}
	}

	if(max <= alloc) abort("Trying to create more than %u processors", __scheduler_lock->max);

	// Step - 2 : F&A to get a new spot in the array.
	uint_fast32_t n = __atomic_fetch_add(&alloc, 1, __ATOMIC_SEQ_CST);
	if(max <= n) abort("Trying to create more than %u processors", __scheduler_lock->max);

	// Step - 3 : Mark space as used and then publish it.
	data[n] = handle;
	while() {
		unsigned copy = n;
		if( __atomic_load_n(&ready, __ATOMIC_RELAXED) == n
			&& __atomic_compare_exchange_n(&ready, &copy, n + 1, true, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
			break;
		Pause();
	}

	// Return new spot.
	/* paranoid */ verify(n < ready);
	/* paranoid */ verify( (kernelTLS().sched_id = n, true) );
	return n;
}

void unregister_proc_id( unsigned id ) with(*__scheduler_lock) {
	/* paranoid */ verify(id < ready);
	/* paranoid */ verify(id == kernelTLS().sched_id);
	/* paranoid */ verify(data[id] == &kernelTLS().sched_lock);

	bool * volatile * cell = (bool * volatile *)&data[id]; // Cforall is bugged and the double volatile causes problems

	__atomic_store_n(cell, 0p, __ATOMIC_RELEASE);

	__kernel_rseq_unregister();
}
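// Registration sketch (illustration only): each processor claims a slot once at
// startup and releases it at teardown.
//   unsigned id = register_proc_id();
//   ... run, using kernelTLS().sched_lock as the reader-side flag ...
//   unregister_proc_id( id );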

//-----------------------------------------------------------------------
// Writer side : acquire when changing the ready queue, e.g. adding more
//  queues or removing them.
uint_fast32_t ready_mutate_lock( void ) with(*__scheduler_lock) {
	/* paranoid */ verify( ! __preemption_enabled() );

	// Step 1 : lock global lock
	// It is needed so that processors that register mid critical-section
	//   cannot simply lock their own lock and enter.
	__atomic_acquire( &write_lock );

	// Make sure we won't deadlock ourselves
	// Checking before acquiring the writer lock isn't safe
	// because someone else could have locked us.
	/* paranoid */ verify( ! kernelTLS().sched_lock );

	// Step 2 : lock per-proc lock
	// Processors that are currently being registered aren't counted
	//   but can't be in read_lock or in the critical section.
	// All other processors are counted
	uint_fast32_t s = ready;
	for(uint_fast32_t i = 0; i < s; i++) {
		volatile bool * llock = data[i];
		if(llock) __atomic_acquire( llock );
	}

	/* paranoid */ verify( ! __preemption_enabled() );
	return s;
}

void ready_mutate_unlock( uint_fast32_t last_s ) with(*__scheduler_lock) {
	/* paranoid */ verify( ! __preemption_enabled() );

	// Step 1 : release local locks
	// This must be done while the global lock is held to avoid
	//   threads that were created mid critical-section
	//   racing to lock their local locks and having the writer
	//   immediately unlock them
	// Alternative solution : return s in write_lock and pass it to write_unlock
	for(uint_fast32_t i = 0; i < last_s; i++) {
		volatile bool * llock = data[i];
		if(llock) __atomic_store_n(llock, (bool)false, __ATOMIC_RELEASE);
	}

	// Step 2 : release global lock
	/*paranoid*/ assert(true == write_lock);
	__atomic_store_n(&write_lock, (bool)false, __ATOMIC_RELEASE);

	/* paranoid */ verify( ! __preemption_enabled() );
}
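// Writer-side usage sketch (illustration only):
//   uint_fast32_t last = ready_mutate_lock();
//   ... grow or shrink the ready queue ...
//   ready_mutate_unlock( last );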

//=======================================================================
// Cluster growth
static const unsigned __readyq_single_shard = 2;

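// Sizing illustration (the shard factor itself is defined elsewhere): with a
// ready-queue shard factor of, say, 2, a cluster with 4 processors keeps
// max(4 * 2, __readyq_single_shard) == 8 subqueues, while a cluster with no
// processors keeps the minimum of __readyq_single_shard == 2.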
//-----------------------------------------------------------------------
// Check that all the intrusive queues in the data structure are still consistent
static void check_readyQ( cluster * cltr ) with (cltr->sched) {
	#if defined(__CFA_WITH_VERIFY__)
		{
			const unsigned lanes_count = readyQ.count;
			for( idx ; lanes_count ) {
				__intrusive_lane_t & sl = readyQ.data[idx];
				assert(!readyQ.data[idx].lock);

				if(is_empty(sl)) {
					assert( sl.anchor.next == 0p );
					assert( sl.anchor.ts   == -1llu );
					assert( mock_head(sl)  == sl.prev );
				} else {
					assert( sl.anchor.next != 0p );
					assert( sl.anchor.ts   != -1llu );
					assert( mock_head(sl)  != sl.prev );
				}
			}
		}
	#endif
}

// Call this function if the intrusive list was moved using memcpy;
// it fixes the list so that the pointers back to anchors aren't left dangling.
static inline void fix(__intrusive_lane_t & ll) {
	if(is_empty(ll)) {
		verify(ll.anchor.next == 0p);
		ll.prev = mock_head(ll);
	}
}

static void assign_list(unsigned & valrq, unsigned & valio, dlist(processor) & list, unsigned count) {
	processor * it = &list`first;
	for(unsigned i = 0; i < count; i++) {
		/* paranoid */ verifyf( it, "Unexpected null iterator, at index %u of %u\n", i, count);
		it->rdq.id = valrq;
		it->rdq.target = MAX;
		valrq += __shard_factor.readyq;
		#if defined(CFA_HAVE_LINUX_IO_URING_H)
			it->io.ctx->cq.id = valio;
			it->io.target = MAX;
			valio += __shard_factor.io;
		#endif
		it = &(*it)`next;
	}
}

static void reassign_cltr_id(struct cluster * cltr) {
	unsigned prefrq = 0;
	unsigned prefio = 0;
	assign_list(prefrq, prefio, cltr->procs.actives, cltr->procs.total - cltr->procs.idle);
	assign_list(prefrq, prefio, cltr->procs.idles  , cltr->procs.idle );
}
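// Note (descriptive): assign_list spaces the ids by the shard factor, so each
// processor effectively owns a contiguous block of ready/io subqueues; active
// processors are numbered before idle ones.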

#if defined(CFA_HAVE_LINUX_IO_URING_H)
	static void assign_io($io_context ** data, size_t count, dlist(processor) & list) {
		processor * it = &list`first;
		while(it) {
			/* paranoid */ verifyf( it, "Unexpected null iterator\n");
			/* paranoid */ verifyf( it->io.ctx->cq.id < count, "Processor %p has id %u above count %zu\n", it, it->rdq.id, count);
			data[it->io.ctx->cq.id] = it->io.ctx;
			it = &(*it)`next;
		}
	}

	static void reassign_cltr_io(struct cluster * cltr) {
		assign_io(cltr->sched.io.data, cltr->sched.io.count, cltr->procs.actives);
		assign_io(cltr->sched.io.data, cltr->sched.io.count, cltr->procs.idles  );
	}
#else
	static void reassign_cltr_io(struct cluster *) {}
#endif

static void fix_times( __timestamp_t * volatile & tscs, unsigned count ) {
	tscs = alloc(count, tscs`realloc);
	for(i; count) {
		tscs[i].tv = rdtscl();
		tscs[i].ma = 0;
	}
}
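// Note (interpretation, not from the original comments): fix_times reallocates
// the timestamp array and resets every entry to the current rdtscl() reading
// with its `ma` field cleared, presumably so moved or newly created entries do
// not look arbitrarily stale.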

// Grow the ready queue
void ready_queue_grow(struct cluster * cltr) {
	int target = cltr->procs.total;

	/* paranoid */ verify( ready_mutate_islocked() );
	__cfadbg_print_safe(ready_queue, "Kernel : Growing ready queue\n");

	// Make sure that everything is consistent
	/* paranoid */ check_readyQ( cltr );


	// Find new count
	// Make sure we always have at least 1 list
	size_t ocount = cltr->sched.readyQ.count;
	size_t ncount = max(target * __shard_factor.readyq, __readyq_single_shard);

	// Do we have to do anything?
	if( ocount != ncount ) {

		// grow the ready queue
		with( cltr->sched ) {

			// Allocate new array (uses realloc and memcpies the data)
			readyQ.data = alloc( ncount, readyQ.data`realloc );

			// Fix the moved data
			for( idx; ocount ) {
				fix(readyQ.data[idx]);
			}

			// Construct new data
			for( idx; ocount ~ ncount) {
				(readyQ.data[idx]){};
			}

			// Update original count
			readyQ.count = ncount;
		}


		fix_times(cltr->sched.readyQ.tscs, cltr->sched.readyQ.count);
	}

	// Fix the io times
	cltr->sched.io.count = target * __shard_factor.io;
	fix_times(cltr->sched.io.tscs, cltr->sched.io.count);

	// realloc the caches
	cltr->sched.caches = alloc( target, cltr->sched.caches`realloc );

	// reassign the processor ids
	reassign_cltr_id(cltr);

	cltr->sched.io.data = alloc( cltr->sched.io.count, cltr->sched.io.data`realloc );
	reassign_cltr_io(cltr);

	// Make sure that everything is consistent
	/* paranoid */ check_readyQ( cltr );
	/* paranoid */ verify( (target == 0) == (cltr->sched.caches == 0p) );

	__cfadbg_print_safe(ready_queue, "Kernel : Growing ready queue done\n");

	/* paranoid */ verify( ready_mutate_islocked() );
}

// Shrink the ready queue
void ready_queue_shrink(struct cluster * cltr) {
	/* paranoid */ verify( ready_mutate_islocked() );
	__cfadbg_print_safe(ready_queue, "Kernel : Shrinking ready queue\n");

	// Make sure that everything is consistent
	/* paranoid */ check_readyQ( cltr );

	int target = cltr->procs.total;

	with( cltr->sched ) {
		// Remember old count
		size_t ocount = readyQ.count;

		// Find new count
		// Make sure we always have at least 1 list
		size_t ncount = max(target * __shard_factor.readyq, __readyq_single_shard);
		/* paranoid */ verifyf( ocount >= ncount, "Error in shrinking size calculation, %zu >= %zu", ocount, ncount );
		/* paranoid */ verifyf( ncount == target * __shard_factor.readyq || ncount == __readyq_single_shard,
		/* paranoid */          "Error in shrinking size calculation, expected %u or %u, got %zu", target * __shard_factor.readyq, __readyq_single_shard, ncount );

		readyQ.count = ncount;

		// for printing, count the number of displaced threads
		#if defined(__CFA_DEBUG_PRINT__) || defined(__CFA_DEBUG_PRINT_READY_QUEUE__)
			__attribute__((unused)) size_t displaced = 0;
		#endif

		// redistribute old data
		for( idx; ncount ~ ocount) {
			// Lock is not strictly needed but makes checking invariants much easier
			__attribute__((unused)) bool locked = __atomic_try_acquire(&readyQ.data[idx].lock);
			verify(locked);

			// As long as we can pop from this lane, push the threads somewhere else in the queue
			while(!is_empty(readyQ.data[idx])) {
				struct thread$ * thrd;
				unsigned long long _;
				[thrd, _] = pop(readyQ.data[idx]);

				push(cltr, thrd, true);

				// for printing, count the number of displaced threads
				#if defined(__CFA_DEBUG_PRINT__) || defined(__CFA_DEBUG_PRINT_READY_QUEUE__)
					displaced++;
				#endif
			}

			// Unlock the lane
			__atomic_unlock(&readyQ.data[idx].lock);

			// TODO print the queue statistics here

			^(readyQ.data[idx]){};
		}

		__cfadbg_print_safe(ready_queue, "Kernel : Shrinking ready queue displaced %zu threads\n", displaced);

		// Allocate new array (uses realloc and memcpies the data)
		readyQ.data = alloc( ncount, readyQ.data`realloc );

		// Fix the moved data
		for( idx; ncount ) {
			fix(readyQ.data[idx]);
		}

		fix_times(readyQ.tscs, ncount);
	}
	cltr->sched.caches = alloc( target, cltr->sched.caches`realloc );

	// Fix the io times
	cltr->sched.io.count = target * __shard_factor.io;
	fix_times(cltr->sched.io.tscs, cltr->sched.io.count);

	reassign_cltr_id(cltr);

	cltr->sched.io.data = alloc( cltr->sched.io.count, cltr->sched.io.data`realloc );
	reassign_cltr_io(cltr);

	// Make sure that everything is consistent
	/* paranoid */ verify( (target == 0) == (cltr->sched.caches == 0p) );
	/* paranoid */ check_readyQ( cltr );

	__cfadbg_print_safe(ready_queue, "Kernel : Shrinking ready queue done\n");
	/* paranoid */ verify( ready_mutate_islocked() );
}

void ready_queue_close(struct cluster * cltr) {
	free( cltr->sched.readyQ.data );
	free( cltr->sched.readyQ.tscs );
	cltr->sched.readyQ.data = 0p;
	cltr->sched.readyQ.tscs = 0p;
	cltr->sched.readyQ.count = 0;

	free( cltr->sched.io.tscs );
	free( cltr->sched.caches );
}

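// Note (descriptive): the constructor below relies on `anchor` doubling as a
// fake list node; because thread$.link and __intrusive_lane_t.anchor sit at the
// same offset, mock_head() can treat the lane itself as a thread$ whose link
// fields alias the anchor, which is exactly what the assertions check.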
// Ctor
void ?{}( __intrusive_lane_t & this ) {
	this.lock = false;
	this.prev = mock_head(this);
	this.anchor.next = 0p;
	this.anchor.ts   = -1llu;
	#if !defined(__CFA_NO_STATISTICS__)
		this.cnt  = 0;
	#endif

	// We add a boat-load of assertions here because the anchor code is very fragile
	/* paranoid */ _Static_assert( offsetof( thread$, link ) == offsetof(__intrusive_lane_t, anchor) );
	/* paranoid */ verify( offsetof( thread$, link ) == offsetof(__intrusive_lane_t, anchor) );
	/* paranoid */ verify( ((uintptr_t)( mock_head(this) ) + offsetof( thread$, link )) == (uintptr_t)(&this.anchor) );
	/* paranoid */ verify( &mock_head(this)->link.next == &this.anchor.next );
	/* paranoid */ verify( &mock_head(this)->link.ts   == &this.anchor.ts   );
	/* paranoid */ verify( mock_head(this)->link.next == 0p );
	/* paranoid */ verify( mock_head(this)->link.ts   == -1llu  );
	/* paranoid */ verify( mock_head(this) == this.prev );
	/* paranoid */ verify( __alignof__(__intrusive_lane_t) == 128 );
	/* paranoid */ verify( __alignof__(this) == 128 );
	/* paranoid */ verifyf( ((intptr_t)(&this) % 128) == 0, "Expected address to be aligned %p %% 128 == %zd", &this, ((intptr_t)(&this) % 128) );
}

// Dtor is trivial
void ^?{}( __intrusive_lane_t & this ) {
	// Make sure the list is empty
	/* paranoid */ verify( this.anchor.next == 0p );
	/* paranoid */ verify( this.anchor.ts   == -1llu );
	/* paranoid */ verify( mock_head(this)  == this.prev );
}

#if   defined(CFA_HAVE_LINUX_LIBRSEQ)
	// No definition needed
#elif defined(CFA_HAVE_LINUX_RSEQ_H)

	#if defined( __x86_64 ) || defined( __i386 )
		#define RSEQ_SIG        0x53053053
	#elif defined( __ARM_ARCH )
		#ifdef __ARMEB__
			#define RSEQ_SIG    0xf3def5e7      /* udf    #24035    ; 0x5de3 (ARMv6+) */
		#else
			#define RSEQ_SIG    0xe7f5def3      /* udf    #24035    ; 0x5de3 */
		#endif
	#endif

	extern void __disable_interrupts_hard();
	extern void __enable_interrupts_hard();

	static void __kernel_raw_rseq_register  (void) {
		/* paranoid */ verify( __cfaabi_rseq.cpu_id == RSEQ_CPU_ID_UNINITIALIZED );

		// int ret = syscall(__NR_rseq, &__cfaabi_rseq, sizeof(struct rseq), 0, (sigset_t *)0p, _NSIG / 8);
		int ret = syscall(__NR_rseq, &__cfaabi_rseq, sizeof(struct rseq), 0, RSEQ_SIG);
		if(ret != 0) {
			int e = errno;
			switch(e) {
			case EINVAL: abort("KERNEL ERROR: rseq register invalid argument");
			case ENOSYS: abort("KERNEL ERROR: rseq register not supported");
			case EFAULT: abort("KERNEL ERROR: rseq register with invalid argument");
			case EBUSY : abort("KERNEL ERROR: rseq register already registered");
			case EPERM : abort("KERNEL ERROR: rseq register sig argument on unregistration does not match the signature received on registration");
			default: abort("KERNEL ERROR: rseq register unexpected return %d", e);
			}
		}
	}

	static void __kernel_raw_rseq_unregister(void) {
		/* paranoid */ verify( __cfaabi_rseq.cpu_id >= 0 );

		// int ret = syscall(__NR_rseq, &__cfaabi_rseq, sizeof(struct rseq), RSEQ_FLAG_UNREGISTER, (sigset_t *)0p, _NSIG / 8);
		int ret = syscall(__NR_rseq, &__cfaabi_rseq, sizeof(struct rseq), RSEQ_FLAG_UNREGISTER, RSEQ_SIG);
		if(ret != 0) {
			int e = errno;
			switch(e) {
			case EINVAL: abort("KERNEL ERROR: rseq unregister invalid argument");
			case ENOSYS: abort("KERNEL ERROR: rseq unregister not supported");
			case EFAULT: abort("KERNEL ERROR: rseq unregister with invalid argument");
			case EBUSY : abort("KERNEL ERROR: rseq unregister already registered");
			case EPERM : abort("KERNEL ERROR: rseq unregister sig argument on unregistration does not match the signature received on registration");
			default: abort("KERNEL ERROR: rseq unregister unexpected return %d", e);
			}
		}
	}
#else
	// No definition needed
#endif