source: libcfa/src/concurrency/ready_queue.cfa@ 660665f

Last change on this file since 660665f was 6ba6846, checked in by Thierry Delisle <tdelisle@…>, 4 years ago

Minor ready-queue fixes

1//
2// Cforall Version 1.0.0 Copyright (C) 2019 University of Waterloo
3//
4// The contents of this file are covered under the licence agreement in the
5// file "LICENCE" distributed with Cforall.
6//
7// ready_queue.cfa --
8//
9// Author : Thierry Delisle
10// Created On : Mon Nov dd 16:29:18 2019
11// Last Modified By :
12// Last Modified On :
13// Update Count :
14//
15
16#define __cforall_thread__
17#define _GNU_SOURCE
18
19// #define __CFA_DEBUG_PRINT_READY_QUEUE__
20
21
22#define USE_RELAXED_FIFO
23// #define USE_WORK_STEALING
24// #define USE_CPU_WORK_STEALING
25
26#include "bits/defs.hfa"
27#include "device/cpu.hfa"
28#include "kernel_private.hfa"
29
30#include "stdlib.hfa"
31#include "math.hfa"
32
33#include <errno.h>
34#include <unistd.h>
35
36extern "C" {
37 #include <sys/syscall.h> // __NR_xxx
38}
39
40#include "ready_subqueue.hfa"
41
42static const size_t cache_line_size = 64;
43
44#if !defined(__CFA_NO_STATISTICS__)
45 #define __STATS(...) __VA_ARGS__
46#else
47 #define __STATS(...)
48#endif
49
50// No overridden function, no environment variable, no define
51// fall back to a magic number
52#ifndef __CFA_MAX_PROCESSORS__
53 #define __CFA_MAX_PROCESSORS__ 1024
54#endif
55
56#if defined(USE_CPU_WORK_STEALING)
57 #define READYQ_SHARD_FACTOR 2
58#elif defined(USE_RELAXED_FIFO)
59 #define BIAS 4
60 #define READYQ_SHARD_FACTOR 4
61 #define SEQUENTIAL_SHARD 1
62#elif defined(USE_WORK_STEALING)
63 #define READYQ_SHARD_FACTOR 2
64 #define SEQUENTIAL_SHARD 2
65#else
66 #error no scheduling strategy selected
67#endif
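// Summary of the strategies selected above (derived from the code below):
//  - USE_CPU_WORK_STEALING : lanes are sharded per hardware thread (READYQ_SHARD_FACTOR
//    lanes per CPU, based on cpu_info); push and pop stay within the current CPU's
//    shard and occasionally help other lanes based on push timestamps.
//  - USE_RELAXED_FIFO : READYQ_SHARD_FACTOR lanes per processor; pushes favour the
//    processor's own lanes (BIAS - 1) times out of BIAS, pops pick two random lanes
//    and take from the one with the oldest push.
//  - USE_WORK_STEALING : READYQ_SHARD_FACTOR lanes per processor; pops prefer the
//    processor's own lanes and periodically help a random lane whose timestamp is
//    older than the local ones.
// SEQUENTIAL_SHARD is the lane count used when a cluster has fewer than 2 processors.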
68
69static inline struct $thread * try_pop(struct cluster * cltr, unsigned w __STATS(, __stats_readyQ_pop_t & stats));
70static inline struct $thread * try_pop(struct cluster * cltr, unsigned i, unsigned j __STATS(, __stats_readyQ_pop_t & stats));
71static inline struct $thread * search(struct cluster * cltr);
72static inline [unsigned, bool] idx_from_r(unsigned r, unsigned preferred);
73
74
75// returns the maximum number of processors the RWLock supports
76__attribute__((weak)) unsigned __max_processors() {
77 const char * max_cores_s = getenv("CFA_MAX_PROCESSORS");
78 if(!max_cores_s) {
79 __cfadbg_print_nolock(ready_queue, "No CFA_MAX_PROCESSORS in ENV\n");
80 return __CFA_MAX_PROCESSORS__;
81 }
82
83 char * endptr = 0p;
84 long int max_cores_l = strtol(max_cores_s, &endptr, 10);
85 if(max_cores_l < 1 || max_cores_l > 65535) {
86 __cfadbg_print_nolock(ready_queue, "CFA_MAX_PROCESSORS out of range : %ld\n", max_cores_l);
87 return __CFA_MAX_PROCESSORS__;
88 }
89 if('\0' != *endptr) {
90 __cfadbg_print_nolock(ready_queue, "CFA_MAX_PROCESSORS not a decimal number : %s\n", max_cores_s);
91 return __CFA_MAX_PROCESSORS__;
92 }
93
94 return max_cores_l;
95}
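// Illustrative usage (hypothetical program name) showing how the limit is overridden
// from the environment:
//   $ CFA_MAX_PROCESSORS=256 ./a.out
// Values outside [1, 65535], or with trailing non-digit characters, fall back to
// __CFA_MAX_PROCESSORS__.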
96
97#if defined(CFA_HAVE_LINUX_LIBRSEQ)
98 // No forward declaration needed
99 #define __kernel_rseq_register rseq_register_current_thread
100 #define __kernel_rseq_unregister rseq_unregister_current_thread
101#elif defined(CFA_HAVE_LINUX_RSEQ_H)
102 void __kernel_raw_rseq_register (void);
103 void __kernel_raw_rseq_unregister(void);
104
105 #define __kernel_rseq_register __kernel_raw_rseq_register
106 #define __kernel_rseq_unregister __kernel_raw_rseq_unregister
107#else
108 // No forward declaration needed
109 // No initialization needed
110 static inline void noop(void) {}
111
112 #define __kernel_rseq_register noop
113 #define __kernel_rseq_unregister noop
114#endif
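// In short: with librseq the registration helpers come from the library; with only the
// kernel header, the raw syscall wrappers defined at the bottom of this file are used;
// otherwise registration is a no-op and restartable sequences are simply not used.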
115
116//=======================================================================
117// Cluster wide reader-writer lock
118//=======================================================================
119void ?{}(__scheduler_RWLock_t & this) {
120 this.max = __max_processors();
121 this.alloc = 0;
122 this.ready = 0;
123 this.data = alloc(this.max);
124 this.write_lock = false;
125
126 /*paranoid*/ verify(__atomic_is_lock_free(sizeof(this.alloc), &this.alloc));
127 /*paranoid*/ verify(__atomic_is_lock_free(sizeof(this.ready), &this.ready));
128
129}
130void ^?{}(__scheduler_RWLock_t & this) {
131 free(this.data);
132}
133
134
135//=======================================================================
136// Lock-Free registering/unregistering of threads
137unsigned register_proc_id( void ) with(*__scheduler_lock) {
138 __kernel_rseq_register();
139
140 __cfadbg_print_safe(ready_queue, "Kernel : Registering proc %p for RW-Lock\n", proc);
141 bool * handle = (bool *)&kernelTLS().sched_lock;
142
143 // Step - 1 : check if there is already space in the data
144 uint_fast32_t s = ready;
145
146 // Check among all the ready
147 for(uint_fast32_t i = 0; i < s; i++) {
148 bool * volatile * cell = (bool * volatile *)&data[i]; // Cforall is bugged and the double volatile causes problems
149 /* paranoid */ verify( handle != *cell );
150
151 bool * null = 0p; // Reset each iteration since a failed compare-exchange overwrites it
152 if( __atomic_load_n(cell, (int)__ATOMIC_RELAXED) == null
153 && __atomic_compare_exchange_n( cell, &null, handle, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) {
154 /* paranoid */ verify(i < ready);
155 /* paranoid */ verify( (kernelTLS().sched_id = i, true) );
156 return i;
157 }
158 }
159
160 if(max <= alloc) abort("Trying to create more than %u processors", __scheduler_lock->max);
161
162 // Step - 2 : F&A to get a new spot in the array.
163 uint_fast32_t n = __atomic_fetch_add(&alloc, 1, __ATOMIC_SEQ_CST);
164 if(max <= n) abort("Trying to create more than %u processors", __scheduler_lock->max);
165
166 // Step - 3 : Mark space as used and then publish it.
167 data[n] = handle;
168 while() {
169 unsigned copy = n;
170 if( __atomic_load_n(&ready, __ATOMIC_RELAXED) == n
171 && __atomic_compare_exchange_n(&ready, &copy, n + 1, true, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
172 break;
173 Pause();
174 }
175
176 __cfadbg_print_safe(ready_queue, "Kernel : Registering proc %p done, id %lu\n", proc, n);
177
178 // Return new spot.
179 /* paranoid */ verify(n < ready);
180 /* paranoid */ verify( (kernelTLS().sched_id = n, true) );
181 return n;
182}
183
184void unregister_proc_id( unsigned id ) with(*__scheduler_lock) {
185 /* paranoid */ verify(id < ready);
186 /* paranoid */ verify(id == kernelTLS().sched_id);
187 /* paranoid */ verify(data[id] == &kernelTLS().sched_lock);
188
189 bool * volatile * cell = (bool * volatile *)&data[id]; // Cforall is bugged and the double volatile causes problems
190
191 __atomic_store_n(cell, 0p, __ATOMIC_RELEASE);
192
193 __cfadbg_print_safe(ready_queue, "Kernel : Unregister proc %p\n", proc);
194
195 __kernel_rseq_unregister();
196}
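// How registration stays lock-free (summary of the two functions above): 'data' holds a
// pointer to each processor's thread-local reader lock and 'ready' counts the published
// prefix of that array. A registering processor first tries to CAS its handle into a
// cell vacated by unregister_proc_id (set back to 0p), and only otherwise fetch-and-adds
// 'alloc' to claim a fresh cell, publishing it by advancing 'ready' once every earlier
// cell is published.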
197
198//-----------------------------------------------------------------------
199// Writer side : acquire when changing the ready queue, e.g. adding more
200// queues or removing them.
201uint_fast32_t ready_mutate_lock( void ) with(*__scheduler_lock) {
202 /* paranoid */ verify( ! __preemption_enabled() );
203 /* paranoid */ verify( ! kernelTLS().sched_lock );
204
205 // Step 1 : lock global lock
206 // It is needed to prevent processors that register mid critical-section
207 // from simply locking their own lock and entering.
208 __atomic_acquire( &write_lock );
209
210 // Step 2 : lock per-proc lock
211 // Processors that are currently being registered aren't counted
212 // but can't be in read_lock or in the critical section.
213 // All other processors are counted
214 uint_fast32_t s = ready;
215 for(uint_fast32_t i = 0; i < s; i++) {
216 volatile bool * llock = data[i];
217 if(llock) __atomic_acquire( llock );
218 }
219
220 /* paranoid */ verify( ! __preemption_enabled() );
221 return s;
222}
223
224void ready_mutate_unlock( uint_fast32_t last_s ) with(*__scheduler_lock) {
225 /* paranoid */ verify( ! __preemption_enabled() );
226
227 // Step 1 : release local locks
228 // This must be done while the global lock is held to prevent
229 // threads that were created mid critical-section
230 // from racing to lock their local locks and having the writer
231 // immediately unlock them
232 // Alternative solution : return s in write_lock and pass it to write_unlock
233 for(uint_fast32_t i = 0; i < last_s; i++) {
234 volatile bool * llock = data[i];
235 if(llock) __atomic_store_n(llock, (bool)false, __ATOMIC_RELEASE);
236 }
237
238 // Step 2 : release global lock
239 /*paranoid*/ assert(true == write_lock);
240 __atomic_store_n(&write_lock, (bool)false, __ATOMIC_RELEASE);
241
242 /* paranoid */ verify( ! __preemption_enabled() );
243}
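// Minimal usage sketch for the writer side (the caller shown is hypothetical; real
// callers such as the grow/shrink code below hold this lock around lane changes):
//   uint_fast32_t last = ready_mutate_lock();
//   // ... add or remove lanes, reassign processor ids ...
//   ready_mutate_unlock( last );
// The value returned by ready_mutate_lock() must be passed back so that only the
// per-processor locks actually acquired are released.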
244
245//=======================================================================
246// Cforall Ready Queue used for scheduling
247//=======================================================================
248void ?{}(__ready_queue_t & this) with (this) {
249 #if defined(USE_CPU_WORK_STEALING)
250 lanes.count = cpu_info.hthrd_count * READYQ_SHARD_FACTOR;
251 lanes.data = alloc( lanes.count );
252 lanes.tscs = alloc( lanes.count );
253
254 for( idx; (size_t)lanes.count ) {
255 (lanes.data[idx]){};
256 lanes.tscs[idx].tv = rdtscl();
257 }
258 #else
259 lanes.data = 0p;
260 lanes.tscs = 0p;
261 lanes.count = 0;
262 #endif
263}
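// Note: with USE_CPU_WORK_STEALING the number of lanes is fixed at construction (one
// group of READYQ_SHARD_FACTOR lanes per hardware thread), so ready_queue_grow and
// ready_queue_shrink below are no-ops; the other strategies start with no lanes and
// are resized as processors are added to or removed from the cluster.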
264
265void ^?{}(__ready_queue_t & this) with (this) {
266 #if !defined(USE_CPU_WORK_STEALING)
267 verify( SEQUENTIAL_SHARD == lanes.count );
268 #endif
269
270 free(lanes.data);
271 free(lanes.tscs);
272}
273
274//-----------------------------------------------------------------------
275#if defined(USE_CPU_WORK_STEALING)
276 __attribute__((hot)) void push(struct cluster * cltr, struct $thread * thrd, bool push_local) with (cltr->ready_queue) {
277 __cfadbg_print_safe(ready_queue, "Kernel : Pushing %p on cluster %p\n", thrd, cltr);
278
279 processor * const proc = kernelTLS().this_processor;
280 const bool external = !push_local || (!proc) || (cltr != proc->cltr);
281
282 const int cpu = __kernel_getcpu();
283 /* paranoid */ verify(cpu >= 0);
284 /* paranoid */ verify(cpu < cpu_info.hthrd_count);
285 /* paranoid */ verify(cpu * READYQ_SHARD_FACTOR < lanes.count);
286
287 const cpu_map_entry_t & map = cpu_info.llc_map[cpu];
288 /* paranoid */ verify(map.start * READYQ_SHARD_FACTOR < lanes.count);
289 /* paranoid */ verify(map.self * READYQ_SHARD_FACTOR < lanes.count);
290 /* paranoid */ verifyf((map.start + map.count) * READYQ_SHARD_FACTOR <= lanes.count, "have %zu lanes but map can go up to %u", lanes.count, (map.start + map.count) * READYQ_SHARD_FACTOR);
291
292 const int start = map.self * READYQ_SHARD_FACTOR;
293 unsigned i;
294 do {
295 unsigned r;
296 if(unlikely(external)) { r = __tls_rand(); }
297 else { r = proc->rdq.its++; }
298 i = start + (r % READYQ_SHARD_FACTOR);
299 // If we can't lock it retry
300 } while( !__atomic_try_acquire( &lanes.data[i].lock ) );
301
302 // Actually push it
303 push(lanes.data[i], thrd);
304
305 // Unlock and return
306 __atomic_unlock( &lanes.data[i].lock );
307
308 #if !defined(__CFA_NO_STATISTICS__)
309 if(unlikely(external)) __atomic_fetch_add(&cltr->stats->ready.push.extrn.success, 1, __ATOMIC_RELAXED);
310 else __tls_stats()->ready.push.local.success++;
311 #endif
312
313 __cfadbg_print_safe(ready_queue, "Kernel : Pushed %p on cluster %p (idx: %u, mask %llu, first %d)\n", thrd, cltr, i, used.mask[0], lane_first);
314
315 }
316
317 // Pop from the ready queue from a given cluster
318 __attribute__((hot)) $thread * pop_fast(struct cluster * cltr) with (cltr->ready_queue) {
319 /* paranoid */ verify( lanes.count > 0 );
320 /* paranoid */ verify( kernelTLS().this_processor );
321
322 const int cpu = __kernel_getcpu();
323 /* paranoid */ verify(cpu >= 0);
324 /* paranoid */ verify(cpu < cpu_info.hthrd_count);
325 /* paranoid */ verify(cpu * READYQ_SHARD_FACTOR < lanes.count);
326
327 const cpu_map_entry_t & map = cpu_info.llc_map[cpu];
328 /* paranoid */ verify(map.start * READYQ_SHARD_FACTOR < lanes.count);
329 /* paranoid */ verify(map.self * READYQ_SHARD_FACTOR < lanes.count);
330 /* paranoid */ verifyf((map.start + map.count) * READYQ_SHARD_FACTOR <= lanes.count, "have %zu lanes but map can go up to %u", lanes.count, (map.start + map.count) * READYQ_SHARD_FACTOR);
331
332 processor * const proc = kernelTLS().this_processor;
333 const int start = map.self * READYQ_SHARD_FACTOR;
334
335 // Did we already have a help target?
336 if(proc->rdq.target == -1u) {
337 // We don't have one yet: find the oldest push timestamp among our local lanes
338 unsigned long long min = ts(lanes.data[start]);
339 for(i; READYQ_SHARD_FACTOR) {
340 unsigned long long tsc = ts(lanes.data[start + i]);
341 if(tsc < min) min = tsc;
342 }
343 proc->rdq.cutoff = min;
344
345 /* paranoid */ verify(lanes.count < 65536); // The following code assumes max 65536 cores.
346 /* paranoid */ verify(map.count < 65536); // The following code assumes max 65536 cores.
347 uint64_t chaos = __tls_rand();
348 uint64_t high_chaos = (chaos >> 32);
349 uint64_t mid_chaos = (chaos >> 16) & 0xffff;
350 uint64_t low_chaos = chaos & 0xffff;
351
352 unsigned me = map.self;
353 unsigned cpu_chaos = map.start + (mid_chaos % map.count);
354 bool global = cpu_chaos == me;
355
356 if(global) {
357 proc->rdq.target = high_chaos % lanes.count;
358 } else {
359 proc->rdq.target = (cpu_chaos * READYQ_SHARD_FACTOR) + (low_chaos % READYQ_SHARD_FACTOR);
360 /* paranoid */ verify(proc->rdq.target >= (map.start * READYQ_SHARD_FACTOR));
361 /* paranoid */ verify(proc->rdq.target < ((map.start + map.count) * READYQ_SHARD_FACTOR));
362 }
363
364 /* paranoid */ verify(proc->rdq.target != -1u);
365 }
366 else {
367 const unsigned long long bias = 0; //2_500_000_000;
368 const unsigned long long cutoff = proc->rdq.cutoff > bias ? proc->rdq.cutoff - bias : proc->rdq.cutoff;
369 {
370 unsigned target = proc->rdq.target;
371 proc->rdq.target = -1u;
372 if(lanes.tscs[target].tv < cutoff && ts(lanes.data[target]) < cutoff) {
373 $thread * t = try_pop(cltr, target __STATS(, __tls_stats()->ready.pop.help));
374 proc->rdq.last = target;
375 if(t) return t;
376 }
377 }
378
379 unsigned last = proc->rdq.last;
380 if(last != -1u && lanes.tscs[last].tv < cutoff && ts(lanes.data[last]) < cutoff) {
381 $thread * t = try_pop(cltr, last __STATS(, __tls_stats()->ready.pop.help));
382 if(t) return t;
383 }
384 else {
385 proc->rdq.last = -1u;
386 }
387 }
388
389 for(READYQ_SHARD_FACTOR) {
390 unsigned i = start + (proc->rdq.itr++ % READYQ_SHARD_FACTOR);
391 if($thread * t = try_pop(cltr, i __STATS(, __tls_stats()->ready.pop.local))) return t;
392 }
393
394 // All lanes were empty, return 0p
395 return 0p;
396 }
397
398 __attribute__((hot)) struct $thread * pop_slow(struct cluster * cltr) with (cltr->ready_queue) {
399 processor * const proc = kernelTLS().this_processor;
400 unsigned last = proc->rdq.last;
401 if(last != -1u) {
402 struct $thread * t = try_pop(cltr, last __STATS(, __tls_stats()->ready.pop.steal));
403 if(t) return t;
404 proc->rdq.last = -1u;
405 }
406
407 unsigned i = __tls_rand() % lanes.count;
408 return try_pop(cltr, i __STATS(, __tls_stats()->ready.pop.steal));
409 }
410 __attribute__((hot)) struct $thread * pop_search(struct cluster * cltr) {
411 return search(cltr);
412 }
413#endif
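// CPU work-stealing pop in a nutshell (summary of pop_fast above): each call either
// picks a new help target or helps the one picked previously. Picking records the
// oldest push timestamp of the local shard as a cutoff, then selects either a random
// lane anywhere in the cluster (when the random cpu pick lands on this cpu) or a
// random lane inside another cpu's shard. On the next call that target, and the lane
// helped last, are only popped from if their timestamps are older than the cutoff,
// i.e. a processor only helps lanes staler than its own work.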
414#if defined(USE_RELAXED_FIFO)
415 //-----------------------------------------------------------------------
416 // get index from random number, with or without bias towards the preferred queues
417 static inline [unsigned, bool] idx_from_r(unsigned r, unsigned preferred) {
418 unsigned i;
419 bool local;
420 unsigned rlow = r % BIAS;
421 unsigned rhigh = r / BIAS;
422 if((0 != rlow) && preferred >= 0) {
423 // (BIAS - 1) out of BIAS chances
424 // Use preferred queues
425 i = preferred + (rhigh % READYQ_SHARD_FACTOR);
426 local = true;
427 }
428 else {
429 // 1 out of BIAS chances
430 // Use all queues
431 i = rhigh;
432 local = false;
433 }
434 return [i, local];
435 }
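// Worked example (with BIAS 4 and READYQ_SHARD_FACTOR 4): for r = 13, rlow = 13 % 4 = 1,
// which is non-zero, so the pick is local: i = preferred + ((13 / 4) % 4) = preferred + 3.
// For r = 12, rlow = 0, so the pick is global: i = 12 / 4 = 3, later reduced modulo
// lanes.count by the caller.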
436
437 __attribute__((hot)) void push(struct cluster * cltr, struct $thread * thrd, bool push_local) with (cltr->ready_queue) {
438 __cfadbg_print_safe(ready_queue, "Kernel : Pushing %p on cluster %p\n", thrd, cltr);
439
440 const bool external = !push_local || (!kernelTLS().this_processor) || (cltr != kernelTLS().this_processor->cltr);
441 /* paranoid */ verify(external || kernelTLS().this_processor->rdq.id < lanes.count );
442
443 bool local;
444 int preferred = external ? -1 : kernelTLS().this_processor->rdq.id;
445
446 // Try to pick a lane and lock it
447 unsigned i;
448 do {
449 // Pick the index of a lane
450 unsigned r = __tls_rand_fwd();
451 [i, local] = idx_from_r(r, preferred);
452
453 i %= __atomic_load_n( &lanes.count, __ATOMIC_RELAXED );
454
455 #if !defined(__CFA_NO_STATISTICS__)
456 if(unlikely(external)) __atomic_fetch_add(&cltr->stats->ready.push.extrn.attempt, 1, __ATOMIC_RELAXED);
457 else if(local) __tls_stats()->ready.push.local.attempt++;
458 else __tls_stats()->ready.push.share.attempt++;
459 #endif
460
461 // If we can't lock it retry
462 } while( !__atomic_try_acquire( &lanes.data[i].lock ) );
463
464 // Actually push it
465 push(lanes.data[i], thrd);
466
467 // Unlock and return
468 __atomic_unlock( &lanes.data[i].lock );
469
470 // Mark the current index in the tls rng instance as having an item
471 __tls_rand_advance_bck();
472
473 __cfadbg_print_safe(ready_queue, "Kernel : Pushed %p on cluster %p (idx: %u, mask %llu, first %d)\n", thrd, cltr, i, used.mask[0], lane_first);
474
475 // Update statistics
476 #if !defined(__CFA_NO_STATISTICS__)
477 if(unlikely(external)) __atomic_fetch_add(&cltr->stats->ready.push.extrn.success, 1, __ATOMIC_RELAXED);
478 else if(local) __tls_stats()->ready.push.local.success++;
479 else __tls_stats()->ready.push.share.success++;
480 #endif
481 }
482
483 // Pop from the ready queue from a given cluster
484 __attribute__((hot)) $thread * pop_fast(struct cluster * cltr) with (cltr->ready_queue) {
485 /* paranoid */ verify( lanes.count > 0 );
486 /* paranoid */ verify( kernelTLS().this_processor );
487 /* paranoid */ verify( kernelTLS().this_processor->rdq.id < lanes.count );
488
489 unsigned count = __atomic_load_n( &lanes.count, __ATOMIC_RELAXED );
490 int preferred = kernelTLS().this_processor->rdq.id;
491
492
493 // Try a bounded number of times to find a lane that isn't empty and pop from it
494 for(25) {
495 // Pick two lists at random
496 unsigned ri = __tls_rand_bck();
497 unsigned rj = __tls_rand_bck();
498
499 unsigned i, j;
500 __attribute__((unused)) bool locali, localj;
501 [i, locali] = idx_from_r(ri, preferred);
502 [j, localj] = idx_from_r(rj, preferred);
503
504 i %= count;
505 j %= count;
506
507 // try popping from the 2 picked lists
508 struct $thread * thrd = try_pop(cltr, i, j __STATS(, *(locali || localj ? &__tls_stats()->ready.pop.local : &__tls_stats()->ready.pop.help)));
509 if(thrd) {
510 return thrd;
511 }
512 }
513
514 // All lanes were empty, return 0p
515 return 0p;
516 }
517
518 __attribute__((hot)) struct $thread * pop_slow(struct cluster * cltr) { return pop_fast(cltr); }
519 __attribute__((hot)) struct $thread * pop_search(struct cluster * cltr) {
520 return search(cltr);
521 }
522#endif
523#if defined(USE_WORK_STEALING)
524 __attribute__((hot)) void push(struct cluster * cltr, struct $thread * thrd, bool push_local) with (cltr->ready_queue) {
525 __cfadbg_print_safe(ready_queue, "Kernel : Pushing %p on cluster %p\n", thrd, cltr);
526
527 // #define USE_PREFERRED
528 #if !defined(USE_PREFERRED)
529 const bool external = !push_local || (!kernelTLS().this_processor) || (cltr != kernelTLS().this_processor->cltr);
530 /* paranoid */ verify(external || kernelTLS().this_processor->rdq.id < lanes.count );
531 #else
532 unsigned preferred = thrd->preferred;
533 const bool external = push_local || (!kernelTLS().this_processor) || preferred == -1u || thrd->curr_cluster != cltr;
534 /* paranoid */ verifyf(external || preferred < lanes.count, "Invalid preferred queue %u for %u lanes", preferred, lanes.count );
535
536 unsigned r = preferred % READYQ_SHARD_FACTOR;
537 const unsigned start = preferred - r;
538 #endif
539
540 // Try to pick a lane and lock it
541 unsigned i;
542 do {
543 #if !defined(__CFA_NO_STATISTICS__)
544 if(unlikely(external)) __atomic_fetch_add(&cltr->stats->ready.push.extrn.attempt, 1, __ATOMIC_RELAXED);
545 else __tls_stats()->ready.push.local.attempt++;
546 #endif
547
548 if(unlikely(external)) {
549 i = __tls_rand() % lanes.count;
550 }
551 else {
552 #if !defined(USE_PREFERRED)
553 processor * proc = kernelTLS().this_processor;
554 unsigned r = proc->rdq.its++;
555 i = proc->rdq.id + (r % READYQ_SHARD_FACTOR);
556 #else
557 i = start + (r++ % READYQ_SHARD_FACTOR);
558 #endif
559 }
560 // If we can't lock it retry
561 } while( !__atomic_try_acquire( &lanes.data[i].lock ) );
562
563 // Actually push it
564 push(lanes.data[i], thrd);
565
566 // Unlock and return
567 __atomic_unlock( &lanes.data[i].lock );
568
569 #if !defined(__CFA_NO_STATISTICS__)
570 if(unlikely(external)) __atomic_fetch_add(&cltr->stats->ready.push.extrn.success, 1, __ATOMIC_RELAXED);
571 else __tls_stats()->ready.push.local.success++;
572 #endif
573
574 __cfadbg_print_safe(ready_queue, "Kernel : Pushed %p on cluster %p (idx: %u, mask %llu, first %d)\n", thrd, cltr, i, used.mask[0], lane_first);
575 }
576
577 // Pop from the ready queue from a given cluster
578 __attribute__((hot)) $thread * pop_fast(struct cluster * cltr) with (cltr->ready_queue) {
579 /* paranoid */ verify( lanes.count > 0 );
580 /* paranoid */ verify( kernelTLS().this_processor );
581 /* paranoid */ verify( kernelTLS().this_processor->rdq.id < lanes.count );
582
583 processor * proc = kernelTLS().this_processor;
584
585 if(proc->rdq.target == -1u) {
586 unsigned long long min = ts(lanes.data[proc->rdq.id]);
587 for(int i = 0; i < READYQ_SHARD_FACTOR; i++) {
588 unsigned long long tsc = ts(lanes.data[proc->rdq.id + i]);
589 if(tsc < min) min = tsc;
590 }
591 proc->rdq.cutoff = min;
592 proc->rdq.target = __tls_rand() % lanes.count;
593 }
594 else {
595 unsigned target = proc->rdq.target;
596 proc->rdq.target = -1u;
597 const unsigned long long bias = 0; //2_500_000_000;
598 const unsigned long long cutoff = proc->rdq.cutoff > bias ? proc->rdq.cutoff - bias : proc->rdq.cutoff;
599 if(lanes.tscs[target].tv < cutoff && ts(lanes.data[target]) < cutoff) {
600 $thread * t = try_pop(cltr, target __STATS(, __tls_stats()->ready.pop.help));
601 if(t) return t;
602 }
603 }
604
605 for(READYQ_SHARD_FACTOR) {
606 unsigned i = proc->rdq.id + (proc->rdq.itr++ % READYQ_SHARD_FACTOR);
607 if($thread * t = try_pop(cltr, i __STATS(, __tls_stats()->ready.pop.local))) return t;
608 }
609 return 0p;
610 }
611
612 __attribute__((hot)) struct $thread * pop_slow(struct cluster * cltr) with (cltr->ready_queue) {
613 unsigned i = __tls_rand() % lanes.count;
614 return try_pop(cltr, i __STATS(, __tls_stats()->ready.pop.steal));
615 }
616
617 __attribute__((hot)) struct $thread * pop_search(struct cluster * cltr) with (cltr->ready_queue) {
618 return search(cltr);
619 }
620#endif
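// Work-stealing in a nutshell (summary of the functions above): a processor owns
// READYQ_SHARD_FACTOR consecutive lanes starting at proc->rdq.id and normally pushes
// to and pops from those only. Roughly every other pop_fast call records the oldest
// timestamp of the local lanes as a cutoff and picks a random lane as a help target;
// the following call pops from that target only if its timestamp is older than the
// cutoff. pop_slow and pop_search are the fallbacks when local lanes are empty: a
// single random steal and a full linear scan respectively.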
621
622//=======================================================================
623// Various Ready Queue utilities
624//=======================================================================
625// these functions work the same or almost the same
626// whether using work-stealing or relaxed-FIFO scheduling
627
628//-----------------------------------------------------------------------
629// try to pop from a lane given by index w
630static inline struct $thread * try_pop(struct cluster * cltr, unsigned w __STATS(, __stats_readyQ_pop_t & stats)) with (cltr->ready_queue) {
631 __STATS( stats.attempt++; )
632
633 // Get relevant elements locally
634 __intrusive_lane_t & lane = lanes.data[w];
635
636 // If list looks empty retry
637 if( is_empty(lane) ) {
638 return 0p;
639 }
640
641 // If we can't get the lock retry
642 if( !__atomic_try_acquire(&lane.lock) ) {
643 return 0p;
644 }
645
646 // If list is empty, unlock and retry
647 if( is_empty(lane) ) {
648 __atomic_unlock(&lane.lock);
649 return 0p;
650 }
651
652 // Actually pop the list
653 struct $thread * thrd;
654 unsigned long long tsv;
655 [thrd, tsv] = pop(lane);
656
657 /* paranoid */ verify(thrd);
658 /* paranoid */ verify(tsv);
659 /* paranoid */ verify(lane.lock);
660
661 // Unlock and return
662 __atomic_unlock(&lane.lock);
663
664 // Update statistics
665 __STATS( stats.success++; )
666
667 #if defined(USE_WORK_STEALING)
668 lanes.tscs[w].tv = tsv;
669 #endif
670
671 thrd->preferred = w;
672
673 // return the popped thread
674 return thrd;
675}
676
677//-----------------------------------------------------------------------
678// try to pop from any lane, making sure you don't miss any threads pushed
679// before the start of the function
680static inline struct $thread * search(struct cluster * cltr) with (cltr->ready_queue) {
681 /* paranoid */ verify( lanes.count > 0 );
682 unsigned count = __atomic_load_n( &lanes.count, __ATOMIC_RELAXED );
683 unsigned offset = __tls_rand();
684 for(i; count) {
685 unsigned idx = (offset + i) % count;
686 struct $thread * thrd = try_pop(cltr, idx __STATS(, __tls_stats()->ready.pop.search));
687 if(thrd) {
688 return thrd;
689 }
690 }
691
692 // All lanes were empty, return 0p
693 return 0p;
694}
695
696//-----------------------------------------------------------------------
697// Check that all the intrusive queues in the data structure are still consistent
698static void check( __ready_queue_t & q ) with (q) {
699 #if defined(__CFA_WITH_VERIFY__)
700 {
701 for( idx ; lanes.count ) {
702 __intrusive_lane_t & sl = lanes.data[idx];
703 assert(!lanes.data[idx].lock);
704
705 if(is_empty(sl)) {
706 assert( sl.anchor.next == 0p );
707 assert( sl.anchor.ts == -1llu );
708 assert( mock_head(sl) == sl.prev );
709 } else {
710 assert( sl.anchor.next != 0p );
711 assert( sl.anchor.ts != -1llu );
712 assert( mock_head(sl) != sl.prev );
713 }
714 }
715 }
716 #endif
717}
718
719//-----------------------------------------------------------------------
720// Given 2 indexes, pick the list with the oldest push and try to pop from it
721static inline struct $thread * try_pop(struct cluster * cltr, unsigned i, unsigned j __STATS(, __stats_readyQ_pop_t & stats)) with (cltr->ready_queue) {
722 // Pick the better list
723 int w = i;
724 if( __builtin_expect(!is_empty(lanes.data[j]), true) ) {
725 w = (ts(lanes.data[i]) < ts(lanes.data[j])) ? i : j;
726 }
727
728 return try_pop(cltr, w __STATS(, stats));
729}
730
731// Call this function if the intrusive list was moved using memcpy;
732// it fixes the list so that the pointers back to the anchor aren't left dangling
733static inline void fix(__intrusive_lane_t & ll) {
734 if(is_empty(ll)) {
735 verify(ll.anchor.next == 0p);
736 ll.prev = mock_head(ll);
737 }
738}
739
740static void assign_list(unsigned & value, dlist(processor) & list, unsigned count) {
741 processor * it = &list`first;
742 for(unsigned i = 0; i < count; i++) {
743 /* paranoid */ verifyf( it, "Unexpected null iterator, at index %u of %u\n", i, count);
744 it->rdq.id = value;
745 it->rdq.target = -1u;
746 value += READYQ_SHARD_FACTOR;
747 it = &(*it)`next;
748 }
749}
750
751static void reassign_cltr_id(struct cluster * cltr) {
752 unsigned preferred = 0;
753 assign_list(preferred, cltr->procs.actives, cltr->procs.total - cltr->procs.idle);
754 assign_list(preferred, cltr->procs.idles , cltr->procs.idle );
755}
756
757static void fix_times( struct cluster * cltr ) with( cltr->ready_queue ) {
758 #if defined(USE_WORK_STEALING)
759 lanes.tscs = alloc(lanes.count, lanes.tscs`realloc);
760 for(i; lanes.count) {
761 unsigned long long tsc1 = ts(lanes.data[i]);
762 unsigned long long tsc2 = rdtscl();
763 lanes.tscs[i].tv = min(tsc1, tsc2);
764 }
765 #endif
766}
767
768#if defined(USE_CPU_WORK_STEALING)
769 // ready_queue size is fixed in this case
770 void ready_queue_grow(struct cluster * cltr) {}
771 void ready_queue_shrink(struct cluster * cltr) {}
772#else
773 // Grow the ready queue
774 void ready_queue_grow(struct cluster * cltr) {
775 size_t ncount;
776 int target = cltr->procs.total;
777
778 /* paranoid */ verify( ready_mutate_islocked() );
779 __cfadbg_print_safe(ready_queue, "Kernel : Growing ready queue\n");
780
781 // Make sure that everything is consistent
782 /* paranoid */ check( cltr->ready_queue );
783
784 // grow the ready queue
785 with( cltr->ready_queue ) {
786 // Find new count
787 // Make sure we always have at least 1 list
788 if(target >= 2) {
789 ncount = target * READYQ_SHARD_FACTOR;
790 } else {
791 ncount = SEQUENTIAL_SHARD;
792 }
793
794 // Allocate new array (uses realloc and memcpies the data)
795 lanes.data = alloc( ncount, lanes.data`realloc );
796
797 // Fix the moved data
798 for( idx; (size_t)lanes.count ) {
799 fix(lanes.data[idx]);
800 }
801
802 // Construct new data
803 for( idx; (size_t)lanes.count ~ ncount) {
804 (lanes.data[idx]){};
805 }
806
807 // Update original
808 lanes.count = ncount;
809 }
810
811 fix_times(cltr);
812
813 reassign_cltr_id(cltr);
814
815 // Make sure that everything is consistent
816 /* paranoid */ check( cltr->ready_queue );
817
818 __cfadbg_print_safe(ready_queue, "Kernel : Growing ready queue done\n");
819
820 /* paranoid */ verify( ready_mutate_islocked() );
821 }
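// Sizing example (assuming the relaxed-FIFO READYQ_SHARD_FACTOR of 4): a cluster growing
// to 8 processors gets 8 * 4 = 32 lanes, while a cluster with a single processor keeps
// SEQUENTIAL_SHARD (1) lane. Shrinking below follows the same rule and requeues any
// thread left in a removed lane before destroying it.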
822
823 // Shrink the ready queue
824 void ready_queue_shrink(struct cluster * cltr) {
825 /* paranoid */ verify( ready_mutate_islocked() );
826 __cfadbg_print_safe(ready_queue, "Kernel : Shrinking ready queue\n");
827
828 // Make sure that everything is consistent
829 /* paranoid */ check( cltr->ready_queue );
830
831 int target = cltr->procs.total;
832
833 with( cltr->ready_queue ) {
834 // Remember old count
835 size_t ocount = lanes.count;
836
837 // Find new count
838 // Make sure we always have at least 1 list
839 lanes.count = target >= 2 ? target * READYQ_SHARD_FACTOR: SEQUENTIAL_SHARD;
840 /* paranoid */ verify( ocount >= lanes.count );
841 /* paranoid */ verify( lanes.count == target * READYQ_SHARD_FACTOR || target < 2 );
842
843 // for printing, count the number of displaced threads
844 #if defined(__CFA_DEBUG_PRINT__) || defined(__CFA_DEBUG_PRINT_READY_QUEUE__)
845 __attribute__((unused)) size_t displaced = 0;
846 #endif
847
848 // redistribute old data
849 for( idx; (size_t)lanes.count ~ ocount) {
850 // Lock is not strictly needed but makes checking invariants much easier
851 __attribute__((unused)) bool locked = __atomic_try_acquire(&lanes.data[idx].lock);
852 verify(locked);
853
854 // As long as this lane is not empty, pop threads from it and push them elsewhere in the queue
855 while(!is_empty(lanes.data[idx])) {
856 struct $thread * thrd;
857 unsigned long long _;
858 [thrd, _] = pop(lanes.data[idx]);
859
860 push(cltr, thrd, true);
861
862 // for printing, count the number of displaced threads
863 #if defined(__CFA_DEBUG_PRINT__) || defined(__CFA_DEBUG_PRINT_READY_QUEUE__)
864 displaced++;
865 #endif
866 }
867
868 // Unlock the lane
869 __atomic_unlock(&lanes.data[idx].lock);
870
871 // TODO print the queue statistics here
872
873 ^(lanes.data[idx]){};
874 }
875
876 __cfadbg_print_safe(ready_queue, "Kernel : Shrinking ready queue displaced %zu threads\n", displaced);
877
878 // Allocate new array (uses realloc and memcpies the data)
879 lanes.data = alloc( lanes.count, lanes.data`realloc );
880
881 // Fix the moved data
882 for( idx; (size_t)lanes.count ) {
883 fix(lanes.data[idx]);
884 }
885 }
886
887 fix_times(cltr);
888
889 reassign_cltr_id(cltr);
890
891 // Make sure that everything is consistent
892 /* paranoid */ check( cltr->ready_queue );
893
894 __cfadbg_print_safe(ready_queue, "Kernel : Shrinking ready queue done\n");
895 /* paranoid */ verify( ready_mutate_islocked() );
896 }
897#endif
898
899#if !defined(__CFA_NO_STATISTICS__)
900 unsigned cnt(const __ready_queue_t & this, unsigned idx) {
901 /* paranoid */ verify(this.lanes.count > idx);
902 return this.lanes.data[idx].cnt;
903 }
904#endif
905
906
907#if defined(CFA_HAVE_LINUX_LIBRSEQ)
908 // No definition needed
909#elif defined(CFA_HAVE_LINUX_RSEQ_H)
910
911 #if defined( __x86_64 ) || defined( __i386 )
912 #define RSEQ_SIG 0x53053053
913 #elif defined( __ARM_ARCH )
914 #ifdef __ARMEB__
915 #define RSEQ_SIG 0xf3def5e7 /* udf #24035 ; 0x5de3 (ARMv6+) */
916 #else
917 #define RSEQ_SIG 0xe7f5def3 /* udf #24035 ; 0x5de3 */
918 #endif
919 #endif
920
921 extern void __disable_interrupts_hard();
922 extern void __enable_interrupts_hard();
923
924 void __kernel_raw_rseq_register (void) {
925 /* paranoid */ verify( __cfaabi_rseq.cpu_id == RSEQ_CPU_ID_UNINITIALIZED );
926
927 // int ret = syscall(__NR_rseq, &__cfaabi_rseq, sizeof(struct rseq), 0, (sigset_t *)0p, _NSIG / 8);
928 int ret = syscall(__NR_rseq, &__cfaabi_rseq, sizeof(struct rseq), 0, RSEQ_SIG);
929 if(ret != 0) {
930 int e = errno;
931 switch(e) {
932 case EINVAL: abort("KERNEL ERROR: rseq register invalid argument");
933 case ENOSYS: abort("KERNEL ERROR: rseq register not supported");
934 case EFAULT: abort("KERNEL ERROR: rseq register with invalid address");
935 case EBUSY : abort("KERNEL ERROR: rseq register already registered");
936 case EPERM : abort("KERNEL ERROR: rseq register sig argument on unregistration does not match the signature received on registration");
937 default: abort("KERNEL ERROR: rseq register unexpected return %d", e);
938 }
939 }
940 }
941
942 void __kernel_raw_rseq_unregister(void) {
943 /* paranoid */ verify( __cfaabi_rseq.cpu_id >= 0 );
944
945 // int ret = syscall(__NR_rseq, &__cfaabi_rseq, sizeof(struct rseq), RSEQ_FLAG_UNREGISTER, (sigset_t *)0p, _NSIG / 8);
946 int ret = syscall(__NR_rseq, &__cfaabi_rseq, sizeof(struct rseq), RSEQ_FLAG_UNREGISTER, RSEQ_SIG);
947 if(ret != 0) {
948 int e = errno;
949 switch(e) {
950 case EINVAL: abort("KERNEL ERROR: rseq unregister invalid argument");
951 case ENOSYS: abort("KERNEL ERROR: rseq unregister not supported");
952 case EFAULT: abort("KERNEL ERROR: rseq unregister with invalid address");
953 case EBUSY : abort("KERNEL ERROR: rseq unregister already registered");
954 case EPERM : abort("KERNEL ERROR: rseq unregister sig argument on unregistration does not match the signature received on registration");
955 default: abort("KERNEL ERROR: rseq unregister unexpected return %d", e);
956 }
957 }
958 }
959#else
960 // No definition needed
961#endif