source: libcfa/src/concurrency/ready_queue.cfa@ b2fc7ad9

Last change on this file since b2fc7ad9 was b2fc7ad9, checked in by Thierry Delisle <tdelisle@…>, 4 years ago

Fixed missing return.
Was incorrectly removed in a33c11376e88fecef869e2f63e32b80f9410edc8

//
// Cforall Version 1.0.0 Copyright (C) 2019 University of Waterloo
//
// The contents of this file are covered under the licence agreement in the
// file "LICENCE" distributed with Cforall.
//
// ready_queue.cfa --
//
// Author : Thierry Delisle
// Created On : Mon Nov dd 16:29:18 2019
// Last Modified By :
// Last Modified On :
// Update Count :
//

#define __cforall_thread__
// #define __CFA_DEBUG_PRINT_READY_QUEUE__

// #define USE_MPSC

#define USE_RELAXED_FIFO
// #define USE_WORK_STEALING

#include "bits/defs.hfa"
#include "kernel_private.hfa"

#define _GNU_SOURCE
#include "stdlib.hfa"
#include "math.hfa"

#include <unistd.h>

#include "ready_subqueue.hfa"

static const size_t cache_line_size = 64;

#if !defined(__CFA_NO_STATISTICS__)
	#define __STATS(...) __VA_ARGS__
#else
	#define __STATS(...)
#endif

// No overriding function, no environment variable, no define :
// fall back to a magic number
#ifndef __CFA_MAX_PROCESSORS__
	#define __CFA_MAX_PROCESSORS__ 1024
#endif

#if defined(USE_RELAXED_FIFO)
	#define BIAS 4
	#define READYQ_SHARD_FACTOR 4
	#define SEQUENTIAL_SHARD 1
#elif defined(USE_WORK_STEALING)
	#define READYQ_SHARD_FACTOR 2
	#define SEQUENTIAL_SHARD 2
#else
	#error no scheduling strategy selected
#endif

static inline struct $thread * try_pop(struct cluster * cltr, unsigned w __STATS(, __stats_readyQ_pop_t & stats));
static inline struct $thread * try_pop(struct cluster * cltr, unsigned i, unsigned j __STATS(, __stats_readyQ_pop_t & stats));
static inline struct $thread * search(struct cluster * cltr);
static inline [unsigned, bool] idx_from_r(unsigned r, unsigned preferred);


// returns the maximum number of processors the RWLock supports
__attribute__((weak)) unsigned __max_processors() {
	const char * max_cores_s = getenv("CFA_MAX_PROCESSORS");
	if(!max_cores_s) {
		__cfadbg_print_nolock(ready_queue, "No CFA_MAX_PROCESSORS in ENV\n");
		return __CFA_MAX_PROCESSORS__;
	}

	char * endptr = 0p;
	long int max_cores_l = strtol(max_cores_s, &endptr, 10);
	if(max_cores_l < 1 || max_cores_l > 65535) {
		__cfadbg_print_nolock(ready_queue, "CFA_MAX_PROCESSORS out of range : %ld\n", max_cores_l);
		return __CFA_MAX_PROCESSORS__;
	}
	if('\0' != *endptr) {
		__cfadbg_print_nolock(ready_queue, "CFA_MAX_PROCESSORS not a decimal number : %s\n", max_cores_s);
		return __CFA_MAX_PROCESSORS__;
	}

	return max_cores_l;
}
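
// The three fallbacks above (overriding function, environment variable, compile-time
// define) can all be exercised from outside this file. The definition below is an
// illustrative sketch only: because __max_processors() is declared weak, a strong
// definition in application code (a separate translation unit) replaces it. The value
// 256 and the guard macro are arbitrary examples, not part of this file's build.
#if defined(__CFA_EXAMPLE_MAX_PROCS__) // sketch, not compiled
unsigned __max_processors() {
	// Strong definition: cap the RW-lock registration slots at 256 processors,
	// regardless of CFA_MAX_PROCESSORS or __CFA_MAX_PROCESSORS__.
	return 256;
}
#endif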

//=======================================================================
// Cluster wide reader-writer lock
//=======================================================================
void ?{}(__scheduler_RWLock_t & this) {
	this.max   = __max_processors();
	this.alloc = 0;
	this.ready = 0;
	this.lock  = false;
	this.data  = alloc(this.max);

	/*paranoid*/ verify( 0 == (((uintptr_t)(this.data    )) % 64) );
	/*paranoid*/ verify( 0 == (((uintptr_t)(this.data + 1)) % 64) );
	/*paranoid*/ verify(__atomic_is_lock_free(sizeof(this.alloc), &this.alloc));
	/*paranoid*/ verify(__atomic_is_lock_free(sizeof(this.ready), &this.ready));

}
void ^?{}(__scheduler_RWLock_t & this) {
	free(this.data);
}

void ?{}( __scheduler_lock_id_t & this, __processor_id_t * proc ) {
	this.handle = proc;
	this.lock   = false;
	#ifdef __CFA_WITH_VERIFY__
		this.owned = false;
	#endif
}

//=======================================================================
// Lock-Free registering/unregistering of threads
void register_proc_id( struct __processor_id_t * proc ) with(*__scheduler_lock) {
	__cfadbg_print_safe(ready_queue, "Kernel : Registering proc %p for RW-Lock\n", proc);

	// Step - 1 : check if there is already space in the data
	uint_fast32_t s = ready;

	// Check among all the ready
	for(uint_fast32_t i = 0; i < s; i++) {
		__processor_id_t * null = 0p; // Reset every iteration since a failed compare-exchange overwrites it
		if( __atomic_load_n(&data[i].handle, (int)__ATOMIC_RELAXED) == null
			&& __atomic_compare_exchange_n( &data[i].handle, &null, proc, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) {
			/*paranoid*/ verify(i < ready);
			/*paranoid*/ verify(0 == (__alignof__(data[i]) % cache_line_size));
			/*paranoid*/ verify((((uintptr_t)&data[i]) % cache_line_size) == 0);
			proc->id = i;
			return;
		}
	}

	if(max <= alloc) abort("Trying to create more than %u processors", __scheduler_lock->max);

	// Step - 2 : F&A to get a new spot in the array.
	uint_fast32_t n = __atomic_fetch_add(&alloc, 1, __ATOMIC_SEQ_CST);
	if(max <= n) abort("Trying to create more than %u processors", __scheduler_lock->max);

	// Step - 3 : Mark space as used and then publish it.
	__scheduler_lock_id_t * storage = (__scheduler_lock_id_t *)&data[n];
	(*storage){ proc };
	while() {
		unsigned copy = n;
		if( __atomic_load_n(&ready, __ATOMIC_RELAXED) == n
			&& __atomic_compare_exchange_n(&ready, &copy, n + 1, true, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
			break;
		Pause();
	}

	__cfadbg_print_safe(ready_queue, "Kernel : Registering proc %p done, id %lu\n", proc, n);

	// Return new spot.
	/*paranoid*/ verify(n < ready);
	/*paranoid*/ verify(__alignof__(data[n]) == (2 * cache_line_size));
	/*paranoid*/ verify((((uintptr_t)&data[n]) % cache_line_size) == 0);
	proc->id = n;
}
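
// The publish step above makes slot n visible only after slots 0..n-1 are visible:
// each registering processor spins until `ready == n` and then CAS-es `ready` to
// n + 1, so readers that scan indexes [0, ready) never observe an unconstructed slot.
// A minimal, self-contained sketch of the same publication pattern; the names here
// are illustrative, not the kernel's.
#if defined(__CFA_EXAMPLE_PUBLISH__) // sketch, not compiled
static void example_publish(volatile unsigned * ready_ctr, unsigned n) {
	for(;;) {
		unsigned expected = n;
		// Wait for all earlier slots to be published, then publish slot n.
		if( __atomic_load_n(ready_ctr, __ATOMIC_RELAXED) == n
			&& __atomic_compare_exchange_n(ready_ctr, &expected, n + 1, true, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
			return;
		Pause(); // same spin-loop hint used by the registration loop above
	}
}
#endif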

void unregister_proc_id( struct __processor_id_t * proc ) with(*__scheduler_lock) {
	unsigned id = proc->id;
	/*paranoid*/ verify(id < ready);
	/*paranoid*/ verify(proc == __atomic_load_n(&data[id].handle, __ATOMIC_RELAXED));
	__atomic_store_n(&data[id].handle, 0p, __ATOMIC_RELEASE);

	__cfadbg_print_safe(ready_queue, "Kernel : Unregister proc %p\n", proc);
}
171
172//-----------------------------------------------------------------------
173// Writer side : acquire when changing the ready queue, e.g. adding more
174// queues or removing them.
175uint_fast32_t ready_mutate_lock( void ) with(*__scheduler_lock) {
176 /* paranoid */ verify( ! __preemption_enabled() );
177
178 // Step 1 : lock global lock
179 // It is needed to avoid processors that register mid Critical-Section
180 // to simply lock their own lock and enter.
181 __atomic_acquire( &lock );
182
183 // Step 2 : lock per-proc lock
184 // Processors that are currently being registered aren't counted
185 // but can't be in read_lock or in the critical section.
186 // All other processors are counted
187 uint_fast32_t s = ready;
188 for(uint_fast32_t i = 0; i < s; i++) {
189 __atomic_acquire( &data[i].lock );
190 }
191
192 /* paranoid */ verify( ! __preemption_enabled() );
193 return s;
194}
195
196void ready_mutate_unlock( uint_fast32_t last_s ) with(*__scheduler_lock) {
197 /* paranoid */ verify( ! __preemption_enabled() );
198
199 // Step 1 : release local locks
200 // This must be done while the global lock is held to avoid
201 // threads that where created mid critical section
202 // to race to lock their local locks and have the writer
203 // immidiately unlock them
204 // Alternative solution : return s in write_lock and pass it to write_unlock
205 for(uint_fast32_t i = 0; i < last_s; i++) {
206 verify(data[i].lock);
207 __atomic_store_n(&data[i].lock, (bool)false, __ATOMIC_RELEASE);
208 }
209
210 // Step 2 : release global lock
211 /*paranoid*/ assert(true == lock);
212 __atomic_store_n(&lock, (bool)false, __ATOMIC_RELEASE);
213
214 /* paranoid */ verify( ! __preemption_enabled() );
215}
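
// Typical writer-side usage, shown as a sketch: take the cluster-wide write lock,
// mutate the ready-queue structure, then release using the count returned by the
// lock call so exactly the processors that were locked get unlocked. The caller
// `resize_example` is hypothetical; as the paranoid verifies above indicate, it
// would have to run with preemption disabled.
#if defined(__CFA_EXAMPLE_WRITER__) // sketch, not compiled
static void resize_example(struct cluster * cltr) {
	uint_fast32_t last_size = ready_mutate_lock();   // lock out all registered processors
	ready_queue_grow( cltr );                        // safe to change lanes.count while the write lock is held
	ready_mutate_unlock( last_size );                // release the same set of per-proc locks
}
#endif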

//=======================================================================
// Cforall Ready Queue used for scheduling
//=======================================================================
void ?{}(__ready_queue_t & this) with (this) {
	lanes.data  = 0p;
	lanes.tscs  = 0p;
	lanes.count = 0;
}

void ^?{}(__ready_queue_t & this) with (this) {
	verify( SEQUENTIAL_SHARD == lanes.count );
	free(lanes.data);
	free(lanes.tscs);
}

//-----------------------------------------------------------------------
#if defined(USE_RELAXED_FIFO)
	//-----------------------------------------------------------------------
	// get index from random number with or without bias towards queues
	static inline [unsigned, bool] idx_from_r(unsigned r, unsigned preferred) {
		unsigned i;
		bool local;
		unsigned rlow  = r % BIAS;
		unsigned rhigh = r / BIAS;
		if((0 != rlow) && preferred >= 0) {
			// (BIAS - 1) out of BIAS chances
			// Use preferred queues
			i = preferred + (rhigh % READYQ_SHARD_FACTOR);
			local = true;
		}
		else {
			// 1 out of BIAS chances
			// Use all queues
			i = rhigh;
			local = false;
		}
		return [i, local];
	}
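
	// Worked example of the bias split above (values are illustrative): with BIAS == 4
	// and READYQ_SHARD_FACTOR == 4, a processor whose first lane is preferred == 8 owns
	// lanes 8..11. For r == 22: rlow = 22 % 4 = 2 (non-zero, so stay local) and
	// rhigh = 22 / 4 = 5, giving i = 8 + (5 % 4) = 9, local = true. For r == 20:
	// rlow = 0, so the push may go to any lane, i = rhigh = 5 (later reduced modulo
	// lanes.count), local = false. On average (BIAS - 1) / BIAS of operations stay on
	// the processor's own shard. The guarded function is a sketch only.
	#if defined(__CFA_EXAMPLE_IDX__) // sketch, not compiled
	static void example_idx_from_r() {
		unsigned i; bool local;
		[i, local] = idx_from_r(22, 8);   // expect i == 9, local == true
		[i, local] = idx_from_r(20, 8);   // expect i == 5, local == false
	}
	#endif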

	__attribute__((hot)) void push(struct cluster * cltr, struct $thread * thrd) with (cltr->ready_queue) {
		__cfadbg_print_safe(ready_queue, "Kernel : Pushing %p on cluster %p\n", thrd, cltr);

		const bool external = (!kernelTLS().this_processor) || (cltr != kernelTLS().this_processor->cltr);
		/* paranoid */ verify(external || kernelTLS().this_processor->rdq.id < lanes.count );

		// write timestamp
		thrd->link.ts = rdtscl();

		bool local;
		int preferred = external ? -1 : kernelTLS().this_processor->rdq.id;

		// Try to pick a lane and lock it
		unsigned i;
		do {
			// Pick the index of a lane
			unsigned r = __tls_rand_fwd();
			[i, local] = idx_from_r(r, preferred);

			i %= __atomic_load_n( &lanes.count, __ATOMIC_RELAXED );

			#if !defined(__CFA_NO_STATISTICS__)
				if(unlikely(external)) __atomic_fetch_add(&cltr->stats->ready.push.extrn.attempt, 1, __ATOMIC_RELAXED);
				else if(local) __tls_stats()->ready.push.local.attempt++;
				else __tls_stats()->ready.push.share.attempt++;
			#endif

		#if defined(USE_MPSC)
			// mpsc always succeeds
		} while( false );
		#else
			// If we can't lock it retry
		} while( !__atomic_try_acquire( &lanes.data[i].lock ) );
		#endif

		// Actually push it
		push(lanes.data[i], thrd);

		#if !defined(USE_MPSC)
			// Unlock and return
			__atomic_unlock( &lanes.data[i].lock );
		#endif

		// Mark the current index in the tls rng instance as having an item
		__tls_rand_advance_bck();

		__cfadbg_print_safe(ready_queue, "Kernel : Pushed %p on cluster %p (idx: %u, mask %llu, first %d)\n", thrd, cltr, i, used.mask[0], lane_first);

		// Update statistics
		#if !defined(__CFA_NO_STATISTICS__)
			if(unlikely(external)) __atomic_fetch_add(&cltr->stats->ready.push.extrn.success, 1, __ATOMIC_RELAXED);
			else if(local) __tls_stats()->ready.push.local.success++;
			else __tls_stats()->ready.push.share.success++;
		#endif
	}

	// Pop from the ready queue of a given cluster
	__attribute__((hot)) $thread * pop_fast(struct cluster * cltr) with (cltr->ready_queue) {
		/* paranoid */ verify( lanes.count > 0 );
		/* paranoid */ verify( kernelTLS().this_processor );
		/* paranoid */ verify( kernelTLS().this_processor->rdq.id < lanes.count );

		unsigned count = __atomic_load_n( &lanes.count, __ATOMIC_RELAXED );
		int preferred = kernelTLS().this_processor->rdq.id;


		// As long as the list is not empty, try finding a lane that isn't empty and pop from it
		for(25) {
			// Pick two lists at random
			unsigned ri = __tls_rand_bck();
			unsigned rj = __tls_rand_bck();

			unsigned i, j;
			__attribute__((unused)) bool locali, localj;
			[i, locali] = idx_from_r(ri, preferred);
			[j, localj] = idx_from_r(rj, preferred);

			i %= count;
			j %= count;

			// try popping from the 2 picked lists
			struct $thread * thrd = try_pop(cltr, i, j __STATS(, *(locali || localj ? &__tls_stats()->ready.pop.local : &__tls_stats()->ready.pop.help)));
			if(thrd) {
				return thrd;
			}
		}

		// All lanes were empty; return 0p
		return 0p;
	}

	__attribute__((hot)) struct $thread * pop_slow(struct cluster * cltr) { return pop_fast(cltr); }
	__attribute__((hot)) struct $thread * pop_search(struct cluster * cltr) {
		return search(cltr);
	}
#endif
#if defined(USE_WORK_STEALING)
	__attribute__((hot)) void push(struct cluster * cltr, struct $thread * thrd) with (cltr->ready_queue) {
		__cfadbg_print_safe(ready_queue, "Kernel : Pushing %p on cluster %p\n", thrd, cltr);

		const bool external = (!kernelTLS().this_processor) || (cltr != kernelTLS().this_processor->cltr);
		/* paranoid */ verify(external || kernelTLS().this_processor->rdq.id < lanes.count );

		// write timestamp
		thrd->link.ts = rdtscl();

		// Try to pick a lane and lock it
		unsigned i;
		do {
			#if !defined(__CFA_NO_STATISTICS__)
				if(unlikely(external)) __atomic_fetch_add(&cltr->stats->ready.push.extrn.attempt, 1, __ATOMIC_RELAXED);
				else __tls_stats()->ready.push.local.attempt++;
			#endif

			if(unlikely(external)) {
				i = __tls_rand() % lanes.count;
			}
			else {
				processor * proc = kernelTLS().this_processor;
				unsigned r = proc->rdq.its++;
				i = proc->rdq.id + (r % READYQ_SHARD_FACTOR);
			}


		#if defined(USE_MPSC)
			// mpsc always succeeds
		} while( false );
		#else
			// If we can't lock it retry
		} while( !__atomic_try_acquire( &lanes.data[i].lock ) );
		#endif

		// Actually push it
		push(lanes.data[i], thrd);

		#if !defined(USE_MPSC)
			// Unlock and return
			__atomic_unlock( &lanes.data[i].lock );
		#endif

		#if !defined(__CFA_NO_STATISTICS__)
			if(unlikely(external)) __atomic_fetch_add(&cltr->stats->ready.push.extrn.success, 1, __ATOMIC_RELAXED);
			else __tls_stats()->ready.push.local.success++;
		#endif

		__cfadbg_print_safe(ready_queue, "Kernel : Pushed %p on cluster %p (idx: %u, mask %llu, first %d)\n", thrd, cltr, i, used.mask[0], lane_first);
	}

	// Pop from the ready queue of a given cluster
	__attribute__((hot)) $thread * pop_fast(struct cluster * cltr) with (cltr->ready_queue) {
		/* paranoid */ verify( lanes.count > 0 );
		/* paranoid */ verify( kernelTLS().this_processor );
		/* paranoid */ verify( kernelTLS().this_processor->rdq.id < lanes.count );

		processor * proc = kernelTLS().this_processor;

		if(proc->rdq.target == -1u) {
			proc->rdq.target = __tls_rand() % lanes.count;
			unsigned it1  = proc->rdq.itr;
			unsigned it2  = proc->rdq.itr + 1;
			unsigned idx1 = proc->rdq.id + (it1 % READYQ_SHARD_FACTOR);
			unsigned idx2 = proc->rdq.id + (it2 % READYQ_SHARD_FACTOR);
			unsigned long long tsc1 = ts(lanes.data[idx1]);
			unsigned long long tsc2 = ts(lanes.data[idx2]);
			proc->rdq.cutoff = min(tsc1, tsc2);
			if(proc->rdq.cutoff == 0) proc->rdq.cutoff = -1ull;
		}
		else {
			unsigned target = proc->rdq.target;
			proc->rdq.target = -1u;
			if(lanes.tscs[target].tv < proc->rdq.cutoff) {
				$thread * t = try_pop(cltr, target __STATS(, __tls_stats()->ready.pop.help));
				if(t) return t;
			}
		}

		for(READYQ_SHARD_FACTOR) {
			unsigned i = proc->rdq.id + (--proc->rdq.itr % READYQ_SHARD_FACTOR);
			if($thread * t = try_pop(cltr, i __STATS(, __tls_stats()->ready.pop.local))) return t;
		}
		return 0p;
	}

	__attribute__((hot)) struct $thread * pop_slow(struct cluster * cltr) with (cltr->ready_queue) {
		unsigned i = __tls_rand() % lanes.count;
		return try_pop(cltr, i __STATS(, __tls_stats()->ready.pop.steal));
	}

	__attribute__((hot)) struct $thread * pop_search(struct cluster * cltr) with (cltr->ready_queue) {
		return search(cltr);
	}
#endif

//=======================================================================
// Various Ready Queue utilities
//=======================================================================
// these functions work the same, or almost the same,
// whether they are using work-stealing or relaxed-fifo scheduling

//-----------------------------------------------------------------------
// try to pop from a lane given by index w
static inline struct $thread * try_pop(struct cluster * cltr, unsigned w __STATS(, __stats_readyQ_pop_t & stats)) with (cltr->ready_queue) {
	__STATS( stats.attempt++; )

	// Get relevant elements locally
	__intrusive_lane_t & lane = lanes.data[w];

	// If list looks empty retry
	if( is_empty(lane) ) {
		__STATS( stats.espec++; )
		return 0p;
	}

	// If we can't get the lock retry
	if( !__atomic_try_acquire(&lane.lock) ) {
		__STATS( stats.elock++; )
		return 0p;
	}

	// If list is empty, unlock and retry
	if( is_empty(lane) ) {
		__atomic_unlock(&lane.lock);
		__STATS( stats.eempty++; )
		return 0p;
	}

	// Actually pop the list
	struct $thread * thrd;
	thrd = pop(lane);

	/* paranoid */ verify(thrd);
	/* paranoid */ verify(lane.lock);

	// Unlock and return
	__atomic_unlock(&lane.lock);

	// Update statistics
	__STATS( stats.success++; )

	#if defined(USE_WORK_STEALING)
		lanes.tscs[w].tv = thrd->link.ts;
	#endif

	// return the popped thread
	return thrd;
}
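
// The pop above uses a check / try-lock / re-check pattern: the first emptiness test
// is a cheap speculative read that lets the caller skip the lane without contending on
// its lock, and the second test, done under the lock, makes the decision sound. A
// minimal generic sketch of the same pattern follows; the names and function-pointer
// parameters are hypothetical, not this file's API.
#if defined(__CFA_EXAMPLE_TRYPOP__) // sketch, not compiled
static inline bool example_try_take(volatile bool * lock, bool (*looks_empty)(void), bool (*take_one)(void)) {
	if( looks_empty() ) return false;                 // speculative check, no lock held
	if( !__atomic_try_acquire(lock) ) return false;   // someone else owns the lane, try elsewhere
	if( looks_empty() ) {                             // re-check now that the lane is locked
		__atomic_unlock(lock);
		return false;
	}
	bool taken = take_one();                          // safe: lane is locked and non-empty
	__atomic_unlock(lock);
	return taken;
}
#endif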

//-----------------------------------------------------------------------
// try to pop from any lane, making sure you don't miss any threads pushed
// before the start of the function
static inline struct $thread * search(struct cluster * cltr) with (cltr->ready_queue) {
	/* paranoid */ verify( lanes.count > 0 );
	unsigned count = __atomic_load_n( &lanes.count, __ATOMIC_RELAXED );
	unsigned offset = __tls_rand();
	for(i; count) {
		unsigned idx = (offset + i) % count;
		struct $thread * thrd = try_pop(cltr, idx __STATS(, __tls_stats()->ready.pop.search));
		if(thrd) {
			return thrd;
		}
	}

	// All lanes were empty; return 0p
	return 0p;
}

//-----------------------------------------------------------------------
// Check that all the intrusive queues in the data structure are still consistent
static void check( __ready_queue_t & q ) with (q) {
	#if defined(__CFA_WITH_VERIFY__) && !defined(USE_MPSC)
		{
			for( idx ; lanes.count ) {
				__intrusive_lane_t & sl = lanes.data[idx];
				assert(!lanes.data[idx].lock);

				assert(head(sl)->link.prev == 0p );
				assert(head(sl)->link.next->link.prev == head(sl) );
				assert(tail(sl)->link.next == 0p );
				assert(tail(sl)->link.prev->link.next == tail(sl) );

				if(is_empty(sl)) {
					assert(tail(sl)->link.prev == head(sl));
					assert(head(sl)->link.next == tail(sl));
				} else {
					assert(tail(sl)->link.prev != head(sl));
					assert(head(sl)->link.next != tail(sl));
				}
			}
		}
	#endif
}

//-----------------------------------------------------------------------
// Given 2 indexes, pick the list with the oldest push and try to pop from it
static inline struct $thread * try_pop(struct cluster * cltr, unsigned i, unsigned j __STATS(, __stats_readyQ_pop_t & stats)) with (cltr->ready_queue) {
	// Pick the best list
	int w = i;
	if( __builtin_expect(!is_empty(lanes.data[j]), true) ) {
		w = (ts(lanes.data[i]) < ts(lanes.data[j])) ? i : j;
	}

	return try_pop(cltr, w __STATS(, stats));
}
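
// This is the "power of two choices" heuristic: sampling two lanes and taking the one
// with the older head timestamp keeps lanes roughly balanced with far less contention
// than scanning them all. Worked example (illustrative numbers): if lane i's oldest
// element was pushed at tsc 1000 and lane j's at tsc 1400, the pop goes to lane i; if
// lane j is empty it is skipped and lane i is used unconditionally. The helper below
// is a sketch with hypothetical parameters.
#if defined(__CFA_EXAMPLE_2CHOICE__) // sketch, not compiled
static inline unsigned example_pick_older(unsigned i, unsigned long long ts_i, unsigned j, unsigned long long ts_j, bool j_empty) {
	if( j_empty ) return i;              // never prefer an empty lane
	return (ts_i < ts_j) ? i : j;        // smaller timestamp == older push == chosen lane
}
#endif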

// Call this function if the intrusive list was moved using memcpy;
// it fixes the list so that the pointers back to the anchors aren't left dangling
static inline void fix(__intrusive_lane_t & ll) {
	#if !defined(USE_MPSC)
		// if the list is not empty then follow the pointers and fix their reverse pointers
		if(!is_empty(ll)) {
			head(ll)->link.next->link.prev = head(ll);
			tail(ll)->link.prev->link.next = tail(ll);
		}
		// Otherwise just reset the list
		else {
			verify(tail(ll)->link.next == 0p);
			tail(ll)->link.prev = head(ll);
			head(ll)->link.next = tail(ll);
			verify(head(ll)->link.prev == 0p);
		}
	#endif
}
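
// Why fix() is needed, shown on a toy intrusive list: the head and tail anchors live
// inside the lane object itself, so the first element's prev and the last element's
// next point *into* the lane. When the lanes array is moved with realloc/memcpy (see
// ready_queue_grow/shrink below), those two pointers still reference the old storage
// and must be re-aimed at the new anchors. The toy types below are hypothetical.
#if defined(__CFA_EXAMPLE_FIX__) // sketch, not compiled
struct toy_node { struct toy_node * next; struct toy_node * prev; };
struct toy_lane { struct toy_node head; struct toy_node tail; unsigned count; };
static void toy_fix(struct toy_lane * l) {
	if( l->count != 0 ) {                 // non-empty: re-aim the boundary elements at the new anchors
		l->head.next->prev = &l->head;
		l->tail.prev->next = &l->tail;
	} else {                              // empty: the anchors just point at each other
		l->head.next = &l->tail;
		l->tail.prev = &l->head;
	}
}
#endif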

static void assign_list(unsigned & value, dlist(processor, processor) & list, unsigned count) {
	processor * it = &list`first;
	for(unsigned i = 0; i < count; i++) {
		/* paranoid */ verifyf( it, "Unexpected null iterator, at index %u of %u\n", i, count);
		it->rdq.id = value;
		it->rdq.target = -1u;
		value += READYQ_SHARD_FACTOR;
		it = &(*it)`next;
	}
}

static void reassign_cltr_id(struct cluster * cltr) {
	unsigned preferred = 0;
	assign_list(preferred, cltr->procs.actives, cltr->procs.total - cltr->procs.idle);
	assign_list(preferred, cltr->procs.idles  , cltr->procs.idle );
}

static void fix_times( struct cluster * cltr ) with( cltr->ready_queue ) {
	#if defined(USE_WORK_STEALING)
		lanes.tscs = alloc(lanes.count, lanes.tscs`realloc);
		for(i; lanes.count) {
			lanes.tscs[i].tv = ts(lanes.data[i]);
		}
	#endif
}

// Grow the ready queue
void ready_queue_grow(struct cluster * cltr) {
	size_t ncount;
	int target = cltr->procs.total;

	/* paranoid */ verify( ready_mutate_islocked() );
	__cfadbg_print_safe(ready_queue, "Kernel : Growing ready queue\n");

	// Make sure that everything is consistent
	/* paranoid */ check( cltr->ready_queue );

	// grow the ready queue
	with( cltr->ready_queue ) {
		// Find new count
		// Make sure we always have at least 1 list
		if(target >= 2) {
			ncount = target * READYQ_SHARD_FACTOR;
		} else {
			ncount = SEQUENTIAL_SHARD;
		}

		// Allocate new array (uses realloc and memcpies the data)
		lanes.data = alloc( ncount, lanes.data`realloc );

		// Fix the moved data
		for( idx; (size_t)lanes.count ) {
			fix(lanes.data[idx]);
		}

		// Construct new data
		for( idx; (size_t)lanes.count ~ ncount) {
			(lanes.data[idx]){};
		}

		// Update original
		lanes.count = ncount;
	}

	fix_times(cltr);

	reassign_cltr_id(cltr);

	// Make sure that everything is consistent
	/* paranoid */ check( cltr->ready_queue );

	__cfadbg_print_safe(ready_queue, "Kernel : Growing ready queue done\n");

	/* paranoid */ verify( ready_mutate_islocked() );
}
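
// Lane-count arithmetic used by grow (and by shrink below), shown as a sketch: each
// processor owns READYQ_SHARD_FACTOR consecutive lanes, except that a cluster with
// fewer than two processors falls back to SEQUENTIAL_SHARD lanes. For example, under
// the relaxed-FIFO configuration (READYQ_SHARD_FACTOR == 4, SEQUENTIAL_SHARD == 1),
// 3 processors give 12 lanes and 1 processor gives a single lane.
#if defined(__CFA_EXAMPLE_COUNT__) // sketch, not compiled
static size_t example_lane_count(int nprocs) {
	return (nprocs >= 2) ? (size_t)nprocs * READYQ_SHARD_FACTOR : SEQUENTIAL_SHARD;
}
#endif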

// Shrink the ready queue
void ready_queue_shrink(struct cluster * cltr) {
	/* paranoid */ verify( ready_mutate_islocked() );
	__cfadbg_print_safe(ready_queue, "Kernel : Shrinking ready queue\n");

	// Make sure that everything is consistent
	/* paranoid */ check( cltr->ready_queue );

	int target = cltr->procs.total;

	with( cltr->ready_queue ) {
		// Remember old count
		size_t ocount = lanes.count;

		// Find new count
		// Make sure we always have at least 1 list
		lanes.count = target >= 2 ? target * READYQ_SHARD_FACTOR : SEQUENTIAL_SHARD;
		/* paranoid */ verify( ocount >= lanes.count );
		/* paranoid */ verify( lanes.count == target * READYQ_SHARD_FACTOR || target < 2 );

		// for printing, count the number of displaced threads
		#if defined(__CFA_DEBUG_PRINT__) || defined(__CFA_DEBUG_PRINT_READY_QUEUE__)
			__attribute__((unused)) size_t displaced = 0;
		#endif

		// redistribute old data
		for( idx; (size_t)lanes.count ~ ocount) {
			// Lock is not strictly needed but makes checking invariants much easier
			__attribute__((unused)) bool locked = __atomic_try_acquire(&lanes.data[idx].lock);
			verify(locked);

			// As long as we can pop from this lane, push the threads somewhere else in the queue
			while(!is_empty(lanes.data[idx])) {
				struct $thread * thrd;
				thrd = pop(lanes.data[idx]);

				push(cltr, thrd);

				// for printing, count the number of displaced threads
				#if defined(__CFA_DEBUG_PRINT__) || defined(__CFA_DEBUG_PRINT_READY_QUEUE__)
					displaced++;
				#endif
			}

			// Unlock the lane
			__atomic_unlock(&lanes.data[idx].lock);

			// TODO print the queue statistics here

			^(lanes.data[idx]){};
		}

		__cfadbg_print_safe(ready_queue, "Kernel : Shrinking ready queue displaced %zu threads\n", displaced);

		// Allocate new array (uses realloc and memcpies the data)
		lanes.data = alloc( lanes.count, lanes.data`realloc );

		// Fix the moved data
		for( idx; (size_t)lanes.count ) {
			fix(lanes.data[idx]);
		}
	}

	fix_times(cltr);

	reassign_cltr_id(cltr);

	// Make sure that everything is consistent
	/* paranoid */ check( cltr->ready_queue );

	__cfadbg_print_safe(ready_queue, "Kernel : Shrinking ready queue done\n");
	/* paranoid */ verify( ready_mutate_islocked() );
}