source: libcfa/src/concurrency/ready_queue.cfa@ 6c5d92f

Last change on this file since 6c5d92f was d2fadeb, checked in by Thierry Delisle <tdelisle@…>, 4 years ago

Changed stats to make sense with relaxed fifo and work stealing

//
// Cforall Version 1.0.0 Copyright (C) 2019 University of Waterloo
//
// The contents of this file are covered under the licence agreement in the
// file "LICENCE" distributed with Cforall.
//
// ready_queue.cfa --
//
// Author           : Thierry Delisle
// Created On       : Mon Nov dd 16:29:18 2019
// Last Modified By :
// Last Modified On :
// Update Count     :
//

#define __cforall_thread__
// #define __CFA_DEBUG_PRINT_READY_QUEUE__

// #define USE_MPSC

#define USE_RELAXED_FIFO
// #define USE_WORK_STEALING

#include "bits/defs.hfa"
#include "kernel_private.hfa"

#define _GNU_SOURCE
#include "stdlib.hfa"
#include "math.hfa"

#include <unistd.h>

#include "ready_subqueue.hfa"

static const size_t cache_line_size = 64;

#if !defined(__CFA_NO_STATISTICS__)
	#define __STATS(...) __VA_ARGS__
#else
	#define __STATS(...)
#endif

// No overridden function, no environment variable, no define:
// fall back to a magic number
#ifndef __CFA_MAX_PROCESSORS__
	#define __CFA_MAX_PROCESSORS__ 1024
#endif

#if defined(USE_RELAXED_FIFO)
	#define BIAS 4
	#define READYQ_SHARD_FACTOR 4
	#define SEQUENTIAL_SHARD 1
#elif defined(USE_WORK_STEALING)
	#define READYQ_SHARD_FACTOR 2
	#define SEQUENTIAL_SHARD 2
#else
	#error no scheduling strategy selected
#endif
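// READYQ_SHARD_FACTOR : number of lanes each processor owns; the ready queue
//                       is sharded into procs * READYQ_SHARD_FACTOR lanes.
// SEQUENTIAL_SHARD    : lane count used while the cluster has fewer than 2
//                       processors.
// BIAS                : for relaxed FIFO, a push picks one of the local lanes
//                       with probability (BIAS - 1) / BIAS, any lane otherwise
//                       (see idx_from_r below).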

static inline struct $thread * try_pop(struct cluster * cltr, unsigned w __STATS(, __stats_readyQ_pop_t & stats));
static inline struct $thread * try_pop(struct cluster * cltr, unsigned i, unsigned j __STATS(, __stats_readyQ_pop_t & stats));
static inline struct $thread * search(struct cluster * cltr);
static inline [unsigned, bool] idx_from_r(unsigned r, int preferred);


// returns the maximum number of processors the RWLock supports
__attribute__((weak)) unsigned __max_processors() {
	const char * max_cores_s = getenv("CFA_MAX_PROCESSORS");
	if(!max_cores_s) {
		__cfadbg_print_nolock(ready_queue, "No CFA_MAX_PROCESSORS in ENV\n");
		return __CFA_MAX_PROCESSORS__;
	}

	char * endptr = 0p;
	long int max_cores_l = strtol(max_cores_s, &endptr, 10);
	if(max_cores_l < 1 || max_cores_l > 65535) {
		__cfadbg_print_nolock(ready_queue, "CFA_MAX_PROCESSORS out of range : %ld\n", max_cores_l);
		return __CFA_MAX_PROCESSORS__;
	}
	if('\0' != *endptr) {
		__cfadbg_print_nolock(ready_queue, "CFA_MAX_PROCESSORS not a decimal number : %s\n", max_cores_s);
		return __CFA_MAX_PROCESSORS__;
	}

	return max_cores_l;
}

//=======================================================================
// Cluster wide reader-writer lock
//=======================================================================
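// Scalable reader-writer lock : each registered processor owns one cache-line
// aligned entry with its own lock bit. The writer (ready_mutate_lock) takes
// the global 'lock' flag and then every per-processor lock, so readers only
// ever touch their own entry in the common case.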
void ?{}(__scheduler_RWLock_t & this) {
	this.max   = __max_processors();
	this.alloc = 0;
	this.ready = 0;
	this.lock  = false;
	this.data  = alloc(this.max);

	/*paranoid*/ verify( 0 == (((uintptr_t)(this.data    )) % 64) );
	/*paranoid*/ verify( 0 == (((uintptr_t)(this.data + 1)) % 64) );
	/*paranoid*/ verify(__atomic_is_lock_free(sizeof(this.alloc), &this.alloc));
	/*paranoid*/ verify(__atomic_is_lock_free(sizeof(this.ready), &this.ready));
}
void ^?{}(__scheduler_RWLock_t & this) {
	free(this.data);
}

void ?{}( __scheduler_lock_id_t & this, __processor_id_t * proc ) {
	this.handle = proc;
	this.lock   = false;
	#ifdef __CFA_WITH_VERIFY__
		this.owned  = false;
	#endif
}

//=======================================================================
// Lock-Free registering/unregistering of threads
void register_proc_id( struct __processor_id_t * proc ) with(*__scheduler_lock) {
	__cfadbg_print_safe(ready_queue, "Kernel : Registering proc %p for RW-Lock\n", proc);

	// Step - 1 : check if there is already space in the data
	uint_fast32_t s = ready;

	// Check among all the ready
	for(uint_fast32_t i = 0; i < s; i++) {
		__processor_id_t * null = 0p; // Re-write every loop since compare thrashes it
		if( __atomic_load_n(&data[i].handle, (int)__ATOMIC_RELAXED) == null
			&& __atomic_compare_exchange_n( &data[i].handle, &null, proc, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) {
			/*paranoid*/ verify(i < ready);
			/*paranoid*/ verify(0 == (__alignof__(data[i]) % cache_line_size));
			/*paranoid*/ verify((((uintptr_t)&data[i]) % cache_line_size) == 0);
			proc->id = i;
			return; // slot reused, no new slot needed
		}
	}

	if(max <= alloc) abort("Trying to create more than %u processors", __scheduler_lock->max);

	// Step - 2 : F&A to get a new spot in the array.
	uint_fast32_t n = __atomic_fetch_add(&alloc, 1, __ATOMIC_SEQ_CST);
	if(max <= n) abort("Trying to create more than %u processors", __scheduler_lock->max);

	// Step - 3 : Mark space as used and then publish it.
	__scheduler_lock_id_t * storage = (__scheduler_lock_id_t *)&data[n];
	(*storage){ proc };
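	// Publish in order : wait until every earlier slot is published (ready == n),
	// then bump ready to n + 1. Readers thus only ever see fully-initialized
	// slots in the range [0, ready).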
	while() {
		unsigned copy = n;
		if( __atomic_load_n(&ready, __ATOMIC_RELAXED) == n
			&& __atomic_compare_exchange_n(&ready, &copy, n + 1, true, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
			break;
		Pause();
	}

	__cfadbg_print_safe(ready_queue, "Kernel : Registering proc %p done, id %lu\n", proc, n);

	// Return new spot.
	/*paranoid*/ verify(n < ready);
	/*paranoid*/ verify(__alignof__(data[n]) == (2 * cache_line_size));
	/*paranoid*/ verify((((uintptr_t)&data[n]) % cache_line_size) == 0);
	proc->id = n;
}

void unregister_proc_id( struct __processor_id_t * proc ) with(*__scheduler_lock) {
	unsigned id = proc->id;
	/*paranoid*/ verify(id < ready);
	/*paranoid*/ verify(proc == __atomic_load_n(&data[id].handle, __ATOMIC_RELAXED));
	__atomic_store_n(&data[id].handle, 0p, __ATOMIC_RELEASE);

	__cfadbg_print_safe(ready_queue, "Kernel : Unregister proc %p\n", proc);
}

//-----------------------------------------------------------------------
// Writer side : acquire when changing the ready queue, e.g. adding more
// queues or removing them.
uint_fast32_t ready_mutate_lock( void ) with(*__scheduler_lock) {
	/* paranoid */ verify( ! __preemption_enabled() );

	// Step 1 : lock global lock
	// It is needed to prevent processors that register mid critical-section
	// from simply locking their own lock and entering.
	__atomic_acquire( &lock );

	// Step 2 : lock per-proc lock
	// Processors that are currently being registered aren't counted
	// but can't be in read_lock or in the critical section.
	// All other processors are counted
	uint_fast32_t s = ready;
	for(uint_fast32_t i = 0; i < s; i++) {
		__atomic_acquire( &data[i].lock );
	}

	/* paranoid */ verify( ! __preemption_enabled() );
	return s;
}

void ready_mutate_unlock( uint_fast32_t last_s ) with(*__scheduler_lock) {
	/* paranoid */ verify( ! __preemption_enabled() );

	// Step 1 : release local locks
	// This must be done while the global lock is held to avoid
	// threads that were created mid critical-section
	// racing to lock their local locks and having the writer
	// immediately unlock them
	// Alternative solution : return s in write_lock and pass it to write_unlock
	for(uint_fast32_t i = 0; i < last_s; i++) {
		verify(data[i].lock);
		__atomic_store_n(&data[i].lock, (bool)false, __ATOMIC_RELEASE);
	}

	// Step 2 : release global lock
	/*paranoid*/ assert(true == lock);
	__atomic_store_n(&lock, (bool)false, __ATOMIC_RELEASE);

	/* paranoid */ verify( ! __preemption_enabled() );
}
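// Typical writer-side usage :
//   uint_fast32_t s = ready_mutate_lock();
//   ... structural changes to the ready queue, e.g. ready_queue_grow ...
//   ready_mutate_unlock( s );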

//=======================================================================
// Cforall Ready Queue used for scheduling
//=======================================================================
void ?{}(__ready_queue_t & this) with (this) {
	lanes.data  = 0p;
	lanes.tscs  = 0p;
	lanes.count = 0;
}

void ^?{}(__ready_queue_t & this) with (this) {
	verify( SEQUENTIAL_SHARD == lanes.count );
	free(lanes.data);
	free(lanes.tscs);
}

//-----------------------------------------------------------------------
#if defined(USE_RELAXED_FIFO)
	//-----------------------------------------------------------------------
	// get index from random number with or without bias towards queues
	static inline [unsigned, bool] idx_from_r(unsigned r, int preferred) {
		unsigned i;
		bool local;
		unsigned rlow  = r % BIAS;
		unsigned rhigh = r / BIAS;
		if((0 != rlow) && preferred >= 0) {
			// (BIAS - 1) out of BIAS chances
			// Use preferred queues
			i = preferred + (rhigh % READYQ_SHARD_FACTOR);
			local = true;
		}
		else {
			// 1 out of BIAS chances
			// Use all queues
			i = rhigh;
			local = false;
		}
		return [i, local];
	}
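	// e.g., with BIAS == 4 and READYQ_SHARD_FACTOR == 4 : rlow != 0 in 3 cases
	// out of 4, so ~75% of internal pushes go to one of the 4 lanes starting at
	// 'preferred', and ~25% (plus all external pushes) go to a fully random lane.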

	__attribute__((hot)) void push(struct cluster * cltr, struct $thread * thrd) with (cltr->ready_queue) {
		__cfadbg_print_safe(ready_queue, "Kernel : Pushing %p on cluster %p\n", thrd, cltr);

		const bool external = (!kernelTLS().this_processor) || (cltr != kernelTLS().this_processor->cltr);
		/* paranoid */ verify(external || kernelTLS().this_processor->rdq.id < lanes.count );

		// write timestamp
		thrd->link.ts = rdtscl();

		bool local;
		int preferred = external ? -1 : kernelTLS().this_processor->rdq.id;

		// Try to pick a lane and lock it
		unsigned i;
		do {
			// Pick the index of a lane
			unsigned r = __tls_rand_fwd();
			[i, local] = idx_from_r(r, preferred);

			i %= __atomic_load_n( &lanes.count, __ATOMIC_RELAXED );

			#if !defined(__CFA_NO_STATISTICS__)
				if(unlikely(external)) __atomic_fetch_add(&cltr->stats->ready.push.extrn.attempt, 1, __ATOMIC_RELAXED);
				else if(local) __tls_stats()->ready.push.local.attempt++;
				else __tls_stats()->ready.push.share.attempt++;
			#endif

		#if defined(USE_MPSC)
			// mpsc always succeeds
		} while( false );
		#else
			// If we can't lock it retry
		} while( !__atomic_try_acquire( &lanes.data[i].lock ) );
		#endif

		// Actually push it
		push(lanes.data[i], thrd);

		#if !defined(USE_MPSC)
			// Unlock and return
			__atomic_unlock( &lanes.data[i].lock );
		#endif

		// Mark the current index in the tls rng instance as having an item
		__tls_rand_advance_bck();
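		// (__tls_rand_fwd / __tls_rand_bck form a reversible PRNG : push draws
		// indices from the forward stream and commits them here, so pop_fast
		// can re-derive the same indices from the backward stream, biasing
		// pops toward lanes that recently received work.)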

		__cfadbg_print_safe(ready_queue, "Kernel : Pushed %p on cluster %p (idx: %u)\n", thrd, cltr, i);

		// Update statistics
		#if !defined(__CFA_NO_STATISTICS__)
			if(unlikely(external)) __atomic_fetch_add(&cltr->stats->ready.push.extrn.success, 1, __ATOMIC_RELAXED);
			else if(local) __tls_stats()->ready.push.local.success++;
			else __tls_stats()->ready.push.share.success++;
		#endif
	}

	// Pop from the ready queue from a given cluster
	__attribute__((hot)) $thread * pop_fast(struct cluster * cltr) with (cltr->ready_queue) {
		/* paranoid */ verify( lanes.count > 0 );
		/* paranoid */ verify( kernelTLS().this_processor );
		/* paranoid */ verify( kernelTLS().this_processor->rdq.id < lanes.count );

		unsigned count = __atomic_load_n( &lanes.count, __ATOMIC_RELAXED );
		int preferred = kernelTLS().this_processor->rdq.id;

		// As long as the list is not empty, try finding a lane that isn't empty and pop from it
		for(25) {
			// Pick two lists at random
			unsigned ri = __tls_rand_bck();
			unsigned rj = __tls_rand_bck();

			unsigned i, j;
			__attribute__((unused)) bool locali, localj;
			[i, locali] = idx_from_r(ri, preferred);
			[j, localj] = idx_from_r(rj, preferred);

			i %= count;
			j %= count;

			// try popping from the 2 picked lists
			struct $thread * thrd = try_pop(cltr, i, j __STATS(, *(locali || localj ? &__tls_stats()->ready.pop.local : &__tls_stats()->ready.pop.help)));
			if(thrd) {
				return thrd;
			}
		}

		// All lanes were empty; return 0p
		return 0p;
	}

	__attribute__((hot)) struct $thread * pop_slow(struct cluster * cltr) {
		return search(cltr);
	}
#endif
#if defined(USE_WORK_STEALING)
	__attribute__((hot)) void push(struct cluster * cltr, struct $thread * thrd) with (cltr->ready_queue) {
		__cfadbg_print_safe(ready_queue, "Kernel : Pushing %p on cluster %p\n", thrd, cltr);

		const bool external = (!kernelTLS().this_processor) || (cltr != kernelTLS().this_processor->cltr);
		/* paranoid */ verify(external || kernelTLS().this_processor->rdq.id < lanes.count );

		// write timestamp
		thrd->link.ts = rdtscl();

		// Try to pick a lane and lock it
		unsigned i;
		do {
			#if !defined(__CFA_NO_STATISTICS__)
				if(unlikely(external)) __atomic_fetch_add(&cltr->stats->ready.push.extrn.attempt, 1, __ATOMIC_RELAXED);
				else __tls_stats()->ready.push.local.attempt++;
			#endif

			if(unlikely(external)) {
				i = __tls_rand() % lanes.count;
			}
			else {
				processor * proc = kernelTLS().this_processor;
				unsigned r = proc->rdq.its++;
				i = proc->rdq.id + (r % READYQ_SHARD_FACTOR);
			}

		#if defined(USE_MPSC)
			// mpsc always succeeds
		} while( false );
		#else
			// If we can't lock it retry
		} while( !__atomic_try_acquire( &lanes.data[i].lock ) );
		#endif

		// Actually push it
		push(lanes.data[i], thrd);

		#if !defined(USE_MPSC)
			// Unlock and return
			__atomic_unlock( &lanes.data[i].lock );
		#endif

		#if !defined(__CFA_NO_STATISTICS__)
			if(unlikely(external)) __atomic_fetch_add(&cltr->stats->ready.push.extrn.success, 1, __ATOMIC_RELAXED);
			else __tls_stats()->ready.push.local.success++;
		#endif

		__cfadbg_print_safe(ready_queue, "Kernel : Pushed %p on cluster %p (idx: %u)\n", thrd, cltr, i);
	}

	// Pop from the ready queue from a given cluster
	__attribute__((hot)) $thread * pop_fast(struct cluster * cltr) with (cltr->ready_queue) {
		/* paranoid */ verify( lanes.count > 0 );
		/* paranoid */ verify( kernelTLS().this_processor );
		/* paranoid */ verify( kernelTLS().this_processor->rdq.id < lanes.count );

		processor * proc = kernelTLS().this_processor;

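		// Stealing heuristic : pick a random victim lane once, and compute a
		// cutoff from the timestamps of two of our own lanes. Only try the
		// victim if its last recorded timestamp is older than the cutoff,
		// i.e. only help lanes that lag behind our own backlog.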
		if(proc->rdq.target == -1u) {
			proc->rdq.target = __tls_rand() % lanes.count;
			unsigned it1  = proc->rdq.itr;
			unsigned it2  = proc->rdq.itr + 1;
			unsigned idx1 = proc->rdq.id + (it1 % READYQ_SHARD_FACTOR);
			unsigned idx2 = proc->rdq.id + (it2 % READYQ_SHARD_FACTOR);
			unsigned long long tsc1 = ts(lanes.data[idx1]);
			unsigned long long tsc2 = ts(lanes.data[idx2]);
			proc->rdq.cutoff = min(tsc1, tsc2);
		}
		else if(lanes.tscs[proc->rdq.target].tv < proc->rdq.cutoff) {
			$thread * t = try_pop(cltr, proc->rdq.target __STATS(, __tls_stats()->ready.pop.help));
			proc->rdq.target = -1u;
			if(t) return t;
		}

		for(READYQ_SHARD_FACTOR) {
			unsigned i = proc->rdq.id + (--proc->rdq.itr % READYQ_SHARD_FACTOR);
			if($thread * t = try_pop(cltr, i __STATS(, __tls_stats()->ready.pop.local))) return t;
		}
		return 0p;
	}

	__attribute__((hot)) struct $thread * pop_slow(struct cluster * cltr) with (cltr->ready_queue) {
		for(25) {
			unsigned i = __tls_rand() % lanes.count;
			$thread * t = try_pop(cltr, i __STATS(, __tls_stats()->ready.pop.steal));
			if(t) return t;
		}

		return search(cltr);
	}
#endif

//=======================================================================
// Various Ready Queue utilities
//=======================================================================
// These functions work the same, or almost the same,
// whether they use work-stealing or relaxed-FIFO scheduling.

//-----------------------------------------------------------------------
// try to pop from a lane given by index w
static inline struct $thread * try_pop(struct cluster * cltr, unsigned w __STATS(, __stats_readyQ_pop_t & stats)) with (cltr->ready_queue) {
	__STATS( stats.attempt++; )

	// Get relevant elements locally
	__intrusive_lane_t & lane = lanes.data[w];

	// If list looks empty retry
	if( is_empty(lane) ) {
		__STATS( stats.espec++; )
		return 0p;
	}

	// If we can't get the lock retry
	if( !__atomic_try_acquire(&lane.lock) ) {
		__STATS( stats.elock++; )
		return 0p;
	}

	// If list is empty, unlock and retry
	if( is_empty(lane) ) {
		__atomic_unlock(&lane.lock);
		__STATS( stats.eempty++; )
		return 0p;
	}

	// Actually pop the list
	struct $thread * thrd;
	thrd = pop(lane);

	/* paranoid */ verify(thrd);
	/* paranoid */ verify(lane.lock);

	// Unlock and return
	__atomic_unlock(&lane.lock);

	// Update statistics
	__STATS( stats.success++; )

	#if defined(USE_WORK_STEALING)
		lanes.tscs[w].tv = thrd->link.ts;
	#endif

	// return the popped thread
	return thrd;
}

//-----------------------------------------------------------------------
// try to pop from any lane, making sure not to miss any thread pushed
// before the start of the function
static inline struct $thread * search(struct cluster * cltr) with (cltr->ready_queue) {
	/* paranoid */ verify( lanes.count > 0 );
	unsigned count = __atomic_load_n( &lanes.count, __ATOMIC_RELAXED );
	unsigned offset = __tls_rand();
	for(i; count) {
		unsigned idx = (offset + i) % count;
		struct $thread * thrd = try_pop(cltr, idx __STATS(, __tls_stats()->ready.pop.search));
		if(thrd) {
			return thrd;
		}
	}

	// All lanes were empty; return 0p
	return 0p;
}

//-----------------------------------------------------------------------
// Check that all the intrusive queues in the data structure are still consistent
static void check( __ready_queue_t & q ) with (q) {
	#if defined(__CFA_WITH_VERIFY__) && !defined(USE_MPSC)
		{
			for( idx ; lanes.count ) {
				__intrusive_lane_t & sl = lanes.data[idx];
				assert(!lanes.data[idx].lock);

				assert(head(sl)->link.prev == 0p );
				assert(head(sl)->link.next->link.prev == head(sl) );
				assert(tail(sl)->link.next == 0p );
				assert(tail(sl)->link.prev->link.next == tail(sl) );

				if(is_empty(sl)) {
					assert(tail(sl)->link.prev == head(sl));
					assert(head(sl)->link.next == tail(sl));
				} else {
					assert(tail(sl)->link.prev != head(sl));
					assert(head(sl)->link.next != tail(sl));
				}
			}
		}
	#endif
}

//-----------------------------------------------------------------------
// Given 2 indexes, pick the list with the oldest push and try to pop from it
static inline struct $thread * try_pop(struct cluster * cltr, unsigned i, unsigned j __STATS(, __stats_readyQ_pop_t & stats)) with (cltr->ready_queue) {
	// Pick the best list
	int w = i;
	if( __builtin_expect(!is_empty(lanes.data[j]), true) ) {
		w = (ts(lanes.data[i]) < ts(lanes.data[j])) ? i : j;
	}

	return try_pop(cltr, w __STATS(, stats));
}

// Call this function if the intrusive list was moved using memcpy;
// it fixes the list so that the pointers back to anchors aren't left dangling
static inline void fix(__intrusive_lane_t & ll) {
	#if !defined(USE_MPSC)
		// if the list is not empty then follow the pointers and fix the reverse links
		if(!is_empty(ll)) {
			head(ll)->link.next->link.prev = head(ll);
			tail(ll)->link.prev->link.next = tail(ll);
		}
		// Otherwise just reset the list
		else {
			verify(tail(ll)->link.next == 0p);
			tail(ll)->link.prev = head(ll);
			head(ll)->link.next = tail(ll);
			verify(head(ll)->link.prev == 0p);
		}
	#endif
}

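// Assign each processor a contiguous block of READYQ_SHARD_FACTOR lanes,
// starting at 'value'; the block start is stored in proc->rdq.id and used by
// push/pop as the processor's preferred lanes.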
static void assign_list(unsigned & value, dlist(processor, processor) & list, unsigned count) {
	processor * it = &list`first;
	for(unsigned i = 0; i < count; i++) {
		/* paranoid */ verifyf( it, "Unexpected null iterator, at index %u of %u\n", i, count);
		it->rdq.id = value;
		it->rdq.target = -1u;
		value += READYQ_SHARD_FACTOR;
		it = &(*it)`next;
	}
}

static void reassign_cltr_id(struct cluster * cltr) {
	unsigned preferred = 0;
	assign_list(preferred, cltr->procs.actives, cltr->procs.total - cltr->procs.idle);
	assign_list(preferred, cltr->procs.idles  , cltr->procs.idle );
}

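// Resize the per-lane timestamp array to the new lane count and refresh the
// timestamps from the lanes themselves (work-stealing only).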
static void fix_times( struct cluster * cltr ) with( cltr->ready_queue ) {
	#if defined(USE_WORK_STEALING)
		lanes.tscs = alloc(lanes.count, lanes.tscs`realloc);
		for(i; lanes.count) {
			lanes.tscs[i].tv = ts(lanes.data[i]);
		}
	#endif
}

// Grow the ready queue
void ready_queue_grow(struct cluster * cltr) {
	size_t ncount;
	int target = cltr->procs.total;

	/* paranoid */ verify( ready_mutate_islocked() );
	__cfadbg_print_safe(ready_queue, "Kernel : Growing ready queue\n");

	// Make sure that everything is consistent
	/* paranoid */ check( cltr->ready_queue );

	// grow the ready queue
	with( cltr->ready_queue ) {
		// Find new count
		// Make sure we always have at least 1 list
		if(target >= 2) {
			ncount = target * READYQ_SHARD_FACTOR;
		} else {
			ncount = SEQUENTIAL_SHARD;
		}

		// Allocate new array (uses realloc and memcpies the data)
		lanes.data = alloc( ncount, lanes.data`realloc );

		// Fix the moved data
		for( idx; (size_t)lanes.count ) {
			fix(lanes.data[idx]);
		}

		// Construct new data
		for( idx; (size_t)lanes.count ~ ncount) {
			(lanes.data[idx]){};
		}

		// Update original
		lanes.count = ncount;
	}

	fix_times(cltr);

	reassign_cltr_id(cltr);

	// Make sure that everything is consistent
	/* paranoid */ check( cltr->ready_queue );

	__cfadbg_print_safe(ready_queue, "Kernel : Growing ready queue done\n");

	/* paranoid */ verify( ready_mutate_islocked() );
}

// Shrink the ready queue
void ready_queue_shrink(struct cluster * cltr) {
	/* paranoid */ verify( ready_mutate_islocked() );
	__cfadbg_print_safe(ready_queue, "Kernel : Shrinking ready queue\n");

	// Make sure that everything is consistent
	/* paranoid */ check( cltr->ready_queue );

	int target = cltr->procs.total;

	with( cltr->ready_queue ) {
		// Remember old count
		size_t ocount = lanes.count;

		// Find new count
		// Make sure we always have at least 1 list
		lanes.count = target >= 2 ? target * READYQ_SHARD_FACTOR : SEQUENTIAL_SHARD;
		/* paranoid */ verify( ocount >= lanes.count );
		/* paranoid */ verify( lanes.count == target * READYQ_SHARD_FACTOR || target < 2 );

		// for printing, count the number of displaced threads
		#if defined(__CFA_DEBUG_PRINT__) || defined(__CFA_DEBUG_PRINT_READY_QUEUE__)
			__attribute__((unused)) size_t displaced = 0;
		#endif

		// redistribute old data
		for( idx; (size_t)lanes.count ~ ocount) {
			// Lock is not strictly needed but makes checking invariants much easier
			__attribute__((unused)) bool locked = __atomic_try_acquire(&lanes.data[idx].lock);
			verify(locked);

			// As long as we can pop from this lane, push the threads somewhere else in the queue
			while(!is_empty(lanes.data[idx])) {
				struct $thread * thrd;
				thrd = pop(lanes.data[idx]);

				push(cltr, thrd);

				// for printing, count the number of displaced threads
				#if defined(__CFA_DEBUG_PRINT__) || defined(__CFA_DEBUG_PRINT_READY_QUEUE__)
					displaced++;
				#endif
			}

			// Unlock the lane
			__atomic_unlock(&lanes.data[idx].lock);

			// TODO print the queue statistics here

			^(lanes.data[idx]){};
		}

		__cfadbg_print_safe(ready_queue, "Kernel : Shrinking ready queue displaced %zu threads\n", displaced);

		// Allocate new array (uses realloc and memcpies the data)
		lanes.data = alloc( lanes.count, lanes.data`realloc );

		// Fix the moved data
		for( idx; (size_t)lanes.count ) {
			fix(lanes.data[idx]);
		}
	}

	fix_times(cltr);

	reassign_cltr_id(cltr);

	// Make sure that everything is consistent
	/* paranoid */ check( cltr->ready_queue );

	__cfadbg_print_safe(ready_queue, "Kernel : Shrinking ready queue done\n");
	/* paranoid */ verify( ready_mutate_islocked() );
}