source: libcfa/src/concurrency/ready_queue.cfa@ e54d0c3

Last change on this file since e54d0c3 was 5f6a172, checked in by Thierry Delisle <tdelisle@…>, 5 years ago

Fix assertions on ready_queue with work stealing

//
// Cforall Version 1.0.0 Copyright (C) 2019 University of Waterloo
//
// The contents of this file are covered under the licence agreement in the
// file "LICENCE" distributed with Cforall.
//
// ready_queue.cfa --
//
// Author           : Thierry Delisle
// Created On       : Mon Nov dd 16:29:18 2019
// Last Modified By :
// Last Modified On :
// Update Count     :
//

#define __cforall_thread__
// #define __CFA_DEBUG_PRINT_READY_QUEUE__

// #define USE_MPSC

#define USE_RELAXED_FIFO
// #define USE_WORK_STEALING

#include "bits/defs.hfa"
#include "kernel_private.hfa"

#define _GNU_SOURCE
#include "stdlib.hfa"
#include "math.hfa"

#include <unistd.h>

#include "ready_subqueue.hfa"

static const size_t cache_line_size = 64;

// No overridden function, no environment variable, no define:
// fall back to a magic number
#ifndef __CFA_MAX_PROCESSORS__
	#define __CFA_MAX_PROCESSORS__ 1024
#endif

#if defined(USE_RELAXED_FIFO)
	#define BIAS 4
	#define READYQ_SHARD_FACTOR 4
	#define SEQUENTIAL_SHARD 1
#elif defined(USE_WORK_STEALING)
	#define READYQ_SHARD_FACTOR 2
	#define SEQUENTIAL_SHARD 2
#else
	#error no scheduling strategy selected
#endif

static inline [unsigned, bool] idx_from_r(unsigned r, unsigned preferred);
static inline struct $thread * try_pop(struct cluster * cltr, unsigned w);
static inline struct $thread * try_pop(struct cluster * cltr, unsigned i, unsigned j);
static inline struct $thread * search(struct cluster * cltr);

// returns the maximum number of processors the RWLock supports
__attribute__((weak)) unsigned __max_processors() {
	const char * max_cores_s = getenv("CFA_MAX_PROCESSORS");
	if(!max_cores_s) {
		__cfadbg_print_nolock(ready_queue, "No CFA_MAX_PROCESSORS in ENV\n");
		return __CFA_MAX_PROCESSORS__;
	}

	char * endptr = 0p;
	long int max_cores_l = strtol(max_cores_s, &endptr, 10);
	if(max_cores_l < 1 || max_cores_l > 65535) {
		__cfadbg_print_nolock(ready_queue, "CFA_MAX_PROCESSORS out of range : %ld\n", max_cores_l);
		return __CFA_MAX_PROCESSORS__;
	}
	if('\0' != *endptr) {
		__cfadbg_print_nolock(ready_queue, "CFA_MAX_PROCESSORS not a decimal number : %s\n", max_cores_s);
		return __CFA_MAX_PROCESSORS__;
	}

	return max_cores_l;
}
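
// Because __max_processors is a weak symbol, the limit can also be overridden
// at link time instead of through the CFA_MAX_PROCESSORS environment variable
// or the __CFA_MAX_PROCESSORS__ define. A minimal sketch of such an override
// (hypothetical user code, shown commented out, not part of this file):
//
//   unsigned __max_processors() { return 64; }   // strong definition replaces the weak default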

//=======================================================================
// Cluster wide reader-writer lock
//=======================================================================
void ?{}(__scheduler_RWLock_t & this) {
	this.max   = __max_processors();
	this.alloc = 0;
	this.ready = 0;
	this.lock  = false;
	this.data  = alloc(this.max);

	/*paranoid*/ verify( 0 == (((uintptr_t)(this.data    )) % 64) );
	/*paranoid*/ verify( 0 == (((uintptr_t)(this.data + 1)) % 64) );
	/*paranoid*/ verify(__atomic_is_lock_free(sizeof(this.alloc), &this.alloc));
	/*paranoid*/ verify(__atomic_is_lock_free(sizeof(this.ready), &this.ready));

}
void ^?{}(__scheduler_RWLock_t & this) {
	free(this.data);
}

void ?{}( __scheduler_lock_id_t & this, __processor_id_t * proc ) {
	this.handle = proc;
	this.lock   = false;
	#ifdef __CFA_WITH_VERIFY__
		this.owned  = false;
	#endif
}

//=======================================================================
// Lock-Free registering/unregistering of threads
void register_proc_id( struct __processor_id_t * proc ) with(*__scheduler_lock) {
	__cfadbg_print_safe(ready_queue, "Kernel : Registering proc %p for RW-Lock\n", proc);

	// Step - 1 : check if there is already space in the data
	uint_fast32_t s = ready;

	// Check among all the ready
	for(uint_fast32_t i = 0; i < s; i++) {
		__processor_id_t * null = 0p; // Re-write every loop since compare thrashes it
		if( __atomic_load_n(&data[i].handle, (int)__ATOMIC_RELAXED) == null
			&& __atomic_compare_exchange_n( &data[i].handle, &null, proc, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) {
			/*paranoid*/ verify(i < ready);
			/*paranoid*/ verify(0 == (__alignof__(data[i]) % cache_line_size));
			/*paranoid*/ verify((((uintptr_t)&data[i]) % cache_line_size) == 0);
			proc->id = i;
		}
	}

	if(max <= alloc) abort("Trying to create more than %ud processors", __scheduler_lock->max);

	// Step - 2 : F&A to get a new spot in the array.
	uint_fast32_t n = __atomic_fetch_add(&alloc, 1, __ATOMIC_SEQ_CST);
	if(max <= n) abort("Trying to create more than %ud processors", __scheduler_lock->max);

	// Step - 3 : Mark space as used and then publish it.
	__scheduler_lock_id_t * storage = (__scheduler_lock_id_t *)&data[n];
	(*storage){ proc };
	while() {
		unsigned copy = n;
		if( __atomic_load_n(&ready, __ATOMIC_RELAXED) == n
			&& __atomic_compare_exchange_n(&ready, &copy, n + 1, true, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
			break;
		Pause();
	}

	__cfadbg_print_safe(ready_queue, "Kernel : Registering proc %p done, id %lu\n", proc, n);

	// Return new spot.
	/*paranoid*/ verify(n < ready);
	/*paranoid*/ verify(__alignof__(data[n]) == (2 * cache_line_size));
	/*paranoid*/ verify((((uintptr_t)&data[n]) % cache_line_size) == 0);
	proc->id = n;
}
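
// Note on Step 3 above: each registering processor spins until `ready` equals
// its own slot index n before bumping it to n + 1, so slots are published
// strictly in order and readers iterating over [0, ready) never observe an
// entry that has not been fully constructed.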

void unregister_proc_id( struct __processor_id_t * proc ) with(*__scheduler_lock) {
	unsigned id = proc->id;
	/*paranoid*/ verify(id < ready);
	/*paranoid*/ verify(proc == __atomic_load_n(&data[id].handle, __ATOMIC_RELAXED));
	__atomic_store_n(&data[id].handle, 0p, __ATOMIC_RELEASE);

	__cfadbg_print_safe(ready_queue, "Kernel : Unregister proc %p\n", proc);
}

//-----------------------------------------------------------------------
// Writer side : acquire when changing the ready queue, e.g. adding more
// queues or removing them.
uint_fast32_t ready_mutate_lock( void ) with(*__scheduler_lock) {
	/* paranoid */ verify( ! __preemption_enabled() );

	// Step 1 : lock global lock
	// It is needed to avoid processors that register mid Critical-Section
	// to simply lock their own lock and enter.
	__atomic_acquire( &lock );

	// Step 2 : lock per-proc lock
	// Processors that are currently being registered aren't counted
	// but can't be in read_lock or in the critical section.
	// All other processors are counted
	uint_fast32_t s = ready;
	for(uint_fast32_t i = 0; i < s; i++) {
		__atomic_acquire( &data[i].lock );
	}

	/* paranoid */ verify( ! __preemption_enabled() );
	return s;
}

void ready_mutate_unlock( uint_fast32_t last_s ) with(*__scheduler_lock) {
	/* paranoid */ verify( ! __preemption_enabled() );

	// Step 1 : release local locks
	// This must be done while the global lock is held to avoid
	// threads that were created mid critical-section
	// to race to lock their local locks and have the writer
	// immediately unlock them
	// Alternative solution : return s in write_lock and pass it to write_unlock
	for(uint_fast32_t i = 0; i < last_s; i++) {
		verify(data[i].lock);
		__atomic_store_n(&data[i].lock, (bool)false, __ATOMIC_RELEASE);
	}

	// Step 2 : release global lock
	/*paranoid*/ assert(true == lock);
	__atomic_store_n(&lock, (bool)false, __ATOMIC_RELEASE);

	/* paranoid */ verify( ! __preemption_enabled() );
}
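
// A minimal usage sketch of the writer side (hypothetical caller shown
// commented out; the real callers are ready_queue_grow/shrink below, which run
// with preemption disabled and verify ready_mutate_islocked()):
//
//   uint_fast32_t last_size = ready_mutate_lock();
//   ready_queue_grow( cltr );            // no reader can be inside the critical section here
//   ready_mutate_unlock( last_size );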

//=======================================================================
// Cforall Ready Queue used for scheduling
//=======================================================================
void ?{}(__ready_queue_t & this) with (this) {
	lanes.data  = 0p;
	lanes.tscs  = 0p;
	lanes.count = 0;
}

void ^?{}(__ready_queue_t & this) with (this) {
	verify( SEQUENTIAL_SHARD == lanes.count );
	free(lanes.data);
	free(lanes.tscs);
}

//-----------------------------------------------------------------------
#if defined(USE_RELAXED_FIFO)
	//-----------------------------------------------------------------------
	// get index from random number with or without bias towards queues
	static inline [unsigned, bool] idx_from_r(unsigned r, unsigned preferred) {
		unsigned i;
		bool local;
		unsigned rlow  = r % BIAS;
		unsigned rhigh = r / BIAS;
		if((0 != rlow) && preferred >= 0) {
			// (BIAS - 1) out of BIAS chances
			// Use preferred queues
			i = preferred + (rhigh % READYQ_SHARD_FACTOR);
			local = true;
		}
		else {
			// 1 out of BIAS chances
			// Use all queues
			i = rhigh;
			local = false;
		}
		return [i, local];
	}
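
	// Worked example of the split above, assuming BIAS == 4 and
	// READYQ_SHARD_FACTOR == 4: r % BIAS is non-zero for 3 out of 4 values of r,
	// so roughly 75% of operations stay on the 4 lanes
	// [preferred, preferred + 3] owned by the current processor, while the
	// remaining 25% use r / BIAS to index into the full set of lanes.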

	__attribute__((hot)) void push(struct cluster * cltr, struct $thread * thrd) with (cltr->ready_queue) {
		__cfadbg_print_safe(ready_queue, "Kernel : Pushing %p on cluster %p\n", thrd, cltr);

		const bool external = (!kernelTLS().this_processor) || (cltr != kernelTLS().this_processor->cltr);
		/* paranoid */ verify(external || kernelTLS().this_processor->rdq.id < lanes.count );

		// write timestamp
		thrd->link.ts = rdtscl();

		bool local;
		int preferred = external ? -1 : kernelTLS().this_processor->rdq.id;

		// Try to pick a lane and lock it
		unsigned i;
		do {
			// Pick the index of a lane
			unsigned r = __tls_rand_fwd();
			[i, local] = idx_from_r(r, preferred);

			i %= __atomic_load_n( &lanes.count, __ATOMIC_RELAXED );

			#if !defined(__CFA_NO_STATISTICS__)
				if(external) {
					if(local) __atomic_fetch_add(&cltr->stats->ready.pick.ext.local, 1, __ATOMIC_RELAXED);
					__atomic_fetch_add(&cltr->stats->ready.pick.ext.attempt, 1, __ATOMIC_RELAXED);
				}
				else {
					if(local) __tls_stats()->ready.pick.push.local++;
					__tls_stats()->ready.pick.push.attempt++;
				}
			#endif

		#if defined(USE_MPSC)
			// mpsc always succeeds
		} while( false );
		#else
			// If we can't lock it retry
		} while( !__atomic_try_acquire( &lanes.data[i].lock ) );
		#endif

		// Actually push it
		push(lanes.data[i], thrd);

		#if !defined(USE_MPSC)
			// Unlock and return
			__atomic_unlock( &lanes.data[i].lock );
		#endif

		// Mark the current index in the tls rng instance as having an item
		__tls_rand_advance_bck();

		__cfadbg_print_safe(ready_queue, "Kernel : Pushed %p on cluster %p (idx: %u, mask %llu, first %d)\n", thrd, cltr, i, used.mask[0], lane_first);

		// Update statistics
		#if !defined(__CFA_NO_STATISTICS__)
			if(external) {
				if(local) __atomic_fetch_add(&cltr->stats->ready.pick.ext.lsuccess, 1, __ATOMIC_RELAXED);
				__atomic_fetch_add(&cltr->stats->ready.pick.ext.success, 1, __ATOMIC_RELAXED);
			}
			else {
				if(local) __tls_stats()->ready.pick.push.lsuccess++;
				__tls_stats()->ready.pick.push.success++;
			}
		#endif
	}

	// Pop from the ready queue from a given cluster
	__attribute__((hot)) $thread * pop_fast(struct cluster * cltr) with (cltr->ready_queue) {
		/* paranoid */ verify( lanes.count > 0 );
		/* paranoid */ verify( kernelTLS().this_processor );
		/* paranoid */ verify( kernelTLS().this_processor->rdq.id < lanes.count );

		unsigned count = __atomic_load_n( &lanes.count, __ATOMIC_RELAXED );
		int preferred = kernelTLS().this_processor->rdq.id;

		// As long as the list is not empty, try finding a lane that isn't empty and pop from it
		for(25) {
			// Pick two lists at random
			unsigned ri = __tls_rand_bck();
			unsigned rj = __tls_rand_bck();

			unsigned i, j;
			__attribute__((unused)) bool locali, localj;
			[i, locali] = idx_from_r(ri, preferred);
			[j, localj] = idx_from_r(rj, preferred);

			#if !defined(__CFA_NO_STATISTICS__)
				if(locali && localj) {
					__tls_stats()->ready.pick.pop.local++;
				}
			#endif

			i %= count;
			j %= count;

			// try popping from the 2 picked lists
			struct $thread * thrd = try_pop(cltr, i, j);
			if(thrd) {
				#if !defined(__CFA_NO_STATISTICS__)
					if( locali || localj ) __tls_stats()->ready.pick.pop.lsuccess++;
				#endif
				return thrd;
			}
		}

		// All lanes were empty, return 0p
		return 0p;
	}

	__attribute__((hot)) struct $thread * pop_slow(struct cluster * cltr) {
		return search(cltr);
	}
#endif
#if defined(USE_WORK_STEALING)
	__attribute__((hot)) void push(struct cluster * cltr, struct $thread * thrd) with (cltr->ready_queue) {
		__cfadbg_print_safe(ready_queue, "Kernel : Pushing %p on cluster %p\n", thrd, cltr);

		const bool external = (!kernelTLS().this_processor) || (cltr != kernelTLS().this_processor->cltr);
		/* paranoid */ verify(external || kernelTLS().this_processor->rdq.id < lanes.count );

		// write timestamp
		thrd->link.ts = rdtscl();

		// Try to pick a lane and lock it
		unsigned i;
		do {
			if(unlikely(external)) {
				i = __tls_rand() % lanes.count;
			}
			else {
				processor * proc = kernelTLS().this_processor;
				unsigned r = proc->rdq.its++;
				i = proc->rdq.id + (r % READYQ_SHARD_FACTOR);
			}

		#if defined(USE_MPSC)
			// mpsc always succeeds
		} while( false );
		#else
			// If we can't lock it retry
		} while( !__atomic_try_acquire( &lanes.data[i].lock ) );
		#endif

		// Actually push it
		push(lanes.data[i], thrd);

		#if !defined(USE_MPSC)
			// Unlock and return
			__atomic_unlock( &lanes.data[i].lock );
		#endif

		__cfadbg_print_safe(ready_queue, "Kernel : Pushed %p on cluster %p (idx: %u, mask %llu, first %d)\n", thrd, cltr, i, used.mask[0], lane_first);
	}

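	// The fast-path pop below implements the work-stealing policy: a processor
	// normally pops only from its own READYQ_SHARD_FACTOR lanes. Periodically it
	// picks a random victim lane (rdq.target) and records a cutoff timestamp
	// taken from its own lanes; on a later call it steals from the victim only
	// if the victim's oldest element is older than that cutoff, keeping stealing
	// limited to lanes that are actually lagging behind.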
	// Pop from the ready queue from a given cluster
	__attribute__((hot)) $thread * pop_fast(struct cluster * cltr) with (cltr->ready_queue) {
		/* paranoid */ verify( lanes.count > 0 );
		/* paranoid */ verify( kernelTLS().this_processor );
		/* paranoid */ verify( kernelTLS().this_processor->rdq.id < lanes.count );

		processor * proc = kernelTLS().this_processor;

		if(proc->rdq.target == -1u) {
			proc->rdq.target = __tls_rand() % lanes.count;
			unsigned it1  = proc->rdq.itr;
			unsigned it2  = proc->rdq.itr + 1;
			unsigned idx1 = proc->rdq.id + (it1 % READYQ_SHARD_FACTOR);
			unsigned idx2 = proc->rdq.id + (it2 % READYQ_SHARD_FACTOR);
			unsigned long long tsc1 = ts(lanes.data[idx1]);
			unsigned long long tsc2 = ts(lanes.data[idx2]);
			proc->rdq.cutoff = min(tsc1, tsc2);
		}
		else if(lanes.tscs[proc->rdq.target].tv < proc->rdq.cutoff) {
			$thread * t = try_pop(cltr, proc->rdq.target);
			proc->rdq.target = -1u;
			if(t) return t;
		}

		for(READYQ_SHARD_FACTOR) {
			unsigned i = proc->rdq.id + (--proc->rdq.itr % READYQ_SHARD_FACTOR);
			if($thread * t = try_pop(cltr, i)) return t;
		}
		return 0p;
	}

	__attribute__((hot)) struct $thread * pop_slow(struct cluster * cltr) with (cltr->ready_queue) {
		for(25) {
			unsigned i = __tls_rand() % lanes.count;
			$thread * t = try_pop(cltr, i);
			if(t) return t;
		}

		return search(cltr);
	}
#endif

//=======================================================================
// Various Ready Queue utilities
//=======================================================================
// these functions work the same, or almost the same,
// whether they are using work-stealing or relaxed fifo scheduling

//-----------------------------------------------------------------------
// try to pop from a lane given by index w
static inline struct $thread * try_pop(struct cluster * cltr, unsigned w) with (cltr->ready_queue) {
	// Get relevant elements locally
	__intrusive_lane_t & lane = lanes.data[w];

	// If list looks empty retry
	if( is_empty(lane) ) return 0p;

	// If we can't get the lock retry
	if( !__atomic_try_acquire(&lane.lock) ) return 0p;

	// If list is empty, unlock and retry
	if( is_empty(lane) ) {
		__atomic_unlock(&lane.lock);
		return 0p;
	}

	// Actually pop the list
	struct $thread * thrd;
	thrd = pop(lane);

	/* paranoid */ verify(thrd);
	/* paranoid */ verify(lane.lock);

	// Unlock and return
	__atomic_unlock(&lane.lock);

	// Update statistics
	#if !defined(__CFA_NO_STATISTICS__)
		__tls_stats()->ready.pick.pop.success++;
	#endif

	#if defined(USE_WORK_STEALING)
		lanes.tscs[w].tv = thrd->link.ts;
	#endif

	// return the popped thread
	return thrd;
}

//-----------------------------------------------------------------------
// try to pop from any lane, making sure you don't miss any threads pushed
// before the start of the function
static inline struct $thread * search(struct cluster * cltr) with (cltr->ready_queue) {
	/* paranoid */ verify( lanes.count > 0 );
	unsigned count = __atomic_load_n( &lanes.count, __ATOMIC_RELAXED );
	unsigned offset = __tls_rand();
	for(i; count) {
		unsigned idx = (offset + i) % count;
		struct $thread * thrd = try_pop(cltr, idx);
		if(thrd) {
			return thrd;
		}
	}

509 // All lanes where empty return 0p
	return 0p;
}

//-----------------------------------------------------------------------
// Check that all the intrusive queues in the data structure are still consistent
static void check( __ready_queue_t & q ) with (q) {
	#if defined(__CFA_WITH_VERIFY__) && !defined(USE_MPSC)
		{
			for( idx ; lanes.count ) {
				__intrusive_lane_t & sl = lanes.data[idx];
				assert(!lanes.data[idx].lock);

				assert(head(sl)->link.prev == 0p );
				assert(head(sl)->link.next->link.prev == head(sl) );
				assert(tail(sl)->link.next == 0p );
				assert(tail(sl)->link.prev->link.next == tail(sl) );

				if(is_empty(sl)) {
					assert(tail(sl)->link.prev == head(sl));
					assert(head(sl)->link.next == tail(sl));
				} else {
					assert(tail(sl)->link.prev != head(sl));
					assert(head(sl)->link.next != tail(sl));
				}
			}
		}
	#endif
}

//-----------------------------------------------------------------------
// Given 2 indexes, pick the list with the oldest push and try to pop from it
static inline struct $thread * try_pop(struct cluster * cltr, unsigned i, unsigned j) with (cltr->ready_queue) {
	#if !defined(__CFA_NO_STATISTICS__)
		__tls_stats()->ready.pick.pop.attempt++;
	#endif

	// Pick the best list
	int w = i;
	if( __builtin_expect(!is_empty(lanes.data[j]), true) ) {
		w = (ts(lanes.data[i]) < ts(lanes.data[j])) ? i : j;
	}

	return try_pop(cltr, w);
}

// Call this function if the intrusive list was moved using memcpy;
// it fixes the list so that the pointers back to the anchors aren't left dangling
static inline void fix(__intrusive_lane_t & ll) {
	#if !defined(USE_MPSC)
		// if the list is not empty then follow the pointer and fix its reverse
		if(!is_empty(ll)) {
			head(ll)->link.next->link.prev = head(ll);
			tail(ll)->link.prev->link.next = tail(ll);
		}
		// Otherwise just reset the list
		else {
			verify(tail(ll)->link.next == 0p);
			tail(ll)->link.prev = head(ll);
			head(ll)->link.next = tail(ll);
			verify(head(ll)->link.prev == 0p);
		}
	#endif
}
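
// Example of when fix() is needed: ready_queue_grow/shrink below reallocate
// lanes.data with a realloc-style allocation, which may memcpy each lane to a
// new address. The head and tail anchors live inside the lane, so the first and
// last elements keep pointing at the anchors of the old allocation until fix()
// re-links them.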

static void assign_list(unsigned & value, dlist(processor, processor) & list, unsigned count) {
	processor * it = &list`first;
	for(unsigned i = 0; i < count; i++) {
		/* paranoid */ verifyf( it, "Unexpected null iterator, at index %u of %u\n", i, count);
		it->rdq.id = value;
		it->rdq.target = -1u;
		value += READYQ_SHARD_FACTOR;
		it = &(*it)`next;
	}
}

static void reassign_cltr_id(struct cluster * cltr) {
	unsigned preferred = 0;
	assign_list(preferred, cltr->procs.actives, cltr->procs.total - cltr->procs.idle);
	assign_list(preferred, cltr->procs.idles  , cltr->procs.idle );
}
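
// Example of the id assignment above: with READYQ_SHARD_FACTOR == 2, three
// active processors and one idle processor receive rdq.id 0, 2, 4 and 6
// respectively, so each processor owns the READYQ_SHARD_FACTOR consecutive
// lanes starting at its id (actives are numbered before idles).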

static void fix_times( struct cluster * cltr ) with( cltr->ready_queue ) {
	#if defined(USE_WORK_STEALING)
		lanes.tscs = alloc(lanes.count, lanes.tscs`realloc);
		for(i; lanes.count) {
			lanes.tscs[i].tv = ts(lanes.data[i]);
		}
	#endif
}

// Grow the ready queue
void ready_queue_grow(struct cluster * cltr) {
	size_t ncount;
	int target = cltr->procs.total;

	/* paranoid */ verify( ready_mutate_islocked() );
	__cfadbg_print_safe(ready_queue, "Kernel : Growing ready queue\n");

	// Make sure that everything is consistent
	/* paranoid */ check( cltr->ready_queue );

	// grow the ready queue
	with( cltr->ready_queue ) {
		// Find new count
		// Make sure we always have at least 1 list
		if(target >= 2) {
			ncount = target * READYQ_SHARD_FACTOR;
		} else {
			ncount = SEQUENTIAL_SHARD;
		}

		// Allocate new array (uses realloc and memcpies the data)
		lanes.data = alloc( ncount, lanes.data`realloc );

		// Fix the moved data
		for( idx; (size_t)lanes.count ) {
			fix(lanes.data[idx]);
		}

		// Construct new data
		for( idx; (size_t)lanes.count ~ ncount) {
			(lanes.data[idx]){};
		}

		// Update original
		lanes.count = ncount;
	}

	fix_times(cltr);

	reassign_cltr_id(cltr);

	// Make sure that everything is consistent
	/* paranoid */ check( cltr->ready_queue );

	__cfadbg_print_safe(ready_queue, "Kernel : Growing ready queue done\n");

	/* paranoid */ verify( ready_mutate_islocked() );
}

// Shrink the ready queue
void ready_queue_shrink(struct cluster * cltr) {
	/* paranoid */ verify( ready_mutate_islocked() );
	__cfadbg_print_safe(ready_queue, "Kernel : Shrinking ready queue\n");

	// Make sure that everything is consistent
	/* paranoid */ check( cltr->ready_queue );

	int target = cltr->procs.total;

	with( cltr->ready_queue ) {
		// Remember old count
		size_t ocount = lanes.count;

		// Find new count
		// Make sure we always have at least 1 list
		lanes.count = target >= 2 ? target * READYQ_SHARD_FACTOR : SEQUENTIAL_SHARD;
		/* paranoid */ verify( ocount >= lanes.count );
		/* paranoid */ verify( lanes.count == target * READYQ_SHARD_FACTOR || target < 2 );

		// for printing count the number of displaced threads
		#if defined(__CFA_DEBUG_PRINT__) || defined(__CFA_DEBUG_PRINT_READY_QUEUE__)
			__attribute__((unused)) size_t displaced = 0;
		#endif

		// redistribute old data
		for( idx; (size_t)lanes.count ~ ocount) {
			// Lock is not strictly needed but makes checking invariants much easier
			__attribute__((unused)) bool locked = __atomic_try_acquire(&lanes.data[idx].lock);
			verify(locked);

			// As long as we can pop from this lane to push the threads somewhere else in the queue
			while(!is_empty(lanes.data[idx])) {
				struct $thread * thrd;
				thrd = pop(lanes.data[idx]);

				push(cltr, thrd);

				// for printing count the number of displaced threads
				#if defined(__CFA_DEBUG_PRINT__) || defined(__CFA_DEBUG_PRINT_READY_QUEUE__)
					displaced++;
				#endif
			}

			// Unlock the lane
			__atomic_unlock(&lanes.data[idx].lock);

			// TODO print the queue statistics here

			^(lanes.data[idx]){};
		}

		__cfadbg_print_safe(ready_queue, "Kernel : Shrinking ready queue displaced %zu threads\n", displaced);

		// Allocate new array (uses realloc and memcpies the data)
		lanes.data = alloc( lanes.count, lanes.data`realloc );

		// Fix the moved data
		for( idx; (size_t)lanes.count ) {
			fix(lanes.data[idx]);
		}
	}

	fix_times(cltr);

	reassign_cltr_id(cltr);

	// Make sure that everything is consistent
	/* paranoid */ check( cltr->ready_queue );

	__cfadbg_print_safe(ready_queue, "Kernel : Shrinking ready queue done\n");
	/* paranoid */ verify( ready_mutate_islocked() );
}