source: libcfa/src/concurrency/ready_queue.cfa@ fd1f65e

Last change on this file was fd1f65e, checked in by Thierry Delisle <tdelisle@…>, 5 years ago

Stats now keep track of external pushes separately

//
// Cforall Version 1.0.0 Copyright (C) 2019 University of Waterloo
//
// The contents of this file are covered under the licence agreement in the
// file "LICENCE" distributed with Cforall.
//
// ready_queue.cfa --
//
// Author           : Thierry Delisle
// Created On       : Mon Nov dd 16:29:18 2019
// Last Modified By :
// Last Modified On :
// Update Count     :
//

#define __cforall_thread__
// #define __CFA_DEBUG_PRINT_READY_QUEUE__

// #define USE_SNZI

#include "bits/defs.hfa"
#include "kernel_private.hfa"

#define _GNU_SOURCE
#include "stdlib.hfa"
#include "math.hfa"

#include <unistd.h>

#include "snzi.hfa"
#include "ready_subqueue.hfa"

static const size_t cache_line_size = 64;

// No overriding function, no environment variable, no define:
// fall back to a magic number.
#ifndef __CFA_MAX_PROCESSORS__
    #define __CFA_MAX_PROCESSORS__ 1024
#endif

#define BIAS 4

// Returns the maximum number of processors the RWLock supports.
__attribute__((weak)) unsigned __max_processors() {
    const char * max_cores_s = getenv("CFA_MAX_PROCESSORS");
    if(!max_cores_s) {
        __cfadbg_print_nolock(ready_queue, "No CFA_MAX_PROCESSORS in ENV\n");
        return __CFA_MAX_PROCESSORS__;
    }

    char * endptr = 0p;
    long int max_cores_l = strtol(max_cores_s, &endptr, 10);
    if(max_cores_l < 1 || max_cores_l > 65535) {
        __cfadbg_print_nolock(ready_queue, "CFA_MAX_PROCESSORS out of range : %ld\n", max_cores_l);
        return __CFA_MAX_PROCESSORS__;
    }
    if('\0' != *endptr) {
        __cfadbg_print_nolock(ready_queue, "CFA_MAX_PROCESSORS not a decimal number : %s\n", max_cores_s);
        return __CFA_MAX_PROCESSORS__;
    }

    return max_cores_l;
}
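
// Usage sketch (added commentary, not in the original file): the limit can be
// raised three ways, from strongest to weakest. The program and file names
// below are hypothetical.
//
//     // 1. Link a strong definition that overrides this weak symbol:
//     //        unsigned __max_processors() { return 4096; }
//     // 2. Set the environment variable at run time (1..65535):
//     //        $ CFA_MAX_PROCESSORS=4096 ./app
//     // 3. Define the fallback at compile time:
//     //        $ cfa -D__CFA_MAX_PROCESSORS__=4096 ready_queue.cfa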

//=======================================================================
// Cluster wide reader-writer lock
//=======================================================================
void ?{}(__scheduler_RWLock_t & this) {
    this.max   = __max_processors();
    this.alloc = 0;
    this.ready = 0;
    this.lock  = false;
    this.data  = alloc(this.max);

    /*paranoid*/ verify( 0 == (((uintptr_t)(this.data    )) % 64) );
    /*paranoid*/ verify( 0 == (((uintptr_t)(this.data + 1)) % 64) );
    /*paranoid*/ verify(__atomic_is_lock_free(sizeof(this.alloc), &this.alloc));
    /*paranoid*/ verify(__atomic_is_lock_free(sizeof(this.ready), &this.ready));

}
void ^?{}(__scheduler_RWLock_t & this) {
    free(this.data);
}

void ?{}( __scheduler_lock_id_t & this, __processor_id_t * proc ) {
    this.handle = proc;
    this.lock   = false;
    #ifdef __CFA_WITH_VERIFY__
        this.owned = false;
    #endif
}

//=======================================================================
// Lock-Free registering/unregistering of threads
unsigned doregister( struct __processor_id_t * proc ) with(*__scheduler_lock) {
    __cfadbg_print_safe(ready_queue, "Kernel : Registering proc %p for RW-Lock\n", proc);

    // Step - 1 : check if there is already space in the data
    uint_fast32_t s = ready;

    // Check among all the ready
    for(uint_fast32_t i = 0; i < s; i++) {
        __processor_id_t * null = 0p; // Re-write every loop since compare thrashes it
        if( __atomic_load_n(&data[i].handle, (int)__ATOMIC_RELAXED) == null
            && __atomic_compare_exchange_n( &data[i].handle, &null, proc, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) {
            /*paranoid*/ verify(i < ready);
            /*paranoid*/ verify(0 == (__alignof__(data[i]) % cache_line_size));
            /*paranoid*/ verify((((uintptr_t)&data[i]) % cache_line_size) == 0);
            return i;
        }
    }

    if(max <= alloc) abort("Trying to create more than %u processors", __scheduler_lock->max);

    // Step - 2 : F&A to get a new spot in the array.
    uint_fast32_t n = __atomic_fetch_add(&alloc, 1, __ATOMIC_SEQ_CST);
    if(max <= n) abort("Trying to create more than %u processors", __scheduler_lock->max);

    // Step - 3 : Mark space as used and then publish it.
    __scheduler_lock_id_t * storage = (__scheduler_lock_id_t *)&data[n];
    (*storage){ proc };
    while() {
        unsigned copy = n;
        if( __atomic_load_n(&ready, __ATOMIC_RELAXED) == n
            && __atomic_compare_exchange_n(&ready, &copy, n + 1, true, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
            break;
        Pause();
    }

    __cfadbg_print_safe(ready_queue, "Kernel : Registering proc %p done, id %lu\n", proc, n);

    // Return new spot.
    /*paranoid*/ verify(n < ready);
    /*paranoid*/ verify(__alignof__(data[n]) == (2 * cache_line_size));
    /*paranoid*/ verify((((uintptr_t)&data[n]) % cache_line_size) == 0);
    return n;
}
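
// Note (added commentary, not in the original file): the CAS loop in Step 3
// publishes slots strictly in allocation order. A registrant that received
// slot n from the fetch-and-add spins until `ready == n`, i.e. until slots
// 0..n-1 are published, then advances `ready` to n + 1. Readers that scan
// `data[0..ready)` therefore only ever see fully-constructed entries.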

void unregister( struct __processor_id_t * proc ) with(*__scheduler_lock) {
    unsigned id = proc->id;
    /*paranoid*/ verify(id < ready);
    /*paranoid*/ verify(proc == __atomic_load_n(&data[id].handle, __ATOMIC_RELAXED));
    __atomic_store_n(&data[id].handle, 0p, __ATOMIC_RELEASE);

    __cfadbg_print_safe(ready_queue, "Kernel : Unregister proc %p\n", proc);
}

//-----------------------------------------------------------------------
// Writer side : acquire when changing the ready queue, e.g. adding more
// queues or removing them.
uint_fast32_t ready_mutate_lock( void ) with(*__scheduler_lock) {
    /* paranoid */ verify( ! __preemption_enabled() );

    // Step 1 : lock global lock
    // It is needed to avoid processors that register mid Critical-Section
    // from simply locking their own lock and entering.
    __atomic_acquire( &lock );

    // Step 2 : lock per-proc lock
    // Processors that are currently being registered aren't counted
    // but can't be in read_lock or in the critical section.
    // All other processors are counted.
    uint_fast32_t s = ready;
    for(uint_fast32_t i = 0; i < s; i++) {
        __atomic_acquire( &data[i].lock );
    }

    /* paranoid */ verify( ! __preemption_enabled() );
    return s;
}

void ready_mutate_unlock( uint_fast32_t last_s ) with(*__scheduler_lock) {
    /* paranoid */ verify( ! __preemption_enabled() );

    // Step 1 : release local locks
    // This must be done while the global lock is held to avoid
    // threads that were created mid critical section
    // racing to lock their local locks and having the writer
    // immediately unlock them.
    // Alternative solution : return s in write_lock and pass it to write_unlock
    for(uint_fast32_t i = 0; i < last_s; i++) {
        verify(data[i].lock);
        __atomic_store_n(&data[i].lock, (bool)false, __ATOMIC_RELEASE);
    }

    // Step 2 : release global lock
    /*paranoid*/ assert(true == lock);
    __atomic_store_n(&lock, (bool)false, __ATOMIC_RELEASE);

    /* paranoid */ verify( ! __preemption_enabled() );
}
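
// Minimal usage sketch (added commentary, not in the original file): how a
// caller brackets a resize with the writer lock above. The wrapper function
// is hypothetical; ready_queue_grow is defined later in this file.
//
//     static void grow_cluster( struct cluster * cltr, int target ) {
//         uint_fast32_t last_size = ready_mutate_lock();
//         // exclusive : no processor holds its reader lock here
//         ready_queue_grow( cltr, target );
//         ready_mutate_unlock( last_size );
//     }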

//=======================================================================
// Cforall Ready Queue used for scheduling
//=======================================================================
void ?{}(__ready_queue_t & this) with (this) {
    lanes.data  = 0p;
    lanes.count = 0;
}

void ^?{}(__ready_queue_t & this) with (this) {
    verify( 1 == lanes.count );
    #ifdef USE_SNZI
        verify( !query( snzi ) );
    #endif
    free(lanes.data);
}

//-----------------------------------------------------------------------
__attribute__((hot)) bool query(struct cluster * cltr) {
    #ifdef USE_SNZI
        return query(cltr->ready_queue.snzi);
    #endif
    return true;
}

// Note: `preferred` is signed so the callers' -1 sentinel ("no preference")
// makes the `preferred >= 0` test below meaningful.
static inline [unsigned, bool] idx_from_r(unsigned r, int preferred) {
    unsigned i;
    bool local;
    #if defined(BIAS)
        unsigned rlow  = r % BIAS;
        unsigned rhigh = r / BIAS;
        if((0 != rlow) && preferred >= 0) {
            // (BIAS - 1) out of BIAS chances
            // Use preferred queues
            i = preferred + (rhigh % 4);
            local = true;
        }
        else {
            // 1 out of BIAS chances
            // Use all queues
            i = rhigh;
            local = false;
        }
    #else
        i = r;
        local = false;
    #endif
    return [i, local];
}
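
// Worked example (added commentary, not in the original file), with BIAS = 4
// and a processor whose preferred block of lanes starts at index 8:
//
//     r = 13 : rlow = 1 (non-zero), rhigh = 3 -> i = 8 + (3 % 4) = 11, local
//     r = 16 : rlow = 0,            rhigh = 4 -> i = 4,               global
//
// So (BIAS - 1) out of BIAS draws stay in the caller's 4 preferred lanes and
// the remainder are spread across the whole array (before the `% count`
// applied by the callers).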

//-----------------------------------------------------------------------
__attribute__((hot)) bool push(struct cluster * cltr, struct $thread * thrd) with (cltr->ready_queue) {
    __cfadbg_print_safe(ready_queue, "Kernel : Pushing %p on cluster %p\n", thrd, cltr);

    #if !defined(__CFA_NO_STATISTICS__)
        // `external` : the push does not come from one of this cluster's own processors
        const bool external = !kernelTLS().this_processor || (cltr != kernelTLS().this_processor->cltr);
    #endif

    // write timestamp
    thrd->link.ts = rdtscl();

    __attribute__((unused)) bool local;
    __attribute__((unused)) int preferred;
    #if defined(BIAS)
        preferred =
            //*
            kernelTLS().this_processor ? kernelTLS().this_processor->cltr_id : -1;
            /*/
            thrd->link.preferred * 4;
            //*/
    #endif

    // Try to pick a lane and lock it
    unsigned i;
    do {
        // Pick the index of a lane
        // unsigned r = __tls_rand();
        unsigned r = __tls_rand_fwd();
        [i, local] = idx_from_r(r, preferred);

        i %= __atomic_load_n( &lanes.count, __ATOMIC_RELAXED );

        #if !defined(__CFA_NO_STATISTICS__)
            if(external) {
                if(local) __atomic_fetch_add(&cltr->stats->ready.pick.ext.local, 1, __ATOMIC_RELAXED);
                __atomic_fetch_add(&cltr->stats->ready.pick.ext.attempt, 1, __ATOMIC_RELAXED);
            }
            else {
                if(local) __tls_stats()->ready.pick.push.local++;
                __tls_stats()->ready.pick.push.attempt++;
            }
        #endif

    // If we can't lock it retry
    } while( !__atomic_try_acquire( &lanes.data[i].lock ) );

    bool first = false;

    // Actually push it
    #ifdef USE_SNZI
        bool lane_first =
    #endif

    push(lanes.data[i], thrd);

    #ifdef USE_SNZI
        // If this lane used to be empty we need to do more
        if(lane_first) {
            // Check if the entire queue used to be empty
            first = !query(snzi);

            // Update the snzi
            arrive( snzi, i );
        }
    #endif

    __tls_rand_advance_bck();

    // Unlock and return
    __atomic_unlock( &lanes.data[i].lock );

    __cfadbg_print_safe(ready_queue, "Kernel : Pushed %p on cluster %p (idx: %u, mask %llu, first %d)\n", thrd, cltr, i, used.mask[0], lane_first);

    // Update statistics
    #if !defined(__CFA_NO_STATISTICS__)
        if(external) {
            if(local) __atomic_fetch_add(&cltr->stats->ready.pick.ext.lsuccess, 1, __ATOMIC_RELAXED);
            __atomic_fetch_add(&cltr->stats->ready.pick.ext.success, 1, __ATOMIC_RELAXED);
        }
        else {
            if(local) __tls_stats()->ready.pick.push.lsuccess++;
            __tls_stats()->ready.pick.push.success++;
        }
    #endif

    // return whether or not the list was empty before this push
    return first;
}
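
// Note (added commentary, not in the original file): push draws its random
// lane from the forward per-thread RNG (__tls_rand_fwd) and then commits it
// with __tls_rand_advance_bck(), while pop below draws from the backward RNG
// (__tls_rand_bck). The intent appears to be that pops replay the push
// sequence in reverse, biasing them toward lanes this processor filled
// recently and raising the odds of finding a non-empty lane.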

static struct $thread * try_pop(struct cluster * cltr, unsigned i, unsigned j);
static struct $thread * try_pop(struct cluster * cltr, unsigned i);

// Pop from the ready queue from a given cluster
__attribute__((hot)) $thread * pop(struct cluster * cltr) with (cltr->ready_queue) {
    /* paranoid */ verify( lanes.count > 0 );
    unsigned count = __atomic_load_n( &lanes.count, __ATOMIC_RELAXED );
    int preferred;
    #if defined(BIAS)
        // Don't bother trying locally too much
        preferred = kernelTLS().this_processor->cltr_id;
    #endif

    // As long as the list is not empty, try finding a lane that isn't empty and pop from it
    #ifdef USE_SNZI
        while( query(snzi) ) {
    #else
        for(25) {
    #endif
        // Pick two lists at random
        // unsigned ri = __tls_rand();
        // unsigned rj = __tls_rand();
        unsigned ri = __tls_rand_bck();
        unsigned rj = __tls_rand_bck();

        unsigned i, j;
        __attribute__((unused)) bool locali, localj;
        [i, locali] = idx_from_r(ri, preferred);
        [j, localj] = idx_from_r(rj, preferred);

        #if !defined(__CFA_NO_STATISTICS__)
            if(locali && localj) {
                __tls_stats()->ready.pick.pop.local++;
            }
        #endif

        i %= count;
        j %= count;

        // try popping from the 2 picked lists
        struct $thread * thrd = try_pop(cltr, i, j);
        if(thrd) {
            #if defined(BIAS) && !defined(__CFA_NO_STATISTICS__)
                if( locali || localj ) __tls_stats()->ready.pick.pop.lsuccess++;
            #endif
            return thrd;
        }
    }

    // All lanes were empty; return 0p
    return 0p;
}

__attribute__((hot)) struct $thread * pop_slow(struct cluster * cltr) with (cltr->ready_queue) {
    /* paranoid */ verify( lanes.count > 0 );
    unsigned count = __atomic_load_n( &lanes.count, __ATOMIC_RELAXED );
    unsigned offset = __tls_rand();
    for(i; count) {
        unsigned idx = (offset + i) % count;
        struct $thread * thrd = try_pop(cltr, idx);
        if(thrd) {
            return thrd;
        }
    }

    // All lanes were empty; return 0p
    return 0p;
}


//-----------------------------------------------------------------------
// Given 2 indexes, pick the list with the oldest push and try to pop from it
static inline struct $thread * try_pop(struct cluster * cltr, unsigned i, unsigned j) with (cltr->ready_queue) {
    #if !defined(__CFA_NO_STATISTICS__)
        __tls_stats()->ready.pick.pop.attempt++;
    #endif

    // Pick the best list
    int w = i;
    if( __builtin_expect(!is_empty(lanes.data[j]), true) ) {
        w = (ts(lanes.data[i]) < ts(lanes.data[j])) ? i : j;
    }

    return try_pop(cltr, w);
}
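
// Note (added commentary, not in the original file): this is the classic
// "power of two choices" heuristic. Sampling two random lanes and popping
// from the one whose head carries the older timestamp keeps the lanes
// balanced and approximates global FIFO order without any shared lock, at
// the cost of reading two timestamps per attempt.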

static inline struct $thread * try_pop(struct cluster * cltr, unsigned w) with (cltr->ready_queue) {
    // Get relevant elements locally
    __intrusive_lane_t & lane = lanes.data[w];

    // If list looks empty retry
    if( is_empty(lane) ) return 0p;

    // If we can't get the lock retry
    if( !__atomic_try_acquire(&lane.lock) ) return 0p;

    // If list is empty, unlock and retry
    if( is_empty(lane) ) {
        __atomic_unlock(&lane.lock);
        return 0p;
    }

    // Actually pop the list
    struct $thread * thrd;
    thrd = pop(lane);

    /* paranoid */ verify(thrd);
    /* paranoid */ verify(lane.lock);

    #ifdef USE_SNZI
        // If this was the last element in the lane
        if(emptied) {
            depart( snzi, w );
        }
    #endif

    // Unlock and return
    __atomic_unlock(&lane.lock);

    // Update statistics
    #if !defined(__CFA_NO_STATISTICS__)
        __tls_stats()->ready.pick.pop.success++;
    #endif

    // Update the thread bias
    thrd->link.preferred = w / 4;

    // return the popped thread
    return thrd;
}
//-----------------------------------------------------------------------

bool remove_head(struct cluster * cltr, struct $thread * thrd) with (cltr->ready_queue) {
    for(i; lanes.count) {
        __intrusive_lane_t & lane = lanes.data[i];

        bool removed = false;

        __atomic_acquire(&lane.lock);
            if(head(lane)->link.next == thrd) {
                $thread * pthrd;
                pthrd = pop(lane);

                /* paranoid */ verify( pthrd == thrd );

                removed = true;
                #ifdef USE_SNZI
                    if(emptied) {
                        depart( snzi, i );
                    }
                #endif
            }
        __atomic_unlock(&lane.lock);

        if( removed ) return true;
    }
    return false;
}

//-----------------------------------------------------------------------

static void check( __ready_queue_t & q ) with (q) {
    #if defined(__CFA_WITH_VERIFY__)
        {
            for( idx ; lanes.count ) {
                __intrusive_lane_t & sl = lanes.data[idx];
                assert(!lanes.data[idx].lock);

                assert(head(sl)->link.prev == 0p );
                assert(head(sl)->link.next->link.prev == head(sl) );
                assert(tail(sl)->link.next == 0p );
                assert(tail(sl)->link.prev->link.next == tail(sl) );

                if(sl.before.link.ts == 0l) {
                    assert(tail(sl)->link.prev == head(sl));
                    assert(head(sl)->link.next == tail(sl));
                } else {
                    assert(tail(sl)->link.prev != head(sl));
                    assert(head(sl)->link.next != tail(sl));
                }
            }
        }
    #endif
}

// Call this function if the intrusive list was moved using memcpy;
// it fixes the list so that the pointers back to the anchors aren't left dangling.
static inline void fix(__intrusive_lane_t & ll) {
    // If the list is not empty then follow the pointers and fix the reverse links
    if(!is_empty(ll)) {
        head(ll)->link.next->link.prev = head(ll);
        tail(ll)->link.prev->link.next = tail(ll);
    }
    // Otherwise just reset the list
    else {
        verify(tail(ll)->link.next == 0p);
        tail(ll)->link.prev = head(ll);
        head(ll)->link.next = tail(ll);
        verify(head(ll)->link.prev == 0p);
    }
}
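
// Illustration (added commentary, not in the original file) of why fix() is
// needed. The anchor nodes returned by head() and tail() live inside the
// __intrusive_lane_t itself, so a realloc/memcpy moves the anchors but not
// the threads pointing at them:
//
//     before the move : first->link.prev == old lane's head anchor  (valid)
//     after the move  : first->link.prev == old lane's head anchor  (dangling)
//
// fix() re-aims the boundary links at the anchors' new addresses.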

// Grow the ready queue
unsigned ready_queue_grow(struct cluster * cltr, int target) {
    unsigned preferred;
    size_t ncount;

    /* paranoid */ verify( ready_mutate_islocked() );
    __cfadbg_print_safe(ready_queue, "Kernel : Growing ready queue\n");

    // Make sure that everything is consistent
    /* paranoid */ check( cltr->ready_queue );

    // grow the ready queue
    with( cltr->ready_queue ) {
        #ifdef USE_SNZI
            ^(snzi){};
        #endif

        // Find new count
        // Make sure we always have at least 1 list
        if(target >= 2) {
            ncount = target * 4;
            preferred = ncount - 4;
        } else {
            ncount = 1;
            preferred = 0;
        }
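
        // Worked example (added commentary, not in the original file):
        //     target = 1 -> ncount = 1,  preferred = 0   (single shared lane)
        //     target = 2 -> ncount = 8,  preferred = 4
        //     target = 8 -> ncount = 32, preferred = 28
        // i.e. 4 lanes per processor, with `preferred` naming the first lane
        // of the newest processor's block of 4.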

        // Allocate new array (uses realloc and memcpies the data)
        lanes.data = alloc( ncount, lanes.data`realloc );

        // Fix the moved data
        for( idx; (size_t)lanes.count ) {
            fix(lanes.data[idx]);
        }

        // Construct new data
        for( idx; (size_t)lanes.count ~ ncount) {
            (lanes.data[idx]){};
        }

        // Update original
        lanes.count = ncount;

        #ifdef USE_SNZI
            // Re-create the snzi
            snzi{ log2( lanes.count / 8 ) };
            for( idx; (size_t)lanes.count ) {
                if( !is_empty(lanes.data[idx]) ) {
                    arrive(snzi, idx);
                }
            }
        #endif
    }

    // Make sure that everything is consistent
    /* paranoid */ check( cltr->ready_queue );

    __cfadbg_print_safe(ready_queue, "Kernel : Growing ready queue done\n");

    /* paranoid */ verify( ready_mutate_islocked() );
    return preferred;
}

// Shrink the ready queue
void ready_queue_shrink(struct cluster * cltr, int target) {
    /* paranoid */ verify( ready_mutate_islocked() );
    __cfadbg_print_safe(ready_queue, "Kernel : Shrinking ready queue\n");

    // Make sure that everything is consistent
    /* paranoid */ check( cltr->ready_queue );

    with( cltr->ready_queue ) {
        #ifdef USE_SNZI
            ^(snzi){};
        #endif

        // Remember old count
        size_t ocount = lanes.count;

        // Find new count
        // Make sure we always have at least 1 list
        lanes.count = target >= 2 ? target * 4 : 1;
        /* paranoid */ verify( ocount >= lanes.count );
        /* paranoid */ verify( lanes.count == target * 4 || target < 2 );

        // for printing, count the number of displaced threads
        #if defined(__CFA_DEBUG_PRINT__) || defined(__CFA_DEBUG_PRINT_READY_QUEUE__)
            __attribute__((unused)) size_t displaced = 0;
        #endif

        // redistribute old data
        for( idx; (size_t)lanes.count ~ ocount) {
            // Lock is not strictly needed but makes checking invariants much easier
            __attribute__((unused)) bool locked = __atomic_try_acquire(&lanes.data[idx].lock);
            verify(locked);

            // As long as we can pop from this lane, push the threads somewhere else in the queue
            while(!is_empty(lanes.data[idx])) {
                struct $thread * thrd;
                thrd = pop(lanes.data[idx]);

                push(cltr, thrd);

                // for printing, count the number of displaced threads
                #if defined(__CFA_DEBUG_PRINT__) || defined(__CFA_DEBUG_PRINT_READY_QUEUE__)
                    displaced++;
                #endif
            }

            // Unlock the lane
            __atomic_unlock(&lanes.data[idx].lock);

            // TODO print the queue statistics here

            ^(lanes.data[idx]){};
        }

        __cfadbg_print_safe(ready_queue, "Kernel : Shrinking ready queue displaced %zu threads\n", displaced);

        // Allocate new array (uses realloc and memcpies the data)
        lanes.data = alloc( lanes.count, lanes.data`realloc );

        // Fix the moved data
        for( idx; (size_t)lanes.count ) {
            fix(lanes.data[idx]);
        }

        #ifdef USE_SNZI
            // Re-create the snzi
            snzi{ log2( lanes.count / 8 ) };
            for( idx; (size_t)lanes.count ) {
                if( !is_empty(lanes.data[idx]) ) {
                    arrive(snzi, idx);
                }
            }
        #endif
    }

    // Make sure that everything is consistent
    /* paranoid */ check( cltr->ready_queue );

    __cfadbg_print_safe(ready_queue, "Kernel : Shrinking ready queue done\n");
    /* paranoid */ verify( ready_mutate_islocked() );
}