source: libcfa/src/concurrency/ready_queue.cfa@ a0e7d3c

Last change on this file since a0e7d3c was e0d6748, checked in by Thierry Delisle <tdelisle@…>, 4 years ago

Removed unused functions

//
// Cforall Version 1.0.0 Copyright (C) 2019 University of Waterloo
//
// The contents of this file are covered under the licence agreement in the
// file "LICENCE" distributed with Cforall.
//
// ready_queue.cfa --
//
// Author : Thierry Delisle
// Created On : Mon Nov dd 16:29:18 2019
// Last Modified By :
// Last Modified On :
// Update Count :
//

#define __cforall_thread__
// #define __CFA_DEBUG_PRINT_READY_QUEUE__

// #define USE_MPSC

#include "bits/defs.hfa"
#include "kernel_private.hfa"

#define _GNU_SOURCE
#include "stdlib.hfa"
#include "math.hfa"

#include <unistd.h>

#include "ready_subqueue.hfa"
static const size_t cache_line_size = 64;

// No overridden function, no environment variable, no define:
// fall back to a magic number
#ifndef __CFA_MAX_PROCESSORS__
	#define __CFA_MAX_PROCESSORS__ 1024
#endif

#define BIAS 4
// returns the maximum number of processors the RWLock supports
__attribute__((weak)) unsigned __max_processors() {
	const char * max_cores_s = getenv("CFA_MAX_PROCESSORS");
	if(!max_cores_s) {
		__cfadbg_print_nolock(ready_queue, "No CFA_MAX_PROCESSORS in ENV\n");
		return __CFA_MAX_PROCESSORS__;
	}

	char * endptr = 0p;
	long int max_cores_l = strtol(max_cores_s, &endptr, 10);
	if(max_cores_l < 1 || max_cores_l > 65535) {
		__cfadbg_print_nolock(ready_queue, "CFA_MAX_PROCESSORS out of range : %ld\n", max_cores_l);
		return __CFA_MAX_PROCESSORS__;
	}
	if('\0' != *endptr) {
		__cfadbg_print_nolock(ready_queue, "CFA_MAX_PROCESSORS not a decimal number : %s\n", max_cores_s);
		return __CFA_MAX_PROCESSORS__;
	}

	return max_cores_l;
}

//=======================================================================
// Cluster wide reader-writer lock
//=======================================================================
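// The lock state is spread across one __scheduler_lock_id_t slot per processor
// (the 'data' array): registration below hands each processor its own slot, and
// the write side (ready_mutate_lock) takes the global 'lock' flag followed by
// every registered slot's lock. The read side is not in this file; the per-slot
// locks exist so that, presumably, a reader only ever touches its own slot.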
void ?{}(__scheduler_RWLock_t & this) {
	this.max   = __max_processors();
	this.alloc = 0;
	this.ready = 0;
	this.lock  = false;
	this.data  = alloc(this.max);

	/*paranoid*/ verify( 0 == (((uintptr_t)(this.data    )) % 64) );
	/*paranoid*/ verify( 0 == (((uintptr_t)(this.data + 1)) % 64) );
	/*paranoid*/ verify(__atomic_is_lock_free(sizeof(this.alloc), &this.alloc));
	/*paranoid*/ verify(__atomic_is_lock_free(sizeof(this.ready), &this.ready));

}
void ^?{}(__scheduler_RWLock_t & this) {
	free(this.data);
}

void ?{}( __scheduler_lock_id_t & this, __processor_id_t * proc ) {
	this.handle = proc;
	this.lock   = false;
	#ifdef __CFA_WITH_VERIFY__
		this.owned = false;
	#endif
}

//=======================================================================
// Lock-Free registering/unregistering of threads
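// Slots are never returned to a free list: unregister_proc_id (below) simply
// clears a slot's handle, so Step 1 of registration first tries to reclaim such
// a cleared slot with a CAS before Step 2 extends the used portion of the array
// with a fetch-and-add.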
void register_proc_id( struct __processor_id_t * proc ) with(*__scheduler_lock) {
	__cfadbg_print_safe(ready_queue, "Kernel : Registering proc %p for RW-Lock\n", proc);

	// Step - 1 : check if there is already space in the data
	uint_fast32_t s = ready;

	// Check among all the ready
	for(uint_fast32_t i = 0; i < s; i++) {
		__processor_id_t * null = 0p; // Re-write every loop since compare thrashes it
		if( __atomic_load_n(&data[i].handle, (int)__ATOMIC_RELAXED) == null
			&& __atomic_compare_exchange_n( &data[i].handle, &null, proc, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) {
			/*paranoid*/ verify(i < ready);
			/*paranoid*/ verify(0 == (__alignof__(data[i]) % cache_line_size));
			/*paranoid*/ verify((((uintptr_t)&data[i]) % cache_line_size) == 0);
			proc->id = i;
			return; // slot reclaimed, do not allocate a new one
		}
	}

	if(max <= alloc) abort("Trying to create more than %u processors", __scheduler_lock->max);

	// Step - 2 : F&A to get a new spot in the array.
	uint_fast32_t n = __atomic_fetch_add(&alloc, 1, __ATOMIC_SEQ_CST);
	if(max <= n) abort("Trying to create more than %u processors", __scheduler_lock->max);

	// Step - 3 : Mark space as used and then publish it.
	__scheduler_lock_id_t * storage = (__scheduler_lock_id_t *)&data[n];
	(*storage){ proc };
	while() {
		unsigned copy = n;
		if( __atomic_load_n(&ready, __ATOMIC_RELAXED) == n
			&& __atomic_compare_exchange_n(&ready, &copy, n + 1, true, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
			break;
		Pause();
	}

	__cfadbg_print_safe(ready_queue, "Kernel : Registering proc %p done, id %lu\n", proc, n);

	// Return new spot.
	/*paranoid*/ verify(n < ready);
	/*paranoid*/ verify(__alignof__(data[n]) == (2 * cache_line_size));
	/*paranoid*/ verify((((uintptr_t)&data[n]) % cache_line_size) == 0);
	proc->id = n;
}

void unregister_proc_id( struct __processor_id_t * proc ) with(*__scheduler_lock) {
	unsigned id = proc->id;
	/*paranoid*/ verify(id < ready);
	/*paranoid*/ verify(proc == __atomic_load_n(&data[id].handle, __ATOMIC_RELAXED));
	__atomic_store_n(&data[id].handle, 0p, __ATOMIC_RELEASE);

	__cfadbg_print_safe(ready_queue, "Kernel : Unregister proc %p\n", proc);
}

//-----------------------------------------------------------------------
// Writer side : acquire when changing the ready queue, e.g. adding more
// queues or removing them.
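// The writer takes the global flag first and then every per-processor lock in
// index order; the number of per-processor locks taken is returned and must be
// passed back to ready_mutate_unlock so the same range is released.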
uint_fast32_t ready_mutate_lock( void ) with(*__scheduler_lock) {
	/* paranoid */ verify( ! __preemption_enabled() );

	// Step 1 : lock global lock
	// It is needed to avoid processors that register mid Critical-Section
	// to simply lock their own lock and enter.
	__atomic_acquire( &lock );

	// Step 2 : lock per-proc lock
	// Processors that are currently being registered aren't counted
	// but can't be in read_lock or in the critical section.
	// All other processors are counted
	uint_fast32_t s = ready;
	for(uint_fast32_t i = 0; i < s; i++) {
		__atomic_acquire( &data[i].lock );
	}

	/* paranoid */ verify( ! __preemption_enabled() );
	return s;
}

void ready_mutate_unlock( uint_fast32_t last_s ) with(*__scheduler_lock) {
	/* paranoid */ verify( ! __preemption_enabled() );

	// Step 1 : release local locks
	// This must be done while the global lock is held to avoid
	// threads that were created mid critical section
	// racing to lock their local locks and having the writer
	// immediately unlock them
	// Alternative solution : return s in write_lock and pass it to write_unlock
	for(uint_fast32_t i = 0; i < last_s; i++) {
		verify(data[i].lock);
		__atomic_store_n(&data[i].lock, (bool)false, __ATOMIC_RELEASE);
	}

	// Step 2 : release global lock
	/*paranoid*/ assert(true == lock);
	__atomic_store_n(&lock, (bool)false, __ATOMIC_RELEASE);

	/* paranoid */ verify( ! __preemption_enabled() );
}

//=======================================================================
// Cforall Ready Queue used for scheduling
//=======================================================================
void ?{}(__ready_queue_t & this) with (this) {
	lanes.data  = 0p;
	lanes.count = 0;
}

void ^?{}(__ready_queue_t & this) with (this) {
	verify( 1 == lanes.count );
	free(lanes.data);
}

//-----------------------------------------------------------------------
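// idx_from_r splits one random draw into two decisions: the low bits (r % BIAS)
// decide whether to stay on the caller's preferred lanes and the high bits
// (r / BIAS) supply the index itself, so with BIAS == 4 the preferred lanes are
// used 3 times out of 4. For example, r == 13 gives rlow == 1 and rhigh == 3, so
// the caller is directed to lane 'preferred + 3' and the choice is reported as
// local. Callers reduce the returned index modulo lanes.count.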
static inline [unsigned, bool] idx_from_r(unsigned r, unsigned preferred) {
	unsigned i;
	bool local;
	#if defined(BIAS)
		unsigned rlow  = r % BIAS;
		unsigned rhigh = r / BIAS;
		if((0 != rlow) && preferred >= 0) {
			// (BIAS - 1) out of BIAS chances
			// Use preferred queues
			i = preferred + (rhigh % 4);
			local = true;
		}
		else {
			// 1 out of BIAS chances
			// Use all queues
			i = rhigh;
			local = false;
		}
	#else
		i = r;
		local = false;
	#endif
	return [i, local];
}

//-----------------------------------------------------------------------
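// push picks a lane with the biased draw above, try-locks it, and retries with a
// fresh draw if the lock is contended (under USE_MPSC the subqueue push always
// succeeds, so the first pick is kept). 'external' marks pushes coming from
// outside the cluster's own processors; those pass -1 as the preferred index and
// update the cluster-wide ext.* counters instead of the thread-local ones.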
__attribute__((hot)) bool push(struct cluster * cltr, struct $thread * thrd) with (cltr->ready_queue) {
	__cfadbg_print_safe(ready_queue, "Kernel : Pushing %p on cluster %p\n", thrd, cltr);

	const bool external = (!kernelTLS().this_processor) || (cltr != kernelTLS().this_processor->cltr);

	// write timestamp
	thrd->link.ts = rdtscl();

	bool first = false;
	__attribute__((unused)) bool local;
	__attribute__((unused)) int preferred;
	#if defined(BIAS)
		/* paranoid */ verify(external || kernelTLS().this_processor->cltr_id < lanes.count );
		preferred =
			//*
			external ? -1 : kernelTLS().this_processor->cltr_id;
			/*/
			thrd->link.preferred * 4;
			//*/
	#endif

	// Try to pick a lane and lock it
	unsigned i;
	do {
		// Pick the index of a lane
		// unsigned r = __tls_rand();
		unsigned r = __tls_rand_fwd();
		[i, local] = idx_from_r(r, preferred);

		i %= __atomic_load_n( &lanes.count, __ATOMIC_RELAXED );

		#if !defined(__CFA_NO_STATISTICS__)
			if(external) {
				if(local) __atomic_fetch_add(&cltr->stats->ready.pick.ext.local, 1, __ATOMIC_RELAXED);
				__atomic_fetch_add(&cltr->stats->ready.pick.ext.attempt, 1, __ATOMIC_RELAXED);
			}
			else {
				if(local) __tls_stats()->ready.pick.push.local++;
				__tls_stats()->ready.pick.push.attempt++;
			}
		#endif

	#if defined(USE_MPSC)
		// mpsc always succeeds
	} while( false );
	#else
		// If we can't lock it retry
	} while( !__atomic_try_acquire( &lanes.data[i].lock ) );
	#endif

	// Actually push it
	push(lanes.data[i], thrd);

	#if !defined(USE_MPSC)
		// Unlock and return
		__atomic_unlock( &lanes.data[i].lock );
	#endif

	// Mark the current index in the tls rng instance as having an item
	__tls_rand_advance_bck();

	__cfadbg_print_safe(ready_queue, "Kernel : Pushed %p on cluster %p (idx: %u, mask %llu, first %d)\n", thrd, cltr, i, used.mask[0], lane_first);

	// Update statistics
	#if !defined(__CFA_NO_STATISTICS__)
		if(external) {
			if(local) __atomic_fetch_add(&cltr->stats->ready.pick.ext.lsuccess, 1, __ATOMIC_RELAXED);
			__atomic_fetch_add(&cltr->stats->ready.pick.ext.success, 1, __ATOMIC_RELAXED);
		}
		else {
			if(local) __tls_stats()->ready.pick.push.lsuccess++;
			__tls_stats()->ready.pick.push.success++;
		}
	#endif

	// return whether or not the list was empty before this push
	return first;
}

static struct $thread * try_pop(struct cluster * cltr, unsigned i, unsigned j);
static struct $thread * try_pop(struct cluster * cltr, unsigned i);

// Pop from the ready queue from a given cluster
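// pop uses a "power of two choices" heuristic: draw two lane indices and let
// try_pop(cltr, i, j) below keep the lane whose head carries the older
// timestamp. Up to 25 draws are attempted before giving up, so a 0p result
// means the queue merely looked empty, not that it provably is.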
__attribute__((hot)) $thread * pop(struct cluster * cltr) with (cltr->ready_queue) {
	/* paranoid */ verify( lanes.count > 0 );
	unsigned count = __atomic_load_n( &lanes.count, __ATOMIC_RELAXED );
	int preferred;
	#if defined(BIAS)
		/* paranoid */ verify(kernelTLS().this_processor->cltr_id < lanes.count );
		preferred = kernelTLS().this_processor->cltr_id;
	#endif


	// As long as the list is not empty, try finding a lane that isn't empty and pop from it
	for(25) {
		// Pick two lists at random
		// unsigned ri = __tls_rand();
		// unsigned rj = __tls_rand();
		unsigned ri = __tls_rand_bck();
		unsigned rj = __tls_rand_bck();

		unsigned i, j;
		__attribute__((unused)) bool locali, localj;
		[i, locali] = idx_from_r(ri, preferred);
		[j, localj] = idx_from_r(rj, preferred);

		#if !defined(__CFA_NO_STATISTICS__)
			if(locali && localj) {
				__tls_stats()->ready.pick.pop.local++;
			}
		#endif

		i %= count;
		j %= count;

		// try popping from the 2 picked lists
		struct $thread * thrd = try_pop(cltr, i, j);
		if(thrd) {
			#if defined(BIAS) && !defined(__CFA_NO_STATISTICS__)
				if( locali || localj ) __tls_stats()->ready.pick.pop.lsuccess++;
			#endif
			return thrd;
		}
	}

	// All lanes were empty, return 0p
	return 0p;
}

__attribute__((hot)) struct $thread * pop_slow(struct cluster * cltr) with (cltr->ready_queue) {
	/* paranoid */ verify( lanes.count > 0 );
	unsigned count = __atomic_load_n( &lanes.count, __ATOMIC_RELAXED );
	unsigned offset = __tls_rand();
	for(i; count) {
		unsigned idx = (offset + i) % count;
		struct $thread * thrd = try_pop(cltr, idx);
		if(thrd) {
			return thrd;
		}
	}

	// All lanes were empty, return 0p
	return 0p;
}


//-----------------------------------------------------------------------
// Given 2 indexes, pick the list with the oldest push and try to pop from it
static inline struct $thread * try_pop(struct cluster * cltr, unsigned i, unsigned j) with (cltr->ready_queue) {
	#if !defined(__CFA_NO_STATISTICS__)
		__tls_stats()->ready.pick.pop.attempt++;
	#endif

	// Pick the best list
	int w = i;
	if( __builtin_expect(!is_empty(lanes.data[j]), true) ) {
		w = (ts(lanes.data[i]) < ts(lanes.data[j])) ? i : j;
	}

	return try_pop(cltr, w);
}

static inline struct $thread * try_pop(struct cluster * cltr, unsigned w) with (cltr->ready_queue) {
	// Get relevant elements locally
	__intrusive_lane_t & lane = lanes.data[w];

	// If list looks empty retry
	if( is_empty(lane) ) return 0p;

	// If we can't get the lock retry
	if( !__atomic_try_acquire(&lane.lock) ) return 0p;


	// If list is empty, unlock and retry
	if( is_empty(lane) ) {
		__atomic_unlock(&lane.lock);
		return 0p;
	}

	// Actually pop the list
	struct $thread * thrd;
	thrd = pop(lane);

	/* paranoid */ verify(thrd);
	/* paranoid */ verify(lane.lock);

	// Unlock and return
	__atomic_unlock(&lane.lock);

	// Update statistics
	#if !defined(__CFA_NO_STATISTICS__)
		__tls_stats()->ready.pick.pop.success++;
	#endif

	// Update the thread bias
	thrd->link.preferred = w / 4;

	// return the popped thread
	return thrd;
}
//-----------------------------------------------------------------------

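// Remove a thread only if it currently sits at the head of one of the lanes;
// each lane is locked, inspected, and unlocked in turn until a match is found.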
bool remove_head(struct cluster * cltr, struct $thread * thrd) with (cltr->ready_queue) {
	for(i; lanes.count) {
		__intrusive_lane_t & lane = lanes.data[i];

		bool removed = false;

		__atomic_acquire(&lane.lock);
		if(head(lane)->link.next == thrd) {
			$thread * pthrd;
			pthrd = pop(lane);

			/* paranoid */ verify( pthrd == thrd );

			removed = true;
		}
		__atomic_unlock(&lane.lock);

		if( removed ) return true;
	}
	return false;
}

//-----------------------------------------------------------------------

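// Sanity check used by the verify build: every lane must be unlocked and its
// intrusive doubly-linked list well formed, with the head and tail anchors
// consistent with whether the lane is empty.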
static void check( __ready_queue_t & q ) with (q) {
	#if defined(__CFA_WITH_VERIFY__) && !defined(USE_MPSC)
		{
			for( idx ; lanes.count ) {
				__intrusive_lane_t & sl = lanes.data[idx];
				assert(!lanes.data[idx].lock);

				assert(head(sl)->link.prev == 0p );
				assert(head(sl)->link.next->link.prev == head(sl) );
				assert(tail(sl)->link.next == 0p );
				assert(tail(sl)->link.prev->link.next == tail(sl) );

				if(is_empty(sl)) {
					assert(tail(sl)->link.prev == head(sl));
					assert(head(sl)->link.next == tail(sl));
				} else {
					assert(tail(sl)->link.prev != head(sl));
					assert(head(sl)->link.next != tail(sl));
				}
			}
		}
	#endif
}

// Call this function if the intrusive list was moved using memcpy;
// it fixes the list so that the pointers back to the anchors aren't left dangling
static inline void fix(__intrusive_lane_t & ll) {
	#if !defined(USE_MPSC)
		// if the list is not empty then follow the pointer and fix its reverse
		if(!is_empty(ll)) {
			head(ll)->link.next->link.prev = head(ll);
			tail(ll)->link.prev->link.next = tail(ll);
		}
		// Otherwise just reset the list
		else {
			verify(tail(ll)->link.next == 0p);
			tail(ll)->link.prev = head(ll);
			head(ll)->link.next = tail(ll);
			verify(head(ll)->link.prev == 0p);
		}
	#endif
}

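// Processors are handed contiguous blocks of preferred lanes: assign_list walks
// a processor list and gives each processor the next cltr_id, 'inc' apart, so
// with inc == 4 (as used by grow/shrink below) the k-th processor prefers lanes
// [4k, 4k + 3]. Active processors are numbered before idle ones.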
static void assign_list(unsigned & value, const int inc, dlist(processor, processor) & list, unsigned count) {
	processor * it = &list`first;
	for(unsigned i = 0; i < count; i++) {
		/* paranoid */ verifyf( it, "Unexpected null iterator, at index %u of %u\n", i, count);
		it->cltr_id = value;
		value += inc;
		it = &(*it)`next;
	}
}

static void reassign_cltr_id(struct cluster * cltr, const int inc) {
	unsigned preferred = 0;
	assign_list(preferred, inc, cltr->procs.actives, cltr->procs.total - cltr->procs.idle);
	assign_list(preferred, inc, cltr->procs.idles  , cltr->procs.idle );
}

// Grow the ready queue
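// Growing keeps 4 lanes per processor once there are at least 2 processors (a
// single processor keeps a single lane). The lane array is realloc'ed, so the
// intrusive anchors of the surviving lanes are memcpy'ed and must be re-linked
// with fix() before the newly added lanes are constructed.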
void ready_queue_grow(struct cluster * cltr) {
	size_t ncount;
	int target = cltr->procs.total;

	/* paranoid */ verify( ready_mutate_islocked() );
	__cfadbg_print_safe(ready_queue, "Kernel : Growing ready queue\n");

	// Make sure that everything is consistent
	/* paranoid */ check( cltr->ready_queue );

	// grow the ready queue
	with( cltr->ready_queue ) {
		// Find new count
		// Make sure we always have at least 1 list
		if(target >= 2) {
			ncount = target * 4;
		} else {
			ncount = 1;
		}

		// Allocate new array (uses realloc and memcpies the data)
		lanes.data = alloc( ncount, lanes.data`realloc );

		// Fix the moved data
		for( idx; (size_t)lanes.count ) {
			fix(lanes.data[idx]);
		}

		// Construct new data
		for( idx; (size_t)lanes.count ~ ncount) {
			(lanes.data[idx]){};
		}

		// Update original
		lanes.count = ncount;
	}

	reassign_cltr_id(cltr, 4);

	// Make sure that everything is consistent
	/* paranoid */ check( cltr->ready_queue );

	__cfadbg_print_safe(ready_queue, "Kernel : Growing ready queue done\n");

	/* paranoid */ verify( ready_mutate_islocked() );
}

// Shrink the ready queue
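// Shrinking works in the opposite order: the lane count is reduced first, every
// thread still sitting in a removed lane is popped and re-pushed through the
// normal push path so it lands in a surviving lane, the removed lanes are
// destructed, and only then is the array realloc'ed down and re-linked with fix().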
void ready_queue_shrink(struct cluster * cltr) {
	/* paranoid */ verify( ready_mutate_islocked() );
	__cfadbg_print_safe(ready_queue, "Kernel : Shrinking ready queue\n");

	// Make sure that everything is consistent
	/* paranoid */ check( cltr->ready_queue );

	int target = cltr->procs.total;

	with( cltr->ready_queue ) {
		// Remember old count
		size_t ocount = lanes.count;

		// Find new count
		// Make sure we always have at least 1 list
		lanes.count = target >= 2 ? target * 4 : 1;
		/* paranoid */ verify( ocount >= lanes.count );
		/* paranoid */ verify( lanes.count == target * 4 || target < 2 );

		// for printing count the number of displaced threads
		#if defined(__CFA_DEBUG_PRINT__) || defined(__CFA_DEBUG_PRINT_READY_QUEUE__)
			__attribute__((unused)) size_t displaced = 0;
		#endif

		// redistribute old data
		for( idx; (size_t)lanes.count ~ ocount) {
			// Lock is not strictly needed but makes checking invariants much easier
			__attribute__((unused)) bool locked = __atomic_try_acquire(&lanes.data[idx].lock);
			verify(locked);

			// As long as we can pop from this lane, push the threads somewhere else in the queue
			while(!is_empty(lanes.data[idx])) {
				struct $thread * thrd;
				thrd = pop(lanes.data[idx]);

				push(cltr, thrd);

				// for printing count the number of displaced threads
				#if defined(__CFA_DEBUG_PRINT__) || defined(__CFA_DEBUG_PRINT_READY_QUEUE__)
					displaced++;
				#endif
			}

			// Unlock the lane
			__atomic_unlock(&lanes.data[idx].lock);

			// TODO print the queue statistics here

			^(lanes.data[idx]){};
		}

		__cfadbg_print_safe(ready_queue, "Kernel : Shrinking ready queue displaced %zu threads\n", displaced);

		// Allocate new array (uses realloc and memcpies the data)
		lanes.data = alloc( lanes.count, lanes.data`realloc );

		// Fix the moved data
		for( idx; (size_t)lanes.count ) {
			fix(lanes.data[idx]);
		}
	}

	reassign_cltr_id(cltr, 4);

	// Make sure that everything is consistent
	/* paranoid */ check( cltr->ready_queue );

	__cfadbg_print_safe(ready_queue, "Kernel : Shrinking ready queue done\n");
	/* paranoid */ verify( ready_mutate_islocked() );
}