source: libcfa/src/concurrency/ready_queue.cfa@ 53e4562

Last change on this file since 53e4562 was d72c074, checked in by Thierry Delisle <tdelisle@…>, 5 years ago

Added option to bias threads to queues, instead of processors to queues

//
// Cforall Version 1.0.0 Copyright (C) 2019 University of Waterloo
//
// The contents of this file are covered under the licence agreement in the
// file "LICENCE" distributed with Cforall.
//
// ready_queue.cfa --
//
// Author           : Thierry Delisle
// Created On       : Mon Nov dd 16:29:18 2019
// Last Modified By :
// Last Modified On :
// Update Count     :
//

#define __cforall_thread__
// #define __CFA_DEBUG_PRINT_READY_QUEUE__

#include "bits/defs.hfa"
#include "kernel_private.hfa"

#define _GNU_SOURCE
#include "stdlib.hfa"
#include "math.hfa"

#include <unistd.h>

#include "snzi.hfa"
#include "ready_subqueue.hfa"

static const size_t cache_line_size = 64;

// No overridden function, no environment variable, no define:
// fall back to a magic number
#ifndef __CFA_MAX_PROCESSORS__
	#define __CFA_MAX_PROCESSORS__ 1024
#endif

#define BIAS 64
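// With BIAS = 64, the lane selection in push and pop below keeps roughly (BIAS - 1) out of
// BIAS picks on a processor's preferred lanes and sends about 1 out of BIAS to a uniformly
// random lane, so work still spreads across the whole cluster over time.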

// returns the maximum number of processors the RWLock supports
__attribute__((weak)) unsigned __max_processors() {
	const char * max_cores_s = getenv("CFA_MAX_PROCESSORS");
	if(!max_cores_s) {
		__cfadbg_print_nolock(ready_queue, "No CFA_MAX_PROCESSORS in ENV\n");
		return __CFA_MAX_PROCESSORS__;
	}

	char * endptr = 0p;
	long int max_cores_l = strtol(max_cores_s, &endptr, 10);
	if(max_cores_l < 1 || max_cores_l > 65535) {
		__cfadbg_print_nolock(ready_queue, "CFA_MAX_PROCESSORS out of range : %ld\n", max_cores_l);
		return __CFA_MAX_PROCESSORS__;
	}
	if('\0' != *endptr) {
		__cfadbg_print_nolock(ready_queue, "CFA_MAX_PROCESSORS not a decimal number : %s\n", max_cores_s);
		return __CFA_MAX_PROCESSORS__;
	}

	return max_cores_l;
}
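
// Illustrative override order (a sketch; names outside this file are hypothetical):
//   1. define a non-weak __max_processors() in the application to replace this one entirely;
//   2. otherwise set the environment variable, e.g. `CFA_MAX_PROCESSORS=128 ./a.out`;
//   3. otherwise the compile-time default __CFA_MAX_PROCESSORS__ (1024) is used.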

//=======================================================================
// Cluster wide reader-writer lock
//=======================================================================
void ?{}(__scheduler_RWLock_t & this) {
	this.max   = __max_processors();
	this.alloc = 0;
	this.ready = 0;
	this.lock  = false;
	this.data  = alloc(this.max);

	/*paranoid*/ verify( 0 == (((uintptr_t)(this.data    )) % 64) );
	/*paranoid*/ verify( 0 == (((uintptr_t)(this.data + 1)) % 64) );
	/*paranoid*/ verify(__atomic_is_lock_free(sizeof(this.alloc), &this.alloc));
	/*paranoid*/ verify(__atomic_is_lock_free(sizeof(this.ready), &this.ready));

}
void ^?{}(__scheduler_RWLock_t & this) {
	free(this.data);
}

void ?{}( __scheduler_lock_id_t & this, __processor_id_t * proc ) {
	this.handle = proc;
	this.lock   = false;
	#ifdef __CFA_WITH_VERIFY__
		this.owned = false;
	#endif
}
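
// Locking protocol in brief: every registered processor owns one cache-line aligned slot in
// 'data' with its own 'lock' flag. A reader (the common scheduling path, implemented outside
// this file) is expected to acquire only its own slot's lock, while the writer path below
// (ready_mutate_lock) takes the global 'lock' and then every slot lock, so readers only ever
// contend with writers, never with each other.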

//=======================================================================
// Lock-Free registering/unregistering of threads
unsigned doregister( struct __processor_id_t * proc ) with(*__scheduler_lock) {
	__cfadbg_print_safe(ready_queue, "Kernel : Registering proc %p for RW-Lock\n", proc);

	// Step - 1 : check if there is already space in the data
	uint_fast32_t s = ready;

	// Check among all the ready
	for(uint_fast32_t i = 0; i < s; i++) {
		__processor_id_t * null = 0p; // Re-write every loop since compare thrashes it
		if( __atomic_load_n(&data[i].handle, (int)__ATOMIC_RELAXED) == null
			&& __atomic_compare_exchange_n( &data[i].handle, &null, proc, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) {
			/*paranoid*/ verify(i < ready);
			/*paranoid*/ verify(0 == (__alignof__(data[i]) % cache_line_size));
			/*paranoid*/ verify((((uintptr_t)&data[i]) % cache_line_size) == 0);
			return i;
		}
	}

	if(max <= alloc) abort("Trying to create more than %u processors", __scheduler_lock->max);

	// Step - 2 : F&A to get a new spot in the array.
	uint_fast32_t n = __atomic_fetch_add(&alloc, 1, __ATOMIC_SEQ_CST);
	if(max <= n) abort("Trying to create more than %u processors", __scheduler_lock->max);

	// Step - 3 : Mark space as used and then publish it.
	__scheduler_lock_id_t * storage = (__scheduler_lock_id_t *)&data[n];
	(*storage){ proc };
	while(true) {
		unsigned copy = n;
		if( __atomic_load_n(&ready, __ATOMIC_RELAXED) == n
			&& __atomic_compare_exchange_n(&ready, &copy, n + 1, true, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
			break;
		asm volatile("pause");
	}

	__cfadbg_print_safe(ready_queue, "Kernel : Registering proc %p done, id %lu\n", proc, n);

	// Return new spot.
	/*paranoid*/ verify(n < ready);
	/*paranoid*/ verify(__alignof__(data[n]) == (2 * cache_line_size));
	/*paranoid*/ verify((((uintptr_t)&data[n]) % cache_line_size) == 0);
	return n;
}
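
// Registration in brief: a processor first tries to reuse a slot whose handle was reset to 0p
// by unregister (Step 1); failing that, it claims a fresh index with fetch-and-add (Step 2).
// The spin loop in Step 3 publishes slots strictly in index order: index n only becomes
// visible once 'ready' has reached n, so a scan over [0, ready) never sees an unconstructed slot.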

void unregister( struct __processor_id_t * proc ) with(*__scheduler_lock) {
	unsigned id = proc->id;
	/*paranoid*/ verify(id < ready);
	/*paranoid*/ verify(proc == __atomic_load_n(&data[id].handle, __ATOMIC_RELAXED));
	__atomic_store_n(&data[id].handle, 0p, __ATOMIC_RELEASE);

	__cfadbg_print_safe(ready_queue, "Kernel : Unregister proc %p\n", proc);
}

//-----------------------------------------------------------------------
// Writer side : acquire when changing the ready queue, e.g. adding more
// queues or removing them.
uint_fast32_t ready_mutate_lock( void ) with(*__scheduler_lock) {
	// Step 1 : lock global lock
	// It is needed to avoid processors that register mid Critical-Section
	// to simply lock their own lock and enter.
	__atomic_acquire( &lock );

	// Step 2 : lock per-proc lock
	// Processors that are currently being registered aren't counted
	// but can't be in read_lock or in the critical section.
	// All other processors are counted
	uint_fast32_t s = ready;
	for(uint_fast32_t i = 0; i < s; i++) {
		__atomic_acquire( &data[i].lock );
	}

	return s;
}

void ready_mutate_unlock( uint_fast32_t last_s ) with(*__scheduler_lock) {
	// Step 1 : release local locks
	// This must be done while the global lock is held to avoid
	// threads that were created mid critical section
	// to race to lock their local locks and have the writer
	// immediately unlock them
	// Alternative solution : return s in write_lock and pass it to write_unlock
	for(uint_fast32_t i = 0; i < last_s; i++) {
		verify(data[i].lock);
		__atomic_store_n(&data[i].lock, (bool)false, __ATOMIC_RELEASE);
	}

	// Step 2 : release global lock
	/*paranoid*/ assert(true == lock);
	__atomic_store_n(&lock, (bool)false, __ATOMIC_RELEASE);
}
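
// Writer-side usage sketch (illustrative; the resizing callers live elsewhere in the runtime):
//
//   uint_fast32_t last_size = ready_mutate_lock();
//   ready_queue_grow( cltr );          // or ready_queue_shrink( cltr )
//   ready_mutate_unlock( last_size );
//
// Both resize functions below verify ready_mutate_islocked(), so they must only be called
// inside such a critical section.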

//=======================================================================
// Cforall Ready Queue used for scheduling
//=======================================================================
void ?{}(__ready_queue_t & this) with (this) {

	lanes.data = alloc(4);
	for( i; 4 ) {
		(lanes.data[i]){};
	}
	lanes.count = 4;
	snzi{ log2( lanes.count / 8 ) };
}

void ^?{}(__ready_queue_t & this) with (this) {
	verify( 4 == lanes.count );
	verify( !query( snzi ) );

	^(snzi){};

	for( i; 4 ) {
		^(lanes.data[i]){};
	}
	free(lanes.data);
}
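
// The queue always starts and ends with exactly 4 lanes; ready_queue_grow/shrink below add or
// remove lanes 4 at a time, matching push/pop which treat 'this_processor->id * 4' as the
// start of a processor's preferred block of 4 lanes. The snzi is rebuilt with
// log2( lanes.count / 8 ) whenever the lane count changes.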

//-----------------------------------------------------------------------
__attribute__((hot)) bool query(struct cluster * cltr) {
	return query(cltr->ready_queue.snzi);
}

//-----------------------------------------------------------------------
__attribute__((hot)) bool push(struct cluster * cltr, struct $thread * thrd) with (cltr->ready_queue) {
	__cfadbg_print_safe(ready_queue, "Kernel : Pushing %p on cluster %p\n", thrd, cltr);

	// write timestamp
	thrd->link.ts = rdtscl();

	#if defined(BIAS) && !defined(__CFA_NO_STATISTICS__)
		bool local = false;
		int preferred =
			//*
			kernelTLS.this_processor ? kernelTLS.this_processor->id * 4 : -1;
			/*/
			thrd->link.preferred * 4;
			//*/


	#endif

	// Try to pick a lane and lock it
	unsigned i;
	do {
		// Pick the index of a lane
		#if defined(BIAS)
			unsigned r = __tls_rand();
			unsigned rlow  = r % BIAS;
			unsigned rhigh = r / BIAS;
			if((0 != rlow) && preferred >= 0) {
				// (BIAS - 1) out of BIAS chances
				// Use preferred queues
				i = preferred + (rhigh % 4);

				#if !defined(__CFA_NO_STATISTICS__)
					local = true;
					__tls_stats()->ready.pick.push.local++;
				#endif
			}
			else {
				// 1 out of BIAS chances
				// Use all queues
				i = rhigh;
				local = false;
			}
		#else
			i = __tls_rand();
		#endif

		i %= __atomic_load_n( &lanes.count, __ATOMIC_RELAXED );

		#if !defined(__CFA_NO_STATISTICS__)
			__tls_stats()->ready.pick.push.attempt++;
		#endif

		// If we can't lock it retry
	} while( !__atomic_try_acquire( &lanes.data[i].lock ) );

	bool first = false;

	// Actually push it
	bool lane_first = push(lanes.data[i], thrd);

	// If this lane used to be empty we need to do more
	if(lane_first) {
		// Check if the entire queue used to be empty
		first = !query(snzi);

		// Update the snzi
		arrive( snzi, i );
	}

	// Unlock and return
	__atomic_unlock( &lanes.data[i].lock );

	__cfadbg_print_safe(ready_queue, "Kernel : Pushed %p on cluster %p (idx: %u, mask %llu, first %d)\n", thrd, cltr, i, used.mask[0], lane_first);

	// Update statistics
	#if !defined(__CFA_NO_STATISTICS__)
		#if defined(BIAS)
			if( local ) __tls_stats()->ready.pick.push.lsuccess++;
		#endif
		__tls_stats()->ready.pick.push.success++;
	#endif

	// return whether or not the list was empty before this push
	return first;
}
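
// Lane selection in push, in numbers: one random value r is split into rlow = r % BIAS and
// rhigh = r / BIAS. With BIAS = 64, rlow is non-zero about 63 times out of 64, so most pushes
// land on one of the 4 lanes of the preferred block (preferred + rhigh % 4) and roughly 1 in
// 64 goes to a uniformly random lane, which keeps other processors able to find the work.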

static struct $thread * try_pop(struct cluster * cltr, unsigned i, unsigned j);
static struct $thread * try_pop(struct cluster * cltr, unsigned i);

// Pop from the ready queue from a given cluster
__attribute__((hot)) $thread * pop(struct cluster * cltr) with (cltr->ready_queue) {
	/* paranoid */ verify( lanes.count > 0 );
	#if defined(BIAS)
		// Don't bother trying locally too much
		int local_tries = 8;
	#endif

	// As long as the list is not empty, try finding a lane that isn't empty and pop from it
	while( query(snzi) ) {
		// Pick two lists at random
		unsigned i,j;
		#if defined(BIAS)
			#if !defined(__CFA_NO_STATISTICS__)
				bool local = false;
			#endif
			uint64_t r = __tls_rand();
			unsigned rlow = r % BIAS;
			uint64_t rhigh = r / BIAS;
			if(local_tries && 0 != rlow) {
				// (BIAS - 1) out of BIAS chances
				// Use preferred queues
				unsigned pid = kernelTLS.this_processor->id * 4;
				i = pid + (rhigh % 4);
				j = pid + ((rhigh >> 32ull) % 4);

				// count the tries
				local_tries--;

				#if !defined(__CFA_NO_STATISTICS__)
					local = true;
					__tls_stats()->ready.pick.pop.local++;
				#endif
			}
			else {
				// 1 out of BIAS chances
				// Use all queues
				i = rhigh;
				j = rhigh >> 32ull;
			}
		#else
			i = __tls_rand();
			j = __tls_rand();
		#endif

		i %= __atomic_load_n( &lanes.count, __ATOMIC_RELAXED );
		j %= __atomic_load_n( &lanes.count, __ATOMIC_RELAXED );

		// try popping from the 2 picked lists
		struct $thread * thrd = try_pop(cltr, i, j);
		if(thrd) {
			#if defined(BIAS) && !defined(__CFA_NO_STATISTICS__)
				if( local ) __tls_stats()->ready.pick.pop.lsuccess++;
			#endif
			return thrd;
		}
	}

	// All lanes were empty, return 0p
	return 0p;
}
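
// pop is a "pick two, take the older" scheme: two candidate lanes are chosen, then try_pop
// below keeps the one whose head has the oldest push timestamp. local_tries bounds how many
// times a single call favours this processor's own 4 lanes before falling back to fully
// random picks, so a processor cannot spin forever on a block that happens to be empty.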

//-----------------------------------------------------------------------
// Given 2 indexes, pick the list with the oldest push and try to pop from it
static inline struct $thread * try_pop(struct cluster * cltr, unsigned i, unsigned j) with (cltr->ready_queue) {
	#if !defined(__CFA_NO_STATISTICS__)
		__tls_stats()->ready.pick.pop.attempt++;
	#endif

	// Pick the best list
	int w = i;
	if( __builtin_expect(!is_empty(lanes.data[j]), true) ) {
		w = (ts(lanes.data[i]) < ts(lanes.data[j])) ? i : j;
	}

	return try_pop(cltr, w);
}

static inline struct $thread * try_pop(struct cluster * cltr, unsigned w) with (cltr->ready_queue) {
	// Get relevant elements locally
	__intrusive_lane_t & lane = lanes.data[w];

	// If list looks empty retry
	if( is_empty(lane) ) return 0p;

	// If we can't get the lock retry
	if( !__atomic_try_acquire(&lane.lock) ) return 0p;


	// If list is empty, unlock and retry
	if( is_empty(lane) ) {
		__atomic_unlock(&lane.lock);
		return 0p;
	}

	// Actually pop the list
	struct $thread * thrd;
	bool emptied;
	[thrd, emptied] = pop(lane);

	/* paranoid */ verify(thrd);
	/* paranoid */ verify(lane.lock);

	// If this was the last element in the lane
	if(emptied) {
		depart( snzi, w );
	}

	// Unlock and return
	__atomic_unlock(&lane.lock);

	// Update statistics
	#if !defined(__CFA_NO_STATISTICS__)
		__tls_stats()->ready.pick.pop.success++;
	#endif

	// Update the thread bias
	thrd->link.preferred = w / 4;

	// return the popped thread
	return thrd;
}
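
// Note on the emptiness checks above: the lane is tested once before taking the lock (cheap
// early exit) and once after, because the first test races with concurrent pops. Returning 0p
// therefore means "try another lane", not "the cluster is out of work". The final 'w / 4'
// store records which block of 4 lanes the thread came from, so a biased push can later put
// it back near the same processor.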
//-----------------------------------------------------------------------

bool remove_head(struct cluster * cltr, struct $thread * thrd) with (cltr->ready_queue) {
	for(i; lanes.count) {
		__intrusive_lane_t & lane = lanes.data[i];

		bool removed = false;

		__atomic_acquire(&lane.lock);
		if(head(lane)->link.next == thrd) {
			$thread * pthrd;
			bool emptied;
			[pthrd, emptied] = pop(lane);

			/* paranoid */ verify( pthrd == thrd );

			removed = true;
			if(emptied) {
				depart( snzi, i );
			}
		}
		__atomic_unlock(&lane.lock);

		if( removed ) return true;
	}
	return false;
}

//-----------------------------------------------------------------------

static void check( __ready_queue_t & q ) with (q) {
	#if defined(__CFA_WITH_VERIFY__)
	{
		for( idx ; lanes.count ) {
			__intrusive_lane_t & sl = lanes.data[idx];
			assert(!lanes.data[idx].lock);

			assert(head(sl)->link.prev == 0p );
			assert(head(sl)->link.next->link.prev == head(sl) );
			assert(tail(sl)->link.next == 0p );
			assert(tail(sl)->link.prev->link.next == tail(sl) );

			if(sl.before.link.ts == 0l) {
				assert(tail(sl)->link.prev == head(sl));
				assert(head(sl)->link.next == tail(sl));
			} else {
				assert(tail(sl)->link.prev != head(sl));
				assert(head(sl)->link.next != tail(sl));
			}
		}
	}
	#endif
}

// Call this function if the intrusive list was moved using memcpy;
// fixes the list so that the pointers back to anchors aren't left dangling
static inline void fix(__intrusive_lane_t & ll) {
	// if the list is not empty then follow the pointer and fix its reverse
	if(!is_empty(ll)) {
		head(ll)->link.next->link.prev = head(ll);
		tail(ll)->link.prev->link.next = tail(ll);
	}
	// Otherwise just reset the list
	else {
		verify(tail(ll)->link.next == 0p);
		tail(ll)->link.prev = head(ll);
		head(ll)->link.next = tail(ll);
		verify(head(ll)->link.prev == 0p);
	}
}
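
// Why fix() exists: the head/tail anchors of a lane live inside the lane object itself (see
// ready_subqueue.hfa), so the first and last nodes hold pointers back into that storage. When
// grow/shrink relocate lanes.data with alloc() (realloc + memcpy), those back-pointers still
// reference the old storage; fix() re-aims them at the moved anchors, or resets the empty shape.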

// Grow the ready queue
void ready_queue_grow  (struct cluster * cltr) {
	/* paranoid */ verify( ready_mutate_islocked() );
	__cfadbg_print_safe(ready_queue, "Kernel : Growing ready queue\n");

	// Make sure that everything is consistent
	/* paranoid */ check( cltr->ready_queue );

	// grow the ready queue
	with( cltr->ready_queue ) {
		^(snzi){};

		size_t ncount = lanes.count;

		// increase count
		ncount += 4;

		// Allocate new array (uses realloc and memcpies the data)
		lanes.data = alloc(lanes.data, ncount);

		// Fix the moved data
		for( idx; (size_t)lanes.count ) {
			fix(lanes.data[idx]);
		}

		// Construct new data
		for( idx; (size_t)lanes.count ~ ncount) {
			(lanes.data[idx]){};
		}

		// Update original
		lanes.count = ncount;

		// Re-create the snzi
		snzi{ log2( lanes.count / 8 ) };
		for( idx; (size_t)lanes.count ) {
			if( !is_empty(lanes.data[idx]) ) {
				arrive(snzi, idx);
			}
		}
	}

	// Make sure that everything is consistent
	/* paranoid */ check( cltr->ready_queue );

	__cfadbg_print_safe(ready_queue, "Kernel : Growing ready queue done\n");

	/* paranoid */ verify( ready_mutate_islocked() );
}
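
// Growing, step by step: tear down the snzi, realloc the lane array and fix() every moved
// lane, construct the 4 new lanes, publish the new count, then rebuild the snzi and re-arrive
// for every lane that already holds work so waiting processors can still find it.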

// Shrink the ready queue
void ready_queue_shrink(struct cluster * cltr) {
	/* paranoid */ verify( ready_mutate_islocked() );
	__cfadbg_print_safe(ready_queue, "Kernel : Shrinking ready queue\n");

	// Make sure that everything is consistent
	/* paranoid */ check( cltr->ready_queue );

	with( cltr->ready_queue ) {
		^(snzi){};

		size_t ocount = lanes.count;
		// Check that we have some space left
		if(ocount < 8) abort("Program attempted to destroy more Ready Queues than were created");

		// reduce the actual count so push doesn't use the old queues
		lanes.count -= 4;
		verify(ocount > lanes.count);

		// for printing count the number of displaced threads
		#if defined(__CFA_DEBUG_PRINT__) || defined(__CFA_DEBUG_PRINT_READY_QUEUE__)
			__attribute__((unused)) size_t displaced = 0;
		#endif

		// redistribute old data
		for( idx; (size_t)lanes.count ~ ocount) {
			// Lock is not strictly needed but makes checking invariants much easier
			__attribute__((unused)) bool locked = __atomic_try_acquire(&lanes.data[idx].lock);
			verify(locked);

			// As long as we can pop from this lane to push the threads somewhere else in the queue
			while(!is_empty(lanes.data[idx])) {
				struct $thread * thrd;
				__attribute__((unused)) bool _;
				[thrd, _] = pop(lanes.data[idx]);

				push(cltr, thrd);

				// for printing count the number of displaced threads
				#if defined(__CFA_DEBUG_PRINT__) || defined(__CFA_DEBUG_PRINT_READY_QUEUE__)
					displaced++;
				#endif
			}

			// Unlock the lane
			__atomic_unlock(&lanes.data[idx].lock);

			// TODO print the queue statistics here

			^(lanes.data[idx]){};
		}

		__cfadbg_print_safe(ready_queue, "Kernel : Shrinking ready queue displaced %zu threads\n", displaced);

		// Allocate new array (uses realloc and memcpies the data)
		lanes.data = alloc(lanes.data, lanes.count);

		// Fix the moved data
		for( idx; (size_t)lanes.count ) {
			fix(lanes.data[idx]);
		}

		// Re-create the snzi
		snzi{ log2( lanes.count / 8 ) };
		for( idx; (size_t)lanes.count ) {
			if( !is_empty(lanes.data[idx]) ) {
				arrive(snzi, idx);
			}
		}
	}

	// Make sure that everything is consistent
	/* paranoid */ check( cltr->ready_queue );

	__cfadbg_print_safe(ready_queue, "Kernel : Shrinking ready queue done\n");
	/* paranoid */ verify( ready_mutate_islocked() );
}
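
// Shrinking note: lanes.count is lowered before the old lanes are drained so that the
// redistribution pushes above cannot land back in a lane that is about to be destroyed; the
// displaced threads go through the normal push path and the array and snzi are then rebuilt,
// mirroring ready_queue_grow.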