source: libcfa/src/concurrency/ready_queue.cfa@ ff79d5e

Last change on this file since ff79d5e was 9b1dcc2, checked in by Thierry Delisle <tdelisle@…>, 5 years ago

Changed scheduling API to adapt to non-Processors scheduling threads.

//
// Cforall Version 1.0.0 Copyright (C) 2019 University of Waterloo
//
// The contents of this file are covered under the licence agreement in the
// file "LICENCE" distributed with Cforall.
//
// ready_queue.cfa --
//
// Author           : Thierry Delisle
// Created On       : Mon Nov dd 16:29:18 2019
// Last Modified By :
// Last Modified On :
// Update Count     :
//

#define __cforall_thread__
// #define __CFA_DEBUG_PRINT_READY_QUEUE__

#include "bits/defs.hfa"
#include "kernel_private.hfa"

#define _GNU_SOURCE
#include "stdlib.hfa"
#include "math.hfa"

static const size_t cache_line_size = 64;

// No overridden function, no environment variable, no define
// fall back to a magic number
#ifndef __CFA_MAX_PROCESSORS__
	#define __CFA_MAX_PROCESSORS__ 1024
#endif

// returns the maximum number of processors the RWLock supports
__attribute__((weak)) unsigned __max_processors() {
	const char * max_cores_s = getenv("CFA_MAX_PROCESSORS");
	if(!max_cores_s) {
		__cfadbg_print_nolock(ready_queue, "No CFA_MAX_PROCESSORS in ENV\n");
		return __CFA_MAX_PROCESSORS__;
	}

	char * endptr = 0p;
	long int max_cores_l = strtol(max_cores_s, &endptr, 10);
	if(max_cores_l < 1 || max_cores_l > 65535) {
		__cfadbg_print_nolock(ready_queue, "CFA_MAX_PROCESSORS out of range : %ld\n", max_cores_l);
		return __CFA_MAX_PROCESSORS__;
	}
	if('\0' != *endptr) {
		__cfadbg_print_nolock(ready_queue, "CFA_MAX_PROCESSORS not a decimal number : %s\n", max_cores_s);
		return __CFA_MAX_PROCESSORS__;
	}

	return max_cores_l;
}
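// Hypothetical usage example: launching a program with the environment
// variable set, e.g. `CFA_MAX_PROCESSORS=256 ./a.out`, caps the number of
// RW-lock slots at 256; a missing, non-numeric, or out-of-range value
// (outside 1..65535) falls back to __CFA_MAX_PROCESSORS__.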

//=======================================================================
// Cluster wide reader-writer lock
//=======================================================================
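// Design note: the lock is one global flag plus an array of cache-line-aligned
// entries, one per registered processor. The writer (ready_mutate_lock below)
// acquires the global flag and then every per-processor entry, which is
// intended to let the read side get away with touching only a processor's own
// entry in the common case.
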
void ?{}(__scheduler_RWLock_t & this) {
	this.max   = __max_processors();
	this.alloc = 0;
	this.ready = 0;
	this.lock  = false;
	this.data  = alloc(this.max);

	/*paranoid*/ verify( 0 == (((uintptr_t)(this.data    )) % 64) );
	/*paranoid*/ verify( 0 == (((uintptr_t)(this.data + 1)) % 64) );
	/*paranoid*/ verify(__atomic_is_lock_free(sizeof(this.alloc), &this.alloc));
	/*paranoid*/ verify(__atomic_is_lock_free(sizeof(this.ready), &this.ready));

}
void ^?{}(__scheduler_RWLock_t & this) {
	free(this.data);
}

void ?{}( __scheduler_lock_id_t & this, __processor_id_t * proc ) {
	this.handle = proc;
	this.lock   = false;
}

//=======================================================================
// Lock-Free registering/unregistering of threads
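// doregister gives a processor a stable index into the lock's data array:
// it first tries to recycle a slot whose handle was reset to 0p by a prior
// unregister (CAS on the handle), and otherwise claims a brand-new slot with
// a fetch-and-add on 'alloc', publishing it by advancing 'ready'. Slots are
// never shrunk, only recycled.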
unsigned doregister( struct __processor_id_t * proc ) with(*__scheduler_lock) {
	__cfadbg_print_safe(ready_queue, "Kernel : Registering proc %p for RW-Lock\n", proc);

	// Step - 1 : check if there is already space in the data
	uint_fast32_t s = ready;

	// Check among all the ready
	for(uint_fast32_t i = 0; i < s; i++) {
		__processor_id_t * null = 0p; // Re-write every loop since compare thrashes it
		if( __atomic_load_n(&data[i].handle, (int)__ATOMIC_RELAXED) == null
			&& __atomic_compare_exchange_n( &data[i].handle, &null, proc, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) {
			/*paranoid*/ verify(i < ready);
			/*paranoid*/ verify(__alignof__(data[i]) == cache_line_size);
			/*paranoid*/ verify((((uintptr_t)&data[i]) % cache_line_size) == 0);
			return i;
		}
	}

	if(max <= alloc) abort("Trying to create more than %u processors", __scheduler_lock->max);

	// Step - 2 : F&A to get a new spot in the array.
	uint_fast32_t n = __atomic_fetch_add(&alloc, 1, __ATOMIC_SEQ_CST);
	if(max <= n) abort("Trying to create more than %u processors", __scheduler_lock->max);

	// Step - 3 : Mark space as used and then publish it.
	__scheduler_lock_id_t * storage = (__scheduler_lock_id_t *)&data[n];
	(*storage){ proc };
	while(true) {
		unsigned copy = n;
		if( __atomic_load_n(&ready, __ATOMIC_RELAXED) == n
			&& __atomic_compare_exchange_n(&ready, &copy, n + 1, true, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
			break;
		asm volatile("pause");
	}

	__cfadbg_print_safe(ready_queue, "Kernel : Registering proc %p done, id %lu\n", proc, n);

	// Return new spot.
	/*paranoid*/ verify(n < ready);
	/*paranoid*/ verify(__alignof__(data[n]) == cache_line_size);
	/*paranoid*/ verify((((uintptr_t)&data[n]) % cache_line_size) == 0);
	return n;
}

void unregister( struct __processor_id_t * proc ) with(*__scheduler_lock) {
	unsigned id = proc->id;
	/*paranoid*/ verify(id < ready);
	/*paranoid*/ verify(proc == __atomic_load_n(&data[id].handle, __ATOMIC_RELAXED));
	__atomic_store_n(&data[id].handle, 0p, __ATOMIC_RELEASE);

	__cfadbg_print_safe(ready_queue, "Kernel : Unregister proc %p\n", proc);
}

//-----------------------------------------------------------------------
// Writer side : acquire when changing the ready queue, e.g. adding more
// queues or removing them.
uint_fast32_t ready_mutate_lock( void ) with(*__scheduler_lock) {
	// Step 1 : lock global lock
	// It is needed to avoid processors that register mid Critical-Section
	// to simply lock their own lock and enter.
	__atomic_acquire( &lock );

	// Step 2 : lock per-proc lock
	// Processors that are currently being registered aren't counted
	// but can't be in read_lock or in the critical section.
	// All other processors are counted
	uint_fast32_t s = ready;
	for(uint_fast32_t i = 0; i < s; i++) {
		__atomic_acquire( &data[i].lock );
	}

	return s;
}

void ready_mutate_unlock( uint_fast32_t last_s ) with(*__scheduler_lock) {
	// Step 1 : release local locks
	// This must be done while the global lock is held to avoid
	// threads that were created mid critical section
	// racing to lock their local locks and having the writer
	// immediately unlock them
	// Alternative solution : return s in write_lock and pass it to write_unlock
	for(uint_fast32_t i = 0; i < last_s; i++) {
		verify(data[i].lock);
		__atomic_store_n(&data[i].lock, (bool)false, __ATOMIC_RELEASE);
	}

	// Step 2 : release global lock
	/*paranoid*/ assert(true == lock);
	__atomic_store_n(&lock, (bool)false, __ATOMIC_RELEASE);
}

//=======================================================================
// Intrusive Queue used by ready queue
//=======================================================================
// Intrusive lanes which are used by the relaxed ready queue
struct __attribute__((aligned(128))) __intrusive_lane_t {
	// spin lock protecting the queue
	volatile bool lock;

	// anchor for the head and the tail of the queue
	struct __sentinel_t {
		// Linked-list fields
		// intrusive link field for threads
		// must be exactly as in $thread
		__thread_desc_link link;
	} before, after;

	// Optional statistic counters
	#if !defined(__CFA_NO_SCHED_STATS__)
		struct __attribute__((aligned(64))) {
			// difference between number of pushes and pops
			ssize_t diff;

			// total number of pushes and pops
			size_t push;
			size_t pop ;
		} stat;
	#endif
};
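// A lane is a doubly-linked list of $thread framed by two sentinels:
//
//    before  <->  t1  <->  t2  <-> ... <->  tn  <->  after
//
// head()/tail() below cast the sentinels to $thread so the push/pop code can
// treat them like ordinary nodes. before.link.ts caches the timestamp of the
// oldest thread in the lane and is 0 when the lane is empty, which is what
// is_empty() and ts() read without taking the lock.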

void ?{}(__intrusive_lane_t & this);
void ^?{}(__intrusive_lane_t & this);

// Get the head pointer (one before the first element) from the anchor
static inline $thread * head(const __intrusive_lane_t & this) {
	$thread * rhead = ($thread *)(
		(uintptr_t)( &this.before ) - offsetof( $thread, link )
	);
	/* paranoid */ verify(rhead);
	return rhead;
}

// Get the tail pointer (one after the last element) from the anchor
static inline $thread * tail(const __intrusive_lane_t & this) {
	$thread * rtail = ($thread *)(
		(uintptr_t)( &this.after ) - offsetof( $thread, link )
	);
	/* paranoid */ verify(rtail);
	return rtail;
}

// Ctor
void ?{}( __intrusive_lane_t & this ) {
	this.lock = false;

	this.before.link.prev = 0p;
	this.before.link.next = tail(this);
	this.before.link.ts   = 0;

	this.after .link.prev = head(this);
	this.after .link.next = 0p;
	this.after .link.ts   = 0;

	#if !defined(__CFA_NO_SCHED_STATS__)
		this.stat.diff = 0;
		this.stat.push = 0;
		this.stat.pop  = 0;
	#endif

	// We add a boat-load of assertions here because the anchor code is very fragile
	/* paranoid */ verify(((uintptr_t)( head(this) ) + offsetof( $thread, link )) == (uintptr_t)(&this.before));
	/* paranoid */ verify(((uintptr_t)( tail(this) ) + offsetof( $thread, link )) == (uintptr_t)(&this.after ));
	/* paranoid */ verify(head(this)->link.prev == 0p );
	/* paranoid */ verify(head(this)->link.next == tail(this) );
	/* paranoid */ verify(tail(this)->link.next == 0p );
	/* paranoid */ verify(tail(this)->link.prev == head(this) );
	/* paranoid */ verify(&head(this)->link.prev == &this.before.link.prev );
	/* paranoid */ verify(&head(this)->link.next == &this.before.link.next );
	/* paranoid */ verify(&tail(this)->link.prev == &this.after .link.prev );
	/* paranoid */ verify(&tail(this)->link.next == &this.after .link.next );
	/* paranoid */ verify(sizeof(__intrusive_lane_t) == 128);
	/* paranoid */ verify(sizeof(this) == 128);
	/* paranoid */ verify(__alignof__(__intrusive_lane_t) == 128);
	/* paranoid */ verify(__alignof__(this) == 128);
	/* paranoid */ verifyf(((intptr_t)(&this) % 128) == 0, "Expected address to be aligned %p %% 128 == %zd", &this, ((intptr_t)(&this) % 128));
}

// Dtor is trivial
void ^?{}( __intrusive_lane_t & this ) {
	// Make sure the list is empty
	/* paranoid */ verify(head(this)->link.prev == 0p );
	/* paranoid */ verify(head(this)->link.next == tail(this) );
	/* paranoid */ verify(tail(this)->link.next == 0p );
	/* paranoid */ verify(tail(this)->link.prev == head(this) );
}

// Push a thread onto this lane
// returns true if the lane was empty before the push, false otherwise
bool push(__intrusive_lane_t & this, $thread * node) {
	#if defined(__CFA_WITH_VERIFY__)
		/* paranoid */ verify(this.lock);
		/* paranoid */ verify(node->link.ts != 0);
		/* paranoid */ verify(node->link.next == 0p);
		/* paranoid */ verify(node->link.prev == 0p);
		/* paranoid */ verify(tail(this)->link.next == 0p);
		/* paranoid */ verify(head(this)->link.prev == 0p);

		if(this.before.link.ts == 0l) {
			/* paranoid */ verify(tail(this)->link.prev == head(this));
			/* paranoid */ verify(head(this)->link.next == tail(this));
		} else {
			/* paranoid */ verify(tail(this)->link.prev != head(this));
			/* paranoid */ verify(head(this)->link.next != tail(this));
		}
	#endif

	// Get the relevant nodes locally
	$thread * tail = tail(this);
	$thread * prev = tail->link.prev;

	// Do the push
	node->link.next = tail;
	node->link.prev = prev;
	prev->link.next = node;
	tail->link.prev = node;

	// Update stats
	#if !defined(__CFA_NO_SCHED_STATS__)
		this.stat.diff++;
		this.stat.push++;
	#endif

	verify(node->link.next == tail(this));

	// Check if the queue used to be empty
	if(this.before.link.ts == 0l) {
		this.before.link.ts = node->link.ts;
		/* paranoid */ verify(node->link.prev == head(this));
		return true;
	}
	return false;
}

// Pop a thread from this lane (must be non-empty)
// returns the popped thread
// and true if the lane is empty after the pop, false otherwise
[$thread *, bool] pop(__intrusive_lane_t & this) {
	/* paranoid */ verify(this.lock);
	/* paranoid */ verify(this.before.link.ts != 0ul);

	// Get anchors locally
	$thread * head = head(this);
	$thread * tail = tail(this);

	// Get the relevant nodes locally
	$thread * node = head->link.next;
	$thread * next = node->link.next;

	/* paranoid */ verify(node != tail);
	/* paranoid */ verify(node);

	// Do the pop
	head->link.next = next;
	next->link.prev = head;
	node->link.[next, prev] = 0p;

	// Update head time stamp
	this.before.link.ts = next->link.ts;

	// Update stats
	#ifndef __CFA_NO_SCHED_STATS__
		this.stat.diff--;
		this.stat.pop ++;
	#endif

	// Check if we emptied the list and return accordingly
	/* paranoid */ verify(tail(this)->link.next == 0p);
	/* paranoid */ verify(head(this)->link.prev == 0p);
	if(next == tail) {
		/* paranoid */ verify(this.before.link.ts == 0);
		/* paranoid */ verify(tail(this)->link.prev == head(this));
		/* paranoid */ verify(head(this)->link.next == tail(this));
		return [node, true];
	}
	else {
		/* paranoid */ verify(next->link.ts != 0);
		/* paranoid */ verify(tail(this)->link.prev != head(this));
		/* paranoid */ verify(head(this)->link.next != tail(this));
		/* paranoid */ verify(this.before.link.ts != 0);
		return [node, false];
	}
}

// Check whether or not the list is empty
static inline bool is_empty(__intrusive_lane_t & this) {
	// Cannot verify here since it may not be locked
	return this.before.link.ts == 0;
}

// Return the timestamp
static inline unsigned long long ts(__intrusive_lane_t & this) {
	// Cannot verify here since it may not be locked
	return this.before.link.ts;
}

//=======================================================================
// Scalable Non-Zero counter
//=======================================================================

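// The SNZI (Scalable Non-Zero Indicator) summarizes "is any lane non-empty?"
// in a tree of counters: each lane maps to a leaf (several lanes may share
// one), arrive()/depart() adjust the leaf and only propagate to the parent
// when the leaf transitions between zero and non-zero, and query() reads
// nothing but the root. This keeps the common push/pop paths from all
// hammering a single shared counter.
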
union __snzi_val_t {
	uint64_t _all;
	struct __attribute__((packed)) {
		char cnt;
		uint64_t ver:56;
	};
};

bool cas(volatile __snzi_val_t & self, __snzi_val_t & exp, char _cnt, uint64_t _ver) {
	__snzi_val_t t;
	t.ver = _ver;
	t.cnt = _cnt;
	/* paranoid */ verify(t._all == ((_ver << 8) | ((unsigned char)_cnt)));
	return __atomic_compare_exchange_n(&self._all, &exp._all, t._all, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
}

bool cas(volatile __snzi_val_t & self, __snzi_val_t & exp, const __snzi_val_t & tar) {
	return __atomic_compare_exchange_n(&self._all, &exp._all, tar._all, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
}

void ?{}( __snzi_val_t & this ) { this._all = 0; }
void ?{}( __snzi_val_t & this, const volatile __snzi_val_t & o) { this._all = o._all; }

struct __attribute__((aligned(128))) __snzi_node_t {
	volatile __snzi_val_t value;
	struct __snzi_node_t * parent;
	bool is_root;
};

static inline void arrive( __snzi_node_t & );
static inline void depart( __snzi_node_t & );

#define __snzi_half -1

//--------------------------------------------------
// Root node
static void arrive_r( __snzi_node_t & this ) {
	/* paranoid */ verify( this.is_root );
	__atomic_fetch_add(&this.value._all, 1, __ATOMIC_SEQ_CST);
}

static void depart_r( __snzi_node_t & this ) {
	/* paranoid */ verify( this.is_root );
	__atomic_fetch_sub(&this.value._all, 1, __ATOMIC_SEQ_CST);
}

//--------------------------------------------------
// Hierarchical node
static void arrive_h( __snzi_node_t & this ) {
	int undoArr = 0;
	bool success = false;
	while(!success) {
		__snzi_val_t x = { this.value };
		/* paranoid */ verify(x.cnt <= 120);
		if( x.cnt >= 1 ) {
			if( cas( this.value, x, x.cnt + 1, x.ver ) ) {
				success = true;
			}
		}
		/* paranoid */ verify(x.cnt <= 120);
		if( x.cnt == 0 ) {
			if( cas( this.value, x, __snzi_half, x.ver + 1) ) {
				success = true;
				x.cnt = __snzi_half;
				x.ver = x.ver + 1;
			}
		}
		/* paranoid */ verify(x.cnt <= 120);
		if( x.cnt == __snzi_half ) {
			/* paranoid */ verify( this.parent );
			arrive( *this.parent );
			if( !cas( this.value, x, 1, x.ver) ) {
				undoArr = undoArr + 1;
			}
		}
	}

	for(int i = 0; i < undoArr; i++) {
		/* paranoid */ verify( this.parent );
		depart( *this.parent );
	}
}

static void depart_h( __snzi_node_t & this ) {
	while(true) {
		const __snzi_val_t x = { this.value };
		/* paranoid */ verifyf(x.cnt >= 1, "%d", x.cnt);
		if( cas( this.value, x, x.cnt - 1, x.ver ) ) {
			if( x.cnt == 1 ) {
				/* paranoid */ verify( this.parent );
				depart( *this.parent );
			}
			return;
		}
	}
}

//--------------------------------------------------
// All nodes
static inline void arrive( __snzi_node_t & this ) {
	if(this.is_root) arrive_r( this );
	else arrive_h( this );
}

static inline void depart( __snzi_node_t & this ) {
	if(this.is_root) depart_r( this );
	else depart_h( this );
}

static inline bool query( __snzi_node_t & this ) {
	/* paranoid */ verify( this.is_root );
	return this.value._all > 0;
}

//--------------------------------------------------
// SNZI object
void ?{}( __snzi_t & this, unsigned depth ) with( this ) {
	mask = (1 << depth) - 1;
	root = (1 << (depth + 1)) - 2;
	nodes = alloc( root + 1 );

	int width = 1 << depth;
	for(int i = 0; i < root; i++) {
		nodes[i].value._all = 0;
		nodes[i].parent = &nodes[(i / 2) + width ];
		nodes[i].is_root = false;
	}

	nodes[ root ].value._all = 0;
	nodes[ root ].parent = 0p;
	nodes[ root ].is_root = true;
}
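// Layout note: for a given depth, the tree is stored in one flat array of
// (1 << (depth + 1)) - 1 nodes. The first (1 << depth) entries are the leaves
// (lane index & mask selects one), node i's parent lives at i/2 + width, and
// the root sits at index (1 << (depth + 1)) - 2, i.e. the last slot.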

void ^?{}( __snzi_t & this ) {
	free( this.nodes );
}

static inline void arrive( __snzi_t & this, int idx) {
	idx &= this.mask;
	arrive( this.nodes[idx] );
}

static inline void depart( __snzi_t & this, int idx) {
	idx &= this.mask;
	depart( this.nodes[idx] );
}

static inline bool query( const __snzi_t & this ) {
	return query( this.nodes[ this.root ] );
}

//=======================================================================
// Cforall Ready Queue used by ready queue
//=======================================================================

// Thread local mirror of ready queue statistics
#if !defined(__CFA_NO_STATISTICS__)
static __attribute__((aligned(128))) thread_local struct {
	struct {
		struct {
			size_t attempt;
			size_t success;
		} push;
		struct {
			size_t maskrds;
			size_t attempt;
			size_t success;
		} pop;
	} pick;
	struct {
		size_t value;
		size_t count;
	} used;
} tls = {
	/* pick */{
		/* push */{ 0, 0 },
		/* pop  */{ 0, 0, 0 },
	},
	/* used */{ 0, 0 }
};
#endif

//-----------------------------------------------------------------------

void ?{}(__ready_queue_t & this) with (this) {

	lanes.data = alloc(4);
	for( i; 4 ) {
		(lanes.data[i]){};
	}
	lanes.count = 4;
	snzi{ log2( lanes.count / 8 ) };

	#if !defined(__CFA_NO_STATISTICS__)
		global_stats.pick.push.attempt = 0;
		global_stats.pick.push.success = 0;
		global_stats.pick.pop .maskrds = 0;
		global_stats.pick.pop .attempt = 0;
		global_stats.pick.pop .success = 0;

		global_stats.used.value = 0;
		global_stats.used.count = 0;
	#endif
}

void ^?{}(__ready_queue_t & this) with (this) {
	verify( 4 == lanes.count );
	verify( !query( snzi ) );

	^(snzi){};

	for( i; 4 ) {
		^(lanes.data[i]){};
	}
	free(lanes.data);
}

//-----------------------------------------------------------------------
__attribute__((hot)) bool push(struct cluster * cltr, struct $thread * thrd) with (cltr->ready_queue) {
	__cfadbg_print_safe(ready_queue, "Kernel : Pushing %p on cluster %p\n", thrd, cltr);

	// write timestamp
	thrd->link.ts = rdtscl();

	// Try to pick a lane and lock it
	unsigned i;
	do {
		// Pick the index of a lane
		i = __tls_rand() % lanes.count;

		#if !defined(__CFA_NO_STATISTICS__)
			tls.pick.push.attempt++;
		#endif

		// If we can't lock it retry
	} while( !__atomic_try_acquire( &lanes.data[i].lock ) );

	bool first = false;

	// Actually push it
	bool lane_first = push(lanes.data[i], thrd);

	// If this lane used to be empty we need to do more
	if(lane_first) {
		// Check if the entire queue used to be empty
		first = !query(snzi);

		// Update the snzi
		arrive( snzi, i );
	}

	// Unlock and return
	__atomic_unlock( &lanes.data[i].lock );

	__cfadbg_print_safe(ready_queue, "Kernel : Pushed %p on cluster %p (idx: %u, mask %llu, first %d)\n", thrd, cltr, i, used.mask[0], lane_first);

	// Update statistics
	#if !defined(__CFA_NO_STATISTICS__)
		tls.pick.push.success++;
	#endif

	// return whether or not the list was empty before this push
	return first;
}

//-----------------------------------------------------------------------
// Given 2 indexes, pick the list with the oldest push and try to pop from it
static struct $thread * try_pop(struct cluster * cltr, unsigned i, unsigned j) with (cltr->ready_queue) {
	#if !defined(__CFA_NO_STATISTICS__)
		tls.pick.pop.attempt++;
	#endif

	// Pick the best list
	int w = i;
	if( __builtin_expect(!is_empty(lanes.data[j]), true) ) {
		w = (ts(lanes.data[i]) < ts(lanes.data[j])) ? i : j;
	}

	// Get relevant elements locally
	__intrusive_lane_t & lane = lanes.data[w];

	// If list looks empty retry
	if( is_empty(lane) ) return 0p;

	// If we can't get the lock retry
	if( !__atomic_try_acquire(&lane.lock) ) return 0p;


	// If list is empty, unlock and retry
	if( is_empty(lane) ) {
		__atomic_unlock(&lane.lock);
		return 0p;
	}

	// Actually pop the list
	struct $thread * thrd;
	bool emptied;
	[thrd, emptied] = pop(lane);

	/* paranoid */ verify(thrd);
	/* paranoid */ verify(lane.lock);

	// If this was the last element in the lane
	if(emptied) {
		depart( snzi, w );
	}

	// Unlock and return
	__atomic_unlock(&lane.lock);

	// Update statistics
	#if !defined(__CFA_NO_STATISTICS__)
		tls.pick.pop.success++;
	#endif

	// return the popped thread
	return thrd;
}

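// Popping uses randomized "power of two choices": pop() below picks two lanes
// at random and try_pop() above takes from whichever advertises the older
// timestamp, which balances load across lanes and biases selection toward
// older work.
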
// Pop from the ready queue from a given cluster
__attribute__((hot)) $thread * pop(struct cluster * cltr) with (cltr->ready_queue) {
	/* paranoid */ verify( lanes.count > 0 );

	// As long as the list is not empty, try finding a lane that isn't empty and pop from it
	while( query(snzi) ) {
		// Pick two lists at random
		int i = __tls_rand() % __atomic_load_n( &lanes.count, __ATOMIC_RELAXED );
		int j = __tls_rand() % __atomic_load_n( &lanes.count, __ATOMIC_RELAXED );

		// try popping from the 2 picked lists
		struct $thread * thrd = try_pop(cltr, i, j);
		if(thrd) return thrd;
	}

	// All lanes were empty, return 0p
	return 0p;
}

//-----------------------------------------------------------------------

static void check( __ready_queue_t & q ) with (q) {
	#if defined(__CFA_WITH_VERIFY__)
	{
		for( idx ; lanes.count ) {
			__intrusive_lane_t & sl = lanes.data[idx];
			assert(!lanes.data[idx].lock);

			assert(head(sl)->link.prev == 0p );
			assert(head(sl)->link.next->link.prev == head(sl) );
			assert(tail(sl)->link.next == 0p );
			assert(tail(sl)->link.prev->link.next == tail(sl) );

			if(sl.before.link.ts == 0l) {
				assert(tail(sl)->link.prev == head(sl));
				assert(head(sl)->link.next == tail(sl));
			} else {
				assert(tail(sl)->link.prev != head(sl));
				assert(head(sl)->link.next != tail(sl));
			}
		}
	}
	#endif
}

// Call this function if the intrusive list was moved using memcpy
// fixes the list so that the pointers back to anchors aren't left dangling
static inline void fix(__intrusive_lane_t & ll) {
	// if the list is not empty then follow the pointer and fix its reverse
	if(!is_empty(ll)) {
		head(ll)->link.next->link.prev = head(ll);
		tail(ll)->link.prev->link.next = tail(ll);
	}
	// Otherwise just reset the list
	else {
		verify(tail(ll)->link.next == 0p);
		tail(ll)->link.prev = head(ll);
		head(ll)->link.next = tail(ll);
		verify(head(ll)->link.prev == 0p);
	}
}

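// Both resize operations below follow the same protocol: take the writer side
// of the cluster RW-lock, tear down the snzi, realloc the lanes array (realloc
// memcpies the lanes, which is why fix() must repair the sentinel pointers),
// rebuild a snzi sized for the new lane count and re-arrive every non-empty
// lane, then release the lock.
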
// Grow the ready queue
void ready_queue_grow (struct cluster * cltr) {
	// Lock the RWlock so no-one pushes/pops while we are changing the queue
	uint_fast32_t last_size = ready_mutate_lock();

	__cfadbg_print_safe(ready_queue, "Kernel : Growing ready queue\n");

	// Make sure that everything is consistent
	/* paranoid */ check( cltr->ready_queue );

	// grow the ready queue
	with( cltr->ready_queue ) {
		^(snzi){};

		size_t ncount = lanes.count;

		// increase count
		ncount += 4;

		// Allocate new array (uses realloc and memcpies the data)
		lanes.data = alloc(lanes.data, ncount);

		// Fix the moved data
		for( idx; (size_t)lanes.count ) {
			fix(lanes.data[idx]);
		}

		// Construct new data
		for( idx; (size_t)lanes.count ~ ncount) {
			(lanes.data[idx]){};
		}

		// Update original
		lanes.count = ncount;

		// Re-create the snzi
		snzi{ log2( lanes.count / 8 ) };
		for( idx; (size_t)lanes.count ) {
			if( !is_empty(lanes.data[idx]) ) {
				arrive(snzi, idx);
			}
		}
	}

	// Make sure that everything is consistent
	/* paranoid */ check( cltr->ready_queue );

	__cfadbg_print_safe(ready_queue, "Kernel : Growing ready queue done\n");

	// Unlock the RWlock
	ready_mutate_unlock( last_size );
}

// Shrink the ready queue
void ready_queue_shrink(struct cluster * cltr) {
	// Lock the RWlock so no-one pushes/pops while we are changing the queue
	uint_fast32_t last_size = ready_mutate_lock();

	__cfadbg_print_safe(ready_queue, "Kernel : Shrinking ready queue\n");

	// Make sure that everything is consistent
	/* paranoid */ check( cltr->ready_queue );

	with( cltr->ready_queue ) {
		^(snzi){};

		size_t ocount = lanes.count;
		// Check that we have some space left
		if(ocount < 8) abort("Program attempted to destroy more Ready Queues than were created");

		// reduce the actual count so push doesn't use the old queues
		lanes.count -= 4;
		verify(ocount > lanes.count);

		// for printing count the number of displaced threads
		#if defined(__CFA_DEBUG_PRINT__) || defined(__CFA_DEBUG_PRINT_READY_QUEUE__)
			__attribute__((unused)) size_t displaced = 0;
		#endif

		// redistribute old data
		for( idx; (size_t)lanes.count ~ ocount) {
			// Lock is not strictly needed but makes checking invariants much easier
			__attribute__((unused)) bool locked = __atomic_try_acquire(&lanes.data[idx].lock);
			verify(locked);

			// As long as we can pop from this lane, push the threads somewhere else in the queue
			while(!is_empty(lanes.data[idx])) {
				struct $thread * thrd;
				__attribute__((unused)) bool _;
				[thrd, _] = pop(lanes.data[idx]);

				push(cltr, thrd);

				// for printing count the number of displaced threads
				#if defined(__CFA_DEBUG_PRINT__) || defined(__CFA_DEBUG_PRINT_READY_QUEUE__)
					displaced++;
				#endif
			}

			// Unlock the lane
			__atomic_unlock(&lanes.data[idx].lock);

			// TODO print the queue statistics here

			^(lanes.data[idx]){};
		}

		__cfadbg_print_safe(ready_queue, "Kernel : Shrinking ready queue displaced %zu threads\n", displaced);

		// Allocate new array (uses realloc and memcpies the data)
		lanes.data = alloc(lanes.data, lanes.count);

		// Fix the moved data
		for( idx; (size_t)lanes.count ) {
			fix(lanes.data[idx]);
		}

		// Re-create the snzi
		snzi{ log2( lanes.count / 8 ) };
		for( idx; (size_t)lanes.count ) {
			if( !is_empty(lanes.data[idx]) ) {
				arrive(snzi, idx);
			}
		}
	}

	// Make sure that everything is consistent
	/* paranoid */ check( cltr->ready_queue );

	__cfadbg_print_safe(ready_queue, "Kernel : Shrinking ready queue done\n");

	// Unlock the RWlock
	ready_mutate_unlock( last_size );
}

//-----------------------------------------------------------------------

#if !defined(__CFA_NO_STATISTICS__)
void stats_tls_tally(struct cluster * cltr) with (cltr->ready_queue) {
	__atomic_fetch_add( &global_stats.pick.push.attempt, tls.pick.push.attempt, __ATOMIC_SEQ_CST );
	__atomic_fetch_add( &global_stats.pick.push.success, tls.pick.push.success, __ATOMIC_SEQ_CST );
	__atomic_fetch_add( &global_stats.pick.pop .maskrds, tls.pick.pop .maskrds, __ATOMIC_SEQ_CST );
	__atomic_fetch_add( &global_stats.pick.pop .attempt, tls.pick.pop .attempt, __ATOMIC_SEQ_CST );
	__atomic_fetch_add( &global_stats.pick.pop .success, tls.pick.pop .success, __ATOMIC_SEQ_CST );

	__atomic_fetch_add( &global_stats.used.value, tls.used.value, __ATOMIC_SEQ_CST );
	__atomic_fetch_add( &global_stats.used.count, tls.used.count, __ATOMIC_SEQ_CST );
}
#endif