source: libcfa/src/concurrency/ready_queue.cfa@ 4fe6224

Last change on this file since 4fe6224 was 343d10e, checked in by Thierry Delisle <tdelisle@…>, 5 years ago

Removed code broken in new-ast out of libcfa

//
// Cforall Version 1.0.0 Copyright (C) 2019 University of Waterloo
//
// The contents of this file are covered under the licence agreement in the
// file "LICENCE" distributed with Cforall.
//
// ready_queue.cfa --
//
// Author : Thierry Delisle
// Created On : Mon Nov dd 16:29:18 2019
// Last Modified By :
// Last Modified On :
// Update Count :
//

#define __cforall_thread__
// #define __CFA_DEBUG_PRINT_READY_QUEUE__

// #define USE_SNZI

#include "bits/defs.hfa"
#include "kernel_private.hfa"

#define _GNU_SOURCE
#include "stdlib.hfa"
#include "math.hfa"

#include <unistd.h>

#include "snzi.hfa"
#include "ready_subqueue.hfa"

static const size_t cache_line_size = 64;

// No overridden function, no environment variable, no define:
// fall back to a magic number.
#ifndef __CFA_MAX_PROCESSORS__
	#define __CFA_MAX_PROCESSORS__ 1024
#endif

#define BIAS 16

// returns the maximum number of processors the RWLock supports
__attribute__((weak)) unsigned __max_processors() {
	const char * max_cores_s = getenv("CFA_MAX_PROCESSORS");
	if(!max_cores_s) {
		__cfadbg_print_nolock(ready_queue, "No CFA_MAX_PROCESSORS in ENV\n");
		return __CFA_MAX_PROCESSORS__;
	}

	char * endptr = 0p;
	long int max_cores_l = strtol(max_cores_s, &endptr, 10);
	if(max_cores_l < 1 || max_cores_l > 65535) {
		__cfadbg_print_nolock(ready_queue, "CFA_MAX_PROCESSORS out of range : %ld\n", max_cores_l);
		return __CFA_MAX_PROCESSORS__;
	}
	if('\0' != *endptr) {
		__cfadbg_print_nolock(ready_queue, "CFA_MAX_PROCESSORS not a decimal number : %s\n", max_cores_s);
		return __CFA_MAX_PROCESSORS__;
	}

	return max_cores_l;
}
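
// For illustration, a hypothetical shell interaction with the variable parsed
// above (not part of this file):
//   $ CFA_MAX_PROCESSORS=256 ./a.out   # RWLock sized for up to 256 processors
//   $ CFA_MAX_PROCESSORS=abc ./a.out   # rejected, falls back to __CFA_MAX_PROCESSORS__
//   $ ./a.out                          # unset, falls back to __CFA_MAX_PROCESSORS__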
64
65//=======================================================================
66// Cluster wide reader-writer lock
67//=======================================================================
[b388ee81]68void ?{}(__scheduler_RWLock_t & this) {
[7768b8d]69 this.max = __max_processors();
70 this.alloc = 0;
71 this.ready = 0;
72 this.lock = false;
73 this.data = alloc(this.max);
74
75 /*paranoid*/ verify( 0 == (((uintptr_t)(this.data )) % 64) );
76 /*paranoid*/ verify( 0 == (((uintptr_t)(this.data + 1)) % 64) );
77 /*paranoid*/ verify(__atomic_is_lock_free(sizeof(this.alloc), &this.alloc));
78 /*paranoid*/ verify(__atomic_is_lock_free(sizeof(this.ready), &this.ready));
79
80}
[b388ee81]81void ^?{}(__scheduler_RWLock_t & this) {
[7768b8d]82 free(this.data);
83}
84
[9b1dcc2]85void ?{}( __scheduler_lock_id_t & this, __processor_id_t * proc ) {
[7768b8d]86 this.handle = proc;
87 this.lock = false;
[64a7146]88 #ifdef __CFA_WITH_VERIFY__
89 this.owned = false;
90 #endif
[7768b8d]91}

//=======================================================================
// Lock-Free registering/unregistering of threads
unsigned doregister( struct __processor_id_t * proc ) with(*__scheduler_lock) {
	__cfadbg_print_safe(ready_queue, "Kernel : Registering proc %p for RW-Lock\n", proc);

	// Step - 1 : check if there is already space in the data
	uint_fast32_t s = ready;

	// Check among all the ready slots
	for(uint_fast32_t i = 0; i < s; i++) {
		__processor_id_t * null = 0p; // Re-write every loop since compare thrashes it
		if( __atomic_load_n(&data[i].handle, (int)__ATOMIC_RELAXED) == null
			&& __atomic_compare_exchange_n( &data[i].handle, &null, proc, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) {
			/*paranoid*/ verify(i < ready);
			/*paranoid*/ verify(0 == (__alignof__(data[i]) % cache_line_size));
			/*paranoid*/ verify((((uintptr_t)&data[i]) % cache_line_size) == 0);
			return i;
		}
	}

	if(max <= alloc) abort("Trying to create more than %u processors", __scheduler_lock->max);

	// Step - 2 : F&A to get a new spot in the array.
	uint_fast32_t n = __atomic_fetch_add(&alloc, 1, __ATOMIC_SEQ_CST);
	if(max <= n) abort("Trying to create more than %u processors", __scheduler_lock->max);

	// Step - 3 : Mark space as used and then publish it.
	__scheduler_lock_id_t * storage = (__scheduler_lock_id_t *)&data[n];
	(*storage){ proc };
	while(true) {
		unsigned copy = n;
		if( __atomic_load_n(&ready, __ATOMIC_RELAXED) == n
			&& __atomic_compare_exchange_n(&ready, &copy, n + 1, true, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
			break;
		asm volatile("pause");
	}

	__cfadbg_print_safe(ready_queue, "Kernel : Registering proc %p done, id %lu\n", proc, n);

	// Return new spot.
	/*paranoid*/ verify(n < ready);
	/*paranoid*/ verify(__alignof__(data[n]) == (2 * cache_line_size));
	/*paranoid*/ verify((((uintptr_t)&data[n]) % cache_line_size) == 0);
	return n;
}
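
// The publish loop in Step 3 above behaves like a ticketed release: slot n
// only becomes visible once slots 0..n-1 are visible, so concurrent
// registrations publish in allocation order and readers that observe
// 'ready == k' can safely scan slots 0..k-1. A minimal sketch of the same
// pattern in plain C11 atomics (names hypothetical, not part of this file):
//
//   unsigned n = atomic_fetch_add(&alloc, 1);   // claim a unique slot
//   slots[n] = ...;                             // fill it in privately
//   while(atomic_load(&ready) != n) pause();    // wait for slots 0..n-1
//   atomic_store(&ready, n + 1);                // publish slot n
//
// The CAS used above is equivalent because exactly one thread holds ticket n.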

void unregister( struct __processor_id_t * proc ) with(*__scheduler_lock) {
	unsigned id = proc->id;
	/*paranoid*/ verify(id < ready);
	/*paranoid*/ verify(proc == __atomic_load_n(&data[id].handle, __ATOMIC_RELAXED));
	__atomic_store_n(&data[id].handle, 0p, __ATOMIC_RELEASE);

	__cfadbg_print_safe(ready_queue, "Kernel : Unregister proc %p\n", proc);
}

//-----------------------------------------------------------------------
// Writer side : acquire when changing the ready queue, e.g. adding more
// queues or removing them.
uint_fast32_t ready_mutate_lock( void ) with(*__scheduler_lock) {
	// Step 1 : lock global lock
	// It is needed to prevent processors that register mid critical-section
	// from simply locking their own lock and entering.
	__atomic_acquire( &lock );

	// Step 2 : lock per-proc lock
	// Processors that are currently being registered aren't counted
	// but can't be in read_lock or in the critical section.
	// All other processors are counted.
	uint_fast32_t s = ready;
	for(uint_fast32_t i = 0; i < s; i++) {
		__atomic_acquire( &data[i].lock );
	}

	return s;
}

void ready_mutate_unlock( uint_fast32_t last_s ) with(*__scheduler_lock) {
	// Step 1 : release local locks
	// This must be done while the global lock is held to avoid
	// threads that were created mid critical-section
	// racing to lock their local locks and having the writer
	// immediately unlock them.
	// Alternative solution : return s in write_lock and pass it to write_unlock
	for(uint_fast32_t i = 0; i < last_s; i++) {
		verify(data[i].lock);
		__atomic_store_n(&data[i].lock, (bool)false, __ATOMIC_RELEASE);
	}

	// Step 2 : release global lock
	/*paranoid*/ assert(true == lock);
	__atomic_store_n(&lock, (bool)false, __ATOMIC_RELEASE);
}
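
// Concretely, the ordering ready_mutate_unlock enforces (hypothetical
// interleaving): a processor that registered mid critical-section was never
// locked by the writer's Step 2 loop. If the global lock were dropped before
// the per-proc releases, such a processor could take its own data[i].lock as
// a reader while the release loop is still running, and the writer would then
// release a lock it never acquired. Holding 'lock' until the loop finishes
// keeps new readers out of that window.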

//=======================================================================
// Cforall Ready Queue used for scheduling
//=======================================================================
void ?{}(__ready_queue_t & this) with (this) {
	lanes.data = 0p;
	lanes.count = 0;
}

void ^?{}(__ready_queue_t & this) with (this) {
	verify( 1 == lanes.count );
	#ifdef USE_SNZI
		verify( !query( snzi ) );
	#endif
	free(lanes.data);
}

//-----------------------------------------------------------------------
__attribute__((hot)) bool query(struct cluster * cltr) {
	#ifdef USE_SNZI
		return query(cltr->ready_queue.snzi);
	#endif
	return true;
}

//-----------------------------------------------------------------------
__attribute__((hot)) bool push(struct cluster * cltr, struct $thread * thrd) with (cltr->ready_queue) {
	__cfadbg_print_safe(ready_queue, "Kernel : Pushing %p on cluster %p\n", thrd, cltr);

	// write timestamp
	thrd->link.ts = rdtscl();

	#if defined(BIAS) && !defined(__CFA_NO_STATISTICS__)
		bool local = false;
		// comment toggle : '//*' selects the first expression, '/*' the second
		int preferred =
			//*
			kernelTLS.this_processor ? kernelTLS.this_processor->id * 4 : -1;
			/*/
			thrd->link.preferred * 4;
			//*/
	#endif

	// Try to pick a lane and lock it
	unsigned i;
	do {
		// Pick the index of a lane
		#if defined(BIAS)
			unsigned r = __tls_rand();
			unsigned rlow = r % BIAS;
			unsigned rhigh = r / BIAS;
			if((0 != rlow) && preferred >= 0) {
				// (BIAS - 1) out of BIAS chances
				// Use preferred queues
				i = preferred + (rhigh % 4);

				#if !defined(__CFA_NO_STATISTICS__)
					local = true;
					__tls_stats()->ready.pick.push.local++;
				#endif
			}
			else {
				// 1 out of BIAS chances
				// Use all queues
				i = rhigh;
				local = false;
			}
		#else
			i = __tls_rand();
		#endif

		i %= __atomic_load_n( &lanes.count, __ATOMIC_RELAXED );

		#if !defined(__CFA_NO_STATISTICS__)
			__tls_stats()->ready.pick.push.attempt++;
		#endif

		// If we can't lock it retry
	} while( !__atomic_try_acquire( &lanes.data[i].lock ) );

	bool first = false;

	// Actually push it
	bool lane_first = push(lanes.data[i], thrd);

	#ifdef USE_SNZI
		// If this lane used to be empty we need to do more
		if(lane_first) {
			// Check if the entire queue used to be empty
			first = !query(snzi);

			// Update the snzi
			arrive( snzi, i );
		}
	#endif

	// Unlock and return
	__atomic_unlock( &lanes.data[i].lock );

	__cfadbg_print_safe(ready_queue, "Kernel : Pushed %p on cluster %p (idx: %u, mask %llu, first %d)\n", thrd, cltr, i, used.mask[0], lane_first);

	// Update statistics
	#if !defined(__CFA_NO_STATISTICS__)
		#if defined(BIAS)
			if( local ) __tls_stats()->ready.pick.push.lsuccess++;
		#endif
		__tls_stats()->ready.pick.push.success++;
	#endif

	// return whether or not the list was empty before this push
	return first;
}
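
// A note on the BIAS arithmetic in push: one random draw r is split into two
// independent values, rlow = r % BIAS and rhigh = r / BIAS. With BIAS == 16,
// rlow == 0 happens 1 time in 16, so roughly 15 of 16 pushes target one of
// the 4 lanes owned by the current processor (preferred + rhigh % 4) and 1 in
// 16 picks a uniformly random lane, which keeps every lane reachable.
//   e.g. with r == 0x1234 : rlow == 4660 % 16 == 4 (non-zero, stay local),
//        rhigh == 4660 / 16 == 291, lane == preferred + (291 % 4) == preferred + 3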

static struct $thread * try_pop(struct cluster * cltr, unsigned i, unsigned j);
static struct $thread * try_pop(struct cluster * cltr, unsigned i);

// Pop from the ready queue from a given cluster
__attribute__((hot)) $thread * pop(struct cluster * cltr) with (cltr->ready_queue) {
	/* paranoid */ verify( lanes.count > 0 );
	unsigned count = __atomic_load_n( &lanes.count, __ATOMIC_RELAXED );
	#if defined(BIAS)
		// Don't bother trying locally too much
		int local_tries = 8;
	#endif

	// As long as the list is not empty, try finding a lane that isn't empty and pop from it
	#ifdef USE_SNZI
		while( query(snzi) ) {
	#else
		for(25) {
	#endif
		// Pick two lists at random
		unsigned i,j;
		#if defined(BIAS)
			#if !defined(__CFA_NO_STATISTICS__)
				bool local = false;
			#endif
			uint64_t r = __tls_rand();
			unsigned rlow = r % BIAS;
			uint64_t rhigh = r / BIAS;
			if(local_tries && 0 != rlow) {
				// (BIAS - 1) out of BIAS chances
				// Use preferred queues
				unsigned pid = kernelTLS.this_processor->id * 4;
				i = pid + (rhigh % 4);
				j = pid + ((rhigh >> 32ull) % 4);

				// count the tries
				local_tries--;

				#if !defined(__CFA_NO_STATISTICS__)
					local = true;
					__tls_stats()->ready.pick.pop.local++;
				#endif
			}
			else {
				// 1 out of BIAS chances
				// Use all queues
				i = rhigh;
				j = rhigh >> 32ull;
			}
		#else
			i = __tls_rand();
			j = __tls_rand();
		#endif

		i %= count;
		j %= count;

		// try popping from the 2 picked lists
		struct $thread * thrd = try_pop(cltr, i, j);
		if(thrd) {
			#if defined(BIAS) && !defined(__CFA_NO_STATISTICS__)
				if( local ) __tls_stats()->ready.pick.pop.lsuccess++;
			#endif
			return thrd;
		}
	}

	// All lanes were empty; return 0p
	return 0p;
}

__attribute__((hot)) struct $thread * pop_slow(struct cluster * cltr) with (cltr->ready_queue) {
	/* paranoid */ verify( lanes.count > 0 );
	unsigned count = __atomic_load_n( &lanes.count, __ATOMIC_RELAXED );
	unsigned offset = __tls_rand();
	for(i; count) {
		unsigned idx = (offset + i) % count;
		struct $thread * thrd = try_pop(cltr, idx);
		if(thrd) {
			return thrd;
		}
	}

	// All lanes were empty; return 0p
	return 0p;
}

//-----------------------------------------------------------------------
// Given 2 indexes, pick the list with the oldest push and try to pop from it
static inline struct $thread * try_pop(struct cluster * cltr, unsigned i, unsigned j) with (cltr->ready_queue) {
	#if !defined(__CFA_NO_STATISTICS__)
		__tls_stats()->ready.pick.pop.attempt++;
	#endif

	// Pick the best list
	int w = i;
	if( __builtin_expect(!is_empty(lanes.data[j]), true) ) {
		w = (ts(lanes.data[i]) < ts(lanes.data[j])) ? i : j;
	}

	return try_pop(cltr, w);
}

static inline struct $thread * try_pop(struct cluster * cltr, unsigned w) with (cltr->ready_queue) {
	// Get relevant elements locally
	__intrusive_lane_t & lane = lanes.data[w];

	// If list looks empty retry
	if( is_empty(lane) ) return 0p;

	// If we can't get the lock retry
	if( !__atomic_try_acquire(&lane.lock) ) return 0p;

	// If list is empty, unlock and retry
	if( is_empty(lane) ) {
		__atomic_unlock(&lane.lock);
		return 0p;
	}

	// Actually pop the list
	struct $thread * thrd;
	thrd = pop(lane);

	/* paranoid */ verify(thrd);
	/* paranoid */ verify(lane.lock);

	#ifdef USE_SNZI
		// If this was the last element in the lane
		if(emptied) {
			depart( snzi, w );
		}
	#endif

	// Unlock and return
	__atomic_unlock(&lane.lock);

	// Update statistics
	#if !defined(__CFA_NO_STATISTICS__)
		__tls_stats()->ready.pick.pop.success++;
	#endif

	// Update the thread bias
	thrd->link.preferred = w / 4;

	// return the popped thread
	return thrd;
}
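
// Note that both try_pop variants are non-blocking towards their caller:
// every failure mode (lane looks empty, lane lock contended, lane drained
// between the check and the lock) returns 0p so pop()/pop_slow() re-randomize
// instead of waiting. The two-index variant implements the classic "power of
// two choices": sampling two lanes and popping the one whose head has the
// older timestamp approximates global FIFO order without any global
// synchronization.
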
//-----------------------------------------------------------------------

bool remove_head(struct cluster * cltr, struct $thread * thrd) with (cltr->ready_queue) {
	for(i; lanes.count) {
		__intrusive_lane_t & lane = lanes.data[i];

		bool removed = false;

		__atomic_acquire(&lane.lock);
		if(head(lane)->link.next == thrd) {
			$thread * pthrd;
			pthrd = pop(lane);

			/* paranoid */ verify( pthrd == thrd );

			removed = true;
			#ifdef USE_SNZI
				if(emptied) {
					depart( snzi, i );
				}
			#endif
		}
		__atomic_unlock(&lane.lock);

		if( removed ) return true;
	}
	return false;
}
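
// remove_head only succeeds when thrd sits at the head of some lane (the
// test is head(lane)->link.next == thrd) and scans the lanes linearly under
// each lane's lock, so it is a best-effort O(lanes.count) operation rather
// than a general-purpose remove.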

//-----------------------------------------------------------------------

static void check( __ready_queue_t & q ) with (q) {
	#if defined(__CFA_WITH_VERIFY__)
	{
		for( idx ; lanes.count ) {
			__intrusive_lane_t & sl = lanes.data[idx];
			assert(!lanes.data[idx].lock);

			assert(head(sl)->link.prev == 0p );
			assert(head(sl)->link.next->link.prev == head(sl) );
			assert(tail(sl)->link.next == 0p );
			assert(tail(sl)->link.prev->link.next == tail(sl) );

			if(sl.before.link.ts == 0l) {
				assert(tail(sl)->link.prev == head(sl));
				assert(head(sl)->link.next == tail(sl));
			} else {
				assert(tail(sl)->link.prev != head(sl));
				assert(head(sl)->link.next != tail(sl));
			}
		}
	}
	#endif
}

// Call this function if the intrusive list was moved using memcpy;
// fixes the list so that the pointers back to anchors aren't left dangling
static inline void fix(__intrusive_lane_t & ll) {
	// if the list is not empty then follow the pointers and fix their reverse
	if(!is_empty(ll)) {
		head(ll)->link.next->link.prev = head(ll);
		tail(ll)->link.prev->link.next = tail(ll);
	}
	// Otherwise just reset the list
	else {
		verify(tail(ll)->link.next == 0p);
		tail(ll)->link.prev = head(ll);
		head(ll)->link.next = tail(ll);
		verify(head(ll)->link.prev == 0p);
	}
}
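
// Why fix() is needed, concretely: each lane embeds its head and tail anchor
// nodes, and the boundary nodes of the list point back at those anchors.
// After realloc moves a lane from address A to address B, those back-pointers
// still reference the dead block at A:
//
//   before move : head@A <-> first <-> ... <-> last <-> tail@A
//   after  move : head@B     first->prev == head@A (dangling)     tail@B
//
// fix() rewrites exactly those boundary pointers against the new addresses,
// or re-links head and tail directly when the lane is empty.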

// Grow the ready queue
void ready_queue_grow (struct cluster * cltr, int target) {
	/* paranoid */ verify( ready_mutate_islocked() );
	__cfadbg_print_safe(ready_queue, "Kernel : Growing ready queue\n");

	// Make sure that everything is consistent
	/* paranoid */ check( cltr->ready_queue );

	// grow the ready queue
	with( cltr->ready_queue ) {
		#ifdef USE_SNZI
			^(snzi){};
		#endif

		// Find new count
		// Make sure we always have at least 1 list
		size_t ncount = target >= 2 ? target * 4 : 1;

		// Allocate new array (uses realloc and memcpies the data)
		lanes.data = alloc(lanes.data, ncount);

		// Fix the moved data
		for( idx; (size_t)lanes.count ) {
			fix(lanes.data[idx]);
		}

		// Construct new data
		for( idx; (size_t)lanes.count ~ ncount) {
			(lanes.data[idx]){};
		}

		// Update original
		lanes.count = ncount;

		#ifdef USE_SNZI
			// Re-create the snzi
			snzi{ log2( lanes.count / 8 ) };
			for( idx; (size_t)lanes.count ) {
				if( !is_empty(lanes.data[idx]) ) {
					arrive(snzi, idx);
				}
			}
		#endif
	}

	// Make sure that everything is consistent
	/* paranoid */ check( cltr->ready_queue );

	__cfadbg_print_safe(ready_queue, "Kernel : Growing ready queue done\n");

	/* paranoid */ verify( ready_mutate_islocked() );
}
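
// Sizing note: grow and shrink both keep 4 lanes per processor (target * 4),
// matching the 4-lane ownership blocks assumed by the BIAS paths in push and
// pop (processor id * 4), and keep a single lane when fewer than 2 processors
// exist so the cluster always has somewhere to push.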

// Shrink the ready queue
void ready_queue_shrink(struct cluster * cltr, int target) {
	/* paranoid */ verify( ready_mutate_islocked() );
	__cfadbg_print_safe(ready_queue, "Kernel : Shrinking ready queue\n");

	// Make sure that everything is consistent
	/* paranoid */ check( cltr->ready_queue );

	with( cltr->ready_queue ) {
		#ifdef USE_SNZI
			^(snzi){};
		#endif

		// Remember old count
		size_t ocount = lanes.count;

		// Find new count
		// Make sure we always have at least 1 list
		lanes.count = target >= 2 ? target * 4 : 1;
		/* paranoid */ verify( ocount >= lanes.count );
		/* paranoid */ verify( lanes.count == target * 4 || target < 2 );

		// for printing count the number of displaced threads
		#if defined(__CFA_DEBUG_PRINT__) || defined(__CFA_DEBUG_PRINT_READY_QUEUE__)
			__attribute__((unused)) size_t displaced = 0;
		#endif

		// redistribute old data
		for( idx; (size_t)lanes.count ~ ocount) {
			// Lock is not strictly needed but makes checking invariants much easier
			__attribute__((unused)) bool locked = __atomic_try_acquire(&lanes.data[idx].lock);
			verify(locked);

			// As long as we can pop from this lane, push the threads somewhere else in the queue
			while(!is_empty(lanes.data[idx])) {
				struct $thread * thrd;
				thrd = pop(lanes.data[idx]);

				push(cltr, thrd);

				// for printing count the number of displaced threads
				#if defined(__CFA_DEBUG_PRINT__) || defined(__CFA_DEBUG_PRINT_READY_QUEUE__)
					displaced++;
				#endif
			}

			// Unlock the lane
			__atomic_unlock(&lanes.data[idx].lock);

			// TODO print the queue statistics here

			^(lanes.data[idx]){};
		}

		__cfadbg_print_safe(ready_queue, "Kernel : Shrinking ready queue displaced %zu threads\n", displaced);

		// Allocate new array (uses realloc and memcpies the data)
		lanes.data = alloc(lanes.data, lanes.count);

		// Fix the moved data
		for( idx; (size_t)lanes.count ) {
			fix(lanes.data[idx]);
		}

		#ifdef USE_SNZI
			// Re-create the snzi
			snzi{ log2( lanes.count / 8 ) };
			for( idx; (size_t)lanes.count ) {
				if( !is_empty(lanes.data[idx]) ) {
					arrive(snzi, idx);
				}
			}
		#endif
	}

	// Make sure that everything is consistent
	/* paranoid */ check( cltr->ready_queue );

	__cfadbg_print_safe(ready_queue, "Kernel : Shrinking ready queue done\n");
	/* paranoid */ verify( ready_mutate_islocked() );
}