source: libcfa/src/concurrency/ready_queue.cfa @ 5f6a172

Last change on this file since 5f6a172 was 5f6a172, checked in by Thierry Delisle <tdelisle@…>, 3 years ago

Fix assertions on ready_queue with workstealling

  • Property mode set to 100644
File size: 21.7 KB
1//
2// Cforall Version 1.0.0 Copyright (C) 2019 University of Waterloo
3//
4// The contents of this file are covered under the licence agreement in the
5// file "LICENCE" distributed with Cforall.
6//
7// ready_queue.cfa --
8//
9// Author           : Thierry Delisle
10// Created On       : Mon Nov dd 16:29:18 2019
11// Last Modified By :
12// Last Modified On :
13// Update Count     :
14//
15
16#define __cforall_thread__
17// #define __CFA_DEBUG_PRINT_READY_QUEUE__
18
19// #define USE_MPSC
20
21#define USE_RELAXED_FIFO
22// #define USE_WORK_STEALING
23
24#include "bits/defs.hfa"
25#include "kernel_private.hfa"
26
27#define _GNU_SOURCE
28#include "stdlib.hfa"
29#include "math.hfa"
30
31#include <unistd.h>
32
33#include "ready_subqueue.hfa"
34
35static const size_t cache_line_size = 64;
36
37// No overridden function, no environment variable, no #define
38// fall back to a magic number
39#ifndef __CFA_MAX_PROCESSORS__
40        #define __CFA_MAX_PROCESSORS__ 1024
41#endif
42
43#if   defined(USE_RELAXED_FIFO)
44        #define BIAS 4
45        #define READYQ_SHARD_FACTOR 4
46        #define SEQUENTIAL_SHARD 1
47#elif defined(USE_WORK_STEALING)
48        #define READYQ_SHARD_FACTOR 2
49        #define SEQUENTIAL_SHARD 2
50#else
51        #error no scheduling strategy selected
52#endif
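// Note on sharding (a sketch based on the constants above): the ready queue is
// split into multiple lanes to reduce contention.  Each processor is assigned
// READYQ_SHARD_FACTOR consecutive lanes, so a cluster with P processors ends up
// with roughly P * READYQ_SHARD_FACTOR lanes (see ready_queue_grow below).  For
// example, with 4 processors under the relaxed-FIFO build (READYQ_SHARD_FACTOR
// == 4) the queue has 16 lanes and processor i owns lanes [4*i, 4*i + 4).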
53
54static inline [unsigned, bool] idx_from_r(unsigned r, unsigned preferred);
55static inline struct $thread * try_pop(struct cluster * cltr, unsigned w);
56static inline struct $thread * try_pop(struct cluster * cltr, unsigned i, unsigned j);
57static inline struct $thread * search(struct cluster * cltr);
58
59
60// returns the maximum number of processors the RWLock supports
61__attribute__((weak)) unsigned __max_processors() {
62        const char * max_cores_s = getenv("CFA_MAX_PROCESSORS");
63        if(!max_cores_s) {
64                __cfadbg_print_nolock(ready_queue, "No CFA_MAX_PROCESSORS in ENV\n");
65                return __CFA_MAX_PROCESSORS__;
66        }
67
68        char * endptr = 0p;
69        long int max_cores_l = strtol(max_cores_s, &endptr, 10);
70        if(max_cores_l < 1 || max_cores_l > 65535) {
71                __cfadbg_print_nolock(ready_queue, "CFA_MAX_PROCESSORS out of range : %ld\n", max_cores_l);
72                return __CFA_MAX_PROCESSORS__;
73        }
74        if('\0' != *endptr) {
75                __cfadbg_print_nolock(ready_queue, "CFA_MAX_PROCESSORS not a decimal number : %s\n", max_cores_s);
76                return __CFA_MAX_PROCESSORS__;
77        }
78
79        return max_cores_l;
80}
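// Usage sketch (assumed invocation): the limit caps how many processors can
// ever register with the cluster RW-lock below, e.g.
//
//     $ CFA_MAX_PROCESSORS=256 ./a.out
//
// Non-numeric or out-of-range values (outside 1..65535) fall back to
// __CFA_MAX_PROCESSORS__, which defaults to 1024.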
81
82//=======================================================================
83// Cluster wide reader-writer lock
84//=======================================================================
85void  ?{}(__scheduler_RWLock_t & this) {
86        this.max   = __max_processors();
87        this.alloc = 0;
88        this.ready = 0;
89        this.lock  = false;
90        this.data  = alloc(this.max);
91
92        /*paranoid*/ verify( 0 == (((uintptr_t)(this.data    )) % 64) );
93        /*paranoid*/ verify( 0 == (((uintptr_t)(this.data + 1)) % 64) );
94        /*paranoid*/ verify(__atomic_is_lock_free(sizeof(this.alloc), &this.alloc));
95        /*paranoid*/ verify(__atomic_is_lock_free(sizeof(this.ready), &this.ready));
96
97}
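// Field roles, as used by the registration and locking code below:
//   max   - capacity of the data array, one slot per potential processor
//   alloc - number of slots handed out so far (fetch-and-add in register_proc_id)
//   ready - number of slots published, i.e. visible to readers and to the writer loop
//   lock  - global writer flag taken by ready_mutate_lock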
98void ^?{}(__scheduler_RWLock_t & this) {
99        free(this.data);
100}
101
102void ?{}( __scheduler_lock_id_t & this, __processor_id_t * proc ) {
103        this.handle = proc;
104        this.lock   = false;
105        #ifdef __CFA_WITH_VERIFY__
106                this.owned  = false;
107        #endif
108}
109
110//=======================================================================
111// Lock-Free registering/unregistering of threads
112void register_proc_id( struct __processor_id_t * proc ) with(*__scheduler_lock) {
113        __cfadbg_print_safe(ready_queue, "Kernel : Registering proc %p for RW-Lock\n", proc);
114
115        // Step - 1 : check if there is already space in the data
116        uint_fast32_t s = ready;
117
118	// Check among all the published slots for one that was freed
119        for(uint_fast32_t i = 0; i < s; i++) {
120		__processor_id_t * null = 0p; // Re-initialize every iteration since a failed compare-exchange overwrites it
121                if( __atomic_load_n(&data[i].handle, (int)__ATOMIC_RELAXED) == null
122                        && __atomic_compare_exchange_n( &data[i].handle, &null, proc, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) {
123                        /*paranoid*/ verify(i < ready);
124                        /*paranoid*/ verify(0 == (__alignof__(data[i]) % cache_line_size));
125                        /*paranoid*/ verify((((uintptr_t)&data[i]) % cache_line_size) == 0);
126			proc->id = i; return; // slot reclaimed, do not also allocate a new one
127                }
128        }
129
130	if(max <= alloc) abort("Trying to create more than %u processors", __scheduler_lock->max);
131
132        // Step - 2 : F&A to get a new spot in the array.
133        uint_fast32_t n = __atomic_fetch_add(&alloc, 1, __ATOMIC_SEQ_CST);
134	if(max <= n) abort("Trying to create more than %u processors", __scheduler_lock->max);
135
136        // Step - 3 : Mark space as used and then publish it.
137        __scheduler_lock_id_t * storage = (__scheduler_lock_id_t *)&data[n];
138        (*storage){ proc };
139        while() {
140                unsigned copy = n;
141                if( __atomic_load_n(&ready, __ATOMIC_RELAXED) == n
142                        && __atomic_compare_exchange_n(&ready, &copy, n + 1, true, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
143                        break;
144                Pause();
145        }
146
147        __cfadbg_print_safe(ready_queue, "Kernel : Registering proc %p done, id %lu\n", proc, n);
148
149        // Return new spot.
150        /*paranoid*/ verify(n < ready);
151        /*paranoid*/ verify(__alignof__(data[n]) == (2 * cache_line_size));
152        /*paranoid*/ verify((((uintptr_t)&data[n]) % cache_line_size) == 0);
153        proc->id = n;
154}
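// Registration recap: a processor first tries to reclaim a slot freed by
// unregister_proc_id (compare-and-swap on a null handle); only if none is found
// does it fetch-and-add a brand new slot.  Publication is ordered: the final
// loop waits until 'ready' equals this slot's index before bumping it, so
// readers always observe a dense, fully initialized prefix [0, ready).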
155
156void unregister_proc_id( struct __processor_id_t * proc ) with(*__scheduler_lock) {
157        unsigned id = proc->id;
158        /*paranoid*/ verify(id < ready);
159        /*paranoid*/ verify(proc == __atomic_load_n(&data[id].handle, __ATOMIC_RELAXED));
160        __atomic_store_n(&data[id].handle, 0p, __ATOMIC_RELEASE);
161
162        __cfadbg_print_safe(ready_queue, "Kernel : Unregister proc %p\n", proc);
163}
164
165//-----------------------------------------------------------------------
166// Writer side : acquire when changing the ready queue, e.g. adding more
167//  queues or removing them.
168uint_fast32_t ready_mutate_lock( void ) with(*__scheduler_lock) {
169        /* paranoid */ verify( ! __preemption_enabled() );
170
171	// Step 1 : lock global lock
172	// Needed to prevent processors that register mid critical-section
173	//   from simply locking their own lock and entering.
174        __atomic_acquire( &lock );
175
176        // Step 2 : lock per-proc lock
177        // Processors that are currently being registered aren't counted
178        //   but can't be in read_lock or in the critical section.
179        // All other processors are counted
180        uint_fast32_t s = ready;
181        for(uint_fast32_t i = 0; i < s; i++) {
182                __atomic_acquire( &data[i].lock );
183        }
184
185        /* paranoid */ verify( ! __preemption_enabled() );
186        return s;
187}
188
189void ready_mutate_unlock( uint_fast32_t last_s ) with(*__scheduler_lock) {
190        /* paranoid */ verify( ! __preemption_enabled() );
191
192        // Step 1 : release local locks
193	// This must be done while the global lock is held to prevent
194	//   threads that were created mid critical-section
195	//   from racing to lock their local locks and having the writer
196	//   immediately unlock them
197        // Alternative solution : return s in write_lock and pass it to write_unlock
198        for(uint_fast32_t i = 0; i < last_s; i++) {
199                verify(data[i].lock);
200                __atomic_store_n(&data[i].lock, (bool)false, __ATOMIC_RELEASE);
201        }
202
203        // Step 2 : release global lock
204        /*paranoid*/ assert(true == lock);
205        __atomic_store_n(&lock, (bool)false, __ATOMIC_RELEASE);
206
207        /* paranoid */ verify( ! __preemption_enabled() );
208}
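// Writer protocol recap: the writer holds the global 'lock' plus every
// published per-processor lock for the whole mutation.  The matching reader
// side (declared in kernel_private.hfa) is assumed to acquire only the
// caller's own data[id].lock, which is what keeps the read path scalable
// while writers pay O(ready).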
209
210//=======================================================================
211// Cforall Ready Queue used for scheduling
212//=======================================================================
213void ?{}(__ready_queue_t & this) with (this) {
214        lanes.data  = 0p;
215        lanes.tscs  = 0p;
216        lanes.count = 0;
217}
218
219void ^?{}(__ready_queue_t & this) with (this) {
220        verify( SEQUENTIAL_SHARD == lanes.count );
221        free(lanes.data);
222        free(lanes.tscs);
223}
224
225//-----------------------------------------------------------------------
226#if defined(USE_RELAXED_FIFO)
227        //-----------------------------------------------------------------------
228        // get index from random number with or without bias towards queues
229        static inline [unsigned, bool] idx_from_r(unsigned r, unsigned preferred) {
230                unsigned i;
231                bool local;
232                unsigned rlow  = r % BIAS;
233                unsigned rhigh = r / BIAS;
234		if((0 != rlow) && preferred != -1u) { // -1u is the 'no preferred lane' sentinel used for external pushes
235			// (BIAS - 1) out of BIAS chances
236			// Use preferred queues
237                        i = preferred + (rhigh % READYQ_SHARD_FACTOR);
238                        local = true;
239                }
240                else {
241                        // 1 out of BIAS chances
242                        // Use all queues
243                        i = rhigh;
244                        local = false;
245                }
246                return [i, local];
247        }
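	// Worked example (from the constants above): with BIAS == 4 and
	// READYQ_SHARD_FACTOR == 4, r % BIAS is non-zero 3 times out of 4, so
	// roughly 75% of operations target one of the caller's 4 preferred lanes
	// (preferred + rhigh % 4) and the remaining 25% spread over all lanes.
	// Callers still reduce the returned index modulo lanes.count.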
248
249        __attribute__((hot)) void push(struct cluster * cltr, struct $thread * thrd) with (cltr->ready_queue) {
250                __cfadbg_print_safe(ready_queue, "Kernel : Pushing %p on cluster %p\n", thrd, cltr);
251
252                const bool external = (!kernelTLS().this_processor) || (cltr != kernelTLS().this_processor->cltr);
253                /* paranoid */ verify(external || kernelTLS().this_processor->rdq.id < lanes.count );
254
255                // write timestamp
256                thrd->link.ts = rdtscl();
257
258                bool local;
259                int preferred = external ? -1 : kernelTLS().this_processor->rdq.id;
260
261                // Try to pick a lane and lock it
262                unsigned i;
263                do {
264                        // Pick the index of a lane
265                        unsigned r = __tls_rand_fwd();
266                        [i, local] = idx_from_r(r, preferred);
267
268                        i %= __atomic_load_n( &lanes.count, __ATOMIC_RELAXED );
269
270                        #if !defined(__CFA_NO_STATISTICS__)
271                                if(external) {
272                                        if(local) __atomic_fetch_add(&cltr->stats->ready.pick.ext.local, 1, __ATOMIC_RELAXED);
273                                        __atomic_fetch_add(&cltr->stats->ready.pick.ext.attempt, 1, __ATOMIC_RELAXED);
274                                }
275                                else {
276                                        if(local) __tls_stats()->ready.pick.push.local++;
277                                        __tls_stats()->ready.pick.push.attempt++;
278                                }
279                        #endif
280
281                #if defined(USE_MPSC)
282                        // mpsc always succeeds
283                } while( false );
284                #else
285                        // If we can't lock it retry
286                } while( !__atomic_try_acquire( &lanes.data[i].lock ) );
287                #endif
288
289                // Actually push it
290                push(lanes.data[i], thrd);
291
292                #if !defined(USE_MPSC)
293                        // Unlock and return
294                        __atomic_unlock( &lanes.data[i].lock );
295                #endif
296
297                // Mark the current index in the tls rng instance as having an item
298                __tls_rand_advance_bck();
299
300		__cfadbg_print_safe(ready_queue, "Kernel : Pushed %p on cluster %p (idx: %u)\n", thrd, cltr, i);
301
302                // Update statistics
303                #if !defined(__CFA_NO_STATISTICS__)
304                        if(external) {
305                                if(local) __atomic_fetch_add(&cltr->stats->ready.pick.ext.lsuccess, 1, __ATOMIC_RELAXED);
306                                __atomic_fetch_add(&cltr->stats->ready.pick.ext.success, 1, __ATOMIC_RELAXED);
307                        }
308                        else {
309                                if(local) __tls_stats()->ready.pick.push.lsuccess++;
310                                __tls_stats()->ready.pick.push.success++;
311                        }
312                #endif
313        }
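	// Note on the RNG pairing (as used in this file): push draws the lane
	// index with __tls_rand_fwd() and then calls __tls_rand_advance_bck(),
	// while pop_fast below replays the same per-thread sequence in reverse
	// with __tls_rand_bck().  The apparent intent is that a processor's pops
	// preferentially revisit lanes it recently pushed to, improving the hit
	// rate of the two random probes.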
314
315	// Pop from the ready queue of a given cluster
316        __attribute__((hot)) $thread * pop_fast(struct cluster * cltr) with (cltr->ready_queue) {
317                /* paranoid */ verify( lanes.count > 0 );
318                /* paranoid */ verify( kernelTLS().this_processor );
319                /* paranoid */ verify( kernelTLS().this_processor->rdq.id < lanes.count );
320
321                unsigned count = __atomic_load_n( &lanes.count, __ATOMIC_RELAXED );
322                int preferred = kernelTLS().this_processor->rdq.id;
323
324
325                // As long as the list is not empty, try finding a lane that isn't empty and pop from it
326                for(25) {
327                        // Pick two lists at random
328                        unsigned ri = __tls_rand_bck();
329                        unsigned rj = __tls_rand_bck();
330
331                        unsigned i, j;
332                        __attribute__((unused)) bool locali, localj;
333                        [i, locali] = idx_from_r(ri, preferred);
334                        [j, localj] = idx_from_r(rj, preferred);
335
336                        #if !defined(__CFA_NO_STATISTICS__)
337                                if(locali && localj) {
338                                        __tls_stats()->ready.pick.pop.local++;
339                                }
340                        #endif
341
342                        i %= count;
343                        j %= count;
344
345                        // try popping from the 2 picked lists
346                        struct $thread * thrd = try_pop(cltr, i, j);
347                        if(thrd) {
348                                #if !defined(__CFA_NO_STATISTICS__)
349                                        if( locali || localj ) __tls_stats()->ready.pick.pop.lsuccess++;
350                                #endif
351                                return thrd;
352                        }
353                }
354
355		// All lanes were empty, return 0p
356                return 0p;
357        }
358
359        __attribute__((hot)) struct $thread * pop_slow(struct cluster * cltr) {
360                return search(cltr);
361        }
362#endif
363#if defined(USE_WORK_STEALING)
364        __attribute__((hot)) void push(struct cluster * cltr, struct $thread * thrd) with (cltr->ready_queue) {
365                __cfadbg_print_safe(ready_queue, "Kernel : Pushing %p on cluster %p\n", thrd, cltr);
366
367                const bool external = (!kernelTLS().this_processor) || (cltr != kernelTLS().this_processor->cltr);
368                /* paranoid */ verify(external || kernelTLS().this_processor->rdq.id < lanes.count );
369
370                // write timestamp
371                thrd->link.ts = rdtscl();
372
373                // Try to pick a lane and lock it
374                unsigned i;
375                do {
376                        if(unlikely(external)) {
377                                i = __tls_rand() % lanes.count;
378                        }
379                        else {
380                                processor * proc = kernelTLS().this_processor;
381                                unsigned r = proc->rdq.its++;
382                                i =  proc->rdq.id + (r % READYQ_SHARD_FACTOR);
383                        }
384
385
386                #if defined(USE_MPSC)
387                        // mpsc always succeeds
388                } while( false );
389                #else
390                        // If we can't lock it retry
391                } while( !__atomic_try_acquire( &lanes.data[i].lock ) );
392                #endif
393
394                // Actually push it
395                push(lanes.data[i], thrd);
396
397                #if !defined(USE_MPSC)
398                        // Unlock and return
399                        __atomic_unlock( &lanes.data[i].lock );
400                #endif
401
402		__cfadbg_print_safe(ready_queue, "Kernel : Pushed %p on cluster %p (idx: %u)\n", thrd, cltr, i);
403        }
404
405	// Pop from the ready queue of a given cluster
406        __attribute__((hot)) $thread * pop_fast(struct cluster * cltr) with (cltr->ready_queue) {
407                /* paranoid */ verify( lanes.count > 0 );
408                /* paranoid */ verify( kernelTLS().this_processor );
409                /* paranoid */ verify( kernelTLS().this_processor->rdq.id < lanes.count );
410
411                processor * proc = kernelTLS().this_processor;
412
413                if(proc->rdq.target == -1u) {
414                        proc->rdq.target = __tls_rand() % lanes.count;
415                        unsigned it1  = proc->rdq.itr;
416                        unsigned it2  = proc->rdq.itr + 1;
417                        unsigned idx1 = proc->rdq.id + (it1 % READYQ_SHARD_FACTOR);
418			unsigned idx2 = proc->rdq.id + (it2 % READYQ_SHARD_FACTOR);
419                        unsigned long long tsc1 = ts(lanes.data[idx1]);
420                        unsigned long long tsc2 = ts(lanes.data[idx2]);
421                        proc->rdq.cutoff = min(tsc1, tsc2);
422                }
423                else if(lanes.tscs[proc->rdq.target].tv < proc->rdq.cutoff) {
424                        $thread * t = try_pop(cltr, proc->rdq.target);
425                        proc->rdq.target = -1u;
426                        if(t) return t;
427                }
428
429                for(READYQ_SHARD_FACTOR) {
430                        unsigned i = proc->rdq.id + (--proc->rdq.itr % READYQ_SHARD_FACTOR);
431                        if($thread * t = try_pop(cltr, i)) return t;
432                }
433                return 0p;
434        }
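	// Stealing heuristic recap: a processor normally pops from its own
	// READYQ_SHARD_FACTOR lanes (the loop above).  Periodically it picks a
	// random victim lane (rdq.target) and records a cutoff timestamp taken
	// from two of its own lanes; on the next call it steals from the victim
	// only if the victim's recorded timestamp is older than that cutoff,
	// i.e. only if the victim's work looks older than the local work.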
435
436        __attribute__((hot)) struct $thread * pop_slow(struct cluster * cltr) with (cltr->ready_queue) {
437                for(25) {
438                        unsigned i = __tls_rand() % lanes.count;
439                        $thread * t = try_pop(cltr, i);
440                        if(t) return t;
441                }
442
443                return search(cltr);
444        }
445#endif
446
447//=======================================================================
448// Various Ready Queue utilities
449//=======================================================================
450// these functions work the same, or almost the same,
451// whether the scheduler uses work stealing or relaxed-FIFO scheduling
452
453//-----------------------------------------------------------------------
454// try to pop from a lane given by index w
455static inline struct $thread * try_pop(struct cluster * cltr, unsigned w) with (cltr->ready_queue) {
456        // Get relevant elements locally
457        __intrusive_lane_t & lane = lanes.data[w];
458
459        // If list looks empty retry
460        if( is_empty(lane) ) return 0p;
461
462        // If we can't get the lock retry
463        if( !__atomic_try_acquire(&lane.lock) ) return 0p;
464
465        // If list is empty, unlock and retry
466        if( is_empty(lane) ) {
467                __atomic_unlock(&lane.lock);
468                return 0p;
469        }
470
471        // Actually pop the list
472        struct $thread * thrd;
473        thrd = pop(lane);
474
475        /* paranoid */ verify(thrd);
476        /* paranoid */ verify(lane.lock);
477
478        // Unlock and return
479        __atomic_unlock(&lane.lock);
480
481        // Update statistics
482        #if !defined(__CFA_NO_STATISTICS__)
483                __tls_stats()->ready.pick.pop.success++;
484        #endif
485
486        #if defined(USE_WORK_STEALING)
487                lanes.tscs[w].tv = thrd->link.ts;
488        #endif
489
490        // return the popped thread
491        return thrd;
492}
493
494//-----------------------------------------------------------------------
495// try to pop from any lane, making sure not to miss any thread pushed
496// before the start of the function
497static inline struct $thread * search(struct cluster * cltr) with (cltr->ready_queue) {
498        /* paranoid */ verify( lanes.count > 0 );
499        unsigned count = __atomic_load_n( &lanes.count, __ATOMIC_RELAXED );
500        unsigned offset = __tls_rand();
501        for(i; count) {
502                unsigned idx = (offset + i) % count;
503                struct $thread * thrd = try_pop(cltr, idx);
504                if(thrd) {
505                        return thrd;
506                }
507        }
508
509	// All lanes were empty, return 0p
510        return 0p;
511}
512
513//-----------------------------------------------------------------------
514// Check that all the intrusive queues in the data structure are still consistent
515static void check( __ready_queue_t & q ) with (q) {
516        #if defined(__CFA_WITH_VERIFY__) && !defined(USE_MPSC)
517                {
518                        for( idx ; lanes.count ) {
519                                __intrusive_lane_t & sl = lanes.data[idx];
520                                assert(!lanes.data[idx].lock);
521
522                                assert(head(sl)->link.prev == 0p );
523                                assert(head(sl)->link.next->link.prev == head(sl) );
524                                assert(tail(sl)->link.next == 0p );
525                                assert(tail(sl)->link.prev->link.next == tail(sl) );
526
527                                if(is_empty(sl)) {
528                                        assert(tail(sl)->link.prev == head(sl));
529                                        assert(head(sl)->link.next == tail(sl));
530                                } else {
531                                        assert(tail(sl)->link.prev != head(sl));
532                                        assert(head(sl)->link.next != tail(sl));
533                                }
534                        }
535                }
536        #endif
537}
538
539//-----------------------------------------------------------------------
540// Given 2 indices, pick the list with the oldest push and try to pop from it
541static inline struct $thread * try_pop(struct cluster * cltr, unsigned i, unsigned j) with (cltr->ready_queue) {
542        #if !defined(__CFA_NO_STATISTICS__)
543                __tls_stats()->ready.pick.pop.attempt++;
544        #endif
545
546	// Pick the better list
547        int w = i;
548        if( __builtin_expect(!is_empty(lanes.data[j]), true) ) {
549                w = (ts(lanes.data[i]) < ts(lanes.data[j])) ? i : j;
550        }
551
552        return try_pop(cltr, w);
553}
554
555// Call this function if the intrusive list was moved using memcpy;
556// it fixes the list so that the pointers back to the anchors aren't left dangling
557static inline void fix(__intrusive_lane_t & ll) {
558        #if !defined(USE_MPSC)
559		// if the list is not empty then follow the pointers and fix their reverse links
560                if(!is_empty(ll)) {
561                        head(ll)->link.next->link.prev = head(ll);
562                        tail(ll)->link.prev->link.next = tail(ll);
563                }
564                // Otherwise just reset the list
565                else {
566                        verify(tail(ll)->link.next == 0p);
567                        tail(ll)->link.prev = head(ll);
568                        head(ll)->link.next = tail(ll);
569                        verify(head(ll)->link.prev == 0p);
570                }
571        #endif
572}
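// Why fix() is needed: the head and tail anchors are nodes embedded in the
// lane object itself (see ready_subqueue.hfa).  When ready_queue_grow/shrink
// realloc the lanes array, the anchors move with it, but the first and last
// real nodes still point at the anchors' old addresses; fix() re-aims those
// two pointers, or resets an empty lane so the anchors point at each other again.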
573
574static void assign_list(unsigned & value, dlist(processor, processor) & list, unsigned count) {
575        processor * it = &list`first;
576        for(unsigned i = 0; i < count; i++) {
577                /* paranoid */ verifyf( it, "Unexpected null iterator, at index %u of %u\n", i, count);
578                it->rdq.id = value;
579                it->rdq.target = -1u;
580                value += READYQ_SHARD_FACTOR;
581                it = &(*it)`next;
582        }
583}
584
585static void reassign_cltr_id(struct cluster * cltr) {
586        unsigned preferred = 0;
587        assign_list(preferred, cltr->procs.actives, cltr->procs.total - cltr->procs.idle);
588        assign_list(preferred, cltr->procs.idles  , cltr->procs.idle );
589}
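// Worked example (work-stealing build, READYQ_SHARD_FACTOR == 2): a cluster
// with 3 active processors and 1 idle processor assigns rdq.id values 0, 2
// and 4 to the active processors and 6 to the idle one, i.e. each processor
// owns the lanes [id, id + READYQ_SHARD_FACTOR).  Active processors are
// numbered first so their lanes sit at the front of the array.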
590
591static void fix_times( struct cluster * cltr ) with( cltr->ready_queue ) {
592        #if defined(USE_WORK_STEALING)
593                lanes.tscs = alloc(lanes.count, lanes.tscs`realloc);
594                for(i; lanes.count) {
595                        lanes.tscs[i].tv = ts(lanes.data[i]);
596                }
597        #endif
598}
599
600// Grow the ready queue
601void ready_queue_grow(struct cluster * cltr) {
602        size_t ncount;
603        int target = cltr->procs.total;
604
605        /* paranoid */ verify( ready_mutate_islocked() );
606        __cfadbg_print_safe(ready_queue, "Kernel : Growing ready queue\n");
607
608        // Make sure that everything is consistent
609        /* paranoid */ check( cltr->ready_queue );
610
611        // grow the ready queue
612        with( cltr->ready_queue ) {
613                // Find new count
614		// Make sure we always have at least 1 list
615                if(target >= 2) {
616                        ncount = target * READYQ_SHARD_FACTOR;
617                } else {
618                        ncount = SEQUENTIAL_SHARD;
619                }
620
621                // Allocate new array (uses realloc and memcpies the data)
622                lanes.data = alloc( ncount, lanes.data`realloc );
623
624                // Fix the moved data
625                for( idx; (size_t)lanes.count ) {
626                        fix(lanes.data[idx]);
627                }
628
629                // Construct new data
630                for( idx; (size_t)lanes.count ~ ncount) {
631                        (lanes.data[idx]){};
632                }
633
634                // Update original
635                lanes.count = ncount;
636        }
637
638        fix_times(cltr);
639
640        reassign_cltr_id(cltr);
641
642        // Make sure that everything is consistent
643        /* paranoid */ check( cltr->ready_queue );
644
645        __cfadbg_print_safe(ready_queue, "Kernel : Growing ready queue done\n");
646
647        /* paranoid */ verify( ready_mutate_islocked() );
648}
649
650// Shrink the ready queue
651void ready_queue_shrink(struct cluster * cltr) {
652        /* paranoid */ verify( ready_mutate_islocked() );
653        __cfadbg_print_safe(ready_queue, "Kernel : Shrinking ready queue\n");
654
655        // Make sure that everything is consistent
656        /* paranoid */ check( cltr->ready_queue );
657
658        int target = cltr->procs.total;
659
660        with( cltr->ready_queue ) {
661                // Remember old count
662                size_t ocount = lanes.count;
663
664                // Find new count
665		// Make sure we always have at least 1 list
666		lanes.count = target >= 2 ? target * READYQ_SHARD_FACTOR : SEQUENTIAL_SHARD;
667                /* paranoid */ verify( ocount >= lanes.count );
668                /* paranoid */ verify( lanes.count == target * READYQ_SHARD_FACTOR || target < 2 );
669
670                // for printing count the number of displaced threads
671                #if defined(__CFA_DEBUG_PRINT__) || defined(__CFA_DEBUG_PRINT_READY_QUEUE__)
672                        __attribute__((unused)) size_t displaced = 0;
673                #endif
674
675                // redistribute old data
676                for( idx; (size_t)lanes.count ~ ocount) {
677                        // Lock is not strictly needed but makes checking invariants much easier
678                        __attribute__((unused)) bool locked = __atomic_try_acquire(&lanes.data[idx].lock);
679                        verify(locked);
680
681			// As long as this lane is not empty, pop its threads and push them back somewhere else in the queue
682                        while(!is_empty(lanes.data[idx])) {
683                                struct $thread * thrd;
684                                thrd = pop(lanes.data[idx]);
685
686                                push(cltr, thrd);
687
688                                // for printing count the number of displaced threads
689                                #if defined(__CFA_DEBUG_PRINT__) || defined(__CFA_DEBUG_PRINT_READY_QUEUE__)
690                                        displaced++;
691                                #endif
692                        }
693
694                        // Unlock the lane
695                        __atomic_unlock(&lanes.data[idx].lock);
696
697                        // TODO print the queue statistics here
698
699                        ^(lanes.data[idx]){};
700                }
701
702                __cfadbg_print_safe(ready_queue, "Kernel : Shrinking ready queue displaced %zu threads\n", displaced);
703
704                // Allocate new array (uses realloc and memcpies the data)
705                lanes.data = alloc( lanes.count, lanes.data`realloc );
706
707                // Fix the moved data
708                for( idx; (size_t)lanes.count ) {
709                        fix(lanes.data[idx]);
710                }
711        }
712
713        fix_times(cltr);
714
715        reassign_cltr_id(cltr);
716
717        // Make sure that everything is consistent
718        /* paranoid */ check( cltr->ready_queue );
719
720        __cfadbg_print_safe(ready_queue, "Kernel : Shrinking ready queue done\n");
721        /* paranoid */ verify( ready_mutate_islocked() );
722}