source: libcfa/src/concurrency/ready_queue.cfa @ c426b03

Last change on this file since c426b03 was c426b03, checked in by Thierry Delisle <tdelisle@…>, 7 months ago

Minor clean-up

//
// Cforall Version 1.0.0 Copyright (C) 2019 University of Waterloo
//
// The contents of this file are covered under the licence agreement in the
// file "LICENCE" distributed with Cforall.
//
// ready_queue.cfa --
//
// Author           : Thierry Delisle
// Created On       : Mon Nov dd 16:29:18 2019
// Last Modified By :
// Last Modified On :
// Update Count     :
//

#define __cforall_thread__
// #define __CFA_DEBUG_PRINT_READY_QUEUE__

// #define USE_SNZI

#include "bits/defs.hfa"
#include "kernel_private.hfa"

#define _GNU_SOURCE
#include "stdlib.hfa"
#include "math.hfa"

#include <unistd.h>

#include "snzi.hfa"
#include "ready_subqueue.hfa"

static const size_t cache_line_size = 64;

// No overridden function, no environment variable, no define:
// fall back to a magic number.
#ifndef __CFA_MAX_PROCESSORS__
	#define __CFA_MAX_PROCESSORS__ 1024
#endif

#define BIAS 4

// returns the maximum number of processors the RWLock supports
__attribute__((weak)) unsigned __max_processors() {
	const char * max_cores_s = getenv("CFA_MAX_PROCESSORS");
	if(!max_cores_s) {
		__cfadbg_print_nolock(ready_queue, "No CFA_MAX_PROCESSORS in ENV\n");
		return __CFA_MAX_PROCESSORS__;
	}

	char * endptr = 0p;
	long int max_cores_l = strtol(max_cores_s, &endptr, 10);
	if(max_cores_l < 1 || max_cores_l > 65535) {
		__cfadbg_print_nolock(ready_queue, "CFA_MAX_PROCESSORS out of range : %ld\n", max_cores_l);
		return __CFA_MAX_PROCESSORS__;
	}
	if('\0' != *endptr) {
		__cfadbg_print_nolock(ready_queue, "CFA_MAX_PROCESSORS not a decimal number : %s\n", max_cores_s);
		return __CFA_MAX_PROCESSORS__;
	}

	return max_cores_l;
}

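// A minimal usage sketch (assuming a POSIX shell; the binary name is
// illustrative): the limit can be set per run through the environment,
//
//   $ CFA_MAX_PROCESSORS=256 ./a.out
//
// while an unset variable, an out-of-range value, or a non-decimal value
// falls back to __CFA_MAX_PROCESSORS__ (1024 unless overridden at compile
// time). The weak attribute also lets a program link in its own
// __max_processors() instead.
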
//=======================================================================
// Cluster wide reader-writer lock
//=======================================================================
void  ?{}(__scheduler_RWLock_t & this) {
	this.max   = __max_processors();
	this.alloc = 0;
	this.ready = 0;
	this.lock  = false;
	this.data  = alloc(this.max);

	/*paranoid*/ verify( 0 == (((uintptr_t)(this.data    )) % 64) );
	/*paranoid*/ verify( 0 == (((uintptr_t)(this.data + 1)) % 64) );
	/*paranoid*/ verify(__atomic_is_lock_free(sizeof(this.alloc), &this.alloc));
	/*paranoid*/ verify(__atomic_is_lock_free(sizeof(this.ready), &this.ready));

}
void ^?{}(__scheduler_RWLock_t & this) {
	free(this.data);
}

void ?{}( __scheduler_lock_id_t & this, __processor_id_t * proc ) {
	this.handle = proc;
	this.lock   = false;
	#ifdef __CFA_WITH_VERIFY__
		this.owned  = false;
	#endif
}

//=======================================================================
// Lock-free registering/unregistering of threads
unsigned doregister( struct __processor_id_t * proc ) with(*__scheduler_lock) {
	__cfadbg_print_safe(ready_queue, "Kernel : Registering proc %p for RW-Lock\n", proc);

	// Step - 1 : check if there is already space in the data
	uint_fast32_t s = ready;

	// Check among all the ready slots
	for(uint_fast32_t i = 0; i < s; i++) {
		__processor_id_t * null = 0p; // Re-write every iteration since a failed compare-exchange overwrites it
		if( __atomic_load_n(&data[i].handle, (int)__ATOMIC_RELAXED) == null
			&& __atomic_compare_exchange_n( &data[i].handle, &null, proc, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) {
			/*paranoid*/ verify(i < ready);
			/*paranoid*/ verify(0 == (__alignof__(data[i]) % cache_line_size));
			/*paranoid*/ verify((((uintptr_t)&data[i]) % cache_line_size) == 0);
			return i;
		}
	}

	if(max <= alloc) abort("Trying to create more than %u processors", __scheduler_lock->max);

	// Step - 2 : F&A to get a new spot in the array.
	uint_fast32_t n = __atomic_fetch_add(&alloc, 1, __ATOMIC_SEQ_CST);
	if(max <= n) abort("Trying to create more than %u processors", __scheduler_lock->max);

	// Step - 3 : Mark space as used and then publish it.
	__scheduler_lock_id_t * storage = (__scheduler_lock_id_t *)&data[n];
	(*storage){ proc };
	while() {
		unsigned copy = n;
		if( __atomic_load_n(&ready, __ATOMIC_RELAXED) == n
			&& __atomic_compare_exchange_n(&ready, &copy, n + 1, true, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
			break;
		Pause();
	}

	__cfadbg_print_safe(ready_queue, "Kernel : Registering proc %p done, id %lu\n", proc, n);

	// Return new spot.
	/*paranoid*/ verify(n < ready);
	/*paranoid*/ verify(__alignof__(data[n]) == (2 * cache_line_size));
	/*paranoid*/ verify((((uintptr_t)&data[n]) % cache_line_size) == 0);
	return n;
}

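// A sketch of why the publish loop in Step 3 is needed (slot numbers are
// illustrative): `ready` acts as a watermark, and slot n may only become
// visible once slots 0..n-1 are visible.
//
//   P1: n = F&A(alloc) -> 0          P2: n = F&A(alloc) -> 1
//   P2: CAS(ready, 1 -> 2) fails (ready is still 0), Pause()
//   P1: CAS(ready, 0 -> 1) succeeds, returns 0
//   P2: retries, CAS(ready, 1 -> 2) succeeds, returns 1
//
// Readers scanning data[0 .. ready) therefore never observe an
// unconstructed slot.
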
void unregister( struct __processor_id_t * proc ) with(*__scheduler_lock) {
	unsigned id = proc->id;
	/*paranoid*/ verify(id < ready);
	/*paranoid*/ verify(proc == __atomic_load_n(&data[id].handle, __ATOMIC_RELAXED));
	__atomic_store_n(&data[id].handle, 0p, __ATOMIC_RELEASE);

	__cfadbg_print_safe(ready_queue, "Kernel : Unregister proc %p\n", proc);
}

//-----------------------------------------------------------------------
// Writer side : acquire when changing the ready queue, e.g. adding more
//  queues or removing them.
uint_fast32_t ready_mutate_lock( void ) with(*__scheduler_lock) {
	/* paranoid */ verify( ! __preemption_enabled() );

	// Step 1 : lock global lock
	// This is needed to prevent processors that register mid critical-section
	//   from simply locking their own lock and entering.
	__atomic_acquire( &lock );

	// Step 2 : lock per-proc lock
	// Processors that are currently being registered aren't counted
	//   but can't be in read_lock or in the critical section.
	// All other processors are counted
	uint_fast32_t s = ready;
	for(uint_fast32_t i = 0; i < s; i++) {
		__atomic_acquire( &data[i].lock );
	}

	/* paranoid */ verify( ! __preemption_enabled() );
	return s;
}

void ready_mutate_unlock( uint_fast32_t last_s ) with(*__scheduler_lock) {
	/* paranoid */ verify( ! __preemption_enabled() );

	// Step 1 : release local locks
	// This must be done while the global lock is held to prevent
	//   threads that were created mid critical-section
	//   from racing to lock their local locks and having the writer
	//   immediately unlock them
	// Alternative solution : return s in write_lock and pass it to write_unlock
	for(uint_fast32_t i = 0; i < last_s; i++) {
		verify(data[i].lock);
		__atomic_store_n(&data[i].lock, (bool)false, __ATOMIC_RELEASE);
	}

	// Step 2 : release global lock
	/*paranoid*/ assert(true == lock);
	__atomic_store_n(&lock, (bool)false, __ATOMIC_RELEASE);

	/* paranoid */ verify( ! __preemption_enabled() );
}

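// Intended writer-side pairing, as a minimal sketch (the names mirror this
// file; the surrounding resize logic lives in ready_queue_grow/shrink):
//
//   uint_fast32_t s = ready_mutate_lock();    // stop all readers
//   /* mutate the lanes, e.g. ready_queue_grow( cltr, target ) */
//   ready_mutate_unlock( s );                 // release in the same scope
//
// The count returned by lock must be passed back to unlock: processors that
// registered in between were never locked and must not be released.
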
//=======================================================================
// Cforall Ready Queue used for scheduling
//=======================================================================
void ?{}(__ready_queue_t & this) with (this) {
	lanes.data  = 0p;
	lanes.count = 0;
}

void ^?{}(__ready_queue_t & this) with (this) {
	verify( 1 == lanes.count );
	#ifdef USE_SNZI
		verify( !query( snzi ) );
	#endif
	free(lanes.data);
}

//-----------------------------------------------------------------------
__attribute__((hot)) bool query(struct cluster * cltr) {
	#ifdef USE_SNZI
		return query(cltr->ready_queue.snzi);
	#endif
	return true;
}

static inline [unsigned, bool] idx_from_r(unsigned r, int preferred) {
	unsigned i;
	bool local;
	#if defined(BIAS)
		unsigned rlow  = r % BIAS;
		unsigned rhigh = r / BIAS;
		if((0 != rlow) && preferred >= 0) {
			// (BIAS - 1) out of BIAS chances
			// Use preferred queues
			i = preferred + (rhigh % 4);
			local = true;
		}
		else {
			// 1 out of BIAS chances
			// Use all queues
			i = rhigh;
			local = false;
		}
	#else
		i = r;
		local = false;
	#endif
	return [i, local];
}

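// Worked example of the split above, assuming BIAS == 4 and preferred == 8:
//
//   r = 13  ->  rlow = 13 % 4 = 1, rhigh = 13 / 4 = 3
//               rlow != 0, so i = 8 + (3 % 4) = 11, local = true
//   r = 12  ->  rlow = 0, so i = rhigh = 3, local = false
//
// i.e. 3 draws out of 4 land in the block of 4 lanes starting at `preferred`,
// and the remaining draw ranges over all lanes (after the caller applies
// i %= lanes.count).
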
//-----------------------------------------------------------------------
__attribute__((hot)) bool push(struct cluster * cltr, struct $thread * thrd) with (cltr->ready_queue) {
	__cfadbg_print_safe(ready_queue, "Kernel : Pushing %p on cluster %p\n", thrd, cltr);

	#if !defined(__CFA_NO_STATISTICS__)
		const bool external = (!kernelTLS().this_proc_id->full_proc) || (cltr != kernelTLS().this_processor->cltr);
	#endif

	// write timestamp
	thrd->link.ts = rdtscl();

	bool first = false;
	__attribute__((unused)) bool local;
	__attribute__((unused)) int preferred;
	#if defined(BIAS)
		preferred =
			//*
			kernelTLS().this_processor ? kernelTLS().this_processor->cltr_id : -1;
			/*/
			thrd->link.preferred * 4;
			//*/
	#endif

	// Try to pick a lane and lock it
	unsigned i;
	do {
		// Pick the index of a lane
		// unsigned r = __tls_rand();
		unsigned r = __tls_rand_fwd();
		[i, local] = idx_from_r(r, preferred);

		i %= __atomic_load_n( &lanes.count, __ATOMIC_RELAXED );

		#if !defined(__CFA_NO_STATISTICS__)
			if(external) {
				if(local) __atomic_fetch_add(&cltr->stats->ready.pick.ext.local, 1, __ATOMIC_RELAXED);
				__atomic_fetch_add(&cltr->stats->ready.pick.ext.attempt, 1, __ATOMIC_RELAXED);
			}
			else {
				if(local) __tls_stats()->ready.pick.push.local++;
				__tls_stats()->ready.pick.push.attempt++;
			}
		#endif

		// If we can't lock it, retry
	} while( !__atomic_try_acquire( &lanes.data[i].lock ) );

	// Actually push it
	#ifdef USE_SNZI
		bool lane_first =
	#endif

	push(lanes.data[i], thrd);

	#ifdef USE_SNZI
		// If this lane used to be empty we need to do more
		if(lane_first) {
			// Check if the entire queue used to be empty
			first = !query(snzi);

			// Update the snzi
			arrive( snzi, i );
		}
	#endif

	// Unlock and return
	__atomic_unlock( &lanes.data[i].lock );

	// Mark the current index in the tls rng instance as having an item
	__tls_rand_advance_bck();

	__cfadbg_print_safe(ready_queue, "Kernel : Pushed %p on cluster %p (idx: %u, mask %llu, first %d)\n", thrd, cltr, i, used.mask[0], lane_first);

	// Update statistics
	#if !defined(__CFA_NO_STATISTICS__)
		if(external) {
			if(local) __atomic_fetch_add(&cltr->stats->ready.pick.ext.lsuccess, 1, __ATOMIC_RELAXED);
			__atomic_fetch_add(&cltr->stats->ready.pick.ext.success, 1, __ATOMIC_RELAXED);
		}
		else {
			if(local) __tls_stats()->ready.pick.push.lsuccess++;
			__tls_stats()->ready.pick.push.success++;
		}
	#endif

	// return whether or not the list was empty before this push
	return first;
}

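// Note on the RNG pairing: push draws from the per-thread stream with
// __tls_rand_fwd() and calls __tls_rand_advance_bck() on success, while pop
// (below) draws with __tls_rand_bck(). The apparent intent is that the
// backwards stream replays the same values in reverse, so a processor first
// probes the lanes it most recently pushed to, without any shared
// bookkeeping between push and pop.
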
static struct $thread * try_pop(struct cluster * cltr, unsigned i, unsigned j);
static struct $thread * try_pop(struct cluster * cltr, unsigned i);

// Pop from the ready queue of a given cluster
__attribute__((hot)) $thread * pop(struct cluster * cltr) with (cltr->ready_queue) {
	/* paranoid */ verify( lanes.count > 0 );
	unsigned count = __atomic_load_n( &lanes.count, __ATOMIC_RELAXED );
	int preferred;
	#if defined(BIAS)
		// Don't bother trying locally too much
		preferred = kernelTLS().this_processor->cltr_id;
	#endif

	// As long as the list is not empty, try finding a lane that isn't empty and pop from it
	#ifdef USE_SNZI
		while( query(snzi) ) {
	#else
		for(25) {
	#endif
		// Pick two lists at random
		// unsigned ri = __tls_rand();
		// unsigned rj = __tls_rand();
		unsigned ri = __tls_rand_bck();
		unsigned rj = __tls_rand_bck();

		unsigned i, j;
		__attribute__((unused)) bool locali, localj;
		[i, locali] = idx_from_r(ri, preferred);
		[j, localj] = idx_from_r(rj, preferred);

		#if !defined(__CFA_NO_STATISTICS__)
			if(locali && localj) {
				__tls_stats()->ready.pick.pop.local++;
			}
		#endif

		i %= count;
		j %= count;

		// try popping from the 2 picked lists
		struct $thread * thrd = try_pop(cltr, i, j);
		if(thrd) {
			#if defined(BIAS) && !defined(__CFA_NO_STATISTICS__)
				if( locali || localj ) __tls_stats()->ready.pick.pop.lsuccess++;
			#endif
			return thrd;
		}
	}

	// All lanes were empty, return 0p
	return 0p;
}

__attribute__((hot)) struct $thread * pop_slow(struct cluster * cltr) with (cltr->ready_queue) {
	/* paranoid */ verify( lanes.count > 0 );
	unsigned count = __atomic_load_n( &lanes.count, __ATOMIC_RELAXED );
	unsigned offset = __tls_rand();
	for(i; count) {
		unsigned idx = (offset + i) % count;
		struct $thread * thrd = try_pop(cltr, idx);
		if(thrd) {
			return thrd;
		}
	}

	// All lanes were empty, return 0p
	return 0p;
}


//-----------------------------------------------------------------------
// Given 2 indices, pick the list with the oldest push and try to pop from it
static inline struct $thread * try_pop(struct cluster * cltr, unsigned i, unsigned j) with (cltr->ready_queue) {
	#if !defined(__CFA_NO_STATISTICS__)
		__tls_stats()->ready.pick.pop.attempt++;
	#endif

	// Pick the best list
	int w = i;
	if( __builtin_expect(!is_empty(lanes.data[j]), true) ) {
		w = (ts(lanes.data[i]) < ts(lanes.data[j])) ? i : j;
	}

	return try_pop(cltr, w);
}

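// The two-argument try_pop above is a "power of two choices" step: comparing
// the head timestamps of two random lanes and popping the older one keeps the
// lanes roughly balanced and approximates global FIFO order without a shared
// lock. A rough sketch of the selection, with ts() reading the head's
// rdtscl() timestamp recorded at push time:
//
//   lane i: head pushed at ts 100    lane j: head pushed at ts 250
//   -> w = i (older push), so the longer-waiting thread runs first
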
static inline struct $thread * try_pop(struct cluster * cltr, unsigned w) with (cltr->ready_queue) {
	// Get relevant elements locally
	__intrusive_lane_t & lane = lanes.data[w];

	// If the list looks empty, retry
	if( is_empty(lane) ) return 0p;

	// If we can't get the lock, retry
	if( !__atomic_try_acquire(&lane.lock) ) return 0p;

	// If the list is empty, unlock and retry
	if( is_empty(lane) ) {
		__atomic_unlock(&lane.lock);
		return 0p;
	}

	// Actually pop the list
	struct $thread * thrd;
	thrd = pop(lane);

	/* paranoid */ verify(thrd);
	/* paranoid */ verify(lane.lock);

	#ifdef USE_SNZI
		// If this was the last element in the lane
		if(emptied) {
			depart( snzi, w );
		}
	#endif

	// Unlock and return
	__atomic_unlock(&lane.lock);

	// Update statistics
	#if !defined(__CFA_NO_STATISTICS__)
		__tls_stats()->ready.pick.pop.success++;
	#endif

	// Update the thread bias
	thrd->link.preferred = w / 4;

	// return the popped thread
	return thrd;
}
//-----------------------------------------------------------------------

bool remove_head(struct cluster * cltr, struct $thread * thrd) with (cltr->ready_queue) {
	for(i; lanes.count) {
		__intrusive_lane_t & lane = lanes.data[i];

		bool removed = false;

		__atomic_acquire(&lane.lock);
			if(head(lane)->link.next == thrd) {
				$thread * pthrd;
				pthrd = pop(lane);

				/* paranoid */ verify( pthrd == thrd );

				removed = true;
				#ifdef USE_SNZI
					if(emptied) {
						depart( snzi, i );
					}
				#endif
			}
		__atomic_unlock(&lane.lock);

		if( removed ) return true;
	}
	return false;
}

//-----------------------------------------------------------------------

static void check( __ready_queue_t & q ) with (q) {
	#if defined(__CFA_WITH_VERIFY__)
		{
			for( idx ; lanes.count ) {
				__intrusive_lane_t & sl = lanes.data[idx];
				assert(!lanes.data[idx].lock);

				assert(head(sl)->link.prev == 0p );
				assert(head(sl)->link.next->link.prev == head(sl) );
				assert(tail(sl)->link.next == 0p );
				assert(tail(sl)->link.prev->link.next == tail(sl) );

				if(sl.before.link.ts == 0l) {
					assert(tail(sl)->link.prev == head(sl));
					assert(head(sl)->link.next == tail(sl));
				} else {
					assert(tail(sl)->link.prev != head(sl));
					assert(head(sl)->link.next != tail(sl));
				}
			}
		}
	#endif
}

// Call this function if the intrusive list was moved using memcpy;
// it fixes the list so that the pointers back to the anchors aren't left dangling
static inline void fix(__intrusive_lane_t & ll) {
	// if the list is not empty then follow the pointers and fix their reverses
	if(!is_empty(ll)) {
		head(ll)->link.next->link.prev = head(ll);
		tail(ll)->link.prev->link.next = tail(ll);
	}
	// Otherwise just reset the list
	else {
		verify(tail(ll)->link.next == 0p);
		tail(ll)->link.prev = head(ll);
		head(ll)->link.next = tail(ll);
		verify(head(ll)->link.prev == 0p);
	}
}

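// Why fix() is needed, as a sketch: the lanes array is realloc'd below, so
// the head/tail anchors embedded in each __intrusive_lane_t move to new
// addresses, but the first and last list nodes still point at the old anchor
// locations. For a non-empty lane, the situation right after the memcpy is
// roughly:
//
//   old anchors (freed memory) <- first node's prev / last node's next
//   new anchors (copied)       <- not yet referenced by any node
//
// fix() re-aims first->prev and last->next at the copied anchors; an empty
// lane is simply re-linked to itself.
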
// Grow the ready queue
unsigned ready_queue_grow(struct cluster * cltr, int target) {
	unsigned preferred;
	size_t ncount;

	/* paranoid */ verify( ready_mutate_islocked() );
	__cfadbg_print_safe(ready_queue, "Kernel : Growing ready queue\n");

	// Make sure that everything is consistent
	/* paranoid */ check( cltr->ready_queue );

	// grow the ready queue
	with( cltr->ready_queue ) {
		#ifdef USE_SNZI
			^(snzi){};
		#endif

		// Find new count
		// Make sure we always have at least 1 list
		if(target >= 2) {
			ncount = target * 4;
			preferred = ncount - 4;
		} else {
			ncount = 1;
			preferred = 0;
		}

		// Allocate new array (uses realloc and memcpies the data)
		lanes.data = alloc( ncount, lanes.data`realloc );

		// Fix the moved data
		for( idx; (size_t)lanes.count ) {
			fix(lanes.data[idx]);
		}

		// Construct new data
		for( idx; (size_t)lanes.count ~ ncount) {
			(lanes.data[idx]){};
		}

		// Update original
		lanes.count = ncount;

		#ifdef USE_SNZI
			// Re-create the snzi
			snzi{ log2( lanes.count / 8 ) };
			for( idx; (size_t)lanes.count ) {
				if( !is_empty(lanes.data[idx]) ) {
					arrive(snzi, idx);
				}
			}
		#endif
	}

	// Make sure that everything is consistent
	/* paranoid */ check( cltr->ready_queue );

	__cfadbg_print_safe(ready_queue, "Kernel : Growing ready queue done\n");

	/* paranoid */ verify( ready_mutate_islocked() );
	return preferred;
}

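// Sizing note: with `target` processors the queue keeps target * 4 lanes,
// matching the blocks of 4 used by idx_from_r and thrd->link.preferred. For
// example, target == 3 gives ncount == 12 and the newest processor's
// preferred block starts at lane 8. A single lane is kept even with fewer
// than 2 processors so push/pop always have somewhere to go.
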
// Shrink the ready queue
void ready_queue_shrink(struct cluster * cltr, int target) {
	/* paranoid */ verify( ready_mutate_islocked() );
	__cfadbg_print_safe(ready_queue, "Kernel : Shrinking ready queue\n");

	// Make sure that everything is consistent
	/* paranoid */ check( cltr->ready_queue );

	with( cltr->ready_queue ) {
		#ifdef USE_SNZI
			^(snzi){};
		#endif

		// Remember old count
		size_t ocount = lanes.count;

		// Find new count
		// Make sure we always have at least 1 list
		lanes.count = target >= 2 ? target * 4: 1;
		/* paranoid */ verify( ocount >= lanes.count );
		/* paranoid */ verify( lanes.count == target * 4 || target < 2 );

		// for printing, count the number of displaced threads
		#if defined(__CFA_DEBUG_PRINT__) || defined(__CFA_DEBUG_PRINT_READY_QUEUE__)
			__attribute__((unused)) size_t displaced = 0;
		#endif

		// redistribute old data
		for( idx; (size_t)lanes.count ~ ocount) {
			// Lock is not strictly needed but makes checking invariants much easier
			__attribute__((unused)) bool locked = __atomic_try_acquire(&lanes.data[idx].lock);
			verify(locked);

			// As long as we can pop from this lane, push the threads somewhere else in the queue
			while(!is_empty(lanes.data[idx])) {
				struct $thread * thrd;
				thrd = pop(lanes.data[idx]);

				push(cltr, thrd);

				// for printing, count the number of displaced threads
				#if defined(__CFA_DEBUG_PRINT__) || defined(__CFA_DEBUG_PRINT_READY_QUEUE__)
					displaced++;
				#endif
			}

			// Unlock the lane
			__atomic_unlock(&lanes.data[idx].lock);

			// TODO print the queue statistics here

			^(lanes.data[idx]){};
		}

		__cfadbg_print_safe(ready_queue, "Kernel : Shrinking ready queue displaced %zu threads\n", displaced);

		// Allocate new array (uses realloc and memcpies the data)
		lanes.data = alloc( lanes.count, lanes.data`realloc );

		// Fix the moved data
		for( idx; (size_t)lanes.count ) {
			fix(lanes.data[idx]);
		}

		#ifdef USE_SNZI
			// Re-create the snzi
			snzi{ log2( lanes.count / 8 ) };
			for( idx; (size_t)lanes.count ) {
				if( !is_empty(lanes.data[idx]) ) {
					arrive(snzi, idx);
				}
			}
		#endif
	}

	// Make sure that everything is consistent
	/* paranoid */ check( cltr->ready_queue );

	__cfadbg_print_safe(ready_queue, "Kernel : Shrinking ready queue done\n");
	/* paranoid */ verify( ready_mutate_islocked() );
}