source: libcfa/src/concurrency/ready_queue.cfa @ 341aa39

Last change on this file since 341aa39 was 341aa39, checked in by Thierry Delisle <tdelisle@…>, 6 months ago

Fix bugs in workstealing edge cases:

  • cutoff was wrong if all local queues are empty
  • target would get stuck if the rng returned the local queues
File size: 22.3 KB
//
// Cforall Version 1.0.0 Copyright (C) 2019 University of Waterloo
//
// The contents of this file are covered under the licence agreement in the
// file "LICENCE" distributed with Cforall.
//
// ready_queue.cfa --
//
// Author           : Thierry Delisle
// Created On       : Mon Nov dd 16:29:18 2019
// Last Modified By :
// Last Modified On :
// Update Count     :
//

#define __cforall_thread__
// #define __CFA_DEBUG_PRINT_READY_QUEUE__

// #define USE_MPSC

#define USE_RELAXED_FIFO
// #define USE_WORK_STEALING

#include "bits/defs.hfa"
#include "kernel_private.hfa"

#define _GNU_SOURCE
#include "stdlib.hfa"
#include "math.hfa"

#include <unistd.h>

#include "ready_subqueue.hfa"

static const size_t cache_line_size = 64;

#if !defined(__CFA_NO_STATISTICS__)
	#define __STATS(...) __VA_ARGS__
#else
	#define __STATS(...)
#endif

// No overridden function, no environment variable, no define:
// fall back to a magic number
#ifndef __CFA_MAX_PROCESSORS__
	#define __CFA_MAX_PROCESSORS__ 1024
#endif

#if   defined(USE_RELAXED_FIFO)
	#define BIAS 4
	#define READYQ_SHARD_FACTOR 4
	#define SEQUENTIAL_SHARD 1
#elif defined(USE_WORK_STEALING)
	#define READYQ_SHARD_FACTOR 2
	#define SEQUENTIAL_SHARD 2
#else
	#error no scheduling strategy selected
#endif

static inline struct $thread * try_pop(struct cluster * cltr, unsigned w __STATS(, __stats_readyQ_pop_t & stats));
static inline struct $thread * try_pop(struct cluster * cltr, unsigned i, unsigned j __STATS(, __stats_readyQ_pop_t & stats));
static inline struct $thread * search(struct cluster * cltr);
static inline [unsigned, bool] idx_from_r(unsigned r, unsigned preferred);

// returns the maximum number of processors the RWLock supports
__attribute__((weak)) unsigned __max_processors() {
	const char * max_cores_s = getenv("CFA_MAX_PROCESSORS");
	if(!max_cores_s) {
		__cfadbg_print_nolock(ready_queue, "No CFA_MAX_PROCESSORS in ENV\n");
		return __CFA_MAX_PROCESSORS__;
	}

	char * endptr = 0p;
	long int max_cores_l = strtol(max_cores_s, &endptr, 10);
	if(max_cores_l < 1 || max_cores_l > 65535) {
		__cfadbg_print_nolock(ready_queue, "CFA_MAX_PROCESSORS out of range : %ld\n", max_cores_l);
		return __CFA_MAX_PROCESSORS__;
	}
	if('\0' != *endptr) {
		__cfadbg_print_nolock(ready_queue, "CFA_MAX_PROCESSORS not a decimal number : %s\n", max_cores_s);
		return __CFA_MAX_PROCESSORS__;
	}

	return max_cores_l;
}
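
// Illustration (not part of the original source): because __max_processors is
// a weak symbol, a program can override the limit at link time with a strong
// definition, e.g.
//
//   unsigned __max_processors() { return 64; }   // strong definition wins
//
// or at run time through the environment variable read above:
//
//   $ CFA_MAX_PROCESSORS=256 ./a.out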

//=======================================================================
// Cluster wide reader-writer lock
//=======================================================================
void  ?{}(__scheduler_RWLock_t & this) {
	this.max   = __max_processors();
	this.alloc = 0;
	this.ready = 0;
	this.lock  = false;
	this.data  = alloc(this.max);

	/*paranoid*/ verify( 0 == (((uintptr_t)(this.data    )) % 64) );
	/*paranoid*/ verify( 0 == (((uintptr_t)(this.data + 1)) % 64) );
	/*paranoid*/ verify(__atomic_is_lock_free(sizeof(this.alloc), &this.alloc));
	/*paranoid*/ verify(__atomic_is_lock_free(sizeof(this.ready), &this.ready));

}
void ^?{}(__scheduler_RWLock_t & this) {
	free(this.data);
}

void ?{}( __scheduler_lock_id_t & this, __processor_id_t * proc ) {
	this.handle = proc;
	this.lock   = false;
	#ifdef __CFA_WITH_VERIFY__
		this.owned  = false;
	#endif
}

//=======================================================================
// Lock-Free registering/unregistering of threads
void register_proc_id( struct __processor_id_t * proc ) with(*__scheduler_lock) {
	__cfadbg_print_safe(ready_queue, "Kernel : Registering proc %p for RW-Lock\n", proc);

	// Step - 1 : check if there is already space in the data
	uint_fast32_t s = ready;

	// Check among all the ready
	for(uint_fast32_t i = 0; i < s; i++) {
		__processor_id_t * null = 0p; // Re-write every loop since compare thrashes it
		if( __atomic_load_n(&data[i].handle, (int)__ATOMIC_RELAXED) == null
			&& __atomic_compare_exchange_n( &data[i].handle, &null, proc, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) {
			/*paranoid*/ verify(i < ready);
			/*paranoid*/ verify(0 == (__alignof__(data[i]) % cache_line_size));
			/*paranoid*/ verify((((uintptr_t)&data[i]) % cache_line_size) == 0);
			proc->id = i;
			return; // reusing a freed slot, do not also allocate a new one
		}
	}

	if(max <= alloc) abort("Trying to create more than %u processors", __scheduler_lock->max);

	// Step - 2 : F&A to get a new spot in the array.
	uint_fast32_t n = __atomic_fetch_add(&alloc, 1, __ATOMIC_SEQ_CST);
	if(max <= n) abort("Trying to create more than %u processors", __scheduler_lock->max);

	// Step - 3 : Mark space as used and then publish it.
	__scheduler_lock_id_t * storage = (__scheduler_lock_id_t *)&data[n];
	(*storage){ proc };
	while() {
		unsigned copy = n;
		if( __atomic_load_n(&ready, __ATOMIC_RELAXED) == n
			&& __atomic_compare_exchange_n(&ready, &copy, n + 1, true, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
			break;
		Pause();
	}

	__cfadbg_print_safe(ready_queue, "Kernel : Registering proc %p done, id %lu\n", proc, n);

	// Return new spot.
	/*paranoid*/ verify(n < ready);
	/*paranoid*/ verify(__alignof__(data[n]) == (2 * cache_line_size));
	/*paranoid*/ verify((((uintptr_t)&data[n]) % cache_line_size) == 0);
	proc->id = n;
}
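
// Note (illustration, not in the original source): the publish loop above
// forces slots to become visible in allocation order.  If processor A got
// n == 5 from the fetch-and-add and processor B got n == 6, B spins in
// Pause() until A advances `ready` from 5 to 6, and only then can B advance
// it from 6 to 7.  Readers scanning data[0 .. ready) therefore never observe
// an unconstructed slot.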

void unregister_proc_id( struct __processor_id_t * proc ) with(*__scheduler_lock) {
	unsigned id = proc->id;
	/*paranoid*/ verify(id < ready);
	/*paranoid*/ verify(proc == __atomic_load_n(&data[id].handle, __ATOMIC_RELAXED));
	__atomic_store_n(&data[id].handle, 0p, __ATOMIC_RELEASE);

	__cfadbg_print_safe(ready_queue, "Kernel : Unregister proc %p\n", proc);
}

//-----------------------------------------------------------------------
// Writer side : acquire when changing the ready queue, e.g. adding more
//  queues or removing them.
uint_fast32_t ready_mutate_lock( void ) with(*__scheduler_lock) {
	/* paranoid */ verify( ! __preemption_enabled() );

	// Step 1 : lock global lock
	// It is needed to prevent processors that register mid critical-section
	//   from simply locking their own lock and entering.
	__atomic_acquire( &lock );

	// Step 2 : lock per-proc lock
	// Processors that are currently being registered aren't counted
	//   but can't be in read_lock or in the critical section.
	// All other processors are counted
	uint_fast32_t s = ready;
	for(uint_fast32_t i = 0; i < s; i++) {
		__atomic_acquire( &data[i].lock );
	}

	/* paranoid */ verify( ! __preemption_enabled() );
	return s;
}

void ready_mutate_unlock( uint_fast32_t last_s ) with(*__scheduler_lock) {
	/* paranoid */ verify( ! __preemption_enabled() );

	// Step 1 : release local locks
	// This must be done while the global lock is held to avoid
	//   threads that were created mid critical-section
	//   racing to lock their local locks and having the writer
	//   immediately unlock them
	// Alternative solution : return s in write_lock and pass it to write_unlock
	for(uint_fast32_t i = 0; i < last_s; i++) {
		verify(data[i].lock);
		__atomic_store_n(&data[i].lock, (bool)false, __ATOMIC_RELEASE);
	}

	// Step 2 : release global lock
	/*paranoid*/ assert(true == lock);
	__atomic_store_n(&lock, (bool)false, __ATOMIC_RELEASE);

	/* paranoid */ verify( ! __preemption_enabled() );
}
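
// Sketch of the intended writer-side usage (illustration only; the real
// callers are ready_queue_grow/shrink below, invoked with preemption
// disabled):
//
//   uint_fast32_t s = ready_mutate_lock();
//   // ... structural changes, e.g. adding or removing lanes ...
//   ready_mutate_unlock( s );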

//=======================================================================
// Cforall Ready Queue used for scheduling
//=======================================================================
void ?{}(__ready_queue_t & this) with (this) {
	lanes.data  = 0p;
	lanes.tscs  = 0p;
	lanes.count = 0;
}

void ^?{}(__ready_queue_t & this) with (this) {
	verify( SEQUENTIAL_SHARD == lanes.count );
	free(lanes.data);
	free(lanes.tscs);
}

//-----------------------------------------------------------------------
#if defined(USE_RELAXED_FIFO)
	//-----------------------------------------------------------------------
	// get index from random number with or without bias towards queues
	static inline [unsigned, bool] idx_from_r(unsigned r, unsigned preferred) {
		unsigned i;
		bool local;
		unsigned rlow  = r % BIAS;
		unsigned rhigh = r / BIAS;
		// note: preferred is unsigned, so external pushes pass -1u;
		// compare against -1u rather than `>= 0`, which is always true
		if((0 != rlow) && preferred != -1u) {
			// (BIAS - 1) out of BIAS chances
			// Use preferred queues
			i = preferred + (rhigh % READYQ_SHARD_FACTOR);
			local = true;
		}
		else {
			// 1 out of BIAS chances
			// Use all queues
			i = rhigh;
			local = false;
		}
		return [i, local];
	}
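
	// Worked example (illustration, not in the original source), with
	// BIAS == 4 and READYQ_SHARD_FACTOR == 4:
	//   r = 13, preferred = 8 : rlow = 1, rhigh = 3
	//     -> i = 8 + (3 % 4) = 11, local = true   (one of the 4 local shards)
	//   r = 12, preferred = 8 : rlow = 0, rhigh = 3
	//     -> i = 3, local = false                 (any queue, 1-in-BIAS chance)
	// Callers still reduce i modulo lanes.count before using it.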

	__attribute__((hot)) void push(struct cluster * cltr, struct $thread * thrd) with (cltr->ready_queue) {
		__cfadbg_print_safe(ready_queue, "Kernel : Pushing %p on cluster %p\n", thrd, cltr);

		const bool external = (!kernelTLS().this_processor) || (cltr != kernelTLS().this_processor->cltr);
		/* paranoid */ verify(external || kernelTLS().this_processor->rdq.id < lanes.count );

		// write timestamp
		thrd->link.ts = rdtscl();

		bool local;
		int preferred = external ? -1 : kernelTLS().this_processor->rdq.id;

		// Try to pick a lane and lock it
		unsigned i;
		do {
			// Pick the index of a lane
			unsigned r = __tls_rand_fwd();
			[i, local] = idx_from_r(r, preferred);

			i %= __atomic_load_n( &lanes.count, __ATOMIC_RELAXED );

			#if !defined(__CFA_NO_STATISTICS__)
				if(unlikely(external)) __atomic_fetch_add(&cltr->stats->ready.push.extrn.attempt, 1, __ATOMIC_RELAXED);
				else if(local) __tls_stats()->ready.push.local.attempt++;
				else __tls_stats()->ready.push.share.attempt++;
			#endif

		#if defined(USE_MPSC)
			// mpsc always succeeds
		} while( false );
		#else
			// If we can't lock it retry
		} while( !__atomic_try_acquire( &lanes.data[i].lock ) );
		#endif

		// Actually push it
		push(lanes.data[i], thrd);

		#if !defined(USE_MPSC)
			// Unlock and return
			__atomic_unlock( &lanes.data[i].lock );
		#endif

		// Mark the current index in the tls rng instance as having an item
		__tls_rand_advance_bck();

		__cfadbg_print_safe(ready_queue, "Kernel : Pushed %p on cluster %p (idx: %u, mask %llu, first %d)\n", thrd, cltr, i, used.mask[0], lane_first);

		// Update statistics
		#if !defined(__CFA_NO_STATISTICS__)
			if(unlikely(external)) __atomic_fetch_add(&cltr->stats->ready.push.extrn.success, 1, __ATOMIC_RELAXED);
			else if(local) __tls_stats()->ready.push.local.success++;
			else __tls_stats()->ready.push.share.success++;
		#endif
	}
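
	// Note (illustration, not in the original source): push draws its lane
	// index with __tls_rand_fwd() and commits the draw with
	// __tls_rand_advance_bck(); pop_fast below replays the same random
	// sequence in reverse with __tls_rand_bck().  A processor that pops thus
	// tends to revisit the lanes it most recently pushed to, keeping work
	// local without any shared bookkeeping.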

	// Pop from the ready queue from a given cluster
	__attribute__((hot)) $thread * pop_fast(struct cluster * cltr) with (cltr->ready_queue) {
		/* paranoid */ verify( lanes.count > 0 );
		/* paranoid */ verify( kernelTLS().this_processor );
		/* paranoid */ verify( kernelTLS().this_processor->rdq.id < lanes.count );

		unsigned count = __atomic_load_n( &lanes.count, __ATOMIC_RELAXED );
		int preferred = kernelTLS().this_processor->rdq.id;

		// As long as the list is not empty, try finding a lane that isn't empty and pop from it
		for(25) {
			// Pick two lists at random
			unsigned ri = __tls_rand_bck();
			unsigned rj = __tls_rand_bck();

			unsigned i, j;
			__attribute__((unused)) bool locali, localj;
			[i, locali] = idx_from_r(ri, preferred);
			[j, localj] = idx_from_r(rj, preferred);

			i %= count;
			j %= count;

			// try popping from the 2 picked lists
			struct $thread * thrd = try_pop(cltr, i, j __STATS(, *(locali || localj ? &__tls_stats()->ready.pop.local : &__tls_stats()->ready.pop.help)));
			if(thrd) {
				return thrd;
			}
		}

		// All lanes were empty, return 0p
		return 0p;
	}

	__attribute__((hot)) struct $thread * pop_slow(struct cluster * cltr) {
		return search(cltr);
	}
#endif
#if defined(USE_WORK_STEALING)
	__attribute__((hot)) void push(struct cluster * cltr, struct $thread * thrd) with (cltr->ready_queue) {
		__cfadbg_print_safe(ready_queue, "Kernel : Pushing %p on cluster %p\n", thrd, cltr);

		const bool external = (!kernelTLS().this_processor) || (cltr != kernelTLS().this_processor->cltr);
		/* paranoid */ verify(external || kernelTLS().this_processor->rdq.id < lanes.count );

		// write timestamp
		thrd->link.ts = rdtscl();

		// Try to pick a lane and lock it
		unsigned i;
		do {
			#if !defined(__CFA_NO_STATISTICS__)
				if(unlikely(external)) __atomic_fetch_add(&cltr->stats->ready.push.extrn.attempt, 1, __ATOMIC_RELAXED);
				else __tls_stats()->ready.push.local.attempt++;
			#endif

			if(unlikely(external)) {
				i = __tls_rand() % lanes.count;
			}
			else {
				processor * proc = kernelTLS().this_processor;
				unsigned r = proc->rdq.its++;
				i = proc->rdq.id + (r % READYQ_SHARD_FACTOR);
			}

		#if defined(USE_MPSC)
			// mpsc always succeeds
		} while( false );
		#else
			// If we can't lock it retry
		} while( !__atomic_try_acquire( &lanes.data[i].lock ) );
		#endif

		// Actually push it
		push(lanes.data[i], thrd);

		#if !defined(USE_MPSC)
			// Unlock and return
			__atomic_unlock( &lanes.data[i].lock );
		#endif

		#if !defined(__CFA_NO_STATISTICS__)
			if(unlikely(external)) __atomic_fetch_add(&cltr->stats->ready.push.extrn.success, 1, __ATOMIC_RELAXED);
			else __tls_stats()->ready.push.local.success++;
		#endif

		__cfadbg_print_safe(ready_queue, "Kernel : Pushed %p on cluster %p (idx: %u, mask %llu, first %d)\n", thrd, cltr, i, used.mask[0], lane_first);
	}

	// Pop from the ready queue from a given cluster
	__attribute__((hot)) $thread * pop_fast(struct cluster * cltr) with (cltr->ready_queue) {
		/* paranoid */ verify( lanes.count > 0 );
		/* paranoid */ verify( kernelTLS().this_processor );
		/* paranoid */ verify( kernelTLS().this_processor->rdq.id < lanes.count );

		processor * proc = kernelTLS().this_processor;

		if(proc->rdq.target == -1u) {
			proc->rdq.target = __tls_rand() % lanes.count;
			unsigned it1  = proc->rdq.itr;
			unsigned it2  = proc->rdq.itr + 1;
			unsigned idx1 = proc->rdq.id + (it1 % READYQ_SHARD_FACTOR);
			unsigned idx2 = proc->rdq.id + (it2 % READYQ_SHARD_FACTOR);
			unsigned long long tsc1 = ts(lanes.data[idx1]);
			unsigned long long tsc2 = ts(lanes.data[idx2]);
			proc->rdq.cutoff = min(tsc1, tsc2);
			if(proc->rdq.cutoff == 0) proc->rdq.cutoff = -1ull;
		}
		else {
			unsigned target = proc->rdq.target;
			proc->rdq.target = -1u;
			if(lanes.tscs[target].tv < proc->rdq.cutoff) {
				$thread * t = try_pop(cltr, target __STATS(, __tls_stats()->ready.pop.help));
				if(t) return t;
			}
		}

		for(READYQ_SHARD_FACTOR) {
			unsigned i = proc->rdq.id + (--proc->rdq.itr % READYQ_SHARD_FACTOR);
			if($thread * t = try_pop(cltr, i __STATS(, __tls_stats()->ready.pop.local))) return t;
		}
		return 0p;
	}
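
	// Note (illustration, not in the original source): the cutoff clamp and
	// the target reset above are the edge-case fixes from this commit.  An
	// empty local lane reports a timestamp of 0, so without mapping a cutoff
	// of 0 to -1ull a processor whose local lanes are all empty would refuse
	// to help any victim.  Resetting rdq.target to -1u before attempting the
	// pop guarantees the target is consumed even when the rng happened to
	// pick one of this processor's own lanes.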

	__attribute__((hot)) struct $thread * pop_slow(struct cluster * cltr) with (cltr->ready_queue) {
		for(25) {
			unsigned i = __tls_rand() % lanes.count;
			$thread * t = try_pop(cltr, i __STATS(, __tls_stats()->ready.pop.steal));
			if(t) return t;
		}

		return search(cltr);
	}
#endif

//=======================================================================
// Various Ready Queue utilities
//=======================================================================
// these functions work the same, or almost the same,
// whether they are using work-stealing or relaxed-fifo scheduling

//-----------------------------------------------------------------------
// try to pop from a lane given by index w
static inline struct $thread * try_pop(struct cluster * cltr, unsigned w __STATS(, __stats_readyQ_pop_t & stats)) with (cltr->ready_queue) {
	__STATS( stats.attempt++; )

	// Get relevant elements locally
	__intrusive_lane_t & lane = lanes.data[w];

	// If list looks empty retry
	if( is_empty(lane) ) {
		__STATS( stats.espec++; )
		return 0p;
	}

	// If we can't get the lock retry
	if( !__atomic_try_acquire(&lane.lock) ) {
		__STATS( stats.elock++; )
		return 0p;
	}

	// If list is empty, unlock and retry
	if( is_empty(lane) ) {
		__atomic_unlock(&lane.lock);
		__STATS( stats.eempty++; )
		return 0p;
	}

	// Actually pop the list
	struct $thread * thrd;
	thrd = pop(lane);

	/* paranoid */ verify(thrd);
	/* paranoid */ verify(lane.lock);

	// Unlock and return
	__atomic_unlock(&lane.lock);

	// Update statistics
	__STATS( stats.success++; )

	#if defined(USE_WORK_STEALING)
		lanes.tscs[w].tv = thrd->link.ts;
	#endif

	// return the popped thread
	return thrd;
}
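
// Note (illustration, not in the original source): the emptiness test is
// deliberately performed twice -- once before taking the lock as a cheap
// speculative filter (counted as stats.espec) and once after acquiring it,
// because another processor may drain the lane between the first check and
// the lock acquire (counted as stats.eempty).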

//-----------------------------------------------------------------------
// try to pop from any lane, making sure not to miss any thread pushed
// before the start of the function
static inline struct $thread * search(struct cluster * cltr) with (cltr->ready_queue) {
	/* paranoid */ verify( lanes.count > 0 );
	unsigned count = __atomic_load_n( &lanes.count, __ATOMIC_RELAXED );
	unsigned offset = __tls_rand();
	for(i; count) {
		unsigned idx = (offset + i) % count;
		struct $thread * thrd = try_pop(cltr, idx __STATS(, __tls_stats()->ready.pop.search));
		if(thrd) {
			return thrd;
		}
	}

	// All lanes were empty, return 0p
	return 0p;
}

//-----------------------------------------------------------------------
// Check that all the intrusive queues in the data structure are still consistent
static void check( __ready_queue_t & q ) with (q) {
	#if defined(__CFA_WITH_VERIFY__) && !defined(USE_MPSC)
		{
			for( idx ; lanes.count ) {
				__intrusive_lane_t & sl = lanes.data[idx];
				assert(!lanes.data[idx].lock);

				assert(head(sl)->link.prev == 0p );
				assert(head(sl)->link.next->link.prev == head(sl) );
				assert(tail(sl)->link.next == 0p );
				assert(tail(sl)->link.prev->link.next == tail(sl) );

				if(is_empty(sl)) {
					assert(tail(sl)->link.prev == head(sl));
					assert(head(sl)->link.next == tail(sl));
				} else {
					assert(tail(sl)->link.prev != head(sl));
					assert(head(sl)->link.next != tail(sl));
				}
			}
		}
	#endif
}

//-----------------------------------------------------------------------
// Given 2 indexes, pick the list with the oldest push and try to pop from it
static inline struct $thread * try_pop(struct cluster * cltr, unsigned i, unsigned j __STATS(, __stats_readyQ_pop_t & stats)) with (cltr->ready_queue) {
	// Pick the better list
	int w = i;
	if( __builtin_expect(!is_empty(lanes.data[j]), true) ) {
		w = (ts(lanes.data[i]) < ts(lanes.data[j])) ? i : j;
	}

	return try_pop(cltr, w __STATS(, stats));
}

// Call this function if the intrusive list was moved using memcpy;
// it fixes the list so that the pointers back to the anchors aren't left dangling
static inline void fix(__intrusive_lane_t & ll) {
	#if !defined(USE_MPSC)
		// if the list is not empty then follow the pointers and fix their reverse
		if(!is_empty(ll)) {
			head(ll)->link.next->link.prev = head(ll);
			tail(ll)->link.prev->link.next = tail(ll);
		}
		// Otherwise just reset the list
		else {
			verify(tail(ll)->link.next == 0p);
			tail(ll)->link.prev = head(ll);
			head(ll)->link.next = tail(ll);
			verify(head(ll)->link.prev == 0p);
		}
	#endif
}
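
// Note (illustration, not in the original source): the head and tail anchors
// live inside the lane itself, so when realloc memcpys a lane to a new
// address the first and last elements still point at the anchors' old
// addresses.  fix() re-derives those back-pointers from the still-valid
// forward pointers, or resets an empty lane entirely.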

static void assign_list(unsigned & value, dlist(processor, processor) & list, unsigned count) {
	processor * it = &list`first;
	for(unsigned i = 0; i < count; i++) {
		/* paranoid */ verifyf( it, "Unexpected null iterator, at index %u of %u\n", i, count);
		it->rdq.id = value;
		it->rdq.target = -1u;
		value += READYQ_SHARD_FACTOR;
		it = &(*it)`next;
	}
}

static void reassign_cltr_id(struct cluster * cltr) {
	unsigned preferred = 0;
	assign_list(preferred, cltr->procs.actives, cltr->procs.total - cltr->procs.idle);
	assign_list(preferred, cltr->procs.idles  , cltr->procs.idle );
}
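
// Worked example (illustration, not in the original source): with
// READYQ_SHARD_FACTOR == 2 and three processors on the cluster, the
// processors receive rdq.id values 0, 2 and 4, so each one treats lanes
// [id, id + READYQ_SHARD_FACTOR) as its local shard.  Active processors are
// numbered before idle ones.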

static void fix_times( struct cluster * cltr ) with( cltr->ready_queue ) {
	#if defined(USE_WORK_STEALING)
		lanes.tscs = alloc(lanes.count, lanes.tscs`realloc);
		for(i; lanes.count) {
			lanes.tscs[i].tv = ts(lanes.data[i]);
		}
	#endif
}

// Grow the ready queue
void ready_queue_grow(struct cluster * cltr) {
	size_t ncount;
	int target = cltr->procs.total;

	/* paranoid */ verify( ready_mutate_islocked() );
	__cfadbg_print_safe(ready_queue, "Kernel : Growing ready queue\n");

	// Make sure that everything is consistent
	/* paranoid */ check( cltr->ready_queue );

	// grow the ready queue
	with( cltr->ready_queue ) {
		// Find new count
		// Make sure we always have at least 1 list
		if(target >= 2) {
			ncount = target * READYQ_SHARD_FACTOR;
		} else {
			ncount = SEQUENTIAL_SHARD;
		}

		// Allocate new array (uses realloc and memcpies the data)
		lanes.data = alloc( ncount, lanes.data`realloc );

		// Fix the moved data
		for( idx; (size_t)lanes.count ) {
			fix(lanes.data[idx]);
		}

		// Construct new data
		for( idx; (size_t)lanes.count ~ ncount) {
			(lanes.data[idx]){};
		}

		// Update original
		lanes.count = ncount;
	}

	fix_times(cltr);

	reassign_cltr_id(cltr);

	// Make sure that everything is consistent
	/* paranoid */ check( cltr->ready_queue );

	__cfadbg_print_safe(ready_queue, "Kernel : Growing ready queue done\n");

	/* paranoid */ verify( ready_mutate_islocked() );
}
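
// Worked example (illustration, not in the original source): under
// USE_RELAXED_FIFO (READYQ_SHARD_FACTOR == 4), growing a cluster to 4
// processors resizes the queue to 4 * 4 == 16 lanes; shrinking back to a
// single processor collapses it to SEQUENTIAL_SHARD == 1 lane, with the
// displaced threads re-pushed by ready_queue_shrink below.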

// Shrink the ready queue
void ready_queue_shrink(struct cluster * cltr) {
	/* paranoid */ verify( ready_mutate_islocked() );
	__cfadbg_print_safe(ready_queue, "Kernel : Shrinking ready queue\n");

	// Make sure that everything is consistent
	/* paranoid */ check( cltr->ready_queue );

	int target = cltr->procs.total;

	with( cltr->ready_queue ) {
		// Remember old count
		size_t ocount = lanes.count;

		// Find new count
		// Make sure we always have at least 1 list
		lanes.count = target >= 2 ? target * READYQ_SHARD_FACTOR : SEQUENTIAL_SHARD;
		/* paranoid */ verify( ocount >= lanes.count );
		/* paranoid */ verify( lanes.count == target * READYQ_SHARD_FACTOR || target < 2 );

		// for printing count the number of displaced threads
		#if defined(__CFA_DEBUG_PRINT__) || defined(__CFA_DEBUG_PRINT_READY_QUEUE__)
			__attribute__((unused)) size_t displaced = 0;
		#endif

		// redistribute old data
		for( idx; (size_t)lanes.count ~ ocount) {
			// Lock is not strictly needed but makes checking invariants much easier
			__attribute__((unused)) bool locked = __atomic_try_acquire(&lanes.data[idx].lock);
			verify(locked);

			// As long as this lane is not empty, pop from it and push the
			// threads somewhere else in the queue
			while(!is_empty(lanes.data[idx])) {
				struct $thread * thrd;
				thrd = pop(lanes.data[idx]);

				push(cltr, thrd);

				// for printing count the number of displaced threads
				#if defined(__CFA_DEBUG_PRINT__) || defined(__CFA_DEBUG_PRINT_READY_QUEUE__)
					displaced++;
				#endif
			}

			// Unlock the lane
			__atomic_unlock(&lanes.data[idx].lock);

			// TODO print the queue statistics here

			^(lanes.data[idx]){};
		}

		__cfadbg_print_safe(ready_queue, "Kernel : Shrinking ready queue displaced %zu threads\n", displaced);

		// Allocate new array (uses realloc and memcpies the data)
		lanes.data = alloc( lanes.count, lanes.data`realloc );

		// Fix the moved data
		for( idx; (size_t)lanes.count ) {
			fix(lanes.data[idx]);
		}
	}

	fix_times(cltr);

	reassign_cltr_id(cltr);

	// Make sure that everything is consistent
	/* paranoid */ check( cltr->ready_queue );

	__cfadbg_print_safe(ready_queue, "Kernel : Shrinking ready queue done\n");
	/* paranoid */ verify( ready_mutate_islocked() );
}