source: libcfa/src/concurrency/ready_queue.cfa @ d3ba775

Last change on this file since d3ba775 was d3ba775, checked in by Thierry Delisle <tdelisle@…>, 3 years ago

More clean-up after new subqueue

//
// Cforall Version 1.0.0 Copyright (C) 2019 University of Waterloo
//
// The contents of this file are covered under the licence agreement in the
// file "LICENCE" distributed with Cforall.
//
// ready_queue.cfa --
//
// Author           : Thierry Delisle
// Created On       : Mon Nov dd 16:29:18 2019
// Last Modified By :
// Last Modified On :
// Update Count     :
//

#define __cforall_thread__
// #define __CFA_DEBUG_PRINT_READY_QUEUE__


#define USE_RELAXED_FIFO
// #define USE_WORK_STEALING
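
// Exactly one scheduling strategy is selected at compile time (see the
//   #error below): USE_RELAXED_FIFO picks lanes pseudo-randomly with a bias
//   towards the processor's own shard of lanes, while USE_WORK_STEALING
//   keeps processors on their own shard and steals from other lanes only
//   when a victim looks sufficiently behind.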

#include "bits/defs.hfa"
#include "kernel_private.hfa"

#define _GNU_SOURCE
#include "stdlib.hfa"
#include "math.hfa"

#include <unistd.h>

#include "ready_subqueue.hfa"

static const size_t cache_line_size = 64;
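// 64 bytes is the cache-line size of most current x86-64 parts; presumably
//   kept here so shared data can be padded/aligned to avoid false sharing.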

#if !defined(__CFA_NO_STATISTICS__)
	#define __STATS(...) __VA_ARGS__
#else
	#define __STATS(...)
#endif

// No overridden function, no environment variable, no define
// fall back to a magic number
#ifndef __CFA_MAX_PROCESSORS__
	#define __CFA_MAX_PROCESSORS__ 1024
#endif

#if   defined(USE_RELAXED_FIFO)
	#define BIAS 4
	#define READYQ_SHARD_FACTOR 4
	#define SEQUENTIAL_SHARD 1
#elif defined(USE_WORK_STEALING)
	#define READYQ_SHARD_FACTOR 2
	#define SEQUENTIAL_SHARD 2
#else
	#error no scheduling strategy selected
#endif

static inline struct $thread * try_pop(struct cluster * cltr, unsigned w __STATS(, __stats_readyQ_pop_t & stats));
static inline struct $thread * try_pop(struct cluster * cltr, unsigned i, unsigned j __STATS(, __stats_readyQ_pop_t & stats));
static inline struct $thread * search(struct cluster * cltr);
static inline [unsigned, bool] idx_from_r(unsigned r, unsigned preferred);


// returns the maximum number of processors the RWLock supports
__attribute__((weak)) unsigned __max_processors() {
	const char * max_cores_s = getenv("CFA_MAX_PROCESSORS");
	if(!max_cores_s) {
		__cfadbg_print_nolock(ready_queue, "No CFA_MAX_PROCESSORS in ENV\n");
		return __CFA_MAX_PROCESSORS__;
	}

	char * endptr = 0p;
	long int max_cores_l = strtol(max_cores_s, &endptr, 10);
	if(max_cores_l < 1 || max_cores_l > 65535) {
		__cfadbg_print_nolock(ready_queue, "CFA_MAX_PROCESSORS out of range : %ld\n", max_cores_l);
		return __CFA_MAX_PROCESSORS__;
	}
	if('\0' != *endptr) {
		__cfadbg_print_nolock(ready_queue, "CFA_MAX_PROCESSORS not a decimal number : %s\n", max_cores_s);
		return __CFA_MAX_PROCESSORS__;
	}

	return max_cores_l;
}

//=======================================================================
// Cluster wide reader-writer lock
//=======================================================================
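// Design note (from the code below): each registered processor owns one
//   boolean "local lock" published in the data array. A reader only ever
//   touches its own boolean, so the read side is contention free; a writer
//   first takes the global write_lock, then acquires every published local
//   lock, guaranteeing no reader runs while the lane array is mutated.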
void  ?{}(__scheduler_RWLock_t & this) {
	this.max   = __max_processors();
	this.alloc = 0;
	this.ready = 0;
	this.data  = alloc(this.max);
	this.write_lock  = false;

	/*paranoid*/ verify(__atomic_is_lock_free(sizeof(this.alloc), &this.alloc));
	/*paranoid*/ verify(__atomic_is_lock_free(sizeof(this.ready), &this.ready));

}
void ^?{}(__scheduler_RWLock_t & this) {
	free(this.data);
}


//=======================================================================
// Lock-Free registering/unregistering of threads
unsigned register_proc_id( void ) with(*__scheduler_lock) {
	__cfadbg_print_safe(ready_queue, "Kernel : Registering proc %p for RW-Lock\n", proc);
	bool * handle = (bool *)&kernelTLS().sched_lock;

	// Step - 1 : check if there is already space in the data
	uint_fast32_t s = ready;

	// Check among all the ready
	for(uint_fast32_t i = 0; i < s; i++) {
		bool * volatile * cell = (bool * volatile *)&data[i]; // Cforall is bugged and the double volatiles cause problems
		/* paranoid */ verify( handle != *cell );

		bool * null = 0p; // Re-write every loop since compare thrashes it
		if( __atomic_load_n(cell, (int)__ATOMIC_RELAXED) == null
			&& __atomic_compare_exchange_n( cell, &null, handle, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) {
			/* paranoid */ verify(i < ready);
			/* paranoid */ verify( (kernelTLS().sched_id = i, true) );
			return i;
		}
	}

	if(max <= alloc) abort("Trying to create more than %u processors", __scheduler_lock->max);

	// Step - 2 : F&A to get a new spot in the array.
	uint_fast32_t n = __atomic_fetch_add(&alloc, 1, __ATOMIC_SEQ_CST);
	if(max <= n) abort("Trying to create more than %u processors", __scheduler_lock->max);

	// Step - 3 : Mark space as used and then publish it.
	data[n] = handle;
	while() {
		unsigned copy = n;
		if( __atomic_load_n(&ready, __ATOMIC_RELAXED) == n
			&& __atomic_compare_exchange_n(&ready, &copy, n + 1, true, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
			break;
		Pause();
	}

	__cfadbg_print_safe(ready_queue, "Kernel : Registering proc %p done, id %lu\n", proc, n);

	// Return new spot.
	/* paranoid */ verify(n < ready);
	/* paranoid */ verify( (kernelTLS().sched_id = n, true) );
	return n;
}

void unregister_proc_id( unsigned id ) with(*__scheduler_lock) {
	/* paranoid */ verify(id < ready);
	/* paranoid */ verify(id == kernelTLS().sched_id);
	/* paranoid */ verify(data[id] == &kernelTLS().sched_lock);

	bool * volatile * cell = (bool * volatile *)&data[id]; // Cforall is bugged and the double volatiles cause problems

	__atomic_store_n(cell, 0p, __ATOMIC_RELEASE);

	__cfadbg_print_safe(ready_queue, "Kernel : Unregister proc %p\n", proc);
}
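
// Reader-side sketch (hypothetical; the real helpers live in
//   kernel_private.hfa): a processor would enter a read section roughly as
//     kernelTLS().sched_lock = true;              // publish intent
//     while(write_lock) { /* retract flag, Pause(), retry */ }
//   and leave it by clearing kernelTLS().sched_lock with a release store.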

//-----------------------------------------------------------------------
// Writer side : acquire when changing the ready queue, e.g. adding more
//  queues or removing them.
uint_fast32_t ready_mutate_lock( void ) with(*__scheduler_lock) {
	/* paranoid */ verify( ! __preemption_enabled() );
	/* paranoid */ verify( ! kernelTLS().sched_lock );

	// Step 1 : lock global lock
	// It is needed to avoid processors that register mid Critical-Section
	//   to simply lock their own lock and enter.
	__atomic_acquire( &write_lock );

	// Step 2 : lock per-proc lock
	// Processors that are currently being registered aren't counted
	//   but can't be in read_lock or in the critical section.
	// All other processors are counted
	uint_fast32_t s = ready;
	for(uint_fast32_t i = 0; i < s; i++) {
		volatile bool * llock = data[i];
		if(llock) __atomic_acquire( llock );
	}

	/* paranoid */ verify( ! __preemption_enabled() );
	return s;
}

void ready_mutate_unlock( uint_fast32_t last_s ) with(*__scheduler_lock) {
	/* paranoid */ verify( ! __preemption_enabled() );

	// Step 1 : release local locks
	// This must be done while the global lock is held to avoid
	//   threads that were created mid critical section
	//   racing to lock their local locks and having the writer
	//   immediately unlock them
	// Alternative solution : return s in write_lock and pass it to write_unlock
	for(uint_fast32_t i = 0; i < last_s; i++) {
		volatile bool * llock = data[i];
		if(llock) __atomic_store_n(llock, (bool)false, __ATOMIC_RELEASE);
	}

	// Step 2 : release global lock
	/*paranoid*/ assert(true == write_lock);
	__atomic_store_n(&write_lock, (bool)false, __ATOMIC_RELEASE);

	/* paranoid */ verify( ! __preemption_enabled() );
}

//=======================================================================
// Cforall Ready Queue used for scheduling
//=======================================================================
void ?{}(__ready_queue_t & this) with (this) {
	lanes.data  = 0p;
	lanes.tscs  = 0p;
	lanes.count = 0;
}

void ^?{}(__ready_queue_t & this) with (this) {
	verify( SEQUENTIAL_SHARD == lanes.count );
	free(lanes.data);
	free(lanes.tscs);
}

//-----------------------------------------------------------------------
#if defined(USE_RELAXED_FIFO)
	//-----------------------------------------------------------------------
	// get index from random number with or without bias towards queues
	static inline [unsigned, bool] idx_from_r(unsigned r, unsigned preferred) {
		unsigned i;
		bool local;
		unsigned rlow  = r % BIAS;
		unsigned rhigh = r / BIAS;
		if((0 != rlow) && preferred != -1u) { // -1u (passed as -1) means no preferred shard
			// (BIAS - 1) out of BIAS chances
			// Use preferred queues
			i = preferred + (rhigh % READYQ_SHARD_FACTOR);
			local = true;
		}
		else {
			// 1 out of BIAS chances
			// Use all queues
			i = rhigh;
			local = false;
		}
		return [i, local];
	}
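
	// Worked example with BIAS 4 and READYQ_SHARD_FACTOR 4: r == 7 gives
	//   rlow = 7 % 4 = 3 and rhigh = 7 / 4 = 1, so a processor with a valid
	//   preferred shard pushes to lane preferred + 1 and is counted as
	//   local; only when rlow == 0 (1 chance in BIAS) are all lanes used.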

	__attribute__((hot)) void push(struct cluster * cltr, struct $thread * thrd) with (cltr->ready_queue) {
		__cfadbg_print_safe(ready_queue, "Kernel : Pushing %p on cluster %p\n", thrd, cltr);

		const bool external = (!kernelTLS().this_processor) || (cltr != kernelTLS().this_processor->cltr);
		/* paranoid */ verify(external || kernelTLS().this_processor->rdq.id < lanes.count );

		bool local;
		int preferred = external ? -1 : kernelTLS().this_processor->rdq.id;

		// Try to pick a lane and lock it
		unsigned i;
		do {
			// Pick the index of a lane
			unsigned r = __tls_rand_fwd();
			[i, local] = idx_from_r(r, preferred);

			i %= __atomic_load_n( &lanes.count, __ATOMIC_RELAXED );

			#if !defined(__CFA_NO_STATISTICS__)
				if(unlikely(external)) __atomic_fetch_add(&cltr->stats->ready.push.extrn.attempt, 1, __ATOMIC_RELAXED);
				else if(local) __tls_stats()->ready.push.local.attempt++;
				else __tls_stats()->ready.push.share.attempt++;
			#endif

			// If we can't lock it retry
		} while( !__atomic_try_acquire( &lanes.data[i].lock ) );

		// Actually push it
		push(lanes.data[i], thrd);

		// Unlock and return
		__atomic_unlock( &lanes.data[i].lock );

		// Mark the current index in the tls rng instance as having an item
		__tls_rand_advance_bck();

		__cfadbg_print_safe(ready_queue, "Kernel : Pushed %p on cluster %p (idx: %u, mask %llu, first %d)\n", thrd, cltr, i, used.mask[0], lane_first);

		// Update statistics
		#if !defined(__CFA_NO_STATISTICS__)
			if(unlikely(external)) __atomic_fetch_add(&cltr->stats->ready.push.extrn.success, 1, __ATOMIC_RELAXED);
			else if(local) __tls_stats()->ready.push.local.success++;
			else __tls_stats()->ready.push.share.success++;
		#endif
	}

	// Pop from the ready queue from a given cluster
	__attribute__((hot)) $thread * pop_fast(struct cluster * cltr) with (cltr->ready_queue) {
		/* paranoid */ verify( lanes.count > 0 );
		/* paranoid */ verify( kernelTLS().this_processor );
		/* paranoid */ verify( kernelTLS().this_processor->rdq.id < lanes.count );

		unsigned count = __atomic_load_n( &lanes.count, __ATOMIC_RELAXED );
		int preferred = kernelTLS().this_processor->rdq.id;


		// As long as the list is not empty, try finding a lane that isn't empty and pop from it
		for(25) {
			// Pick two lists at random
			unsigned ri = __tls_rand_bck();
			unsigned rj = __tls_rand_bck();

			unsigned i, j;
			__attribute__((unused)) bool locali, localj;
			[i, locali] = idx_from_r(ri, preferred);
			[j, localj] = idx_from_r(rj, preferred);

			i %= count;
			j %= count;

			// try popping from the 2 picked lists
			struct $thread * thrd = try_pop(cltr, i, j __STATS(, *(locali || localj ? &__tls_stats()->ready.pop.local : &__tls_stats()->ready.pop.help)));
			if(thrd) {
				return thrd;
			}
		}

		// All lanes were empty return 0p
		return 0p;
	}
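
	// Note: push draws its lane index with __tls_rand_fwd() and commits it
	//   with __tls_rand_advance_bck(), so the __tls_rand_bck() calls above
	//   replay that per-processor sequence in reverse; the intent appears to
	//   be that pops revisit lanes this processor recently pushed to.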

	__attribute__((hot)) struct $thread * pop_slow(struct cluster * cltr) { return pop_fast(cltr); }
	__attribute__((hot)) struct $thread * pop_search(struct cluster * cltr) {
		return search(cltr);
	}
#endif
#if defined(USE_WORK_STEALING)
	__attribute__((hot)) void push(struct cluster * cltr, struct $thread * thrd) with (cltr->ready_queue) {
		__cfadbg_print_safe(ready_queue, "Kernel : Pushing %p on cluster %p\n", thrd, cltr);

		// #define USE_PREFERRED
		#if !defined(USE_PREFERRED)
			const bool external = (!kernelTLS().this_processor) || (cltr != kernelTLS().this_processor->cltr);
			/* paranoid */ verify(external || kernelTLS().this_processor->rdq.id < lanes.count );
		#else
			unsigned preferred = thrd->preferred;
			const bool external = (!kernelTLS().this_processor) || preferred == -1u || thrd->curr_cluster != cltr;
			/* paranoid */ verifyf(external || preferred < lanes.count, "Invalid preferred queue %u for %u lanes", preferred, lanes.count );

			unsigned r = preferred % READYQ_SHARD_FACTOR;
			const unsigned start = preferred - r;
		#endif

		// Try to pick a lane and lock it
		unsigned i;
		do {
			#if !defined(__CFA_NO_STATISTICS__)
				if(unlikely(external)) __atomic_fetch_add(&cltr->stats->ready.push.extrn.attempt, 1, __ATOMIC_RELAXED);
				else __tls_stats()->ready.push.local.attempt++;
			#endif

			if(unlikely(external)) {
				i = __tls_rand() % lanes.count;
			}
			else {
				#if !defined(USE_PREFERRED)
					processor * proc = kernelTLS().this_processor;
					unsigned r = proc->rdq.its++;
					i = proc->rdq.id + (r % READYQ_SHARD_FACTOR);
				#else
					i = start + (r++ % READYQ_SHARD_FACTOR);
				#endif
			}
			// If we can't lock it retry
		} while( !__atomic_try_acquire( &lanes.data[i].lock ) );

		// Actually push it
		push(lanes.data[i], thrd);

		// Unlock and return
		__atomic_unlock( &lanes.data[i].lock );

		#if !defined(__CFA_NO_STATISTICS__)
			if(unlikely(external)) __atomic_fetch_add(&cltr->stats->ready.push.extrn.success, 1, __ATOMIC_RELAXED);
			else __tls_stats()->ready.push.local.success++;
		#endif

		__cfadbg_print_safe(ready_queue, "Kernel : Pushed %p on cluster %p (idx: %u, mask %llu, first %d)\n", thrd, cltr, i, used.mask[0], lane_first);
	}

	// Pop from the ready queue from a given cluster
	__attribute__((hot)) $thread * pop_fast(struct cluster * cltr) with (cltr->ready_queue) {
		/* paranoid */ verify( lanes.count > 0 );
		/* paranoid */ verify( kernelTLS().this_processor );
		/* paranoid */ verify( kernelTLS().this_processor->rdq.id < lanes.count );

		processor * proc = kernelTLS().this_processor;

		if(proc->rdq.target == -1u) {
			proc->rdq.target = __tls_rand() % lanes.count;
			unsigned it1  = proc->rdq.itr;
			unsigned it2  = proc->rdq.itr + 1;
			unsigned idx1 = proc->rdq.id + (it1 % READYQ_SHARD_FACTOR);
			unsigned idx2 = proc->rdq.id + (it2 % READYQ_SHARD_FACTOR);
			unsigned long long tsc1 = ts(lanes.data[idx1]);
			unsigned long long tsc2 = ts(lanes.data[idx2]);
			proc->rdq.cutoff = min(tsc1, tsc2);
			if(proc->rdq.cutoff == 0) proc->rdq.cutoff = -1ull;
		}
		else {
			unsigned target = proc->rdq.target;
			proc->rdq.target = -1u;
			if(lanes.tscs[target].tv < proc->rdq.cutoff) {
				$thread * t = try_pop(cltr, target __STATS(, __tls_stats()->ready.pop.help));
				if(t) return t;
			}
		}

		for(READYQ_SHARD_FACTOR) {
			unsigned i = proc->rdq.id + (--proc->rdq.itr % READYQ_SHARD_FACTOR);
			if($thread * t = try_pop(cltr, i __STATS(, __tls_stats()->ready.pop.local))) return t;
		}
		return 0p;
	}
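
	// Stealing heuristic: every other call picks a random victim lane and
	//   records a cutoff, the older timestamp of two of the local lanes.
	//   On the following call the victim is only raided if its oldest
	//   thread predates that cutoff, i.e. only when the victim is further
	//   behind than the local shard; a cutoff of 0 (idle local lanes) is
	//   widened to -1ull so stealing is always allowed.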

	__attribute__((hot)) struct $thread * pop_slow(struct cluster * cltr) with (cltr->ready_queue) {
		unsigned i = __tls_rand() % lanes.count;
		return try_pop(cltr, i __STATS(, __tls_stats()->ready.pop.steal));
	}

	__attribute__((hot)) struct $thread * pop_search(struct cluster * cltr) with (cltr->ready_queue) {
		return search(cltr);
	}
#endif

//=======================================================================
// Various Ready Queue utilities
//=======================================================================
// these functions work the same or almost the same
// whether they are using work-stealing or relaxed fifo scheduling

//-----------------------------------------------------------------------
// try to pop from a lane given by index w
static inline struct $thread * try_pop(struct cluster * cltr, unsigned w __STATS(, __stats_readyQ_pop_t & stats)) with (cltr->ready_queue) {
	__STATS( stats.attempt++; )

	// Get relevant elements locally
	__intrusive_lane_t & lane = lanes.data[w];

	// If list looks empty retry
	if( is_empty(lane) ) {
		__STATS( stats.espec++; )
		return 0p;
	}

	// If we can't get the lock retry
	if( !__atomic_try_acquire(&lane.lock) ) {
		__STATS( stats.elock++; )
		return 0p;
	}

	// If list is empty, unlock and retry
	if( is_empty(lane) ) {
		__atomic_unlock(&lane.lock);
		__STATS( stats.eempty++; )
		return 0p;
	}

	// Actually pop the list
	struct $thread * thrd;
	thrd = pop(lane);

	/* paranoid */ verify(thrd);
	/* paranoid */ verify(lane.lock);

	// Unlock and return
	__atomic_unlock(&lane.lock);

	// Update statistics
	__STATS( stats.success++; )

	#if defined(USE_WORK_STEALING)
		lanes.tscs[w].tv = thrd->link.ts;
	#endif

	thrd->preferred = w;

	// return the popped thread
	return thrd;
}

//-----------------------------------------------------------------------
// try to pop from any lanes making sure you don't miss any threads pushed
// before the start of the function
static inline struct $thread * search(struct cluster * cltr) with (cltr->ready_queue) {
	/* paranoid */ verify( lanes.count > 0 );
	unsigned count = __atomic_load_n( &lanes.count, __ATOMIC_RELAXED );
	unsigned offset = __tls_rand();
	for(i; count) {
		unsigned idx = (offset + i) % count;
		struct $thread * thrd = try_pop(cltr, idx __STATS(, __tls_stats()->ready.pop.search));
		if(thrd) {
			return thrd;
		}
	}

	// All lanes were empty return 0p
	return 0p;
}

//-----------------------------------------------------------------------
// Check that all the intrusive queues in the data structure are still consistent
static void check( __ready_queue_t & q ) with (q) {
	#if defined(__CFA_WITH_VERIFY__)
		{
			for( idx ; lanes.count ) {
				__intrusive_lane_t & sl = lanes.data[idx];
				assert(!lanes.data[idx].lock);

				if(is_empty(sl)) {
					assert( sl.anchor.next == 0p );
					assert( sl.anchor.ts   == 0  );
					assert( mock_head(sl)  == sl.prev );
				} else {
					assert( sl.anchor.next != 0p );
					assert( sl.anchor.ts   != 0  );
					assert( mock_head(sl)  != sl.prev );
				}
			}
		}
	#endif
}

//-----------------------------------------------------------------------
// Given 2 indexes, pick the list with the oldest push and try to pop from it
static inline struct $thread * try_pop(struct cluster * cltr, unsigned i, unsigned j __STATS(, __stats_readyQ_pop_t & stats)) with (cltr->ready_queue) {
	// Pick the best list
	int w = i;
	if( __builtin_expect(!is_empty(lanes.data[j]), true) ) {
		w = (ts(lanes.data[i]) < ts(lanes.data[j])) ? i : j;
	}

	return try_pop(cltr, w __STATS(, stats));
}
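
// This is the classic "power of two choices" trick: sampling two lanes and
//   popping from the one with the older head keeps lanes roughly balanced
//   without global coordination.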

// Call this function if the intrusive list was moved using memcpy
// fixes the list so that the pointers back to anchors aren't left dangling
static inline void fix(__intrusive_lane_t & ll) {
	if(is_empty(ll)) {
		verify(ll.anchor.next == 0p);
		ll.prev = mock_head(ll);
	}
}

static void assign_list(unsigned & value, dlist(processor, processor) & list, unsigned count) {
	processor * it = &list`first;
	for(unsigned i = 0; i < count; i++) {
		/* paranoid */ verifyf( it, "Unexpected null iterator, at index %u of %u\n", i, count);
		it->rdq.id = value;
		it->rdq.target = -1u;
		value += READYQ_SHARD_FACTOR;
		it = &(*it)`next;
	}
}
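
// Each processor is handed a contiguous block of READYQ_SHARD_FACTOR lanes:
//   the k-th processor in list order owns lanes [k * READYQ_SHARD_FACTOR,
//   (k + 1) * READYQ_SHARD_FACTOR). reassign_cltr_id below numbers active
//   processors before idle ones.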

static void reassign_cltr_id(struct cluster * cltr) {
	unsigned preferred = 0;
	assign_list(preferred, cltr->procs.actives, cltr->procs.total - cltr->procs.idle);
	assign_list(preferred, cltr->procs.idles  , cltr->procs.idle );
}

static void fix_times( struct cluster * cltr ) with( cltr->ready_queue ) {
	#if defined(USE_WORK_STEALING)
		lanes.tscs = alloc(lanes.count, lanes.tscs`realloc);
		for(i; lanes.count) {
			lanes.tscs[i].tv = ts(lanes.data[i]);
		}
	#endif
}

// Grow the ready queue
void ready_queue_grow(struct cluster * cltr) {
	size_t ncount;
	int target = cltr->procs.total;

	/* paranoid */ verify( ready_mutate_islocked() );
	__cfadbg_print_safe(ready_queue, "Kernel : Growing ready queue\n");

	// Make sure that everything is consistent
	/* paranoid */ check( cltr->ready_queue );

	// grow the ready queue
	with( cltr->ready_queue ) {
		// Find new count
		// Make sure we always have at least 1 list
		if(target >= 2) {
			ncount = target * READYQ_SHARD_FACTOR;
		} else {
			ncount = SEQUENTIAL_SHARD;
		}

		// Allocate new array (uses realloc and memcpies the data)
		lanes.data = alloc( ncount, lanes.data`realloc );

		// Fix the moved data
		for( idx; (size_t)lanes.count ) {
			fix(lanes.data[idx]);
		}

		// Construct new data
		for( idx; (size_t)lanes.count ~ ncount) {
			(lanes.data[idx]){};
		}

		// Update original
		lanes.count = ncount;
	}

	fix_times(cltr);

	reassign_cltr_id(cltr);

	// Make sure that everything is consistent
	/* paranoid */ check( cltr->ready_queue );

	__cfadbg_print_safe(ready_queue, "Kernel : Growing ready queue done\n");

	/* paranoid */ verify( ready_mutate_islocked() );
}
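
// Worked example: under USE_RELAXED_FIFO (READYQ_SHARD_FACTOR 4), growing a
//   cluster to 3 processors sizes the queue at 3 * 4 = 12 lanes, while a
//   single-processor cluster keeps SEQUENTIAL_SHARD (1) lane.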

// Shrink the ready queue
void ready_queue_shrink(struct cluster * cltr) {
	/* paranoid */ verify( ready_mutate_islocked() );
	__cfadbg_print_safe(ready_queue, "Kernel : Shrinking ready queue\n");

	// Make sure that everything is consistent
	/* paranoid */ check( cltr->ready_queue );

	int target = cltr->procs.total;

	with( cltr->ready_queue ) {
		// Remember old count
		size_t ocount = lanes.count;

		// Find new count
		// Make sure we always have at least 1 list
		lanes.count = target >= 2 ? target * READYQ_SHARD_FACTOR : SEQUENTIAL_SHARD;
		/* paranoid */ verify( ocount >= lanes.count );
		/* paranoid */ verify( lanes.count == target * READYQ_SHARD_FACTOR || target < 2 );

		// for printing count the number of displaced threads
		#if defined(__CFA_DEBUG_PRINT__) || defined(__CFA_DEBUG_PRINT_READY_QUEUE__)
			__attribute__((unused)) size_t displaced = 0;
		#endif

		// redistribute old data
		for( idx; (size_t)lanes.count ~ ocount) {
			// Lock is not strictly needed but makes checking invariants much easier
			__attribute__((unused)) bool locked = __atomic_try_acquire(&lanes.data[idx].lock);
			verify(locked);

			// As long as we can pop from this lane to push the threads somewhere else in the queue
			while(!is_empty(lanes.data[idx])) {
				struct $thread * thrd;
				thrd = pop(lanes.data[idx]);

				push(cltr, thrd);

				// for printing count the number of displaced threads
				#if defined(__CFA_DEBUG_PRINT__) || defined(__CFA_DEBUG_PRINT_READY_QUEUE__)
					displaced++;
				#endif
			}

			// Unlock the lane
			__atomic_unlock(&lanes.data[idx].lock);

			// TODO print the queue statistics here

			^(lanes.data[idx]){};
		}

		__cfadbg_print_safe(ready_queue, "Kernel : Shrinking ready queue displaced %zu threads\n", displaced);

		// Allocate new array (uses realloc and memcpies the data)
		lanes.data = alloc( lanes.count, lanes.data`realloc );

		// Fix the moved data
		for( idx; (size_t)lanes.count ) {
			fix(lanes.data[idx]);
		}
	}

	fix_times(cltr);

	reassign_cltr_id(cltr);

	// Make sure that everything is consistent
	/* paranoid */ check( cltr->ready_queue );

	__cfadbg_print_safe(ready_queue, "Kernel : Shrinking ready queue done\n");
	/* paranoid */ verify( ready_mutate_islocked() );
}