source: libcfa/src/concurrency/ready_queue.cfa @ 6528d75

Last change on this file since 6528d75 was 431cd4f, checked in by Thierry Delisle <tdelisle@…>, 4 years ago

Added alternative to relaxed-fifo scheduler.
Disabled by default

  • Property mode set to 100644
File size: 21.6 KB
RevLine 
[7768b8d]1//
2// Cforall Version 1.0.0 Copyright (C) 2019 University of Waterloo
3//
4// The contents of this file are covered under the licence agreement in the
5// file "LICENCE" distributed with Cforall.
6//
7// ready_queue.cfa --
8//
9// Author           : Thierry Delisle
10// Created On       : Mon Nov dd 16:29:18 2019
11// Last Modified By :
12// Last Modified On :
13// Update Count     :
14//
15
16#define __cforall_thread__
[1b143de]17// #define __CFA_DEBUG_PRINT_READY_QUEUE__
[7768b8d]18
[7a2972b9]19// #define USE_MPSC
[1eb239e4]20
[9cc3a18]21#define USE_RELAXED_FIFO
22// #define USE_WORK_STEALING
23
[7768b8d]24#include "bits/defs.hfa"
25#include "kernel_private.hfa"
26
27#define _GNU_SOURCE
28#include "stdlib.hfa"
[61d7bec]29#include "math.hfa"
[7768b8d]30
[04b5cef]31#include <unistd.h>
32
[13c5e19]33#include "ready_subqueue.hfa"
34
[7768b8d]35static const size_t cache_line_size = 64;
36
[dca5802]37// No overridden function, no environment variable, no define
38// fall back to a magic number
39#ifndef __CFA_MAX_PROCESSORS__
[b388ee81]40        #define __CFA_MAX_PROCESSORS__ 1024
[dca5802]41#endif
[7768b8d]42
[9cc3a18]43#if   defined(USE_RELAXED_FIFO)
44        #define BIAS 4
45        #define READYQ_SHARD_FACTOR 4
46#elif defined(USE_WORK_STEALING)
47        #define READYQ_SHARD_FACTOR 2
48#else
49        #error no scheduling strategy selected
50#endif
51
52static inline [unsigned, bool] idx_from_r(unsigned r, unsigned preferred);
53static inline struct $thread * try_pop(struct cluster * cltr, unsigned w);
[431cd4f]54static inline struct $thread * try_pop(struct cluster * cltr, unsigned i, unsigned j);
55static inline struct $thread * search(struct cluster * cltr);
[9cc3a18]56
[04b5cef]57
[dca5802]58// returns the maximum number of processors the RWLock supports
[7768b8d]59__attribute__((weak)) unsigned __max_processors() {
60        const char * max_cores_s = getenv("CFA_MAX_PROCESSORS");
61        if(!max_cores_s) {
[504a7dc]62                __cfadbg_print_nolock(ready_queue, "No CFA_MAX_PROCESSORS in ENV\n");
[dca5802]63                return __CFA_MAX_PROCESSORS__;
[7768b8d]64        }
65
66        char * endptr = 0p;
67        long int max_cores_l = strtol(max_cores_s, &endptr, 10);
68        if(max_cores_l < 1 || max_cores_l > 65535) {
[504a7dc]69                __cfadbg_print_nolock(ready_queue, "CFA_MAX_PROCESSORS out of range : %ld\n", max_cores_l);
[dca5802]70                return __CFA_MAX_PROCESSORS__;
[7768b8d]71        }
72        if('\0' != *endptr) {
[504a7dc]73                __cfadbg_print_nolock(ready_queue, "CFA_MAX_PROCESSORS not a decimal number : %s\n", max_cores_s);
[dca5802]74                return __CFA_MAX_PROCESSORS__;
[7768b8d]75        }
76
77        return max_cores_l;
78}
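// Usage note: the cap is read once from the CFA_MAX_PROCESSORS environment
// variable (decimal, accepted range 1..65535). For a libcfa program (binary name
// here is only illustrative) that looks like:
//     CFA_MAX_PROCESSORS=256 ./a.out
// Any other value, or no value at all, falls back to __CFA_MAX_PROCESSORS__ above.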
79
80//=======================================================================
81// Cluster wide reader-writer lock
82//=======================================================================
[b388ee81]83void  ?{}(__scheduler_RWLock_t & this) {
[7768b8d]84        this.max   = __max_processors();
85        this.alloc = 0;
86        this.ready = 0;
87        this.lock  = false;
88        this.data  = alloc(this.max);
89
90        /*paranoid*/ verify( 0 == (((uintptr_t)(this.data    )) % 64) );
91        /*paranoid*/ verify( 0 == (((uintptr_t)(this.data + 1)) % 64) );
92        /*paranoid*/ verify(__atomic_is_lock_free(sizeof(this.alloc), &this.alloc));
93        /*paranoid*/ verify(__atomic_is_lock_free(sizeof(this.ready), &this.ready));
94
95}
[b388ee81]96void ^?{}(__scheduler_RWLock_t & this) {
[7768b8d]97        free(this.data);
98}
99
[9b1dcc2]100void ?{}( __scheduler_lock_id_t & this, __processor_id_t * proc ) {
[7768b8d]101        this.handle = proc;
102        this.lock   = false;
[64a7146]103        #ifdef __CFA_WITH_VERIFY__
104                this.owned  = false;
105        #endif
[7768b8d]106}
107
108//=======================================================================
109// Lock-Free registering/unregistering of threads
[a33c113]110void register_proc_id( struct __processor_id_t * proc ) with(*__scheduler_lock) {
[b388ee81]111        __cfadbg_print_safe(ready_queue, "Kernel : Registering proc %p for RW-Lock\n", proc);
[504a7dc]112
[7768b8d]113        // Step - 1 : check if there is already space in the data
114        uint_fast32_t s = ready;
115
116	// Check among the first 'ready' slots for a freed one to reuse
117        for(uint_fast32_t i = 0; i < s; i++) {
[9b1dcc2]118                __processor_id_t * null = 0p; // Re-write every loop since compare thrashes it
[7768b8d]119                if( __atomic_load_n(&data[i].handle, (int)__ATOMIC_RELAXED) == null
120                        && __atomic_compare_exchange_n( &data[i].handle, &null, proc, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) {
121                        /*paranoid*/ verify(i < ready);
[64a7146]122                        /*paranoid*/ verify(0 == (__alignof__(data[i]) % cache_line_size));
[7768b8d]123                        /*paranoid*/ verify((((uintptr_t)&data[i]) % cache_line_size) == 0);
[a33c113]124			proc->id = i;
			return; // slot reused, do not allocate a new one
[7768b8d]125		}
126        }
127
[b388ee81]128	if(max <= alloc) abort("Trying to create more than %u processors", __scheduler_lock->max);
[7768b8d]129
130        // Step - 2 : F&A to get a new spot in the array.
131        uint_fast32_t n = __atomic_fetch_add(&alloc, 1, __ATOMIC_SEQ_CST);
[b388ee81]132	if(max <= n) abort("Trying to create more than %u processors", __scheduler_lock->max);
[7768b8d]133
134        // Step - 3 : Mark space as used and then publish it.
[9b1dcc2]135        __scheduler_lock_id_t * storage = (__scheduler_lock_id_t *)&data[n];
[7768b8d]136        (*storage){ proc };
[fd9b524]137        while() {
[7768b8d]138                unsigned copy = n;
139                if( __atomic_load_n(&ready, __ATOMIC_RELAXED) == n
140                        && __atomic_compare_exchange_n(&ready, &copy, n + 1, true, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
141                        break;
[fd9b524]142                Pause();
[7768b8d]143        }
144
[1b143de]145        __cfadbg_print_safe(ready_queue, "Kernel : Registering proc %p done, id %lu\n", proc, n);
[504a7dc]146
[7768b8d]147        // Return new spot.
148        /*paranoid*/ verify(n < ready);
[37ba662]149        /*paranoid*/ verify(__alignof__(data[n]) == (2 * cache_line_size));
[7768b8d]150        /*paranoid*/ verify((((uintptr_t)&data[n]) % cache_line_size) == 0);
[a33c113]151        proc->id = n;
[7768b8d]152}
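// Summary of the registration protocol above:
//   1. scan the first 'ready' slots and try to CAS a null handle back to 'proc',
//      reusing a slot released by unregister_proc_id;
//   2. otherwise fetch-and-add 'alloc' to reserve a brand-new slot;
//   3. construct the slot, then spin until 'ready' can be CASed from n to n + 1,
//      publishing the slot in allocation order.
// Since writers only iterate over the first 'ready' slots, a new slot becomes
// visible to ready_mutate_lock only once step 3 has completed.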
153
[a33c113]154void unregister_proc_id( struct __processor_id_t * proc ) with(*__scheduler_lock) {
[7768b8d]155        unsigned id = proc->id;
156        /*paranoid*/ verify(id < ready);
157        /*paranoid*/ verify(proc == __atomic_load_n(&data[id].handle, __ATOMIC_RELAXED));
158        __atomic_store_n(&data[id].handle, 0p, __ATOMIC_RELEASE);
[504a7dc]159
160        __cfadbg_print_safe(ready_queue, "Kernel : Unregister proc %p\n", proc);
[7768b8d]161}
162
163//-----------------------------------------------------------------------
164// Writer side : acquire when changing the ready queue, e.g. adding more
165//  queues or removing them.
[b388ee81]166uint_fast32_t ready_mutate_lock( void ) with(*__scheduler_lock) {
[8fc652e0]167        /* paranoid */ verify( ! __preemption_enabled() );
[62502cc4]168
[7768b8d]169        // Step 1 : lock global lock
170	// It is needed to prevent processors that register mid critical-section
171	//   from simply locking their own lock and entering.
172        __atomic_acquire( &lock );
173
174        // Step 2 : lock per-proc lock
175        // Processors that are currently being registered aren't counted
176        //   but can't be in read_lock or in the critical section.
177        // All other processors are counted
178        uint_fast32_t s = ready;
179        for(uint_fast32_t i = 0; i < s; i++) {
180                __atomic_acquire( &data[i].lock );
181        }
182
[8fc652e0]183        /* paranoid */ verify( ! __preemption_enabled() );
[7768b8d]184        return s;
185}
186
[b388ee81]187void ready_mutate_unlock( uint_fast32_t last_s ) with(*__scheduler_lock) {
[8fc652e0]188        /* paranoid */ verify( ! __preemption_enabled() );
[62502cc4]189
[7768b8d]190        // Step 1 : release local locks
191	// This must be done while the global lock is held to avoid
192	//   threads that were created mid critical-section
193	//   racing to lock their local locks and having the writer
194	//   immediately unlock them.
195        // Alternative solution : return s in write_lock and pass it to write_unlock
196        for(uint_fast32_t i = 0; i < last_s; i++) {
197                verify(data[i].lock);
198                __atomic_store_n(&data[i].lock, (bool)false, __ATOMIC_RELEASE);
199        }
200
201        // Step 2 : release global lock
202        /*paranoid*/ assert(true == lock);
203        __atomic_store_n(&lock, (bool)false, __ATOMIC_RELEASE);
[62502cc4]204
[8fc652e0]205        /* paranoid */ verify( ! __preemption_enabled() );
[7768b8d]206}
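// The matching reader side is not in this file (it comes from kernel_private.hfa,
// included above). For orientation only, a minimal sketch of the usual pattern for
// this kind of distributed reader-writer lock, assuming a processor registered
// with index 'id' (illustrative, not the actual implementation):
//
//     for() {
//         // announce the read by taking this processor's own lock
//         __atomic_acquire( &data[id].lock );
//         // no writer holds the global lock : safe to enter the read section
//         if( !__atomic_load_n(&lock, __ATOMIC_SEQ_CST) ) break;
//         // a writer is active : back off and wait for it to finish
//         __atomic_store_n(&data[id].lock, (bool)false, __ATOMIC_RELEASE);
//         while( __atomic_load_n(&lock, __ATOMIC_RELAXED) ) Pause();
//     }
//
// The writer (ready_mutate_lock above) takes the global lock first and then every
// per-processor lock, so readers never contend with each other, only with writers.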
207
208//=======================================================================
[9cc3a18]209// Cforall Ready Queue used for scheduling
[b798713]210//=======================================================================
211void ?{}(__ready_queue_t & this) with (this) {
[28d73c1]212        lanes.data  = 0p;
[9cc3a18]213        lanes.tscs  = 0p;
[28d73c1]214        lanes.count = 0;
[b798713]215}
216
217void ^?{}(__ready_queue_t & this) with (this) {
[39fc03e]218        verify( 1 == lanes.count );
[dca5802]219        free(lanes.data);
[9cc3a18]220        free(lanes.tscs);
[dca5802]221}
222
[64a7146]223//-----------------------------------------------------------------------
[431cd4f]224#if defined(USE_RELAXED_FIFO)
225        //-----------------------------------------------------------------------
226        // get index from random number with or without bias towards queues
227        static inline [unsigned, bool] idx_from_r(unsigned r, unsigned preferred) {
228                unsigned i;
229                bool local;
230                unsigned rlow  = r % BIAS;
231                unsigned rhigh = r / BIAS;
232                if((0 != rlow) && preferred >= 0) {
233                        // (BIAS - 1) out of BIAS chances
234			// Use preferred queues
235                        i = preferred + (rhigh % READYQ_SHARD_FACTOR);
236                        local = true;
237                }
238                else {
239                        // 1 out of BIAS chances
240                        // Use all queues
241                        i = rhigh;
242                        local = false;
243                }
244                return [i, local];
245        }
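	// Worked example of the bias, using the defaults above (BIAS == 4,
	// READYQ_SHARD_FACTOR == 4): rlow = r % 4 is non-zero 3 times out of 4, so
	// roughly 75% of operations land on one of the 4 lanes starting at 'preferred'
	// (i = preferred + rhigh % 4), while the remaining 25% use rhigh as an index
	// over all lanes. Callers reduce i modulo lanes.count afterwards, so the index
	// returned here may exceed the current lane count.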
246
247        __attribute__((hot)) void push(struct cluster * cltr, struct $thread * thrd) with (cltr->ready_queue) {
248                __cfadbg_print_safe(ready_queue, "Kernel : Pushing %p on cluster %p\n", thrd, cltr);
[1b143de]249
[431cd4f]250                const bool external = (!kernelTLS().this_processor) || (cltr != kernelTLS().this_processor->cltr);
251                /* paranoid */ verify(external || kernelTLS().this_processor->rdq.id < lanes.count );
[fd1f65e]252
[431cd4f]253                // write timestamp
254                thrd->link.ts = rdtscl();
[b798713]255
[431cd4f]256                bool local;
257                int preferred = external ? -1 : kernelTLS().this_processor->rdq.id;
[52769ba]258
[431cd4f]259                // Try to pick a lane and lock it
260                unsigned i;
261                do {
262                        // Pick the index of a lane
263                        unsigned r = __tls_rand_fwd();
264                        [i, local] = idx_from_r(r, preferred);
[772411a]265
[431cd4f]266                        i %= __atomic_load_n( &lanes.count, __ATOMIC_RELAXED );
267
268                        #if !defined(__CFA_NO_STATISTICS__)
269                                if(external) {
270                                        if(local) __atomic_fetch_add(&cltr->stats->ready.pick.ext.local, 1, __ATOMIC_RELAXED);
271                                        __atomic_fetch_add(&cltr->stats->ready.pick.ext.attempt, 1, __ATOMIC_RELAXED);
272                                }
273                                else {
274                                        if(local) __tls_stats()->ready.pick.push.local++;
275                                        __tls_stats()->ready.pick.push.attempt++;
276                                }
277                        #endif
[b798713]278
[431cd4f]279                #if defined(USE_MPSC)
280                        // mpsc always succeeds
281                } while( false );
282                #else
283                        // If we can't lock it retry
284                } while( !__atomic_try_acquire( &lanes.data[i].lock ) );
285                #endif
286
287                // Actually push it
288                push(lanes.data[i], thrd);
289
290                #if !defined(USE_MPSC)
291                        // Unlock and return
292                        __atomic_unlock( &lanes.data[i].lock );
293                #endif
294
295                // Mark the current index in the tls rng instance as having an item
296                __tls_rand_advance_bck();
297
298                __cfadbg_print_safe(ready_queue, "Kernel : Pushed %p on cluster %p (idx: %u, mask %llu, first %d)\n", thrd, cltr, i, used.mask[0], lane_first);
299
300                // Update statistics
[b798713]301                #if !defined(__CFA_NO_STATISTICS__)
[fd1f65e]302                        if(external) {
[431cd4f]303                                if(local) __atomic_fetch_add(&cltr->stats->ready.pick.ext.lsuccess, 1, __ATOMIC_RELAXED);
304                                __atomic_fetch_add(&cltr->stats->ready.pick.ext.success, 1, __ATOMIC_RELAXED);
[fd1f65e]305                        }
306                        else {
[431cd4f]307                                if(local) __tls_stats()->ready.pick.push.lsuccess++;
308                                __tls_stats()->ready.pick.push.success++;
[fd1f65e]309                        }
[b798713]310                #endif
[431cd4f]311        }
[b798713]312
[431cd4f]313	// Pop from the ready queue of a given cluster
314        __attribute__((hot)) $thread * pop_fast(struct cluster * cltr) with (cltr->ready_queue) {
315                /* paranoid */ verify( lanes.count > 0 );
316                /* paranoid */ verify( kernelTLS().this_processor );
317                /* paranoid */ verify( kernelTLS().this_processor->rdq.id < lanes.count );
[b798713]318
[431cd4f]319                unsigned count = __atomic_load_n( &lanes.count, __ATOMIC_RELAXED );
320                int preferred = kernelTLS().this_processor->rdq.id;
[dca5802]321
322
[431cd4f]323                // As long as the list is not empty, try finding a lane that isn't empty and pop from it
324                for(25) {
325                        // Pick two lists at random
326                        unsigned ri = __tls_rand_bck();
327                        unsigned rj = __tls_rand_bck();
[c426b03]328
[431cd4f]329                        unsigned i, j;
330                        __attribute__((unused)) bool locali, localj;
331                        [i, locali] = idx_from_r(ri, preferred);
332                        [j, localj] = idx_from_r(rj, preferred);
[1b143de]333
[431cd4f]334                        #if !defined(__CFA_NO_STATISTICS__)
335                                if(locali && localj) {
336                                        __tls_stats()->ready.pick.pop.local++;
337                                }
338                        #endif
[b798713]339
[431cd4f]340                        i %= count;
341                        j %= count;
[9cc3a18]342
[431cd4f]343                        // try popping from the 2 picked lists
344                        struct $thread * thrd = try_pop(cltr, i, j);
345                        if(thrd) {
346                                #if !defined(__CFA_NO_STATISTICS__)
347                                        if( locali || localj ) __tls_stats()->ready.pick.pop.lsuccess++;
348                                #endif
349                                return thrd;
350                        }
351                }
[13c5e19]352
[431cd4f]353		// All lanes were empty, return 0p
354                return 0p;
355        }
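	// The loop above is essentially a "power of two choices" pop: each of the at
	// most 25 attempts picks two candidate lanes through idx_from_r, and
	// try_pop(cltr, i, j) (in the shared utilities below) pops from whichever of
	// the two has the older head timestamp. Giving up after 25 attempts lets the
	// scheduler fall back to pop_slow()/search(), which scans every lane once.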
[772411a]356
[431cd4f]357        __attribute__((hot)) struct $thread * pop_slow(struct cluster * cltr) {
358                return search(cltr);
359        }
360#endif
361#if defined(USE_WORK_STEALING)
362        __attribute__((hot)) void push(struct cluster * cltr, struct $thread * thrd) with (cltr->ready_queue) {
363                __cfadbg_print_safe(ready_queue, "Kernel : Pushing %p on cluster %p\n", thrd, cltr);
[772411a]364
[431cd4f]365                const bool external = (!kernelTLS().this_processor) || (cltr != kernelTLS().this_processor->cltr);
366                /* paranoid */ verify(external || kernelTLS().this_processor->rdq.id < lanes.count );
[772411a]367
[431cd4f]368                // write timestamp
369                thrd->link.ts = rdtscl();
370
371                // Try to pick a lane and lock it
372                unsigned i;
373                do {
374                        if(unlikely(external)) {
375                                i = __tls_rand() % lanes.count;
376                        }
377                        else {
378                                processor * proc = kernelTLS().this_processor;
379                                unsigned r = proc->rdq.its++;
380                                i =  proc->rdq.id + (r % READYQ_SHARD_FACTOR);
[13c5e19]381                        }
[431cd4f]382
383
384                #if defined(USE_MPSC)
385                        // mpsc always succeeds
386                } while( false );
387                #else
388                        // If we can't lock it retry
389                } while( !__atomic_try_acquire( &lanes.data[i].lock ) );
[13c5e19]390                #endif
391
[431cd4f]392                // Actually push it
393                push(lanes.data[i], thrd);
[13c5e19]394
[431cd4f]395                #if !defined(USE_MPSC)
396                        // Unlock and return
397                        __atomic_unlock( &lanes.data[i].lock );
398                #endif
399
400                __cfadbg_print_safe(ready_queue, "Kernel : Pushed %p on cluster %p (idx: %u, mask %llu, first %d)\n", thrd, cltr, i, used.mask[0], lane_first);
[13c5e19]401        }
402
[431cd4f]403	// Pop from the ready queue of a given cluster
404        __attribute__((hot)) $thread * pop_fast(struct cluster * cltr) with (cltr->ready_queue) {
405                /* paranoid */ verify( lanes.count > 0 );
406                /* paranoid */ verify( kernelTLS().this_processor );
407                /* paranoid */ verify( kernelTLS().this_processor->rdq.id < lanes.count );
408
409                processor * proc = kernelTLS().this_processor;
410
411                if(proc->rdq.target == -1u) {
412                        proc->rdq.target = __tls_rand() % lanes.count;
413                        unsigned it1  = proc->rdq.itr;
414                        unsigned it2  = proc->rdq.itr + 1;
415                        unsigned idx1 = proc->rdq.id + (it1 % READYQ_SHARD_FACTOR);
416			unsigned idx2 = proc->rdq.id + (it2 % READYQ_SHARD_FACTOR);
417                        unsigned long long tsc1 = ts(lanes.data[idx1]);
418                        unsigned long long tsc2 = ts(lanes.data[idx2]);
419                        proc->rdq.cutoff = min(tsc1, tsc2);
420                }
421                else if(lanes.tscs[proc->rdq.target].tv < proc->rdq.cutoff) {
422                        $thread * t = try_pop(cltr, proc->rdq.target);
423                        proc->rdq.target = -1u;
424                        if(t) return t;
425                }
[13c5e19]426
[431cd4f]427                for(READYQ_SHARD_FACTOR) {
428                        unsigned i = proc->rdq.id + (--proc->rdq.itr % READYQ_SHARD_FACTOR);
429                        if($thread * t = try_pop(cltr, i)) return t;
430                }
431                return 0p;
[1eb239e4]432        }
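	// Rough shape of the stealing policy above: a processor normally cycles through
	// its own READYQ_SHARD_FACTOR lanes starting at proc->rdq.id. When rdq.target is
	// unset (-1u), a random victim lane is chosen and rdq.cutoff is sampled from the
	// head timestamps of two of the processor's own lanes; on a later call the victim
	// is only popped from if its cached timestamp (lanes.tscs[target].tv) is older
	// than that cutoff. This keeps stealing rare and biased towards lanes whose
	// oldest work predates the local work.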
433
[431cd4f]434        __attribute__((hot)) struct $thread * pop_slow(struct cluster * cltr) with (cltr->ready_queue) {
435                for(25) {
436                        unsigned i = __tls_rand() % lanes.count;
437                        $thread * t = try_pop(cltr, i);
438                        if(t) return t;
439                }
440
441                return search(cltr);
442        }
443#endif
[1eb239e4]444
[9cc3a18]445//=======================================================================
446// Various Ready Queue utilities
447//=======================================================================
448// these functions work the same, or almost the same,
449// whether work-stealing or relaxed-FIFO scheduling is used
[1eb239e4]450
[9cc3a18]451//-----------------------------------------------------------------------
452// try to pop from a lane given by index w
[13c5e19]453static inline struct $thread * try_pop(struct cluster * cltr, unsigned w) with (cltr->ready_queue) {
[dca5802]454        // Get relevant elements locally
455        __intrusive_lane_t & lane = lanes.data[w];
456
[b798713]457        // If list looks empty retry
[dca5802]458        if( is_empty(lane) ) return 0p;
[b798713]459
460        // If we can't get the lock retry
[dca5802]461        if( !__atomic_try_acquire(&lane.lock) ) return 0p;
[b798713]462
463        // If list is empty, unlock and retry
[dca5802]464        if( is_empty(lane) ) {
465                __atomic_unlock(&lane.lock);
[b798713]466                return 0p;
467        }
468
469        // Actually pop the list
[504a7dc]470        struct $thread * thrd;
[343d10e]471        thrd = pop(lane);
[b798713]472
[dca5802]473        /* paranoid */ verify(thrd);
474        /* paranoid */ verify(lane.lock);
[b798713]475
476        // Unlock and return
[dca5802]477        __atomic_unlock(&lane.lock);
[b798713]478
[dca5802]479        // Update statistics
[b798713]480        #if !defined(__CFA_NO_STATISTICS__)
[8834751]481                __tls_stats()->ready.pick.pop.success++;
[b798713]482        #endif
483
[431cd4f]484        #if defined(USE_WORK_STEALING)
485                lanes.tscs[w].tv = thrd->link.ts;
[9cc3a18]486        #endif
[d72c074]487
[dca5802]488        // return the popped thread
[b798713]489        return thrd;
490}
[04b5cef]491
[9cc3a18]492//-----------------------------------------------------------------------
493// try to pop from any lane, making sure not to miss any threads pushed
494// before the start of the function
[431cd4f]495static inline struct $thread * search(struct cluster * cltr) with (cltr->ready_queue) {
[9cc3a18]496        /* paranoid */ verify( lanes.count > 0 );
497        unsigned count = __atomic_load_n( &lanes.count, __ATOMIC_RELAXED );
498        unsigned offset = __tls_rand();
499        for(i; count) {
500                unsigned idx = (offset + i) % count;
501                struct $thread * thrd = try_pop(cltr, idx);
502                if(thrd) {
503                        return thrd;
504                }
[13c5e19]505        }
[9cc3a18]506
507	// All lanes were empty, return 0p
508        return 0p;
[b798713]509}
510
511//-----------------------------------------------------------------------
[9cc3a18]512// Check that all the intrusive queues in the data structure are still consistent
[b798713]513static void check( __ready_queue_t & q ) with (q) {
[7a2972b9]514        #if defined(__CFA_WITH_VERIFY__) && !defined(USE_MPSC)
[b798713]515                {
[dca5802]516                        for( idx ; lanes.count ) {
517                                __intrusive_lane_t & sl = lanes.data[idx];
518                                assert(!lanes.data[idx].lock);
[b798713]519
520                                assert(head(sl)->link.prev == 0p );
521                                assert(head(sl)->link.next->link.prev == head(sl) );
522                                assert(tail(sl)->link.next == 0p );
523                                assert(tail(sl)->link.prev->link.next == tail(sl) );
524
[7a2972b9]525                                if(is_empty(sl)) {
[b798713]526                                        assert(tail(sl)->link.prev == head(sl));
527                                        assert(head(sl)->link.next == tail(sl));
[1b143de]528                                } else {
529                                        assert(tail(sl)->link.prev != head(sl));
530                                        assert(head(sl)->link.next != tail(sl));
[b798713]531                                }
532                        }
533                }
534        #endif
535}
536
[9cc3a18]537//-----------------------------------------------------------------------
538// Given 2 indexes, pick the list with the oldest push an try to pop from it
539static inline struct $thread * try_pop(struct cluster * cltr, unsigned i, unsigned j) with (cltr->ready_queue) {
540        #if !defined(__CFA_NO_STATISTICS__)
541                __tls_stats()->ready.pick.pop.attempt++;
542        #endif
543
544	// Pick the better of the two lists
545        int w = i;
546        if( __builtin_expect(!is_empty(lanes.data[j]), true) ) {
547                w = (ts(lanes.data[i]) < ts(lanes.data[j])) ? i : j;
548        }
549
550        return try_pop(cltr, w);
551}
552
[b798713]553// Call this function if the intrusive list was moved using memcpy
[dca5802]554// fixes the list so that the pointers back to anchors aren't left dangling
555static inline void fix(__intrusive_lane_t & ll) {
[7a2972b9]556        #if !defined(USE_MPSC)
557		// if the list is not empty then follow the pointers and fix their reverse links
558                if(!is_empty(ll)) {
559                        head(ll)->link.next->link.prev = head(ll);
560                        tail(ll)->link.prev->link.next = tail(ll);
561                }
562                // Otherwise just reset the list
563                else {
564                        verify(tail(ll)->link.next == 0p);
565                        tail(ll)->link.prev = head(ll);
566                        head(ll)->link.next = tail(ll);
567                        verify(head(ll)->link.prev == 0p);
568                }
569        #endif
[b798713]570}
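// Rationale: the intrusive lanes embed their head/tail anchors in the lane object
// itself (see ready_subqueue.hfa), and the first/last queued threads point back at
// those anchors. When lanes.data is moved wholesale by realloc (see ready_queue_grow
// and ready_queue_shrink below), those back-pointers still reference the old storage,
// hence the fix() pass over every surviving lane.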
571
[9cc3a18]572static void assign_list(unsigned & value, dlist(processor, processor) & list, unsigned count) {
[a017ee7]573        processor * it = &list`first;
574        for(unsigned i = 0; i < count; i++) {
575                /* paranoid */ verifyf( it, "Unexpected null iterator, at index %u of %u\n", i, count);
[431cd4f]576                it->rdq.id = value;
577                it->rdq.target = -1u;
[9cc3a18]578                value += READYQ_SHARD_FACTOR;
[a017ee7]579                it = &(*it)`next;
580        }
581}
582
[9cc3a18]583static void reassign_cltr_id(struct cluster * cltr) {
[a017ee7]584        unsigned preferred = 0;
[9cc3a18]585        assign_list(preferred, cltr->procs.actives, cltr->procs.total - cltr->procs.idle);
586        assign_list(preferred, cltr->procs.idles  , cltr->procs.idle );
[a017ee7]587}
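// Worked example of the id assignment: with 3 active processors and the relaxed-FIFO
// default READYQ_SHARD_FACTOR == 4, the processors receive rdq.id = 0, 4 and 8, i.e.
// processor k owns the lane range [4k, 4k + 4). Active processors are numbered before
// idle ones, so busy processors keep the low, contiguous lanes after a resize.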
588
[431cd4f]589static void fix_times( struct cluster * cltr ) with( cltr->ready_queue ) {
590        #if defined(USE_WORK_STEALING)
591                lanes.tscs = alloc(lanes.count, lanes.tscs`realloc);
592                for(i; lanes.count) {
593                        lanes.tscs[i].tv = ts(lanes.data[i]);
594                }
595        #endif
596}
597
[dca5802]598// Grow the ready queue
[a017ee7]599void ready_queue_grow(struct cluster * cltr) {
[bd0bdd37]600        size_t ncount;
[a017ee7]601        int target = cltr->procs.total;
[bd0bdd37]602
[64a7146]603        /* paranoid */ verify( ready_mutate_islocked() );
[504a7dc]604        __cfadbg_print_safe(ready_queue, "Kernel : Growing ready queue\n");
[b798713]605
[dca5802]606        // Make sure that everything is consistent
607        /* paranoid */ check( cltr->ready_queue );
608
609        // grow the ready queue
[b798713]610        with( cltr->ready_queue ) {
[39fc03e]611                // Find new count
612		// Make sure we always have at least 1 list
[bd0bdd37]613                if(target >= 2) {
[9cc3a18]614                        ncount = target * READYQ_SHARD_FACTOR;
[bd0bdd37]615                } else {
616                        ncount = 1;
617                }
[b798713]618
[dca5802]619                // Allocate new array (uses realloc and memcpies the data)
[ceb7db8]620                lanes.data = alloc( ncount, lanes.data`realloc );
[b798713]621
622                // Fix the moved data
[dca5802]623                for( idx; (size_t)lanes.count ) {
624                        fix(lanes.data[idx]);
[b798713]625                }
626
627                // Construct new data
[dca5802]628                for( idx; (size_t)lanes.count ~ ncount) {
629                        (lanes.data[idx]){};
[b798713]630                }
631
632                // Update original
[dca5802]633                lanes.count = ncount;
[b798713]634        }
635
[9cc3a18]636        fix_times(cltr);
637
638        reassign_cltr_id(cltr);
[a017ee7]639
[b798713]640        // Make sure that everything is consistent
[dca5802]641        /* paranoid */ check( cltr->ready_queue );
642
[504a7dc]643        __cfadbg_print_safe(ready_queue, "Kernel : Growing ready queue done\n");
[dca5802]644
[64a7146]645        /* paranoid */ verify( ready_mutate_islocked() );
[b798713]646}
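// Example of the sizing rule above: growing a cluster to target == 3 processors under
// the relaxed-FIFO defaults yields ncount = 3 * READYQ_SHARD_FACTOR = 12 lanes, while
// a cluster with fewer than 2 processors keeps a single lane. Because the array is
// realloc'd and then fix()'d, threads already queued in existing lanes survive the
// resize in place.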
647
[dca5802]648// Shrink the ready queue
[a017ee7]649void ready_queue_shrink(struct cluster * cltr) {
[64a7146]650        /* paranoid */ verify( ready_mutate_islocked() );
[504a7dc]651        __cfadbg_print_safe(ready_queue, "Kernel : Shrinking ready queue\n");
[dca5802]652
653        // Make sure that everything is consistent
654        /* paranoid */ check( cltr->ready_queue );
655
[a017ee7]656        int target = cltr->procs.total;
657
[b798713]658        with( cltr->ready_queue ) {
[39fc03e]659                // Remember old count
[dca5802]660                size_t ocount = lanes.count;
[b798713]661
[39fc03e]662                // Find new count
663                // Make sure we always have atleast 1 list
[9cc3a18]664                lanes.count = target >= 2 ? target * READYQ_SHARD_FACTOR: 1;
[39fc03e]665                /* paranoid */ verify( ocount >= lanes.count );
[9cc3a18]666                /* paranoid */ verify( lanes.count == target * READYQ_SHARD_FACTOR || target < 2 );
[dca5802]667
668		// for printing, count the number of displaced threads
[504a7dc]669                #if defined(__CFA_DEBUG_PRINT__) || defined(__CFA_DEBUG_PRINT_READY_QUEUE__)
[dca5802]670                        __attribute__((unused)) size_t displaced = 0;
671                #endif
[b798713]672
673                // redistribute old data
[dca5802]674                for( idx; (size_t)lanes.count ~ ocount) {
675                        // Lock is not strictly needed but makes checking invariants much easier
[1b143de]676                        __attribute__((unused)) bool locked = __atomic_try_acquire(&lanes.data[idx].lock);
[b798713]677                        verify(locked);
[dca5802]678
679                        // As long as we can pop from this lane to push the threads somewhere else in the queue
680                        while(!is_empty(lanes.data[idx])) {
[504a7dc]681                                struct $thread * thrd;
[343d10e]682                                thrd = pop(lanes.data[idx]);
[dca5802]683
[b798713]684                                push(cltr, thrd);
[dca5802]685
686				// for printing, count the number of displaced threads
[504a7dc]687                                #if defined(__CFA_DEBUG_PRINT__) || defined(__CFA_DEBUG_PRINT_READY_QUEUE__)
[dca5802]688                                        displaced++;
689                                #endif
[b798713]690                        }
691
[dca5802]692                        // Unlock the lane
693                        __atomic_unlock(&lanes.data[idx].lock);
[b798713]694
695                        // TODO print the queue statistics here
696
[dca5802]697                        ^(lanes.data[idx]){};
[b798713]698                }
699
[504a7dc]700                __cfadbg_print_safe(ready_queue, "Kernel : Shrinking ready queue displaced %zu threads\n", displaced);
[c84b4be]701
[dca5802]702                // Allocate new array (uses realloc and memcpies the data)
[ceb7db8]703                lanes.data = alloc( lanes.count, lanes.data`realloc );
[b798713]704
705                // Fix the moved data
[dca5802]706                for( idx; (size_t)lanes.count ) {
707                        fix(lanes.data[idx]);
[b798713]708                }
709        }
710
[9cc3a18]711        fix_times(cltr);
712
713        reassign_cltr_id(cltr);
[a017ee7]714
[b798713]715        // Make sure that everything is consistent
[dca5802]716        /* paranoid */ check( cltr->ready_queue );
717
[504a7dc]718        __cfadbg_print_safe(ready_queue, "Kernel : Shrinking ready queue done\n");
[64a7146]719        /* paranoid */ verify( ready_mutate_islocked() );
[fd9b524]720}
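// Shrinking works in the opposite direction: every thread sitting in a lane that is
// about to disappear is popped and re-pushed through the regular push(), so it lands
// in one of the surviving lanes, before lanes.data is trimmed with realloc and the
// remaining lanes are fix()'d.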