Changeset 504a7dc


Timestamp:  May 12, 2020, 1:32:45 PM
Author:     Thierry Delisle <tdelisle@…>
Branches:   ADT, arm-eh, ast-experimental, enum, forall-pointer-decay, jacob/cs343-translation, master, new-ast, new-ast-unique-expr, pthread-emulation, qualifiedEnum
Children:   4fa44e7
Parents:    6a490b2
Message:    Some fixes after the merge, compiles but still has livelocks
Location:   libcfa/src
Files:      6 edited

Legend: lines prefixed with '-' were removed and lines prefixed with '+' were added; unprefixed lines are unchanged context, and '…' marks elided unchanged code between hunks.
  • libcfa/src/bits/debug.hfa

    --- r6a490b2
    +++ r504a7dc
         || defined(__CFA_DEBUG_PRINT_IO__) || defined(__CFA_DEBUG_PRINT_IO_CORE__) \
         || defined(__CFA_DEBUG_PRINT_MONITOR__) || defined(__CFA_DEBUG_PRINT_PREEMPTION__) \
    -    || defined(__CFA_DEBUG_PRINT_RUNTIME_CORE__) || defined(__CFA_DEBUG_PRINT_EXCEPTION__)
    +    || defined(__CFA_DEBUG_PRINT_RUNTIME_CORE__) || defined(__CFA_DEBUG_PRINT_EXCEPTION__) \
    +    || defined(__CFA_DEBUG_PRINT_READY_QUEUE__)
     #include <stdio.h>
     #include <unistd.h>
  • libcfa/src/concurrency/kernel.cfa

    --- r6a490b2
    +++ r504a7dc
     static void __run_thread(processor * this, $thread * dst);
     static $thread * __halt(processor * this);
    -static bool __wake_one(cluster * cltr, bool was_empty);
    +static bool __wake_one(cluster * cltr);
     static bool __wake_proc(processor *);
    …
         // register the processor unless it's the main thread which is handled in the boot sequence
         if(this != mainProcessor) {
    -        this->id = doregister(this->cltr, this);
    +        this->id = doregister2(this->cltr, this);
             ready_queue_grow( this->cltr );
         }
     
    +    doregister(this->cltr, this);
     
         {
    …
                     /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
                     /* paranoid */ verifyf( readyThread->state == Ready || readyThread->preempted != __NO_PREEMPTION, "state : %d, preempted %d\n", readyThread->state, readyThread->preempted);
    -                /* paranoid */ verifyf( readyThread->next == 0p, "Expected null got %p", readyThread->next );
    +                /* paranoid */ verifyf( readyThread->link.next == 0p, "Expected null got %p", readyThread->link.next );
     
                     // We found a thread run it
    …
         }
     
    +    unregister(this->cltr, this);
    +
         V( this->terminated );
     
    …
         if(this != mainProcessor) {
             ready_queue_shrink( this->cltr );
    -        unregister(this->cltr, this);
    +        unregister2(this->cltr, this);
         }
         else {
    …
         /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
         /* paranoid */ #if defined( __CFA_WITH_VERIFY__ )
    -    /* paranoid */ if( thrd->state == Blocked || thrd->state == Start ) assertf( thrd->preempted == __NO_PREEMPTION,
    -                      "Error inactive thread marked as preempted, state %d, preemption %d\n", thrd->state, thrd->preempted );
    -    /* paranoid */ if( thrd->preempted != __NO_PREEMPTION ) assertf(thrd->state == Active || thrd->state == Rerun,
    -                      "Error preempted thread marked as not currently running, state %d, preemption %d\n", thrd->state, thrd->preempted );
    +    /* paranoid */  if( thrd->state == Blocked || thrd->state == Start ) assertf( thrd->preempted == __NO_PREEMPTION,
    +                        "Error inactive thread marked as preempted, state %d, preemption %d\n", thrd->state, thrd->preempted );
    +    /* paranoid */  if( thrd->preempted != __NO_PREEMPTION ) assertf(thrd->state == Active || thrd->state == Rerun,
    +                        "Error preempted thread marked as not currently running, state %d, preemption %d\n", thrd->state, thrd->preempted );
         /* paranoid */ #endif
         /* paranoid */ verifyf( thrd->link.next == 0p, "Expected null got %p", thrd->link.next );
    …
     
         ready_schedule_lock(thrd->curr_cluster, kernelTLS.this_processor);
    -        bool was_empty = push( thrd->curr_cluster, thrd );
    +        push( thrd->curr_cluster, thrd );
    +
    +        __wake_one(thrd->curr_cluster);
         ready_schedule_unlock(thrd->curr_cluster, kernelTLS.this_processor);
    -
    -    __wake_one(thrd->curr_cluster, was_empty);
     
         /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
    …
         // If that is the case, abandon the preemption.
         bool preempted = false;
    -    if(thrd->next == 0p) {
    +    if(thrd->link.next == 0p) {
             preempted = true;
             thrd->preempted = reason;
    …
         (*mainProcessor){};
     
    -    mainProcessor->id = doregister(mainCluster, mainProcessor);
    +    mainProcessor->id = doregister2(mainCluster, mainProcessor);
     
         //initialize the global state variables
    …
         kernel_stop_preemption();
     
    -    unregister(mainCluster, mainProcessor);
    +    unregister2(mainCluster, mainProcessor);
     
         // Destroy the main processor and its context in reverse order of construction
    …
     
     // Wake a thread from the front if there are any
    -static bool __wake_one(cluster * this, __attribute__((unused)) bool force) {
    -    // if we don't want to force check if we know it's false
    -    // if( !this->idles.head && !force ) return false;
    -
    +static bool __wake_one(cluster * this) {
         // First, lock the cluster idle
         lock( this->idle_lock __cfaabi_dbg_ctx2 );
    …
         cltr->nthreads -= 1;
         unlock(cltr->thread_list_lock);
    +}
    +
    +void doregister( cluster * cltr, processor * proc ) {
    +    lock      (cltr->idle_lock __cfaabi_dbg_ctx2);
    +    cltr->nprocessors += 1;
    +    push_front(cltr->procs, *proc);
    +    unlock    (cltr->idle_lock);
    +}
    +
    +void unregister( cluster * cltr, processor * proc ) {
    +    lock  (cltr->idle_lock __cfaabi_dbg_ctx2);
    +    remove(cltr->procs, *proc );
    +    cltr->nprocessors -= 1;
    +    unlock(cltr->idle_lock);
     }
     
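
    Note on the __wake_one hunks: the old code computed was_empty inside ready_schedule_lock but issued the wake-up only after ready_schedule_unlock, leaving a window in which an idle processor could observe an empty queue and park just as the new thread became visible. The new code calls __wake_one while the lock is still held. A minimal pthreads analogy in plain C (names are illustrative, not the CFA implementation; whether this ordering is what the remaining livelocks relate to is not stated in the commit message):

        #include <pthread.h>

        static pthread_mutex_t rq_lock     = PTHREAD_MUTEX_INITIALIZER;
        static pthread_cond_t  rq_nonempty = PTHREAD_COND_INITIALIZER;
        static int rq_size = 0;                 /* stand-in for the ready queue */

        void schedule_one(void) {
            pthread_mutex_lock(&rq_lock);
            rq_size += 1;                       /* push( thrd )                       */
            pthread_cond_signal(&rq_nonempty);  /* wake-up issued under the lock,     */
            pthread_mutex_unlock(&rq_lock);     /* so a waiter cannot park in between */
        }
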
  • libcfa/src/concurrency/kernel.hfa

    --- r6a490b2
    +++ r504a7dc
     
         // Link lists fields
    -    struct __dbg_node_proc {
    -        struct processor * next;
    -        struct processor * prev;
    +    struct __dbg_node_cltr {
    +        processor * next;
    +        processor * prev;
         } node;
     
    …
             // Link lists fields
             // instrusive link field for threads
    -        // must be exactly as in thread_desc
    +        // must be exactly as in $thread
             __thread_desc_link link;
         } before, after;
    …
     
         // List of processors
    -    __spinlock_t proc_list_lock;
    +    __spinlock_t idle_lock;
    +    __dllist_t(struct processor) procs;
         __dllist_t(struct processor) idles;
    +    unsigned int nprocessors;
     
         // List of threads
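
    The cluster now keeps two processor lists, with the former proc_list_lock renamed to idle_lock and apparently guarding both, plus a registered-processor count. A rough C rendering of the new layout (the typedefs are simplified stand-ins, not the CFA definitions):

        typedef struct processor processor;
        typedef struct { processor * head; } dllist_proc; /* hypothetical stand-in for __dllist_t   */
        typedef volatile int spinlock;                    /* hypothetical stand-in for __spinlock_t */

        struct cluster_procs {
            spinlock    idle_lock;    /* was proc_list_lock; guards the lists and count below  */
            dllist_proc procs;        /* every processor registered on the cluster             */
            dllist_proc idles;        /* the subset currently idle                             */
            unsigned    nprocessors;  /* maintained by the new doregister/unregister           */
        };
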
  • libcfa/src/concurrency/kernel_private.hfa

    --- r6a490b2
    +++ r504a7dc
     void unregister( struct cluster * cltr, struct $thread & thrd );
     
    +void doregister( struct cluster * cltr, struct processor * proc );
    +void unregister( struct cluster * cltr, struct processor * proc );
    +
     //=======================================================================
     // Cluster lock API
    …
     // Lock-Free registering/unregistering of threads
     // Register a processor to a given cluster and get its unique id in return
    -unsigned doregister( struct cluster * cltr, struct processor * proc );
    +unsigned doregister2( struct cluster * cltr, struct processor * proc );
     
     // Unregister a processor from a given cluster using its id, getting back the original pointer
    -void     unregister( struct cluster * cltr, struct processor * proc );
    +void     unregister2( struct cluster * cltr, struct processor * proc );
     
     //=======================================================================
    …
     // push thread onto a ready queue for a cluster
     // returns true if the list was previously empty, false otherwise
    -__attribute__((hot)) bool push(struct cluster * cltr, struct thread_desc * thrd);
    +__attribute__((hot)) bool push(struct cluster * cltr, struct $thread * thrd);
     
     //-----------------------------------------------------------------------
     // pop thread from the ready queue of a cluster
     // returns 0p if empty
    -__attribute__((hot)) thread_desc * pop(struct cluster * cltr);
    +__attribute__((hot)) struct $thread * pop(struct cluster * cltr);
     
     //-----------------------------------------------------------------------
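
    After this change the header declares two independent processor-registration schemes: a lock-based one that tracks list membership on the cluster, and the pre-existing lock-free one that hands out a unique ready-queue id. Both pairs take the same (cluster *, processor *) arguments and differ only in return type, which is presumably why the lock-free pair was renamed with a "2" suffix rather than overloaded. Side by side (C-style prototypes, copied from the hunks above):

        /* lock-based: maintains cltr->procs and cltr->nprocessors under idle_lock */
        void     doregister ( struct cluster * cltr, struct processor * proc );
        void     unregister ( struct cluster * cltr, struct processor * proc );

        /* lock-free: assigns/releases a unique ready-queue id for the processor */
        unsigned doregister2( struct cluster * cltr, struct processor * proc );
        void     unregister2( struct cluster * cltr, struct processor * proc );
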
  • libcfa/src/concurrency/monitor.cfa

    --- r6a490b2
    +++ r504a7dc
     
             // Some one else has the monitor, wait in line for it
    -        /* paranoid */ verify( thrd->next == 0p );
    +        /* paranoid */ verify( thrd->link.next == 0p );
             append( this->entry_queue, thrd );
    -        /* paranoid */ verify( thrd->next == 1p );
    +        /* paranoid */ verify( thrd->link.next == 1p );
     
             unlock( this->lock );
    …
     
             // Some one else has the monitor, wait in line for it
    -        /* paranoid */ verify( thrd->next == 0p );
    +        /* paranoid */ verify( thrd->link.next == 0p );
             append( this->entry_queue, thrd );
    -        /* paranoid */ verify( thrd->next == 1p );
    +        /* paranoid */ verify( thrd->link.next == 1p );
             unlock( this->lock );
     
    …
         $thread * new_owner = pop_head( this->entry_queue );
         /* paranoid */ verifyf( !this->owner || kernelTLS.this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this );
    -    /* paranoid */ verify( !new_owner || new_owner->next == 0p );
    +    /* paranoid */ verify( !new_owner || new_owner->link.next == 0p );
         __set_owner( this, new_owner );
     
    …
         }
     
    -    __cfaabi_dbg_print_safe( "Kernel :  Runing %i (%p)\n", ready2run, ready2run ? (thread_desc*)node->waiting_thread : (thread_desc*)0p );
    +    __cfaabi_dbg_print_safe( "Kernel :  Runing %i (%p)\n", ready2run, ready2run ? (thread*)node->waiting_thread : (thread*)0p );
         return ready2run ? node->waiting_thread : 0p;
     }
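
    The monitor hunks only follow the link-field move (thrd->next becomes thrd->link.next), but they also document a sentinel idiom: 0p marks "on no list", and after append the entry queue appears to leave the thread's next pointer at 1p, a non-null, never-dereferenced marker. A minimal sketch of that idiom in plain C (names hypothetical; CFA's 0p/1p are typed pointer literals):

        #include <assert.h>

        struct thread_s { struct thread_s * link_next; };

        #define NOT_QUEUED  ((struct thread_s *)0)  /* CFA 0p: on no queue          */
        #define QUEUED_TAIL ((struct thread_s *)1)  /* CFA 1p: queued, end-of-queue */

        void entry_append(struct thread_s ** tail_next, struct thread_s * thrd) {
            assert(thrd->link_next == NOT_QUEUED);  /* must not already be queued   */
            *tail_next = thrd;                      /* splice onto the queue        */
            thrd->link_next = QUEUED_TAIL;          /* new tail carries the sentinel */
            assert(thrd->link_next == QUEUED_TAIL); /* what monitor.cfa verifies    */
        }
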
  • libcfa/src/concurrency/ready_queue.cfa

    --- r6a490b2
    +++ r504a7dc
     
     #define __cforall_thread__
    +#define __CFA_DEBUG_PRINT_READY_QUEUE__
     
     #include "bits/defs.hfa"
    …
         const char * max_cores_s = getenv("CFA_MAX_PROCESSORS");
         if(!max_cores_s) {
    -        __cfaabi_dbg_print_nolock("No CFA_MAX_PROCESSORS in ENV");
    +        __cfadbg_print_nolock(ready_queue, "No CFA_MAX_PROCESSORS in ENV\n");
             return __CFA_MAX_PROCESSORS__;
         }
    …
         long int max_cores_l = strtol(max_cores_s, &endptr, 10);
         if(max_cores_l < 1 || max_cores_l > 65535) {
    -        __cfaabi_dbg_print_nolock("CFA_MAX_PROCESSORS out of range : %ld", max_cores_l);
    +        __cfadbg_print_nolock(ready_queue, "CFA_MAX_PROCESSORS out of range : %ld\n", max_cores_l);
             return __CFA_MAX_PROCESSORS__;
         }
         if('\0' != *endptr) {
    -        __cfaabi_dbg_print_nolock("CFA_MAX_PROCESSORS not a decimal number : %s", max_cores_s);
    +        __cfadbg_print_nolock(ready_queue, "CFA_MAX_PROCESSORS not a decimal number : %s\n", max_cores_s);
             return __CFA_MAX_PROCESSORS__;
         }
    …
     //=======================================================================
     // Lock-Free registering/unregistering of threads
    -unsigned doregister( struct cluster * cltr, struct processor * proc ) with(cltr->ready_lock) {
    +unsigned doregister2( struct cluster * cltr, struct processor * proc ) with(cltr->ready_lock) {
    +    __cfadbg_print_safe(ready_queue, "Kernel : Registering proc %p with cluster %p\n", proc, cltr);
    +
         // Step - 1 : check if there is already space in the data
         uint_fast32_t s = ready;
    …
         }
     
    +    __cfadbg_print_safe(ready_queue, "Kernel : Registering proc %p done, id %u\n", proc, n);
    +
         // Return new spot.
         /*paranoid*/ verify(n < ready);
    …
     }
     
    -void unregister( struct cluster * cltr, struct processor * proc ) with(cltr->ready_lock) {
    +void unregister2( struct cluster * cltr, struct processor * proc ) with(cltr->ready_lock) {
         unsigned id = proc->id;
         /*paranoid*/ verify(id < ready);
         /*paranoid*/ verify(proc == __atomic_load_n(&data[id].handle, __ATOMIC_RELAXED));
         __atomic_store_n(&data[id].handle, 0p, __ATOMIC_RELEASE);
    +
    +    __cfadbg_print_safe(ready_queue, "Kernel : Unregister proc %p\n", proc);
     }
     
    …
     //=======================================================================
     // Get the head pointer (one before the first element) from the anchor
    -static inline thread_desc * head(const __intrusive_lane_t & this) {
    -    thread_desc * rhead = (thread_desc *)(
    -        (uintptr_t)( &this.before ) - offsetof( thread_desc, link )
    +static inline $thread * head(const __intrusive_lane_t & this) {
    +    $thread * rhead = ($thread *)(
    +        (uintptr_t)( &this.before ) - offsetof( $thread, link )
         );
         /* paranoid */ verify(rhead);
    …
     
     // Get the tail pointer (one after the last element) from the anchor
    -static inline thread_desc * tail(const __intrusive_lane_t & this) {
    -    thread_desc * rtail = (thread_desc *)(
    -        (uintptr_t)( &this.after ) - offsetof( thread_desc, link )
    +static inline $thread * tail(const __intrusive_lane_t & this) {
    +    $thread * rtail = ($thread *)(
    +        (uintptr_t)( &this.after ) - offsetof( $thread, link )
         );
         /* paranoid */ verify(rtail);
    …
     void ?{}( __intrusive_lane_t & this ) {
         this.lock = false;
    -    this.last_id = -1u;
    -    this.count = 0u;
    +    #if defined(__CFA_WITH_VERIFY__)
    +        this.last_id = -1u;
    +        this.count = 0u;
    +    #endif
     
         this.before.link.prev = 0p;
    …
     
         // We add a boat-load of assertions here because the anchor code is very fragile
    -    /* paranoid */ verify(((uintptr_t)( head(this) ) + offsetof( thread_desc, link )) == (uintptr_t)(&this.before));
    -    /* paranoid */ verify(((uintptr_t)( tail(this) ) + offsetof( thread_desc, link )) == (uintptr_t)(&this.after ));
    +    /* paranoid */ verify(((uintptr_t)( head(this) ) + offsetof( $thread, link )) == (uintptr_t)(&this.before));
    +    /* paranoid */ verify(((uintptr_t)( tail(this) ) + offsetof( $thread, link )) == (uintptr_t)(&this.after ));
         /* paranoid */ verify(head(this)->link.prev == 0p );
         /* paranoid */ verify(head(this)->link.next == tail(this) );
    …
     // Push a thread onto this lane
     // returns true of lane was empty before push, false otherwise
    -bool push(__intrusive_lane_t & this, thread_desc * node) {
    +bool push(__intrusive_lane_t & this, $thread * node) {
         #if defined(__CFA_WITH_VERIFY__)
             /* paranoid */ verify(this.lock);
    …
             /* paranoid */ verify(node->link.next == 0p);
             /* paranoid */ verify(node->link.prev == 0p);
    +        /* paranoid */ verify(tail(this)->link.next == 0p);
    +        /* paranoid */ verify(head(this)->link.prev == 0p);
     
             this.count++;
     
             if(this.before.link.ts == 0l) {
    -            /* paranoid */ verify(tail(this)->link.next == 0p);
                 /* paranoid */ verify(tail(this)->link.prev == head(this));
                 /* paranoid */ verify(head(this)->link.next == tail(this));
    -            /* paranoid */ verify(head(this)->link.prev == 0p);
    +        } else {
    +            /* paranoid */ verify(tail(this)->link.prev != head(this));
    +            /* paranoid */ verify(head(this)->link.next != tail(this));
             }
         #endif
     
         // Get the relevant nodes locally
    -    thread_desc * tail = tail(this);
    -    thread_desc * prev = tail->link.prev;
    +    $thread * tail = tail(this);
    +    $thread * prev = tail->link.prev;
     
         // Do the push
    …
     // returns popped
     // returns true of lane was empty before push, false otherwise
    -[thread_desc *, bool] pop(__intrusive_lane_t & this) {
    +[$thread *, bool] pop(__intrusive_lane_t & this) {
         /* paranoid */ verify(this.lock);
         /* paranoid */ verify(this.before.link.ts != 0ul);
     
         // Get anchors locally
    -    thread_desc * head = head(this);
    -    thread_desc * tail = tail(this);
    +    $thread * head = head(this);
    +    $thread * tail = tail(this);
     
         // Get the relevant nodes locally
    -    thread_desc * node = head->link.next;
    -    thread_desc * next = node->link.next;
    +    $thread * node = head->link.next;
    +    $thread * next = node->link.next;
     
         #if defined(__CFA_WITH_VERIFY__)
    …
     
         // Check if we emptied list and return accordingly
    +    /* paranoid */ verify(tail(this)->link.next == 0p);
    +    /* paranoid */ verify(head(this)->link.prev == 0p);
         if(next == tail) {
             /* paranoid */ verify(this.before.link.ts == 0);
    -        /* paranoid */ verify(tail(this)->link.next == 0p);
             /* paranoid */ verify(tail(this)->link.prev == head(this));
             /* paranoid */ verify(head(this)->link.next == tail(this));
    -        /* paranoid */ verify(head(this)->link.prev == 0p);
             return [node, true];
         }
         else {
             /* paranoid */ verify(next->link.ts != 0);
    +        /* paranoid */ verify(tail(this)->link.prev != head(this));
    +        /* paranoid */ verify(head(this)->link.next != tail(this));
             /* paranoid */ verify(this.before.link.ts != 0);
             return [node, false];
    …
         // Conditional check
         verifyf(
    -        strict == STRICT && // Conditional check if it was expected to be cleared
    +        strict != STRICT || // Conditional check if it was expected to be cleared
             ((mask[word] & (1ull << bit)) == 0),
             "Before set %llu:%llu (%u), %llx & %llx", word, bit, index, mask[word], (1ull << bit)
    …
         // Conditional check
         verifyf(
    -        strict == STRICT && // Conditional check if it was expected to be cleared
    +        strict != STRICT || // Conditional check if it was expected to be cleared
             !ret,
             "Bit was not set but bts returned true"
    …
     
     //-----------------------------------------------------------------------
    -__attribute__((hot)) bool push(struct cluster * cltr, struct thread_desc * thrd) with (cltr->ready_queue) {
    +__attribute__((hot)) bool push(struct cluster * cltr, struct $thread * thrd) with (cltr->ready_queue) {
         // write timestamp
         thrd->link.ts = rdtscl();
    …
         do {
             // Pick the index of a lane
    -        unsigned i = tls_rand() % lanes.count;
    +        i = __tls_rand() % lanes.count;
     
             #if !defined(__CFA_NO_STATISTICS__)
    …
             size_t ret = __atomic_fetch_add( &used.count, 1z, __ATOMIC_SEQ_CST);
     
    -        // Check if the entire quue used to be empty
    +        // Check if the entire queue used to be empty
             first = (ret == 0);
     
    …
     //-----------------------------------------------------------------------
     // Given 2 indexes, pick the list with the oldest push an try to pop from it
    -static struct thread_desc * try_pop(struct cluster * cltr, unsigned i, unsigned j) with (cltr->ready_queue) {
    +static struct $thread * try_pop(struct cluster * cltr, unsigned i, unsigned j) with (cltr->ready_queue) {
         #if !defined(__CFA_NO_STATISTICS__)
             tls.pick.pop.attempt++;
    …
     
         // Actually pop the list
    -    struct thread_desc * thrd;
    +    struct $thread * thrd;
         bool emptied;
         [thrd, emptied] = pop(lane);
    …
     
     // Pop from the ready queue from a given cluster
    -__attribute__((hot)) thread_desc * pop(struct cluster * cltr) with (cltr->ready_queue) {
    +__attribute__((hot)) $thread * pop(struct cluster * cltr) with (cltr->ready_queue) {
         /* paranoid */ verify( lanes.count > 0 );
     
    …
     
                 // Pick two lists at random
    -            unsigned ri = tls_rand();
    -            unsigned rj = tls_rand();
    +            unsigned ri = __tls_rand();
    +            unsigned rj = __tls_rand();
     
                 // Find which __cfa_readyQ_mask_t the two lists belong
    …
     
                 // try popping from the 2 picked lists
    -            struct thread_desc * thrd = try_pop(cltr, i, j);
    +            struct $thread * thrd = try_pop(cltr, i, j);
                 if(thrd) return thrd;
             #else
                 // Pick two lists at random
    -            int i = tls_rand() % __atomic_load_n( &lanes.count, __ATOMIC_RELAXED );
    -            int j = tls_rand() % __atomic_load_n( &lanes.count, __ATOMIC_RELAXED );
    +            int i = __tls_rand() % __atomic_load_n( &lanes.count, __ATOMIC_RELAXED );
    +            int j = __tls_rand() % __atomic_load_n( &lanes.count, __ATOMIC_RELAXED );
     
                 // try popping from the 2 picked lists
    -            struct thread_desc * thrd = try_pop(cltr, i, j);
    +            struct $thread * thrd = try_pop(cltr, i, j);
                 if(thrd) return thrd;
             #endif
    …
         uint_fast32_t last_size = ready_mutate_lock( *cltr );
     
    -    __cfaabi_dbg_print_safe("Kernel : Growing ready queue\n");
    +    __cfadbg_print_safe(ready_queue, "Kernel : Growing ready queue\n");
     
         // Make sure that everything is consistent
    …
         /* paranoid */ check( cltr->ready_queue );
     
    -    __cfaabi_dbg_print_safe("Kernel : Growing ready queue done\n");
    +    __cfadbg_print_safe(ready_queue, "Kernel : Growing ready queue done\n");
     
         // Unlock the RWlock
    …
         uint_fast32_t last_size = ready_mutate_lock( *cltr );
     
    -    __cfaabi_dbg_print_safe("Kernel : Shrinking ready queue\n");
    +    __cfadbg_print_safe(ready_queue, "Kernel : Shrinking ready queue\n");
     
         // Make sure that everything is consistent
    …
     
             // for printing count the number of displaced threads
    -        #if defined(__CFA_DEBUG_PRINT__)
    +        #if defined(__CFA_DEBUG_PRINT__) || defined(__CFA_DEBUG_PRINT_READY_QUEUE__)
                 __attribute__((unused)) size_t displaced = 0;
             #endif
    …
             // As long as we can pop from this lane to push the threads somewhere else in the queue
             while(!is_empty(lanes.data[idx])) {
    -            struct thread_desc * thrd;
    +            struct $thread * thrd;
                 __attribute__((unused)) bool _;
                 [thrd, _] = pop(lanes.data[idx]);
    …
     
                 // for printing count the number of displaced threads
    -            #if defined(__CFA_DEBUG_PRINT__)
    +            #if defined(__CFA_DEBUG_PRINT__) || defined(__CFA_DEBUG_PRINT_READY_QUEUE__)
                     displaced++;
                 #endif
    …
         }
     
    -    __cfaabi_dbg_print_safe("Kernel : Shrinking ready queue displaced %zu threads\n", displaced);
    +    __cfadbg_print_safe(ready_queue, "Kernel : Shrinking ready queue displaced %zu threads\n", displaced);
     
         // recompute the used.count instead of maintaining it
    …
         /* paranoid */ check( cltr->ready_queue );
     
    -    __cfaabi_dbg_print_safe("Kernel : Shrinking ready queue done\n");
    +    __cfadbg_print_safe(ready_queue, "Kernel : Shrinking ready queue done\n");
     
         // Unlock the RWlock
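
    Two details in this file are worth unpacking. First, head() and tail() recover phantom $thread pointers from the lane's before/after anchors by subtracting offsetof($thread, link), the usual intrusive-list "container of" trick; the rename changes the offsetof argument but not the arithmetic. A self-contained C sketch under simplified stand-in types (not the CFA definitions):

        #include <assert.h>
        #include <stddef.h>
        #include <stdint.h>

        struct link_t   { struct thread_t * next, * prev; };
        struct thread_t { int state; struct link_t link; };  /* stand-in for $thread */
        struct lane_t   { struct link_t before, after; };    /* stand-in anchors     */

        static struct thread_t * lane_head(struct lane_t * this) {
            /* the anchor is treated as the link field of a phantom head thread */
            return (struct thread_t *)((uintptr_t)&this->before - offsetof(struct thread_t, link));
        }

        int main(void) {
            struct lane_t lane = { {0}, {0} };
            /* the identity the "boat-load of assertions" checks: */
            assert((uintptr_t)lane_head(&lane) + offsetof(struct thread_t, link)
                   == (uintptr_t)&lane.before);
            return 0;
        }

    Second, the verifyf hunks flip "strict == STRICT &&" to "strict != STRICT ||". The intended condition is an implication, "if the call is strict, the bit must already be clear", and A implies B is written !A || B; the old conjunction made the assertion fail on every non-strict call.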