File: 1 edited

Legend:

      old   new      unmodified line
      old         -  removed line
            new   +  added line
  • libcfa/src/concurrency/ready_queue.cfa

    r5cb51502 → ra017ee7
      94    94      //=======================================================================
      95    95      // Lock-Free registering/unregistering of threads
      96          -  unsigned doregister( struct __processor_id_t * proc ) with(*__scheduler_lock) {
            96    +  void register_proc_id( struct __processor_id_t * proc ) with(*__scheduler_lock) {
      97    97              __cfadbg_print_safe(ready_queue, "Kernel : Registering proc %p for RW-Lock\n", proc);
      98    98
     
     108   108                      /*paranoid*/ verify(0 == (__alignof__(data[i]) % cache_line_size));
     109   109                      /*paranoid*/ verify((((uintptr_t)&data[i]) % cache_line_size) == 0);
     110           -                return i;
           110     +                proc->id = i;
     111   111              }
     112   112      }
     
     135   135              /*paranoid*/ verify(__alignof__(data[n]) == (2 * cache_line_size));
     136   136              /*paranoid*/ verify((((uintptr_t)&data[n]) % cache_line_size) == 0);
     137           -        return n;
     138           -  }
     139           -
     140           -  void unregister( struct __processor_id_t * proc ) with(*__scheduler_lock) {
           137     +        proc->id = n;
           138     +  }
           139     +
           140     +  void unregister_proc_id( struct __processor_id_t * proc ) with(*__scheduler_lock) {
     141   141              unsigned id = proc->id;
     142   142              /*paranoid*/ verify(id < ready);
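
The rename from doregister/unregister to register_proc_id/unregister_proc_id also changes the calling convention: the registration routine no longer returns the reader-slot index, it stores it into proc->id itself (the `return i;`/`return n;` statements become `proc->id = i;`/`proc->id = n;`). A minimal plain-C sketch of that caller-side difference, with a simplified slot counter standing in for the real RW-lock bookkeeping (everything except the `id` field is an illustrative assumption, not the actual kernel code):

    #include <assert.h>

    /* Simplified stand-in for the CFA struct; only the 'id' field matters here. */
    struct __processor_id_t { unsigned id; };

    static unsigned next_slot = 0;   /* hypothetical slot counter */

    /* Old style: the function returned the slot and the caller stored it. */
    static unsigned doregister_old( struct __processor_id_t * proc ) {
            (void)proc;
            return next_slot++;
    }

    /* New style: the function records the slot itself, as in the changeset. */
    static void register_proc_id_new( struct __processor_id_t * proc ) {
            proc->id = next_slot++;
    }

    int main(void) {
            struct __processor_id_t a, b;
            a.id = doregister_old( &a );     /* caller stores the index */
            register_proc_id_new( &b );      /* callee stores the index */
            assert( a.id == 0 && b.id == 1 );
            return 0;
    }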
     
     254   254              __attribute__((unused)) int preferred;
     255   255              #if defined(BIAS)
           256     +                /* paranoid */ verify(external || kernelTLS().this_processor->cltr_id < lanes.count );
     256   257                      preferred =
     257   258                              //*
     
     344   345              int preferred;
     345   346              #if defined(BIAS)
     346           -                // Don't bother trying locally too much
           347     +                /* paranoid */ verify(kernelTLS().this_processor->cltr_id < lanes.count );
     347   348                      preferred = kernelTLS().this_processor->cltr_id;
     348   349              #endif
     
     541   542      }
     542   543
           544     +  static void assign_list(unsigned & value, const int inc, dlist(processor, processor) & list, unsigned count) {
           545     +          processor * it = &list`first;
           546     +          for(unsigned i = 0; i < count; i++) {
           547     +                  /* paranoid */ verifyf( it, "Unexpected null iterator, at index %u of %u\n", i, count);
           548     +                  it->cltr_id = value;
           549     +                  value += inc;
           550     +                  it = &(*it)`next;
           551     +          }
           552     +  }
           553     +
           554     +  static void reassign_cltr_id(struct cluster * cltr, const int inc) {
           555     +          unsigned preferred = 0;
           556     +          assign_list(preferred, inc, cltr->procs.actives, cltr->procs.total - cltr->procs.idle);
           557     +          assign_list(preferred, inc, cltr->procs.idles  , cltr->procs.idle );
           558     +  }
           559     +
     543   560      // Grow the ready queue
     544           -  unsigned ready_queue_grow(struct cluster * cltr, int target) {
     545           -          unsigned preferred;
           561     +  void ready_queue_grow(struct cluster * cltr) {
     546   562              size_t ncount;
           563     +          int target = cltr->procs.total;
     547   564
     548   565              /* paranoid */ verify( ready_mutate_islocked() );
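
The new assign_list helper walks a processor list and hands out cltr_id values spaced inc apart; reassign_cltr_id numbers the active processors first and then the idle ones, continuing the same counter. ready_queue_grow now takes its processor count from cltr->procs.total instead of a target parameter. A plain-C sketch of the same striding, using arrays as a simplification of the CFA dlist code (struct and function names here are illustrative assumptions):

    #include <stdio.h>

    struct proc { unsigned cltr_id; };     /* simplified processor record */

    /* Same idea as assign_list: each processor gets the current value,
     * and the running counter advances by 'inc' per processor.          */
    static void assign_array( unsigned * value, int inc, struct proc * list, unsigned count ) {
            for( unsigned i = 0; i < count; i++ ) {
                    list[i].cltr_id = *value;
                    *value += inc;
            }
    }

    int main(void) {
            struct proc actives[2], idles[1];
            unsigned preferred = 0;
            /* Mirrors reassign_cltr_id: actives first, idles after, one counter. */
            assign_array( &preferred, 4, actives, 2 );
            assign_array( &preferred, 4, idles,   1 );
            /* Prints "0 4 8": each processor starts its own group of four ids. */
            printf( "%u %u %u\n", actives[0].cltr_id, actives[1].cltr_id, idles[0].cltr_id );
            return 0;
    }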
     
     562   579                      if(target >= 2) {
     563   580                              ncount = target * 4;
     564           -                        preferred = ncount - 4;
     565   581                      } else {
     566   582                              ncount = 1;
     567           -                        preferred = 0;
     568   583                      }
     569   584
     
     595   610              }
     596   611
           612     +          reassign_cltr_id(cltr, 4);
           613     +
     597   614              // Make sure that everything is consistent
     598   615              /* paranoid */ check( cltr->ready_queue );
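
With target = cltr->procs.total, the grow path sizes the queue to ncount = target * 4 lanes (for two or more processors) and then calls reassign_cltr_id(cltr, 4), so each processor's cltr_id appears to land on the first lane of its own block of four, which is what the new `cltr_id < lanes.count` verifies rely on. A small worked check of that arithmetic (illustrative only, not part of the changeset):

    #include <assert.h>

    int main(void) {
            unsigned target = 3;                /* e.g. three processors on the cluster */
            unsigned ncount = target * 4;       /* lanes sized by ready_queue_grow      */
            /* reassign_cltr_id(cltr, 4) would hand out 0, 4, 8, ... as cltr_id values. */
            for( unsigned p = 0; p < target; p++ ) {
                    unsigned cltr_id = p * 4;
                    assert( cltr_id < ncount );          /* the new paranoid verify      */
                    assert( cltr_id + 3 < ncount );      /* a full block of 4 lanes fits */
            }
            return 0;
    }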
     
     601   618
     602   619              /* paranoid */ verify( ready_mutate_islocked() );
     603           -          return preferred;
     604   620      }
     605   621
     606   622      // Shrink the ready queue
     607           -  void ready_queue_shrink(struct cluster * cltr, int target) {
           623     +  void ready_queue_shrink(struct cluster * cltr) {
     608   624              /* paranoid */ verify( ready_mutate_islocked() );
     609   625              __cfadbg_print_safe(ready_queue, "Kernel : Shrinking ready queue\n");
     
     611   627              // Make sure that everything is consistent
     612   628              /* paranoid */ check( cltr->ready_queue );
           629     +
           630     +          int target = cltr->procs.total;
     613   631
     614   632              with( cltr->ready_queue ) {
     
     679   697              }
     680   698
           699     +          reassign_cltr_id(cltr, 4);
           700     +
     681   701              // Make sure that everything is consistent
     682   702              /* paranoid */ check( cltr->ready_queue );