Changeset b388ee81


Timestamp: Jun 11, 2020, 6:47:27 PM (4 years ago)
Author: Thierry Delisle <tdelisle@…>
Branches: ADT, arm-eh, ast-experimental, enum, forall-pointer-decay, jacob/cs343-translation, master, new-ast, new-ast-unique-expr, pthread-emulation, qualifiedEnum
Children: 9b1dcc2
Parents: 61d7bec
Message:
Changed the ready RW-Lock to be a single global lock instead of one lock per cluster. This was needed because, otherwise, processors outside a cluster could not schedule threads onto it.

Location: libcfa/src/concurrency
Files: 4 edited
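
In short, the scheduling path no longer takes a per-cluster lock; it takes the single global __scheduler_lock through the current processor. A condensed before/after view of that path, using the names that appear in the kernel.cfa diff below:

	// Before: per-cluster RW-lock, caller must name the cluster
	ready_schedule_lock( thrd->curr_cluster, kernelTLS.this_processor );
		push( thrd->curr_cluster, thrd );
	ready_schedule_unlock( thrd->curr_cluster, kernelTLS.this_processor );

	// After: one global __scheduler_lock, only the processor is needed,
	// so a processor outside the target cluster can still schedule onto it
	ready_schedule_lock( kernelTLS.this_processor );
		push( thrd->curr_cluster, thrd );
	ready_schedule_unlock( kernelTLS.this_processor );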

  • libcfa/src/concurrency/kernel.cfa

r61d7bec rb388ee81

@@ -125 +125 @@
 //-----------------------------------------------------------------------------
 // Kernel storage
-KERNEL_STORAGE(cluster,         mainCluster);
-KERNEL_STORAGE(processor,       mainProcessor);
-KERNEL_STORAGE($thread, mainThread);
-KERNEL_STORAGE(__stack_t,       mainThreadCtx);
-
-cluster     * mainCluster;
-processor   * mainProcessor;
-$thread * mainThread;
+KERNEL_STORAGE(cluster,              mainCluster);
+KERNEL_STORAGE(processor,            mainProcessor);
+KERNEL_STORAGE($thread,              mainThread);
+KERNEL_STORAGE(__stack_t,            mainThreadCtx);
+KERNEL_STORAGE(__scheduler_RWLock_t, __scheduler_lock);
+
+cluster              * mainCluster;
+processor            * mainProcessor;
+$thread              * mainThread;
+__scheduler_RWLock_t * __scheduler_lock;
 
 extern "C" {

@@ -262 +264 @@
         this.preemption_rate = preemption_rate;
         ready_queue{};
-        ready_lock{};
 
         #if !defined(__CFA_NO_STATISTICS__)

@@ -299 +300 @@
         // register the processor unless it's the main thread which is handled in the boot sequence
         if(this != mainProcessor) {
-                this->id = doregister2(this->cltr, this);
+                this->id = doregister(this);
                 ready_queue_grow( this->cltr );
         }

@@ -345 +346 @@
         if(this != mainProcessor) {
                 ready_queue_shrink( this->cltr );
-                unregister2(this->cltr, this);
+                unregister(this);
         }
         else {

@@ -622 +623 @@
         if (thrd->preempted == __NO_PREEMPTION) thrd->state = Ready;
 
-        ready_schedule_lock(thrd->curr_cluster, kernelTLS.this_processor);
+        ready_schedule_lock( kernelTLS.this_processor );
                 push( thrd->curr_cluster, thrd );
 
                 __wake_one(thrd->curr_cluster);
-        ready_schedule_unlock(thrd->curr_cluster, kernelTLS.this_processor);
+        ready_schedule_unlock( kernelTLS.this_processor );
 
         /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );

@@ -635 +636 @@
         /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
 
-        ready_schedule_lock(this, kernelTLS.this_processor);
+        ready_schedule_lock( kernelTLS.this_processor );
                 $thread * head = pop( this );
-        ready_schedule_unlock(this, kernelTLS.this_processor);
+        ready_schedule_unlock( kernelTLS.this_processor );
 
         /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );

@@ -749 +750 @@
         __cfa_dbg_global_clusters.lock{};
 
+        // Initialize the global scheduler lock
+        __scheduler_lock = (__scheduler_RWLock_t*)&storage___scheduler_lock;
+        (*__scheduler_lock){};
+
         // Initialize the main cluster
         mainCluster = (cluster *)&storage_mainCluster;

@@ -793 +798 @@
         (*mainProcessor){};
 
-        mainProcessor->id = doregister2(mainCluster, mainProcessor);
+        mainProcessor->id = doregister(mainProcessor);
 
         //initialize the global state variables

@@ -848 +853 @@
         kernel_stop_preemption();
 
-        unregister2(mainCluster, mainProcessor);
+        unregister(mainProcessor);
 
         // Destroy the main processor and its context in reverse order of construction

@@ -866 +871 @@
 
         ^(*mainCluster){};
+
+        ^(*__scheduler_lock){};
 
         ^(__cfa_dbg_global_clusters.list){};
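
The startup and shutdown hunks above also pin down the lifetime of the lock: it is constructed before the main cluster and destroyed after it, so the lock exists whenever any cluster or processor does. Condensed from the hunks above:

	// kernel startup
	__scheduler_lock = (__scheduler_RWLock_t*)&storage___scheduler_lock;
	(*__scheduler_lock){};                           // lock first
	mainCluster = (cluster *)&storage_mainCluster;   // then clusters and processors
	...
	mainProcessor->id = doregister(mainProcessor);   // processors register with the global lock

	// kernel shutdown (reverse order)
	unregister(mainProcessor);
	...
	^(*mainCluster){};
	^(*__scheduler_lock){};                          // lock last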
  • libcfa/src/concurrency/kernel.hfa

r61d7bec rb388ee81

@@ -125 +125 @@
 //-----------------------------------------------------------------------------
 // Cluster Tools
-
-// Cells use by the reader writer lock
-// while not generic it only relies on a opaque pointer
-struct __processor_id;
-
-// Reader-Writer lock protecting the ready-queue
-// while this lock is mostly generic some aspects
-// have been hard-coded to for the ready-queue for
-// simplicity and performance
-struct __clusterRWLock_t {
-        // total cachelines allocated
-        unsigned int max;
-
-        // cachelines currently in use
-        volatile unsigned int alloc;
-
-        // cachelines ready to itereate over
-        // (!= to alloc when thread is in second half of doregister)
-        volatile unsigned int ready;
-
-        // writer lock
-        volatile bool lock;
-
-        // data pointer
-        __processor_id * data;
-};
-
-void  ?{}(__clusterRWLock_t & this);
-void ^?{}(__clusterRWLock_t & this);
 
 // Intrusives lanes which are used by the relaxed ready queue

@@ -236 +207 @@
 // Cluster
 struct cluster {
-        // Ready queue locks
-        __clusterRWLock_t ready_lock;
-
         // Ready queue for threads
         __ready_queue_t ready_queue;
  • libcfa/src/concurrency/kernel_private.hfa

r61d7bec rb388ee81

@@ -106 +106 @@
 // Cluster lock API
 //=======================================================================
+// Cells use by the reader writer lock
+// while not generic it only relies on a opaque pointer
 struct __attribute__((aligned(64))) __processor_id {
         processor * volatile handle;

@@ -113 +115 @@
 // Lock-Free registering/unregistering of threads
 // Register a processor to a given cluster and get its unique id in return
-unsigned doregister2( struct cluster * cltr, struct processor * proc );
+unsigned doregister( struct processor * proc );
 
 // Unregister a processor from a given cluster using its id, getting back the original pointer
-void     unregister2( struct cluster * cltr, struct processor * proc );
+void     unregister( struct processor * proc );
 
 //=======================================================================

@@ -146 +148 @@
 
 //-----------------------------------------------------------------------
+// Reader-Writer lock protecting the ready-queues
+// while this lock is mostly generic some aspects
+// have been hard-coded to for the ready-queue for
+// simplicity and performance
+struct __scheduler_RWLock_t {
+        // total cachelines allocated
+        unsigned int max;
+
+        // cachelines currently in use
+        volatile unsigned int alloc;
+
+        // cachelines ready to itereate over
+        // (!= to alloc when thread is in second half of doregister)
+        volatile unsigned int ready;
+
+        // writer lock
+        volatile bool lock;
+
+        // data pointer
+        __processor_id * data;
+};
+
+void  ?{}(__scheduler_RWLock_t & this);
+void ^?{}(__scheduler_RWLock_t & this);
+
+extern __scheduler_RWLock_t * __scheduler_lock;
+
+//-----------------------------------------------------------------------
 // Reader side : acquire when using the ready queue to schedule but not
 //  creating/destroying queues
-static inline void ready_schedule_lock( struct cluster * cltr, struct processor * proc) with(cltr->ready_lock) {
+static inline void ready_schedule_lock( struct processor * proc) with(*__scheduler_lock) {
         unsigned iproc = proc->id;
         /*paranoid*/ verify(data[iproc].handle == proc);

@@ -167 +197 @@
 }
 
-static inline void ready_schedule_unlock( struct cluster * cltr, struct processor * proc) with(cltr->ready_lock) {
+static inline void ready_schedule_unlock( struct processor * proc) with(*__scheduler_lock) {
         unsigned iproc = proc->id;
         /*paranoid*/ verify(data[iproc].handle == proc);

@@ -178 +208 @@
 // Writer side : acquire when changing the ready queue, e.g. adding more
 //  queues or removing them.
-uint_fast32_t ready_mutate_lock( struct cluster & cltr );
-
-void ready_mutate_unlock( struct cluster & cltr, uint_fast32_t /* value returned by lock */ );
+uint_fast32_t ready_mutate_lock( void );
+
+void ready_mutate_unlock( uint_fast32_t /* value returned by lock */ );
 
 //=======================================================================
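
The writer side is used bracket-style: the value returned by ready_mutate_lock() must be handed back to ready_mutate_unlock(), exactly as ready_queue_grow and ready_queue_shrink do in ready_queue.cfa below. A minimal usage sketch:

	// writer side: block out all readers while the set of ready queues changes
	uint_fast32_t last_size = ready_mutate_lock();
		// ... add or remove ready-queue lanes here ...
	ready_mutate_unlock( last_size );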
  • libcfa/src/concurrency/ready_queue.cfa

r61d7bec rb388ee81

@@ -29 +29 @@
 // fall back to a magic number
 #ifndef __CFA_MAX_PROCESSORS__
-        #define __CFA_MAX_PROCESSORS__ 128
+        #define __CFA_MAX_PROCESSORS__ 1024
 #endif
 

@@ -57 +57 @@
 // Cluster wide reader-writer lock
 //=======================================================================
-void  ?{}(__clusterRWLock_t & this) {
+void  ?{}(__scheduler_RWLock_t & this) {
         this.max   = __max_processors();
         this.alloc = 0;

@@ -70 +70 @@
 
 }
-void ^?{}(__clusterRWLock_t & this) {
+void ^?{}(__scheduler_RWLock_t & this) {
         free(this.data);
 }

@@ -81 +81 @@
 //=======================================================================
 // Lock-Free registering/unregistering of threads
-unsigned doregister2( struct cluster * cltr, struct processor * proc ) with(cltr->ready_lock) {
-        __cfadbg_print_safe(ready_queue, "Kernel : Registering proc %p with cluster %p\n", proc, cltr);
+unsigned doregister( struct processor * proc ) with(*__scheduler_lock) {
+        __cfadbg_print_safe(ready_queue, "Kernel : Registering proc %p for RW-Lock\n", proc);
 
         // Step - 1 : check if there is already space in the data

@@ -99 +99 @@
         }
 
-        if(max <= alloc) abort("Trying to create more than %ud processors", cltr->ready_lock.max);
+        if(max <= alloc) abort("Trying to create more than %ud processors", __scheduler_lock->max);
 
         // Step - 2 : F&A to get a new spot in the array.
         uint_fast32_t n = __atomic_fetch_add(&alloc, 1, __ATOMIC_SEQ_CST);
-        if(max <= n) abort("Trying to create more than %ud processors", cltr->ready_lock.max);
+        if(max <= n) abort("Trying to create more than %ud processors", __scheduler_lock->max);
 
         // Step - 3 : Mark space as used and then publish it.

@@ -125 +125 @@
 }
 
-void unregister2( struct cluster * cltr, struct processor * proc ) with(cltr->ready_lock) {
+void unregister( struct processor * proc ) with(*__scheduler_lock) {
         unsigned id = proc->id;
         /*paranoid*/ verify(id < ready);

@@ -137 +137 @@
 // Writer side : acquire when changing the ready queue, e.g. adding more
 //  queues or removing them.
-uint_fast32_t ready_mutate_lock( struct cluster & cltr ) with(cltr.ready_lock) {
+uint_fast32_t ready_mutate_lock( void ) with(*__scheduler_lock) {
         // Step 1 : lock global lock
         // It is needed to avoid processors that register mid Critical-Section

@@ -155 +155 @@
 }
 
-void ready_mutate_unlock( struct cluster & cltr, uint_fast32_t last_s ) with(cltr.ready_lock) {
+void ready_mutate_unlock( uint_fast32_t last_s ) with(*__scheduler_lock) {
         // Step 1 : release local locks
         // This must be done while the global lock is held to avoid

@@ -811 +811 @@
 void ready_queue_grow  (struct cluster * cltr) {
         // Lock the RWlock so no-one pushes/pops while we are changing the queue
-        uint_fast32_t last_size = ready_mutate_lock( *cltr );
+        uint_fast32_t last_size = ready_mutate_lock();
 
         __cfadbg_print_safe(ready_queue, "Kernel : Growing ready queue\n");

@@ -858 +858 @@
 
         // Unlock the RWlock
-        ready_mutate_unlock( *cltr, last_size );
+        ready_mutate_unlock( last_size );
 }
 

@@ -864 +864 @@
 void ready_queue_shrink(struct cluster * cltr) {
         // Lock the RWlock so no-one pushes/pops while we are changing the queue
-        uint_fast32_t last_size = ready_mutate_lock( *cltr );
+        uint_fast32_t last_size = ready_mutate_lock();
 
         __cfadbg_print_safe(ready_queue, "Kernel : Shrinking ready queue\n");

@@ -956 +956 @@
 
         // Unlock the RWlock
-        ready_mutate_unlock( *cltr, last_size );
+        ready_mutate_unlock( last_size );
 }
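
The Step comments in doregister above describe the registration scheme: a processor claims a cell in the lock's data array with a fetch-and-add and only then publishes it by advancing ready, which is why ready can lag alloc while a registration is in flight. Below is a simplified, self-contained C model of that reserve-then-publish idea; the names (slot_register, slots) are hypothetical, and the real doregister additionally reuses previously freed cells in Step 1 and stores __processor_id entries rather than raw pointers:

	#include <stdio.h>
	#include <stdlib.h>

	#define MAX_PROCESSORS 1024                     // mirrors the __CFA_MAX_PROCESSORS__ fallback

	static void * volatile slots[MAX_PROCESSORS];   // stand-in for the lock's data array
	static volatile unsigned alloc;                 // cells handed out so far
	static volatile unsigned ready;                 // cells published and safe to iterate over

	static unsigned slot_register( void * proc ) {
		// Step 2 : F&A to get a new spot in the array
		unsigned n = __atomic_fetch_add( &alloc, 1, __ATOMIC_SEQ_CST );
		if( n >= MAX_PROCESSORS ) abort();

		// Step 3 : mark the cell as used, then publish it by advancing 'ready'
		slots[n] = proc;
		unsigned expect = n;
		while( !__atomic_compare_exchange_n( &ready, &expect, n + 1, false,
		                                     __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST ) )
			expect = n;                         // 'ready' lags 'alloc' until it is our turn
		return n;
	}

	int main( void ) {
		int me;
		printf( "registered with id %u\n", slot_register( &me ) );
		return 0;
	}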