Timestamp:
Apr 29, 2021, 4:26:25 PM
Author:
Thierry Delisle <tdelisle@…>
Branches:
ADT, arm-eh, ast-experimental, enum, forall-pointer-decay, jacob/cs343-translation, master, new-ast-unique-expr, pthread-emulation, qualifiedEnum
Children:
3eb55f98
Parents:
b2fc7ad9
Message:

Changed RW lock to avoid hitting the global array on schedule.

File:
1 edited

  • libcfa/src/concurrency/kernel_private.hfa

--- libcfa/src/concurrency/kernel_private.hfa (rb2fc7ad9)
+++ libcfa/src/concurrency/kernel_private.hfa (rc993b15)
 // Scheduler
 
-struct __attribute__((aligned(128))) __scheduler_lock_id_t;
 
 extern "C" {
 
 // Lock-Free registering/unregistering of threads
 // Register a processor to a given cluster and get its unique id in return
-void register_proc_id( struct __processor_id_t * );
+unsigned register_proc_id( void );
 
 // Unregister a processor from a given cluster using its id, getting back the original pointer
-void unregister_proc_id( struct __processor_id_t * proc );
+void unregister_proc_id( unsigned );
 
 //=======================================================================
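With the new interface a processor is identified by an opaque slot index rather than a pointer into a global cell array. A minimal caller sketch of the pair above (only register_proc_id and unregister_proc_id come from this changeset; the surrounding comments are illustrative):

	// register this processor and remember the slot index it was assigned
	unsigned id = register_proc_id();
	// ... run, scheduling through the per-thread lock tied to this id ...
	// hand the slot back when the processor shuts down
	unregister_proc_id( id );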
     
 }
 
-// Cells use by the reader writer lock
-// while not generic it only relies on a opaque pointer
-struct __attribute__((aligned(128))) __scheduler_lock_id_t {
-	// Spin lock used as the underlying lock
-	volatile bool lock;
-
-	// Handle pointing to the proc owning this cell
-	// Used for allocating cells and debugging
-	__processor_id_t * volatile handle;
-
-	#ifdef __CFA_WITH_VERIFY__
-		// Debug, check if this is owned for reading
-		bool owned;
-	#endif
-};
-
-static_assert( sizeof(struct __scheduler_lock_id_t) <= __alignof(struct __scheduler_lock_id_t));
+
+
+
 
 //-----------------------------------------------------------------------
 
 
 	// writer lock
-	volatile bool lock;
+	volatile bool write_lock;
 
 	// data pointer
-	__scheduler_lock_id_t * data;
+	volatile bool * volatile * data;
 };
 
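With this change the per-reader cells (the old __scheduler_lock_id_t array) disappear: each reader's spin flag now lives in its kernel TLS block, and the lock itself keeps only the writer flag plus an array of pointers to those flags. A rough C sketch of the resulting shape (the struct names are made up; the fields mirror what the diff actually introduces, with sched_lock, sched_id and in_sched_lock being the new TLS members used below):

	// per-thread state, reached through kernelTLS() in the code below
	struct KernelThreadData_sketch {
		volatile bool sched_lock;   // this reader's own spin flag (replaces data[iproc].lock)
		unsigned sched_id;          // slot this reader occupies in the lock's data[] array
		bool in_sched_lock;         // debug flag, only under __CFA_WITH_VERIFY__
	};

	// the shared lock: one writer flag plus one pointer per registered reader
	struct scheduler_RWLock_sketch {
		volatile bool write_lock;          // writer lock, polled by readers in Step 1
		volatile bool * volatile * data;   // data[i] points at reader i's sched_lock in TLS
	};

Because readers only ever touch write_lock and their own flag, the 128-byte-aligned cell array and its static_assert are no longer needed.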
     
 static inline void ready_schedule_lock(void) with(*__scheduler_lock) {
 	/* paranoid */ verify( ! __preemption_enabled() );
-	/* paranoid */ verify( kernelTLS().this_proc_id );
-
-	unsigned iproc = kernelTLS().this_proc_id->id;
-	/*paranoid*/ verify(data[iproc].handle == kernelTLS().this_proc_id);
-	/*paranoid*/ verify(iproc < ready);
+	/* paranoid */ verify( ! kernelTLS().in_sched_lock );
+	/* paranoid */ verify( data[kernelTLS().sched_id] == &kernelTLS().sched_lock );
+	/* paranoid */ verify( !kernelTLS().this_processor || kernelTLS().this_processor->unique_id == kernelTLS().sched_id );
 
 	// Step 1 : make sure no writer are in the middle of the critical section
-	while(__atomic_load_n(&lock, (int)__ATOMIC_RELAXED))
+	while(__atomic_load_n(&write_lock, (int)__ATOMIC_RELAXED))
 		Pause();
 
 
 
 	// Step 2 : acquire our local lock
-	__atomic_acquire( &data[iproc].lock );
-	/*paranoid*/ verify(data[iproc].lock);
+	__atomic_acquire( &kernelTLS().sched_lock );
+	/*paranoid*/ verify(kernelTLS().sched_lock);
 
 	#ifdef __CFA_WITH_VERIFY__
 		// Debug, check if this is owned for reading
-		data[iproc].owned = true;
+		kernelTLS().in_sched_lock = true;
 	#endif
 }
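The reader fast path above no longer touches the shared array at all: Step 1 reads the single write_lock flag and Step 2 spins on the reader's own TLS flag, so an uncontended schedule stays in thread-local state. The writer half is not part of this diff; the following is only a plausible plain-C sketch of how a writer could use the same two fields (the function name, the n_registered parameter, and the loop body are illustrative, not the runtime's actual ready_mutate_lock):

	// Sketch only: writer side of the reader-writer lock, inferred from the fields above.
	static inline void writer_lock_sketch( volatile bool * write_lock,
	                                       volatile bool * volatile * data,
	                                       unsigned n_registered ) {
		// Step 1 : lock out new readers (they spin on write_lock in Step 1 above)
		while( __atomic_exchange_n( write_lock, true, __ATOMIC_ACQUIRE ) ) {}

		// Step 2 : drain readers already inside by acquiring every registered per-reader flag
		for( unsigned i = 0; i < n_registered; i++ ) {
			volatile bool * rlock = data[i];
			if( rlock )
				while( __atomic_exchange_n( rlock, true, __ATOMIC_ACQUIRE ) ) {}
		}
	}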
     
 static inline void ready_schedule_unlock(void) with(*__scheduler_lock) {
 	/* paranoid */ verify( ! __preemption_enabled() );
-	/* paranoid */ verify( kernelTLS().this_proc_id );
-
-	unsigned iproc = kernelTLS().this_proc_id->id;
-	/*paranoid*/ verify(data[iproc].handle == kernelTLS().this_proc_id);
-	/*paranoid*/ verify(iproc < ready);
-	/*paranoid*/ verify(data[iproc].lock);
-	/*paranoid*/ verify(data[iproc].owned);
+	/* paranoid */ verify( data[kernelTLS().sched_id] == &kernelTLS().sched_lock );
+	/* paranoid */ verify( !kernelTLS().this_processor || kernelTLS().this_processor->unique_id == kernelTLS().sched_id );
+	/* paranoid */ verify( kernelTLS().sched_lock );
+	/* paranoid */ verify( kernelTLS().in_sched_lock );
 	#ifdef __CFA_WITH_VERIFY__
 		// Debug, check if this is owned for reading
-		data[iproc].owned = false;
+		kernelTLS().in_sched_lock = false;
 	#endif
-	__atomic_unlock(&data[iproc].lock);
+	__atomic_unlock(&kernelTLS().sched_lock);
 }
 
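The reader unlock is symmetric and, like the lock path, stays entirely in TLS. The writer's release (ready_mutate_unlock) is also outside this diff; a matching sketch under the same assumptions as writer_lock_sketch above (names again illustrative):

	// Sketch only: release every per-reader flag the writer acquired, then the writer flag.
	static inline void writer_unlock_sketch( volatile bool * write_lock,
	                                         volatile bool * volatile * data,
	                                         unsigned n_registered ) {
		for( unsigned i = 0; i < n_registered; i++ ) {
			volatile bool * rlock = data[i];
			if( rlock ) __atomic_store_n( rlock, false, __ATOMIC_RELEASE );
		}
		// let new readers (and the next writer) through again
		__atomic_store_n( write_lock, false, __ATOMIC_RELEASE );
	}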
     
 	static inline bool ready_schedule_islocked(void) {
 		/* paranoid */ verify( ! __preemption_enabled() );
-		/*paranoid*/ verify( kernelTLS().this_proc_id );
-		__processor_id_t * proc = kernelTLS().this_proc_id;
-		return __scheduler_lock->data[proc->id].owned;
+		/* paranoid */ verify( (!kernelTLS().in_sched_lock) || kernelTLS().sched_lock );
+		return kernelTLS().sched_lock;
 	}
 
 	static inline bool ready_mutate_islocked() {
-		return __scheduler_lock->lock;
+		return __scheduler_lock->write_lock;
 	}
 #endif
     
 // Register a processor to a given cluster and get its unique id in return
 // For convenience, also acquires the lock
-static inline uint_fast32_t ready_mutate_register( struct __processor_id_t * proc ) {
-	register_proc_id( proc );
-	return ready_mutate_lock();
+static inline [unsigned, uint_fast32_t] ready_mutate_register() {
+	unsigned id = register_proc_id();
+	uint_fast32_t last = ready_mutate_lock();
+	return [id, last];
 }
 
 // Unregister a processor from a given cluster using its id, getting back the original pointer
 // assumes the lock is acquired
-static inline void ready_mutate_unregister( struct __processor_id_t * proc, uint_fast32_t last_s ) {
+static inline void ready_mutate_unregister( unsigned id, uint_fast32_t last_s ) {
 	ready_mutate_unlock( last_s );
-	unregister_proc_id( proc );
+	unregister_proc_id( id );
 }
 
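ready_mutate_register now returns a Cforall tuple carrying both the new processor id and the previous lock state, so callers get the id without passing a __processor_id_t in. A minimal caller sketch (the surrounding code is illustrative; only the two helpers come from this changeset):

	// register and take the writer lock in one step
	unsigned id; uint_fast32_t last;
	[id, last] = ready_mutate_register();

	// ... mutate the scheduler's ready structures under the writer lock ...

	// release the writer lock and give the slot back
	ready_mutate_unregister( id, last );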