Changeset cd3fc46


Timestamp:
Aug 17, 2022, 12:59:42 PM
Author:
Thierry Delisle <tdelisle@…>
Branches:
ADT, ast-experimental, master, pthread-emulation
Children:
8fca132
Parents:
aec2c022
Message:

Changed scheduler lock to remove one level of pointer.

Location:
libcfa/src/concurrency/kernel
Files:
3 edited

Legend:

    (unprefixed)  Unmodified
    +             Added
    -             Removed
  • libcfa/src/concurrency/kernel/cluster.cfa (raec2c022 → rcd3fc46)

     //=======================================================================
     // Lock-Free registering/unregistering of threads
    -unsigned register_proc_id( void ) with(__scheduler_lock->lock) {
    +unsigned register_proc_id( void ) with(__scheduler_lock.lock) {
             __kernel_rseq_register();

    …
             }

    -        if(max <= alloc) abort("Trying to create more than %ud processors", __scheduler_lock->lock.max);
    +        if(max <= alloc) abort("Trying to create more than %ud processors", __scheduler_lock.lock.max);

             // Step - 2 : F&A to get a new spot in the array.
             uint_fast32_t n = __atomic_fetch_add(&alloc, 1, __ATOMIC_SEQ_CST);
    -        if(max <= n) abort("Trying to create more than %ud processors", __scheduler_lock->lock.max);
    +        if(max <= n) abort("Trying to create more than %ud processors", __scheduler_lock.lock.max);

             // Step - 3 : Mark space as used and then publish it.
    …
     }

    -void unregister_proc_id( unsigned id ) with(__scheduler_lock->lock) {
    +void unregister_proc_id( unsigned id ) with(__scheduler_lock.lock) {
             /* paranoid */ verify(id < ready);
             /* paranoid */ verify(id == kernelTLS().sched_id);
    …
     // Writer side : acquire when changing the ready queue, e.g. adding more
     //  queues or removing them.
    -uint_fast32_t ready_mutate_lock( void ) with(__scheduler_lock->lock) {
    +uint_fast32_t ready_mutate_lock( void ) with(__scheduler_lock.lock) {
             /* paranoid */ verify( ! __preemption_enabled() );
    …
     }

    -void ready_mutate_unlock( uint_fast32_t last_s ) with(__scheduler_lock->lock) {
    +void ready_mutate_unlock( uint_fast32_t last_s ) with(__scheduler_lock.lock) {
             /* paranoid */ verify( ! __preemption_enabled() );
  • libcfa/src/concurrency/kernel/private.hfa (raec2c022 → rcd3fc46)

     union __attribute__((aligned(64))) __scheduler_RWLock_t {
             struct {
    +                __attribute__((aligned(64))) char padding;
    +
                     // total cachelines allocated
    -                unsigned int max;
    +                __attribute__((aligned(64))) unsigned int max;

                     // cachelines currently in use
    …
     void ^?{}(__scheduler_RWLock_t & this);

    -extern __scheduler_RWLock_t * __scheduler_lock;
    +extern __scheduler_RWLock_t __scheduler_lock;

     //-----------------------------------------------------------------------
     // Reader side : acquire when using the ready queue to schedule but not
     //  creating/destroying queues
    -static inline void ready_schedule_lock(void) with(__scheduler_lock->lock) {
    +static inline void ready_schedule_lock(void) with(__scheduler_lock.lock) {
             /* paranoid */ verify( ! __preemption_enabled() );
             /* paranoid */ verify( ! kernelTLS().in_sched_lock );
    …
     }

    -static inline void ready_schedule_unlock(void) with(__scheduler_lock->lock) {
    +static inline void ready_schedule_unlock(void) with(__scheduler_lock.lock) {
             /* paranoid */ verify( ! __preemption_enabled() );
             /* paranoid */ verify( data[kernelTLS().sched_id] == &kernelTLS().sched_lock );
    …
             static inline bool ready_mutate_islocked() {
    -                return __scheduler_lock->lock.write_lock;
    +                return __scheduler_lock.lock.write_lock;
             }
     #endif
  • libcfa/src/concurrency/kernel/startup.cfa (raec2c022 → rcd3fc46)

     KERNEL_STORAGE(thread$,              mainThread);
     KERNEL_STORAGE(__stack_t,            mainThreadCtx);
    -KERNEL_STORAGE(__scheduler_RWLock_t, __scheduler_lock);
    +// KERNEL_STORAGE(__scheduler_RWLock_t, __scheduler_lock);
     KERNEL_STORAGE(eventfd_t,            mainIdleEventFd);
     KERNEL_STORAGE(io_future_t,          mainIdleFuture);
    …
     processor            * mainProcessor;
     thread$              * mainThread;
    -__scheduler_RWLock_t * __scheduler_lock;

     extern "C" {
    …
     };

    +__scheduler_RWLock_t __scheduler_lock @= { 0 };
    +
     #if   defined(CFA_HAVE_LINUX_LIBRSEQ)
             // No data needed
    …
             // Initialize the global scheduler lock
    -        __scheduler_lock = (__scheduler_RWLock_t*)&storage___scheduler_lock;
    -        (*__scheduler_lock){};
    +        // __scheduler_lock = (__scheduler_RWLock_t*)&storage___scheduler_lock;
    +        (__scheduler_lock){};

             // Initialize the main cluster
    …
             ^(*mainCluster){};

    -        ^(*__scheduler_lock){};
    +        ^(__scheduler_lock){};

             ^(__cfa_dbg_global_clusters.list){};
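Taken together, these hunks do what the changeset message says: private.hfa changes the extern declaration from __scheduler_RWLock_t * to __scheduler_RWLock_t, startup.cfa drops the KERNEL_STORAGE aliasing in favour of a directly constructed global (@= { 0 } plus an explicit constructor call), and every use site switches from __scheduler_lock->lock to __scheduler_lock.lock. The following is a minimal sketch of that pattern in plain C rather than CFA; the type and names (scheduler_rwlock_t, sched_lock, startup_*) are illustrative and not the kernel's actual identifiers.

    /* Minimal sketch (plain C, illustrative names) of the refactoring above:
     * turning a global pointer that aliases reserved static storage into a
     * plain global object, so every access drops one dereference.           */
    #include <stdio.h>

    typedef struct {
            unsigned int max;          /* total cachelines allocated */
            unsigned int write_lock;   /* writer flag                */
    } scheduler_rwlock_t;

    /* Before: storage is reserved separately and a global pointer is wired
     * up during startup; every use goes through the pointer (lock->max).    */
    static _Alignas(scheduler_rwlock_t) char storage_sched_lock[sizeof(scheduler_rwlock_t)];
    static scheduler_rwlock_t * sched_lock_ptr;

    static void startup_before(void) {
            sched_lock_ptr = (scheduler_rwlock_t *)&storage_sched_lock;
            sched_lock_ptr->max = 128;
    }

    /* After: the lock is an ordinary zero-initialised global; uses access
     * members directly (lock.max), removing one level of indirection.       */
    static scheduler_rwlock_t sched_lock = { 0 };

    static void startup_after(void) {
            sched_lock.max = 128;
    }

    int main(void) {
            startup_before();
            startup_after();
            printf("before: %u, after: %u\n", sched_lock_ptr->max, sched_lock.max);
            return 0;
    }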
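The private.hfa hunk also pads the lock: a cache-line-aligned padding byte now occupies the first 64-byte line and max is forced onto the next one. A common motivation for this kind of layout is to keep independently accessed fields on separate cache lines and so avoid false sharing, though the changeset itself does not state this. The sketch below, in plain C with the GCC/Clang aligned(64) attribute and an illustrative field set (not the kernel's actual definition), just shows the offsets such a layout produces on a typical x86-64 build.

    /* Sketch of the field alignment added in private.hfa: each aligned(64)
     * member starts on its own 64-byte cache line, so updates to one line
     * do not invalidate the other.                                          */
    #include <stdio.h>
    #include <stddef.h>

    struct __attribute__((aligned(64))) rwlock_layout {
            __attribute__((aligned(64))) char padding;      /* reserves the first cache line */
            __attribute__((aligned(64))) unsigned int max;  /* starts the second cache line  */
            volatile unsigned int write_lock;               /* follows max on the same line  */
    };

    int main(void) {
            printf("offsetof(padding)     = %zu\n", offsetof(struct rwlock_layout, padding));    /* 0   */
            printf("offsetof(max)         = %zu\n", offsetof(struct rwlock_layout, max));        /* 64  */
            printf("offsetof(write_lock)  = %zu\n", offsetof(struct rwlock_layout, write_lock)); /* 68  */
            printf("sizeof(rwlock_layout) = %zu\n", sizeof(struct rwlock_layout));               /* 128 */
            return 0;
    }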