Changeset a7504db


Timestamp: Apr 14, 2021, 4:07:18 PM
Author:    Thierry Delisle <tdelisle@…>
Branches:  ADT, arm-eh, ast-experimental, enum, forall-pointer-decay, jacob/cs343-translation, master, new-ast-unique-expr, pthread-emulation, qualifiedEnum
Children:  6a9b12b
Parents:   a33c113
Message:   Changed how the cluster idle lock is implemented to be covered by the global RW-lock
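The core of the change is a small counting lock: the cluster's idles.lock counter is even while the lock is free and odd while it is held, so a compare-and-swap from an even value to value+1 acquires it, and a plain increment releases it. Below is a minimal, self-contained sketch of that pattern in portable C11 atomics; the changeset itself uses GCC __atomic builtins with __ATOMIC_SEQ_CST, CFA's Pause(), and nests the whole thing inside ready_schedule_lock(). The names idle_lock_acquire/idle_lock_release are illustrative, not from the changeset.

    #include <stdatomic.h>
    #include <stdint.h>

    struct idle_lock { _Atomic uint64_t lock; };   // even = free, odd = held

    static inline void idle_lock_acquire( struct idle_lock * this ) {   // hypothetical name
        for (;;) {
            uint64_t l = atomic_load( &this->lock );
            // Only attempt the CAS when the counter is even (lock free);
            // success flips it to odd, i.e. held.
            if ( (l % 2) == 0
                 && atomic_compare_exchange_weak( &this->lock, &l, l + 1 ) )
                return;
            // Counter was odd (held) or the CAS raced with another thread: retry.
        }
    }

    static inline void idle_lock_release( struct idle_lock * this ) {   // hypothetical name
        atomic_fetch_add( &this->lock, 1 );   // odd -> even: lock is free again
    }

In startup.cfa below, the standalone lock(this.cltr->idles)/unlock(this.cltr->idles) pair around the idles.total update disappears; the update now happens while the global RW-lock is already held (via ready_mutate_register() in init and ready_mutate_lock() in deinit), and in kernel_private.hfa the counting lock itself moves behind ready_schedule_lock().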

Location:  libcfa/src/concurrency
Files:     2 edited

  • libcfa/src/concurrency/kernel/startup.cfa

    --- libcfa/src/concurrency/kernel/startup.cfa (a33c113)
    +++ libcfa/src/concurrency/kernel/startup.cfa (a7504db)
    @@ -489,10 +489,7 @@
     	#endif
     
    -	lock( this.cltr->idles );
    -		int target = this.cltr->idles.total += 1u;
    -	unlock( this.cltr->idles );
    -
     	// Register and Lock the RWlock so no-one pushes/pops while we are changing the queue
     	uint_fast32_t last_size = ready_mutate_register((__processor_id_t*)&this);
    +		int target = this.cltr->idles.total += 1u;
     
     		// Adjust the ready queue size
    @@ -507,10 +504,7 @@
     // Not a ctor, it just preps the destruction but should not destroy members
     static void deinit(processor & this) {
    -	lock( this.cltr->idles );
    -		int target = this.cltr->idles.total -= 1u;
    -	unlock( this.cltr->idles );
    -
     	// Lock the RWlock so no-one pushes/pops while we are changing the queue
     	uint_fast32_t last_size = ready_mutate_lock();
    +		int target = this.cltr->idles.total -= 1u;
     
     		// Adjust the ready queue size
  • libcfa/src/concurrency/kernel_private.hfa

    --- libcfa/src/concurrency/kernel_private.hfa (a33c113)
    +++ libcfa/src/concurrency/kernel_private.hfa (a7504db)
    @@ -89,22 +89,4 @@
     // Unregister a processor from a given cluster using its id, getting back the original pointer
     void unregister_proc_id( struct __processor_id_t * proc );
    -
    -//-----------------------------------------------------------------------
    -// Cluster idle lock/unlock
    -static inline void lock(__cluster_idles & this) {
    -	for() {
    -		uint64_t l = this.lock;
    -		if(
    -			(0 == (l % 2))
    -			&& __atomic_compare_exchange_n(&this.lock, &l, l + 1, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)
    -		) return;
    -		Pause();
    -	}
    -}
    -
    -static inline void unlock(__cluster_idles & this) {
    -	/* paranoid */ verify( 1 == (this.lock % 2) );
    -	__atomic_fetch_add( &this.lock, 1, __ATOMIC_SEQ_CST );
    -}
     
     //=======================================================================
    @@ -263,4 +245,40 @@
     }
     
    +//-----------------------------------------------------------------------
    +// Cluster idle lock/unlock
    +static inline void lock(__cluster_idles & this) {
    +	/* paranoid */ verify( ! __preemption_enabled() );
    +
    +	// Start by locking the global RWlock so that we know no-one is
    +	// adding/removing processors while we mess with the idle lock
    +	ready_schedule_lock();
    +
    +	// Simple counting lock, acquired by incrementing the counter
    +	// to an odd number
    +	for() {
    +		uint64_t l = this.lock;
    +		if(
    +			(0 == (l % 2))
    +			&& __atomic_compare_exchange_n(&this.lock, &l, l + 1, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)
    +		) return;
    +		Pause();
    +	}
    +
    +	/* paranoid */ verify( ! __preemption_enabled() );
    +}
    +
    +static inline void unlock(__cluster_idles & this) {
    +	/* paranoid */ verify( ! __preemption_enabled() );
    +
    +	/* paranoid */ verify( 1 == (this.lock % 2) );
    +	// Simple counting lock, released by incrementing to an even number
    +	__atomic_fetch_add( &this.lock, 1, __ATOMIC_SEQ_CST );
    +
    +	// Release the global lock, which we acquired when locking
    +	ready_schedule_unlock();
    +
    +	/* paranoid */ verify( ! __preemption_enabled() );
    +}
    +
     //=======================================================================
     // Ready-Queue API
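A hedged sketch of how a caller would use the new pair; lock()/unlock() and the preemption-disabled requirement come from the diff above, while the helper adjust_idle is hypothetical. Because lock() now takes ready_schedule_lock() itself, the caller no longer touches the global RW-lock directly.

    // Hypothetical caller, for illustration only. Preemption must already be
    // disabled (lock()'s verify checks this); lock() then acquires the global
    // RW-lock for reading before taking the per-cluster counting lock.
    static void adjust_idle( cluster & cltr ) {
    	lock( cltr.idles );        // ready_schedule_lock() + counting lock
    	// ... read or modify cltr.idles fields here ...
    	unlock( cltr.idles );      // counting-lock release + ready_schedule_unlock()
    }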