Timestamp: Apr 15, 2021, 12:05:16 PM
Author: Peter A. Buhr <pabuhr@…>
Branches: ADT, arm-eh, ast-experimental, enum, forall-pointer-decay, jacob/cs343-translation, master, new-ast-unique-expr, pthread-emulation, qualifiedEnum
Children: 8590328
Parents: 2f5ea69, a4b0aa4
Note: this is a merge changeset; the changes shown below correspond to the merge itself.
Message: Merge branch 'master' of plg.uwaterloo.ca:software/cfa/cfa-cc
File (1 edited):
  • libcfa/src/concurrency/kernel_private.hfa

--- libcfa/src/concurrency/kernel_private.hfa (r2f5ea69)
+++ libcfa/src/concurrency/kernel_private.hfa (r8cfa4ef)
@@ -83,46 +83,10 @@
 // Cluster lock API
 //=======================================================================
-// Cells used by the reader-writer lock
-// while not generic, it only relies on an opaque pointer
-struct __attribute__((aligned(128))) __scheduler_lock_id_t {
-        // Spin lock used as the underlying lock
-        volatile bool lock;
-
-        // Handle pointing to the proc owning this cell
-        // Used for allocating cells and debugging
-        __processor_id_t * volatile handle;
-
-        #ifdef __CFA_WITH_VERIFY__
-                // Debug, check if this is owned for reading
-                bool owned;
-        #endif
-};
-
-static_assert( sizeof(struct __scheduler_lock_id_t) <= __alignof(struct __scheduler_lock_id_t));
-
 // Lock-Free registering/unregistering of threads
 // Register a processor to a given cluster and get its unique id in return
-unsigned doregister( struct __processor_id_t * proc );
+void register_proc_id( struct __processor_id_t * );
 
 // Unregister a processor from a given cluster using its id, getting back the original pointer
-void     unregister( struct __processor_id_t * proc );
-
-//-----------------------------------------------------------------------
-// Cluster idle lock/unlock
-static inline void lock(__cluster_idles & this) {
-        for() {
-                uint64_t l = this.lock;
-                if(
-                        (0 == (l % 2))
-                        && __atomic_compare_exchange_n(&this.lock, &l, l + 1, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)
-                ) return;
-                Pause();
-        }
-}
-
-static inline void unlock(__cluster_idles & this) {
-        /* paranoid */ verify( 1 == (this.lock % 2) );
-        __atomic_fetch_add( &this.lock, 1, __ATOMIC_SEQ_CST );
-}
+void unregister_proc_id( struct __processor_id_t * proc );
 
 //=======================================================================
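The comments above describe lock-free registering/unregistering of processors: a processor registers with a cluster and obtains a unique id, with no lock protecting the registration itself. The sketch below shows one common way such a scheme can be built; it is a rough, self-contained C illustration rather than the cfa-cc implementation, and the names sketch_register, sketch_unregister, slots, and MAX_SLOTS are hypothetical. A processor claims the first free cell with a compare-and-swap and frees it again by storing NULL, so the cell index can serve as the unique id.

#include <stddef.h>

#define MAX_SLOTS 128                       /* hypothetical upper bound on processors */

struct proc;                                /* stands in for __processor_id_t */
static struct proc * volatile slots[MAX_SLOTS];

/* Claim the first free cell with a CAS; the cell index becomes the unique id. */
static unsigned sketch_register( struct proc * p ) {
        for (;;) {
                for (unsigned i = 0; i < MAX_SLOTS; i++) {
                        struct proc * expected = NULL;
                        if ( __atomic_compare_exchange_n( &slots[i], &expected, p, false,
                                                          __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST ) )
                                return i;
                }
                /* all cells busy: retry (a real implementation would grow or fail) */
        }
}

/* Release the cell so another processor can claim it. */
static void sketch_unregister( unsigned id ) {
        __atomic_store_n( &slots[id], (struct proc *)NULL, __ATOMIC_SEQ_CST );
}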
     
@@ -152,4 +116,22 @@
         __atomic_store_n(ll, (bool)false, __ATOMIC_RELEASE);
 }
+
+// Cells used by the reader-writer lock
+// while not generic, it only relies on an opaque pointer
+struct __attribute__((aligned(128))) __scheduler_lock_id_t {
+        // Spin lock used as the underlying lock
+        volatile bool lock;
+
+        // Handle pointing to the proc owning this cell
+        // Used for allocating cells and debugging
+        __processor_id_t * volatile handle;
+
+        #ifdef __CFA_WITH_VERIFY__
+                // Debug, check if this is owned for reading
+                bool owned;
+        #endif
+};
+
+static_assert( sizeof(struct __scheduler_lock_id_t) <= __alignof(struct __scheduler_lock_id_t));
 
 //-----------------------------------------------------------------------
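The __scheduler_lock_id_t cells moved here are padded: the struct is aligned to 128 bytes and the static_assert checks that its size stays within that alignment, so every reader cell of the reader-writer lock sits in its own aligned slot and spinning on one cell does not bounce the cache line holding a neighbouring cell. The following is a minimal C sketch of the same padding-plus-size-check pattern; reader_cell is a hypothetical stand-in for the real struct, not the cfa-cc type.

#include <stdbool.h>
#include <assert.h>

/* Hypothetical stand-in for __scheduler_lock_id_t: padded so that each
   reader cell occupies its own 128-byte aligned slot. */
struct __attribute__((aligned(128))) reader_cell {
        volatile bool lock;            /* per-reader spin flag */
        void * volatile handle;        /* owner of the cell */
};

/* Same check as the header: a cell must not spill past its alignment,
   otherwise two cells could end up sharing a cache line. */
static_assert( sizeof(struct reader_cell) <= __alignof(struct reader_cell),
               "reader_cell spills into the next aligned slot" );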
     
@@ -247,4 +229,56 @@
 void ready_mutate_unlock( uint_fast32_t /* value returned by lock */ );
 
+//-----------------------------------------------------------------------
+// Lock-Free registering/unregistering of threads
+// Register a processor to a given cluster and get its unique id in return
+// For convenience, also acquires the lock
+static inline uint_fast32_t ready_mutate_register( struct __processor_id_t * proc ) {
+        register_proc_id( proc );
+        return ready_mutate_lock();
+}
+
+// Unregister a processor from a given cluster using its id, getting back the original pointer
+// assumes the lock is acquired
+static inline void ready_mutate_unregister( struct __processor_id_t * proc, uint_fast32_t last_s ) {
+        ready_mutate_unlock( last_s );
+        unregister_proc_id( proc );
+}
+
+//-----------------------------------------------------------------------
+// Cluster idle lock/unlock
+static inline void lock(__cluster_proc_list & this) {
+        /* paranoid */ verify( ! __preemption_enabled() );
+
+        // Start by locking the global RWlock so that we know no-one is
+        // adding/removing processors while we mess with the idle lock
+        ready_schedule_lock();
+
+        // Simple counting lock, acquired by incrementing the counter
+        // to an odd number
+        for() {
+                uint64_t l = this.lock;
+                if(
+                        (0 == (l % 2))
+                        && __atomic_compare_exchange_n(&this.lock, &l, l + 1, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)
+                ) return;
+                Pause();
+        }
+
+        /* paranoid */ verify( ! __preemption_enabled() );
+}
+
+static inline void unlock(__cluster_proc_list & this) {
+        /* paranoid */ verify( ! __preemption_enabled() );
+
+        /* paranoid */ verify( 1 == (this.lock % 2) );
+        // Simple counting lock, released by incrementing to an even number
+        __atomic_fetch_add( &this.lock, 1, __ATOMIC_SEQ_CST );
+
+        // Release the global lock, which we acquired when locking
+        ready_schedule_unlock();
+
+        /* paranoid */ verify( ! __preemption_enabled() );
+}
+
 //=======================================================================
 // Ready-Queue API
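The lock/unlock pair added above implements the counting lock its comments describe: an even counter value means unlocked, acquiring bumps the counter to an odd value with a compare-and-swap, and releasing bumps it back to even. Below is a minimal, self-contained C sketch of just that mechanism, using a hypothetical counting_lock type and leaving out the ready_schedule_lock/unlock calls and preemption checks that the cluster code wraps around it.

#include <stdint.h>

struct counting_lock { volatile uint64_t count; };   /* hypothetical standalone type */

static inline void counting_lock_acquire( struct counting_lock * l ) {
        for (;;) {
                uint64_t c = l->count;
                /* even count == unlocked: try to bump it to odd to take the lock */
                if ( (c % 2) == 0
                     && __atomic_compare_exchange_n( &l->count, &c, c + 1, false,
                                                     __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST ) )
                        return;
                /* lock held (odd count) or the CAS raced: spin and retry */
        }
}

static inline void counting_lock_release( struct counting_lock * l ) {
        /* back to an even value; count / 2 is the number of completed critical sections */
        __atomic_fetch_add( &l->count, 1, __ATOMIC_SEQ_CST );
}

One property of this shape of lock is that the counter doubles as a change count: an observer that reads the same even value before and after an operation knows no acquire/release pair completed in between.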
     
@@ -278,9 +312,9 @@
 //-----------------------------------------------------------------------
 // Increase the width of the ready queue (number of lanes) by 4
-unsigned ready_queue_grow  (struct cluster * cltr, int target);
+void ready_queue_grow  (struct cluster * cltr);
 
 //-----------------------------------------------------------------------
 // Decrease the width of the ready queue (number of lanes) by 4
-void ready_queue_shrink(struct cluster * cltr, int target);
+void ready_queue_shrink(struct cluster * cltr);
 
 