Changeset summary: 1 file edited.

Legend:

  Unmodified | Added | Removed
  • libcfa/src/concurrency/kernel_private.hfa

    Diff from r1eb239e4 to rfd9b524
    1010// Created On       : Mon Feb 13 12:27:26 2017
    1111// Last Modified By : Peter A. Buhr
    12 // Last Modified On : Sat Nov 30 19:25:02 2019
    13 // Update Count     : 8
     12// Last Modified On : Wed Aug 12 08:21:33 2020
     13// Update Count     : 9
    1414//
    1515
     
    121121void     unregister( struct __processor_id_t * proc );
    122122
    123 //-----------------------------------------------------------------------
    124 // Cluster idle lock/unlock
    125 static inline void lock(__cluster_idles & this) {
    126         for() {
    127                 uint64_t l = this.lock;
    128                 if(
    129                         (0 == (l % 2))
    130                         && __atomic_compare_exchange_n(&this.lock, &l, l + 1, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)
    131                 ) return;
    132                 Pause();
    133         }
    134 }
    135 
    136 static inline void unlock(__cluster_idles & this) {
    137         /* paranoid */ verify( 1 == (this.lock % 2) );
    138         __atomic_fetch_add( &this.lock, 1, __ATOMIC_SEQ_CST );
    139 }
    140 
    141123//=======================================================================
    142124// Reader-writer lock implementation
     
    150132        while( __builtin_expect(__atomic_exchange_n(ll, (bool)true, __ATOMIC_SEQ_CST), false) ) {
    151133                while(__atomic_load_n(ll, (int)__ATOMIC_RELAXED))
    152                         asm volatile("pause");
     134                        Pause();
    153135        }
    154136        /* paranoid */ verify(*ll);
     
    204186        // Step 1 : make sure no writer are in the middle of the critical section
    205187        while(__atomic_load_n(&lock, (int)__ATOMIC_RELAXED))
    206                 asm volatile("pause");
     188                Pause();
    207189
    208190        // Fence needed because we don't want to start trying to acquire the lock
     
    266248// pop thread from the ready queue of a cluster
    267249// returns 0p if empty
    268 // May return 0p spuriously
    269250__attribute__((hot)) struct $thread * pop(struct cluster * cltr);
    270 
    271 //-----------------------------------------------------------------------
    272 // pop thread from the ready queue of a cluster
    273 // returns 0p if empty
    274 // guaranteed to find any threads added before this call
    275 __attribute__((hot)) struct $thread * pop_slow(struct cluster * cltr);
    276251
    277252//-----------------------------------------------------------------------
Note: See TracChangeset for help on using the changeset viewer.