Timestamp:
Nov 9, 2017, 1:58:39 PM
Author:
Thierry Delisle <tdelisle@…>
Branches:
ADT, aaron-thesis, arm-eh, ast-experimental, cleanup-dtors, deferred_resn, demangler, enum, forall-pointer-decay, jacob/cs343-translation, jenkins-sandbox, master, new-ast, new-ast-unique-expr, new-env, no_list, persistent-indexer, pthread-emulation, qualifiedEnum, resolv-new, with_gc
Children:
34c6c767
Parents:
3351cc0
Message:

Moved spinlocks to bits/locks.h
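
The call sites below now pass the lock itself rather than its address (e.g. lock_yield( this->lock ) instead of lock_yield( &this->lock )), which implies the relocated spinlock type, __spinlock_t, is taken by reference. The bits/locks.h header is not part of this changeset, so the following is only a sketch of the assumed interface; the field name and exact signatures are illustrative, not the actual declarations.

	// Sketch only: bits/locks.h is not shown in this changeset; this models the
	// by-reference interface that the updated call sites in monitor.c assume.
	struct __spinlock_t {
		volatile bool lock;                     // assumed field; the real layout may differ
	};

	void lock_yield( __spinlock_t & this );     // hypothetical signature: spin, yielding to reduce contention
	void unlock    ( __spinlock_t & this );     // hypothetical signature: release the lock

	// Usage matching the new call sites:
	//     lock_yield( this->lock DEBUG_CTX2 ); // previously: lock_yield( &this->lock DEBUG_CTX2 );
	//     unlock( this->lock );                // previously: unlock( &this->lock );

Taking the lock by reference removes the explicit address-of at every call site, which is also why the diff dereferences the pointers stored in the locks arrays (e.g. lock_yield( *locks[i] DEBUG_CTX2 )).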

File:
1 edited

  • src/libcfa/concurrency/monitor.c

--- src/libcfa/concurrency/monitor.c	(r3351cc0)
+++ src/libcfa/concurrency/monitor.c	(rea7d2b0)
@@ -34,11 +34,11 @@
 static inline bool is_accepted( monitor_desc * this, const __monitor_group_t & monitors );
 
-static inline void lock_all  ( spinlock * locks [], __lock_size_t count );
-static inline void lock_all  ( monitor_desc * source [], spinlock * /*out*/ locks [], __lock_size_t count );
-static inline void unlock_all( spinlock * locks [], __lock_size_t count );
+static inline void lock_all  ( __spinlock_t * locks [], __lock_size_t count );
+static inline void lock_all  ( monitor_desc * source [], __spinlock_t * /*out*/ locks [], __lock_size_t count );
+static inline void unlock_all( __spinlock_t * locks [], __lock_size_t count );
 static inline void unlock_all( monitor_desc * locks [], __lock_size_t count );
 
-static inline void save   ( monitor_desc * ctx [], __lock_size_t count, spinlock * locks [], unsigned int /*out*/ recursions [], __waitfor_mask_t /*out*/ masks [] );
-static inline void restore( monitor_desc * ctx [], __lock_size_t count, spinlock * locks [], unsigned int /*in */ recursions [], __waitfor_mask_t /*in */ masks [] );
+static inline void save   ( monitor_desc * ctx [], __lock_size_t count, __spinlock_t * locks [], unsigned int /*out*/ recursions [], __waitfor_mask_t /*out*/ masks [] );
+static inline void restore( monitor_desc * ctx [], __lock_size_t count, __spinlock_t * locks [], unsigned int /*in */ recursions [], __waitfor_mask_t /*in */ masks [] );
 
 static inline void init     ( __lock_size_t count, monitor_desc * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] );
@@ -71,5 +71,5 @@
 	unsigned int recursions[ count ];                         /* Save the current recursion levels to restore them later                             */ \
 	__waitfor_mask_t masks [ count ];                         /* Save the current waitfor masks to restore them later                                */ \
-	spinlock *   locks    [ count ];                          /* We need to pass-in an array of locks to BlockInternal                               */ \
+	__spinlock_t *   locks [ count ];                         /* We need to pass-in an array of locks to BlockInternal                               */ \
 
 #define monitor_save    save   ( monitors, count, locks, recursions, masks )
@@ -85,5 +85,5 @@
 	static void __enter_monitor_desc( monitor_desc * this, const __monitor_group_t & group ) {
 		// Lock the monitor spinlock, lock_yield to reduce contention
-		lock_yield( &this->lock DEBUG_CTX2 );
+		lock_yield( this->lock DEBUG_CTX2 );
 		thread_desc * thrd = this_thread;
 
@@ -127,5 +127,5 @@
 
 		// Release the lock and leave
-		unlock( &this->lock );
+		unlock( this->lock );
 		return;
 	}
@@ -133,5 +133,5 @@
 	static void __enter_monitor_dtor( monitor_desc * this, fptr_t func ) {
 		// Lock the monitor spinlock, lock_yield to reduce contention
-		lock_yield( &this->lock DEBUG_CTX2 );
+		lock_yield( this->lock DEBUG_CTX2 );
 		thread_desc * thrd = this_thread;
 
@@ -145,5 +145,5 @@
 			set_owner( this, thrd );
 
-			unlock( &this->lock );
+			unlock( this->lock );
 			return;
 		}
@@ -197,5 +197,5 @@
 	void __leave_monitor_desc( monitor_desc * this ) {
 		// Lock the monitor spinlock, lock_yield to reduce contention
-		lock_yield( &this->lock DEBUG_CTX2 );
+		lock_yield( this->lock DEBUG_CTX2 );
 
 		LIB_DEBUG_PRINT_SAFE("Kernel : %10p Leaving mon %p (%p)\n", this_thread, this, this->owner);
@@ -210,5 +210,5 @@
 		if( this->recursion != 0) {
 			LIB_DEBUG_PRINT_SAFE("Kernel :  recursion still %d\n", this->recursion);
-			unlock( &this->lock );
+			unlock( this->lock );
 			return;
 		}
@@ -218,5 +218,5 @@
 
 		// We can now let other threads in safely
-		unlock( &this->lock );
+		unlock( this->lock );
 
 		//We need to wake-up the thread
@@ -243,5 +243,5 @@
 
 		// Lock the monitor now
-		lock_yield( &this->lock DEBUG_CTX2 );
+		lock_yield( this->lock DEBUG_CTX2 );
 
 		disable_interrupts();
@@ -730,21 +730,21 @@
 }
 
-static inline void lock_all( spinlock * locks [], __lock_size_t count ) {
+static inline void lock_all( __spinlock_t * locks [], __lock_size_t count ) {
 	for( __lock_size_t i = 0; i < count; i++ ) {
-		lock_yield( locks[i] DEBUG_CTX2 );
-	}
-}
-
-static inline void lock_all( monitor_desc * source [], spinlock * /*out*/ locks [], __lock_size_t count ) {
+		lock_yield( *locks[i] DEBUG_CTX2 );
+	}
+}
+
+static inline void lock_all( monitor_desc * source [], __spinlock_t * /*out*/ locks [], __lock_size_t count ) {
 	for( __lock_size_t i = 0; i < count; i++ ) {
-		spinlock * l = &source[i]->lock;
-		lock_yield( l DEBUG_CTX2 );
+		__spinlock_t * l = &source[i]->lock;
+		lock_yield( *l DEBUG_CTX2 );
 		if(locks) locks[i] = l;
 	}
 }
 
-static inline void unlock_all( spinlock * locks [], __lock_size_t count ) {
+static inline void unlock_all( __spinlock_t * locks [], __lock_size_t count ) {
 	for( __lock_size_t i = 0; i < count; i++ ) {
-		unlock( locks[i] );
+		unlock( *locks[i] );
 	}
 }
@@ -752,5 +752,5 @@
 static inline void unlock_all( monitor_desc * locks [], __lock_size_t count ) {
 	for( __lock_size_t i = 0; i < count; i++ ) {
-		unlock( &locks[i]->lock );
+		unlock( locks[i]->lock );
 	}
 }
@@ -759,5 +759,5 @@
 	monitor_desc * ctx [],
 	__lock_size_t count,
-	__attribute((unused)) spinlock * locks [],
+	__attribute((unused)) __spinlock_t * locks [],
 	unsigned int /*out*/ recursions [],
 	__waitfor_mask_t /*out*/ masks []
@@ -772,5 +772,5 @@
 	monitor_desc * ctx [],
 	__lock_size_t count,
-	spinlock * locks [],
+	__spinlock_t * locks [],
 	unsigned int /*out*/ recursions [],
 	__waitfor_mask_t /*out*/ masks []