Ignore:
File:
1 edited

Legend:

Unmodified
Added
Removed
  • libcfa/src/concurrency/kernel/private.hfa

    r2284d20 r2af1943  
    8888#elif defined(CFA_HAVE_LINUX_RSEQ_H)
    8989        extern "Cforall" {
    90                 extern __attribute__((aligned(64))) __thread volatile struct rseq __cfaabi_rseq;
     90                extern __attribute__((aligned(64))) thread_local volatile struct rseq __cfaabi_rseq;
    9191        }
    9292#else
     
    139139//-----------------------------------------------------------------------------
    140140// I/O
    141 io_arbiter$ * create(void);
    142 void destroy(io_arbiter$ *);
     141$io_arbiter * create(void);
     142void destroy($io_arbiter *);
    143143
    144144//=======================================================================
     
    161161// Blocking acquire
    162162static inline void __atomic_acquire(volatile bool * ll) {
    163         /* paranoid */ verify( ! __preemption_enabled() );
    164         /* paranoid */ verify(ll);
    165 
    166163        while( __builtin_expect(__atomic_exchange_n(ll, (bool)true, __ATOMIC_SEQ_CST), false) ) {
    167164                while(__atomic_load_n(ll, (int)__ATOMIC_RELAXED))
     
    169166        }
    170167        /* paranoid */ verify(*ll);
    171         /* paranoid */ verify( ! __preemption_enabled() );
    172168}
    173169
    174170// Non-Blocking acquire
    175171static inline bool __atomic_try_acquire(volatile bool * ll) {
    176         /* paranoid */ verify( ! __preemption_enabled() );
    177         /* paranoid */ verify(ll);
    178 
    179172        return !__atomic_exchange_n(ll, (bool)true, __ATOMIC_SEQ_CST);
    180173}
     
    182175// Release
    183176static inline void __atomic_unlock(volatile bool * ll) {
    184         /* paranoid */ verify( ! __preemption_enabled() );
    185         /* paranoid */ verify(ll);
    186177        /* paranoid */ verify(*ll);
    187178        __atomic_store_n(ll, (bool)false, __ATOMIC_RELEASE);
     
    193184// have been hard-coded to for the ready-queue for
    194185// simplicity and performance
    195 union __attribute__((aligned(64))) __scheduler_RWLock_t {
    196         struct {
    197                 __attribute__((aligned(64))) char padding;
    198 
    199                 // total cachelines allocated
    200                 __attribute__((aligned(64))) unsigned int max;
    201 
    202                 // cachelines currently in use
    203                 volatile unsigned int alloc;
    204 
    205                 // cachelines ready to iterate over
    206                 // (!= to alloc when thread is in second half of doregister)
    207                 volatile unsigned int ready;
    208 
    209                 // writer lock
    210                 volatile bool write_lock;
    211 
    212                 // data pointer
    213                 volatile bool * volatile * data;
    214         } lock;
    215         char pad[192];
     186struct __scheduler_RWLock_t {
     187        // total cachelines allocated
     188        unsigned int max;
     189
     190        // cachelines currently in use
     191        volatile unsigned int alloc;
     192
     193        // cachelines ready to iterate over
     194        // (!= to alloc when thread is in second half of doregister)
     195        volatile unsigned int ready;
     196
     197        // writer lock
     198        volatile bool write_lock;
     199
     200        // data pointer
     201        volatile bool * volatile * data;
    216202};
    217203
     
    219205void ^?{}(__scheduler_RWLock_t & this);
    220206
    221 extern __scheduler_RWLock_t __scheduler_lock;
     207extern __scheduler_RWLock_t * __scheduler_lock;
    222208
    223209//-----------------------------------------------------------------------
    224210// Reader side : acquire when using the ready queue to schedule but not
    225211//  creating/destroying queues
    226 static inline void ready_schedule_lock(void) with(__scheduler_lock.lock) {
     212static inline void ready_schedule_lock(void) with(*__scheduler_lock) {
    227213        /* paranoid */ verify( ! __preemption_enabled() );
    228214        /* paranoid */ verify( ! kernelTLS().in_sched_lock );
     
    249235}
    250236
    251 static inline void ready_schedule_unlock(void) with(__scheduler_lock.lock) {
     237static inline void ready_schedule_unlock(void) with(*__scheduler_lock) {
    252238        /* paranoid */ verify( ! __preemption_enabled() );
    253239        /* paranoid */ verify( data[kernelTLS().sched_id] == &kernelTLS().sched_lock );
     
    270256
    271257        static inline bool ready_mutate_islocked() {
    272                 return __scheduler_lock.lock.write_lock;
     258                return __scheduler_lock->write_lock;
    273259        }
    274260#endif
Note: See TracChangeset for help on using the changeset viewer.