Timestamp:
Sep 21, 2022, 11:02:15 AM (22 months ago)
Author:
Thierry Delisle <tdelisle@…>
Branches:
ADT, ast-experimental, master, pthread-emulation
Children:
95dab9e
Parents:
428adbc (diff), 0bd46fd (diff)
Note: this is a merge changeset; the changes displayed below correspond to the merge itself.
Use the (diff) links above to see all the changes relative to each parent.
Message:

Merge branch 'master' into pthread-emulation

File:
1 edited

  • libcfa/src/concurrency/kernel/private.hfa

r428adbc → r7f6a7c9

 #elif defined(CFA_HAVE_LINUX_RSEQ_H)
 	extern "Cforall" {
-		extern __attribute__((aligned(64))) thread_local volatile struct rseq __cfaabi_rseq;
+		extern __attribute__((aligned(64))) __thread volatile struct rseq __cfaabi_rseq;
 	}
 #else
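The hunk above swaps the portable `thread_local` spelling for GCC's `__thread` storage class on the per-thread rseq area. A minimal sketch of what such a declaration means, assuming a GCC-style toolchain (`tls_slot` is an illustrative variable, not part of the changeset):

	#include <stdio.h>

	// GNU/GCC storage class: one copy per thread, statically initialized,
	// and accessed directly through the TLS segment with no lazy-init
	// guard of the kind C++'s thread_local may require.
	__attribute__((aligned(64))) __thread volatile int tls_slot = 0;

	int main(void) {
		tls_slot = 1;                         // writes this thread's copy only
		printf("tls_slot = %d\n", tls_slot);
		return 0;
	}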
     
 //-----------------------------------------------------------------------------
 // I/O
-$io_arbiter * create(void);
-void destroy($io_arbiter *);
+io_arbiter$ * create(void);
+void destroy(io_arbiter$ *);
 
 //=======================================================================
     
 // Blocking acquire
 static inline void __atomic_acquire(volatile bool * ll) {
+	/* paranoid */ verify( ! __preemption_enabled() );
+	/* paranoid */ verify(ll);
+
 	while( __builtin_expect(__atomic_exchange_n(ll, (bool)true, __ATOMIC_SEQ_CST), false) ) {
 		while(__atomic_load_n(ll, (int)__ATOMIC_RELAXED))
 			Pause();
 	}
 	/* paranoid */ verify(*ll);
+	/* paranoid */ verify( ! __preemption_enabled() );
 }
 
 // Non-Blocking acquire
 static inline bool __atomic_try_acquire(volatile bool * ll) {
+	/* paranoid */ verify( ! __preemption_enabled() );
+	/* paranoid */ verify(ll);
+
 	return !__atomic_exchange_n(ll, (bool)true, __ATOMIC_SEQ_CST);
 }
 
 // Release
 static inline void __atomic_unlock(volatile bool * ll) {
+	/* paranoid */ verify( ! __preemption_enabled() );
+	/* paranoid */ verify(ll);
 	/* paranoid */ verify(*ll);
 	__atomic_store_n(ll, (bool)false, __ATOMIC_RELEASE);
 }
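The acquire/release pair above is a classic test-and-test-and-set spinlock; the merge only adds paranoid assertions around it. A self-contained C sketch of the same pattern, without the Cforall `verify` machinery (the `spin_*` names are illustrative, not from the changeset):

	#include <stdbool.h>

	// Blocking acquire: the outer exchange tries to take the lock; on
	// failure, spin on a relaxed load so the cacheline stays in shared
	// state until the holder releases it (test-and-test-and-set).
	static inline void spin_lock(volatile bool * ll) {
		while (__atomic_exchange_n(ll, true, __ATOMIC_SEQ_CST)) {
			while (__atomic_load_n(ll, __ATOMIC_RELAXED))
				/* spin */;
		}
	}

	// Non-blocking acquire: returns true if the lock was taken.
	static inline bool spin_trylock(volatile bool * ll) {
		return !__atomic_exchange_n(ll, true, __ATOMIC_SEQ_CST);
	}

	// Release: a release store is enough to publish the critical section
	// to the next acquirer.
	static inline void spin_unlock(volatile bool * ll) {
		__atomic_store_n(ll, false, __ATOMIC_RELEASE);
	}

The relaxed inner load is the point of the idiom: only the outer exchange writes the lock word, so waiters do not generate read-for-ownership traffic while the lock is held.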
     
 // have been hard-coded for the ready-queue for
 // simplicity and performance
-struct __scheduler_RWLock_t {
-	// total cachelines allocated
-	unsigned int max;
-
-	// cachelines currently in use
-	volatile unsigned int alloc;
-
-	// cachelines ready to iterate over
-	// (!= to alloc when thread is in second half of doregister)
-	volatile unsigned int ready;
-
-	// writer lock
-	volatile bool write_lock;
-
-	// data pointer
-	volatile bool * volatile * data;
+union __attribute__((aligned(64))) __scheduler_RWLock_t {
+	struct {
+		__attribute__((aligned(64))) char padding;
+
+		// total cachelines allocated
+		__attribute__((aligned(64))) unsigned int max;
+
+		// cachelines currently in use
+		volatile unsigned int alloc;
+
+		// cachelines ready to iterate over
+		// (!= to alloc when thread is in second half of doregister)
+		volatile unsigned int ready;
+
+		// writer lock
+		volatile bool write_lock;
+
+		// data pointer
+		volatile bool * volatile * data;
+	} lock;
+	char pad[192];
 };
 
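The struct-to-union rewrite above is a cacheline-padding idiom: the `char pad[192]` arm pins the type at exactly three 64-byte cachelines, and the internal `aligned(64)` attributes start the hot fields on fresh lines, so neighbouring globals cannot false-share with the lock. A minimal sketch of the same idiom in plain C, with illustrative field names rather than the ones in the diff:

	#include <stdbool.h>

	union __attribute__((aligned(64))) padded_rwlock {
		struct {
			// each aligned member begins a fresh 64-byte cacheline
			__attribute__((aligned(64))) unsigned int max;
			volatile unsigned int alloc;   // shares max's cacheline
			volatile bool write_lock;
		} lock;
		char pad[192];                     // pins sizeof at 3 cachelines
	};

	_Static_assert(sizeof(union padded_rwlock) == 192,
	               "exactly three cachelines");

Because the union's size is the maximum of its arms, growing or shrinking the field block cannot silently change the object's footprint; the static assert would catch any drift past the pad.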
     
 void ^?{}(__scheduler_RWLock_t & this);
 
-extern __scheduler_RWLock_t * __scheduler_lock;
+extern __scheduler_RWLock_t __scheduler_lock;
 
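This hunk, together with the `ready_schedule_*` changes below, replaces a heap-allocated singleton reached through a global pointer with a global object of static storage duration: one less load per lock operation and no startup allocation. A small hedged C sketch of the difference, with hypothetical names mirroring the shape of the diff:

	#include <stdbool.h>
	#include <stdio.h>

	struct lock_t { volatile bool write_lock; };

	// Static storage duration: the object's address is a link-time
	// constant, so each access is a direct load, with no pointer
	// dereference and no allocation at startup.
	static struct lock_t the_lock;

	static inline bool is_write_locked(void) {
		return the_lock.write_lock;   // one load, no indirection
	}

	int main(void) {
		printf("locked: %d\n", is_write_locked());
		return 0;
	}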
 //-----------------------------------------------------------------------
 // Reader side : acquire when using the ready queue to schedule but not
 //  creating/destroying queues
-static inline void ready_schedule_lock(void) with(*__scheduler_lock) {
+static inline void ready_schedule_lock(void) with(__scheduler_lock.lock) {
 	/* paranoid */ verify( ! __preemption_enabled() );
 	/* paranoid */ verify( ! kernelTLS().in_sched_lock );
…
 }
 
-static inline void ready_schedule_unlock(void) with(*__scheduler_lock) {
+static inline void ready_schedule_unlock(void) with(__scheduler_lock.lock) {
 	/* paranoid */ verify( ! __preemption_enabled() );
 	/* paranoid */ verify( data[kernelTLS().sched_id] == &kernelTLS().sched_lock );
…
 
 	static inline bool ready_mutate_islocked() {
-		return __scheduler_lock->write_lock;
+		return __scheduler_lock.lock.write_lock;
 	}
 #endif