Ignore:
File:
1 edited

Legend:

Unmodified
Added
Removed
  • libcfa/src/concurrency/kernel/private.hfa

    r8bee858 rcd3fc46  
    184184// have been hard-coded to for the ready-queue for
    185185// simplicity and performance
    186 struct __scheduler_RWLock_t {
    187         // total cachelines allocated
    188         unsigned int max;
    189 
    190         // cachelines currently in use
    191         volatile unsigned int alloc;
    192 
    193         // cachelines ready to iterate over
    194         // (!= to alloc when thread is in second half of doregister)
    195         volatile unsigned int ready;
    196 
    197         // writer lock
    198         volatile bool write_lock;
    199 
    200         // data pointer
    201         volatile bool * volatile * data;
     186union __attribute__((aligned(64))) __scheduler_RWLock_t {
     187        struct {
     188                __attribute__((aligned(64))) char padding;
     189
     190                // total cachelines allocated
     191                __attribute__((aligned(64))) unsigned int max;
     192
     193                // cachelines currently in use
     194                volatile unsigned int alloc;
     195
     196                // cachelines ready to iterate over
     197                // (!= to alloc when thread is in second half of doregister)
     198                volatile unsigned int ready;
     199
     200                // writer lock
     201                volatile bool write_lock;
     202
     203                // data pointer
     204                volatile bool * volatile * data;
     205        } lock;
     206        char pad[192];
    202207};
    203208
     
    205210void ^?{}(__scheduler_RWLock_t & this);
    206211
    207 extern __scheduler_RWLock_t * __scheduler_lock;
     212extern __scheduler_RWLock_t __scheduler_lock;
    208213
    209214//-----------------------------------------------------------------------
    210215// Reader side : acquire when using the ready queue to schedule but not
    211216//  creating/destroying queues
    212 static inline void ready_schedule_lock(void) with(*__scheduler_lock) {
     217static inline void ready_schedule_lock(void) with(__scheduler_lock.lock) {
    213218        /* paranoid */ verify( ! __preemption_enabled() );
    214219        /* paranoid */ verify( ! kernelTLS().in_sched_lock );
     
    235240}
    236241
    237 static inline void ready_schedule_unlock(void) with(*__scheduler_lock) {
     242static inline void ready_schedule_unlock(void) with(__scheduler_lock.lock) {
    238243        /* paranoid */ verify( ! __preemption_enabled() );
    239244        /* paranoid */ verify( data[kernelTLS().sched_id] == &kernelTLS().sched_lock );
     
    256261
    257262        static inline bool ready_mutate_islocked() {
    258                 return __scheduler_lock->write_lock;
     263                return __scheduler_lock.lock.write_lock;
    259264        }
    260265#endif
Note: See TracChangeset for help on using the changeset viewer.