Files: 1 edited

Legend: unchanged lines appear without a prefix; lines present only in the old revision (removed) are prefixed with '-', and lines present only in the new revision (added) are prefixed with '+'.
libcfa/src/concurrency/kernel/private.hfa
r2284d20 → r2af1943

 88  88    #elif defined(CFA_HAVE_LINUX_RSEQ_H)
 89  89    extern "Cforall" {
 90      - 	extern __attribute__((aligned(64))) __thread volatile struct rseq __cfaabi_rseq;
     90  + 	extern __attribute__((aligned(64))) thread_local volatile struct rseq __cfaabi_rseq;
 91  91    }
 92  92    #else
… …
139 139    //-----------------------------------------------------------------------------
140 140    // I/O
141      - io_arbiter$* create(void);
142      - void destroy( io_arbiter$*);
    141  + $io_arbiter * create(void);
    142  + void destroy($io_arbiter *);
143 143    
144 144    //=======================================================================
… …
161 161    // Blocking acquire
162 162    static inline void __atomic_acquire(volatile bool * ll) {
163      - 	/* paranoid */ verify( ! __preemption_enabled() );
164      - 	/* paranoid */ verify(ll);
165      - 
166 163    	while( __builtin_expect(__atomic_exchange_n(ll, (bool)true, __ATOMIC_SEQ_CST), false) ) {
167 164    		while(__atomic_load_n(ll, (int)__ATOMIC_RELAXED))
… …
169 166    	}
170 167    	/* paranoid */ verify(*ll);
171      - 	/* paranoid */ verify( ! __preemption_enabled() );
172 168    }
173 169    
174 170    // Non-Blocking acquire
175 171    static inline bool __atomic_try_acquire(volatile bool * ll) {
176      - 	/* paranoid */ verify( ! __preemption_enabled() );
177      - 	/* paranoid */ verify(ll);
178      - 
179 172    	return !__atomic_exchange_n(ll, (bool)true, __ATOMIC_SEQ_CST);
180 173    }
… …
182 175    // Release
183 176    static inline void __atomic_unlock(volatile bool * ll) {
184      - 	/* paranoid */ verify( ! __preemption_enabled() );
185      - 	/* paranoid */ verify(ll);
186 177    	/* paranoid */ verify(*ll);
187 178    	__atomic_store_n(ll, (bool)false, __ATOMIC_RELEASE);
… …
193 184    // have been hard-coded to for the ready-queue for
194 185    // simplicity and performance
195      - union __attribute__((aligned(64))) __scheduler_RWLock_t {
196      - 	struct {
197      - 		__attribute__((aligned(64))) char padding;
198      - 
199      - 		// total cachelines allocated
200      - 		__attribute__((aligned(64))) unsigned int max;
201      - 
202      - 		// cachelines currently in use
203      - 		volatile unsigned int alloc;
204      - 
205      - 		// cachelines ready to itereate over
206      - 		// (!= to alloc when thread is in second half of doregister)
207      - 		volatile unsigned int ready;
208      - 
209      - 		// writer lock
210      - 		volatile bool write_lock;
211      - 
212      - 		// data pointer
213      - 		volatile bool * volatile * data;
214      - 	} lock;
215      - 	char pad[192];
    186  + struct __scheduler_RWLock_t {
    187  + 	// total cachelines allocated
    188  + 	unsigned int max;
    189  + 
    190  + 	// cachelines currently in use
    191  + 	volatile unsigned int alloc;
    192  + 
    193  + 	// cachelines ready to itereate over
    194  + 	// (!= to alloc when thread is in second half of doregister)
    195  + 	volatile unsigned int ready;
    196  + 
    197  + 	// writer lock
    198  + 	volatile bool write_lock;
    199  + 
    200  + 	// data pointer
    201  + 	volatile bool * volatile * data;
216 202    };
217 203    
… …
219 205    void ^?{}(__scheduler_RWLock_t & this);
220 206    
221      - extern __scheduler_RWLock_t __scheduler_lock;
    207  + extern __scheduler_RWLock_t * __scheduler_lock;
222 208    
223 209    //-----------------------------------------------------------------------
224 210    // Reader side : acquire when using the ready queue to schedule but not
225 211    // creating/destroying queues
226      - static inline void ready_schedule_lock(void) with( __scheduler_lock.lock ) {
    212  + static inline void ready_schedule_lock(void) with( *__scheduler_lock ) {
227 213    	/* paranoid */ verify( ! __preemption_enabled() );
228 214    	/* paranoid */ verify( ! kernelTLS().in_sched_lock );
… …
249 235    }
250 236    
251      - static inline void ready_schedule_unlock(void) with( __scheduler_lock.lock ) {
    237  + static inline void ready_schedule_unlock(void) with( *__scheduler_lock ) {
252 238    	/* paranoid */ verify( ! __preemption_enabled() );
253 239    	/* paranoid */ verify( data[kernelTLS().sched_id] == &kernelTLS().sched_lock );
… …
270 256    
271 257    static inline bool ready_mutate_islocked() {
272      - 	return __scheduler_lock.lock.write_lock;
    258  + 	return __scheduler_lock->write_lock;
273 259    }
274 260    #endif
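For context, the __atomic_acquire, __atomic_try_acquire and __atomic_unlock primitives touched above implement a test-and-test-and-set spinlock over GCC's __atomic builtins. The following standalone C sketch mirrors that pattern; the spin_* names are illustrative only and are not part of this changeset, and the body of the inner spin loop (elided in the diff) is simply left empty here.

    #include <stdbool.h>

    /* Illustrative test-and-test-and-set spinlock (sketch, not CFA code). */
    static inline void spin_acquire( volatile bool * lock ) {
    	/* The exchange returns the previous value: true means the lock was
    	   already held, so keep trying. */
    	while ( __atomic_exchange_n( lock, true, __ATOMIC_SEQ_CST ) ) {
    		/* Spin on plain loads so waiters do not keep pulling the
    		   cacheline into exclusive state with repeated writes. */
    		while ( __atomic_load_n( lock, __ATOMIC_RELAXED ) ) {}
    	}
    }

    static inline bool spin_try_acquire( volatile bool * lock ) {
    	/* Succeeds only if the previous value was false (unlocked). */
    	return !__atomic_exchange_n( lock, true, __ATOMIC_SEQ_CST );
    }

    static inline void spin_release( volatile bool * lock ) {
    	/* Release ordering publishes the critical section before the store
    	   that makes the lock appear free. */
    	__atomic_store_n( lock, false, __ATOMIC_RELEASE );
    }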