Changeset 7f6a7c9 for libcfa/src/concurrency/kernel/private.hfa
- Timestamp: Sep 21, 2022, 11:02:15 AM (22 months ago)
- Branches: ADT, ast-experimental, master, pthread-emulation
- Children: 95dab9e
- Parents: 428adbc (diff), 0bd46fd (diff)

Note: this is a merge changeset; the changes displayed below correspond to the merge itself. Use the (diff) links above to see all the changes relative to each parent.

- Files: 1 edited
libcfa/src/concurrency/kernel/private.hfa
--- libcfa/src/concurrency/kernel/private.hfa (r428adbc)
+++ libcfa/src/concurrency/kernel/private.hfa (r7f6a7c9)

@@ -88,5 +88,5 @@
 #elif defined(CFA_HAVE_LINUX_RSEQ_H)
 extern "Cforall" {
-	extern __attribute__((aligned(64))) thread_local volatile struct rseq __cfaabi_rseq;
+	extern __attribute__((aligned(64))) __thread volatile struct rseq __cfaabi_rseq;
 }
 #else

@@ -139,6 +139,6 @@
 //-----------------------------------------------------------------------------
 // I/O
-$io_arbiter* create(void);
-void destroy( $io_arbiter*);
+io_arbiter$ * create(void);
+void destroy(io_arbiter$ *);
 
 //=======================================================================

@@ -161,4 +161,7 @@
 // Blocking acquire
 static inline void __atomic_acquire(volatile bool * ll) {
+	/* paranoid */ verify( ! __preemption_enabled() );
+	/* paranoid */ verify(ll);
+
 	while( __builtin_expect(__atomic_exchange_n(ll, (bool)true, __ATOMIC_SEQ_CST), false) ) {
 		while(__atomic_load_n(ll, (int)__ATOMIC_RELAXED))

@@ -166,8 +169,12 @@
 	}
 	/* paranoid */ verify(*ll);
+	/* paranoid */ verify( ! __preemption_enabled() );
 }
 
 // Non-Blocking acquire
 static inline bool __atomic_try_acquire(volatile bool * ll) {
+	/* paranoid */ verify( ! __preemption_enabled() );
+	/* paranoid */ verify(ll);
+
 	return !__atomic_exchange_n(ll, (bool)true, __ATOMIC_SEQ_CST);
 }

@@ -175,4 +182,6 @@
 // Release
 static inline void __atomic_unlock(volatile bool * ll) {
+	/* paranoid */ verify( ! __preemption_enabled() );
+	/* paranoid */ verify(ll);
 	/* paranoid */ verify(*ll);
 	__atomic_store_n(ll, (bool)false, __ATOMIC_RELEASE);

@@ -184,19 +193,24 @@
 // have been hard-coded to for the ready-queue for
 // simplicity and performance
-struct __scheduler_RWLock_t {
-	// total cachelines allocated
-	unsigned int max;
-
-	// cachelines currently in use
-	volatile unsigned int alloc;
-
-	// cachelines ready to itereate over
-	// (!= to alloc when thread is in second half of doregister)
-	volatile unsigned int ready;
-
-	// writer lock
-	volatile bool write_lock;
-
-	// data pointer
-	volatile bool * volatile * data;
+union __attribute__((aligned(64))) __scheduler_RWLock_t {
+	struct {
+		__attribute__((aligned(64))) char padding;
+
+		// total cachelines allocated
+		__attribute__((aligned(64))) unsigned int max;
+
+		// cachelines currently in use
+		volatile unsigned int alloc;
+
+		// cachelines ready to itereate over
+		// (!= to alloc when thread is in second half of doregister)
+		volatile unsigned int ready;
+
+		// writer lock
+		volatile bool write_lock;
+
+		// data pointer
+		volatile bool * volatile * data;
+	} lock;
+	char pad[192];
 };

@@ -205,4 +219,4 @@
 void ^?{}(__scheduler_RWLock_t & this);
 
-extern __scheduler_RWLock_t *__scheduler_lock;
+extern __scheduler_RWLock_t __scheduler_lock;
 
@@ -209,6 +223,6 @@
 //-----------------------------------------------------------------------
 // Reader side : acquire when using the ready queue to schedule but not
 // creating/destroying queues
-static inline void ready_schedule_lock(void) with( *__scheduler_lock) {
+static inline void ready_schedule_lock(void) with(__scheduler_lock.lock) {
 	/* paranoid */ verify( ! __preemption_enabled() );
 	/* paranoid */ verify( ! kernelTLS().in_sched_lock );

@@ -235,5 +249,5 @@
 }
 
-static inline void ready_schedule_unlock(void) with( *__scheduler_lock) {
+static inline void ready_schedule_unlock(void) with(__scheduler_lock.lock) {
 	/* paranoid */ verify( ! __preemption_enabled() );
 	/* paranoid */ verify( data[kernelTLS().sched_id] == &kernelTLS().sched_lock );

@@ -256,5 +270,5 @@
 
 static inline bool ready_mutate_islocked() {
-	return __scheduler_lock->write_lock;
+	return __scheduler_lock.lock.write_lock;
 }
 #endif
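The first hunk swaps the thread_local keyword for GNU __thread on the per-thread struct rseq area; for a plain C object like this the two behave the same here, and __thread compiles regardless of which language standard the header is built under. As a hedged illustration of how such a TLS area is typically registered with the kernel (the my_rseq and register_rseq names, the signature value, and the lack of error handling are assumptions for this sketch, not the CFA runtime's actual code):

#define _GNU_SOURCE
#include <linux/rseq.h>
#include <sys/syscall.h>
#include <unistd.h>

// Per-thread rseq area, cacheline-aligned as in the header above.
static __attribute__((aligned(64))) __thread volatile struct rseq my_rseq;

#define MY_RSEQ_SIG 0x53053053u  // illustrative signature, not CFA's

// Register this thread's area with the kernel; returns 0 on success.
static int register_rseq(void) {
	return (int)syscall(SYS_rseq, &my_rseq, sizeof(my_rseq), 0, MY_RSEQ_SIG);
}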
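The lock hunks show __atomic_acquire as a test-and-test-and-set spinlock: the write-invalidating SEQ_CST exchange is only retried once a cheap relaxed load observes the lock free, and the new /* paranoid */ verify lines assert the runtime invariants (non-null pointer, preemption disabled) on entry. A minimal self-contained C sketch of the same pattern, with assert() standing in for CFA's verify() and hypothetical spin_* names (a standalone sketch cannot check the preemption invariant, so that assertion is omitted):

#include <assert.h>
#include <stdbool.h>

// Test-and-test-and-set acquire, mirroring __atomic_acquire above;
// assert() stands in for CFA's /* paranoid */ verify().
static inline void spin_acquire(volatile bool * ll) {
	assert(ll);
	// The writing exchange is attempted first and usually succeeds...
	while (__builtin_expect(__atomic_exchange_n(ll, true, __ATOMIC_SEQ_CST), false)) {
		// ...otherwise spin read-only: the relaxed load keeps the cacheline
		// in shared state instead of bouncing it between cores.
		while (__atomic_load_n(ll, __ATOMIC_RELAXED)) {}
	}
	assert(*ll);
}

// Non-blocking acquire: a single exchange, no spinning.
static inline bool spin_try_acquire(volatile bool * ll) {
	return !__atomic_exchange_n(ll, true, __ATOMIC_SEQ_CST);
}

// Release: RELEASE ordering publishes the critical section's writes.
static inline void spin_release(volatile bool * ll) {
	assert(*ll);
	__atomic_store_n(ll, false, __ATOMIC_RELEASE);
}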
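The __scheduler_RWLock_t change wraps the old struct in a cacheline-aligned union and makes __scheduler_lock a statically allocated object instead of a pointer: the aligned padding byte leaves the first 64-byte line empty, the aligned max member starts the hot fields on the second line, and char pad[192] pins sizeof to exactly three cachelines so neighbouring globals cannot share a line with the lock. A sketch of the same padding idiom in plain C, with hypothetical names:

#include <stdbool.h>

#define CACHE_LINE 64

// Pad a lock to a whole number of cachelines so that anything placed
// next to it in memory can never false-share with its hot fields.
union __attribute__((aligned(CACHE_LINE))) padded_rwlock {
	struct {
		// First cacheline deliberately left empty.
		__attribute__((aligned(CACHE_LINE))) char padding;
		// Hot fields start on the second cacheline.
		__attribute__((aligned(CACHE_LINE))) unsigned int max;
		volatile unsigned int alloc;
		volatile unsigned int ready;
		volatile bool write_lock;
		volatile bool * volatile * data;
	} lock;
	char pad[3 * CACHE_LINE];  // fix sizeof at exactly three cachelines
};

// If the struct ever outgrows the pad, sizeof changes and this fires.
_Static_assert(sizeof(union padded_rwlock) == 3 * CACHE_LINE,
               "padded_rwlock must stay exactly three cachelines");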
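Only fragments of the reader and writer paths appear in the hunks above: each reader publishes a per-thread flag reachable through data[] (ready_schedule_unlock verifies data[kernelTLS().sched_id] points at its own sched_lock), and writers are serialized through write_lock. The following is a generic sketch of the distributed "big-reader" pattern those fields suggest, not the CFA runtime's actual protocol; the brlock name, the back-off policy, and the memory orders are assumptions:

#include <stdbool.h>

// Hypothetical distributed ("big-reader") lock with the same field shape.
typedef struct {
	volatile unsigned ready;            // number of registered reader slots
	volatile bool write_lock;           // single writer flag
	volatile bool * volatile * data;    // one flag per reader, one cacheline each
} brlock;

// Reader: publish own flag, then back off if a writer is active.
static void reader_lock(brlock * l, unsigned id) {
	for (;;) {
		__atomic_store_n(l->data[id], true, __ATOMIC_SEQ_CST);
		if (!__atomic_load_n(&l->write_lock, __ATOMIC_SEQ_CST))
			return;  // no writer: the read-side critical section may begin
		// A writer is active: retract the flag and wait for it to finish.
		__atomic_store_n(l->data[id], false, __ATOMIC_RELAXED);
		while (__atomic_load_n(&l->write_lock, __ATOMIC_RELAXED)) {}
	}
}

static void reader_unlock(brlock * l, unsigned id) {
	__atomic_store_n(l->data[id], false, __ATOMIC_RELEASE);
}

// Writer: take the writer flag, then drain every registered reader.
static void writer_lock(brlock * l) {
	while (__atomic_exchange_n(&l->write_lock, true, __ATOMIC_SEQ_CST)) {}
	for (unsigned i = 0; i < l->ready; i++)
		while (__atomic_load_n(l->data[i], __ATOMIC_SEQ_CST)) {}
}

static void writer_unlock(brlock * l) {
	__atomic_store_n(&l->write_lock, false, __ATOMIC_RELEASE);
}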