Changeset cd3fc46
- Timestamp:
- Aug 17, 2022, 12:59:42 PM (2 years ago)
- Branches:
- ADT, ast-experimental, master, pthread-emulation
- Children:
- 8fca132
- Parents:
- aec2c022
- Location:
- libcfa/src/concurrency/kernel
- Files:
-
- 3 edited
Legend:
- Unmodified
- Added
- Removed
-
libcfa/src/concurrency/kernel/cluster.cfa (aec2c022 → cd3fc46)

@@ lines 110-114 @@
  //=======================================================================
  // Lock-Free registering/unregistering of threads
- unsigned register_proc_id( void ) with(__scheduler_lock->lock) {
+ unsigned register_proc_id( void ) with(__scheduler_lock.lock) {
  	__kernel_rseq_register();

@@ lines 132-140 @@
  }
- 	if(max <= alloc) abort("Trying to create more than %ud processors", __scheduler_lock->lock.max);
+ 	if(max <= alloc) abort("Trying to create more than %ud processors", __scheduler_lock.lock.max);
  	// Step - 2 : F&A to get a new spot in the array.
  	uint_fast32_t n = __atomic_fetch_add(&alloc, 1, __ATOMIC_SEQ_CST);
- 	if(max <= n) abort("Trying to create more than %ud processors", __scheduler_lock->lock.max);
+ 	if(max <= n) abort("Trying to create more than %ud processors", __scheduler_lock.lock.max);
  	// Step - 3 : Mark space as used and then publish it.

@@ lines 154-158 @@
  }
- void unregister_proc_id( unsigned id ) with(__scheduler_lock->lock) {
+ void unregister_proc_id( unsigned id ) with(__scheduler_lock.lock) {
  	/* paranoid */ verify(id < ready);
  	/* paranoid */ verify(id == kernelTLS().sched_id);

@@ lines 169-173 @@
  // Writer side : acquire when changing the ready queue, e.g. adding more
  // queues or removing them.
- uint_fast32_t ready_mutate_lock( void ) with(__scheduler_lock->lock) {
+ uint_fast32_t ready_mutate_lock( void ) with(__scheduler_lock.lock) {
  	/* paranoid */ verify( ! __preemption_enabled() );

@@ lines 196-200 @@
  }
- void ready_mutate_unlock( uint_fast32_t last_s ) with(__scheduler_lock->lock) {
+ void ready_mutate_unlock( uint_fast32_t last_s ) with(__scheduler_lock.lock) {
  	/* paranoid */ verify( ! __preemption_enabled() );
libcfa/src/concurrency/kernel/private.hfa (aec2c022 → cd3fc46)

@@ lines 186-193 @@
  union __attribute__((aligned(64))) __scheduler_RWLock_t {
  	struct {
+ 		__attribute__((aligned(64))) char padding;
  		// total cachelines allocated
- 		unsigned int max;
+ 		__attribute__((aligned(64))) unsigned int max;
  		// cachelines currently in use

@@ lines 208-212 @@
  void ^?{}(__scheduler_RWLock_t & this);
- extern __scheduler_RWLock_t * __scheduler_lock;
+ extern __scheduler_RWLock_t __scheduler_lock;

@@ lines 213-219 @@
  //-----------------------------------------------------------------------
  // Reader side : acquire when using the ready queue to schedule but not
  // creating/destroying queues
- static inline void ready_schedule_lock(void) with(__scheduler_lock->lock) {
+ static inline void ready_schedule_lock(void) with(__scheduler_lock.lock) {
  	/* paranoid */ verify( ! __preemption_enabled() );
  	/* paranoid */ verify( ! kernelTLS().in_sched_lock );

@@ lines 238-244 @@
  }
- static inline void ready_schedule_unlock(void) with(__scheduler_lock->lock) {
+ static inline void ready_schedule_unlock(void) with(__scheduler_lock.lock) {
  	/* paranoid */ verify( ! __preemption_enabled() );
  	/* paranoid */ verify( data[kernelTLS().sched_id] == &kernelTLS().sched_lock );

@@ lines 259-265 @@
  static inline bool ready_mutate_islocked() {
- 	return __scheduler_lock->lock.write_lock;
+ 	return __scheduler_lock.lock.write_lock;
  }
  #endif
libcfa/src/concurrency/kernel/startup.cfa (aec2c022 → cd3fc46)

@@ lines 113-117 @@
  KERNEL_STORAGE(thread$, mainThread);
  KERNEL_STORAGE(__stack_t, mainThreadCtx);
- KERNEL_STORAGE(__scheduler_RWLock_t, __scheduler_lock);
+ // KERNEL_STORAGE(__scheduler_RWLock_t, __scheduler_lock);
  KERNEL_STORAGE(eventfd_t, mainIdleEventFd);
  KERNEL_STORAGE(io_future_t, mainIdleFuture);

@@ lines 123-126 @@
  processor * mainProcessor;
  thread$ * mainThread;
- __scheduler_RWLock_t * __scheduler_lock;

@@ lines 148-151 @@
  };
+ __scheduler_RWLock_t __scheduler_lock @= { 0 };
  #if defined(CFA_HAVE_LINUX_LIBRSEQ)
  	// No data needed

@@ lines 198-204 @@
  	// Initialize the global scheduler lock
- 	__scheduler_lock = (__scheduler_RWLock_t*)&storage___scheduler_lock;
- 	(*__scheduler_lock){};
+ 	// __scheduler_lock = (__scheduler_RWLock_t*)&storage___scheduler_lock;
+ 	(__scheduler_lock){};
  	// Initialize the main cluster

@@ lines 336-340 @@
  	^(*mainCluster){};
- 	^(*__scheduler_lock){};
+ 	^(__scheduler_lock){};
  	^(__cfa_dbg_global_clusters.list){};
Note: See TracChangeset
for help on using the changeset viewer.