Changeset 36cc24a for libcfa/src/concurrency/kernel
- Timestamp: Aug 17, 2022, 4:34:10 PM
- Branches: ADT, ast-experimental, master, pthread-emulation
- Children: ff370d8
- Parents: 3ce3fb9 (diff), 683cc13 (diff)
Note: this is a merge changeset; the changes displayed below correspond to the merge itself. Use the (diff) links above to see all the changes relative to each parent.
- Location: libcfa/src/concurrency/kernel
- Files: 4 edited
libcfa/src/concurrency/kernel/cluster.cfa
--- r3ce3fb9
+++ r36cc24a
@@ -93,15 +93,15 @@
 //=======================================================================
 void ?{}(__scheduler_RWLock_t & this) {
-	this.max = __max_processors();
-	this.alloc = 0;
-	this.ready = 0;
-	this.data = alloc(this.max);
-	this.write_lock = false;
-
-	/*paranoid*/ verify(__atomic_is_lock_free(sizeof(this.alloc), &this.alloc));
-	/*paranoid*/ verify(__atomic_is_lock_free(sizeof(this.ready), &this.ready));
+	this.lock.max = __max_processors();
+	this.lock.alloc = 0;
+	this.lock.ready = 0;
+	this.lock.data = alloc(this.lock.max);
+	this.lock.write_lock = false;
+
+	/*paranoid*/ verify(__atomic_is_lock_free(sizeof(this.lock.alloc), &this.lock.alloc));
+	/*paranoid*/ verify(__atomic_is_lock_free(sizeof(this.lock.ready), &this.lock.ready));
 
 }
 void ^?{}(__scheduler_RWLock_t & this) {
-	free(this.data);
+	free(this.lock.data);
 }
@@ -110,4 +110,4 @@
 //=======================================================================
 // Lock-Free registering/unregistering of threads
-unsigned register_proc_id( void ) with( *__scheduler_lock) {
+unsigned register_proc_id( void ) with(__scheduler_lock.lock) {
 	__kernel_rseq_register();
@@ -132,9 +132,9 @@
 	}
 
-	if(max <= alloc) abort("Trying to create more than %ud processors", __scheduler_lock->max);
+	if(max <= alloc) abort("Trying to create more than %ud processors", __scheduler_lock.lock.max);
 
 	// Step - 2 : F&A to get a new spot in the array.
 	uint_fast32_t n = __atomic_fetch_add(&alloc, 1, __ATOMIC_SEQ_CST);
-	if(max <= n) abort("Trying to create more than %ud processors", __scheduler_lock->max);
+	if(max <= n) abort("Trying to create more than %ud processors", __scheduler_lock.lock.max);
 
 	// Step - 3 : Mark space as used and then publish it.
@@ -154,5 +154,5 @@
 }
 
-void unregister_proc_id( unsigned id ) with( *__scheduler_lock) {
+void unregister_proc_id( unsigned id ) with(__scheduler_lock.lock) {
 	/* paranoid */ verify(id < ready);
 	/* paranoid */ verify(id == kernelTLS().sched_id);
@@ -169,4 +169,4 @@
 // Writer side : acquire when changing the ready queue, e.g. adding more
 // queues or removing them.
-uint_fast32_t ready_mutate_lock( void ) with( *__scheduler_lock) {
+uint_fast32_t ready_mutate_lock( void ) with(__scheduler_lock.lock) {
 	/* paranoid */ verify( ! __preemption_enabled() );
@@ -196,4 +196,4 @@
 }
 
-void ready_mutate_unlock( uint_fast32_t last_s ) with( *__scheduler_lock) {
+void ready_mutate_unlock( uint_fast32_t last_s ) with(__scheduler_lock.lock) {
 	/* paranoid */ verify( ! __preemption_enabled() );
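The renamed counters above are the heart of the lock's lock-free registration protocol: register_proc_id reserves a slot with a fetch-and-add on alloc (the "Step - 2" comment) and only advances ready once the slot's data is published (the "Step - 3" comment), which is why ready can trail alloc while a thread is mid-registration. A minimal C sketch of that two-counter publish scheme, with hypothetical names and capacity; a sketch of the idea suggested by the comments, not the actual CFA implementation:

    #include <stdatomic.h>

    #define MAX_SLOTS 64                               /* hypothetical capacity */

    static _Atomic unsigned alloc_cnt;                 /* slots reserved so far */
    static _Atomic unsigned ready_cnt;                 /* slots visible to readers */
    static int *slot_data[MAX_SLOTS];                  /* per-slot published data */

    /* Reserve a slot, fill it, then publish it by advancing ready_cnt.
     * Readers that only scan indices [0, ready_cnt) never observe a
     * half-initialized slot. The real code aborts when n >= capacity. */
    unsigned register_slot(int *mine) {
        unsigned n = atomic_fetch_add(&alloc_cnt, 1);  /* F&A reserves index n */
        slot_data[n] = mine;                           /* fill the slot... */
        while (atomic_load(&ready_cnt) != n)           /* ...wait for earlier slots */
            ;
        atomic_fetch_add(&ready_cnt, 1);               /* ...then publish ours */
        return n;
    }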
libcfa/src/concurrency/kernel/cluster.hfa
--- r3ce3fb9
+++ r36cc24a
@@ -24,5 +24,3 @@
 // Calc moving average based on existing average, before and current time.
 static inline unsigned long long moving_average(unsigned long long currtsc, unsigned long long instsc, unsigned long long old_avg) {
-	/* paranoid */ verifyf( currtsc < 45000000000000000, "Suspiciously large current time: %'llu (%llx)\n", currtsc, currtsc );
-	/* paranoid */ verifyf( instsc < 45000000000000000, "Suspiciously large insert time: %'llu (%llx)\n", instsc, instsc );
 	/* paranoid */ verifyf( old_avg < 15000000000000, "Suspiciously large previous average: %'llu (%llx)\n", old_avg, old_avg );
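Only the bound on old_avg survives; the sanity checks on the raw timestamps are dropped. For orientation only: a moving average of this shape typically folds the age of the inserted element (currtsc - instsc) into the running average with a fixed weight. A hedged C sketch, since the function body and its actual weight are not shown in this hunk:

    /* Hypothetical shape of moving_average: an EMA with weight 1/16.
     * The real body and weight are not part of this changeset. */
    static inline unsigned long long ema_sketch(unsigned long long currtsc,
                                                unsigned long long instsc,
                                                unsigned long long old_avg) {
        unsigned long long sample = currtsc - instsc;  /* age of the element */
        return (15 * old_avg + sample) / 16;           /* decay the old average */
    }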
libcfa/src/concurrency/kernel/private.hfa
--- r3ce3fb9
+++ r36cc24a
@@ -184,19 +184,24 @@
 // have been hard-coded to for the ready-queue for
 // simplicity and performance
-struct __scheduler_RWLock_t {
-	// total cachelines allocated
-	unsigned int max;
-
-	// cachelines currently in use
-	volatile unsigned int alloc;
-
-	// cachelines ready to iterate over
-	// (!= to alloc when thread is in second half of doregister)
-	volatile unsigned int ready;
-
-	// writer lock
-	volatile bool write_lock;
-
-	// data pointer
-	volatile bool * volatile * data;
+union __attribute__((aligned(64))) __scheduler_RWLock_t {
+	struct {
+		__attribute__((aligned(64))) char padding;
+
+		// total cachelines allocated
+		__attribute__((aligned(64))) unsigned int max;
+
+		// cachelines currently in use
+		volatile unsigned int alloc;
+
+		// cachelines ready to iterate over
+		// (!= to alloc when thread is in second half of doregister)
+		volatile unsigned int ready;
+
+		// writer lock
+		volatile bool write_lock;
+
+		// data pointer
+		volatile bool * volatile * data;
+	} lock;
+	char pad[192];
 };
@@ -205,3 +210,3 @@
 void ^?{}(__scheduler_RWLock_t & this);
 
-extern __scheduler_RWLock_t * __scheduler_lock;
+extern __scheduler_RWLock_t __scheduler_lock;
@@ -209,6 +214,6 @@
 //-----------------------------------------------------------------------
 // Reader side : acquire when using the ready queue to schedule but not
 // creating/destroying queues
-static inline void ready_schedule_lock(void) with( *__scheduler_lock) {
+static inline void ready_schedule_lock(void) with(__scheduler_lock.lock) {
 	/* paranoid */ verify( ! __preemption_enabled() );
 	/* paranoid */ verify( ! kernelTLS().in_sched_lock );
@@ -235,5 +240,5 @@
 }
 
-static inline void ready_schedule_unlock(void) with( *__scheduler_lock) {
+static inline void ready_schedule_unlock(void) with(__scheduler_lock.lock) {
 	/* paranoid */ verify( ! __preemption_enabled() );
 	/* paranoid */ verify( data[kernelTLS().sched_id] == &kernelTLS().sched_lock );
@@ -257,4 +262,4 @@
 static inline bool ready_mutate_islocked() {
-	return __scheduler_lock->write_lock;
+	return __scheduler_lock.lock.write_lock;
 }
 #endif
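This is the substantive change of the merge: the lock's fields move inside a cache-line-aligned union whose char pad[192] member pins the whole object at three 64-byte cache lines, so the now statically allocated lock (see startup.cfa below) cannot false-share a cache line with adjacent globals. A self-contained C sketch of the same padding idiom, with illustrative names:

    /* A 64-byte-aligned union: the first member carries the real fields,
     * the second forces sizeof to exactly three cache lines (192 bytes). */
    union __attribute__((aligned(64))) padded_lock {
        struct {
            __attribute__((aligned(64))) char padding; /* line 0: kept empty */
            __attribute__((aligned(64))) unsigned max; /* line 1: hot fields */
            volatile unsigned alloc;
            volatile unsigned ready;
            volatile _Bool write_lock;
            volatile _Bool *volatile *data;
        } lock;
        char pad[192];                                 /* fixes the footprint */
    };

    _Static_assert(sizeof(union padded_lock) == 192, "three full cache lines");

Accesses then go through the named member, e.g. the_lock.lock.alloc, which is exactly why every this.X and __scheduler_lock->X in the other files becomes this.lock.X or __scheduler_lock.lock.X.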
libcfa/src/concurrency/kernel/startup.cfa
--- r3ce3fb9
+++ r36cc24a
@@ -113,5 +113,5 @@
 KERNEL_STORAGE(thread$, mainThread);
 KERNEL_STORAGE(__stack_t, mainThreadCtx);
-KERNEL_STORAGE(__scheduler_RWLock_t, __scheduler_lock);
+// KERNEL_STORAGE(__scheduler_RWLock_t, __scheduler_lock);
 KERNEL_STORAGE(eventfd_t, mainIdleEventFd);
 KERNEL_STORAGE(io_future_t, mainIdleFuture);
@@ -123,3 +123,2 @@
 processor * mainProcessor;
 thread$ * mainThread;
-__scheduler_RWLock_t * __scheduler_lock;
@@ -148,4 +147,6 @@
 };
 
+__scheduler_RWLock_t __scheduler_lock @= { 0 };
+
 #if defined(CFA_HAVE_LINUX_LIBRSEQ)
 	// No data needed
@@ -199,5 +200,5 @@
 	// Initialize the global scheduler lock
-	__scheduler_lock = (__scheduler_RWLock_t*)&storage___scheduler_lock;
-	(*__scheduler_lock){};
+	// __scheduler_lock = (__scheduler_RWLock_t*)&storage___scheduler_lock;
+	(__scheduler_lock){};
 
 	// Initialize the main cluster
@@ -336,5 +337,5 @@
 	^(*mainCluster){};
 
-	^(*__scheduler_lock){};
+	^(__scheduler_lock){};
 
 	^(__cfa_dbg_global_clusters.list){};
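Taken together with the new union type, the startup change swaps runtime placement-construction through a global pointer for a statically initialized global (@= requests C-style initialization in CFA), so the lock is valid from program start and every use drops one pointer indirection. A rough C analogue of the before/after, with hypothetical names:

    struct sched_lock { volatile unsigned alloc, ready; volatile _Bool write_lock; };

    /* Before: raw storage plus a pointer initialized during startup;
     * every access pays an extra dereference. */
    static char storage_lock[192] __attribute__((aligned(64)));
    struct sched_lock *lock_ptr;   /* set in startup: lock_ptr = (void *)storage_lock; */

    /* After: an ordinary global; static storage is zero-initialized and
     * usable from program start, with no indirection on access. */
    struct sched_lock the_lock;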