Changeset 741e22c

- Timestamp: Aug 16, 2022, 4:01:54 PM
- Branches: ADT, ast-experimental, master, pthread-emulation
- Children: 71cf630
- Parents: ee0176b
- Location: libcfa/src/concurrency/kernel
- Files: 3 edited
libcfa/src/concurrency/kernel/cluster.cfa
--- libcfa/src/concurrency/kernel/cluster.cfa  (ee0176b)
+++ libcfa/src/concurrency/kernel/cluster.cfa  (741e22c)
@@ -93,15 +93,15 @@
 //=======================================================================
 void ?{}(__scheduler_RWLock_t & this) {
-	this.max = __max_processors();
-	this.alloc = 0;
-	this.ready = 0;
-	this.data = alloc(this.max);
-	this.write_lock = false;
-
-	/*paranoid*/ verify(__atomic_is_lock_free(sizeof(this.alloc), &this.alloc));
-	/*paranoid*/ verify(__atomic_is_lock_free(sizeof(this.ready), &this.ready));
+	this.lock.max = __max_processors();
+	this.lock.alloc = 0;
+	this.lock.ready = 0;
+	this.lock.data = alloc(this.lock.max);
+	this.lock.write_lock = false;
+
+	/*paranoid*/ verify(__atomic_is_lock_free(sizeof(this.lock.alloc), &this.lock.alloc));
+	/*paranoid*/ verify(__atomic_is_lock_free(sizeof(this.lock.ready), &this.lock.ready));
 
 }
 void ^?{}(__scheduler_RWLock_t & this) {
-	free(this.data);
+	free(this.lock.data);
 }
@@ -110,5 +110,5 @@
 //=======================================================================
 // Lock-Free registering/unregistering of threads
-unsigned register_proc_id( void ) with(*__scheduler_lock) {
+unsigned register_proc_id( void ) with(__scheduler_lock->lock) {
 	__kernel_rseq_register();
 
@@ -132,9 +132,9 @@
 	}
 
-	if(max <= alloc) abort("Trying to create more than %ud processors", __scheduler_lock->max);
+	if(max <= alloc) abort("Trying to create more than %ud processors", __scheduler_lock->lock.max);
 
 	// Step - 2 : F&A to get a new spot in the array.
 	uint_fast32_t n = __atomic_fetch_add(&alloc, 1, __ATOMIC_SEQ_CST);
-	if(max <= n) abort("Trying to create more than %ud processors", __scheduler_lock->max);
+	if(max <= n) abort("Trying to create more than %ud processors", __scheduler_lock->lock.max);
 
 	// Step - 3 : Mark space as used and then publish it.
@@ -154,5 +154,5 @@
 }
 
-void unregister_proc_id( unsigned id ) with(*__scheduler_lock) {
+void unregister_proc_id( unsigned id ) with(__scheduler_lock->lock) {
 	/* paranoid */ verify(id < ready);
 	/* paranoid */ verify(id == kernelTLS().sched_id);
@@ -169,5 +169,5 @@
 // Writer side : acquire when changing the ready queue, e.g. adding more
 // queues or removing them.
-uint_fast32_t ready_mutate_lock( void ) with(*__scheduler_lock) {
+uint_fast32_t ready_mutate_lock( void ) with(__scheduler_lock->lock) {
 	/* paranoid */ verify( ! __preemption_enabled() );
 
@@ -196,5 +196,5 @@
 }
 
-void ready_mutate_unlock( uint_fast32_t last_s ) with(*__scheduler_lock) {
+void ready_mutate_unlock( uint_fast32_t last_s ) with(__scheduler_lock->lock) {
 	/* paranoid */ verify( ! __preemption_enabled() );
 
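The mechanical edits above all follow from Cforall's `with` clause: `with(expr)` opens the members of `expr` into the routine's scope, so the unqualified uses of `max`, `alloc`, `ready`, and `data` in the bodies keep resolving once the opened expression changes from `*__scheduler_lock` to `__scheduler_lock->lock`. A minimal sketch of the pattern, with hypothetical names not taken from this changeset:

	// Hypothetical miniature of the rewrite above: when the fields move into
	// a nested 'lock' member, only the 'with' expression changes; the body's
	// unqualified field accesses stay exactly as they were.
	struct Counters { unsigned int max; };
	union Padded { Counters lock; char pad[192]; };

	static inline unsigned int read_max( Padded & this ) with( this.lock ) {
		return max;   // resolves to this.lock.max
	}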
libcfa/src/concurrency/kernel/cluster.hfa
--- libcfa/src/concurrency/kernel/cluster.hfa  (ee0176b)
+++ libcfa/src/concurrency/kernel/cluster.hfa  (741e22c)
@@ -24,6 +24,4 @@
 // Calc moving average based on existing average, before and current time.
 static inline unsigned long long moving_average(unsigned long long currtsc, unsigned long long instsc, unsigned long long old_avg) {
-	/* paranoid */ verifyf( currtsc < 45000000000000000, "Suspiciously large current time: %'llu (%llx)\n", currtsc, currtsc );
-	/* paranoid */ verifyf( instsc < 45000000000000000, "Suspiciously large insert time: %'llu (%llx)\n", instsc, instsc );
 	/* paranoid */ verifyf( old_avg < 15000000000000, "Suspiciously large previous average: %'llu (%llx)\n", old_avg, old_avg );
 
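For context, moving_average folds the latency of each handled item (current timestamp minus its insertion timestamp) into a running average; this hunk drops the two sanity bounds on the raw timestamps and keeps only the bound on the previous average. The function body is not part of the changeset, so the following is only an assumed illustration of the signature's intent, not the repository's actual weighting:

	// Assumed illustration only -- the real body is not shown in this changeset.
	static inline unsigned long long moving_average_sketch( unsigned long long currtsc, unsigned long long instsc, unsigned long long old_avg ) {
		unsigned long long elapsed = currtsc - instsc;   // latency of this item
		return (15 * old_avg + elapsed) / 16;            // assumed exponential smoothing
	}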
libcfa/src/concurrency/kernel/private.hfa
--- libcfa/src/concurrency/kernel/private.hfa  (ee0176b)
+++ libcfa/src/concurrency/kernel/private.hfa  (741e22c)
@@ -184,19 +184,22 @@
 // have been hard-coded to for the ready-queue for
 // simplicity and performance
-struct __scheduler_RWLock_t {
-	// total cachelines allocated
-	unsigned int max;
-
-	// cachelines currently in use
-	volatile unsigned int alloc;
-
-	// cachelines ready to itereate over
-	// (!= to alloc when thread is in second half of doregister)
-	volatile unsigned int ready;
-
-	// writer lock
-	volatile bool write_lock;
-
-	// data pointer
-	volatile bool * volatile * data;
+union __attribute__((aligned(64))) __scheduler_RWLock_t {
+	struct {
+		// total cachelines allocated
+		unsigned int max;
+
+		// cachelines currently in use
+		volatile unsigned int alloc;
+
+		// cachelines ready to itereate over
+		// (!= to alloc when thread is in second half of doregister)
+		volatile unsigned int ready;
+
+		// writer lock
+		volatile bool write_lock;
+
+		// data pointer
+		volatile bool * volatile * data;
+	} lock;
+	char pad[192];
 };
@@ -210,5 +213,5 @@
 // Reader side : acquire when using the ready queue to schedule but not
 // creating/destroying queues
-static inline void ready_schedule_lock(void) with(*__scheduler_lock) {
+static inline void ready_schedule_lock(void) with(__scheduler_lock->lock) {
 	/* paranoid */ verify( ! __preemption_enabled() );
 	/* paranoid */ verify( ! kernelTLS().in_sched_lock );
@@ -235,5 +238,5 @@
 }
 
-static inline void ready_schedule_unlock(void) with(*__scheduler_lock) {
+static inline void ready_schedule_unlock(void) with(__scheduler_lock->lock) {
 	/* paranoid */ verify( ! __preemption_enabled() );
 	/* paranoid */ verify( data[kernelTLS().sched_id] == &kernelTLS().sched_lock );
@@ -256,5 +259,5 @@
 
 static inline bool ready_mutate_islocked() {
-	return __scheduler_lock->write_lock;
+	return __scheduler_lock->lock.write_lock;
 }
 #endif
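The substantive change is the first hunk of private.hfa: __scheduler_RWLock_t becomes a union of its old fields (now grouped under the `lock` member) with a 192-byte pad, and the whole object is aligned to 64 bytes. Assuming 64-byte cache lines, the pad fixes the lock's footprint at exactly three lines, so its heavily written counters can no longer share a cache line with whatever the linker places next to the global, the classic padding defence against false sharing. A stand-alone sketch of the idiom, with illustrative names:

	// The union's char pad fixes the object's footprint at three 64-byte
	// cache lines, and aligned(64) starts it on a cache-line boundary, so no
	// unrelated global can false-share a line with the hot counters.
	union __attribute__((aligned(64))) padded_lock_t {
		struct {
			volatile unsigned int alloc;   // bumped by every registering processor
			volatile bool write_lock;      // held while resizing the ready queue
		} lock;
		char pad[192];                     // 3 * 64-byte cache lines
	};
	_Static_assert( sizeof(union padded_lock_t) == 192, "footprint fixed by the pad" );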