Changeset c993b15 for libcfa/src/concurrency/kernel_private.hfa
- Timestamp:
- Apr 29, 2021, 4:26:25 PM
- Branches:
- ADT, arm-eh, ast-experimental, enum, forall-pointer-decay, jacob/cs343-translation, master, new-ast-unique-expr, pthread-emulation, qualifiedEnum
- Children:
- 3eb55f98
- Parents:
- b2fc7ad9
- File:
- 1 edited
Legend:
- Unmodified: no prefix
- Added: prefixed with +
- Removed: prefixed with -
libcfa/src/concurrency/kernel_private.hfa
--- libcfa/src/concurrency/kernel_private.hfa (rb2fc7ad9)
+++ libcfa/src/concurrency/kernel_private.hfa (rc993b15)

 // Scheduler

-struct __attribute__((aligned(128))) __scheduler_lock_id_t;

 extern "C" {
…
 // Lock-Free registering/unregistering of threads
 // Register a processor to a given cluster and get its unique id in return
-void register_proc_id( struct __processor_id_t * );
+unsigned register_proc_id( void );

 // Unregister a processor from a given cluster using its id, getting back the original pointer
-void unregister_proc_id( struct __processor_id_t * proc );
+void unregister_proc_id( unsigned );

 //=======================================================================
…
 }

-// Cells use by the reader writer lock
-// while not generic it only relies on a opaque pointer
-struct __attribute__((aligned(128))) __scheduler_lock_id_t {
-	// Spin lock used as the underlying lock
-	volatile bool lock;
-
-	// Handle pointing to the proc owning this cell
-	// Used for allocating cells and debugging
-	__processor_id_t * volatile handle;
-
-	#ifdef __CFA_WITH_VERIFY__
-		// Debug, check if this is owned for reading
-		bool owned;
-	#endif
-};
-
-static_assert( sizeof(struct __scheduler_lock_id_t) <= __alignof(struct __scheduler_lock_id_t));

 //-----------------------------------------------------------------------
…
 	// writer lock
-	volatile bool lock;
+	volatile bool write_lock;

 	// data pointer
-	__scheduler_lock_id_t * data;
+	volatile bool * volatile * data;
 };

…
 static inline void ready_schedule_lock(void) with(*__scheduler_lock) {
 	/* paranoid */ verify( ! __preemption_enabled() );
-	/* paranoid */ verify( kernelTLS().this_proc_id );
-
-	unsigned iproc = kernelTLS().this_proc_id->id;
-	/*paranoid*/ verify(data[iproc].handle == kernelTLS().this_proc_id);
-	/*paranoid*/ verify(iproc < ready);
+	/* paranoid */ verify( ! kernelTLS().in_sched_lock );
+	/* paranoid */ verify( data[kernelTLS().sched_id] == &kernelTLS().sched_lock );
+	/* paranoid */ verify( !kernelTLS().this_processor || kernelTLS().this_processor->unique_id == kernelTLS().sched_id );

 	// Step 1 : make sure no writer are in the middle of the critical section
-	while(__atomic_load_n(&lock, (int)__ATOMIC_RELAXED))
+	while(__atomic_load_n(&write_lock, (int)__ATOMIC_RELAXED))
 		Pause();
…
 	// Step 2 : acquire our local lock
-	__atomic_acquire( &data[iproc].lock );
-	/*paranoid*/ verify( data[iproc].lock );
+	__atomic_acquire( &kernelTLS().sched_lock );
+	/*paranoid*/ verify( kernelTLS().sched_lock );

 	#ifdef __CFA_WITH_VERIFY__
 		// Debug, check if this is owned for reading
-		data[iproc].owned = true;
+		kernelTLS().in_sched_lock = true;
 	#endif
 }

 static inline void ready_schedule_unlock(void) with(*__scheduler_lock) {
 	/* paranoid */ verify( ! __preemption_enabled() );
-	/* paranoid */ verify( kernelTLS().this_proc_id );
-
-	unsigned iproc = kernelTLS().this_proc_id->id;
-	/*paranoid*/ verify(data[iproc].handle == kernelTLS().this_proc_id);
-	/*paranoid*/ verify(iproc < ready);
-	/*paranoid*/ verify(data[iproc].lock);
-	/*paranoid*/ verify(data[iproc].owned);
+	/* paranoid */ verify( data[kernelTLS().sched_id] == &kernelTLS().sched_lock );
+	/* paranoid */ verify( !kernelTLS().this_processor || kernelTLS().this_processor->unique_id == kernelTLS().sched_id );
+	/* paranoid */ verify( kernelTLS().sched_lock );
+	/* paranoid */ verify( kernelTLS().in_sched_lock );
 	#ifdef __CFA_WITH_VERIFY__
 		// Debug, check if this is owned for reading
-		data[iproc].owned = false;
+		kernelTLS().in_sched_lock = false;
 	#endif
-	__atomic_unlock(&data[iproc].lock);
+	__atomic_unlock(&kernelTLS().sched_lock);
 }
…
 static inline bool ready_schedule_islocked(void) {
 	/* paranoid */ verify( ! __preemption_enabled() );
-	/*paranoid*/ verify( kernelTLS().this_proc_id );
-	__processor_id_t * proc = kernelTLS().this_proc_id;
-	return __scheduler_lock->data[proc->id].owned;
+	/* paranoid */ verify( (!kernelTLS().in_sched_lock) || kernelTLS().sched_lock );
+	return kernelTLS().sched_lock;
 }

 static inline bool ready_mutate_islocked() {
-	return __scheduler_lock->lock;
+	return __scheduler_lock->write_lock;
 }
 #endif
…
 // Register a processor to a given cluster and get its unique id in return
 // For convenience, also acquires the lock
-static inline uint_fast32_t ready_mutate_register( struct __processor_id_t * proc ) {
-	register_proc_id( proc );
-	return ready_mutate_lock();
+static inline [unsigned, uint_fast32_t] ready_mutate_register() {
+	unsigned id = register_proc_id();
+	uint_fast32_t last = ready_mutate_lock();
+	return [id, last];
 }

 // Unregister a processor from a given cluster using its id, getting back the original pointer
 // assumes the lock is acquired
-static inline void ready_mutate_unregister( struct __processor_id_t * proc, uint_fast32_t last_s ) {
+static inline void ready_mutate_unregister( unsigned id, uint_fast32_t last_s ) {
 	ready_mutate_unlock( last_s );
-	unregister_proc_id( proc );
+	unregister_proc_id( id );
 }
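
The changeset replaces the central array of per-processor lock cells (`__scheduler_lock_id_t`) with a flag kept in each processor's thread-local storage: readers now spin on `kernelTLS().sched_lock`, and the central reader-writer lock keeps only the renamed `write_lock` flag plus an array of pointers (`volatile bool * volatile *`) to those thread-local flags, indexed by the id that `register_proc_id()` now returns. Below is a minimal, self-contained sketch of that lock shape in plain C with the same GCC `__atomic_*` builtins the diff uses. All names here (`rw_lock`, `reader_register`, `MAX_READERS`, `my_lock`, ...) are invented for illustration; the writer side is not part of this diff (the body of `ready_mutate_lock` is elided), so its acquire-every-reader-flag strategy is an assumption about how the scheme completes, not the runtime's confirmed code.

	#include <stdbool.h>

	#define MAX_READERS 128

	struct rw_lock {
		volatile bool write_lock;                   // writer flag, cf. the renamed field
		volatile bool * volatile data[MAX_READERS]; // pointers to per-reader TLS flags
		volatile unsigned ready;                    // number of registered readers
	};

	static struct rw_lock lock;

	// The real runtime keeps these in kernelTLS(); __thread is the plain-C analogue.
	static __thread volatile bool my_lock;  // cf. kernelTLS().sched_lock
	static __thread unsigned my_id;         // cf. kernelTLS().sched_id

	// cf. register_proc_id(): publish this thread's flag, get an id back.
	// (The sketch ignores races with concurrent writers during registration.)
	unsigned reader_register(void) {
		my_id = __atomic_fetch_add(&lock.ready, 1, __ATOMIC_SEQ_CST);
		lock.data[my_id] = &my_lock;
		return my_id;
	}

	// cf. ready_schedule_lock()
	void read_lock(void) {
		// Step 1: don't race past an active writer; this keeps writers from starving.
		while (__atomic_load_n(&lock.write_lock, __ATOMIC_RELAXED))
			/* Pause() */;
		// Step 2: acquire our own flag; the writer acquires this same flag,
		// so this exchange is what actually provides mutual exclusion.
		while (__atomic_exchange_n(&my_lock, true, __ATOMIC_ACQUIRE))
			/* Pause() */;
	}

	// cf. ready_schedule_unlock()
	void read_unlock(void) {
		__atomic_store_n(&my_lock, false, __ATOMIC_RELEASE);
	}

	// Assumed writer side: take the writer flag, then acquire every reader flag.
	void write_lock_acquire(void) {
		while (__atomic_exchange_n(&lock.write_lock, true, __ATOMIC_ACQUIRE))
			/* Pause() */;
		unsigned n = __atomic_load_n(&lock.ready, __ATOMIC_SEQ_CST);
		for (unsigned i = 0; i < n; i++)
			while (__atomic_exchange_n(lock.data[i], true, __ATOMIC_ACQUIRE))
				/* Pause() */;
	}

	// Assumed writer side: release every reader flag, then the writer flag.
	void write_lock_release(void) {
		unsigned n = __atomic_load_n(&lock.ready, __ATOMIC_SEQ_CST);
		for (unsigned i = 0; i < n; i++)
			__atomic_store_n(lock.data[i], false, __ATOMIC_RELEASE);
		__atomic_store_n(&lock.write_lock, false, __ATOMIC_RELEASE);
	}

On the writer-registration path, note that `ready_mutate_register` now returns a Cforall tuple `[unsigned, uint_fast32_t]`, so a caller receives both the new processor id and the previous lock state in one call instead of passing in a `__processor_id_t *`.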