Changeset a7504db
- Timestamp:
- Apr 14, 2021, 4:07:18 PM (3 years ago)
- Branches:
- ADT, arm-eh, ast-experimental, enum, forall-pointer-decay, jacob/cs343-translation, master, new-ast-unique-expr, pthread-emulation, qualifiedEnum
- Children:
- 6a9b12b
- Parents:
- a33c113
- Location:
- libcfa/src/concurrency
- Files:
-
- 2 edited
Legend:
- Unmodified
- Added
- Removed
-
libcfa/src/concurrency/kernel/startup.cfa
ra33c113 ra7504db 489 489 #endif 490 490 491 lock( this.cltr->idles );492 int target = this.cltr->idles.total += 1u;493 unlock( this.cltr->idles );494 495 491 // Register and Lock the RWlock so no-one pushes/pops while we are changing the queue 496 492 uint_fast32_t last_size = ready_mutate_register((__processor_id_t*)&this); 493 int target = this.cltr->idles.total += 1u; 497 494 498 495 // Adjust the ready queue size … … 507 504 // Not a ctor, it just preps the destruction but should not destroy members 508 505 static void deinit(processor & this) { 509 lock( this.cltr->idles );510 int target = this.cltr->idles.total -= 1u;511 unlock( this.cltr->idles );512 513 506 // Lock the RWlock so no-one pushes/pops while we are changing the queue 514 507 uint_fast32_t last_size = ready_mutate_lock(); 508 int target = this.cltr->idles.total -= 1u; 515 509 516 510 // Adjust the ready queue size -
libcfa/src/concurrency/kernel_private.hfa
ra33c113 ra7504db 89 89 // Unregister a processor from a given cluster using its id, getting back the original pointer 90 90 void unregister_proc_id( struct __processor_id_t * proc ); 91 92 //-----------------------------------------------------------------------93 // Cluster idle lock/unlock94 static inline void lock(__cluster_idles & this) {95 for() {96 uint64_t l = this.lock;97 if(98 (0 == (l % 2))99 && __atomic_compare_exchange_n(&this.lock, &l, l + 1, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)100 ) return;101 Pause();102 }103 }104 105 static inline void unlock(__cluster_idles & this) {106 /* paranoid */ verify( 1 == (this.lock % 2) );107 __atomic_fetch_add( &this.lock, 1, __ATOMIC_SEQ_CST );108 }109 91 110 92 //======================================================================= … … 263 245 } 264 246 247 //----------------------------------------------------------------------- 248 // Cluster idle lock/unlock 249 static inline void lock(__cluster_idles & this) { 250 /* paranoid */ verify( ! __preemption_enabled() ); 251 252 // Start by locking the global RWlock so that we know no-one is 253 // adding/removing processors while we mess with the idle lock 254 ready_schedule_lock(); 255 256 // Simple counting lock, acquired by incrementing the counter 257 // to an odd number 258 for() { 259 uint64_t l = this.lock; 260 if( 261 (0 == (l % 2)) 262 && __atomic_compare_exchange_n(&this.lock, &l, l + 1, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST) 263 ) return; 264 Pause(); 265 } 266 267 /* paranoid */ verify( ! __preemption_enabled() ); 268 } 269 270 static inline void unlock(__cluster_idles & this) { 271 /* paranoid */ verify( ! 
__preemption_enabled() ); 272 273 /* paranoid */ verify( 1 == (this.lock % 2) ); 274 // Simple counting lock, released by incrementing to an even number 275 __atomic_fetch_add( &this.lock, 1, __ATOMIC_SEQ_CST ); 276 277 // Release the global lock, which we acquired when locking 278 ready_schedule_unlock(); 279 280 /* paranoid */ verify( ! __preemption_enabled() ); 281 } 282 265 283 //======================================================================= 266 284 // Ready-Queue API
Note: See TracChangeset
for help on using the changeset viewer.