Changeset a33c113
- Timestamp: Apr 14, 2021, 3:41:06 PM
- Branches: ADT, arm-eh, ast-experimental, enum, forall-pointer-decay, jacob/cs343-translation, master, new-ast-unique-expr, pthread-emulation, qualifiedEnum
- Children: a7504db
- Parents: 634a5c2
- Location: libcfa/src/concurrency
- Files: 4 edited
Legend: unchanged lines are prefixed ' ', added lines '+', removed lines '-'
libcfa/src/concurrency/kernel/startup.cfa
--- r634a5c2
+++ ra33c113
@@ -493,8 +493,6 @@
 	unlock( this.cltr->idles );
 
-	id = doregister((__processor_id_t*)&this);
-
-	// Lock the RWlock so no-one pushes/pops while we are changing the queue
-	uint_fast32_t last_size = ready_mutate_lock();
+	// Register and Lock the RWlock so no-one pushes/pops while we are changing the queue
+	uint_fast32_t last_size = ready_mutate_register((__processor_id_t*)&this);
 
 	// Adjust the ready queue size
@@ -519,9 +517,6 @@
 	ready_queue_shrink( this.cltr, target );
 
-	// Unlock the RWlock
-	ready_mutate_unlock( last_size );
-
-	// Finally we don't need the read_lock any more
-	unregister((__processor_id_t*)&this);
+	// Unlock the RWlock and unregister: we don't need the read_lock any more
+	ready_mutate_unregister((__processor_id_t*)&this, last_size );
 
 	close(this.idle);
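The fused call at this site is effectively behavior-preserving: the inline helpers added in kernel_private.hfa (next file) expand back into the original register-then-lock and unlock-then-unregister sequences. A condensed before/after of the registration step, using only names from this changeset:

    // Before: register the processor, then take the write side of the RW-lock.
    id = doregister((__processor_id_t*)&this);
    uint_fast32_t last_size = ready_mutate_lock();

    // After: one call performs both steps in the same order; the id is now
    // stored into the handle by register_proc_id rather than returned.
    uint_fast32_t last_size = ready_mutate_register((__processor_id_t*)&this);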
libcfa/src/concurrency/kernel_private.hfa
--- r634a5c2
+++ ra33c113
@@ -83,28 +83,10 @@
 // Cluster lock API
 //=======================================================================
-// Cells use by the reader writer lock
-// while not generic it only relies on a opaque pointer
-struct __attribute__((aligned(128))) __scheduler_lock_id_t {
-	// Spin lock used as the underlying lock
-	volatile bool lock;
-
-	// Handle pointing to the proc owning this cell
-	// Used for allocating cells and debugging
-	__processor_id_t * volatile handle;
-
-	#ifdef __CFA_WITH_VERIFY__
-		// Debug, check if this is owned for reading
-		bool owned;
-	#endif
-};
-
-static_assert( sizeof(struct __scheduler_lock_id_t) <= __alignof(struct __scheduler_lock_id_t));
-
 // Lock-Free registering/unregistering of threads
 // Register a processor to a given cluster and get its unique id in return
-unsigned doregister( struct __processor_id_t * proc );
+void register_proc_id( struct __processor_id_t * );
 
 // Unregister a processor from a given cluster using its id, getting back the original pointer
-void unregister( struct __processor_id_t * proc );
+void unregister_proc_id( struct __processor_id_t * proc );
 
 //-----------------------------------------------------------------------
@@ -153,4 +135,22 @@
 }
 
+// Cells use by the reader writer lock
+// while not generic it only relies on a opaque pointer
+struct __attribute__((aligned(128))) __scheduler_lock_id_t {
+	// Spin lock used as the underlying lock
+	volatile bool lock;
+
+	// Handle pointing to the proc owning this cell
+	// Used for allocating cells and debugging
+	__processor_id_t * volatile handle;
+
+	#ifdef __CFA_WITH_VERIFY__
+		// Debug, check if this is owned for reading
+		bool owned;
+	#endif
+};
+
+static_assert( sizeof(struct __scheduler_lock_id_t) <= __alignof(struct __scheduler_lock_id_t));
+
 //-----------------------------------------------------------------------
 // Reader-Writer lock protecting the ready-queues
@@ -247,4 +247,20 @@
 void ready_mutate_unlock( uint_fast32_t /* value returned by lock */ );
 
+//-----------------------------------------------------------------------
+// Lock-Free registering/unregistering of threads
+// Register a processor to a given cluster and get its unique id in return
+// For convenience, also acquires the lock
+static inline uint_fast32_t ready_mutate_register( struct __processor_id_t * proc ) {
+	register_proc_id( proc );
+	return ready_mutate_lock();
+}
+
+// Unregister a processor from a given cluster using its id, getting back the original pointer
+// assumes the lock is acquired
+static inline void ready_mutate_unregister( struct __processor_id_t * proc, uint_fast32_t last_s ) {
+	ready_mutate_unlock( last_s );
+	unregister_proc_id( proc );
+}
+
 //=======================================================================
 // Ready-Queue API
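The relocated __scheduler_lock_id_t shows a standard false-sharing defence: each reader cell is padded out to its own 128-byte slot, so a reader spinning on its cell's lock bit never contends for the cache line holding a neighbouring cell. A minimal stand-alone C sketch of the same technique (lock_cell and its members are hypothetical stand-ins; only the aligned(128) layout and the sizeof/alignof guard mirror the changeset):

    #include <stdbool.h>

    /* One cell per 128-byte slot: spinning on this cell's `lock`
     * cannot false-share with the adjacent cell's `lock`. */
    struct __attribute__((aligned(128))) lock_cell {
        volatile bool lock;       /* underlying spin lock              */
        void * volatile handle;   /* owner pointer, kept for debugging */
    };

    /* Same guard as the changeset: the cell must not outgrow its
     * alignment slot, i.e. sizeof(struct lock_cell) stays at 128. */
    _Static_assert( sizeof(struct lock_cell) <= __alignof__(struct lock_cell),
                    "lock_cell must fit in one aligned slot" );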
libcfa/src/concurrency/preemption.cfa
--- r634a5c2
+++ ra33c113
@@ -712,5 +712,5 @@
 static void * alarm_loop( __attribute__((unused)) void * args ) {
 	__processor_id_t id;
-	id.id = doregister(&id);
+	register_proc_id(&id);
 	__cfaabi_tls.this_proc_id = &id;
 
@@ -773,5 +773,5 @@
 EXIT:
 	__cfaabi_dbg_print_safe( "Kernel : Preemption thread stopping\n" );
-	unregister(&id);
+	register_proc_id(&id);
 
 	return 0p;
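One detail worth flagging: as committed, the EXIT path calls register_proc_id(&id), whereas the renaming introduced elsewhere in this changeset pairs registration with unregister_proc_id. The symmetric lifetime the helpers are built around looks as follows, shown as a condensed sketch of alarm_loop (body elided; not the full function):

    static void * alarm_loop( __attribute__((unused)) void * args ) {
        __processor_id_t id;
        register_proc_id( &id );           // claim a cell in the RW-lock array
        __cfaabi_tls.this_proc_id = &id;

        /* ... wait for and deliver alarms ... */

    EXIT:
        unregister_proc_id( &id );         // release the cell on shutdown
        return 0p;                         // CFA null-pointer literal
    }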
libcfa/src/concurrency/ready_queue.cfa
--- r634a5c2
+++ ra33c113
@@ -94,5 +94,5 @@
 //=======================================================================
 // Lock-Free registering/unregistering of threads
-unsigned doregister( struct __processor_id_t * proc ) with(*__scheduler_lock) {
+void register_proc_id( struct __processor_id_t * proc ) with(*__scheduler_lock) {
 	__cfadbg_print_safe(ready_queue, "Kernel : Registering proc %p for RW-Lock\n", proc);
 
@@ -108,5 +108,5 @@
 		/*paranoid*/ verify(0 == (__alignof__(data[i]) % cache_line_size));
 		/*paranoid*/ verify((((uintptr_t)&data[i]) % cache_line_size) == 0);
-		return i;
+		proc->id = i;
 	}
 
@@ -135,8 +135,8 @@
 	/*paranoid*/ verify(__alignof__(data[n]) == (2 * cache_line_size));
 	/*paranoid*/ verify((((uintptr_t)&data[n]) % cache_line_size) == 0);
-	return n;
-}
-
-void unregister( struct __processor_id_t * proc ) with(*__scheduler_lock) {
+	proc->id = n;
+}
+
+void unregister_proc_id( struct __processor_id_t * proc ) with(*__scheduler_lock) {
 	unsigned id = proc->id;
 	/*paranoid*/ verify(id < ready);
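Storing the index through the handle (proc->id = i) instead of returning it is what lets ready_mutate_register forward ready_mutate_lock's return value as its own. A self-contained C sketch of the claim-or-append shape of such a registration routine (every name here is a hypothetical stand-in; the real routine lives behind *__scheduler_lock and carries the paranoid verifications shown above):

    #include <stdatomic.h>
    #include <stddef.h>
    #include <assert.h>

    struct proc_id { unsigned id; };

    #define MAX_CELLS 128
    static _Atomic(struct proc_id *) handles[MAX_CELLS]; /* NULL = free cell */
    static atomic_uint ready;                /* number of cells handed out */

    /* Reuse a freed cell via compare-and-swap, or append a fresh one;
     * record the slot in the handle rather than returning it. */
    static void register_id_sketch( struct proc_id * proc ) {
        unsigned n = atomic_load( &ready );
        for ( unsigned i = 0; i < n; i++ ) {          /* try to reuse a cell */
            struct proc_id * expected = NULL;
            if ( atomic_compare_exchange_strong( &handles[i], &expected, proc ) ) {
                proc->id = i;
                return;
            }
        }
        unsigned i = atomic_fetch_add( &ready, 1 );   /* append a new cell */
        assert( i < MAX_CELLS );
        atomic_store( &handles[i], proc );
        proc->id = i;
    }

    /* Unregistering just frees the cell named by the stored id. */
    static void unregister_id_sketch( struct proc_id * proc ) {
        atomic_store( &handles[proc->id], NULL );
    }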