Changeset b388ee81
- Timestamp: Jun 11, 2020, 6:47:27 PM (4 years ago)
- Branches: ADT, arm-eh, ast-experimental, enum, forall-pointer-decay, jacob/cs343-translation, master, new-ast, new-ast-unique-expr, pthread-emulation, qualifiedEnum
- Children: 9b1dcc2
- Parents: 61d7bec
- Location: libcfa/src/concurrency
- Files: 4 edited
libcfa/src/concurrency/kernel.cfa
--- libcfa/src/concurrency/kernel.cfa (r61d7bec)
+++ libcfa/src/concurrency/kernel.cfa (rb388ee81)
@@ -125 +125 @@
 //-----------------------------------------------------------------------------
 // Kernel storage
 KERNEL_STORAGE(cluster, mainCluster);
 KERNEL_STORAGE(processor, mainProcessor);
 KERNEL_STORAGE($thread, mainThread);
 KERNEL_STORAGE(__stack_t, mainThreadCtx);
+KERNEL_STORAGE(__scheduler_RWLock_t, __scheduler_lock);
 
 cluster * mainCluster;
 processor * mainProcessor;
 $thread * mainThread;
+__scheduler_RWLock_t * __scheduler_lock;
 
 extern "C" {
@@ -262 +264 @@
     this.preemption_rate = preemption_rate;
     ready_queue{};
-    ready_lock{};
 
 #if !defined(__CFA_NO_STATISTICS__)
@@ -299 +300 @@
     // register the processor unless it's the main thread which is handled in the boot sequence
     if(this != mainProcessor) {
-        this->id = doregister2(this->cltr, this);
+        this->id = doregister(this);
         ready_queue_grow( this->cltr );
     }
@@ -345 +346 @@
     if(this != mainProcessor) {
         ready_queue_shrink( this->cltr );
-        unregister2(this->cltr, this);
+        unregister(this);
     }
     else {
@@ -622 +623 @@
     if (thrd->preempted == __NO_PREEMPTION) thrd->state = Ready;
 
-    ready_schedule_lock( thrd->curr_cluster, kernelTLS.this_processor );
+    ready_schedule_lock( kernelTLS.this_processor );
     push( thrd->curr_cluster, thrd );
 
     __wake_one(thrd->curr_cluster);
-    ready_schedule_unlock( thrd->curr_cluster, kernelTLS.this_processor );
+    ready_schedule_unlock( kernelTLS.this_processor );
 
     /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
@@ -635 +636 @@
     /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
 
-    ready_schedule_lock( this, kernelTLS.this_processor );
+    ready_schedule_lock( kernelTLS.this_processor );
     $thread * head = pop( this );
-    ready_schedule_unlock( this, kernelTLS.this_processor );
+    ready_schedule_unlock( kernelTLS.this_processor );
 
     /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
@@ -749 +750 @@
     __cfa_dbg_global_clusters.lock{};
 
+    // Initialize the global scheduler lock
+    __scheduler_lock = (__scheduler_RWLock_t*)&storage___scheduler_lock;
+    (*__scheduler_lock){};
+
     // Initialize the main cluster
     mainCluster = (cluster *)&storage_mainCluster;
@@ -793 +798 @@
     (*mainProcessor){};
 
-    mainProcessor->id = doregister2(mainCluster, mainProcessor);
+    mainProcessor->id = doregister(mainProcessor);
 
     //initialize the global state variables
@@ -848 +853 @@
     kernel_stop_preemption();
 
-    unregister2(mainCluster, mainProcessor);
+    unregister(mainProcessor);
 
     // Destroy the main processor and its context in reverse order of construction
@@ -866 +871 @@
 
     ^(*mainCluster){};
+
+    ^(*__scheduler_lock){};
 
     ^(__cfa_dbg_global_clusters.list){};
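The kernel.cfa hunks treat the new lock exactly like the other kernel singletons: KERNEL_STORAGE reserves space for it, the boot sequence binds the global pointer and runs the constructor in place, and shutdown destroys it after the main cluster. Below is a minimal sketch of that boot idiom, illustrative only: it assumes KERNEL_STORAGE simply reserves suitably sized static storage (the macro definition and any alignment attributes are not part of this changeset), and the example_* functions are hypothetical stand-ins for the real boot and shutdown code.

// Illustrative sketch of the startup/shutdown idiom, not the changeset's code.
static char storage___scheduler_lock[ sizeof(__scheduler_RWLock_t) ];        // assumed effect of KERNEL_STORAGE(...)

static void example_startup( void ) {
    __scheduler_lock = (__scheduler_RWLock_t *)&storage___scheduler_lock;     // bind the global pointer to the static storage
    (*__scheduler_lock){};                                                    // run the CFA constructor in place
}

static void example_shutdown( void ) {
    ^(*__scheduler_lock){};                                                   // run the destructor, in reverse order of construction
}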
libcfa/src/concurrency/kernel.hfa
--- libcfa/src/concurrency/kernel.hfa (r61d7bec)
+++ libcfa/src/concurrency/kernel.hfa (rb388ee81)
@@ -125 +125 @@
 //-----------------------------------------------------------------------------
 // Cluster Tools
-
-// Cells use by the reader writer lock
-// while not generic it only relies on a opaque pointer
-struct __processor_id;
-
-// Reader-Writer lock protecting the ready-queue
-// while this lock is mostly generic some aspects
-// have been hard-coded to for the ready-queue for
-// simplicity and performance
-struct __clusterRWLock_t {
-    // total cachelines allocated
-    unsigned int max;
-
-    // cachelines currently in use
-    volatile unsigned int alloc;
-
-    // cachelines ready to itereate over
-    // (!= to alloc when thread is in second half of doregister)
-    volatile unsigned int ready;
-
-    // writer lock
-    volatile bool lock;
-
-    // data pointer
-    __processor_id * data;
-};
-
-void ?{}(__clusterRWLock_t & this);
-void ^?{}(__clusterRWLock_t & this);
 
 // Intrusives lanes which are used by the relaxed ready queue
@@ -236 +207 @@
 // Cluster
 struct cluster {
-    // Ready queue locks
-    __clusterRWLock_t ready_lock;
-
     // Ready queue for threads
     __ready_queue_t ready_queue;
libcfa/src/concurrency/kernel_private.hfa
--- libcfa/src/concurrency/kernel_private.hfa (r61d7bec)
+++ libcfa/src/concurrency/kernel_private.hfa (rb388ee81)
@@ -106 +106 @@
 // Cluster lock API
 //=======================================================================
+// Cells use by the reader writer lock
+// while not generic it only relies on a opaque pointer
 struct __attribute__((aligned(64))) __processor_id {
     processor * volatile handle;
@@ -113 +115 @@
 // Lock-Free registering/unregistering of threads
 // Register a processor to a given cluster and get its unique id in return
-unsigned doregister2( struct cluster * cltr, struct processor * proc );
+unsigned doregister( struct processor * proc );
 
 // Unregister a processor from a given cluster using its id, getting back the original pointer
-void unregister2( struct cluster * cltr, struct processor * proc );
+void unregister( struct processor * proc );
 
 //=======================================================================
@@ -146 +148 @@
 
 //-----------------------------------------------------------------------
+// Reader-Writer lock protecting the ready-queues
+// while this lock is mostly generic some aspects
+// have been hard-coded to for the ready-queue for
+// simplicity and performance
+struct __scheduler_RWLock_t {
+    // total cachelines allocated
+    unsigned int max;
+
+    // cachelines currently in use
+    volatile unsigned int alloc;
+
+    // cachelines ready to itereate over
+    // (!= to alloc when thread is in second half of doregister)
+    volatile unsigned int ready;
+
+    // writer lock
+    volatile bool lock;
+
+    // data pointer
+    __processor_id * data;
+};
+
+void ?{}(__scheduler_RWLock_t & this);
+void ^?{}(__scheduler_RWLock_t & this);
+
+extern __scheduler_RWLock_t * __scheduler_lock;
+
+//-----------------------------------------------------------------------
 // Reader side : acquire when using the ready queue to schedule but not
 // creating/destroying queues
-static inline void ready_schedule_lock( struct cluster * cltr, struct processor * proc) with(cltr->ready_lock) {
+static inline void ready_schedule_lock( struct processor * proc) with(*__scheduler_lock) {
     unsigned iproc = proc->id;
     /*paranoid*/ verify(data[iproc].handle == proc);
@@ -167 +197 @@
 }
 
-static inline void ready_schedule_unlock( struct cluster * cltr, struct processor * proc) with(cltr->ready_lock) {
+static inline void ready_schedule_unlock( struct processor * proc) with(*__scheduler_lock) {
     unsigned iproc = proc->id;
     /*paranoid*/ verify(data[iproc].handle == proc);
@@ -178 +208 @@
 // Writer side : acquire when changing the ready queue, e.g. adding more
 // queues or removing them.
-uint_fast32_t ready_mutate_lock( struct cluster & cltr );
-
-void ready_mutate_unlock( struct cluster & cltr, uint_fast32_t /* value returned by lock */ );
+uint_fast32_t ready_mutate_lock( void );
+
+void ready_mutate_unlock( uint_fast32_t /* value returned by lock */ );
 
 //=======================================================================
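Taken together, the header now exposes a single global lock with a reader side for scheduling and a writer side for resizing the ready-queue. A minimal usage sketch of the two sides, based only on the declarations above; the example_* functions are hypothetical, and the real callers are in kernel.cfa and ready_queue.cfa.

// Illustrative only -- assumes the declarations from kernel_private.hfa are in scope.

// Reader side : many processors may schedule concurrently.
static void example_schedule( struct processor * proc, struct cluster * cltr, $thread * thrd ) {
    ready_schedule_lock( proc );    // non-exclusive acquire, keyed by the processor's id
    push( cltr, thrd );             // safe against concurrent grow/shrink of the queues
    ready_schedule_unlock( proc );
}

// Writer side : exclusive, used only when the set of queues changes.
static void example_resize( void ) {
    uint_fast32_t last_size = ready_mutate_lock();   // blocks out all readers
    // ... add or remove ready-queue lanes here ...
    ready_mutate_unlock( last_size );                // pass back the value returned by the lock
}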
libcfa/src/concurrency/ready_queue.cfa
--- libcfa/src/concurrency/ready_queue.cfa (r61d7bec)
+++ libcfa/src/concurrency/ready_queue.cfa (rb388ee81)
@@ -29 +29 @@
 // fall back to a magic number
 #ifndef __CFA_MAX_PROCESSORS__
-    #define __CFA_MAX_PROCESSORS__ 128
+    #define __CFA_MAX_PROCESSORS__ 1024
 #endif
 
@@ -57 +57 @@
 // Cluster wide reader-writer lock
 //=======================================================================
-void ?{}(__clusterRWLock_t & this) {
+void ?{}(__scheduler_RWLock_t & this) {
     this.max = __max_processors();
     this.alloc = 0;
@@ -70 +70 @@
 
 }
-void ^?{}(__clusterRWLock_t & this) {
+void ^?{}(__scheduler_RWLock_t & this) {
     free(this.data);
 }
@@ -81 +81 @@
 //=======================================================================
 // Lock-Free registering/unregistering of threads
-unsigned doregister2( struct cluster * cltr, struct processor * proc ) with(cltr->ready_lock) {
-    __cfadbg_print_safe(ready_queue, "Kernel : Registering proc %p with cluster %p\n", proc, cltr);
+unsigned doregister( struct processor * proc ) with(*__scheduler_lock) {
+    __cfadbg_print_safe(ready_queue, "Kernel : Registering proc %p for RW-Lock\n", proc);
 
     // Step - 1 : check if there is already space in the data
@@ -99 +99 @@
     }
 
-    if(max <= alloc) abort("Trying to create more than %ud processors", cltr->ready_lock.max);
+    if(max <= alloc) abort("Trying to create more than %ud processors", __scheduler_lock->max);
 
     // Step - 2 : F&A to get a new spot in the array.
     uint_fast32_t n = __atomic_fetch_add(&alloc, 1, __ATOMIC_SEQ_CST);
-    if(max <= n) abort("Trying to create more than %ud processors", cltr->ready_lock.max);
+    if(max <= n) abort("Trying to create more than %ud processors", __scheduler_lock->max);
 
     // Step - 3 : Mark space as used and then publish it.
@@ -125 +125 @@
 }
 
-void unregister2( struct cluster * cltr, struct processor * proc ) with(cltr->ready_lock) {
+void unregister( struct processor * proc ) with(*__scheduler_lock) {
     unsigned id = proc->id;
     /*paranoid*/ verify(id < ready);
@@ -137 +137 @@
 // Writer side : acquire when changing the ready queue, e.g. adding more
 // queues or removing them.
-uint_fast32_t ready_mutate_lock( struct cluster & cltr ) with(cltr.ready_lock) {
+uint_fast32_t ready_mutate_lock( void ) with(*__scheduler_lock) {
     // Step 1 : lock global lock
     // It is needed to avoid processors that register mid Critical-Section
@@ -155 +155 @@
 }
 
-void ready_mutate_unlock( struct cluster & cltr, uint_fast32_t last_s ) with(cltr.ready_lock) {
+void ready_mutate_unlock( uint_fast32_t last_s ) with(*__scheduler_lock) {
     // Step 1 : release local locks
     // This must be done while the global lock is held to avoid
@@ -811 +811 @@
 void ready_queue_grow  (struct cluster * cltr) {
     // Lock the RWlock so no-one pushes/pops while we are changing the queue
-    uint_fast32_t last_size = ready_mutate_lock( *cltr );
+    uint_fast32_t last_size = ready_mutate_lock();
 
     __cfadbg_print_safe(ready_queue, "Kernel : Growing ready queue\n");
@@ -858 +858 @@
 
     // Unlock the RWlock
-    ready_mutate_unlock( *cltr, last_size );
+    ready_mutate_unlock( last_size );
@@ -864 +864 @@
 void ready_queue_shrink(struct cluster * cltr) {
     // Lock the RWlock so no-one pushes/pops while we are changing the queue
-    uint_fast32_t last_size = ready_mutate_lock( *cltr );
+    uint_fast32_t last_size = ready_mutate_lock();
 
     __cfadbg_print_safe(ready_queue, "Kernel : Shrinking ready queue\n");
@@ -956 +956 @@
 
     // Unlock the RWlock
-    ready_mutate_unlock( *cltr, last_size );
+    ready_mutate_unlock( last_size );
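The body of doregister is mostly elided in the hunks above, but its step comments outline the lock-free registration scheme: reuse a previously freed cell if one exists, otherwise fetch-and-add a fresh slot, then publish the processor's handle in it. A stripped-down sketch of that fetch-and-add idiom follows; the names are hypothetical and the reuse path (Step 1) and paranoia checks are omitted, so this is not the changeset's actual code.

// Illustrative only -- not the real doregister().
struct example_cell { struct processor * volatile handle; };

static struct example_cell * cells;     // per-processor cells, sized at startup
static volatile unsigned int alloc;     // cells handed out so far
static unsigned int max;                // capacity, fixed when the lock is constructed

static unsigned example_register( struct processor * proc ) {
    // Step - 2 : F&A to get a new spot in the array.
    unsigned n = __atomic_fetch_add(&alloc, 1, __ATOMIC_SEQ_CST);
    if(max <= n) abort("Trying to create more than %ud processors", max);

    // Step - 3 : Mark space as used and then publish it.
    cells[n].handle = proc;
    return n;                           // becomes proc->id, used to index the lock's reader cells
}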