Changeset 64a7146
- Timestamp: Jun 19, 2020, 3:49:43 PM
- Branches: ADT, arm-eh, ast-experimental, enum, forall-pointer-decay, jacob/cs343-translation, master, new-ast, new-ast-unique-expr, pthread-emulation, qualifiedEnum
- Children: 68f36f4
- Parents: 04b5cef
- Location: libcfa/src
- Files: 5 edited
Legend:
- Unmodified: lines with no prefix
- Added: lines prefixed with `+`
- Removed: lines prefixed with `-`
libcfa/src/concurrency/kernel.cfa
```diff
--- libcfa/src/concurrency/kernel.cfa (r04b5cef)
+++ libcfa/src/concurrency/kernel.cfa (r64a7146)
@@ kernel scheduling declarations @@
 // Kernel Scheduling logic
 static $thread * __next_thread(cluster * this);
+static bool __has_next_thread(cluster * this);
 static void __run_thread(processor * this, $thread * dst);
-static $thread * __halt(processor * this);
-static bool __wake_one(cluster * cltr);
 static bool __wake_proc(processor *);
+static bool __wake_one(struct __processor_id_t * id, cluster * cltr);
+static void __halt(processor * this);

 //-----------------------------------------------------------------------------
@@ cluster constructor @@
 #endif

-    procs{ __get };
-    idles{ __get };
     threads{ __get };

@@ processor startup @@
     if(this != mainProcessor) {
         this->id = doregister((__processor_id_t*)this);
-        ready_queue_grow( this->cltr );
+        // Lock the RWlock so no-one pushes/pops while we are changing the queue
+        uint_fast32_t last_size = ready_mutate_lock();
+
+        // Adjust the ready queue size
+        ready_queue_grow( this->cltr );
+
+        // Unlock the RWlock
+        ready_mutate_unlock( last_size );
     }

@@ processor main loop @@
         // Try to get the next thread
         readyThread = __next_thread( this->cltr );
-
-        // If no ready thread
-        if( readyThread == 0p ) {
-            // Block until a thread is ready
-            readyThread = __halt(this);
-        }

         // Check if we actually found a thread
@@ processor main loop (continued) @@
             /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
         }
+        else {
+            // Block until a thread is ready
+            __halt(this);
+        }
     }

@@ processor shutdown @@
     // unregister the processor unless it's the main thread which is handled in the boot sequence
     if(this != mainProcessor) {
-        ready_queue_shrink( this->cltr );
+        // Lock the RWlock so no-one pushes/pops while we are changing the queue
+        uint_fast32_t last_size = ready_mutate_lock();
+
+        // Adjust the ready queue size
+        ready_queue_shrink( this->cltr );
+
+        // Make sure we aren't on the idle queue
+        unsafe_remove( this->cltr->idles, this );
+        Link(processor) * link = &this->cltr->idles.stack;
+        for() {
+            processor * next = link->top;
+            if( next == this ) {
+                link->top = getNext(this)->top;
+                break;
+            }
+            if( next == 0p ) break;
+            link = getNext(next);
+        }
+
+        // Unlock the RWlock
+        ready_mutate_unlock( last_size );
+
+        // Finally we don't need the read_lock any more
         unregister((__processor_id_t*)this);
     }

@@ __schedule_thread @@
     push( thrd->curr_cluster, thrd );

-    __wake_one( thrd->curr_cluster);
+    __wake_one(id, thrd->curr_cluster);
     ready_schedule_unlock( id );

@@ __next_thread / new __has_next_thread @@
     /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
     return head;
+}
+
+// KERNEL ONLY
+static bool __has_next_thread(cluster * this) with( *this ) {
+    /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
+
+    ready_schedule_lock  ( (__processor_id_t*)kernelTLS.this_processor );
+    bool not_empty = query( this );
+    ready_schedule_unlock( (__processor_id_t*)kernelTLS.this_processor );
+
+    /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
+    return not_empty;
 }

@@ Kernel Idle Sleep @@
 //=============================================================================================
 // Kernel Idle Sleep
 //=============================================================================================
-static $thread * __halt(processor * this) with( *this ) {
-    // if( do_terminate ) return 0p;
-
-    // // First, lock the cluster idle
-    // lock( cltr->idle_lock __cfaabi_dbg_ctx2 );
-
-    // // Check if we can find a thread
-    // if( $thread * found = __next_thread( cltr ) ) {
-    //     unlock( cltr->idle_lock );
-    //     return found;
-    // }
-
-    // // Move this processor from the active list to the idle list
-    // move_to_front(cltr->procs, cltr->idles, *this);
-
-    // // Unlock the idle lock so we don't go to sleep with a lock
-    // unlock (cltr->idle_lock);
-
-    // // We are ready to sleep
-    // __cfadbg_print_safe(runtime_core, "Kernel : Processor %p ready to sleep\n", this);
-    // wait( idle );
-
-    // // We have woken up
-    // __cfadbg_print_safe(runtime_core, "Kernel : Processor %p woke up and ready to run\n", this);
-
-    // // Get ourself off the idle list
-    // with( *cltr ) {
-    //     lock (idle_lock __cfaabi_dbg_ctx2);
-    //     move_to_front(idles, procs, *this);
-    //     unlock(idle_lock);
-    // }
-
-    // Don't check the ready queue again, we may not be in a position to run a thread
-    return 0p;
-}
-
 // Wake a thread from the front if there are any
-static bool __wake_one(cluster * this) {
-    // // First, lock the cluster idle
-    // lock( this->idle_lock __cfaabi_dbg_ctx2 );
-
-    // // Check if there is someone to wake up
-    // if( !this->idles.head ) {
-    //     // Nope unlock and return false
-    //     unlock( this->idle_lock );
-    //     return false;
-    // }
-
-    // // Wake them up
-    // __cfadbg_print_safe(runtime_core, "Kernel : waking Processor %p\n", this->idles.head);
-    // /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
-    // post( this->idles.head->idle );
-
-    // // Unlock and return true
-    // unlock( this->idle_lock );
-    // return true;
-
-    return false;
+static bool __wake_one(struct __processor_id_t * id, cluster * this) {
+    /* paranoid */ verify( ready_schedule_islocked( id ) );
+
+    // Check if there is a sleeping processor
+    processor * p = pop(this->idles);
+
+    // If no one is sleeping, we are done
+    if( 0p == p ) return false;
+
+    // We found a processor, wake it up
+    post( p->idle );
+
+    return true;
 }

 // Unconditionnaly wake a thread
 static bool __wake_proc(processor * this) {
-    // __cfadbg_print_safe(runtime_core, "Kernel : waking Processor %p\n", this);
-
-    // disable_interrupts();
-    // /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
-    // bool ret = post( this->idle );
-    // enable_interrupts( __cfaabi_dbg_ctx );
-
-    // return ret;
-
-    return false;
+    __cfadbg_print_safe(runtime_core, "Kernel : waking Processor %p\n", this);
+
+    disable_interrupts();
+    /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
+    bool ret = post( this->idle );
+    enable_interrupts( __cfaabi_dbg_ctx );
+
+    return ret;
+}
+
+static void __halt(processor * this) with( *this ) {
+    if( do_terminate ) return;
+
+    // Push self to queue
+    push(cltr->idles, *this);
+
+    // Makre sure we don't miss a thread
+    if( __has_next_thread(cltr) ) {
+        // A thread was posted, make sure a processor is woken up
+        struct __processor_id_t *id = (struct __processor_id_t *) this;
+        ready_schedule_lock  ( id );
+        __wake_one( id, cltr );
+        ready_schedule_unlock( id );
+    }
+
+    wait( idle );
 }

@@ doregister / unregister (cluster, processor) @@
 void doregister( cluster * cltr, processor * proc ) {
-    lock (cltr->idle_lock __cfaabi_dbg_ctx2);
-    cltr->nprocessors += 1;
-    push_front(cltr->procs, *proc);
-    unlock (cltr->idle_lock);
+    // lock (cltr->idle_lock __cfaabi_dbg_ctx2);
+    // cltr->nprocessors += 1;
+    // push_front(cltr->procs, *proc);
+    // unlock (cltr->idle_lock);
 }

 void unregister( cluster * cltr, processor * proc ) {
-    lock (cltr->idle_lock __cfaabi_dbg_ctx2);
-    remove(cltr->procs, *proc );
-    cltr->nprocessors -= 1;
-    unlock(cltr->idle_lock);
+    // lock (cltr->idle_lock __cfaabi_dbg_ctx2);
+    // remove(cltr->procs, *proc );
+    // cltr->nprocessors -= 1;
+    // unlock(cltr->idle_lock);
 }
```
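The heart of this changeset is the idle-sleep handshake in `__halt` and `__wake_one` above: a processor pushes itself onto the cluster's lock-free `idles` stack *before* re-checking the ready queue via `__has_next_thread`, while `__schedule_thread` pushes work onto the ready queue *before* popping a sleeper to wake. In every interleaving at least one side observes the other, which closes the lost-wakeup window between "queue looks empty" and "go to sleep". Below is a minimal, self-contained C sketch of that pattern, assuming POSIX semaphores; every name in it (`waiter`, `halt`, `schedule_one`, `ready_count`) is an illustrative stand-in, not the CFA runtime API.

```c
#include <semaphore.h>
#include <stdatomic.h>
#include <stddef.h>

typedef struct waiter {
    sem_t idle;                 /* semaphore the idle processor blocks on */
    struct waiter * next;       /* intrusive link for the idle stack      */
} waiter;

static _Atomic(waiter *) idles = NULL;   /* Treiber-style idle stack      */
static atomic_int ready_count = 0;       /* stand-in for the ready queue  */

static void idle_push(waiter * w) {
    w->next = atomic_load(&idles);
    while (!atomic_compare_exchange_weak(&idles, &w->next, w)) {}
}

static waiter * idle_pop(void) {
    waiter * w = atomic_load(&idles);
    while (w && !atomic_compare_exchange_weak(&idles, &w, w->next)) {}
    return w;                   /* NOTE: ignores ABA; fine for a sketch   */
}

/* Processor side, analogous to __halt(): publish first, then re-check. */
static void halt(waiter * self) {
    idle_push(self);                      /* 1. announce we are idle     */
    if (atomic_load(&ready_count) > 0) {  /* 2. did work sneak in?       */
        waiter * w = idle_pop();          /*    then wake a sleeper      */
        if (w) sem_post(&w->idle);        /*    (possibly ourselves)     */
    }
    sem_wait(&self->idle);                /* 3. sleep until posted       */
}

/* Scheduler side, analogous to push() + __wake_one(). */
static void schedule_one(void) {
    atomic_fetch_add(&ready_count, 1);    /* 1. publish the work         */
    waiter * w = idle_pop();              /* 2. wake a sleeper, if any   */
    if (w) sem_post(&w->idle);
}

int main(void) {
    waiter w;
    sem_init(&w.idle, 0, 0);
    schedule_one();  /* work arrives before the processor sleeps...      */
    halt(&w);        /* ...so the re-check in halt() wakes it right back */
    sem_destroy(&w.idle);
    return 0;
}
```

The Treiber-style `idle_pop` in the sketch glosses over the ABA problem, and unlike the real `__wake_one` it does not run under the scheduler's reader lock; both simplifications are acceptable only because this is a sketch of the handshake, not of the stack.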
libcfa/src/concurrency/kernel.hfa
```diff
--- libcfa/src/concurrency/kernel.hfa (r04b5cef)
+++ libcfa/src/concurrency/kernel.hfa (r64a7146)
@@ includes @@
 #include "coroutine.hfa"

+#include "containers/stackLockFree.hfa"
+
 extern "C" {
 #include <pthread.h>

@@ struct processor: link fields @@
     // Link lists fields
-    struct __dbg_node_cltr {
-        processor * next;
-        processor * prev;
-    } node;
+    Link(processor) link;

     #ifdef __CFA_DEBUG__

@@ processor helpers @@
 static inline void ?{}(processor & this, const char name[]) { this{name, *mainCluster }; }

-static inline [processor *&, processor *& ] __get( processor & this ) __attribute__((const)) { return this.node.[next, prev]; }
+static inline Link(processor) * getNext( processor * this ) { return &this->link; }

 //-----------------------------------------------------------------------------

@@ struct cluster: idle list @@
     Duration preemption_rate;

-    // List of processors
-    __spinlock_t idle_lock;
-    __dllist_t(struct processor) procs;
-    __dllist_t(struct processor) idles;
+    // List of idle processors
+    StackLF(processor) idles;
     unsigned int nprocessors;
```
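With the move to `StackLF`, the processor now embeds an intrusive `Link(processor)` and exposes it through `getNext`, the hook the container's `forall` constraint requires; the old `__dbg_node_cltr`/`__get` pair played the same role for the doubly-linked list. A hand-monomorphized C illustration of the convention (the real code is polymorphic over `T`, and the names here are illustrative only):

```c
/* Illustrative C monomorphization of the intrusive-link convention:
 * the element embeds its own link and exposes it through getNext(),
 * so the container never allocates per-node memory. */
typedef struct processor processor;
typedef struct { processor * top; } Link_processor;

struct processor {
    /* ... other processor fields ... */
    Link_processor link;   /* embedded link, replacing __dbg_node_cltr */
};

/* The hook the stack relies on: hand the container our embedded link. */
static inline Link_processor * getNext(processor * this) {
    return &this->link;
}
```

Because the link lives inside the element, pushing a processor onto `cltr->idles` needs no per-node allocation, which keeps the idle path heap-free.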
libcfa/src/concurrency/kernel_private.hfa
```diff
--- libcfa/src/concurrency/kernel_private.hfa (r04b5cef)
+++ libcfa/src/concurrency/kernel_private.hfa (r64a7146)
@@ struct __scheduler_lock_id_t @@
 // while not generic it only relies on a opaque pointer
 struct __attribute__((aligned(128))) __scheduler_lock_id_t {
+    // Spin lock used as the underlying lock
+    volatile bool lock;
+
+    // Handle pointing to the proc owning this cell
+    // Used for allocating cells and debugging
     __processor_id_t * volatile handle;
-    volatile bool lock;
-};
+
+    #ifdef __CFA_WITH_VERIFY__
+        // Debug, check if this is owned for reading
+        bool owned;
+    #endif
+};
+
+static_assert( sizeof(struct __scheduler_lock_id_t) <= __alignof(struct __scheduler_lock_id_t));

 // Lock-Free registering/unregistering of threads

@@ ready_schedule_lock @@
     __atomic_acquire( &data[iproc].lock );
     /*paranoid*/ verify(data[iproc].lock);
+
+    #ifdef __CFA_WITH_VERIFY__
+        // Debug, check if this is owned for reading
+        data[iproc].owned = true;
+    #endif
 }

@@ ready_schedule_unlock @@
     /*paranoid*/ verify(iproc < ready);
     /*paranoid*/ verify(data[iproc].lock);
+    /*paranoid*/ verify(data[iproc].owned);
+    #ifdef __CFA_WITH_VERIFY__
+        // Debug, check if this is owned for reading
+        data[iproc].owned = false;
+    #endif
     __atomic_unlock(&data[iproc].lock);
 }
+
+#ifdef __CFA_WITH_VERIFY__
+    static inline bool ready_schedule_islocked( struct __processor_id_t * proc) {
+        return __scheduler_lock->data[proc->id].owned;
+    }
+
+    static inline bool ready_mutate_islocked() {
+        return __scheduler_lock->lock;
+    }
+#endif

@@ Ready-Queue API @@
 //=======================================================================
 // Ready-Queue API
+//-----------------------------------------------------------------------
+// pop thread from the ready queue of a cluster
+// returns 0p if empty
+__attribute__((hot)) bool query(struct cluster * cltr);
+
 //-----------------------------------------------------------------------
 // push thread onto a ready queue for a cluster
```
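Two small additions above are worth calling out: a debug-only `owned` flag on each reader cell lets `ready_schedule_islocked` and `ready_mutate_islocked` back the new `verify` assertions at zero cost in release builds, and the `static_assert` guarantees a cell never spans past its 128-byte alignment, so adjacent cells cannot false-share. A minimal C sketch of the ownership-flag idiom, with `WITH_VERIFY` standing in for `__CFA_WITH_VERIFY__` and every other name hypothetical:

```c
#include <assert.h>
#include <stdatomic.h>
#include <stdbool.h>

#define WITH_VERIFY 1   /* stand-in for __CFA_WITH_VERIFY__ */

typedef struct {
    atomic_bool lock;    /* the real lock, present in all builds     */
#if WITH_VERIFY
    bool owned;          /* shadow flag, written only under the lock */
#endif
} dbg_lock;

static void dbg_acquire(dbg_lock * l) {
    while (atomic_exchange(&l->lock, true)) {}  /* spin until acquired */
#if WITH_VERIFY
    l->owned = true;     /* safe: we now hold the lock */
#endif
}

static void dbg_release(dbg_lock * l) {
#if WITH_VERIFY
    assert(l->owned);    /* mirrors verify(data[iproc].owned) */
    l->owned = false;
#endif
    atomic_store(&l->lock, false);
}

#if WITH_VERIFY
/* Queried from assertions elsewhere, like ready_schedule_islocked(). */
static bool dbg_islocked(dbg_lock * l) { return l->owned; }
#endif

int main(void) {
    dbg_lock l = { .lock = false, .owned = false };
    dbg_acquire(&l);
    assert(dbg_islocked(&l));
    dbg_release(&l);
    return 0;
}
```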
libcfa/src/concurrency/ready_queue.cfa
```diff
--- libcfa/src/concurrency/ready_queue.cfa (r04b5cef)
+++ libcfa/src/concurrency/ready_queue.cfa (r64a7146)
@@ __scheduler_lock_id_t constructor @@
     this.handle = proc;
     this.lock = false;
+    #ifdef __CFA_WITH_VERIFY__
+        this.owned = false;
+    #endif

@@ doregister: cell alignment checks @@
         && __atomic_compare_exchange_n( &data[i].handle, &null, proc, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) {
         /*paranoid*/ verify(i < ready);
-        /*paranoid*/ verify( __alignof__(data[i]) == cache_line_size);
+        /*paranoid*/ verify(0 == (__alignof__(data[i]) % cache_line_size));
         /*paranoid*/ verify((((uintptr_t)&data[i]) % cache_line_size) == 0);
         return i;

@@ new query @@
 //-----------------------------------------------------------------------
+__attribute__((hot)) bool query(struct cluster * cltr) {
+    return query(cltr->ready_queue.snzi);
+}
+
+//-----------------------------------------------------------------------
 __attribute__((hot)) bool push(struct cluster * cltr, struct $thread * thrd) with (cltr->ready_queue) {
     __cfadbg_print_safe(ready_queue, "Kernel : Pushing %p on cluster %p\n", thrd, cltr);

@@ ready_queue_grow @@
 // Grow the ready queue
 void ready_queue_grow (struct cluster * cltr) {
-    // Lock the RWlock so no-one pushes/pops while we are changing the queue
-    uint_fast32_t last_size = ready_mutate_lock();
-
+    /* paranoid */ verify( ready_mutate_islocked() );
     __cfadbg_print_safe(ready_queue, "Kernel : Growing ready queue\n");

@@ ready_queue_grow (end) @@
     __cfadbg_print_safe(ready_queue, "Kernel : Growing ready queue done\n");

-    // Unlock the RWlock
-    ready_mutate_unlock( last_size );
+    /* paranoid */ verify( ready_mutate_islocked() );
 }

@@ ready_queue_shrink @@
 // Shrink the ready queue
 void ready_queue_shrink(struct cluster * cltr) {
-    // Lock the RWlock so no-one pushes/pops while we are changing the queue
-    uint_fast32_t last_size = ready_mutate_lock();
-
+    /* paranoid */ verify( ready_mutate_islocked() );
     __cfadbg_print_safe(ready_queue, "Kernel : Shrinking ready queue\n");

@@ ready_queue_shrink (end) @@
     __cfadbg_print_safe(ready_queue, "Kernel : Shrinking ready queue done\n");
-
-    // Unlock the RWlock
-    ready_mutate_unlock( last_size );
-}
+    /* paranoid */ verify( ready_mutate_islocked() );
+}
```
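Note the inversion of locking responsibility: `ready_queue_grow` and `ready_queue_shrink` previously took the ready-queue RW lock themselves, but the caller in `kernel.cfa` now needs to hold the write lock across both the resize and its own idle-list surgery, so the functions only assert the precondition. A toy C sketch of this refactor, with every name (`mutate_lock`, `queue_grow`, `register_processor`) invented for illustration:

```c
#include <assert.h>

static int rwlock_writer = 0;   /* toy stand-in for the ready-queue RW lock */

static unsigned mutate_lock(void)     { rwlock_writer = 1; return 0; }
static void mutate_unlock(unsigned s) { (void)s; rwlock_writer = 0; }

/* After the change: the callee asserts instead of locking,
 * mirroring verify( ready_mutate_islocked() ). */
static void queue_grow(void) {
    assert(rwlock_writer);
    /* ... resize the queue under the caller's lock ... */
    assert(rwlock_writer);
}

/* The caller owns one critical section covering everything. */
static void register_processor(void) {
    unsigned s = mutate_lock();
    queue_grow();                 /* the resize...                     */
    /* ... plus the caller's own structural edits ... */
    mutate_unlock(s);
}

int main(void) {
    register_processor();
    return 0;
}
```

Pushing the lock out to the caller lets several structural changes be bundled into a single critical section instead of each taking the write lock separately.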
libcfa/src/containers/stackLockFree.hfa
```diff
--- libcfa/src/containers/stackLockFree.hfa (r04b5cef)
+++ libcfa/src/containers/stackLockFree.hfa (r64a7146)
@@ StackLF: new unsafe_remove @@
 forall( otype T | { Link(T) * getNext( T * ); } ) {
     struct StackLF {
         Link(T) stack;
     }; // StackLF

@@ after pop @@
         } // for
     } // pop
+
+    bool unsafe_remove( StackLF(T) & this, T * node ) with(this) {
+        Link(T) * link = &stack;
+        for() {
+            T * next = link->top;
+            if( next == node ) {
+                link->top = getNext( node )->top;
+                return true;
+            }
+            if( next == 0p ) return false;
+            link = getNext(next);
+        }
+    }
 } // distribution
} // distribution
```
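`unsafe_remove` earns its name: it walks and splices the links with plain loads and stores, so it is only correct while the caller excludes every concurrent `push` and `pop`, which `kernel.cfa` does by invoking it during processor shutdown under the ready-queue write lock. A hand-monomorphized C version with a tiny driver, purely for illustration (all names are stand-ins for the polymorphic CFA code):

```c
#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

typedef struct node node;
typedef struct { node * top; } Link;
struct node { Link link; };

static Link * getNext(node * n) { return &n->link; }

typedef struct { Link stack; } StackLF;

/* Walk the chain of links; when the target is found, splice it out by
 * pointing the predecessor's link past it. No synchronization: the
 * caller must hold off all concurrent pushes and pops. */
static bool unsafe_remove(StackLF * this, node * target) {
    Link * link = &this->stack;
    for (;;) {
        node * next = link->top;
        if (next == target) {
            link->top = getNext(target)->top;
            return true;
        }
        if (next == NULL) return false;  /* reached the bottom */
        link = getNext(next);
    }
}

int main(void) {
    node a = {{0}}, b = {{0}};
    StackLF s = { { &a } };           /* stack is a -> b        */
    a.link.top = &b;
    assert(unsafe_remove(&s, &b));    /* splices out the node   */
    assert(!unsafe_remove(&s, &b));   /* second try finds nothing */
    return 0;
}
```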