Changeset 504a7dc
- Timestamp: May 12, 2020, 1:32:45 PM (5 years ago)
- Branches: ADT, arm-eh, ast-experimental, enum, forall-pointer-decay, jacob/cs343-translation, master, new-ast, new-ast-unique-expr, pthread-emulation, qualifiedEnum
- Children: 4fa44e7
- Parents: 6a490b2
- Location: libcfa/src
- Files: 6 edited
Legend:
- lines carried over unchanged have no +/- marker
- added lines are prefixed with +
- removed lines are prefixed with -
libcfa/src/bits/debug.hfa
(r6a490b2 → r504a7dc)

@@ -52 +52 @@
     || defined(__CFA_DEBUG_PRINT_IO__) || defined(__CFA_DEBUG_PRINT_IO_CORE__) \
     || defined(__CFA_DEBUG_PRINT_MONITOR__) || defined(__CFA_DEBUG_PRINT_PREEMPTION__) \
-    || defined(__CFA_DEBUG_PRINT_RUNTIME_CORE__) || defined(__CFA_DEBUG_PRINT_EXCEPTION__)
+    || defined(__CFA_DEBUG_PRINT_RUNTIME_CORE__) || defined(__CFA_DEBUG_PRINT_EXCEPTION__) \
+    || defined(__CFA_DEBUG_PRINT_READY_QUEUE__)
 #include <stdio.h>
 #include <unistd.h>
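Background note: the #if chain above is the runtime's per-subsystem debug-print switch; defining one of the __CFA_DEBUG_PRINT_*__ macros (here, the new __CFA_DEBUG_PRINT_READY_QUEUE__) turns on that subsystem's prints and pulls in the stdio/unistd headers they need. A minimal plain-C sketch of the same compile-time gating idea, with purely illustrative names rather than the CFA runtime's actual macros:

    #include <stdio.h>

    /* Compile with -DDEBUG_PRINT_READY_QUEUE to enable this subsystem's prints. */
    #if defined(DEBUG_PRINT_READY_QUEUE)
        #define dbg_ready_queue(...) fprintf(stderr, __VA_ARGS__)
    #else
        #define dbg_ready_queue(...) ((void)0)   /* disabled prints compile away */
    #endif

    int main(void) {
        dbg_ready_queue("ready_queue: lane count = %d\n", 4);
        return 0;
    }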
libcfa/src/concurrency/kernel.cfa
(r6a490b2 → r504a7dc)

@@ -120 +120 @@
 static void __run_thread(processor * this, $thread * dst);
 static $thread * __halt(processor * this);
-static bool __wake_one(cluster * cltr, bool was_empty);
+static bool __wake_one(cluster * cltr);
 static bool __wake_proc(processor *);

@@ -299 +299 @@
     // register the processor unless it's the main thread which is handled in the boot sequence
     if(this != mainProcessor) {
-        this->id = doregister(this->cltr, this);
+        this->id = doregister2(this->cltr, this);
         ready_queue_grow( this->cltr );
     }

+    doregister(this->cltr, this);

     {

@@ -325 +326 @@
     /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
     /* paranoid */ verifyf( readyThread->state == Ready || readyThread->preempted != __NO_PREEMPTION, "state : %d, preempted %d\n", readyThread->state, readyThread->preempted);
-    /* paranoid */ verifyf( readyThread->next == 0p, "Expected null got %p", readyThread->next );
+    /* paranoid */ verifyf( readyThread->link.next == 0p, "Expected null got %p", readyThread->link.next );

     // We found a thread run it

@@ -337 +338 @@
     }

+    unregister(this->cltr, this);

     V( this->terminated );

@@ -342 +345 @@
     if(this != mainProcessor) {
         ready_queue_shrink( this->cltr );
-        unregister(this->cltr, this);
+        unregister2(this->cltr, this);
     }
     else {

@@ -610 +613 @@
     /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
     /* paranoid */ #if defined( __CFA_WITH_VERIFY__ )
-    /* paranoid */ if( thrd->state == Blocked || thrd->state == Start ) assertf( thrd->preempted == __NO_PREEMPTION,
-    /* paranoid */ if( thrd->preempted != __NO_PREEMPTION ) assertf(thrd->state == Active || thrd->state == Rerun,
+    /* paranoid */ if( thrd->state == Blocked || thrd->state == Start ) assertf( thrd->preempted == __NO_PREEMPTION,
+        "Error inactive thread marked as preempted, state %d, preemption %d\n", thrd->state, thrd->preempted );
+    /* paranoid */ if( thrd->preempted != __NO_PREEMPTION ) assertf(thrd->state == Active || thrd->state == Rerun,
+        "Error preempted thread marked as not currently running, state %d, preemption %d\n", thrd->state, thrd->preempted );
     /* paranoid */ #endif
     /* paranoid */ verifyf( thrd->link.next == 0p, "Expected null got %p", thrd->link.next );

@@ -620 +623 @@

     ready_schedule_lock(thrd->curr_cluster, kernelTLS.this_processor);
-    bool was_empty = push( thrd->curr_cluster, thrd );
+    push( thrd->curr_cluster, thrd );
+
+    __wake_one(thrd->curr_cluster);
     ready_schedule_unlock(thrd->curr_cluster, kernelTLS.this_processor);
-
-    __wake_one(thrd->curr_cluster, was_empty);

     /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );

@@ -718 +721 @@
     // If that is the case, abandon the preemption.
     bool preempted = false;
-    if(thrd->next == 0p) {
+    if(thrd->link.next == 0p) {
         preempted = true;
         thrd->preempted = reason;

@@ -789 +792 @@
     (*mainProcessor){};

-    mainProcessor->id = doregister(mainCluster, mainProcessor);
+    mainProcessor->id = doregister2(mainCluster, mainProcessor);

     //initialize the global state variables

@@ -844 +847 @@
     kernel_stop_preemption();

-    unregister(mainCluster, mainProcessor);
+    unregister2(mainCluster, mainProcessor);

     // Destroy the main processor and its context in reverse order of construction

@@ -909 +912 @@

 // Wake a thread from the front if there are any
-static bool __wake_one(cluster * this, __attribute__((unused)) bool force) {
-    // if we don't want to force check if we know it's false
-    // if( !this->idles.head && !force ) return false;
-
+static bool __wake_one(cluster * this) {
     // First, lock the cluster idle
     lock( this->idle_lock __cfaabi_dbg_ctx2 );

@@ -1099 +1099 @@
     cltr->nthreads -= 1;
     unlock(cltr->thread_list_lock);
+}
+
+void doregister( cluster * cltr, processor * proc ) {
+    lock      (cltr->idle_lock __cfaabi_dbg_ctx2);
+    cltr->nprocessors += 1;
+    push_front(cltr->procs, *proc);
+    unlock    (cltr->idle_lock);
+}
+
+void unregister( cluster * cltr, processor * proc ) {
+    lock  (cltr->idle_lock __cfaabi_dbg_ctx2);
+    remove(cltr->procs, *proc );
+    cltr->nprocessors -= 1;
+    unlock(cltr->idle_lock);
 }
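Background note: the scheduling change above pushes the thread and then calls __wake_one() while the ready-queue lock is still held, instead of waking a processor only when the queue "was empty". As a loose analogue of the "enqueue work, then wake at most one idle worker" pattern (not the CFA runtime's actual mechanism, which uses a cluster idle list and its own lock), a plain-C pthread sketch with hypothetical names:

    #include <pthread.h>
    #include <stddef.h>

    typedef struct work { struct work * next; } work;

    static pthread_mutex_t ready_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  ready_cond = PTHREAD_COND_INITIALIZER;
    static work * ready_head = NULL;

    /* Enqueue one item and unconditionally wake one sleeping worker. */
    void schedule(work * w) {
        pthread_mutex_lock(&ready_lock);
        w->next = ready_head;
        ready_head = w;
        pthread_cond_signal(&ready_cond);   /* wakes at most one waiter */
        pthread_mutex_unlock(&ready_lock);
    }

    /* Worker side: sleep until something is available, then take it. */
    work * next_work(void) {
        pthread_mutex_lock(&ready_lock);
        while (ready_head == NULL)
            pthread_cond_wait(&ready_cond, &ready_lock);
        work * w = ready_head;
        ready_head = w->next;
        pthread_mutex_unlock(&ready_lock);
        return w;
    }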
libcfa/src/concurrency/kernel.hfa
(r6a490b2 → r504a7dc)

@@ -93 +93 @@

     // Link lists fields
-    struct __dbg_node_proc {
-        struct processor * next;
-        struct processor * prev;
+    struct __dbg_node_cltr {
+        processor * next;
+        processor * prev;
     } node;

@@ -162 +162 @@
         // Link lists fields
         // instrusive link field for threads
-        // must be exactly as in thread_desc
+        // must be exactly as in $thread
         __thread_desc_link link;
     } before, after;

@@ -286 +286 @@

     // List of processors
-    __spinlock_t proc_list_lock;
+    __spinlock_t idle_lock;
+    __dllist_t(struct processor) procs;
     __dllist_t(struct processor) idles;
+    unsigned int nprocessors;

     // List of threads
libcfa/src/concurrency/kernel_private.hfa
(r6a490b2 → r504a7dc)

@@ -100 +100 @@
 void unregister( struct cluster * cltr, struct $thread & thrd );

+void doregister( struct cluster * cltr, struct processor * proc );
+void unregister( struct cluster * cltr, struct processor * proc );
+
 //=======================================================================
 // Cluster lock API

@@ -110 +113 @@
 // Lock-Free registering/unregistering of threads
 // Register a processor to a given cluster and get its unique id in return
-unsigned doregister( struct cluster * cltr, struct processor * proc );
+unsigned doregister2( struct cluster * cltr, struct processor * proc );

 // Unregister a processor from a given cluster using its id, getting back the original pointer
-void unregister( struct cluster * cltr, struct processor * proc );
+void unregister2( struct cluster * cltr, struct processor * proc );

 //=======================================================================

@@ -184 +187 @@
 // push thread onto a ready queue for a cluster
 // returns true if the list was previously empty, false otherwise
-__attribute__((hot)) bool push(struct cluster * cltr, struct thread_desc * thrd);
+__attribute__((hot)) bool push(struct cluster * cltr, struct $thread * thrd);

 //-----------------------------------------------------------------------
 // pop thread from the ready queue of a cluster
 // returns 0p if empty
-__attribute__((hot)) thread_desc * pop(struct cluster * cltr);
+__attribute__((hot)) struct $thread * pop(struct cluster * cltr);

 //-----------------------------------------------------------------------
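Background note: the header now separates the list-based doregister/unregister of processors on a cluster from the id-returning doregister2/unregister2 used by the lock-free ready-queue (their definitions appear in ready_queue.cfa below: the id indexes a handle array, and unregister2 clears the slot with an atomic store). A hypothetical C11 sketch of that id-returning registration idea, not the actual CFA implementation:

    #include <stdatomic.h>
    #include <stddef.h>

    #define MAX_PROCS 128
    static _Atomic(void *) handles[MAX_PROCS];   /* NULL means the slot is free */

    /* Claim a free slot; the slot index doubles as the processor id. */
    unsigned register_proc(void * proc) {
        for (;;) {
            for (unsigned i = 0; i < MAX_PROCS; i++) {
                void * expected = NULL;
                if (atomic_compare_exchange_strong(&handles[i], &expected, proc))
                    return i;
            }
            /* all slots taken: a real runtime would grow the table here */
        }
    }

    /* Release the slot so another processor can reuse the id. */
    void unregister_proc(unsigned id, void * proc) {
        if (atomic_load_explicit(&handles[id], memory_order_relaxed) == proc)
            atomic_store_explicit(&handles[id], NULL, memory_order_release);
    }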
libcfa/src/concurrency/monitor.cfa
(r6a490b2 → r504a7dc)

@@ -114 +114 @@

     // Some one else has the monitor, wait in line for it
-    /* paranoid */ verify( thrd->next == 0p );
+    /* paranoid */ verify( thrd->link.next == 0p );
     append( this->entry_queue, thrd );
-    /* paranoid */ verify( thrd->next == 1p );
+    /* paranoid */ verify( thrd->link.next == 1p );

     unlock( this->lock );

@@ -199 +199 @@

     // Some one else has the monitor, wait in line for it
-    /* paranoid */ verify( thrd->next == 0p );
+    /* paranoid */ verify( thrd->link.next == 0p );
     append( this->entry_queue, thrd );
-    /* paranoid */ verify( thrd->next == 1p );
+    /* paranoid */ verify( thrd->link.next == 1p );
     unlock( this->lock );

@@ -761 +761 @@
     $thread * new_owner = pop_head( this->entry_queue );
     /* paranoid */ verifyf( !this->owner || kernelTLS.this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this );
-    /* paranoid */ verify( !new_owner || new_owner->next == 0p );
+    /* paranoid */ verify( !new_owner || new_owner->link.next == 0p );
     __set_owner( this, new_owner );

@@ -883 +883 @@
     }

-    __cfaabi_dbg_print_safe( "Kernel : Runing %i (%p)\n", ready2run, ready2run ? (thread_desc*)node->waiting_thread : (thread_desc*)0p );
+    __cfaabi_dbg_print_safe( "Kernel : Runing %i (%p)\n", ready2run, ready2run ? (thread*)node->waiting_thread : (thread*)0p );
     return ready2run ? node->waiting_thread : 0p;
libcfa/src/concurrency/ready_queue.cfa
(r6a490b2 → r504a7dc)

@@ -15 +15 @@

 #define __cforall_thread__
+#define __CFA_DEBUG_PRINT_READY_QUEUE__

 #include "bits/defs.hfa"

@@ -34 +35 @@
     const char * max_cores_s = getenv("CFA_MAX_PROCESSORS");
     if(!max_cores_s) {
-        __cfaabi_dbg_print_nolock("No CFA_MAX_PROCESSORS in ENV");
+        __cfadbg_print_nolock(ready_queue, "No CFA_MAX_PROCESSORS in ENV\n");
         return __CFA_MAX_PROCESSORS__;
     }

@@ -41 +42 @@
     long int max_cores_l = strtol(max_cores_s, &endptr, 10);
     if(max_cores_l < 1 || max_cores_l > 65535) {
-        __cfaabi_dbg_print_nolock("CFA_MAX_PROCESSORS out of range : %ld", max_cores_l);
+        __cfadbg_print_nolock(ready_queue, "CFA_MAX_PROCESSORS out of range : %ld\n", max_cores_l);
         return __CFA_MAX_PROCESSORS__;
     }
     if('\0' != *endptr) {
-        __cfaabi_dbg_print_nolock("CFA_MAX_PROCESSORS not a decimal number : %s", max_cores_s);
+        __cfadbg_print_nolock(ready_queue, "CFA_MAX_PROCESSORS not a decimal number : %s\n", max_cores_s);
         return __CFA_MAX_PROCESSORS__;
     }

@@ -152 +153 @@
 //=======================================================================
 // Lock-Free registering/unregistering of threads
-unsigned doregister( struct cluster * cltr, struct processor * proc ) with(cltr->ready_lock) {
+unsigned doregister2( struct cluster * cltr, struct processor * proc ) with(cltr->ready_lock) {
+    __cfadbg_print_safe(ready_queue, "Kernel : Registering proc %p with cluster %p\n", proc, cltr);
+
     // Step - 1 : check if there is already space in the data
     uint_fast32_t s = ready;

@@ -185 +188 @@
     }

+    __cfadbg_print_safe(ready_queue, "Kernel : Registering proc %p done, id %u\n", proc, n);
+
     // Return new spot.
     /*paranoid*/ verify(n < ready);

@@ -192 +197 @@
 }

-void unregister( struct cluster * cltr, struct processor * proc ) with(cltr->ready_lock) {
+void unregister2( struct cluster * cltr, struct processor * proc ) with(cltr->ready_lock) {
     unsigned id = proc->id;
     /*paranoid*/ verify(id < ready);
     /*paranoid*/ verify(proc == __atomic_load_n(&data[id].handle, __ATOMIC_RELAXED));
     __atomic_store_n(&data[id].handle, 0p, __ATOMIC_RELEASE);
+
+    __cfadbg_print_safe(ready_queue, "Kernel : Unregister proc %p\n", proc);
 }

@@ -241 +248 @@
 //=======================================================================
 // Get the head pointer (one before the first element) from the anchor
-static inline thread_desc * head(const __intrusive_lane_t & this) {
-    thread_desc * rhead = (thread_desc *)(
-        (uintptr_t)( &this.before ) - offsetof( thread_desc, link )
+static inline $thread * head(const __intrusive_lane_t & this) {
+    $thread * rhead = ($thread *)(
+        (uintptr_t)( &this.before ) - offsetof( $thread, link )
     );
     /* paranoid */ verify(rhead);

@@ -250 +257 @@

 // Get the tail pointer (one after the last element) from the anchor
-static inline thread_desc * tail(const __intrusive_lane_t & this) {
-    thread_desc * rtail = (thread_desc *)(
-        (uintptr_t)( &this.after ) - offsetof( thread_desc, link )
+static inline $thread * tail(const __intrusive_lane_t & this) {
+    $thread * rtail = ($thread *)(
+        (uintptr_t)( &this.after ) - offsetof( $thread, link )
     );
     /* paranoid */ verify(rtail);

@@ -261 +268 @@
 void ?{}( __intrusive_lane_t & this ) {
     this.lock = false;
-    this.last_id = -1u;
-    this.count = 0u;
+    #if defined(__CFA_WITH_VERIFY__)
+        this.last_id = -1u;
+        this.count = 0u;
+    #endif

     this.before.link.prev = 0p;

@@ -279 +288 @@

     // We add a boat-load of assertions here because the anchor code is very fragile
-    /* paranoid */ verify(((uintptr_t)( head(this) ) + offsetof( thread_desc, link )) == (uintptr_t)(&this.before));
-    /* paranoid */ verify(((uintptr_t)( tail(this) ) + offsetof( thread_desc, link )) == (uintptr_t)(&this.after ));
+    /* paranoid */ verify(((uintptr_t)( head(this) ) + offsetof( $thread, link )) == (uintptr_t)(&this.before));
+    /* paranoid */ verify(((uintptr_t)( tail(this) ) + offsetof( $thread, link )) == (uintptr_t)(&this.after ));
     /* paranoid */ verify(head(this)->link.prev == 0p );
     /* paranoid */ verify(head(this)->link.next == tail(this) );

@@ -311 +320 @@
 // Push a thread onto this lane
 // returns true of lane was empty before push, false otherwise
-bool push(__intrusive_lane_t & this, thread_desc * node) {
+bool push(__intrusive_lane_t & this, $thread * node) {
     #if defined(__CFA_WITH_VERIFY__)
         /* paranoid */ verify(this.lock);

@@ -317 +326 @@
         /* paranoid */ verify(node->link.next == 0p);
         /* paranoid */ verify(node->link.prev == 0p);
+        /* paranoid */ verify(tail(this)->link.next == 0p);
+        /* paranoid */ verify(head(this)->link.prev == 0p);

         this.count++;

         if(this.before.link.ts == 0l) {
-            /* paranoid */ verify(tail(this)->link.next == 0p);
             /* paranoid */ verify(tail(this)->link.prev == head(this));
             /* paranoid */ verify(head(this)->link.next == tail(this));
-            /* paranoid */ verify(head(this)->link.prev == 0p);
+        } else {
+            /* paranoid */ verify(tail(this)->link.prev != head(this));
+            /* paranoid */ verify(head(this)->link.next != tail(this));
         }
     #endif

     // Get the relevant nodes locally
-    thread_desc * tail = tail(this);
-    thread_desc * prev = tail->link.prev;
+    $thread * tail = tail(this);
+    $thread * prev = tail->link.prev;

     // Do the push

@@ -358 +370 @@
 // returns popped
 // returns true of lane was empty before push, false otherwise
-[thread_desc *, bool] pop(__intrusive_lane_t & this) {
+[$thread *, bool] pop(__intrusive_lane_t & this) {
     /* paranoid */ verify(this.lock);
     /* paranoid */ verify(this.before.link.ts != 0ul);

     // Get anchors locally
-    thread_desc * head = head(this);
-    thread_desc * tail = tail(this);
+    $thread * head = head(this);
+    $thread * tail = tail(this);

     // Get the relevant nodes locally
-    thread_desc * node = head->link.next;
-    thread_desc * next = node->link.next;
+    $thread * node = head->link.next;
+    $thread * next = node->link.next;

     #if defined(__CFA_WITH_VERIFY__)

@@ -391 +403 @@

     // Check if we emptied list and return accordingly
+    /* paranoid */ verify(tail(this)->link.next == 0p);
+    /* paranoid */ verify(head(this)->link.prev == 0p);
     if(next == tail) {
         /* paranoid */ verify(this.before.link.ts == 0);
-        /* paranoid */ verify(tail(this)->link.next == 0p);
         /* paranoid */ verify(tail(this)->link.prev == head(this));
         /* paranoid */ verify(head(this)->link.next == tail(this));
-        /* paranoid */ verify(head(this)->link.prev == 0p);
         return [node, true];
     }
     else {
         /* paranoid */ verify(next->link.ts != 0);
+        /* paranoid */ verify(tail(this)->link.prev != head(this));
+        /* paranoid */ verify(head(this)->link.next != tail(this));
         /* paranoid */ verify(this.before.link.ts != 0);
         return [node, false];

@@ -508 +522 @@
     // Conditional check
     verifyf(
-        strict == STRICT && // Conditional check if it was expected to be cleared
+        strict != STRICT || // Conditional check if it was expected to be cleared
         ((mask[word] & (1ull << bit)) == 0),
         "Before set %llu:%llu (%u), %llx & %llx", word, bit, index, mask[word], (1ull << bit)

@@ -518 +532 @@
     // Conditional check
     verifyf(
-        strict == STRICT && // Conditional check if it was expected to be cleared
+        strict != STRICT || // Conditional check if it was expected to be cleared
         !ret,
         "Bit was not set but bts returned true"

@@ -561 +575 @@

 //-----------------------------------------------------------------------
-__attribute__((hot)) bool push(struct cluster * cltr, struct thread_desc * thrd) with (cltr->ready_queue) {
+__attribute__((hot)) bool push(struct cluster * cltr, struct $thread * thrd) with (cltr->ready_queue) {
     // write timestamp
     thrd->link.ts = rdtscl();

@@ -569 +583 @@
     do {
         // Pick the index of a lane
-        unsigned i = tls_rand() % lanes.count;
+        i = __tls_rand() % lanes.count;

         #if !defined(__CFA_NO_STATISTICS__)

@@ -594 +608 @@
     size_t ret = __atomic_fetch_add( &used.count, 1z, __ATOMIC_SEQ_CST);

-    // Check if the entire quue used to be empty
+    // Check if the entire queue used to be empty
     first = (ret == 0);

@@ -624 +638 @@
 //-----------------------------------------------------------------------
 // Given 2 indexes, pick the list with the oldest push an try to pop from it
-static struct thread_desc * try_pop(struct cluster * cltr, unsigned i, unsigned j) with (cltr->ready_queue) {
+static struct $thread * try_pop(struct cluster * cltr, unsigned i, unsigned j) with (cltr->ready_queue) {
     #if !defined(__CFA_NO_STATISTICS__)
         tls.pick.pop.attempt++;

@@ -662 +676 @@

     // Actually pop the list
-    struct thread_desc * thrd;
+    struct $thread * thrd;
     bool emptied;
     [thrd, emptied] = pop(lane);

@@ -704 +718 @@

 // Pop from the ready queue from a given cluster
-__attribute__((hot)) thread_desc * pop(struct cluster * cltr) with (cltr->ready_queue) {
+__attribute__((hot)) $thread * pop(struct cluster * cltr) with (cltr->ready_queue) {
     /* paranoid */ verify( lanes.count > 0 );

@@ -716 +730 @@

         // Pick two lists at random
-        unsigned ri = tls_rand();
-        unsigned rj = tls_rand();
+        unsigned ri = __tls_rand();
+        unsigned rj = __tls_rand();

         // Find which __cfa_readyQ_mask_t the two lists belong

@@ -748 +762 @@

         // try popping from the 2 picked lists
-        struct thread_desc * thrd = try_pop(cltr, i, j);
+        struct $thread * thrd = try_pop(cltr, i, j);
         if(thrd) return thrd;
     #else
         // Pick two lists at random
-        int i = tls_rand() % __atomic_load_n( &lanes.count, __ATOMIC_RELAXED );
-        int j = tls_rand() % __atomic_load_n( &lanes.count, __ATOMIC_RELAXED );
+        int i = __tls_rand() % __atomic_load_n( &lanes.count, __ATOMIC_RELAXED );
+        int j = __tls_rand() % __atomic_load_n( &lanes.count, __ATOMIC_RELAXED );

         // try popping from the 2 picked lists
-        struct thread_desc * thrd = try_pop(cltr, i, j);
+        struct $thread * thrd = try_pop(cltr, i, j);
         if(thrd) return thrd;
     #endif

@@ -825 +839 @@
     uint_fast32_t last_size = ready_mutate_lock( *cltr );

-    __cfaabi_dbg_print_safe("Kernel : Growing ready queue\n");
+    __cfadbg_print_safe(ready_queue, "Kernel : Growing ready queue\n");

     // Make sure that everything is consistent

@@ -862 +876 @@
     /* paranoid */ check( cltr->ready_queue );

-    __cfaabi_dbg_print_safe("Kernel : Growing ready queue done\n");
+    __cfadbg_print_safe(ready_queue, "Kernel : Growing ready queue done\n");

     // Unlock the RWlock

@@ -873 +887 @@
     uint_fast32_t last_size = ready_mutate_lock( *cltr );

-    __cfaabi_dbg_print_safe("Kernel : Shrinking ready queue\n");
+    __cfadbg_print_safe(ready_queue, "Kernel : Shrinking ready queue\n");

     // Make sure that everything is consistent

@@ -896 +910 @@

     // for printing count the number of displaced threads
-    #if defined(__CFA_DEBUG_PRINT__)
+    #if defined(__CFA_DEBUG_PRINT__) || defined(__CFA_DEBUG_PRINT_READY_QUEUE__)
         __attribute__((unused)) size_t displaced = 0;
     #endif

@@ -908 +922 @@
         // As long as we can pop from this lane to push the threads somewhere else in the queue
         while(!is_empty(lanes.data[idx])) {
-            struct thread_desc * thrd;
+            struct $thread * thrd;
             __attribute__((unused)) bool _;
             [thrd, _] = pop(lanes.data[idx]);

@@ -915 +929 @@

             // for printing count the number of displaced threads
-            #if defined(__CFA_DEBUG_PRINT__)
+            #if defined(__CFA_DEBUG_PRINT__) || defined(__CFA_DEBUG_PRINT_READY_QUEUE__)
                 displaced++;
             #endif

@@ -930 +944 @@
     }

-    __cfaabi_dbg_print_safe("Kernel : Shrinking ready queue displaced %zu threads\n", displaced);
+    __cfadbg_print_safe(ready_queue, "Kernel : Shrinking ready queue displaced %zu threads\n", displaced);

     // recompute the used.count instead of maintaining it

@@ -958 +972 @@
     /* paranoid */ check( cltr->ready_queue );

-    __cfaabi_dbg_print_safe("Kernel : Shrinking ready queue done\n");
+    __cfadbg_print_safe(ready_queue, "Kernel : Shrinking ready queue done\n");

     // Unlock the RWlock
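Background note: the head()/tail() helpers above recover a pseudo-node pointer from the lane's embedded before/after anchors by subtracting offsetof($thread, link), so the list code can treat the anchors as if they were real nodes. A plain-C sketch of this container_of-style trick, using illustrative types rather than the CFA runtime's; it relies on the same layout assumption the runtime code does:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    struct node;
    struct link { struct node * next; struct node * prev; };
    struct node { int value; struct link link; };

    /* The list stores only two anchor links; there are no real end nodes. */
    struct list { struct link before; struct link after; };

    /* Pretend the 'before' anchor is embedded in a full node by backing up
     * offsetof(struct node, link); only ->link of the result is ever touched. */
    static struct node * head(struct list * l) {
        return (struct node *)((uintptr_t)&l->before - offsetof(struct node, link));
    }

    int main(void) {
        struct list l = { {0, 0}, {0, 0} };
        /* the anchor's address round-trips through the fake node */
        printf("anchor ok: %d\n", &head(&l)->link == &l.before);
        return 0;
    }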