File: 1 edited

Legend:
  Unmodified  (context, ' ')
  Added       ('+')
  Removed     ('-')

libcfa/src/concurrency/kernel.cfa
rada0246d → r29cb302

@@ -118 +118 @@
 // Kernel Scheduling logic
 static $thread * __next_thread(cluster * this);
+static bool __has_next_thread(cluster * this);
 static void __run_thread(processor * this, $thread * dst);
-static $thread * __halt(processor * this);
-static bool __wake_one(cluster * cltr, bool was_empty);
 static bool __wake_proc(processor *);
+static bool __wake_one(struct __processor_id_t * id, cluster * cltr);
+static void __halt(processor * this);

 //-----------------------------------------------------------------------------
 // Kernel storage
-KERNEL_STORAGE(cluster,           mainCluster);
-KERNEL_STORAGE(processor,         mainProcessor);
-KERNEL_STORAGE($thread,           mainThread);
-KERNEL_STORAGE(__stack_t,         mainThreadCtx);
-
-cluster   * mainCluster;
-processor * mainProcessor;
-$thread   * mainThread;
+KERNEL_STORAGE(cluster,              mainCluster);
+KERNEL_STORAGE(processor,            mainProcessor);
+KERNEL_STORAGE($thread,              mainThread);
+KERNEL_STORAGE(__stack_t,            mainThreadCtx);
+KERNEL_STORAGE(__scheduler_RWLock_t, __scheduler_lock);
+#if !defined(__CFA_NO_STATISTICS__)
+KERNEL_STORAGE(__stats_t, mainProcStats);
+#endif
+
+cluster              * mainCluster;
+processor            * mainProcessor;
+$thread              * mainThread;
+__scheduler_RWLock_t * __scheduler_lock;

 extern "C" {

@@ -144 +150 @@
 thread_local struct KernelThreadData kernelTLS __attribute__ ((tls_model ( "initial-exec" ))) = {
 	NULL, // cannot use 0p
+	NULL,
 	NULL,
 	{ 1, false, false },

@@ -190 +197 @@

 void ?{}( $thread & this, current_stack_info_t * info) with( this ) {
+	ticket = 1;
 	state = Start;
 	self_cor{ info };

@@ -197 +205 @@
 	self_mon.recursion = 1;
 	self_mon_p = &self_mon;
-	next = 0p;
+	link.next = 0p;
+	link.prev = 0p;

 	node.next = 0p;

@@ -220 +229 @@
 static void * __invoke_processor(void * arg);

-void ?{}(processor & this, const char name[], cluster & cltr) with( this ) {
+void ?{}(processor & this, const char name[], cluster & _cltr) with( this ) {
 	this.name = name;
-	this.cltr = &cltr;
+	this.cltr = &_cltr;
+	id = -1u;
 	terminated{ 0 };
 	destroyer = 0p;

@@ -235 +245 @@

 	this.stack = __create_pthread( &this.kernel_thread, __invoke_processor, (void *)&this );
+	__atomic_fetch_add( &cltr->nprocessors, 1u, __ATOMIC_SEQ_CST );

 	__cfadbg_print_safe(runtime_core, "Kernel : core %p created\n", &this);

@@ -254 +265 @@

 	free( this.stack );
+
+	__atomic_fetch_sub( &cltr->nprocessors, 1u, __ATOMIC_SEQ_CST );
 }

@@ -259 +272 @@
 	this.name = name;
 	this.preemption_rate = preemption_rate;
+	this.nprocessors = 0;
 	ready_queue{};
-	ready_queue_lock{};

 #if !defined(__CFA_NO_STATISTICS__)
 	print_stats = false;
+	stats = alloc();
+	__init_stats( stats );
 #endif

-	procs{ __get };
-	idles{ __get };
 	threads{ __get };

@@ -277 +290 @@
 void ^?{}(cluster & this) {
 	__kernel_io_shutdown( this, &this == mainCluster );
+
+	#if !defined(__CFA_NO_STATISTICS__)
+		if(this.print_stats) {
+			__print_stats( this.stats );
+		}
+		free( this.stats );
+	#endif

 	unregister(this);

@@ -295 +315 @@
 	__cfadbg_print_safe(runtime_core, "Kernel : core %p starting\n", this);

-	doregister(this->cltr, this);
+	// register the processor unless it's the main thread which is handled in the boot sequence
+	if(this != mainProcessor) {
+		this->id = doregister((__processor_id_t*)this);
+		// Lock the RWlock so no-one pushes/pops while we are changing the queue
+		uint_fast32_t last_size = ready_mutate_lock();
+
+			// Adjust the ready queue size
+			ready_queue_grow( this->cltr );
+
+		// Unlock the RWlock
+		ready_mutate_unlock( last_size );
+	}

 	{

@@ -308 +339 @@
 			readyThread = __next_thread( this->cltr );

-			// If no ready thread
-			if( readyThread == 0p ) {
-				// Block until a thread is ready
-				readyThread = __halt(this);
-			}
-
 			// Check if we actually found a thread
 			if( readyThread ) {
 				/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
 				/* paranoid */ verifyf( readyThread->state == Ready || readyThread->preempted != __NO_PREEMPTION, "state : %d, preempted %d\n", readyThread->state, readyThread->preempted);
-				/* paranoid */ verifyf( readyThread->next == 0p, "Expected null got %p", readyThread->next );
+				/* paranoid */ verifyf( readyThread->link.next == 0p, "Expected null got %p", readyThread->link.next );
+				__builtin_prefetch( readyThread->context.SP );

 				// We found a thread run it

@@ -325 +351 @@
 				/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
 			}
+			else {
+				// Block until a thread is ready
+				__halt(this);
+			}
 		}

@@ -330 +360 @@
 	}

-	unregister(this->cltr, this);
-
 	V( this->terminated );

+	// unregister the processor unless it's the main thread which is handled in the boot sequence
+	if(this != mainProcessor) {
+		// Lock the RWlock so no-one pushes/pops while we are changing the queue
+		uint_fast32_t last_size = ready_mutate_lock();
+
+			// Adjust the ready queue size
+			ready_queue_shrink( this->cltr );
+
+			// Make sure we aren't on the idle queue
+			#if !defined(__CFA_NO_STATISTICS__)
+				bool removed =
+			#endif
+			unsafe_remove( this->cltr->idles, this );
+
+			#if !defined(__CFA_NO_STATISTICS__)
+				if(removed) __tls_stats()->ready.sleep.exits++;
+			#endif
+
+		// Unlock the RWlock
+		ready_mutate_unlock( last_size );
+
+		// Finally we don't need the read_lock any more
+		unregister((__processor_id_t*)this);
+	}
+	else {
+		// HACK : the coroutine context switch expects this_thread to be set
+		// and it make sense for it to be set in all other cases except here
+		// fake it
+		kernelTLS.this_thread = mainThread;
+	}
+
 	__cfadbg_print_safe(runtime_core, "Kernel : core %p terminated\n", this);
-
-	// HACK : the coroutine context switch expects this_thread to be set
-	// and it make sense for it to be set in all other cases except here
-	// fake it
-	if( this == mainProcessor ) kernelTLS.this_thread = mainThread;
 }

@@ -360 +414 @@
 	// Actually run the thread
 	RUNNING: while(true) {
-		if(unlikely(thrd_dst->preempted)) {
-			thrd_dst->preempted = __NO_PREEMPTION;
-			verify(thrd_dst->state == Active || thrd_dst->state == Rerun);
-		} else {
-			verify(thrd_dst->state == Blocked || thrd_dst->state == Ready); // Ready means scheduled normally, blocked means rerun
-			thrd_dst->state = Active;
-		}
+		thrd_dst->preempted = __NO_PREEMPTION;
+		thrd_dst->state = Active;

 		__cfaabi_dbg_debug_do(

@@ -398 +447 @@
 		if(unlikely(thrd_dst->preempted != __NO_PREEMPTION)) {
 			// The thread was preempted, reschedule it and reset the flag
-			__schedule_thread( thrd_dst );
+			__schedule_thread( (__processor_id_t*)this, thrd_dst );
 			break RUNNING;
 		}

+		if(unlikely(thrd_dst->state == Halted)) {
+			// The thread has halted, it should never be scheduled/run again
+			// We may need to wake someone up here since
+			unpark( this->destroyer __cfaabi_dbg_ctx2 );
+			this->destroyer = 0p;
+			break RUNNING;
+		}
+
+		/* paranoid */ verify( thrd_dst->state == Active );
+		thrd_dst->state = Blocked;
+
 		// set state of processor coroutine to active and the thread to inactive
-		static_assert(sizeof(thrd_dst->state) == sizeof(int));
-		enum coroutine_state old_state = __atomic_exchange_n(&thrd_dst->state, Blocked, __ATOMIC_SEQ_CST);
-		__cfaabi_dbg_debug_do( thrd_dst->park_result = old_state; )
-		switch(old_state) {
-			case Halted:
-				// The thread has halted, it should never be scheduled/run again, leave it back to Halted and move on
-				thrd_dst->state = Halted;
-
-				// We may need to wake someone up here since
-				unpark( this->destroyer __cfaabi_dbg_ctx2 );
-				this->destroyer = 0p;
-				break RUNNING;
-			case Active:
+		int old_ticket = __atomic_fetch_sub(&thrd_dst->ticket, 1, __ATOMIC_SEQ_CST);
+		__cfaabi_dbg_debug_do( thrd_dst->park_result = old_ticket; )
+		switch(old_ticket) {
+			case 1:
 				// This is case 1, the regular case, nothing more is needed
 				break RUNNING;
-			case Rerun:
+			case 2:
 				// This is case 2, the racy case, someone tried to run this thread before it finished blocking
 				// In this case, just run it again.

@@ -424 +475 @@
 			default:
 				// This makes no sense, something is wrong abort
-				abort( "Finished running a thread that was Blocked/Start/Primed %d\n", old_state);
+				abort();
 		}
 	}

@@ -438 +489 @@
 	$coroutine * proc_cor = get_coroutine(kernelTLS.this_processor->runner);
 	$thread * thrd_src = kernelTLS.this_thread;
+
+	#if !defined(__CFA_NO_STATISTICS__)
+		struct processor * last_proc = kernelTLS.this_processor;
+	#endif

 	// Run the thread on this processor

@@ -453 +508 @@
 	}

+	#if !defined(__CFA_NO_STATISTICS__)
+		if(last_proc != kernelTLS.this_processor) {
+			__tls_stats()->ready.threads.migration++;
+		}
+	#endif
+
 	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
 	/* paranoid */ verifyf( ((uintptr_t)thrd_src->context.SP) < ((uintptr_t)__get_stack(thrd_src->curr_cor)->base ), "ERROR : Returning $thread %p has been corrupted.\n StackPointer too small.\n", thrd_src );

@@ -463 +524 @@
 // It effectively constructs a coroutine by stealing the pthread stack
 static void * __invoke_processor(void * arg) {
+	#if !defined( __CFA_NO_STATISTICS__ )
+		__stats_t local_stats;
+		__init_stats( &local_stats );
+		kernelTLS.this_stats = &local_stats;
+	#endif
+
 	processor * proc = (processor *) arg;
 	kernelTLS.this_processor = proc;

@@ -494 +561 @@
 	__cfadbg_print_safe(runtime_core, "Kernel : core %p main ended (%p)\n", proc, &proc->runner);

+	#if !defined(__CFA_NO_STATISTICS__)
+		__tally_stats(proc->cltr->stats, &local_stats);
+	#endif
+
 	return 0p;
 }

@@ -591 +662 @@
 // Scheduler routines
 // KERNEL ONLY
-void __schedule_thread( $thread * thrd ) with( *thrd->curr_cluster ) {
+void __schedule_thread( struct __processor_id_t * id, $thread * thrd ) {
+	/* paranoid */ verify( thrd );
+	/* paranoid */ verify( thrd->state != Halted );
 	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
 	/* paranoid */ #if defined( __CFA_WITH_VERIFY__ )
-	/* paranoid */ if( thrd->state == Blocked || thrd->state == Start ) assertf( thrd->preempted == __NO_PREEMPTION,
-			"Error inactive thread marked as preempted, state %d, preemption %d\n", thrd->state, thrd->preempted );
-	/* paranoid */ if( thrd->preempted != __NO_PREEMPTION ) assertf(thrd->state == Active || thrd->state == Rerun,
-			"Error preempted thread marked as not currently running, state %d, preemption %d\n", thrd->state, thrd->preempted );
+	/* paranoid */ if( thrd->state == Blocked || thrd->state == Start ) assertf( thrd->preempted == __NO_PREEMPTION,
+			"Error inactive thread marked as preempted, state %d, preemption %d\n", thrd->state, thrd->preempted );
+	/* paranoid */ if( thrd->preempted != __NO_PREEMPTION ) assertf(thrd->state == Active,
+			"Error preempted thread marked as not currently running, state %d, preemption %d\n", thrd->state, thrd->preempted );
 	/* paranoid */ #endif
-	/* paranoid */ verifyf( thrd->next == 0p, "Expected null got %p", thrd->next );
+	/* paranoid */ verifyf( thrd->link.next == 0p, "Expected null got %p", thrd->link.next );

 	if (thrd->preempted == __NO_PREEMPTION) thrd->state = Ready;

-	lock  ( ready_queue_lock __cfaabi_dbg_ctx2 );
-	bool was_empty = !(ready_queue != 0);
-	append( ready_queue, thrd );
-	unlock( ready_queue_lock );
-
-	__wake_one(thrd->curr_cluster, was_empty);
+	ready_schedule_lock  ( id );
+		push( thrd->curr_cluster, thrd );
+
+		#if !defined(__CFA_NO_STATISTICS__)
+			bool woke =
+		#endif
+			__wake_one(id, thrd->curr_cluster);
+
+		#if !defined(__CFA_NO_STATISTICS__)
+			if(woke) __tls_stats()->ready.sleep.wakes++;
+		#endif
+	ready_schedule_unlock( id );

 	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );

@@ -617 +696 @@
 	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );

-	lock( ready_queue_lock __cfaabi_dbg_ctx2);
-	$thread * head = pop_head( ready_queue);
-	unlock( ready_queue_lock);
+	ready_schedule_lock  ( (__processor_id_t*)kernelTLS.this_processor );
+		$thread * head = pop( this );
+	ready_schedule_unlock( (__processor_id_t*)kernelTLS.this_processor );

 	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );

@@ -625 +704 @@
 }

+// KERNEL ONLY
+static bool __has_next_thread(cluster * this) with( *this ) {
+	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
+
+	ready_schedule_lock  ( (__processor_id_t*)kernelTLS.this_processor );
+		bool not_empty = query( this );
+	ready_schedule_unlock( (__processor_id_t*)kernelTLS.this_processor );
+
+	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
+	return not_empty;
+}
+
 // KERNEL ONLY unpark with out disabling interrupts
-void __unpark( $thread * thrd __cfaabi_dbg_ctx_param2 ) {
-	static_assert(sizeof(thrd->state) == sizeof(int));
-
+void __unpark( struct __processor_id_t * id, $thread * thrd __cfaabi_dbg_ctx_param2 ) {
 	// record activity
 	__cfaabi_dbg_debug_do( char * old_caller = thrd->unpark_caller; )
 	__cfaabi_dbg_record_thrd( *thrd, false, caller );

-	enum coroutine_state old_state = __atomic_exchange_n(&thrd->state, Rerun, __ATOMIC_SEQ_CST);
-	__cfaabi_dbg_debug_do( thrd->unpark_result = old_state; )
-	switch(old_state) {
-		case Active:
+	int old_ticket = __atomic_fetch_add(&thrd->ticket, 1, __ATOMIC_SEQ_CST);
+	__cfaabi_dbg_debug_do( thrd->unpark_result = old_ticket; thrd->unpark_state = thrd->state; )
+	switch(old_ticket) {
+		case 1:
 			// Wake won the race, the thread will reschedule/rerun itself
 			break;
-		case Blocked:
+		case 0:
 			/* paranoid */ verify( ! thrd->preempted != __NO_PREEMPTION );
+			/* paranoid */ verify( thrd->state == Blocked );

 			// Wake lost the race,
-			thrd->state = Blocked;
-			__schedule_thread( thrd );
+			__schedule_thread( id, thrd );
 			break;
-		case Rerun:
-			abort("More than one thread attempted to schedule thread %p\n", thrd);
-			break;
-		case Halted:
-		case Start:
-		case Primed:
 		default:
 			// This makes no sense, something is wrong abort

@@ -662 +745 @@

 	disable_interrupts();
-	__unpark( thrd __cfaabi_dbg_ctx_fwd2 );
+	__unpark( (__processor_id_t*)kernelTLS.this_processor, thrd __cfaabi_dbg_ctx_fwd2 );
 	enable_interrupts( __cfaabi_dbg_ctx );
 }

@@ -697 +780 @@

 	$thread * thrd = kernelTLS.this_thread;
-	/* paranoid */ verify(thrd->state == Active || thrd->state == Rerun);
+	/* paranoid */ verify(thrd->state == Active);

 	// SKULLDUGGERY: It is possible that we are preempting this thread just before
 	// it was going to park itself. If that is the case and it is already using the
 	// intrusive fields then we can't use them to preempt the thread
 	// If that is the case, abandon the preemption.
 	bool preempted = false;
-	if(thrd->next == 0p) {
+	if(thrd->link.next == 0p) {
 		preempted = true;
 		thrd->preempted = reason;

@@ -730 +813 @@
 	__cfa_dbg_global_clusters.list{ __get };
 	__cfa_dbg_global_clusters.lock{};
+
+	// Initialize the global scheduler lock
+	__scheduler_lock = (__scheduler_RWLock_t*)&storage___scheduler_lock;
+	(*__scheduler_lock){};

 	// Initialize the main cluster

@@ -764 +851 @@
 		pending_preemption = false;
 		kernel_thread = pthread_self();
+		id = -1u;

 		runner{ &this };
 		__cfadbg_print_safe(runtime_core, "Kernel : constructed main processor context %p\n", &runner);
+
+		__atomic_fetch_add( &cltr->nprocessors, 1u, __ATOMIC_SEQ_CST );
 	}

@@ -774 +864 @@
 	(*mainProcessor){};

+	mainProcessor->id = doregister( (__processor_id_t*)mainProcessor);
+
 	//initialize the global state variables
 	kernelTLS.this_processor = mainProcessor;
 	kernelTLS.this_thread = mainThread;

+	#if !defined( __CFA_NO_STATISTICS__ )
+		kernelTLS.this_stats = (__stats_t *)& storage_mainProcStats;
+		__init_stats( kernelTLS.this_stats );
+	#endif
+
 	// Enable preemption
 	kernel_start_preemption();

@@ -783 +880 @@
 	// Add the main thread to the ready queue
 	// once resume is called on mainProcessor->runner the mainThread needs to be scheduled like any normal thread
-	__schedule_thread( mainThread );
+	__schedule_thread((__processor_id_t *)mainProcessor, mainThread);

 	// SKULLDUGGERY: Force a context switch to the main processor to set the main thread's context to the current UNIX

@@ -827 +924 @@
 	kernel_stop_preemption();

+	unregister((__processor_id_t*)mainProcessor);
+
 	// Destroy the main processor and its context in reverse order of construction
 	// These were manually constructed so we need manually destroy them
 	void ^?{}(processor & this) with( this ){
 		/* paranoid */ verify( this.do_terminate == true );
+		__atomic_fetch_sub( &cltr->nprocessors, 1u, __ATOMIC_SEQ_CST );
+		__cfaabi_dbg_print_safe("Kernel : destroyed main processor context %p\n", &runner);
 	}

@@ -836 +937 @@

+
 	// Final step, destroy the main thread since it is no longer needed
 	// Since we provided a stack to this taxk it will not destroy anything
 	/* paranoid */ verify(mainThread->self_cor.stack.storage == (__stack_t*)(((uintptr_t)&storage_mainThreadCtx)| 0x1));

@@ -842 +944 @@
 	^(*mainCluster){};

+	^(*__scheduler_lock){};
+
 	^(__cfa_dbg_global_clusters.list){};
 	^(__cfa_dbg_global_clusters.lock){};

@@ -851 +955 @@
 // Kernel Idle Sleep
 //=============================================================================================
-static $thread * __halt(processor * this) with( *this ) {
-	if( do_terminate ) return 0p;
-
-	// First, lock the cluster idle
-	lock( cltr->idle_lock __cfaabi_dbg_ctx2 );
-
-	// Check if we can find a thread
-	if( $thread * found = __next_thread( cltr ) ) {
-		unlock( cltr->idle_lock );
-		return found;
-	}
-
-	// Move this processor from the active list to the idle list
-	move_to_front(cltr->procs, cltr->idles, *this);
-
-	// Unlock the idle lock so we don't go to sleep with a lock
-	unlock    (cltr->idle_lock);
-
-	// We are ready to sleep
-	__cfadbg_print_safe(runtime_core, "Kernel : Processor %p ready to sleep\n", this);
-	wait( idle );
-
-	// We have woken up
-	__cfadbg_print_safe(runtime_core, "Kernel : Processor %p woke up and ready to run\n", this);
-
-	// Get ourself off the idle list
-	with( *cltr ) {
-		lock      (idle_lock __cfaabi_dbg_ctx2);
-		move_to_front(idles, procs, *this);
-		unlock    (idle_lock);
-	}
-
-	// Don't check the ready queue again, we may not be in a position to run a thread
-	return 0p;
-}
-
 // Wake a thread from the front if there are any
-static bool __wake_one(cluster * this, __attribute__((unused)) bool force) {
-	// if we don't want to force check if we know it's false
-	// if( !this->idles.head && !force ) return false;
-
-	// First, lock the cluster idle
-	lock( this->idle_lock __cfaabi_dbg_ctx2 );
-
-	// Check if there is someone to wake up
-	if( !this->idles.head ) {
-		// Nope unlock and return false
-		unlock( this->idle_lock );
-		return false;
-	}
-
-	// Wake them up
-	__cfadbg_print_safe(runtime_core, "Kernel : waking Processor %p\n", this->idles.head);
-	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
-	post( this->idles.head->idle );
-
-	// Unlock and return true
-	unlock( this->idle_lock );
+static bool __wake_one(struct __processor_id_t * id, cluster * this) {
+	/* paranoid */ verify( ready_schedule_islocked( id ) );
+
+	// Check if there is a sleeping processor
+	processor * p = pop(this->idles);
+
+	// If no one is sleeping, we are done
+	if( 0p == p ) return false;
+
+	// We found a processor, wake it up
+	post( p->idle );
+
 	return true;
 }

@@ -922 +981 @@

 	return ret;
+}
+
+static void __halt(processor * this) with( *this ) {
+	if( do_terminate ) return;
+
+	#if !defined(__CFA_NO_STATISTICS__)
+		__tls_stats()->ready.sleep.halts++;
+	#endif
+	// Push self to queue
+	push(cltr->idles, *this);
+
+	// Makre sure we don't miss a thread
+	if( __has_next_thread(cltr) ) {
+		// A thread was posted, make sure a processor is woken up
+		struct __processor_id_t *id = (struct __processor_id_t *) this;
+		ready_schedule_lock  ( id );
+			__wake_one( id, cltr );
+		ready_schedule_unlock( id );
+		#if !defined(__CFA_NO_STATISTICS__)
+			__tls_stats()->ready.sleep.cancels++;
+		#endif
+	}
+
+	wait( idle );

 }

@@ -1078 +1161 @@
 	cltr->nthreads -= 1;
 	unlock(cltr->thread_list_lock);
-}
-
-void doregister( cluster * cltr, processor * proc ) {
-	lock      (cltr->idle_lock __cfaabi_dbg_ctx2);
-	cltr->nprocessors += 1;
-	push_front(cltr->procs, *proc);
-	unlock    (cltr->idle_lock);
-}
-
-void unregister( cluster * cltr, processor * proc ) {
-	lock  (cltr->idle_lock __cfaabi_dbg_ctx2);
-	remove(cltr->procs, *proc );
-	cltr->nprocessors -= 1;
-	unlock(cltr->idle_lock);
 }

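Note on the park/unpark rework above: the Blocked/Rerun state exchange is replaced by a per-thread ticket that starts at 1. The parking processor does __atomic_fetch_sub(&thrd->ticket, 1, ...) and the waker does __atomic_fetch_add(&thrd->ticket, 1, ...); whichever side observes the other's update is responsible for keeping the thread running, so the race between blocking and unparking never loses a wakeup. Below is a minimal sketch of that handshake in plain C11 with POSIX threads rather than Cforall; the toy_* names and the semaphore standing in for the descheduled state are illustrative assumptions, not the runtime's API.

// Minimal sketch of the ticket handshake used by park()/unpark() in the diff.
// Plain C11 + POSIX semaphores; the real runtime "blocks" by switching back to
// the processor coroutine instead of sleeping on a semaphore.
#include <stdatomic.h>
#include <semaphore.h>
#include <pthread.h>
#include <stdio.h>

typedef struct {
	atomic_int ticket;   // starts at 1, like "ticket = 1;" in the $thread ctor
	sem_t      blocked;  // stand-in for "this thread is descheduled"
} toy_thread;

static void toy_park( toy_thread * this ) {
	int old_ticket = atomic_fetch_sub( &this->ticket, 1 );
	// old == 1: regular case, we got here first, really block
	// old == 2: unpark() already ran, cancel the block and keep running
	if( old_ticket == 1 ) sem_wait( &this->blocked );
}

static void toy_unpark( toy_thread * this ) {
	int old_ticket = atomic_fetch_add( &this->ticket, 1 );
	// old == 0: the thread is fully blocked, we must wake (reschedule) it
	// old == 1: the thread has not finished parking; it will see ticket == 2
	//           in toy_park() and simply keep running
	if( old_ticket == 0 ) sem_post( &this->blocked );
}

static void * waker( void * arg ) {
	toy_unpark( (toy_thread *)arg );
	return NULL;
}

int main(void) {
	toy_thread t;
	atomic_init( &t.ticket, 1 );
	sem_init( &t.blocked, 0, 0 );

	pthread_t w;
	pthread_create( &w, NULL, waker, &t );
	toy_park( &t );                 // no interleaving loses the wakeup
	pthread_join( w, NULL );

	printf( "parked and resumed exactly once\n" );
	return 0;
}

Either interleaving leaves the ticket back at 1, which is why the $thread constructor in the diff initialises ticket = 1 and the same thread can be parked again later.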
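The reworked idle-sleep path follows the same discipline: __halt now pushes the processor onto cltr->idles first, re-checks the ready queue with __has_next_thread, wakes a processor (possibly itself) if a thread slipped in, and only then waits on idle, while __wake_one simply pops a sleeping processor and posts its semaphore. A hedged sketch of that publish-then-re-check ordering, again in plain C11; the mutex-protected list and counter are stand-ins for the cluster's idle list and ready queue, not the real data structures.

// Sketch of the "publish, then re-check" idle-sleep ordering from
// __halt()/__wake_one() in the diff.  Plain C11 + POSIX.
#include <pthread.h>
#include <semaphore.h>
#include <stdbool.h>
#include <stdio.h>

typedef struct proc {
	sem_t         idle;
	struct proc * next;
} proc;

typedef struct {
	pthread_mutex_t lock;
	proc *          idles;   // sleeping processors
	int             nready;  // stand-in for the ready queue
} cluster;

static cluster cltr = { .lock = PTHREAD_MUTEX_INITIALIZER };

static void wake_one( cluster * this ) {
	pthread_mutex_lock( &this->lock );
	proc * p = this->idles;
	if( p ) this->idles = p->next;          // pop a sleeping processor, if any
	pthread_mutex_unlock( &this->lock );
	if( p ) sem_post( &p->idle );           // wake it up
}

static bool has_next_thread( cluster * this ) {
	pthread_mutex_lock( &this->lock );
	bool not_empty = this->nready > 0;
	pthread_mutex_unlock( &this->lock );
	return not_empty;
}

static void halt( cluster * this, proc * self ) {
	// 1. publish: make this processor visible to wake_one()
	pthread_mutex_lock( &this->lock );
	self->next  = this->idles;
	this->idles = self;
	pthread_mutex_unlock( &this->lock );

	// 2. re-check: a thread may have become ready between the scheduler's last
	//    empty poll and the push above; if so, wake someone (possibly ourself)
	if( has_next_thread( this ) ) wake_one( this );

	// 3. only now is it safe to sleep
	sem_wait( &self->idle );
}

static void * submitter( void * arg ) {
	(void)arg;
	pthread_mutex_lock( &cltr.lock );
	cltr.nready += 1;                        // a thread becomes ready ...
	pthread_mutex_unlock( &cltr.lock );
	wake_one( &cltr );                       // ... and an idle processor is woken
	return NULL;
}

int main(void) {
	proc p;
	p.next = NULL;
	sem_init( &p.idle, 0, 0 );

	pthread_t t;
	pthread_create( &t, NULL, submitter, NULL );
	halt( &cltr, &p );                       // cannot miss the wakeup above
	pthread_join( t, NULL );

	puts( "processor woke up" );
	return 0;
}

The ordering is the point: if the processor slept before re-checking, a thread made ready in between could sit in the queue while every processor stays asleep.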