Changeset e25ef8c for libcfa/src/concurrency/monitor.cfa
- Timestamp:
- Nov 27, 2024, 12:22:58 PM (3 weeks ago)
- Branches:
- master
- Children:
- 108b2c7
- Parents:
- de7b7a5
- File:
- 1 edited
Legend:
- Unmodified: line numbers in both the rde7b7a5 and re25ef8c columns
- Removed: marked "-", line number in the rde7b7a5 column only
- Added: marked "+", line number in the re25ef8c column only
libcfa/src/concurrency/monitor.cfa
rde7b7a5	re25ef8c
10	10	// Created On : Thd Feb 23 12:27:26 2017
11	11	// Last Modified By : Peter A. Buhr
12		- // Last Modified On : Thu Nov 21 08:31:55 2024
13		- // Update Count : 18
	12	+ // Last Modified On : Wed Nov 27 12:13:14 2024
	13	+ // Update Count : 72
14	14	//
15	15	
…	…
27	27	//-----------------------------------------------------------------------------
28	28	// Forward declarations
29		- static inline void __set_owner
30		- static inline void __set_owner
31		- static inline void set_mask
	29	+ static inline void __set_owner( monitor$ * this, thread$ * owner );
	30	+ static inline void __set_owner( monitor$ * storage [], __lock_size_t count, thread$ * owner );
	31	+ static inline void set_mask( monitor$ * storage [], __lock_size_t count, const __waitfor_mask_t & mask );
32	32	static inline void reset_mask( monitor$ * this );
33	33	
…	…
35	35	static inline bool is_accepted( monitor$ * this, const __monitor_group_t & monitors );
36	36	
37		- static inline void lock_all
38		- static inline void lock_all
	37	+ static inline void lock_all( __spinlock_t * locks [], __lock_size_t count );
	38	+ static inline void lock_all( monitor$ * source [], __spinlock_t * /*out*/ locks [], __lock_size_t count );
39	39	static inline void unlock_all( __spinlock_t * locks [], __lock_size_t count );
40	40	static inline void unlock_all( monitor$ * locks [], __lock_size_t count );
41	41	
42		- static inline void save
	42	+ static inline void save( monitor$ * ctx [], __lock_size_t count, __spinlock_t * locks [], unsigned int /*out*/ recursions [], __waitfor_mask_t /*out*/ masks [] );
43	43	static inline void restore( monitor$ * ctx [], __lock_size_t count, __spinlock_t * locks [], unsigned int /*in */ recursions [], __waitfor_mask_t /*in */ masks [] );
44	44	
…	…
47	47	static inline void ?{}(__condition_criterion_t & this, monitor$ * target, __condition_node_t * owner );
48	48	
49		- static inline void init
	49	+ static inline void init( __lock_size_t count, monitor$ * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] );
50	50	static inline void init_push( __lock_size_t count, monitor$ * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] );
51	51	
52		- static inline thread$ * check_condition( __condition_criterion_t * );
53		- static inline void brand_condition( condition & );
	52	+ static inline thread$ * check_condition ( __condition_criterion_t * );
	53	+ static inline void brand_condition( condition & );
54	54	static inline [thread$ *, int] search_entry_queue( const __waitfor_mask_t &, monitor$ * monitors [], __lock_size_t count );
55	55	
56	56	forall(T & | sized( T ))
57	57	static inline __lock_size_t insert_unique( T * array [], __lock_size_t & size, T * val );
58		- static inline __lock_size_t count_max
59		- static inline __lock_size_t aggregate
	58	+ static inline __lock_size_t count_max( const __waitfor_mask_t & mask );
	59	+ static inline __lock_size_t aggregate( monitor$ * storage [], const __waitfor_mask_t & mask );
60	60	
61	61	//-----------------------------------------------------------------------------
62	62	// Useful defines
63		- #define wait_ctx( thrd, user_info) /* Create the necessary information to use the signaller stack*/ \
64		- __condition_node_t waiter = { thrd, count, user_info }; /* Create the node specific to this wait operation*/ \
65		- __condition_criterion_t criteria[count]; /* Create the creteria this wait operation needs to wake up*/ \
66		- init( count, monitors, waiter, criteria ); /* Link everything together */ \
67		-
68		- #define wait_ctx_primed( thrd, user_info) /* Create the necessary information to use the signaller stack*/ \
69		- __condition_node_t waiter = { thrd, count, user_info }; /* Create the node specific to this wait operation*/ \
70		- __condition_criterion_t criteria[count]; /* Create the creteria this wait operation needs to wake up*/ \
71		- init_push( count, monitors, waiter, criteria ); /* Link everything together and push it to the AS-Stack */ \
72		-
73		- #define monitor_ctx( mons, cnt )
74		- monitor$ ** monitors = mons; /* Save the targeted monitors*/ \
75		- __lock_size_t count = cnt; /* Save the count to a local variable*/ \
76		- unsigned int recursions[ count ]; /* Save the current recursion levels to restore them later*/ \
77		- __waitfor_mask_t masks [ count ]; /* Save the current waitfor masks to restore them later*/ \
78		- __spinlock_t * locks [ count ]; /* We need to pass-in an array of locks to BlockInternal */ \
	63	+ #define wait_ctx( thrd, user_info ) /* Create the necessary information to use the signaller stack */ \
	64	+ __condition_node_t waiter = { thrd, count, user_info }; /* Create the node specific to this wait operation */ \
	65	+ __condition_criterion_t criteria[count]; /* Create the creteria this wait operation needs to wake up */ \
	66	+ init( count, monitors, waiter, criteria ); /* Link everything together */
	67	+
	68	+ #define wait_ctx_primed( thrd, user_info ) /* Create the necessary information to use the signaller stack */ \
	69	+ __condition_node_t waiter = { thrd, count, user_info }; /* Create the node specific to this wait operation */ \
	70	+ __condition_criterion_t criteria[count]; /* Create the creteria this wait operation needs to wake up */ \
	71	+ init_push( count, monitors, waiter, criteria ); /* Link everything together and push it to the AS-Stack */
	72	+
	73	+ #define monitor_ctx( mons, cnt ) /* Define that create the necessary struct for internal/external scheduling operations */ \
	74	+ monitor$ ** monitors = mons; /* Save the targeted monitors */ \
	75	+ __lock_size_t count = cnt; /* Save the count to a local variable */ \
	76	+ unsigned int recursions[count]; /* Save the current recursion levels to restore them later */ \
	77	+ __waitfor_mask_t masks[count]; /* Save the current waitfor masks to restore them later */ \
	78	+ __spinlock_t * locks[count]; /* We need to pass-in an array of locks to BlockInternal */
79	79	
80	80	#define monitor_save save ( monitors, count, locks, recursions, masks )
…	…
93	93	__cfaabi_dbg_print_safe( "Kernel : %10p Entering mon %p (%p)\n", thrd, this, this->owner);
94	94	
95	95	if ( unlikely(0 != (0x1 & (uintptr_t)this->owner)) ) {
96	96	abort( "Attempt by thread \"%.256s\" (%p) to access joined monitor %p.", thrd->self_cor.name, thrd, this );
97		- }
98		- else if( !this->owner ) {
	97	+ } else if ( !this->owner ) {
99	98	// No one has the monitor, just take it
100	99	__set_owner( this, thrd );
101	100	
102	101	__cfaabi_dbg_print_safe( "Kernel : mon is free \n" );
103		- }
104		- else if( this->owner == thrd) {
	102	+ } else if ( this->owner == thrd) {
105	103	// We already have the monitor, just note how many times we took it
106	104	this->recursion += 1;
107	105	
108	106	__cfaabi_dbg_print_safe( "Kernel : mon already owned \n" );
109		- }
110		- else if( is_accepted( this, group) ) {
	107	+ } else if ( is_accepted( this, group) ) {
111	108	// Some one was waiting for us, enter
112	109	__set_owner( this, thrd );
…	…
116	113	
117	114	__cfaabi_dbg_print_safe( "Kernel : mon accepts \n" );
118		- }
119		- else {
	115	+ } else {
120	116	__cfaabi_dbg_print_safe( "Kernel : blocking \n" );
121	117	
…	…
156	152	
157	153	
158	154	if ( !this->owner ) {
159	155	__cfaabi_dbg_print_safe( "Kernel : Destroying free mon %p\n", this);
160	156	
…	…
167	163	unlock( this->lock );
168	164	return;
169		- }
170		- else if( this->owner == thrd && !join) {
	165	+ } else if ( this->owner == thrd && !join) {
171	166	// We already have the monitor... but where about to destroy it so the nesting will fail
172	167	// Abort!
…	…
176	171	// because join will not release the monitor after it executed.
177	172	// to avoid that it sets the owner to the special value thrd | 1p before exiting
178	173	else if ( this->owner == (thread$*)(1 | (uintptr_t)thrd) ) {
179	174	// restore the owner and just return
180	175	__cfaabi_dbg_print_safe( "Kernel : Destroying free mon %p\n", this);
…	…
196	191	monitor$ ** monitors = &this;
197	192	__monitor_group_t group = { &this, 1, func };
198	193	if ( is_accepted( this, group) ) {
199	194	__cfaabi_dbg_print_safe( "Kernel : mon accepts dtor, block and signal it \n" );
…	…
224	219	__cfaabi_dbg_print_safe( "Kernel : Destroying %p\n", this);
225	220	return;
226		- }
227		- else {
	221	+ } else {
228	222	__cfaabi_dbg_print_safe( "Kernel : blocking \n" );
229	223	
…	…
259	253	// If we haven't left the last level of recursion
260	254	// it means we don't need to do anything
261	255	if ( this->recursion != 0) {
262	256	__cfaabi_dbg_print_safe( "Kernel : recursion still %d\n", this->recursion);
263	257	unlock( this->lock );
…	…
283	277	static void __dtor_leave( monitor$ * this, bool join ) {
284	278	__cfaabi_dbg_debug_do(
285	279	if ( active_thread() != this->owner ) {
286	280	abort( "Destroyed monitor %p has inconsistent owner, expected %p got %p.\n", this, active_thread(), this->owner);
287	281	}
288	282	if ( this->recursion != 1 && !join ) {
289	283	abort( "Destroyed monitor %p has %d outstanding nested calls.\n", this, this->recursion - 1);
290	284	}
…	…
332	326	// relies on the monitor array being sorted
333	327	static inline void enter( __monitor_group_t monitors ) {
334		- for ( __lock_size_t i = 0; i < monitors.size; i++) {
	328	+ for ( i; monitors.size ) {
335	329	__enter( monitors[i], monitors );
336	330	}
…	…
340	334	// relies on the monitor array being sorted
341	335	static inline void leave(monitor$ * monitors [], __lock_size_t count) {
342		- for ( __lock_size_t i = count - 1; i >= 0; i--) {
	336	+ for ( i; -~= count - 1 ) {
343	337	__leave( monitors[i] );
344	338	}
…	…
454	448	
455	449	// Create storage for monitor context
456		- monitor_ctx( this.monitors, this.monitor_count );
	450	+ monitor_ctx( this.monitors, this.monitor_count ); // creates monitors, count, recursions, masks, locks
457	451	
458	452	// Create the node specific to this wait operation
…	…
477	471	
478	472	// Remove any duplicate threads
479		- for ( __lock_size_t i = 0; i < count; i++) {
	473	+ for ( i; count ) {
480	474	thread$ * new_owner = next_thread( monitors[i] );
481	475	insert_unique( threads, thread_count, new_owner );
…	…
483	477	
484	478	// Unlock the locks, we don't need them anymore
485		- for (int i = 0; i < count; i++) {
	479	+ for ( i; count ) {
486	480	unlock( *locks[i] );
487	481	}
488	482	
489	483	// Wake the threads
490		- for (int i = 0; i < thread_count; i++) {
	484	+ for ( i; thread_count ) {
491	485	unpark( threads[i] );
492	486	}
…	…
500	494	
501	495	bool signal( condition & this ) libcfa_public {
502	496	if ( is_empty( this ) ) { return false; }
503	497	
504	498	//Check that everything is as expected
…	…
513	507	}
514	508	
515		- for (int i = 0; i < this.monitor_count; i++) {
	509	+ for ( i; this.monitor_count ) {
516	510	if ( this.monitors[i] != this_thrd->monitors[i] ) {
517	511	abort( "Signal on condition %p made with different monitor, expected %p got %p", &this, this.monitors[i], this_thrd->monitors[i] );
…	…
529	523	
530	524	//Add the thread to the proper AS stack
531		- for (int i = 0; i < count; i++) {
	525	+ for ( i; count ) {
532	526	__condition_criterion_t * crit = &node->criteria[i];
533	527	assert( !crit->ready );
…	…
542	536	
543	537	bool signal_block( condition & this ) libcfa_public {
544	538	if ( !this.blocked.head ) { return false; }
545	539	
546	540	//Check that everything is as expected
…	…
549	543	
550	544	// Create storage for monitor context
551		- monitor_ctx( this.monitors, this.monitor_count );
	545	+ monitor_ctx( this.monitors, this.monitor_count ); // creates monitors, count, recursions, masks, locks
552	546	
553	547	// Lock all monitors (aggregates the locks them as well)
554	548	lock_all( monitors, locks, count );
555		-
556	549	
557	550	// Create the node specific to this wait operation
…	…
576	569	park();
577	570	
578		-
579	571	// WE WOKE UP
580		-
581	572	
582	573	__cfaabi_dbg_print_buffer_local( "Kernel : signal_block returned\n" );
…	…
621	612	__cfaabi_dbg_print_buffer_decl( "Kernel : waitfor %"PRIdFAST16" (s: %"PRIdFAST16", m: %"PRIdFAST16")\n", actual_count, mask.size, (__lock_size_t)max);
622	613	
623	614	if (actual_count == 0) return;
624	615	
625	616	__cfaabi_dbg_print_buffer_local( "Kernel : waitfor internal proceeding\n" );
626	617	
627	618	// Create storage for monitor context
628		- monitor_ctx( mon_storage, actual_count );
	619	+ monitor_ctx( mon_storage, actual_count ); // creates monitors, count, recursions, masks, locks
629	620	
630	621	// Lock all monitors (aggregates the locks as well)
…	…
636	627	[next, index] = search_entry_queue( mask, monitors, count );
637	628	
638	629	if ( next ) {
639	630	*mask.accepted = index;
640	631	__acceptable_t& accepted = mask[index];
641	632	if ( accepted.is_dtor ) {
642	633	__cfaabi_dbg_print_buffer_local( "Kernel : dtor already there\n" );
643	634	verifyf( accepted.size == 1, "ERROR: Accepted dtor has more than 1 mutex parameter." );
…	…
662	653	__cfaabi_dbg_print_buffer_local( "Kernel : baton of %"PRIdFAST16" monitors : ", count );
663	654	#ifdef __CFA_DEBUG_PRINT__
664		- for ( int i = 0; i < count; i++) {
	655	+ for ( i; count ) {
665	656	__cfaabi_dbg_print_buffer_local( "%p %p ", monitors[i], monitors[i]->signal_stack.top );
666	657	}
…	…
692	683	
693	684	
694	685	if ( duration == 0 ) {
695	686	__cfaabi_dbg_print_buffer_local( "Kernel : non-blocking, exiting\n" );
696	687	
…	…
712	703	set_mask( monitors, count, mask );
713	704	
714		- for ( __lock_size_t i = 0; i < count; i++) {
	705	+ for ( i; count ) {
715	706	verify( monitors[i]->owner == active_thread() );
716	707	}
…	…
752	743	monitors[0]->owner = owner;
753	744	monitors[0]->recursion = 1;
754		- for ( __lock_size_t i = 1; i < count; i++) {
	745	+ for ( i; 1~count ) {
755	746	/* paranoid */ verify ( monitors[i]->lock.lock );
756	747	/* paranoid */ verifyf( monitors[i]->owner == active_thread(), "Expected owner to be %p, got %p (r: %i, m: %p)", active_thread(), monitors[i]->owner, monitors[i]->recursion, monitors[i] );
…	…
761	752	
762	753	static inline void set_mask( monitor$ * storage [], __lock_size_t count, const __waitfor_mask_t & mask ) {
763		- for ( __lock_size_t i = 0; i < count; i++) {
	754	+ for ( i; count) {
764	755	storage[i]->mask = mask;
765	756	}
…	…
776	767	__cfaabi_dbg_print_safe( "Kernel : mon %p AS-stack top %p\n", this, this->signal_stack.top);
777	768	__condition_criterion_t * urgent = pop( this->signal_stack );
778	769	if ( urgent ) {
779	770	//The signaller stack is not empty,
780	771	//regardless of if we are ready to baton pass,
…	…
801	792	
802	793	// Check if there are any acceptable functions
803	794	if ( !it ) return false;
804	795	
805	796	// If this isn't the first monitor to test this, there is no reason to repeat the test.
806	797	if ( this != group[0] ) return group[0]->mask.accepted >= 0;
807	798	
808	799	// For all acceptable functions check if this is the current function.
809	800	for ( __lock_size_t i = 0; i < count; i++, it++ ) {
810	801	if ( *it == group ) {
811	802	*this->mask.accepted = i;
812	803	return true;
…	…
819	810	
820	811	static inline void init( __lock_size_t count, monitor$ * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] ) {
821		- for ( __lock_size_t i = 0; i < count; i++) {
	812	+ for ( i; count ) {
822	813	(criteria[i]){ monitors[i], waiter };
823	814	}
…	…
827	818	
828	819	static inline void init_push( __lock_size_t count, monitor$ * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] ) {
829		- for ( __lock_size_t i = 0; i < count; i++) {
	820	+ for ( i; count ) {
830	821	(criteria[i]){ monitors[i], waiter };
831	822	__cfaabi_dbg_print_safe( "Kernel : target %p = %p\n", criteria[i].target, &criteria[i] );
…	…
837	828	
838	829	static inline void lock_all( __spinlock_t * locks [], __lock_size_t count ) {
839		- for ( __lock_size_t i = 0; i < count; i++) {
	830	+ for ( i; count ) {
840	831	lock( *locks[i] __cfaabi_dbg_ctx2 );
841	832	}
…	…
843	834	
844	835	static inline void lock_all( monitor$ * source [], __spinlock_t * /*out*/ locks [], __lock_size_t count ) {
845		- for ( __lock_size_t i = 0; i < count; i++) {
	836	+ for ( i; count ) {
846	837	__spinlock_t * l = &source[i]->lock;
847	838	lock( *l __cfaabi_dbg_ctx2 );
848	839	if (locks) locks[i] = l;
849	840	}
850	841	}
851	842	
852	843	static inline void unlock_all( __spinlock_t * locks [], __lock_size_t count ) {
853		- for ( __lock_size_t i = 0; i < count; i++) {
	844	+ for ( i; count ) {
854	845	unlock( *locks[i] );
855	846	}
…	…
857	848	
858	849	static inline void unlock_all( monitor$ * locks [], __lock_size_t count ) {
859		- for ( __lock_size_t i = 0; i < count; i++) {
	850	+ for ( i; count ) {
860	851	unlock( locks[i]->lock );
861	852	}
…	…
869	860	__waitfor_mask_t /*out*/ masks []
870	861	) {
871		- for ( __lock_size_t i = 0; i < count; i++) {
	862	+ for ( i; count ) {
872	863	recursions[i] = ctx[i]->recursion;
873		- masks[i]
	864	+ masks[i] = ctx[i]->mask;
874	865	}
875	866	}
…	…
883	874	) {
884	875	lock_all( locks, count );
885		- for ( __lock_size_t i = 0; i < count; i++) {
	876	+ for ( i; count ) {
886	877	ctx[i]->recursion = recursions[i];
887		- ctx[i]->mask
	878	+ ctx[i]->mask = masks[i];
888	879	}
889	880	unlock_all( locks, count );
…	…
901	892	bool ready2run = true;
902	893	
903		- for( int i = 0; i < count; i++ ) {
904		-
	894	+ for ( i; count ) {
905	895	// __cfaabi_dbg_print_safe( "Checking %p for %p\n", &criteria[i], target );
906	896	if ( &criteria[i] == target ) {
907	897	criteria[i].ready = true;
908	898	// __cfaabi_dbg_print_safe( "True\n" );
…	…
918	908	static inline void brand_condition( condition & this ) {
919	909	thread$ * thrd = active_thread();
920	910	if ( !this.monitors ) {
921	911	// __cfaabi_dbg_print_safe( "Branding\n" );
922	912	assertf( thrd->monitors.data != 0p, "No current monitor to brand condition %p", thrd->monitors.data );
924	914	
925	915	this.monitors = (monitor$ **)malloc( this.monitor_count * sizeof( *this.monitors ) );
926		- for ( int i = 0; i < this.monitor_count; i++) {
	916	+ for ( i; this.monitor_count ) {
927	917	this.monitors[i] = thrd->monitors[i];
928	918	}
…	…
947	937	// For each acceptable check if it matches
948	938	int i = 0;
949	939	__acceptable_t * end = end(mask);
950	940	__acceptable_t * begin = begin(mask);
951	941	for ( __acceptable_t * it = begin; it != end; it++, i++ ) {
…	…
961	951	#endif
962	952	}
963		- #endif
964		- int i = 0;
965		- __acceptable_t * end = end (mask);
966		- __acceptable_t * begin = begin(mask);
	953	+ #else
967	954	// For each acceptable (respect lexical priority in waitfor statement)
968		- for ( __acceptable_t * it = begin; it != end; it++, i++ ) {
	955	+ __acceptable_t * it = end(mask); it--; // end is passed the last node, so backup
	956	+ for ( int i = mask.size - 1; i >= 0; i -= 1, it-- ) {
969	957	#if defined( __CFA_WITH_VERIFY__ )
970	958	thread$ * last = 0p;
…	…
980	968	// For each thread in the entry-queue check for a match
981	969	if ( *it == curr->monitors ) {
982		- // If match, return it after removeing from the entry queue
	970	+ // If match, return it after removing from the entry queue
983	971	return [remove( entry_queue, thrd_it ), i];
984	972	} // if
…	…
989	977	} // for
990	978	} // for
	979	+ #endif
991	980	return [0, -1];
992	981	}
993	982	
994		- forall( T & | sized( T ))
	983	+ forall( T & | sized( T ) )
995	984	static inline __lock_size_t insert_unique( T * array [], __lock_size_t & size, T * val ) {
996		- if ( !val ) return size;
	985	+ if ( ! val ) return size;
997	986	
998		- for ( __lock_size_t i = 0; i <= size; i++) {
	987	+ for ( __lock_size_t i; ~= size ) {
999	988	if ( array[i] == val ) return size;
1000	989	
1001	990	array[size] = val;
1002	991	
1003		- size = size + 1;
1004		- return size;
	992	+ return size += 1;
1005	993	
1006	994	
1007	995	static inline __lock_size_t count_max( const __waitfor_mask_t & mask ) {
1008	996	__lock_size_t max = 0;
1009		- for ( __lock_size_t i = 0; i < mask.size; i++) {
	997	+ for ( i; mask.size ) {
1010	998	__acceptable_t & accepted = mask[i];
1011	999	max += accepted.size;
…	…
1016	1004	static inline __lock_size_t aggregate( monitor$ * storage [], const __waitfor_mask_t & mask ) {
1017	1005	__lock_size_t size = 0;
1018		- for ( __lock_size_t i = 0; i < mask.size; i++) {
	1006	+ for ( i; mask.size ) {
1019	1007	__acceptable_t & accepted = mask[i];
1020	1008	__libcfa_small_sort( accepted.data, accepted.size );
1021	1009	for ( __lock_size_t j = 0; j < accepted.size; j++) {
1022	1010	insert_unique( storage, size, accepted[j] );
1023	1011	}
…	…
1040	1028	__cfaabi_dbg_print_safe( "Kernel : %10p Entering mon %p (%p)\n", thrd, this, this->owner);
1041	1029	
1042	1030	if ( unlikely(0 != (0x1 & (uintptr_t)this->owner)) ) {
1043	1031	abort( "Attempt by thread \"%.256s\" (%p) to access joined monitor %p.", thrd->self_cor.name, thrd, this );
1044		- }
1045		- else if( !this->owner ) {
	1032	+ } else if ( !this->owner ) {
1046	1033	// No one has the monitor, just take it
1047	1034	__set_owner( this, thrd );
1048	1035	
1049	1036	__cfaabi_dbg_print_safe( "Kernel : mon is free \n" );
1050		- }
1051		- else if( this->owner == thrd) {
	1037	+ } else if ( this->owner == thrd) {
1052	1038	// We already have the monitor, just note how many times we took it
1053	1039	this->recursion += 1;
1054	1040	
1055	1041	__cfaabi_dbg_print_safe( "Kernel : mon already owned \n" );
1056		- }
1057		- else {
	1042	+ } else {
1058	1043	__cfaabi_dbg_print_safe( "Kernel : blocking \n" );
1059	1044	
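Most of the substantive edits in this changeset replace C-style loop headers with Cforall's range-style loop control. The following is a minimal sketch, not part of the changeset, pairing each new form with the loop it replaces; the correspondences are inferred from the replacements shown above rather than from the CFA reference manual, and the variables are illustrative only.

	#include <fstream.hfa>                      // CFA stream I/O for sout

	int main() {
		int count = 4;
		for ( i; count ) sout | i;              // replaces: for ( int i = 0; i < count; i += 1 )
		for ( i; 1~count ) sout | i;            // replaces: for ( int i = 1; i < count; i += 1 )
		for ( i; -~= count - 1 ) sout | i;      // replaces: for ( int i = count - 1; i >= 0; i -= 1 )
	}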