Changeset c84b4be
- Timestamp:
- Dec 13, 2019, 1:45:49 PM (5 years ago)
- Branches:
- ADT, arm-eh, ast-experimental, enum, forall-pointer-decay, jacob/cs343-translation, master, new-ast, new-ast-unique-expr, pthread-emulation, qualifiedEnum
- Children:
- 75ca7f4
- Parents:
- 983edfd
- Location:
- libcfa/src
- Files:
- 6 edited
libcfa/src/bits/debug.hfa
r983edfd → rc84b4be

      #include <stdarg.h>
      #include <stdio.h>
    + #include <unistd.h>

      extern void __cfaabi_bits_write( int fd, const char *buffer, int len );
      …
      #endif

    + // #define __CFA_DEBUG_PRINT__
    +
      #ifdef __CFA_DEBUG_PRINT__
          #define __cfaabi_dbg_write( buffer, len )       __cfaabi_bits_write( STDERR_FILENO, buffer, len )
          #define __cfaabi_dbg_acquire()                  __cfaabi_bits_acquire()
          #define __cfaabi_dbg_release()                  __cfaabi_bits_release()
    -     #define __cfaabi_dbg_print_safe(...)            __cfaabi_bits_print_safe  ( __VA_ARGS__ )
    -     #define __cfaabi_dbg_print_nolock(...)          __cfaabi_bits_print_nolock( __VA_ARGS__ )
    -     #define __cfaabi_dbg_print_buffer(...)          __cfaabi_bits_print_buffer( __VA_ARGS__ )
    -     #define __cfaabi_dbg_print_buffer_decl(...)     char __dbg_text[256]; int __dbg_len = snprintf( __dbg_text, 256, __VA_ARGS__ ); __cfaabi_bits_write( __dbg_text, __dbg_len );
    -     #define __cfaabi_dbg_print_buffer_local(...)    __dbg_len = snprintf( __dbg_text, 256, __VA_ARGS__ ); __cfaabi_dbg_write( __dbg_text, __dbg_len );
    +     #define __cfaabi_dbg_print_safe(...)            __cfaabi_bits_print_safe  ( STDERR_FILENO, __VA_ARGS__ )
    +     #define __cfaabi_dbg_print_nolock(...)          __cfaabi_bits_print_nolock( STDERR_FILENO, __VA_ARGS__ )
    +     #define __cfaabi_dbg_print_buffer(...)          __cfaabi_bits_print_buffer( STDERR_FILENO, __VA_ARGS__ )
    +     #define __cfaabi_dbg_print_buffer_decl(...)     char __dbg_text[256]; int __dbg_len = snprintf( __dbg_text, 256, __VA_ARGS__ ); __cfaabi_bits_write( STDERR_FILENO, __dbg_text, __dbg_len );
    +     #define __cfaabi_dbg_print_buffer_local(...)    __dbg_len = snprintf( __dbg_text, 256, __VA_ARGS__ ); __cfaabi_bits_write( STDERR_FILENO, __dbg_text, __dbg_len );
      #else
          #define __cfaabi_dbg_write(...)                 ((void)0)
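The net effect of this hunk is that every __cfaabi_dbg_print_* macro now names its target file descriptor (STDERR_FILENO) explicitly rather than leaving it implicit in the bits_print helpers, which is why <unistd.h> is now included. As a rough illustration only (the helper name below is hypothetical, not the actual libcfa routine), an fd-parameterized debug print in the style of the buffer_decl/buffer_local macros could look like this in plain C:

    #include <stdarg.h>
    #include <stdio.h>
    #include <unistd.h>

    // Sketch: format into a fixed 256-byte buffer, then write the result to
    // the given descriptor, mirroring the print_buffer_decl/local macros.
    static void dbg_print_fd( int fd, const char * fmt, ... ) {
        char text[256];
        va_list args;
        va_start( args, fmt );
        int len = vsnprintf( text, sizeof(text), fmt, args );
        va_end( args );
        if ( len > 0 ) {
            size_t n = (size_t)len < sizeof(text) ? (size_t)len : sizeof(text) - 1;
            write( fd, text, n );
        }
    }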
libcfa/src/concurrency/kernel.cfa
r983edfd → rc84b4be

      with( *thrd->curr_cluster ) {
    -     if(was_empty) {
    -         lock      (proc_list_lock __cfaabi_dbg_ctx2);
    -         if(idles) {
    -             wake_fast(idles.head);
    -         }
    -         unlock    (proc_list_lock);
    -     }
    -     else if( struct processor * idle = idles.head ) {
    -         wake_fast(idle);
    -     }
    +     // if(was_empty) {
    +     //     lock      (proc_list_lock __cfaabi_dbg_ctx2);
    +     //     if(idles) {
    +     //         wake_fast(idles.head);
    +     //     }
    +     //     unlock    (proc_list_lock);
    +     // }
    +     // else if( struct processor * idle = idles.head ) {
    +     //     wake_fast(idle);
    +     // }
      }
      …
      //=============================================================================================
      static void halt(processor * this) with( *this ) {
    -     // verify( ! __atomic_load_n(&do_terminate, __ATOMIC_SEQ_CST) );
    -
    -     with( *cltr ) {
    -         lock      (proc_list_lock __cfaabi_dbg_ctx2);
    -         push_front(idles, *this);
    -         unlock    (proc_list_lock);
    -     }
    -
    -     __cfaabi_dbg_print_safe("Kernel : Processor %p ready to sleep\n", this);
    -
    -     wait( idleLock );
    -
    -     __cfaabi_dbg_print_safe("Kernel : Processor %p woke up and ready to run\n", this);
    -
    -     with( *cltr ) {
    -         lock      (proc_list_lock __cfaabi_dbg_ctx2);
    -         remove    (idles, *this);
    -         unlock    (proc_list_lock);
    -     }
    +     // // verify( ! __atomic_load_n(&do_terminate, __ATOMIC_SEQ_CST) );
    +
    +     // with( *cltr ) {
    +     //     lock      (proc_list_lock __cfaabi_dbg_ctx2);
    +     //     push_front(idles, *this);
    +     //     unlock    (proc_list_lock);
    +     // }
    +
    +     // __cfaabi_dbg_print_safe("Kernel : Processor %p ready to sleep\n", this);
    +
    +     // wait( idleLock );
    +
    +     // __cfaabi_dbg_print_safe("Kernel : Processor %p woke up and ready to run\n", this);
    +
    +     // with( *cltr ) {
    +     //     lock      (proc_list_lock __cfaabi_dbg_ctx2);
    +     //     remove    (idles, *this);
    +     //     unlock    (proc_list_lock);
    +     // }
      }
libcfa/src/concurrency/kernel.hfa
r983edfd → rc84b4be

      volatile bool lock;
      unsigned int last_id;
    + unsigned int count;

      // anchor for the head and the tail of the queue
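The new count field gives each intrusive ready list an element counter that push and pop keep in sync (see the ready_queue.cfa hunk below); the shrink path can then verify that no thread went missing during redistribution. A minimal plain-C sketch of the idea, with illustrative names rather than the real CFA types:

    struct node { struct node * next; };

    struct ready_list {
        struct node * head;
        unsigned int count;                  // new field: number of queued elements
    };

    static void push_front( struct ready_list * l, struct node * n ) {
        n->next = l->head;
        l->head = n;
        l->count++;                          // mirrors `this.count++` in push()
    }

    static struct node * pop_front( struct ready_list * l ) {
        struct node * n = l->head;
        if ( n ) { l->head = n->next; l->count--; }   // mirrors `this.count--` in pop()
        return n;
    }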
libcfa/src/concurrency/monitor.cfa
r983edfd → rc84b4be

      }

    - __cfaabi_dbg_print_safe( "Kernel : Runing %i (%p)\n", ready2run, ready2run ? node->waiting_thread : 0p );
    + __cfaabi_dbg_print_safe( "Kernel : Runing %i (%p)\n", ready2run, ready2run ? (thread_desc*)node->waiting_thread : (thread_desc*)0p );
      return ready2run ? node->waiting_thread : 0p;
  }
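The only change here is casting both arms of the conditional to thread_desc *. Passing a conditional whose arms have different pointer-ish types (a typed pointer versus the null constant 0p) through a variadic print leaves the argument's type to overload and conversion resolution; pinning both arms to one type removes the ambiguity. A plain-C analogue of the same fix (printf's %p expects void *):

    #include <stdio.h>

    struct thread_desc;   // opaque stand-in for the real descriptor type

    static void report( int ready2run, struct thread_desc * waiting ) {
        // Cast both arms so the vararg has a single, well-defined pointer type.
        printf( "Kernel : Running %i (%p)\n", ready2run,
                (void *)( ready2run ? waiting : (struct thread_desc *)0 ) );
    }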
libcfa/src/concurrency/preemption.cfa
r983edfd → rc84b4be

      // If there are still alarms pending, reset the timer
      if( alarms->head ) {
    -     __cfaabi_dbg_print_buffer_decl( " KERNEL: @%ju(%ju) resetting alarm to %ju.\n", currtime.tv, __kernel_get_time().tv, (alarms->head->alarm - currtime).tv);
    +     // __cfaabi_dbg_print_buffer_decl( " KERNEL: @%ju(%ju) resetting alarm to %ju.\n", currtime.tv, __kernel_get_time().tv, (alarms->head->alarm - currtime).tv);
          Duration delta = alarms->head->alarm - currtime;
          Duration caped = max(delta, 50`us);
libcfa/src/concurrency/ready_queue.cfa
r983edfd → rc84b4be

      this.lock = false;
      this.last_id = -1u;
    + this.count = 0u;

      this.before.link.prev = 0p;
      …
      /* paranoid */ verify(tail(this)->link.next == 0p );
      /* paranoid */ verify(tail(this)->link.prev == head(this) );
    + /* paranoid */ verify(this.count == 0u );
  }
      …
      verify(node->link.next == 0p);
      verify(node->link.prev == 0p);
    +
    + this.count++;

      if(this.before.link.ts == 0l) {
      …
      }

    + this.count--;
      /* paranoid */ verify(node);
      …
  void ready_queue_grow (struct cluster * cltr) {
      uint_fast32_t last_size = ready_mutate_lock( *cltr );
    + __cfaabi_dbg_print_safe("Kernel : Growing ready queue\n");
      check( cltr->ready_queue );
      …
      // Make sure that everything is consistent
      check( cltr->ready_queue );
    + __cfaabi_dbg_print_safe("Kernel : Growing ready queue done\n");
      ready_mutate_unlock( *cltr, last_size );
  }
  …
  void ready_queue_shrink(struct cluster * cltr) {
      uint_fast32_t last_size = ready_mutate_lock( *cltr );
    + __cfaabi_dbg_print_safe("Kernel : Shrinking ready queue\n");
      with( cltr->ready_queue ) {
    +     #if defined(__CFA_WITH_VERIFY__)
    +         size_t nthreads = 0;
    +         for( idx; (size_t)list.count ) {
    +             nthreads += list.data[idx].count;
    +         }
    +     #endif
    +
          size_t ocount = list.count;
          // Check that we have some space left
          …
          // redistribute old data
          verify(ocount > list.count);
    +     __attribute__((unused)) size_t displaced = 0;
          for( idx; (size_t)list.count ~ ocount) {
              // This is not strictly needed but makes checking invariants much easier
              …
              verify(thrd);
              push(cltr, thrd);
    +         displaced++;
              …
              ^(list.data[idx]){};
          }
    +
    +     __cfaabi_dbg_print_safe("Kernel : Shrinking ready queue displaced %zu threads\n", displaced);

          // clear the now unused masks
          …

          empty.count = 0;
    -     for( i ; lword ) {
    +     for( i ; 0 ~= lword ) {
              empty.count += __builtin_popcountl(empty.mask[i]);
          }
          …
              fix(list.data[idx]);
          }
    +
    +     #if defined(__CFA_WITH_VERIFY__)
    +         for( idx; (size_t)list.count ) {
    +             nthreads -= list.data[idx].count;
    +         }
    +         assertf(nthreads == 0, "Shrinking changed number of threads");
    +     #endif
      }

      // Make sure that everything is consistent
      check( cltr->ready_queue );
    + __cfaabi_dbg_print_safe("Kernel : Shrinking ready queue done\n");
      ready_mutate_unlock( *cltr, last_size );
  }
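Two things are worth noting in this hunk: the __CFA_WITH_VERIFY__ blocks use the new per-list count to assert that shrinking conserves the total number of queued threads, and the popcount loop bound changes from `for( i ; lword )` (upper bound excluded) to `for( i ; 0 ~= lword )` (upper bound included), so the last mask word is no longer skipped when recounting ready entries. In plain C the loop-bound fix is simply `<` versus `<=`; the sketch below only illustrates that change, it is not the CFA source:

    #include <stdint.h>

    // Count set bits across mask words 0..lword inclusive; the pre-fix loop
    // stopped one word early and under-counted the ready entries.
    static unsigned count_ready( const uint64_t * mask, size_t lword ) {
        unsigned count = 0;
        for ( size_t i = 0; i <= lword; i++ ) {      // was `i < lword`
            count += (unsigned)__builtin_popcountll( mask[i] );
        }
        return count;
    }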
Note: See TracChangeset for help on using the changeset viewer.