Changeset 64a7146 for libcfa/src/concurrency/ready_queue.cfa
- Timestamp:
- Jun 19, 2020, 3:49:43 PM (4 years ago)
- Branches:
- ADT, arm-eh, ast-experimental, enum, forall-pointer-decay, jacob/cs343-translation, master, new-ast, new-ast-unique-expr, pthread-emulation, qualifiedEnum
- Children:
- 68f36f4
- Parents:
- 04b5cef
- File:
-
- 1 edited
Legend:
- Unmodified
- Added
- Removed
-
libcfa/src/concurrency/ready_queue.cfa
Diff of libcfa/src/concurrency/ready_queue.cfa (r04b5cef → r64a7146), reconstructed:

@@ lines 81-86: __processor_id constructor — track ownership under verification @@
 	this.handle = proc;
 	this.lock   = false;
+	#ifdef __CFA_WITH_VERIFY__
+		this.owned = false;
+	#endif
 }

@@ lines 97-104: registration — relax alignment check to a multiple of the cache line @@
 	&& __atomic_compare_exchange_n( &data[i].handle, &null, proc, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) {
 	/*paranoid*/ verify(i < ready);
-	/*paranoid*/ verify( __alignof__(data[i]) == cache_line_size);
+	/*paranoid*/ verify(0 == (__alignof__(data[i]) % cache_line_size));
 	/*paranoid*/ verify((((uintptr_t)&data[i]) % cache_line_size) == 0);
 	return i;

@@ lines 562-573: new query() entry point added before push() @@
 //-----------------------------------------------------------------------
+__attribute__((hot)) bool query(struct cluster * cltr) {
+	return query(cltr->ready_queue.snzi);
+}
+
+//-----------------------------------------------------------------------
 __attribute__((hot)) bool push(struct cluster * cltr, struct $thread * thrd) with (cltr->ready_queue) {
 	__cfadbg_print_safe(ready_queue, "Kernel : Pushing %p on cluster %p\n", thrd, cltr);

@@ lines 761-773: ready_queue_grow — caller now holds the RW lock; assert instead of acquiring @@
 // Grow the ready queue
 void ready_queue_grow  (struct cluster * cltr) {
-	// Lock the RWlock so no-one pushes/pops while we are changing the queue
-	uint_fast32_t last_size = ready_mutate_lock();
-
+	/* paranoid */ verify( ready_mutate_islocked() );
 	__cfadbg_print_safe(ready_queue, "Kernel : Growing ready queue\n");

@@ lines 808-823: end of grow / start of shrink — same locking-contract change @@
 	__cfadbg_print_safe(ready_queue, "Kernel : Growing ready queue done\n");

-	// Unlock the RWlock
-	ready_mutate_unlock( last_size );
+	/* paranoid */ verify( ready_mutate_islocked() );
 }

 // Shrink the ready queue
 void ready_queue_shrink(struct cluster * cltr) {
-	// Lock the RWlock so no-one pushes/pops while we are changing the queue
-	uint_fast32_t last_size = ready_mutate_lock();
-
+	/* paranoid */ verify( ready_mutate_islocked() );
 	__cfadbg_print_safe(ready_queue, "Kernel : Shrinking ready queue\n");

@@ lines 889-895: end of ready_queue_shrink — unlock removed, assert lock is held @@
 	__cfadbg_print_safe(ready_queue, "Kernel : Shrinking ready queue done\n");
-
-	// Unlock the RWlock
-	ready_mutate_unlock( last_size );
+	/* paranoid */ verify( ready_mutate_islocked() );
 }
Note: See TracChangeset
for help on using the changeset viewer.