Changeset 1b143de
- Timestamp:
- May 12, 2020, 4:26:07 PM (4 years ago)
- Branches:
- ADT, arm-eh, ast-experimental, enum, forall-pointer-decay, jacob/cs343-translation, master, new-ast, new-ast-unique-expr, pthread-emulation, qualifiedEnum
- Children:
- 2f1cb37, 9da5a50
- Parents:
- 4fa44e7
- Location:
- libcfa/src/concurrency
- Files:
- 4 edited
Legend:
- Unmodified
- Added
- Removed
libcfa/src/concurrency/invoke.h
r4fa44e7 r1b143de 166 166 struct $thread * next; 167 167 struct $thread * prev; 168 unsigned long long ts;168 volatile unsigned long long ts; 169 169 }; 170 170 -
libcfa/src/concurrency/io.cfa
r4fa44e7 r1b143de 357 357 // The thread was preempted and now it is on the ready queue 358 358 /* paranoid */ verify( thrd.state == Active ); // The thread better be in this state 359 /* paranoid */ verify( thrd.next == 1p ); // The thread should be the last on the list359 /* paranoid */ verify( thrd.next != 0p ); // The thread should be the last on the list 360 360 /* paranoid */ verify( this.ready_queue.head == &thrd ); // The thread should be the only thing on the list 361 361 -
libcfa/src/concurrency/monitor.cfa
r4fa44e7 r1b143de 907 907 // For each thread in the entry-queue 908 908 for( $thread ** thrd_it = &entry_queue.head; 909 *thrd_it;909 (*thrd_it) != 1p; 910 910 thrd_it = &(*thrd_it)->link.next 911 911 ) { -
libcfa/src/concurrency/ready_queue.cfa
r4fa44e7 r1b143de 15 15 16 16 #define __cforall_thread__ 17 #define __CFA_DEBUG_PRINT_READY_QUEUE__17 // #define __CFA_DEBUG_PRINT_READY_QUEUE__ 18 18 19 19 #include "bits/defs.hfa" … … 112 112 // Helpers used by extract 113 113 // (_mask_bitsidx() & X) returns a bit index valid for a __cfa_readyQ_mask_t, where X is any integer 114 static inline __cfa_readyQ_mask_t _mask_bitsidx () { return (8 * sizeof(__cfa_readyQ_mask_t)) - 1; }114 static inline __cfa_readyQ_mask_t _mask_bitsidx () __attribute__ ((const)) { return (8 * sizeof(__cfa_readyQ_mask_t)) - 1; } 115 115 116 116 // (X >> _mask_shiftidx()) retuns an index into an array of __cfa_readyQ_mask_t 117 static inline __cfa_readyQ_mask_t _mask_shiftidx() { return (8 * sizeof(__cfa_readyQ_mask_t)) - __builtin_clzl(_mask_bitsidx()); }117 static inline __cfa_readyQ_mask_t _mask_shiftidx() __attribute__ ((const)) { return (8 * sizeof(__cfa_readyQ_mask_t)) - __builtin_clzl(_mask_bitsidx()); } 118 118 119 119 … … 121 121 // Given an index into the large mask, returns the bit index and which __cfa_readyQ_mask_t index in the array 122 122 static inline [__cfa_readyQ_mask_t, __cfa_readyQ_mask_t] extract(__cfa_readyQ_mask_t idx) { 123 __cfa_readyQ_mask_t word = idx >> _mask_ bitsidx();124 __cfa_readyQ_mask_t bit = idx & _mask_ shiftidx();123 __cfa_readyQ_mask_t word = idx >> _mask_shiftidx(); 124 __cfa_readyQ_mask_t bit = idx & _mask_bitsidx(); 125 125 return [bit, word]; 126 126 } … … 188 188 } 189 189 190 __cfadbg_print_safe(ready_queue, "Kernel : Registering proc %p done, id % u\n", proc, n);190 __cfadbg_print_safe(ready_queue, "Kernel : Registering proc %p done, id %lu\n", proc, n); 191 191 192 192 // Return new spot. 
… … 304 304 /* paranoid */ verifyf(((intptr_t)(&this) % 128) == 0, "Expected address to be aligned %p %% 128 == %zd", &this, ((intptr_t)(&this) % 128)); 305 305 306 /* paranoid */ verifyf(_mask_shiftidx() == 6 , "% zu", _mask_shiftidx());307 /* paranoid */ verifyf(_mask_bitsidx () == 63, "% zu", _mask_bitsidx());306 /* paranoid */ verifyf(_mask_shiftidx() == 6 , "%llu", _mask_shiftidx()); 307 /* paranoid */ verifyf(_mask_bitsidx () == 63, "%llu", _mask_bitsidx()); 308 308 } 309 309 … … 422 422 // Check whether or not list is empty 423 423 static inline bool is_empty(__intrusive_lane_t & this) { 424 verify( (this.before.link.ts == 0) == (this.count == 0) );424 // Cannot verify here since it may not be locked 425 425 return this.before.link.ts == 0; 426 426 } … … 428 428 // Return the timestamp 429 429 static inline unsigned long long ts(__intrusive_lane_t & this) { 430 verify( this.before.link.ts == this.before.link.next->link.ts );430 // Cannot verify here since it may not be locked 431 431 return this.before.link.ts; 432 432 } … … 520 520 [bit, word] = extract(index); 521 521 522 __cfadbg_print_safe(ready_queue, "Kernel : Ready queue extracted index %u as [bit %llu, word %llu]\n", index, bit, word); 523 522 524 // Conditional check 523 525 verifyf( … … 552 554 // Conditional check 553 555 verifyf( 554 strict == STRICT &&// Conditional check if it was expected to be set556 strict != STRICT || // Conditional check if it was expected to be set 555 557 ((mask[word] & (1ull << bit)) != 0), 556 558 "Before clear %llu:%llu (%u), %llx & %llx", word, bit, index, mask[word], (1ull << bit) … … 562 564 // Conditional check 563 565 verifyf( 564 strict == STRICT &&// Conditional check if it was expected to be cleared566 strict != STRICT || // Conditional check if it was expected to be cleared 565 567 ret, 566 568 "Bit was set but btr returned false" … … 576 578 //----------------------------------------------------------------------- 577 579 __attribute__((hot)) bool 
push(struct cluster * cltr, struct $thread * thrd) with (cltr->ready_queue) { 580 __cfadbg_print_safe(ready_queue, "Kernel : Pushing %p on cluster %p (mask %llu)\n", thrd, cltr, used.mask[0]); 581 578 582 // write timestamp 579 583 thrd->link.ts = rdtscl(); … … 605 609 // If this lane used to be empty we need to do more 606 610 if(lane_first) { 611 // Update the bit mask 612 mask_set((__cfa_readyQ_mask_t *)used.mask, i, STRICT); 613 607 614 // Update the global count 608 615 size_t ret = __atomic_fetch_add( &used.count, 1z, __ATOMIC_SEQ_CST); … … 610 617 // Check if the entire queue used to be empty 611 618 first = (ret == 0); 612 613 // Update the bit mask614 mask_set((__cfa_readyQ_mask_t *)used.mask, i, STRICT);615 619 } 616 620 … … 624 628 // Unlock and return 625 629 __atomic_unlock( &lanes.data[i].lock ); 630 631 __cfadbg_print_safe(ready_queue, "Kernel : Pushed %p on cluster %p (idx: %u, mask %llu, first %d)\n", thrd, cltr, i, used.mask[0], lane_first); 626 632 627 633 // Update statistics … … 807 813 808 814 if(sl.before.link.ts == 0l) { 809 assert(tail(sl)->link.next == 0p);810 815 assert(tail(sl)->link.prev == head(sl)); 811 816 assert(head(sl)->link.next == tail(sl)); 812 assert(head(sl)->link.prev == 0p); 817 } else { 818 assert(tail(sl)->link.prev != head(sl)); 819 assert(head(sl)->link.next != tail(sl)); 813 820 } 814 821 } … … 917 924 for( idx; (size_t)lanes.count ~ ocount) { 918 925 // Lock is not strictly needed but makes checking invariants much easier 919 bool locked = __atomic_try_acquire(&lanes.data[idx].lock);926 __attribute__((unused)) bool locked = __atomic_try_acquire(&lanes.data[idx].lock); 920 927 verify(locked); 921 928
Note: See TracChangeset for help on using the changeset viewer.