Changes in / [4a40fca7:cb0bcf1]
- Location: libcfa/src/concurrency
- Files: 3 edited
Legend:
- unmodified lines are shown with no prefix
- added lines are prefixed with +
- removed lines are prefixed with -
libcfa/src/concurrency/io.cfa
  lock( queue.lock __cfaabi_dbg_ctx2 );
  {
-     was_empty = empty(queue.queue);
+     was_empty = queue.queue`isEmpty;

      // Add our request to the list
-     add( queue.queue, item );
+     insert_last( queue.queue, item );

      // Mark as pending
…
  // notify the arbiter that new allocations are available
  static void __ioarbiter_notify( io_arbiter$ & this, io_context$ * ctx ) {
-     /* paranoid */ verify( ! empty(this.pending.queue));
+     /* paranoid */ verify( !this.pending.queue`isEmpty );
      /* paranoid */ verify( __preemption_enabled() );
…
  // as long as there are pending allocations try to satisfy them
  // for simplicity do it in FIFO order
- while( ! empty(this.pending.queue)) {
+ while( !this.pending.queue`isEmpty ) {
      // get first pending allocs
      __u32 have = ctx->sq.free_ring.tail - ctx->sq.free_ring.head;
-     __pending_alloc & pa = (__pending_alloc&) head( this.pending.queue);
+     __pending_alloc & pa = (__pending_alloc&)(this.pending.queue`first);

      // check if we have enough to satisfy the request
…
      // if there are enough allocations it means we can drop the request
-     drop( this.pending.queue );
+     try_pop_front( this.pending.queue );

      /* paranoid */__attribute__((unused)) bool ret =
…
  // pop each operation one at a time.
  // There is no wait morphing because of the io sq ring
- while( ! empty(ctx.ext_sq.queue)) {
+ while( !ctx.ext_sq.queue`isEmpty ) {
      // drop the element from the queue
-     __external_io & ei = (__external_io&) drop( ctx.ext_sq.queue );
+     __external_io & ei = (__external_io&)try_pop_front( ctx.ext_sq.queue );

      // submit it
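Across io.cfa the old bits/queue.hfa operations are replaced one-for-one by the dlist API: empty() becomes the `isEmpty postfix expression, head() becomes `first, add() becomes insert_last(), and drop() becomes try_pop_front(). A minimal sketch of the same FIFO pattern, assuming dlist, dlink, and P9_EMBEDDED come from containers/list.hfa; the request type and the driver are hypothetical, not part of the changeset:

{{{
#include <containers/list.hfa>      // dlist / dlink / P9_EMBEDDED (assumed location)

// hypothetical request type, standing in for __outstanding_io
struct request {
    inline dlink(request);          // intrusive link fields
    int id;
};
P9_EMBEDDED( request, dlink(request) )

int main() {
    dlist(request) q;
    request r1, r2;
    r1.id = 1;  r2.id = 2;

    insert_last( q, r1 );           // enqueue at the tail (was: add)
    insert_last( q, r2 );

    int sum = 0;
    while( !q`isEmpty ) {           // emptiness test (was: empty)
        request & r = q`first;      // peek at the head (was: head)
        sum += r.id;                // ... process the request ...
        try_pop_front( q );         // unlink the head (was: drop)
    }
}
}}}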
libcfa/src/concurrency/io/types.hfa
  #include "bits/locks.hfa"
- #include "bits/queue.hfa"
  #include "iofwd.hfa"
  #include "kernel/fwd.hfa"

  #if defined(CFA_HAVE_LINUX_IO_URING_H)
- #include "bits/sequence.hfa"
  #include "monitor.hfa"
…
  struct __outstanding_io {
      // intrusive link fields
-     inline Colable;
+     inline dlink(__outstanding_io);

      // primitive on which to block until the io is processed
      oneshot waitctx;
  };
- static inline __outstanding_io *& Next( __outstanding_io * n ) { return (__outstanding_io *)Next( (Colable *)n ); }
+ P9_EMBEDDED( __outstanding_io, dlink(__outstanding_io) )

  // queue of operations that are outstanding
…
      // the actual queue
-     Queue(__outstanding_io) queue;
+     dlist(__outstanding_io) queue;

      // volatile used to avoid the need for taking the lock if it's empty
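The types.hfa side of the change swaps the intrusive base: __outstanding_io previously inherited Colable and supplied a hand-written Next accessor, and now inlines dlink(__outstanding_io), with P9_EMBEDDED generating the link plumbing that dlist needs to reach the embedded fields. A sketch of the declaration pattern, assuming containers/list.hfa provides these names; node and its payload are hypothetical:

{{{
#include <containers/list.hfa>      // dlist / dlink / P9_EMBEDDED (assumed location)

// Old style (bits/queue.hfa): inherit Colable and hand-write a Next accessor.
// New style: inherit dlink(T) and let P9_EMBEDDED generate the glue.
struct node {                       // hypothetical stand-in for __outstanding_io
    inline dlink(node);             // intrusive doubly-linked fields
    void * payload;                 // payload slot (oneshot waitctx in the real struct)
};
P9_EMBEDDED( node, dlink(node) )    // tells dlist how to find the embedded links

dlist(node) pending;                // the list threads through the nodes themselves,
                                    // so no per-element allocation is needed
}}}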
libcfa/src/concurrency/pthread.cfa
  #include <errno.h>
  #include "locks.hfa"
- #include "bits/stack.hfa"
- #include "bits/sequence.hfa"

  #define check_nonnull(x) asm("": "+rm"(x)); if( x == 0p ) return EINVAL;
…
  struct pthread_values{
-     inline Seqable;
+     inline dlink(pthread_values);
      void * value;
      bool in_use;
  };
-
- static inline {
-     pthread_values *& Back( pthread_values * n ) {
-         return (pthread_values *)Back( (Seqable *)n );
-     }
-
-     pthread_values *& Next( pthread_values * n ) {
-         return (pthread_values *)Next( (Colable *)n );
-     }
- }
+ P9_EMBEDDED( pthread_values, dlink(pthread_values) )

  struct pthread_keys {
      bool in_use;
      void (* destructor)( void * );
-     Sequence(pthread_values) threads;
+     dlist( pthread_values ) threads;
  };
…
  struct Pthread_kernel_threads{
-     inline Colable;
+     inline dlink(Pthread_kernel_threads);
      processor p;
  };
-
- Pthread_kernel_threads *& Next( Pthread_kernel_threads * n ) {
-     return (Pthread_kernel_threads *)Next( (Colable *)n );
- }
-
- static Stack(Pthread_kernel_threads) cfa_pthreads_kernel_threads;
+ P9_EMBEDDED( Pthread_kernel_threads, dlink(Pthread_kernel_threads) )
+
+ static dlist(Pthread_kernel_threads) cfa_pthreads_kernel_threads;
  static bool cfa_pthreads_kernel_threads_zero = false;     // set to zero ?
  static int cfa_pthreads_no_kernel_threads = 1;            // number of kernel threads
…
      key = &cfa_pthread_keys[i];
      value->in_use = false;
-     remove(key->threads, *value);
+     remove(*value);
+
      // if a key value has a non-NULL destructor pointer, and the thread has a non-NULL value associated with that key,
      // the value of the key is set to NULL, and then the function pointed to is called with the previously associated value as its sole argument.
…
      // Remove key from all threads with a value.
-     pthread_values& p;
-     Sequence(pthread_values)& head = cfa_pthread_keys[key].threads;
-     for ( SeqIter(pthread_values) iter = { head }; iter | p; ) {
-         remove(head, p);
-         p.in_use = false;
-     }
+
+     // Sequence(pthread_values)& head = cfa_pthread_keys[key].threads;
+     // for ( SeqIter(pthread_values) iter = { head }; iter | p; ) {
+     //     remove(head, p);
+     //     p.in_use = false;
+     // }
+     pthread_values * p = &try_pop_front( cfa_pthread_keys[key].threads );
+     for ( ; p; ) {
+         p->in_use = false;
+         p = &try_pop_front( cfa_pthread_keys[key].threads );
+     }
      unlock(key_lock);
      return 0;
…
      if ( ! entry.in_use ) {
          entry.in_use = true;
-         add(cfa_pthread_keys[key].threads, entry);
+         insert_last(cfa_pthread_keys[key].threads, entry);
      } // if
      entry.value = (void *)value;
…
  //######################### Parallelism #########################
  void pthread_delete_kernel_threads_() __THROW {           // see uMain::~uMain
-     Pthread_kernel_threads& p;
-     for ( StackIter(Pthread_kernel_threads) iter = {cfa_pthreads_kernel_threads}; iter | p; ) {
-         delete(&p);
+     Pthread_kernel_threads * p = &try_pop_front(cfa_pthreads_kernel_threads);
+     for ( ; p; ) {
+         delete(p);
+         p = &try_pop_front(cfa_pthreads_kernel_threads);
      } // for
  } // pthread_delete_kernel_threads_
…
      lock( concurrency_lock );
      for ( ; new_level > cfa_pthreads_no_kernel_threads; cfa_pthreads_no_kernel_threads += 1 ) {   // add processors ?
-         push(cfa_pthreads_kernel_threads, *new() );
+         insert_last(cfa_pthreads_kernel_threads, *new() );
      } // for
      for ( ; new_level < cfa_pthreads_no_kernel_threads; cfa_pthreads_no_kernel_threads -= 1 ) {   // remove processors ?
-         delete(& pop(cfa_pthreads_kernel_threads));
+         delete(&try_pop_front(cfa_pthreads_kernel_threads));
      } // for
      unlock( concurrency_lock );
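pthread.cfa replaces both Sequence(pthread_values) and Stack(Pthread_kernel_threads) with dlist, and rewrites the iterate-and-remove loops as drain loops: try_pop_front returns a reference whose address is 0p once the list is empty, so the loop tests that address rather than using an iterator. A sketch of the idiom, assuming containers/list.hfa and stdlib.hfa provide the names used; worker and drain are hypothetical:

{{{
#include <containers/list.hfa>      // dlist / dlink / P9_EMBEDDED (assumed location)
#include <stdlib.hfa>               // new / delete

struct worker {                     // hypothetical stand-in for Pthread_kernel_threads
    inline dlink(worker);
};
P9_EMBEDDED( worker, dlink(worker) )

static dlist(worker) workers;

// drain loop, as in pthread_delete_kernel_threads_: pop and delete until
// try_pop_front yields a reference whose address is 0p (list empty)
static void drain() {
    worker * w = &try_pop_front( workers );
    for ( ; w; ) {
        delete( w );
        w = &try_pop_front( workers );
    }
}

int main() {
    insert_last( workers, *new() ); // heap-allocate and append, as in the changeset
    insert_last( workers, *new() );
    drain();
}
}}}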