Changeset 6d2386e
- Timestamp: Nov 10, 2017, 11:41:35 AM
- Branches: ADT, aaron-thesis, arm-eh, ast-experimental, cleanup-dtors, deferred_resn, demangler, enum, forall-pointer-decay, jacob/cs343-translation, jenkins-sandbox, master, new-ast, new-ast-unique-expr, new-env, no_list, persistent-indexer, pthread-emulation, qualifiedEnum, resolv-new, with_gc
- Children: 20ffcf3, 403b388, 490db327
- Parents: 3edc2df (diff), 34c6c767 (diff)
- Location: src/libcfa
- Files: 3 added, 9 edited

Note: this is a merge changeset; the changes displayed below correspond to the merge itself. Use the (diff) links above to see all the changes relative to each parent.
src/libcfa/Makefile.am
r3edc2df → r6d2386e

 cfa_includedir = $(CFA_INCDIR)
-nobase_cfa_include_HEADERS = ${headers} ${stdhdr} math gmp concurrency/invoke.h
+nobase_cfa_include_HEADERS = \
+	${headers} \
+	${stdhdr} \
+	math \
+	gmp \
+	bits/defs.h \
+	bits/locks.h \
+	concurrency/invoke.h \
+	libhdr.h \
+	libhdr/libalign.h \
+	libhdr/libdebug.h \
+	libhdr/libtools.h

 CLEANFILES = libcfa-prelude.c
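For context, automake's nobase_ prefix preserves each listed header's directory component at install time, which is why the new bits/ and libhdr/ headers can simply be appended to this one variable. An illustrative install layout (paths are assumptions based on standard automake behaviour, not shown in this changeset):

# nobase_ keeps subdirectories, so the new headers install as:
#   $(CFA_INCDIR)/bits/defs.h
#   $(CFA_INCDIR)/libhdr/libdebug.h
# Without nobase_, automake would flatten both into $(CFA_INCDIR)/ directly.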
src/libcfa/Makefile.in
r3edc2df → r6d2386e

 	containers/result containers/vector concurrency/coroutine \
 	concurrency/thread concurrency/kernel concurrency/monitor \
-	${shell echo stdhdr/*} math gmp concurrency/invoke.h
+	${shell echo stdhdr/*} math gmp bits/defs.h bits/locks.h \
+	concurrency/invoke.h libhdr.h libhdr/libalign.h \
+	libhdr/libdebug.h libhdr/libtools.h
 HEADERS = $(nobase_cfa_include_HEADERS)
 am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
…
 stdhdr = ${shell echo stdhdr/*}
 cfa_includedir = $(CFA_INCDIR)
-nobase_cfa_include_HEADERS = ${headers} ${stdhdr} math gmp concurrency/invoke.h
+nobase_cfa_include_HEADERS = \
+	${headers} \
+	${stdhdr} \
+	math \
+	gmp \
+	bits/defs.h \
+	bits/locks.h \
+	concurrency/invoke.h \
+	libhdr.h \
+	libhdr/libalign.h \
+	libhdr/libdebug.h \
+	libhdr/libtools.h
+
 CLEANFILES = libcfa-prelude.c
 all: all-am
src/libcfa/concurrency/alarm.c
r3edc2df → r6d2386e

 	disable_interrupts();
-	lock( &event_kernel->lock DEBUG_CTX2 );
+	lock( event_kernel->lock DEBUG_CTX2 );
 	{
 		verify( validate( alarms ) );
…
 	}
-	unlock( &event_kernel->lock );
+	unlock( event_kernel->lock );
 	this->set = true;
 	enable_interrupts( DEBUG_CTX );
…
 void unregister_self( alarm_node_t * this ) {
 	disable_interrupts();
-	lock( &event_kernel->lock DEBUG_CTX2 );
+	lock( event_kernel->lock DEBUG_CTX2 );
 	{
 		verify( validate( &event_kernel->alarms ) );
 		remove( &event_kernel->alarms, this );
 	}
-	unlock( &event_kernel->lock );
+	unlock( event_kernel->lock );
 	enable_interrupts( DEBUG_CTX );
 	this->set = false;
src/libcfa/concurrency/invoke.h
r3edc2df → r6d2386e

-#include <stdbool.h>
-#include <stdint.h>
+#include "bits/defs.h"
+#include "bits/locks.h"

 #ifdef __CFORALL__
…
 #define _INVOKE_H_

-#define unlikely(x) __builtin_expect(!!(x), 0)
-#define thread_local _Thread_local
-
 	typedef void (*fptr_t)();
 	typedef int_fast16_t __lock_size_t;
-
-	struct spinlock {
-		volatile int lock;
-		#ifdef __CFA_DEBUG__
-			const char * prev_name;
-			void* prev_thrd;
-		#endif
-	};
…
 	void push( struct __condition_stack_t &, struct __condition_criterion_t * );
 	struct __condition_criterion_t * pop( struct __condition_stack_t & );
-
-	void ?{}(spinlock & this);
-	void ^?{}(spinlock & this);
 }
 #endif
…
 	struct monitor_desc {
 		// spinlock to protect internal data
-		struct spinlock lock;
+		struct __spinlock_t lock;

 		// current owner of the monitor
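The spinlock definition and its ?{}/^?{} declarations removed here presumably move into the new bits/locks.h header (one of the three added files, whose contents this changeset does not display) under the name __spinlock_t. A minimal sketch reconstructed from the deleted code; the actual header may differ:

// Hypothetical reconstruction of __spinlock_t from bits/locks.h,
// based on the struct spinlock deleted above.
struct __spinlock_t {
	volatile int lock;                  // 0 = free, non-zero = held
	#ifdef __CFA_DEBUG__
		const char * prev_name;         // call site that last acquired the lock
		void * prev_thrd;               // thread that last acquired the lock
	#endif
};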
src/libcfa/concurrency/kernel
r3edc2df → r6d2386e

 //-----------------------------------------------------------------------------
 // Locks
-// Lock the spinlock, spin if already acquired
-void lock      ( spinlock * DEBUG_CTX_PARAM2 );
+// // Lock the spinlock, spin if already acquired
+// void lock      ( spinlock * DEBUG_CTX_PARAM2 );

-// Lock the spinlock, yield repeatedly if already acquired
-void lock_yield( spinlock * DEBUG_CTX_PARAM2 );
+// // Lock the spinlock, yield repeatedly if already acquired
+// void lock_yield( spinlock * DEBUG_CTX_PARAM2 );

-// Lock the spinlock, return false if already acquired
-bool try_lock  ( spinlock * DEBUG_CTX_PARAM2 );
+// // Lock the spinlock, return false if already acquired
+// bool try_lock  ( spinlock * DEBUG_CTX_PARAM2 );

-// Unlock the spinlock
-void unlock    ( spinlock * );
+// // Unlock the spinlock
+// void unlock    ( spinlock * );

 struct semaphore {
-	spinlock lock;
+	__spinlock_t lock;
 	int count;
 	__thread_queue_t waiting;
…
 struct cluster {
 	// Ready queue locks
-	spinlock ready_queue_lock;
+	__spinlock_t ready_queue_lock;

 	// Ready queue for threads
…
 	FinishOpCode action_code;
 	thread_desc * thrd;
-	spinlock * lock;
-	spinlock ** locks;
+	__spinlock_t * lock;
+	__spinlock_t ** locks;
 	unsigned short lock_count;
 	thread_desc ** thrds;
src/libcfa/concurrency/kernel.c
r3edc2df → r6d2386e

 void finishRunning(processor * this) {
 	if( this->finish.action_code == Release ) {
-		unlock( this->finish.lock );
+		unlock( *this->finish.lock );
 	}
 	else if( this->finish.action_code == Schedule ) {
…
 	}
 	else if( this->finish.action_code == Release_Schedule ) {
-		unlock( this->finish.lock );
+		unlock( *this->finish.lock );
 		ScheduleThread( this->finish.thrd );
 	}
 	else if( this->finish.action_code == Release_Multi ) {
 		for(int i = 0; i < this->finish.lock_count; i++) {
-			unlock( this->finish.locks[i] );
+			unlock( *this->finish.locks[i] );
 		}
 	}
 	else if( this->finish.action_code == Release_Multi_Schedule ) {
 		for(int i = 0; i < this->finish.lock_count; i++) {
-			unlock( this->finish.locks[i] );
+			unlock( *this->finish.locks[i] );
 		}
 		for(int i = 0; i < this->finish.thrd_count; i++) {
…
 	verifyf( thrd->next == NULL, "Expected null got %p", thrd->next );

-	lock( &this_processor->cltr->ready_queue_lock DEBUG_CTX2 );
+	lock( this_processor->cltr->ready_queue_lock DEBUG_CTX2 );
 	append( this_processor->cltr->ready_queue, thrd );
-	unlock( &this_processor->cltr->ready_queue_lock );
+	unlock( this_processor->cltr->ready_queue_lock );

 	verify( disable_preempt_count > 0 );
…
 thread_desc * nextThread(cluster * this) {
 	verify( disable_preempt_count > 0 );
-	lock( &this->ready_queue_lock DEBUG_CTX2 );
+	lock( this->ready_queue_lock DEBUG_CTX2 );
 	thread_desc * head = pop_head( this->ready_queue );
-	unlock( &this->ready_queue_lock );
+	unlock( this->ready_queue_lock );
 	verify( disable_preempt_count > 0 );
 	return head;
…
-void BlockInternal( spinlock * lock ) {
+void BlockInternal( __spinlock_t * lock ) {
 	disable_interrupts();
 	this_processor->finish.action_code = Release;
…
-void BlockInternal( spinlock * lock, thread_desc * thrd ) {
+void BlockInternal( __spinlock_t * lock, thread_desc * thrd ) {
 	assert(thrd);
 	disable_interrupts();
…
-void BlockInternal(spinlock * locks [], unsigned short count) {
+void BlockInternal(__spinlock_t * locks [], unsigned short count) {
 	disable_interrupts();
 	this_processor->finish.action_code = Release_Multi;
…
-void BlockInternal(spinlock * locks [], unsigned short lock_count, thread_desc * thrds [], unsigned short thrd_count) {
+void BlockInternal(__spinlock_t * locks [], unsigned short lock_count, thread_desc * thrds [], unsigned short thrd_count) {
 	disable_interrupts();
 	this_processor->finish.action_code = Release_Multi_Schedule;
…
-void LeaveThread(spinlock * lock, thread_desc * thrd) {
+void LeaveThread(__spinlock_t * lock, thread_desc * thrd) {
 	verify( disable_preempt_count > 0 );
 	this_processor->finish.action_code = thrd ? Release_Schedule : Release;
…
-static spinlock kernel_abort_lock;
-static spinlock kernel_debug_lock;
+static __spinlock_t kernel_abort_lock;
+static __spinlock_t kernel_debug_lock;
 static bool kernel_abort_called = false;

 	// abort cannot be recursively entered by the same or different processors because all signal handlers return when
 	// the globalAbort flag is true.
-	lock( &kernel_abort_lock DEBUG_CTX2 );
+	lock( kernel_abort_lock DEBUG_CTX2 );

 	// first task to abort ?
 	if ( !kernel_abort_called ) {			// not first task to abort ?
 		kernel_abort_called = true;
-		unlock( &kernel_abort_lock );
+		unlock( kernel_abort_lock );
 	}
 	else {
-		unlock( &kernel_abort_lock );
+		unlock( kernel_abort_lock );

 		sigset_t mask;
…
 extern "C" {
 	void __lib_debug_acquire() {
-		lock( &kernel_debug_lock DEBUG_CTX2 );
+		lock( kernel_debug_lock DEBUG_CTX2 );
 	}

 	void __lib_debug_release() {
-		unlock( &kernel_debug_lock );
+		unlock( kernel_debug_lock );
 	}
 }
…
 //-----------------------------------------------------------------------------
 // Locks
-void ?{}( spinlock & this ) {
-	this.lock = 0;
-}
-void ^?{}( spinlock & this ) {
-
-}
-
-bool try_lock( spinlock * this DEBUG_CTX_PARAM2 ) {
-	return this->lock == 0 && __sync_lock_test_and_set_4( &this->lock, 1 ) == 0;
-}
-
-void lock( spinlock * this DEBUG_CTX_PARAM2 ) {
-	for ( unsigned int i = 1;; i += 1 ) {
-		if ( this->lock == 0 && __sync_lock_test_and_set_4( &this->lock, 1 ) == 0 ) { break; }
-	}
-	LIB_DEBUG_DO(
-		this->prev_name = caller;
-		this->prev_thrd = this_thread;
-	)
-}
-
-void lock_yield( spinlock * this DEBUG_CTX_PARAM2 ) {
-	for ( unsigned int i = 1;; i += 1 ) {
-		if ( this->lock == 0 && __sync_lock_test_and_set_4( &this->lock, 1 ) == 0 ) { break; }
-		yield();
-	}
-	LIB_DEBUG_DO(
-		this->prev_name = caller;
-		this->prev_thrd = this_thread;
-	)
-}
-
-void unlock( spinlock * this ) {
-	__sync_lock_release_4( &this->lock );
-}
-
 void ?{}( semaphore & this, int count = 1 ) {
 	(this.lock){};
…
 void P(semaphore & this) {
-	lock( &this.lock DEBUG_CTX2 );
+	lock( this.lock DEBUG_CTX2 );
 	this.count -= 1;
 	if ( this.count < 0 ) {
…
 	}
 	else {
-		unlock( &this.lock );
+		unlock( this.lock );
 	}
 }

 void V(semaphore & this) {
 	thread_desc * thrd = NULL;
-	lock( &this.lock DEBUG_CTX2 );
+	lock( this.lock DEBUG_CTX2 );
 	this.count += 1;
 	if ( this.count <= 0 ) {
…
 	}

-	unlock( &this.lock );
+	unlock( this.lock );

 	// make new owner
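Every call site in this file now passes the lock by reference (lock( this.lock DEBUG_CTX2 )) or dereferences a stored pointer (unlock( *this->finish.lock )), so the implementations deleted above presumably reappear in bits/locks.h taking a __spinlock_t reference. A minimal sketch under that assumption, with the bodies transcribed from the deleted pointer-based versions and the debug-context parameters omitted:

// Hypothetical reference-based primitives assumed to live in bits/locks.h;
// not shown in this changeset.
static inline void lock( __spinlock_t & this ) {
	// spin, retrying the test-and-set until the lock is acquired
	for ( unsigned int i = 1;; i += 1 ) {
		if ( this.lock == 0 && __sync_lock_test_and_set_4( &this.lock, 1 ) == 0 ) break;
	}
}

static inline void unlock( __spinlock_t & this ) {
	__sync_lock_release_4( &this.lock );    // atomic release of the flag
}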
src/libcfa/concurrency/kernel_private.h
r3edc2df → r6d2386e

 //Block current thread and release/wake-up the following resources
 void BlockInternal(void);
-void BlockInternal(spinlock * lock);
+void BlockInternal(__spinlock_t * lock);
 void BlockInternal(thread_desc * thrd);
-void BlockInternal(spinlock * lock, thread_desc * thrd);
-void BlockInternal(spinlock * locks [], unsigned short count);
-void BlockInternal(spinlock * locks [], unsigned short count, thread_desc * thrds [], unsigned short thrd_count);
-void LeaveThread(spinlock * lock, thread_desc * thrd);
+void BlockInternal(__spinlock_t * lock, thread_desc * thrd);
+void BlockInternal(__spinlock_t * locks [], unsigned short count);
+void BlockInternal(__spinlock_t * locks [], unsigned short count, thread_desc * thrds [], unsigned short thrd_count);
+void LeaveThread(__spinlock_t * lock, thread_desc * thrd);
…
 struct event_kernel_t {
 	alarm_list_t alarms;
-	spinlock lock;
+	__spinlock_t lock;
 };
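These overloads implement a lock hand-off across the context switch: the blocking thread records the lock(s) and/or thread(s) to release in this_processor->finish, and finishRunning (see kernel.c above) performs the release or wake-up once the switch has completed. A sketch of the intended calling pattern; the wait queue and its lock are illustrative, not part of this changeset:

// Release a __spinlock_t atomically with blocking, so a waker that takes
// the same lock cannot observe the thread half-blocked.
__spinlock_t queue_lock;
lock( queue_lock DEBUG_CTX2 );       // protect some wait queue
// ... enqueue this_thread on the wait queue ...
BlockInternal( &queue_lock );        // block; finishRunning unlocks queue_lock
                                     // on the other side of the context switch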
src/libcfa/concurrency/monitor.c
r3edc2df → r6d2386e

 static inline bool is_accepted( monitor_desc * this, const __monitor_group_t & monitors );

-static inline void lock_all  ( spinlock * locks [], __lock_size_t count );
-static inline void lock_all  ( monitor_desc * source [], spinlock * /*out*/ locks [], __lock_size_t count );
-static inline void unlock_all( spinlock * locks [], __lock_size_t count );
+static inline void lock_all  ( __spinlock_t * locks [], __lock_size_t count );
+static inline void lock_all  ( monitor_desc * source [], __spinlock_t * /*out*/ locks [], __lock_size_t count );
+static inline void unlock_all( __spinlock_t * locks [], __lock_size_t count );
 static inline void unlock_all( monitor_desc * locks [], __lock_size_t count );

-static inline void save   ( monitor_desc * ctx [], __lock_size_t count, spinlock * locks [], unsigned int /*out*/ recursions [], __waitfor_mask_t /*out*/ masks [] );
-static inline void restore( monitor_desc * ctx [], __lock_size_t count, spinlock * locks [], unsigned int /*in */ recursions [], __waitfor_mask_t /*in */ masks [] );
+static inline void save   ( monitor_desc * ctx [], __lock_size_t count, __spinlock_t * locks [], unsigned int /*out*/ recursions [], __waitfor_mask_t /*out*/ masks [] );
+static inline void restore( monitor_desc * ctx [], __lock_size_t count, __spinlock_t * locks [], unsigned int /*in */ recursions [], __waitfor_mask_t /*in */ masks [] );

 static inline void init     ( __lock_size_t count, monitor_desc * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] );
…
 static inline __lock_size_t count_max   ( const __waitfor_mask_t & mask );
 static inline __lock_size_t aggregate   ( monitor_desc * storage [], const __waitfor_mask_t & mask );
+
+#ifndef __CFA_LOCK_NO_YIELD
+#define DO_LOCK lock_yield
+#else
+#define DO_LOCK lock
+#endif

 //-----------------------------------------------------------------------------
…
 	unsigned int recursions[ count ];	/* Save the current recursion levels to restore them later */ \
 	__waitfor_mask_t masks [ count ];	/* Save the current waitfor masks to restore them later */ \
-	spinlock *   locks     [ count ];	/* We need to pass-in an array of locks to BlockInternal */ \
+	__spinlock_t * locks   [ count ];	/* We need to pass-in an array of locks to BlockInternal */ \

 #define monitor_save    save   ( monitors, count, locks, recursions, masks )
…
 // Enter single monitor
 static void __enter_monitor_desc( monitor_desc * this, const __monitor_group_t & group ) {
-	// Lock the monitor spinlock, lock_yield to reduce contention
-	lock_yield( &this->lock DEBUG_CTX2 );
+	// Lock the monitor spinlock
+	DO_LOCK( this->lock DEBUG_CTX2 );
 	thread_desc * thrd = this_thread;
…
 	// Release the lock and leave
-	unlock( &this->lock );
+	unlock( this->lock );
 	return;
 }

 static void __enter_monitor_dtor( monitor_desc * this, fptr_t func ) {
-	// Lock the monitor spinlock, lock_yield to reduce contention
-	lock_yield( &this->lock DEBUG_CTX2 );
+	// Lock the monitor spinlock
+	DO_LOCK( this->lock DEBUG_CTX2 );
 	thread_desc * thrd = this_thread;
…
 	set_owner( this, thrd );

-	unlock( &this->lock );
+	unlock( this->lock );
 	return;
 }
…
 // Leave single monitor
 void __leave_monitor_desc( monitor_desc * this ) {
-	// Lock the monitor spinlock, lock_yield to reduce contention
-	lock_yield( &this->lock DEBUG_CTX2 );
+	// Lock the monitor spinlock, DO_LOCK to reduce contention
+	DO_LOCK( this->lock DEBUG_CTX2 );

 	LIB_DEBUG_PRINT_SAFE("Kernel : %10p Leaving mon %p (%p)\n", this_thread, this, this->owner);
…
 	if( this->recursion != 0) {
 		LIB_DEBUG_PRINT_SAFE("Kernel :  recursion still %d\n", this->recursion);
-		unlock( &this->lock );
+		unlock( this->lock );
 		return;
 	}
…
 	// We can now let other threads in safely
-	unlock( &this->lock );
+	unlock( this->lock );

 	//We need to wake-up the thread
…
 	// Lock the monitor now
-	lock_yield( &this->lock DEBUG_CTX2 );
+	DO_LOCK( this->lock DEBUG_CTX2 );

 	disable_interrupts();
…
-static inline void lock_all( spinlock * locks [], __lock_size_t count ) {
+static inline void lock_all( __spinlock_t * locks [], __lock_size_t count ) {
 	for( __lock_size_t i = 0; i < count; i++ ) {
-		lock_yield( locks[i] DEBUG_CTX2 );
-	}
-}
-
-static inline void lock_all( monitor_desc * source [], spinlock * /*out*/ locks [], __lock_size_t count ) {
+		DO_LOCK( *locks[i] DEBUG_CTX2 );
+	}
+}
+
+static inline void lock_all( monitor_desc * source [], __spinlock_t * /*out*/ locks [], __lock_size_t count ) {
 	for( __lock_size_t i = 0; i < count; i++ ) {
-		spinlock * l = &source[i]->lock;
-		lock_yield( l DEBUG_CTX2 );
+		__spinlock_t * l = &source[i]->lock;
+		DO_LOCK( *l DEBUG_CTX2 );
 		if(locks) locks[i] = l;
 	}
 }

-static inline void unlock_all( spinlock * locks [], __lock_size_t count ) {
+static inline void unlock_all( __spinlock_t * locks [], __lock_size_t count ) {
 	for( __lock_size_t i = 0; i < count; i++ ) {
-		unlock( locks[i] );
+		unlock( *locks[i] );
 	}
 }
…
 static inline void unlock_all( monitor_desc * locks [], __lock_size_t count ) {
 	for( __lock_size_t i = 0; i < count; i++ ) {
-		unlock( &locks[i]->lock );
+		unlock( locks[i]->lock );
 	}
 }
…
 static inline void save(
 	monitor_desc * ctx [],
 	__lock_size_t count,
-	__attribute((unused)) spinlock * locks [],
+	__attribute((unused)) __spinlock_t * locks [],
 	unsigned int /*out*/ recursions [],
 	__waitfor_mask_t /*out*/ masks []
…
 static inline void restore(
 	monitor_desc * ctx [],
 	__lock_size_t count,
-	spinlock * locks [],
+	__spinlock_t * locks [],
 	unsigned int /*out*/ recursions [],
 	__waitfor_mask_t /*out*/ masks []
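The new DO_LOCK macro turns the monitor's acquisition strategy into a compile-time switch: by default every former lock_yield call site still yields the processor while contended, and building with __CFA_LOCK_NO_YIELD defined swaps in the plain spinning lock. Concretely, given the macro block added above:

// Default build: this call in __enter_monitor_desc
DO_LOCK( this->lock DEBUG_CTX2 );
// expands to
lock_yield( this->lock DEBUG_CTX2 );
// whereas compiling with -D__CFA_LOCK_NO_YIELD makes it expand to
lock( this->lock DEBUG_CTX2 );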
src/libcfa/concurrency/preemption.c
r3edc2df → r6d2386e

 		case SI_KERNEL:
 			// LIB_DEBUG_PRINT_SAFE("Kernel : Preemption thread tick\n");
-			lock( &event_kernel->lock DEBUG_CTX2 );
+			lock( event_kernel->lock DEBUG_CTX2 );
 			tick_preemption();
-			unlock( &event_kernel->lock );
+			unlock( event_kernel->lock );
 			break;
 		// Signal was not sent by the kernel but by an other thread