Changeset 6ff4507
- Timestamp:
- Sep 21, 2017, 12:31:48 PM (8 years ago)
- Branches:
- ADT, aaron-thesis, arm-eh, ast-experimental, cleanup-dtors, deferred_resn, demangler, enum, forall-pointer-decay, jacob/cs343-translation, jenkins-sandbox, master, new-ast, new-ast-unique-expr, new-env, no_list, persistent-indexer, pthread-emulation, qualifiedEnum, resolv-new, with_gc
- Children:
- a843067
- Parents:
- 7453a68
- Location:
- src/libcfa
- Files:
- 3 edited
Legend:
- Unmodified
- Added
- Removed
-
src/libcfa/Makefile.am
r7453a68 r6ff4507 36 36 ${AM_V_GEN}@BACKEND_CC@ @CFA_FLAGS@ -D__CFA_DEBUG__ -O0 -c -o $@ $< 37 37 38 EXTRA_FLAGS = -g -Wall -W error -Wno-unused-function -imacros libcfa-prelude.c @CFA_FLAGS@38 EXTRA_FLAGS = -g -Wall -Wno-unused-function -imacros libcfa-prelude.c @CFA_FLAGS@ 39 39 40 40 AM_CCASFLAGS = @CFA_FLAGS@ -
src/libcfa/Makefile.in
r7453a68 r6ff4507 416 416 ARFLAGS = cr 417 417 lib_LIBRARIES = $(am__append_1) $(am__append_2) 418 EXTRA_FLAGS = -g -Wall -W error -Wno-unused-function -imacros libcfa-prelude.c @CFA_FLAGS@418 EXTRA_FLAGS = -g -Wall -Wno-unused-function -imacros libcfa-prelude.c @CFA_FLAGS@ 419 419 AM_CCASFLAGS = @CFA_FLAGS@ 420 420 headers = fstream iostream iterator limits rational stdlib \ -
src/libcfa/concurrency/monitor.c
r7453a68 r6ff4507 24 24 // Forward declarations 25 25 static inline void set_owner( monitor_desc * this, thread_desc * owner ); 26 static inline void set_owner( monitor_desc ** storage, short count, thread_desc * owner ); 27 static inline void set_mask ( monitor_desc ** storage, short count, const __waitfor_mask_t & mask ); 28 26 29 static inline thread_desc * next_thread( monitor_desc * this ); 27 30 static inline bool is_accepted( monitor_desc * this, const __monitor_group_t & monitors ); … … 32 35 static inline void unlock_all( monitor_desc ** locks, unsigned short count ); 33 36 34 static inline void save _recursion ( monitor_desc ** ctx, unsigned int * /*out*/ recursions, unsigned short count);35 static inline void restore _recursion( monitor_desc ** ctx, unsigned int * /*in */ recursions, unsigned short count);37 static inline void save ( monitor_desc ** ctx, short count, spinlock ** locks, unsigned int * /*out*/ recursions, __waitfor_mask_t * /*out*/ masks ); 38 static inline void restore( monitor_desc ** ctx, short count, spinlock ** locks, unsigned int * /*in */ recursions, __waitfor_mask_t * /*in */ masks ); 36 39 37 40 static inline void init ( int count, monitor_desc ** monitors, __condition_node_t * waiter, __condition_criterion_t * criteria ); 38 41 static inline void init_push( int count, monitor_desc ** monitors, __condition_node_t * waiter, __condition_criterion_t * criteria ); 39 42 40 static inline thread_desc * check_condition( __condition_criterion_t * ); 41 static inline void brand_condition( condition * ); 42 static inline unsigned short insert_unique( thread_desc ** thrds, unsigned short end, thread_desc * val ); 43 43 static inline thread_desc * check_condition ( __condition_criterion_t * ); 44 static inline void brand_condition ( condition * ); 44 45 static inline [thread_desc *, int] search_entry_queue( const __waitfor_mask_t &, monitor_desc ** monitors, int count ); 45 46 46 static inline short count_max( const __waitfor_mask_t & mask ); 
47 static inline short aggregate( monitor_desc ** storage, const __waitfor_mask_t & mask ); 48 static inline void set_mask ( monitor_desc ** storage, short count, const __waitfor_mask_t & mask ); 47 forall(dtype T | sized( T )) 48 static inline short insert_unique( T ** array, short & size, T * val ); 49 static inline short count_max ( const __waitfor_mask_t & mask ); 50 static inline short aggregate ( monitor_desc ** storage, const __waitfor_mask_t & mask ); 49 51 50 52 //----------------------------------------------------------------------------- 51 53 // Useful defines 52 #define wait_ctx(thrd, user_info) /* Create the necessary information to use the signaller stack */ \ 53 __condition_node_t waiter = { thrd, count, user_info }; /* Create the node specific to this wait operation */ \ 54 __condition_criterion_t criteria[count]; /* Create the creteria this wait operation needs to wake up */ \ 55 init( count, monitors, &waiter, criteria ); /* Link everything together */ \ 56 57 #define wait_ctx_primed(thrd, user_info) /* Create the necessary information to use the signaller stack */ \ 58 __condition_node_t waiter = { thrd, count, user_info }; /* Create the node specific to this wait operation */ \ 59 __condition_criterion_t criteria[count]; /* Create the creteria this wait operation needs to wake up */ \ 60 init_push( count, monitors, &waiter, criteria ); /* Link everything together and push it to the AS-Stack */ \ 61 62 #define monitor_ctx( mons, cnt ) /* Define that create the necessary struct for internal/external scheduling operations */ \ 63 monitor_desc ** monitors = mons; /* Save the targeted monitors */ \ 64 unsigned short count = cnt; /* Save the count to a local variable */ \ 65 unsigned int recursions[ count ]; /* Save the current recursion levels to restore them later */ \ 66 spinlock * locks [ count ]; /* We need to pass-in an array of locks to BlockInternal */ \ 54 #define wait_ctx(thrd, user_info) /* Create the necessary information to use the 
signaller stack */ \ 55 __condition_node_t waiter = { thrd, count, user_info }; /* Create the node specific to this wait operation */ \ 56 __condition_criterion_t criteria[count]; /* Create the creteria this wait operation needs to wake up */ \ 57 init( count, monitors, &waiter, criteria ); /* Link everything together */ \ 58 59 #define wait_ctx_primed(thrd, user_info) /* Create the necessary information to use the signaller stack */ \ 60 __condition_node_t waiter = { thrd, count, user_info }; /* Create the node specific to this wait operation */ \ 61 __condition_criterion_t criteria[count]; /* Create the creteria this wait operation needs to wake up */ \ 62 init_push( count, monitors, &waiter, criteria ); /* Link everything together and push it to the AS-Stack */ \ 63 64 #define monitor_ctx( mons, cnt ) /* Define that create the necessary struct for internal/external scheduling operations */ \ 65 monitor_desc ** monitors = mons; /* Save the targeted monitors */ \ 66 unsigned short count = cnt; /* Save the count to a local variable */ \ 67 unsigned int recursions[ count ]; /* Save the current recursion levels to restore them later */ \ 68 __waitfor_mask_t masks[ count ]; /* Save the current waitfor masks to restore them later */ \ 69 spinlock * locks [ count ]; /* We need to pass-in an array of locks to BlockInternal */ \ 70 71 #define monitor_save save ( monitors, count, locks, recursions, masks ) 72 #define monitor_restore restore( monitors, count, locks, recursions, masks ) 73 74 #define blockAndWake( thrd, cnt ) /* Create the necessary information to use the signaller stack */ \ 75 monitor_save; /* Save monitor states */ \ 76 BlockInternal( locks, count, thrd, cnt ); /* Everything is ready to go to sleep */ \ 77 monitor_restore; /* We are back, restore the owners and recursions */ \ 78 67 79 68 80 //----------------------------------------------------------------------------- … … 277 289 append( &this->blocked, &waiter ); 278 290 279 // Lock all monitors 
(aggregates the lock themas well)291 // Lock all monitors (aggregates the locks as well) 280 292 lock_all( monitors, locks, count ); 281 293 282 294 // Find the next thread(s) to run 283 unsignedshort thread_count = 0;295 short thread_count = 0; 284 296 thread_desc * threads[ count ]; 285 297 for(int i = 0; i < count; i++) { … … 290 302 for( int i = 0; i < count; i++) { 291 303 thread_desc * new_owner = next_thread( monitors[i] ); 292 thread_count = insert_unique( threads, thread_count, new_owner ); 293 } 294 295 // Save monitor state 296 save_recursion( monitors, recursions, count ); 297 298 // Everything is ready to go to sleep 299 BlockInternal( locks, count, threads, thread_count ); 300 301 302 // WE WOKE UP 303 304 305 // We are back, restore the owners and recursions 306 lock_all( locks, count ); 307 restore_recursion( monitors, recursions, count ); 308 unlock_all( locks, count ); 304 insert_unique( threads, thread_count, new_owner ); 305 } 306 307 blockAndWake( threads, thread_count ); 309 308 } 310 309 … … 368 367 369 368 //save contexts 370 save_recursion( monitors, recursions, count );369 monitor_save; 371 370 372 371 //Find the thread to run 373 372 thread_desc * signallee = pop_head( &this->blocked )->waiting_thread; 374 for(int i = 0; i < count; i++) { 375 set_owner( monitors[i], signallee ); 376 } 373 set_owner( monitors, count, signallee ); 377 374 378 375 //Everything is ready to go to sleep … … 383 380 384 381 385 //We are back, restore the owners and recursions 386 lock_all( locks, count ); 387 restore_recursion( monitors, recursions, count ); 388 unlock_all( locks, count ); 382 //We are back, restore the masks and recursions 383 monitor_restore; 389 384 390 385 return true; … … 424 419 monitor_ctx( mon_storage, actual_count ); 425 420 426 // Lock all monitors (aggregates the lock themas well)421 // Lock all monitors (aggregates the locks as well) 427 422 lock_all( monitors, locks, count ); 428 423 … … 437 432 } 438 433 else { 439 save_recursion( 
monitors, recursions, count ); 440 441 // Everything is ready to go to sleep 442 BlockInternal( locks, count, &next, 1 ); 443 444 445 //WE WOKE UP 446 447 448 //We are back, restore the owners and recursions 449 lock_all( locks, count ); 450 restore_recursion( monitors, recursions, count ); 451 unlock_all( locks, count ); 434 blockAndWake( &next, 1 ); 452 435 } 453 436 … … 463 446 464 447 465 save_recursion( monitors, recursions, count );448 monitor_save; 466 449 set_mask( monitors, count, mask ); 467 450 468 469 // Everything is ready to go to sleep 470 BlockInternal( locks, count ); 471 472 451 BlockInternal( locks, count ); // Everything is ready to go to sleep 473 452 //WE WOKE UP 474 475 476 //We are back, restore the owners and recursions 477 lock_all( locks, count ); 478 restore_recursion( monitors, recursions, count ); 479 unlock_all( locks, count ); 480 481 return mask.accepted; 453 monitor_restore; //We are back, restore the masks and recursions 482 454 } 483 455 … … 491 463 //We are passing the monitor to someone else, which means recursion level is not 0 492 464 this->recursion = owner ? 1 : 0; 465 } 466 467 static inline void set_owner( monitor_desc ** monitors, short count, thread_desc * owner ) { 468 for( int i = 0; i < count; i++ ) { 469 set_owner( monitors[i], owner ); 470 } 471 } 472 473 static inline void set_mask( monitor_desc ** storage, short count, const __waitfor_mask_t & mask ) { 474 for(int i = 0; i < count; i++) { 475 storage[i]->mask = mask; 476 } 493 477 } 494 478 … … 513 497 } 514 498 499 static inline bool is_accepted( monitor_desc * this, const __monitor_group_t & group ) { 500 __acceptable_t * it = this->mask.clauses; // Optim 501 int count = this->mask.size; 502 503 // Check if there are any acceptable functions 504 if( !it ) return -1; 505 506 // If this isn't the first monitor to test this, there is no reason to repeat the test. 
507 if( this != group[0] ) return group[0]->mask.accepted >= 0; 508 509 // For all acceptable functions check if this is the current function. 510 for( short i = 0; i < count; i++, it++ ) { 511 if( *it == group ) { 512 *this->mask.accepted = i; 513 return true; 514 } 515 } 516 517 // No function matched 518 return false; 519 } 520 515 521 static inline void init( int count, monitor_desc ** monitors, __condition_node_t * waiter, __condition_criterion_t * criteria ) { 516 522 for(int i = 0; i < count; i++) { … … 556 562 } 557 563 558 559 static inline void save_recursion ( monitor_desc ** ctx, unsigned int * /*out*/ recursions, unsigned short count ) { 564 static inline void save ( monitor_desc ** ctx, short count, __attribute((unused)) spinlock ** locks, unsigned int * /*out*/ recursions, __waitfor_mask_t * /*out*/ masks ) { 560 565 for( int i = 0; i < count; i++ ) { 561 566 recursions[i] = ctx[i]->recursion; 562 } 563 } 564 565 static inline void restore_recursion( monitor_desc ** ctx, unsigned int * /*in */ recursions, unsigned short count ) { 567 masks[i] = ctx[i]->mask; 568 } 569 } 570 571 static inline void restore( monitor_desc ** ctx, short count, spinlock ** locks, unsigned int * /*out*/ recursions, __waitfor_mask_t * /*out*/ masks ) { 572 lock_all( locks, count ); 566 573 for( int i = 0; i < count; i++ ) { 567 574 ctx[i]->recursion = recursions[i]; 568 } 575 ctx[i]->mask = masks[i]; 576 } 577 unlock_all( locks, count ); 569 578 } 570 579 … … 607 616 } 608 617 } 609 }610 611 static inline unsigned short insert_unique( thread_desc ** thrds, unsigned short end, thread_desc * val ) {612 if( !val ) return end;613 614 for(int i = 0; i <= end; i++) {615 if( thrds[i] == val ) return end;616 }617 618 thrds[end] = val;619 return end + 1;620 }621 622 static inline bool is_accepted( monitor_desc * this, const __monitor_group_t & group ) {623 __acceptable_t * it = this->mask.clauses; // Optim624 int count = this->mask.size;625 626 // Check if there are any acceptable 
functions627 if( !it ) return -1;628 629 // If this isn't the first monitor to test this, there is no reason to repeat the test.630 if( this != group[0] ) return group[0]->mask.accepted >= 0;631 632 // For all acceptable functions check if this is the current function.633 for( short i = 0; i < count; i++, it++ ) {634 if( *it == group ) {635 *this->mask.accepted = i;636 return true;637 }638 }639 640 // No function matched641 return false;642 618 } 643 619 … … 668 644 } 669 645 646 forall(dtype T | sized( T )) 647 static inline short insert_unique( T ** array, short & size, T * val ) { 648 if( !val ) return size; 649 650 for(int i = 0; i <= size; i++) { 651 if( array[i] == val ) return size; 652 } 653 654 array[size] = val; 655 size = size + 1; 656 return size; 657 } 658 670 659 static inline short count_max( const __waitfor_mask_t & mask ) { 671 660 short max = 0; … … 677 666 678 667 static inline short aggregate( monitor_desc ** storage, const __waitfor_mask_t & mask ) { 679 #warning function not implemented 680 return 0; 681 } 682 683 static inline void set_mask( monitor_desc ** storage, short count, const __waitfor_mask_t & mask ) { 684 for(int i = 0; i < count; i++) { 685 storage[i]->mask = mask; 686 } 687 } 688 668 short size = 0; 669 for( int i = 0; i < mask.size; i++ ) { 670 for( int j = 0; j < mask.clauses[i].size; j++) { 671 insert_unique( storage, size, mask.clauses[i].list[j] ); 672 } 673 } 674 qsort( storage, size ); 675 return size; 676 } 689 677 690 678 void ?{}( __condition_blocked_queue_t & this ) {
Note: See TracChangeset for help on using the changeset viewer.