Changeset 490db327
- Timestamp: Nov 15, 2017, 2:01:08 PM (7 years ago)
- Branches: ADT, aaron-thesis, arm-eh, ast-experimental, cleanup-dtors, deferred_resn, demangler, enum, forall-pointer-decay, jacob/cs343-translation, jenkins-sandbox, master, new-ast, new-ast-unique-expr, new-env, no_list, persistent-indexer, pthread-emulation, qualifiedEnum, resolv-new, with_gc
- Children: 20632a2
- Parents: c95b115 (diff), 6d2386e (diff)
- Location: src
- Files: 4 added, 24 edited

Note: this is a merge changeset; the changes displayed below correspond to the merge itself. Use the (diff) links above to see all the changes relative to each parent.
src/Concurrency/Keywords.cc
rc95b115 r490db327
  		),
  		new ListInit(
- 			map_range < std::list<Initializer*> > ( args, [ this](DeclarationWithType * var ){
+ 			map_range < std::list<Initializer*> > ( args, [](DeclarationWithType * var ){
  			Type * type = var->get_type()->clone();
  			type->set_mutex( false );
src/InitTweak/GenInit.cc
rc95b115 r490db327
  	}
  	// a type is managed if it appears in the map of known managed types, or if it contains any polymorphism (is a type variable or generic type containing a type variable)
- 	return managedTypes.find( SymTab::Mangler::mangle( type ) ) != managedTypes.end() || GenPoly::isPolyType( type );
+ 	return managedTypes.find( SymTab::Mangler::mangleConcrete( type ) ) != managedTypes.end() || GenPoly::isPolyType( type );
  	}
…
  		Type * type = InitTweak::getPointerBase( params.front()->get_type() );
  		assert( type );
- 		managedTypes.insert( SymTab::Mangler::mangle( type ) );
+ 		managedTypes.insert( SymTab::Mangler::mangleConcrete( type ) );
  	}
  }
…
  	if ( ObjectDecl * field = dynamic_cast< ObjectDecl * >( member ) ) {
  		if ( isManaged( field ) ) {
+ 			// generic parameters should not play a role in determining whether a generic type is constructed - construct all generic types, so that
+ 			// polymorphic constructors make generic types managed types
  			StructInstType inst( Type::Qualifiers(), aggregateDecl );
- 			managedTypes.insert( SymTab::Mangler::mangle( &inst ) );
+ 			managedTypes.insert( SymTab::Mangler::mangleConcrete( &inst ) );
  			break;
  		}
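The switch from mangle to mangleConcrete above keys the managed-type set on a mangling that ignores generic parameters, so one constructor registration covers every instantiation of a generic type. A rough standalone illustration of the intent, with hypothetical type names and a stand-in for the real mangler:

#include <set>
#include <string>

std::set<std::string> managedTypes;

// stand-in for SymTab::Mangler::mangleConcrete: drops the generic arguments,
// so vector(int) and vector(double) both map to the key "vector"
std::string mangleConcrete( const std::string & base, const std::string & /* genericArgs */ ) {
	return base;
}

bool isManaged( const std::string & base, const std::string & args ) {
	return managedTypes.count( mangleConcrete( base, args ) ) > 0;
}

int main() {
	managedTypes.insert( mangleConcrete( "vector", "int" ) );   // a constructor was seen for vector(int)
	return isManaged( "vector", "double" ) ? 0 : 1;             // ...so vector(double) counts as managed too
}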
src/InitTweak/InitTweak.cc
rc95b115 r490db327
  class InitExpander::ExpanderImpl {
  public:
+ 	virtual ~ExpanderImpl() = default;
  	virtual std::list< Expression * > next( std::list< Expression * > & indices ) = 0;
  	virtual Statement * buildListInit( UntypedExpr * callExpr, std::list< Expression * > & indices ) = 0;
…
  public:
  	InitImpl( Initializer * init ) : init( init ) {}
+ 	virtual ~InitImpl() = default;

  	virtual std::list< Expression * > next( __attribute((unused)) std::list< Expression * > & indices ) {
…
  public:
  	ExprImpl( Expression * expr ) : arg( expr ) {}
-
- 	~ExprImpl() { delete arg; }
+ 	virtual ~ExprImpl() { delete arg; }

  	virtual std::list< Expression * > next( std::list< Expression * > & indices ) {
src/ResolvExpr/AlternativeFinder.cc
rc95b115 r490db327 22 22 #include <memory> // for allocator_traits<>::value_type 23 23 #include <utility> // for pair 24 #include <vector> // for vector 24 25 25 26 #include "Alternative.h" // for AltList, Alternative … … 333 334 tmpCost.incPoly( -tmpCost.get_polyCost() ); 334 335 if ( tmpCost != Cost::zero ) { 335 // if ( convCost != Cost::zero ) {336 336 Type *newType = formalType->clone(); 337 337 env.apply( newType ); … … 405 405 /// needAssertions.insert( needAssertions.end(), (*tyvar)->get_assertions().begin(), (*tyvar)->get_assertions().end() ); 406 406 } 407 }408 409 /// instantiate a single argument by matching actuals from [actualIt, actualEnd) against formalType,410 /// producing expression(s) in out and their total cost in cost.411 template< typename AltIterator, typename OutputIterator >412 bool instantiateArgument( Type * formalType, Initializer * defaultValue, AltIterator & actualIt, AltIterator actualEnd, OpenVarSet & openVars, TypeEnvironment & resultEnv, AssertionSet & resultNeed, AssertionSet & resultHave, const SymTab::Indexer & indexer, Cost & cost, OutputIterator out ) {413 if ( TupleType * tupleType = dynamic_cast< TupleType * >( formalType ) ) {414 // formalType is a TupleType - group actuals into a TupleExpr whose type unifies with the TupleType415 std::list< Expression * > exprs;416 for ( Type * type : *tupleType ) {417 if ( ! instantiateArgument( type, defaultValue, actualIt, actualEnd, openVars, resultEnv, resultNeed, resultHave, indexer, cost, back_inserter( exprs ) ) ) {418 deleteAll( exprs );419 return false;420 }421 }422 *out++ = new TupleExpr( exprs );423 } else if ( TypeInstType * ttype = Tuples::isTtype( formalType ) ) {424 // xxx - mixing default arguments with variadic??425 std::list< Expression * > exprs;426 for ( ; actualIt != actualEnd; ++actualIt ) {427 exprs.push_back( actualIt->expr->clone() );428 cost += actualIt->cost;429 }430 Expression * arg = nullptr;431 if ( exprs.size() == 1 && Tuples::isTtype( exprs.front()->get_result() ) ) {432 // the case where a ttype value is passed directly is special, e.g. for argument forwarding purposes433 // xxx - what if passing multiple arguments, last of which is ttype?434 // xxx - what would happen if unify was changed so that unifying tuple types flattened both before unifying lists? then pass in TupleType(ttype) below.435 arg = exprs.front();436 } else {437 arg = new TupleExpr( exprs );438 }439 assert( arg && arg->get_result() );440 if ( ! unify( ttype, arg->get_result(), resultEnv, resultNeed, resultHave, openVars, indexer ) ) {441 return false;442 }443 *out++ = arg;444 } else if ( actualIt != actualEnd ) {445 // both actualType and formalType are atomic (non-tuple) types - if they unify446 // then accept actual as an argument, otherwise return false (fail to instantiate argument)447 Expression * actual = actualIt->expr;448 Type * actualType = actual->get_result();449 450 PRINT(451 std::cerr << "formal type is ";452 formalType->print( std::cerr );453 std::cerr << std::endl << "actual type is ";454 actualType->print( std::cerr );455 std::cerr << std::endl;456 )457 if ( ! 
unify( formalType, actualType, resultEnv, resultNeed, resultHave, openVars, indexer ) ) {458 // std::cerr << "unify failed" << std::endl;459 return false;460 }461 // move the expression from the alternative to the output iterator462 *out++ = actual;463 actualIt->expr = nullptr;464 cost += actualIt->cost;465 ++actualIt;466 } else {467 // End of actuals - Handle default values468 if ( SingleInit *si = dynamic_cast<SingleInit *>( defaultValue )) {469 if ( CastExpr * castExpr = dynamic_cast< CastExpr * >( si->get_value() ) ) {470 // so far, only constant expressions are accepted as default values471 if ( ConstantExpr *cnstexpr = dynamic_cast<ConstantExpr *>( castExpr->get_arg() ) ) {472 if ( Constant *cnst = dynamic_cast<Constant *>( cnstexpr->get_constant() ) ) {473 if ( unify( formalType, cnst->get_type(), resultEnv, resultNeed, resultHave, openVars, indexer ) ) {474 *out++ = cnstexpr->clone();475 return true;476 } // if477 } // if478 } // if479 }480 } // if481 return false;482 } // if483 return true;484 }485 486 bool AlternativeFinder::instantiateFunction( std::list< DeclarationWithType* >& formals, const AltList &actuals, bool isVarArgs, OpenVarSet& openVars, TypeEnvironment &resultEnv, AssertionSet &resultNeed, AssertionSet &resultHave, AltList & out ) {487 simpleCombineEnvironments( actuals.begin(), actuals.end(), resultEnv );488 // make sure we don't widen any existing bindings489 for ( TypeEnvironment::iterator i = resultEnv.begin(); i != resultEnv.end(); ++i ) {490 i->allowWidening = false;491 }492 resultEnv.extractOpenVars( openVars );493 494 // flatten actuals so that each actual has an atomic (non-tuple) type495 AltList exploded;496 Tuples::explode( actuals, indexer, back_inserter( exploded ) );497 498 AltList::iterator actualExpr = exploded.begin();499 AltList::iterator actualEnd = exploded.end();500 for ( DeclarationWithType * formal : formals ) {501 // match flattened actuals with formal parameters - actuals will be grouped to match502 // with formals as appropriate503 Cost cost = Cost::zero;504 std::list< Expression * > newExprs;505 ObjectDecl * obj = strict_dynamic_cast< ObjectDecl * >( formal );506 if ( ! instantiateArgument( obj->get_type(), obj->get_init(), actualExpr, actualEnd, openVars, resultEnv, resultNeed, resultHave, indexer, cost, back_inserter( newExprs ) ) ) {507 deleteAll( newExprs );508 return false;509 }510 // success - produce argument as a new alternative511 assert( newExprs.size() == 1 );512 out.push_back( Alternative( newExprs.front(), resultEnv, cost ) );513 }514 if ( actualExpr != actualEnd ) {515 // there are still actuals remaining, but we've run out of formal parameters to match against516 // this is okay only if the function is variadic517 if ( ! 
isVarArgs ) {518 return false;519 }520 out.splice( out.end(), exploded, actualExpr, actualEnd );521 }522 return true;523 407 } 524 408 … … 675 559 } 676 560 677 template< typename OutputIterator > 678 void AlternativeFinder::makeFunctionAlternatives( const Alternative &func, FunctionType *funcType, const AltList &actualAlt, OutputIterator out ) { 679 OpenVarSet openVars; 680 AssertionSet resultNeed, resultHave; 681 TypeEnvironment resultEnv( func.env ); 682 makeUnifiableVars( funcType, openVars, resultNeed ); 683 resultEnv.add( funcType->get_forall() ); // add all type variables as open variables now so that those not used in the parameter list are still considered open 684 AltList instantiatedActuals; // filled by instantiate function 561 /// Gets a default value from an initializer, nullptr if not present 562 ConstantExpr* getDefaultValue( Initializer* init ) { 563 if ( SingleInit* si = dynamic_cast<SingleInit*>( init ) ) { 564 if ( CastExpr* ce = dynamic_cast<CastExpr*>( si->get_value() ) ) { 565 return dynamic_cast<ConstantExpr*>( ce->get_arg() ); 566 } 567 } 568 return nullptr; 569 } 570 571 /// State to iteratively build a match of parameter expressions to arguments 572 struct ArgPack { 573 AltList actuals; ///< Arguments included in this pack 574 TypeEnvironment env; ///< Environment for this pack 575 AssertionSet need; ///< Assertions outstanding for this pack 576 AssertionSet have; ///< Assertions found for this pack 577 OpenVarSet openVars; ///< Open variables for this pack 578 unsigned nextArg; ///< Index of next argument in arguments list 579 std::vector<Alternative> expls; ///< Exploded actuals left over from last match 580 unsigned nextExpl; ///< Index of next exploded alternative to use 581 std::vector<unsigned> tupleEls; /// Number of elements in current tuple element(s) 582 583 ArgPack(const TypeEnvironment& env, const AssertionSet& need, const AssertionSet& have, 584 const OpenVarSet& openVars) 585 : actuals(), env(env), need(need), have(have), openVars(openVars), nextArg(0), 586 expls(), nextExpl(0), tupleEls() {} 587 588 /// Starts a new tuple expression 589 void beginTuple() { 590 if ( ! tupleEls.empty() ) ++tupleEls.back(); 591 tupleEls.push_back(0); 592 } 593 594 /// Ends a tuple expression, consolidating the appropriate actuals 595 void endTuple() { 596 // set up new Tuple alternative 597 std::list<Expression*> exprs; 598 Cost cost = Cost::zero; 599 600 // transfer elements into alternative 601 for (unsigned i = 0; i < tupleEls.back(); ++i) { 602 exprs.push_front( actuals.back().expr ); 603 actuals.back().expr = nullptr; 604 cost += actuals.back().cost; 605 actuals.pop_back(); 606 } 607 tupleEls.pop_back(); 608 609 // build new alternative 610 actuals.emplace_back( new TupleExpr( exprs ), this->env, cost ); 611 } 612 613 /// Clones and adds an actual, returns this 614 ArgPack& withArg( Expression* expr, Cost cost = Cost::zero ) { 615 actuals.emplace_back( expr->clone(), this->env, cost ); 616 if ( ! 
tupleEls.empty() ) ++tupleEls.back(); 617 return *this; 618 } 619 }; 620 621 /// Instantiates an argument to match a formal, returns false if no results left 622 bool instantiateArgument( Type* formalType, Initializer* initializer, 623 const std::vector< AlternativeFinder >& args, 624 std::vector<ArgPack>& results, std::vector<ArgPack>& nextResults, 625 const SymTab::Indexer& indexer ) { 626 if ( TupleType* tupleType = dynamic_cast<TupleType*>( formalType ) ) { 627 // formalType is a TupleType - group actuals into a TupleExpr 628 for ( ArgPack& result : results ) { result.beginTuple(); } 629 for ( Type* type : *tupleType ) { 630 // xxx - dropping initializer changes behaviour from previous, but seems correct 631 if ( ! instantiateArgument( type, nullptr, args, results, nextResults, indexer ) ) 632 return false; 633 } 634 for ( ArgPack& result : results ) { result.endTuple(); } 635 return true; 636 } else if ( TypeInstType* ttype = Tuples::isTtype( formalType ) ) { 637 // formalType is a ttype, consumes all remaining arguments 638 // xxx - mixing default arguments with variadic?? 639 std::vector<ArgPack> finalResults{}; /// list of completed tuples 640 // start tuples 641 for ( ArgPack& result : results ) { 642 result.beginTuple(); 643 644 // use rest of exploded tuple if present 645 while ( result.nextExpl < result.expls.size() ) { 646 const Alternative& actual = result.expls[result.nextExpl]; 647 result.env.addActual( actual.env, result.openVars ); 648 result.withArg( actual.expr ); 649 ++result.nextExpl; 650 } 651 } 652 // iterate until all results completed 653 while ( ! results.empty() ) { 654 // add another argument to results 655 for ( ArgPack& result : results ) { 656 // finish result when out of arguments 657 if ( result.nextArg >= args.size() ) { 658 Type* argType = result.actuals.back().expr->get_result(); 659 if ( result.tupleEls.back() == 1 && Tuples::isTtype( argType ) ) { 660 // the case where a ttype value is passed directly is special, e.g. for 661 // argument forwarding purposes 662 // xxx - what if passing multiple arguments, last of which is ttype? 663 // xxx - what would happen if unify was changed so that unifying tuple 664 // types flattened both before unifying lists? then pass in TupleType 665 // (ttype) below. 666 result.tupleEls.pop_back(); 667 } else { 668 // collapse leftover arguments into tuple 669 result.endTuple(); 670 argType = result.actuals.back().expr->get_result(); 671 } 672 // check unification for ttype before adding to final 673 if ( unify( ttype, argType, result.env, result.need, result.have, 674 result.openVars, indexer ) ) { 675 finalResults.push_back( std::move(result) ); 676 } 677 continue; 678 } 679 680 // add each possible next argument 681 for ( const Alternative& actual : args[result.nextArg] ) { 682 ArgPack aResult = result; // copy to clone everything 683 // add details of actual to result 684 aResult.env.addActual( actual.env, aResult.openVars ); 685 Cost cost = actual.cost; 686 687 // explode argument 688 std::vector<Alternative> exploded; 689 Tuples::explode( actual, indexer, back_inserter( exploded ) ); 690 691 // add exploded argument to tuple 692 for ( Alternative& aActual : exploded ) { 693 aResult.withArg( aActual.expr, cost ); 694 cost = Cost::zero; 695 } 696 ++aResult.nextArg; 697 nextResults.push_back( std::move(aResult) ); 698 } 699 } 700 701 // reset for next round 702 results.swap( nextResults ); 703 nextResults.clear(); 704 } 705 results.swap( finalResults ); 706 return ! 
results.empty(); 707 } 708 709 // iterate each current subresult 710 for ( unsigned iResult = 0; iResult < results.size(); ++iResult ) { 711 ArgPack& result = results[iResult]; 712 713 if ( result.nextExpl < result.expls.size() ) { 714 // use remainder of exploded tuple if present 715 const Alternative& actual = result.expls[result.nextExpl]; 716 result.env.addActual( actual.env, result.openVars ); 717 Type* actualType = actual.expr->get_result(); 718 719 PRINT( 720 std::cerr << "formal type is "; 721 formalType->print( std::cerr ); 722 std::cerr << std::endl << "actual type is "; 723 actualType->print( std::cerr ); 724 std::cerr << std::endl; 725 ) 726 727 if ( unify( formalType, actualType, result.env, result.need, result.have, 728 result.openVars, indexer ) ) { 729 ++result.nextExpl; 730 nextResults.push_back( std::move(result.withArg( actual.expr )) ); 731 } 732 733 continue; 734 } else if ( result.nextArg >= args.size() ) { 735 // use default initializers if out of arguments 736 if ( ConstantExpr* cnstExpr = getDefaultValue( initializer ) ) { 737 if ( Constant* cnst = dynamic_cast<Constant*>( cnstExpr->get_constant() ) ) { 738 if ( unify( formalType, cnst->get_type(), result.env, result.need, 739 result.have, result.openVars, indexer ) ) { 740 nextResults.push_back( std::move(result.withArg( cnstExpr )) ); 741 } 742 } 743 } 744 continue; 745 } 746 747 // Check each possible next argument 748 for ( const Alternative& actual : args[result.nextArg] ) { 749 ArgPack aResult = result; // copy to clone everything 750 // add details of actual to result 751 aResult.env.addActual( actual.env, aResult.openVars ); 752 753 // explode argument 754 std::vector<Alternative> exploded; 755 Tuples::explode( actual, indexer, back_inserter( exploded ) ); 756 if ( exploded.empty() ) { 757 // skip empty tuple arguments 758 ++aResult.nextArg; 759 results.push_back( std::move(aResult) ); 760 continue; 761 } 762 763 // consider only first exploded actual 764 const Alternative& aActual = exploded.front(); 765 Type* actualType = aActual.expr->get_result()->clone(); 766 767 PRINT( 768 std::cerr << "formal type is "; 769 formalType->print( std::cerr ); 770 std::cerr << std::endl << "actual type is "; 771 actualType->print( std::cerr ); 772 std::cerr << std::endl; 773 ) 774 775 // attempt to unify types 776 if ( unify( formalType, actualType, aResult.env, aResult.need, aResult.have, aResult.openVars, indexer ) ) { 777 // add argument 778 aResult.withArg( aActual.expr, actual.cost ); 779 ++aResult.nextArg; 780 if ( exploded.size() > 1 ) { 781 // other parts of tuple left over 782 aResult.expls = std::move( exploded ); 783 aResult.nextExpl = 1; 784 } 785 nextResults.push_back( std::move(aResult) ); 786 } 787 } 788 } 789 790 // reset for next parameter 791 results.swap( nextResults ); 792 nextResults.clear(); 793 794 return ! results.empty(); 795 } 796 797 template<typename OutputIterator> 798 void AlternativeFinder::makeFunctionAlternatives( const Alternative &func, 799 FunctionType *funcType, const std::vector< AlternativeFinder > &args, 800 OutputIterator out ) { 801 OpenVarSet funcOpenVars; 802 AssertionSet funcNeed, funcHave; 803 TypeEnvironment funcEnv( func.env ); 804 makeUnifiableVars( funcType, funcOpenVars, funcNeed ); 805 // add all type variables as open variables now so that those not used in the parameter 806 // list are still considered open. 807 funcEnv.add( funcType->get_forall() ); 808 685 809 if ( targetType && ! targetType->isVoid() && ! 
funcType->get_returnVals().empty() ) { 686 810 // attempt to narrow based on expected target type 687 811 Type * returnType = funcType->get_returnVals().front()->get_type(); 688 if ( ! unify( returnType, targetType, resultEnv, resultNeed, resultHave, openVars, indexer ) ) { 689 // unification failed, don't pursue this alternative 812 if ( ! unify( returnType, targetType, funcEnv, funcNeed, funcHave, funcOpenVars, 813 indexer ) ) { 814 // unification failed, don't pursue this function alternative 690 815 return; 691 816 } 692 817 } 693 818 694 if ( instantiateFunction( funcType->get_parameters(), actualAlt, funcType->get_isVarArgs(), openVars, resultEnv, resultNeed, resultHave, instantiatedActuals ) ) { 819 // iteratively build matches, one parameter at a time 820 std::vector<ArgPack> results{ ArgPack{ funcEnv, funcNeed, funcHave, funcOpenVars } }; 821 std::vector<ArgPack> nextResults{}; 822 for ( DeclarationWithType* formal : funcType->get_parameters() ) { 823 ObjectDecl* obj = strict_dynamic_cast< ObjectDecl* >( formal ); 824 if ( ! instantiateArgument( 825 obj->get_type(), obj->get_init(), args, results, nextResults, indexer ) ) 826 return; 827 } 828 829 // filter out results that don't use all the arguments, and aren't variadic 830 std::vector<ArgPack> finalResults{}; 831 if ( funcType->get_isVarArgs() ) { 832 for ( ArgPack& result : results ) { 833 // use rest of exploded tuple if present 834 while ( result.nextExpl < result.expls.size() ) { 835 const Alternative& actual = result.expls[result.nextExpl]; 836 result.env.addActual( actual.env, result.openVars ); 837 result.withArg( actual.expr ); 838 ++result.nextExpl; 839 } 840 } 841 842 while ( ! results.empty() ) { 843 // build combinations for all remaining arguments 844 for ( ArgPack& result : results ) { 845 // keep if used all arguments 846 if ( result.nextArg >= args.size() ) { 847 finalResults.push_back( std::move(result) ); 848 continue; 849 } 850 851 // add each possible next argument 852 for ( const Alternative& actual : args[result.nextArg] ) { 853 ArgPack aResult = result; // copy to clone everything 854 // add details of actual to result 855 aResult.env.addActual( actual.env, aResult.openVars ); 856 Cost cost = actual.cost; 857 858 // explode argument 859 std::vector<Alternative> exploded; 860 Tuples::explode( actual, indexer, back_inserter( exploded ) ); 861 862 // add exploded argument to arg list 863 for ( Alternative& aActual : exploded ) { 864 aResult.withArg( aActual.expr, cost ); 865 cost = Cost::zero; 866 } 867 ++aResult.nextArg; 868 nextResults.push_back( std::move(aResult) ); 869 } 870 } 871 872 // reset for next round 873 results.swap( nextResults ); 874 nextResults.clear(); 875 } 876 } else { 877 // filter out results that don't use all the arguments 878 for ( ArgPack& result : results ) { 879 if ( result.nextExpl >= result.expls.size() && result.nextArg >= args.size() ) { 880 finalResults.push_back( std::move(result) ); 881 } 882 } 883 } 884 885 // validate matching combos, add to final result list 886 for ( ArgPack& result : finalResults ) { 695 887 ApplicationExpr *appExpr = new ApplicationExpr( func.expr->clone() ); 696 Alternative newAlt( appExpr, result Env, sumCost( instantiatedActuals ) );697 makeExprList( instantiatedActuals, appExpr->get_args() );888 Alternative newAlt( appExpr, result.env, sumCost( result.actuals ) ); 889 makeExprList( result.actuals, appExpr->get_args() ); 698 890 PRINT( 699 891 std::cerr << "instantiate function success: " << appExpr << std::endl; 700 892 std::cerr << "need 
assertions:" << std::endl; 701 printAssertionSet( result Need, std::cerr, 8 );893 printAssertionSet( result.need, std::cerr, 8 ); 702 894 ) 703 inferParameters( result Need, resultHave, newAlt,openVars, out );895 inferParameters( result.need, result.have, newAlt, result.openVars, out ); 704 896 } 705 897 } … … 711 903 if ( funcFinder.alternatives.empty() ) return; 712 904 713 std::list< AlternativeFinder > argAlternatives; 714 findSubExprs( untypedExpr->begin_args(), untypedExpr->end_args(), back_inserter( argAlternatives ) ); 715 716 std::list< AltList > possibilities; 717 combos( argAlternatives.begin(), argAlternatives.end(), back_inserter( possibilities ) ); 905 std::vector< AlternativeFinder > argAlternatives; 906 findSubExprs( untypedExpr->begin_args(), untypedExpr->end_args(), 907 back_inserter( argAlternatives ) ); 718 908 719 909 // take care of possible tuple assignments 720 910 // if not tuple assignment, assignment is taken care of as a normal function call 721 Tuples::handleTupleAssignment( *this, untypedExpr, possibilities );911 Tuples::handleTupleAssignment( *this, untypedExpr, argAlternatives ); 722 912 723 913 // find function operators … … 744 934 Alternative newFunc( *func ); 745 935 referenceToRvalueConversion( newFunc.expr ); 746 for ( std::list< AltList >::iterator actualAlt = possibilities.begin(); actualAlt != possibilities.end(); ++actualAlt ) { 747 // XXX 748 //Designators::check_alternative( function, *actualAlt ); 749 makeFunctionAlternatives( newFunc, function, *actualAlt, std::back_inserter( candidates ) ); 750 } 936 makeFunctionAlternatives( newFunc, function, argAlternatives, 937 std::back_inserter( candidates ) ); 751 938 } 752 939 } else if ( TypeInstType *typeInst = dynamic_cast< TypeInstType* >( func->expr->get_result()->stripReferences() ) ) { // handle ftype (e.g. *? on function pointer) … … 756 943 Alternative newFunc( *func ); 757 944 referenceToRvalueConversion( newFunc.expr ); 758 for ( std::list< AltList >::iterator actualAlt = possibilities.begin(); actualAlt != possibilities.end(); ++actualAlt ) { 759 makeFunctionAlternatives( newFunc, function, *actualAlt, std::back_inserter( candidates ) ); 760 } // for 945 makeFunctionAlternatives( newFunc, function, argAlternatives, 946 std::back_inserter( candidates ) ); 761 947 } // if 762 948 } // if 763 } 764 765 // try each function operator ?() with the current function alternative and each of the argument combinations 766 for ( AltList::iterator funcOp = funcOpFinder.alternatives.begin(); funcOp != funcOpFinder.alternatives.end(); ++funcOp ) { 767 // check if the type is pointer to function 768 if ( PointerType *pointer = dynamic_cast< PointerType* >( funcOp->expr->get_result()->stripReferences() ) ) { 769 if ( FunctionType *function = dynamic_cast< FunctionType* >( pointer->get_base() ) ) { 949 } 950 } catch ( SemanticError &e ) { 951 errors.append( e ); 952 } 953 } // for 954 955 // try each function operator ?() with each function alternative 956 if ( ! 
funcOpFinder.alternatives.empty() ) { 957 // add function alternatives to front of argument list 958 argAlternatives.insert( argAlternatives.begin(), std::move(funcFinder) ); 959 960 for ( AltList::iterator funcOp = funcOpFinder.alternatives.begin(); 961 funcOp != funcOpFinder.alternatives.end(); ++funcOp ) { 962 try { 963 // check if type is a pointer to function 964 if ( PointerType* pointer = dynamic_cast<PointerType*>( 965 funcOp->expr->get_result()->stripReferences() ) ) { 966 if ( FunctionType* function = 967 dynamic_cast<FunctionType*>( pointer->get_base() ) ) { 770 968 Alternative newFunc( *funcOp ); 771 969 referenceToRvalueConversion( newFunc.expr ); 772 for ( std::list< AltList >::iterator actualAlt = possibilities.begin(); actualAlt != possibilities.end(); ++actualAlt ) { 773 AltList currentAlt; 774 currentAlt.push_back( *func ); 775 currentAlt.insert( currentAlt.end(), actualAlt->begin(), actualAlt->end() ); 776 makeFunctionAlternatives( newFunc, function, currentAlt, std::back_inserter( candidates ) ); 777 } // for 778 } // if 779 } // if 780 } // for 781 } catch ( SemanticError &e ) { 782 errors.append( e ); 783 } 784 } // for 970 makeFunctionAlternatives( newFunc, function, argAlternatives, 971 std::back_inserter( candidates ) ); 972 } 973 } 974 } catch ( SemanticError &e ) { 975 errors.append( e ); 976 } 977 } 978 } 785 979 786 980 // Implement SFINAE; resolution errors are only errors if there aren't any non-erroneous resolutions -
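The new instantiateArgument/makeFunctionAlternatives code above replaces the old "compute every combination of argument interpretations up front" approach with a frontier of partial matches (ArgPack) that is extended one parameter at a time, pruning packs whose next argument fails to unify. A much-simplified sketch of that strategy, using plain strings in place of the resolver's types and omitting tuples, defaults, varargs, and assertions:

#include <string>
#include <vector>

struct ArgPack {                       // one partial match: chosen interpretation per argument so far
	std::vector<std::string> actuals;
	unsigned nextArg = 0;
};

// hypothetical unification test; "T" stands for an open type variable
static bool unifies( const std::string & formal, const std::string & actual ) {
	return formal == actual || formal == "T";
}

static std::vector<ArgPack> matchParameters(
		const std::vector<std::string> & formals,
		const std::vector<std::vector<std::string>> & args ) {  // interpretations per argument
	std::vector<ArgPack> results{ ArgPack{} }, next;
	for ( const std::string & formal : formals ) {
		for ( const ArgPack & r : results ) {
			if ( r.nextArg >= args.size() ) continue;           // ran out of arguments
			for ( const std::string & actual : args[r.nextArg] ) {
				if ( ! unifies( formal, actual ) ) continue;    // prune failed matches early
				ArgPack n = r;                                  // copy to clone everything
				n.actuals.push_back( actual );
				++n.nextArg;
				next.push_back( n );
			}
		}
		results.swap( next );                                   // reset frontier for next parameter
		next.clear();
	}
	return results;                                             // surviving complete matches
}

int main() {
	auto ms = matchParameters( { "int", "T" }, { { "int", "double" }, { "char" } } );
	return ms.size() == 1 ? 0 : 1;    // only the (int, char) combination survives
}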
src/ResolvExpr/AlternativeFinder.h
rc95b115 r490db327 34 34 public: 35 35 AlternativeFinder( const SymTab::Indexer &indexer, const TypeEnvironment &env ); 36 37 AlternativeFinder( const AlternativeFinder& o ) 38 : indexer(o.indexer), alternatives(o.alternatives), env(o.env), 39 targetType(o.targetType) {} 40 41 AlternativeFinder( AlternativeFinder&& o ) 42 : indexer(o.indexer), alternatives(std::move(o.alternatives)), env(o.env), 43 targetType(o.targetType) {} 44 45 AlternativeFinder& operator= ( const AlternativeFinder& o ) { 46 if (&o == this) return *this; 47 48 // horrific nasty hack to rebind references... 49 alternatives.~AltList(); 50 new(this) AlternativeFinder(o); 51 return *this; 52 } 53 54 AlternativeFinder& operator= ( AlternativeFinder&& o ) { 55 if (&o == this) return *this; 56 57 // horrific nasty hack to rebind references... 58 alternatives.~AltList(); 59 new(this) AlternativeFinder(std::move(o)); 60 return *this; 61 } 62 36 63 void find( Expression *expr, bool adjust = false, bool prune = true, bool failFast = true ); 37 64 /// Calls find with the adjust flag set; adjustment turns array and function types into equivalent pointer types … … 99 126 /// Adds alternatives for offsetof expressions, given the base type and name of the member 100 127 template< typename StructOrUnionType > void addOffsetof( StructOrUnionType *aggInst, const std::string &name ); 101 bool instantiateFunction( std::list< DeclarationWithType* >& formals, const AltList &actuals, bool isVarArgs, OpenVarSet& openVars, TypeEnvironment &resultEnv, AssertionSet &resultNeed, AssertionSet &resultHave, AltList & out ); 102 template< typename OutputIterator > 103 void makeFunctionAlternatives( const Alternative &func, FunctionType *funcType, const AltList &actualAlt, OutputIterator out ); 128 template<typename OutputIterator> 129 void makeFunctionAlternatives( const Alternative &func, FunctionType *funcType, const std::vector< AlternativeFinder >& args, OutputIterator out ); 104 130 template< typename OutputIterator > 105 131 void inferParameters( const AssertionSet &need, AssertionSet &have, const Alternative &newAlt, OpenVarSet &openVars, OutputIterator out ); -
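The copy/move assignment operators added to AlternativeFinder above work around its reference members (the "horrific nasty hack to rebind references") by destroying and placement-new constructing in place; the diff's version only runs the AltList destructor before reconstructing the whole object over itself. A minimal standalone illustration of the underlying idiom on a hypothetical class, ignoring std::launder subtleties for brevity:

#include <new>

struct Holder {
	int & ref;                          // reference member: implicit assignment is deleted
	Holder( int & r ) : ref( r ) {}
	Holder( const Holder & ) = default;
	Holder & operator=( const Holder & o ) {
		if ( &o == this ) return *this;
		this->~Holder();                // end the current object's lifetime
		new (this) Holder( o );         // reconstruct in place, rebinding the reference
		return *this;
	}
};

int main() {
	int a = 1, b = 2;
	Holder x( a ), y( b );
	x = y;                              // x.ref now refers to b
	return x.ref == 2 ? 0 : 1;
}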
src/ResolvExpr/TypeEnvironment.cc
rc95b115 r490db327
  }

+ void TypeEnvironment::addActual( const TypeEnvironment& actualEnv, OpenVarSet& openVars ) {
+ 	for ( const EqvClass& c : actualEnv ) {
+ 		EqvClass c2 = c;
+ 		c2.allowWidening = false;
+ 		for ( const std::string& var : c2.vars ) {
+ 			openVars[ var ] = c2.data;
+ 		}
+ 		env.push_back( std::move(c2) );
+ 	}
+ }
+
  } // namespace ResolvExpr
src/ResolvExpr/TypeEnvironment.h
rc95b115 r490db327
  	TypeEnvironment *clone() const { return new TypeEnvironment( *this ); }

+ 	/// Iteratively adds the environment of a new actual (with allowWidening = false),
+ 	/// and extracts open variables.
+ 	void addActual( const TypeEnvironment& actualEnv, OpenVarSet& openVars );
+
  	typedef std::list< EqvClass >::iterator iterator;
  	iterator begin() { return env.begin(); }
src/SymTab/Mangler.cc
rc95b115 r490db327 32 32 namespace SymTab { 33 33 std::string Mangler::mangleType( Type * ty ) { 34 Mangler mangler( false, true );34 Mangler mangler( false, true, true ); 35 35 maybeAccept( ty, mangler ); 36 36 return mangler.get_mangleName(); 37 37 } 38 38 39 Mangler::Mangler( bool mangleOverridable, bool typeMode ) 40 : nextVarNum( 0 ), isTopLevel( true ), mangleOverridable( mangleOverridable ), typeMode( typeMode ) {} 39 std::string Mangler::mangleConcrete( Type* ty ) { 40 Mangler mangler( false, false, false ); 41 maybeAccept( ty, mangler ); 42 return mangler.get_mangleName(); 43 } 44 45 Mangler::Mangler( bool mangleOverridable, bool typeMode, bool mangleGenericParams ) 46 : nextVarNum( 0 ), isTopLevel( true ), mangleOverridable( mangleOverridable ), typeMode( typeMode ), mangleGenericParams( mangleGenericParams ) {} 41 47 42 48 Mangler::Mangler( const Mangler &rhs ) : mangleName() { … … 166 172 167 173 mangleName << ( refType->get_name().length() + prefix.length() ) << prefix << refType->get_name(); 168 } 169 170 void Mangler::mangleGenericRef( ReferenceToType * refType, std::string prefix ) { 171 printQualifiers( refType ); 172 173 std::ostringstream oldName( mangleName.str() ); 174 mangleName.clear(); 175 176 mangleName << prefix << refType->get_name(); 177 178 std::list< Expression* >& params = refType->get_parameters(); 179 if ( ! params.empty() ) { 180 mangleName << "_"; 181 for ( std::list< Expression* >::const_iterator param = params.begin(); param != params.end(); ++param ) { 182 TypeExpr *paramType = dynamic_cast< TypeExpr* >( *param ); 183 assertf(paramType, "Aggregate parameters should be type expressions: %s", toString(*param).c_str()); 184 maybeAccept( paramType->get_type(), *this ); 174 175 if ( mangleGenericParams ) { 176 std::list< Expression* >& params = refType->get_parameters(); 177 if ( ! params.empty() ) { 178 mangleName << "_"; 179 for ( std::list< Expression* >::const_iterator param = params.begin(); param != params.end(); ++param ) { 180 TypeExpr *paramType = dynamic_cast< TypeExpr* >( *param ); 181 assertf(paramType, "Aggregate parameters should be type expressions: %s", toString(*param).c_str()); 182 maybeAccept( paramType->get_type(), *this ); 183 } 184 mangleName << "_"; 185 185 } 186 mangleName << "_";187 186 } 188 189 oldName << mangleName.str().length() << mangleName.str();190 mangleName.str( oldName.str() );191 187 } 192 188 193 189 void Mangler::visit( StructInstType * aggregateUseType ) { 194 if ( typeMode ) mangleGenericRef( aggregateUseType, "s" ); 195 else mangleRef( aggregateUseType, "s" ); 190 mangleRef( aggregateUseType, "s" ); 196 191 } 197 192 198 193 void Mangler::visit( UnionInstType * aggregateUseType ) { 199 if ( typeMode ) mangleGenericRef( aggregateUseType, "u" ); 200 else mangleRef( aggregateUseType, "u" ); 194 mangleRef( aggregateUseType, "u" ); 201 195 } 202 196 … … 285 279 varNums[ (*i)->name ] = std::pair< int, int >( nextVarNum++, (int)(*i)->get_kind() ); 286 280 for ( std::list< DeclarationWithType* >::iterator assert = (*i)->assertions.begin(); assert != (*i)->assertions.end(); ++assert ) { 287 Mangler sub_mangler( mangleOverridable, typeMode );281 Mangler sub_mangler( mangleOverridable, typeMode, mangleGenericParams ); 288 282 sub_mangler.nextVarNum = nextVarNum; 289 283 sub_mangler.isTopLevel = false; -
src/SymTab/Mangler.h
rc95b115 r490db327 30 30 /// Mangle syntax tree object; primary interface to clients 31 31 template< typename SynTreeClass > 32 static std::string mangle( SynTreeClass *decl, bool mangleOverridable = true, bool typeMode = false );32 static std::string mangle( SynTreeClass *decl, bool mangleOverridable = true, bool typeMode = false, bool mangleGenericParams = true ); 33 33 /// Mangle a type name; secondary interface 34 34 static std::string mangleType( Type* ty ); 35 /// Mangle ignoring generic type parameters 36 static std::string mangleConcrete( Type* ty ); 37 35 38 36 39 virtual void visit( ObjectDecl *declaration ); … … 62 65 bool mangleOverridable; ///< Specially mangle overridable built-in methods 63 66 bool typeMode; ///< Produce a unique mangled name for a type 67 bool mangleGenericParams; ///< Include generic parameters in name mangling if true 64 68 65 Mangler( bool mangleOverridable, bool typeMode );69 Mangler( bool mangleOverridable, bool typeMode, bool mangleGenericParams ); 66 70 Mangler( const Mangler & ); 67 71 68 72 void mangleDecl( DeclarationWithType *declaration ); 69 73 void mangleRef( ReferenceToType *refType, std::string prefix ); 70 void mangleGenericRef( ReferenceToType *refType, std::string prefix );71 74 72 75 void printQualifiers( Type *type ); … … 74 77 75 78 template< typename SynTreeClass > 76 std::string Mangler::mangle( SynTreeClass *decl, bool mangleOverridable, bool typeMode ) {77 Mangler mangler( mangleOverridable, typeMode );79 std::string Mangler::mangle( SynTreeClass *decl, bool mangleOverridable, bool typeMode, bool mangleGenericParams ) { 80 Mangler mangler( mangleOverridable, typeMode, mangleGenericParams ); 78 81 maybeAccept( decl, mangler ); 79 82 return mangler.get_mangleName(); -
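The new mangleGenericParams flag (and the mangleConcrete entry point that turns it off) controls whether generic arguments become part of the mangled name. A toy illustration of the difference, using a made-up mangling scheme rather than CFA's real one:

#include <iostream>
#include <string>
#include <vector>

std::string mangleRef( const std::string & base, const std::vector<std::string> & params,
		bool mangleGenericParams ) {
	std::string name = std::to_string( base.size() + 1 ) + "s" + base;   // "7svector"-style prefix
	if ( mangleGenericParams && ! params.empty() ) {
		name += "_";
		for ( const std::string & p : params ) name += p;                // append each generic argument
		name += "_";
	}
	return name;
}

int main() {
	std::cout << mangleRef( "vector", { "i" }, true )  << "\n";   // generic args included: 7svector_i_
	std::cout << mangleRef( "vector", { "i" }, false ) << "\n";   // concrete-erased key:   7svector
}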
src/Tuples/TupleAssignment.cc
rc95b115 r490db327 20 20 #include <memory> // for unique_ptr, allocator_trai... 21 21 #include <string> // for string 22 #include <vector> 22 23 23 24 #include "CodeGen/OperatorTable.h" … … 33 34 #include "ResolvExpr/Resolver.h" // for resolveCtorInit 34 35 #include "ResolvExpr/TypeEnvironment.h" // for TypeEnvironment 36 #include "ResolvExpr/typeops.h" // for combos 35 37 #include "SynTree/Declaration.h" // for ObjectDecl 36 38 #include "SynTree/Expression.h" // for Expression, CastExpr, Name... … … 52 54 // dispatcher for Tuple (multiple and mass) assignment operations 53 55 TupleAssignSpotter( ResolvExpr::AlternativeFinder & ); 54 void spot( UntypedExpr * expr, const std::list<ResolvExpr::AltList> &possibilities );56 void spot( UntypedExpr * expr, std::vector<ResolvExpr::AlternativeFinder> &args ); 55 57 56 58 private: … … 59 61 struct Matcher { 60 62 public: 61 Matcher( TupleAssignSpotter &spotter, const ResolvExpr::AltList & alts ); 63 Matcher( TupleAssignSpotter &spotter, const ResolvExpr::AltList& lhs, const 64 ResolvExpr::AltList& rhs ); 62 65 virtual ~Matcher() {} 63 66 virtual void match( std::list< Expression * > &out ) = 0; … … 72 75 struct MassAssignMatcher : public Matcher { 73 76 public: 74 MassAssignMatcher( TupleAssignSpotter &spotter, const ResolvExpr::AltList & alts ); 77 MassAssignMatcher( TupleAssignSpotter &spotter, const ResolvExpr::AltList& lhs, 78 const ResolvExpr::AltList& rhs ) : Matcher(spotter, lhs, rhs) {} 75 79 virtual void match( std::list< Expression * > &out ); 76 80 }; … … 78 82 struct MultipleAssignMatcher : public Matcher { 79 83 public: 80 MultipleAssignMatcher( TupleAssignSpotter &spot, const ResolvExpr::AltList & alts ); 84 MultipleAssignMatcher( TupleAssignSpotter &spotter, const ResolvExpr::AltList& lhs, 85 const ResolvExpr::AltList& rhs ) : Matcher(spotter, lhs, rhs) {} 81 86 virtual void match( std::list< Expression * > &out ); 82 87 }; … … 114 119 } 115 120 116 void handleTupleAssignment( ResolvExpr::AlternativeFinder & currentFinder, UntypedExpr * expr, const std::list<ResolvExpr::AltList> &possibilities ) { 121 void handleTupleAssignment( ResolvExpr::AlternativeFinder & currentFinder, UntypedExpr * expr, 122 std::vector<ResolvExpr::AlternativeFinder> &args ) { 117 123 TupleAssignSpotter spotter( currentFinder ); 118 spotter.spot( expr, possibilities );124 spotter.spot( expr, args ); 119 125 } 120 126 … … 122 128 : currentFinder(f) {} 123 129 124 void TupleAssignSpotter::spot( UntypedExpr * expr, const std::list<ResolvExpr::AltList> &possibilities ) { 130 void TupleAssignSpotter::spot( UntypedExpr * expr, 131 std::vector<ResolvExpr::AlternativeFinder> &args ) { 125 132 if ( NameExpr *op = dynamic_cast< NameExpr * >(expr->get_function()) ) { 126 133 if ( CodeGen::isCtorDtorAssign( op->get_name() ) ) { 127 fname = op->get_name(); 128 PRINT( std::cerr << "TupleAssignment: " << fname << std::endl; ) 129 for ( std::list<ResolvExpr::AltList>::const_iterator ali = possibilities.begin(); ali != possibilities.end(); ++ali ) { 130 if ( ali->size() == 0 ) continue; // AlternativeFinder will natrually handle this case, if it's legal 131 if ( ali->size() <= 1 && CodeGen::isAssignment( op->get_name() ) ) { 132 // what does it mean if an assignment takes 1 argument? 
maybe someone defined such a function, in which case AlternativeFinder will naturally handle it 133 continue; 134 fname = op->get_name(); 135 136 // AlternativeFinder will naturally handle this case case, if it's legal 137 if ( args.size() == 0 ) return; 138 139 // if an assignment only takes 1 argument, that's odd, but maybe someone wrote 140 // the function, in which case AlternativeFinder will handle it normally 141 if ( args.size() == 1 && CodeGen::isAssignment( fname ) ) return; 142 143 // look over all possible left-hand-sides 144 for ( ResolvExpr::Alternative& lhsAlt : args[0] ) { 145 // skip non-tuple LHS 146 if ( ! refToTuple(lhsAlt.expr) ) continue; 147 148 // explode is aware of casts - ensure every LHS expression is sent into explode 149 // with a reference cast 150 // xxx - this seems to change the alternatives before the normal 151 // AlternativeFinder flow; maybe this is desired? 152 if ( ! dynamic_cast<CastExpr*>( lhsAlt.expr ) ) { 153 lhsAlt.expr = new CastExpr( lhsAlt.expr, 154 new ReferenceType( Type::Qualifiers(), 155 lhsAlt.expr->get_result()->clone() ) ); 134 156 } 135 157 136 assert( ! ali->empty() ); 137 // grab args 2-N and group into a TupleExpr 138 const ResolvExpr::Alternative & alt1 = ali->front(); 139 auto begin = std::next(ali->begin(), 1), end = ali->end(); 140 PRINT( std::cerr << "alt1 is " << alt1.expr << std::endl; ) 141 if ( refToTuple(alt1.expr) ) { 142 PRINT( std::cerr << "and is reference to tuple" << std::endl; ) 143 if ( isMultAssign( begin, end ) ) { 144 PRINT( std::cerr << "possible multiple assignment" << std::endl; ) 145 matcher.reset( new MultipleAssignMatcher( *this, *ali ) ); 146 } else { 147 // mass assignment 148 PRINT( std::cerr << "possible mass assignment" << std::endl; ) 149 matcher.reset( new MassAssignMatcher( *this, *ali ) ); 158 // explode the LHS so that each field of a tuple-valued-expr is assigned 159 ResolvExpr::AltList lhs; 160 explode( lhsAlt, currentFinder.get_indexer(), back_inserter(lhs), true ); 161 for ( ResolvExpr::Alternative& alt : lhs ) { 162 // each LHS value must be a reference - some come in with a cast expression, 163 // if not just cast to reference here 164 if ( ! 
dynamic_cast<ReferenceType*>( alt.expr->get_result() ) ) { 165 alt.expr = new CastExpr( alt.expr, 166 new ReferenceType( Type::Qualifiers(), 167 alt.expr->get_result()->clone() ) ); 150 168 } 169 } 170 171 if ( args.size() == 1 ) { 172 // mass default-initialization/destruction 173 ResolvExpr::AltList rhs{}; 174 matcher.reset( new MassAssignMatcher( *this, lhs, rhs ) ); 151 175 match(); 176 } else if ( args.size() > 2 ) { 177 // expand all possible RHS possibilities 178 // TODO build iterative version of this instead of using combos 179 std::vector< ResolvExpr::AltList > rhsAlts; 180 combos( std::next(args.begin(), 1), args.end(), 181 std::back_inserter( rhsAlts ) ); 182 for ( const ResolvExpr::AltList& rhsAlt : rhsAlts ) { 183 // multiple assignment 184 ResolvExpr::AltList rhs; 185 explode( rhsAlt, currentFinder.get_indexer(), 186 std::back_inserter(rhs), true ); 187 matcher.reset( new MultipleAssignMatcher( *this, lhs, rhs ) ); 188 match(); 189 } 190 } else { 191 for ( const ResolvExpr::Alternative& rhsAlt : args[1] ) { 192 ResolvExpr::AltList rhs; 193 if ( isTuple(rhsAlt.expr) ) { 194 // multiple assignment 195 explode( rhsAlt, currentFinder.get_indexer(), 196 std::back_inserter(rhs), true ); 197 matcher.reset( new MultipleAssignMatcher( *this, lhs, rhs ) ); 198 } else { 199 // mass assignment 200 rhs.push_back( rhsAlt ); 201 matcher.reset( new MassAssignMatcher( *this, lhs, rhs ) ); 202 } 203 match(); 204 } 152 205 } 153 206 } … … 169 222 ResolvExpr::AltList current; 170 223 // now resolve new assignments 171 for ( std::list< Expression * >::iterator i = new_assigns.begin(); i != new_assigns.end(); ++i ) { 224 for ( std::list< Expression * >::iterator i = new_assigns.begin(); 225 i != new_assigns.end(); ++i ) { 172 226 PRINT( 173 227 std::cerr << "== resolving tuple assign ==" << std::endl; … … 175 229 ) 176 230 177 ResolvExpr::AlternativeFinder finder( currentFinder.get_indexer(), currentFinder.get_environ() ); 231 ResolvExpr::AlternativeFinder finder{ currentFinder.get_indexer(), 232 currentFinder.get_environ() }; 178 233 try { 179 234 finder.findWithAdjustment(*i); … … 196 251 // combine assignment environments into combined expression environment 197 252 simpleCombineEnvironments( current.begin(), current.end(), matcher->compositeEnv ); 198 currentFinder.get_alternatives().push_front( ResolvExpr::Alternative(new TupleAssignExpr(solved_assigns, matcher->tmpDecls), matcher->compositeEnv, ResolvExpr::sumCost( current ) + matcher->baseCost ) ); 199 } 200 201 TupleAssignSpotter::Matcher::Matcher( TupleAssignSpotter &spotter, const ResolvExpr::AltList &alts ) : spotter(spotter), baseCost( ResolvExpr::sumCost( alts ) ) { 202 assert( ! alts.empty() ); 203 // combine argument environments into combined expression environment 204 simpleCombineEnvironments( alts.begin(), alts.end(), compositeEnv ); 205 206 ResolvExpr::Alternative lhsAlt = alts.front(); 207 // explode is aware of casts - ensure every LHS expression is sent into explode with a reference cast 208 if ( ! dynamic_cast< CastExpr * >( lhsAlt.expr ) ) { 209 lhsAlt.expr = new CastExpr( lhsAlt.expr, new ReferenceType( Type::Qualifiers(), lhsAlt.expr->get_result()->clone() ) ); 210 } 211 212 // explode the lhs so that each field of the tuple-valued-expr is assigned. 213 explode( lhsAlt, spotter.currentFinder.get_indexer(), back_inserter(lhs), true ); 214 215 for ( ResolvExpr::Alternative & alt : lhs ) { 216 // every LHS value must be a reference - some come in with a cast expression, if it doesn't just cast to reference here. 217 if ( ! 
dynamic_cast< ReferenceType * >( alt.expr->get_result() ) ) { 218 alt.expr = new CastExpr( alt.expr, new ReferenceType( Type::Qualifiers(), alt.expr->get_result()->clone() ) ); 219 } 220 } 221 } 222 223 TupleAssignSpotter::MassAssignMatcher::MassAssignMatcher( TupleAssignSpotter &spotter, const ResolvExpr::AltList & alts ) : Matcher( spotter, alts ) { 224 assert( alts.size() == 1 || alts.size() == 2 ); 225 if ( alts.size() == 2 ) { 226 rhs.push_back( alts.back() ); 227 } 228 } 229 230 TupleAssignSpotter::MultipleAssignMatcher::MultipleAssignMatcher( TupleAssignSpotter &spotter, const ResolvExpr::AltList & alts ) : Matcher( spotter, alts ) { 231 // explode the rhs so that each field of the tuple-valued-expr is assigned. 232 explode( std::next(alts.begin(), 1), alts.end(), spotter.currentFinder.get_indexer(), back_inserter(rhs), true ); 253 currentFinder.get_alternatives().push_front( ResolvExpr::Alternative( 254 new TupleAssignExpr(solved_assigns, matcher->tmpDecls), matcher->compositeEnv, 255 ResolvExpr::sumCost( current ) + matcher->baseCost ) ); 256 } 257 258 TupleAssignSpotter::Matcher::Matcher( TupleAssignSpotter &spotter, 259 const ResolvExpr::AltList &lhs, const ResolvExpr::AltList &rhs ) 260 : lhs(lhs), rhs(rhs), spotter(spotter), 261 baseCost( ResolvExpr::sumCost( lhs ) + ResolvExpr::sumCost( rhs ) ) { 262 simpleCombineEnvironments( lhs.begin(), lhs.end(), compositeEnv ); 263 simpleCombineEnvironments( rhs.begin(), rhs.end(), compositeEnv ); 233 264 } 234 265 -
src/Tuples/Tuples.h
rc95b115 r490db327
  #include <string>
+ #include <vector>

  #include "SynTree/Expression.h"
…
  namespace Tuples {
  	// TupleAssignment.cc
- 	void handleTupleAssignment( ResolvExpr::AlternativeFinder & currentFinder, UntypedExpr * assign, const std::list<ResolvExpr::AltList> & possibilities );
+ 	void handleTupleAssignment( ResolvExpr::AlternativeFinder & currentFinder, UntypedExpr * assign,
+ 		std::vector< ResolvExpr::AlternativeFinder >& args );

  	// TupleExpansion.cc
  	/// expands z.[a, b.[x, y], c] into [z.a, z.b.x, z.b.y, z.c], inserting UniqueExprs as appropriate
src/benchmark/Makefile.am
rc95b115 r490db327 133 133 ## ========================================================================================================= 134 134 creation$(EXEEXT) :\ 135 creation-pthread.run \ 136 creation-cfa_coroutine.run \ 137 creation-cfa_thread.run \ 138 creation-upp_coroutine.run \ 135 creation-pthread.run \ 136 creation-cfa_coroutine.run \ 137 creation-cfa_coroutine_eager.run \ 138 creation-cfa_thread.run \ 139 creation-upp_coroutine.run \ 139 140 creation-upp_thread.run 140 141 … … 142 143 ${CC} creation/cfa_cor.c -DBENCH_N=10000000 -I. -nodebug -lrt -quiet @CFA_FLAGS@ ${AM_CFLAGS} ${CFLAGS} ${ccflags} 143 144 145 creation-cfa_coroutine_eager$(EXEEXT): 146 ${CC} creation/cfa_cor.c -DBENCH_N=10000000 -I. -nodebug -lrt -quiet @CFA_FLAGS@ ${AM_CFLAGS} ${CFLAGS} ${ccflags} -DEAGER 147 144 148 creation-cfa_thread$(EXEEXT): 145 149 ${CC} creation/cfa_thrd.c -DBENCH_N=10000000 -I. -nodebug -lrt -quiet @CFA_FLAGS@ ${AM_CFLAGS} ${CFLAGS} ${ccflags} … … 153 157 creation-pthread$(EXEEXT): 154 158 @BACKEND_CC@ creation/pthreads.c -DBENCH_N=250000 -I. -lrt -pthread ${AM_CFLAGS} ${CFLAGS} ${ccflags} 159 160 ## ========================================================================================================= 161 162 compile$(EXEEXT) :\ 163 compile-array$(EXEEXT) \ 164 compile-attributes$(EXEEXT) \ 165 compile-empty$(EXEEXT) \ 166 compile-expression$(EXEEXT) \ 167 compile-io$(EXEEXT) \ 168 compile-monitor$(EXEEXT) \ 169 compile-operators$(EXEEXT) \ 170 compile-thread$(EXEEXT) \ 171 compile-typeof$(EXEEXT) 172 173 174 compile-array$(EXEEXT): 175 @printf '%20s\t' $(subst compile-,,$@) 176 @/usr/bin/time -f "%E" ${CC} -quiet -fsyntax-only -w ../tests/array.c 177 178 compile-attributes$(EXEEXT): 179 @printf '%20s\t' $(subst compile-,,$@) 180 @/usr/bin/time -f "%E" ${CC} -quiet -fsyntax-only -w ../tests/attributes.c 181 182 compile-empty$(EXEEXT): 183 @printf '%20s\t' $(subst compile-,,$@) 184 @/usr/bin/time -f "%E" ${CC} -quiet -fsyntax-only -w compile/empty.c 185 186 compile-expression$(EXEEXT): 187 @printf '%20s\t' $(subst compile-,,$@) 188 @/usr/bin/time -f "%E" ${CC} -quiet -fsyntax-only -w ../tests/expression.c 189 190 compile-io$(EXEEXT): 191 @printf '%20s\t' $(subst compile-,,$@) 192 @/usr/bin/time -f "%E" ${CC} -quiet -fsyntax-only -w ../tests/io.c 193 194 compile-monitor$(EXEEXT): 195 @printf '%20s\t' $(subst compile-,,$@) 196 @/usr/bin/time -f "%E" ${CC} -quiet -fsyntax-only -w ../tests/monitor.c 197 198 compile-operators$(EXEEXT): 199 @printf '%20s\t' $(subst compile-,,$@) 200 @/usr/bin/time -f "%E" ${CC} -quiet -fsyntax-only -w ../tests/operators.c 201 202 compile-thread$(EXEEXT): 203 @printf '%20s\t' $(subst compile-,,$@) 204 @/usr/bin/time -f "%E" ${CC} -quiet -fsyntax-only -w ../tests/thread.c 205 206 compile-typeof$(EXEEXT): 207 @printf '%20s\t' $(subst compile-,,$@) 208 @/usr/bin/time -f "%E" ${CC} -quiet -fsyntax-only -w ../tests/typeof.c 209 155 210 156 211 ## ========================================================================================================= -
src/benchmark/Makefile.in
rc95b115 r490db327 124 124 esac 125 125 am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) 126 am__DIST_COMMON = $(srcdir)/Makefile.in 126 am__DIST_COMMON = $(srcdir)/Makefile.in compile 127 127 DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) 128 128 ACLOCAL = @ACLOCAL@ … … 544 544 545 545 creation$(EXEEXT) :\ 546 creation-pthread.run \ 547 creation-cfa_coroutine.run \ 548 creation-cfa_thread.run \ 549 creation-upp_coroutine.run \ 546 creation-pthread.run \ 547 creation-cfa_coroutine.run \ 548 creation-cfa_coroutine_eager.run \ 549 creation-cfa_thread.run \ 550 creation-upp_coroutine.run \ 550 551 creation-upp_thread.run 551 552 … … 553 554 ${CC} creation/cfa_cor.c -DBENCH_N=10000000 -I. -nodebug -lrt -quiet @CFA_FLAGS@ ${AM_CFLAGS} ${CFLAGS} ${ccflags} 554 555 556 creation-cfa_coroutine_eager$(EXEEXT): 557 ${CC} creation/cfa_cor.c -DBENCH_N=10000000 -I. -nodebug -lrt -quiet @CFA_FLAGS@ ${AM_CFLAGS} ${CFLAGS} ${ccflags} -DEAGER 558 555 559 creation-cfa_thread$(EXEEXT): 556 560 ${CC} creation/cfa_thrd.c -DBENCH_N=10000000 -I. -nodebug -lrt -quiet @CFA_FLAGS@ ${AM_CFLAGS} ${CFLAGS} ${ccflags} … … 564 568 creation-pthread$(EXEEXT): 565 569 @BACKEND_CC@ creation/pthreads.c -DBENCH_N=250000 -I. -lrt -pthread ${AM_CFLAGS} ${CFLAGS} ${ccflags} 570 571 compile$(EXEEXT) :\ 572 compile-array$(EXEEXT) \ 573 compile-attributes$(EXEEXT) \ 574 compile-empty$(EXEEXT) \ 575 compile-expression$(EXEEXT) \ 576 compile-io$(EXEEXT) \ 577 compile-monitor$(EXEEXT) \ 578 compile-operators$(EXEEXT) \ 579 compile-thread$(EXEEXT) \ 580 compile-typeof$(EXEEXT) \ 581 compile-vector_test$(EXEEXT) 582 583 compile-array$(EXEEXT): 584 @printf '%20s\t' $(subst compile-,,$@) 585 @/usr/bin/time -f "%E" ${CC} -quiet -fsyntax-only -w ../tests/array.c 586 587 compile-attributes$(EXEEXT): 588 @printf '%20s\t' $(subst compile-,,$@) 589 @/usr/bin/time -f "%E" ${CC} -quiet -fsyntax-only -w ../tests/attributes.c 590 591 compile-empty$(EXEEXT): 592 @printf '%20s\t' $(subst compile-,,$@) 593 @/usr/bin/time -f "%E" ${CC} -quiet -fsyntax-only -w compile/empty.c 594 595 compile-expression$(EXEEXT): 596 @printf '%20s\t' $(subst compile-,,$@) 597 @/usr/bin/time -f "%E" ${CC} -quiet -fsyntax-only -w ../tests/expression.c 598 599 compile-io$(EXEEXT): 600 @printf '%20s\t' $(subst compile-,,$@) 601 @/usr/bin/time -f "%E" ${CC} -quiet -fsyntax-only -w ../tests/io.c 602 603 compile-monitor$(EXEEXT): 604 @printf '%20s\t' $(subst compile-,,$@) 605 @/usr/bin/time -f "%E" ${CC} -quiet -fsyntax-only -w ../tests/monitor.c 606 607 compile-operators$(EXEEXT): 608 @printf '%20s\t' $(subst compile-,,$@) 609 @/usr/bin/time -f "%E" ${CC} -quiet -fsyntax-only -w ../tests/operators.c 610 611 compile-thread$(EXEEXT): 612 @printf '%20s\t' $(subst compile-,,$@) 613 @/usr/bin/time -f "%E" ${CC} -quiet -fsyntax-only -w ../tests/thread.c 614 615 compile-typeof$(EXEEXT): 616 @printf '%20s\t' $(subst compile-,,$@) 617 @/usr/bin/time -f "%E" ${CC} -quiet -fsyntax-only -w ../tests/typeof.c 618 619 compile-vector_test$(EXEEXT): 620 @printf '%20s\t' $(subst compile-,,$@) 621 @/usr/bin/time -f "%E" ${CC} -quiet -fsyntax-only -w ../tests/vector_test.c 566 622 567 623 %.run : %$(EXEEXT) ${REPEAT} -
src/benchmark/creation/cfa_cor.c
rc95b115 r490db327
  coroutine MyCoroutine {};
- void ?{} (MyCoroutine & this) { prime(this); }
+ void ?{} (MyCoroutine & this) {
+ #ifdef EAGER
+ 	prime(this);
+ #endif
+ }
  void main(MyCoroutine & this) {}
src/libcfa/Makefile.am
rc95b115 r490db327
  cfa_includedir = $(CFA_INCDIR)
- nobase_cfa_include_HEADERS = ${headers} ${stdhdr} math gmp concurrency/invoke.h
+ nobase_cfa_include_HEADERS = \
+ 	${headers} \
+ 	${stdhdr} \
+ 	math \
+ 	gmp \
+ 	bits/defs.h \
+ 	bits/locks.h \
+ 	concurrency/invoke.h \
+ 	libhdr.h \
+ 	libhdr/libalign.h \
+ 	libhdr/libdebug.h \
+ 	libhdr/libtools.h

  CLEANFILES = libcfa-prelude.c
src/libcfa/Makefile.in
rc95b115 r490db327 264 264 containers/result containers/vector concurrency/coroutine \ 265 265 concurrency/thread concurrency/kernel concurrency/monitor \ 266 ${shell echo stdhdr/*} math gmp concurrency/invoke.h 266 ${shell echo stdhdr/*} math gmp bits/defs.h bits/locks.h \ 267 concurrency/invoke.h libhdr.h libhdr/libalign.h \ 268 libhdr/libdebug.h libhdr/libtools.h 267 269 HEADERS = $(nobase_cfa_include_HEADERS) 268 270 am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) … … 430 432 stdhdr = ${shell echo stdhdr/*} 431 433 cfa_includedir = $(CFA_INCDIR) 432 nobase_cfa_include_HEADERS = ${headers} ${stdhdr} math gmp concurrency/invoke.h 434 nobase_cfa_include_HEADERS = \ 435 ${headers} \ 436 ${stdhdr} \ 437 math \ 438 gmp \ 439 bits/defs.h \ 440 bits/locks.h \ 441 concurrency/invoke.h \ 442 libhdr.h \ 443 libhdr/libalign.h \ 444 libhdr/libdebug.h \ 445 libhdr/libtools.h 446 433 447 CLEANFILES = libcfa-prelude.c 434 448 all: all-am -
src/libcfa/concurrency/alarm.c
rc95b115 r490db327
  	disable_interrupts();
- 	lock( &event_kernel->lock DEBUG_CTX2 );
+ 	lock( event_kernel->lock DEBUG_CTX2 );
  	{
  		verify( validate( alarms ) );
…
  		}
  	}
- 	unlock( &event_kernel->lock );
+ 	unlock( event_kernel->lock );
  	this->set = true;
  	enable_interrupts( DEBUG_CTX );
…
  void unregister_self( alarm_node_t * this ) {
  	disable_interrupts();
- 	lock( &event_kernel->lock DEBUG_CTX2 );
+ 	lock( event_kernel->lock DEBUG_CTX2 );
  	{
  		verify( validate( &event_kernel->alarms ) );
  		remove( &event_kernel->alarms, this );
  	}
- 	unlock( &event_kernel->lock );
+ 	unlock( event_kernel->lock );
  	enable_interrupts( DEBUG_CTX );
  	this->set = false;
src/libcfa/concurrency/invoke.h
rc95b115 r490db327 14 14 // 15 15 16 #include <stdbool.h>17 #include <stdint.h>16 #include "bits/defs.h" 17 #include "bits/locks.h" 18 18 19 19 #ifdef __CFORALL__ … … 25 25 #define _INVOKE_H_ 26 26 27 #define unlikely(x) __builtin_expect(!!(x), 0)28 #define thread_local _Thread_local29 30 27 typedef void (*fptr_t)(); 31 28 typedef int_fast16_t __lock_size_t; 32 33 struct spinlock {34 volatile int lock;35 #ifdef __CFA_DEBUG__36 const char * prev_name;37 void* prev_thrd;38 #endif39 };40 29 41 30 struct __thread_queue_t { … … 58 47 void push( struct __condition_stack_t &, struct __condition_criterion_t * ); 59 48 struct __condition_criterion_t * pop( struct __condition_stack_t & ); 60 61 void ?{}(spinlock & this);62 void ^?{}(spinlock & this);63 49 } 64 50 #endif … … 122 108 struct monitor_desc { 123 109 // spinlock to protect internal data 124 struct spinlocklock;110 struct __spinlock_t lock; 125 111 126 112 // current owner of the monitor -
src/libcfa/concurrency/kernel
rc95b115 r490db327
  //-----------------------------------------------------------------------------
  // Locks
- // Lock the spinlock, spin if already acquired
- void lock ( spinlock * DEBUG_CTX_PARAM2 );
+ // // Lock the spinlock, spin if already acquired
+ // void lock ( spinlock * DEBUG_CTX_PARAM2 );

- // Lock the spinlock, yield repeatedly if already acquired
- void lock_yield( spinlock * DEBUG_CTX_PARAM2 );
+ // // Lock the spinlock, yield repeatedly if already acquired
+ // void lock_yield( spinlock * DEBUG_CTX_PARAM2 );

- // Lock the spinlock, return false if already acquired
- bool try_lock ( spinlock * DEBUG_CTX_PARAM2 );
+ // // Lock the spinlock, return false if already acquired
+ // bool try_lock ( spinlock * DEBUG_CTX_PARAM2 );

- // Unlock the spinlock
- void unlock ( spinlock * );
+ // // Unlock the spinlock
+ // void unlock ( spinlock * );

  struct semaphore {
- 	spinlock lock;
+ 	__spinlock_t lock;
  	int count;
  	__thread_queue_t waiting;
…
  struct cluster {
  	// Ready queue locks
- 	spinlock ready_queue_lock;
+ 	__spinlock_t ready_queue_lock;

  	// Ready queue for threads
…
  	FinishOpCode action_code;
  	thread_desc * thrd;
- 	spinlock * lock;
- 	spinlock ** locks;
+ 	__spinlock_t * lock;
+ 	__spinlock_t ** locks;
  	unsigned short lock_count;
  	thread_desc ** thrds;
src/libcfa/concurrency/kernel.c
rc95b115 r490db327 242 242 void finishRunning(processor * this) { 243 243 if( this->finish.action_code == Release ) { 244 unlock( this->finish.lock );244 unlock( *this->finish.lock ); 245 245 } 246 246 else if( this->finish.action_code == Schedule ) { … … 248 248 } 249 249 else if( this->finish.action_code == Release_Schedule ) { 250 unlock( this->finish.lock );250 unlock( *this->finish.lock ); 251 251 ScheduleThread( this->finish.thrd ); 252 252 } 253 253 else if( this->finish.action_code == Release_Multi ) { 254 254 for(int i = 0; i < this->finish.lock_count; i++) { 255 unlock( this->finish.locks[i] );255 unlock( *this->finish.locks[i] ); 256 256 } 257 257 } 258 258 else if( this->finish.action_code == Release_Multi_Schedule ) { 259 259 for(int i = 0; i < this->finish.lock_count; i++) { 260 unlock( this->finish.locks[i] );260 unlock( *this->finish.locks[i] ); 261 261 } 262 262 for(int i = 0; i < this->finish.thrd_count; i++) { … … 334 334 verifyf( thrd->next == NULL, "Expected null got %p", thrd->next ); 335 335 336 lock( &this_processor->cltr->ready_queue_lock DEBUG_CTX2 );336 lock( this_processor->cltr->ready_queue_lock DEBUG_CTX2 ); 337 337 append( this_processor->cltr->ready_queue, thrd ); 338 unlock( &this_processor->cltr->ready_queue_lock );338 unlock( this_processor->cltr->ready_queue_lock ); 339 339 340 340 verify( disable_preempt_count > 0 ); … … 343 343 thread_desc * nextThread(cluster * this) { 344 344 verify( disable_preempt_count > 0 ); 345 lock( &this->ready_queue_lock DEBUG_CTX2 );345 lock( this->ready_queue_lock DEBUG_CTX2 ); 346 346 thread_desc * head = pop_head( this->ready_queue ); 347 unlock( &this->ready_queue_lock );347 unlock( this->ready_queue_lock ); 348 348 verify( disable_preempt_count > 0 ); 349 349 return head; … … 358 358 } 359 359 360 void BlockInternal( spinlock* lock ) {360 void BlockInternal( __spinlock_t * lock ) { 361 361 disable_interrupts(); 362 362 this_processor->finish.action_code = Release; … … 384 384 } 385 385 386 void BlockInternal( spinlock* lock, thread_desc * thrd ) {386 void BlockInternal( __spinlock_t * lock, thread_desc * thrd ) { 387 387 assert(thrd); 388 388 disable_interrupts(); … … 398 398 } 399 399 400 void BlockInternal( spinlock* locks [], unsigned short count) {400 void BlockInternal(__spinlock_t * locks [], unsigned short count) { 401 401 disable_interrupts(); 402 402 this_processor->finish.action_code = Release_Multi; … … 411 411 } 412 412 413 void BlockInternal( spinlock* locks [], unsigned short lock_count, thread_desc * thrds [], unsigned short thrd_count) {413 void BlockInternal(__spinlock_t * locks [], unsigned short lock_count, thread_desc * thrds [], unsigned short thrd_count) { 414 414 disable_interrupts(); 415 415 this_processor->finish.action_code = Release_Multi_Schedule; … … 426 426 } 427 427 428 void LeaveThread( spinlock* lock, thread_desc * thrd) {428 void LeaveThread(__spinlock_t * lock, thread_desc * thrd) { 429 429 verify( disable_preempt_count > 0 ); 430 430 this_processor->finish.action_code = thrd ? Release_Schedule : Release; … … 516 516 } 517 517 518 static spinlockkernel_abort_lock;519 static spinlockkernel_debug_lock;518 static __spinlock_t kernel_abort_lock; 519 static __spinlock_t kernel_debug_lock; 520 520 static bool kernel_abort_called = false; 521 521 … … 523 523 // abort cannot be recursively entered by the same or different processors because all signal handlers return when 524 524 // the globalAbort flag is true. 
525 lock( &kernel_abort_lock DEBUG_CTX2 );525 lock( kernel_abort_lock DEBUG_CTX2 ); 526 526 527 527 // first task to abort ? 528 528 if ( !kernel_abort_called ) { // not first task to abort ? 529 529 kernel_abort_called = true; 530 unlock( &kernel_abort_lock );530 unlock( kernel_abort_lock ); 531 531 } 532 532 else { 533 unlock( &kernel_abort_lock );533 unlock( kernel_abort_lock ); 534 534 535 535 sigset_t mask; … … 561 561 extern "C" { 562 562 void __lib_debug_acquire() { 563 lock( &kernel_debug_lock DEBUG_CTX2 );563 lock( kernel_debug_lock DEBUG_CTX2 ); 564 564 } 565 565 566 566 void __lib_debug_release() { 567 unlock( &kernel_debug_lock );567 unlock( kernel_debug_lock ); 568 568 } 569 569 } … … 574 574 //----------------------------------------------------------------------------- 575 575 // Locks 576 void ?{}( spinlock & this ) {577 this.lock = 0;578 }579 void ^?{}( spinlock & this ) {580 581 }582 583 bool try_lock( spinlock * this DEBUG_CTX_PARAM2 ) {584 return this->lock == 0 && __sync_lock_test_and_set_4( &this->lock, 1 ) == 0;585 }586 587 void lock( spinlock * this DEBUG_CTX_PARAM2 ) {588 for ( unsigned int i = 1;; i += 1 ) {589 if ( this->lock == 0 && __sync_lock_test_and_set_4( &this->lock, 1 ) == 0 ) { break; }590 }591 LIB_DEBUG_DO(592 this->prev_name = caller;593 this->prev_thrd = this_thread;594 )595 }596 597 void lock_yield( spinlock * this DEBUG_CTX_PARAM2 ) {598 for ( unsigned int i = 1;; i += 1 ) {599 if ( this->lock == 0 && __sync_lock_test_and_set_4( &this->lock, 1 ) == 0 ) { break; }600 yield();601 }602 LIB_DEBUG_DO(603 this->prev_name = caller;604 this->prev_thrd = this_thread;605 )606 }607 608 609 void unlock( spinlock * this ) {610 __sync_lock_release_4( &this->lock );611 }612 613 576 void ?{}( semaphore & this, int count = 1 ) { 614 577 (this.lock){}; … … 619 582 620 583 void P(semaphore & this) { 621 lock( &this.lock DEBUG_CTX2 );584 lock( this.lock DEBUG_CTX2 ); 622 585 this.count -= 1; 623 586 if ( this.count < 0 ) { … … 629 592 } 630 593 else { 631 unlock( &this.lock );594 unlock( this.lock ); 632 595 } 633 596 } … … 635 598 void V(semaphore & this) { 636 599 thread_desc * thrd = NULL; 637 lock( &this.lock DEBUG_CTX2 );600 lock( this.lock DEBUG_CTX2 ); 638 601 this.count += 1; 639 602 if ( this.count <= 0 ) { … … 642 605 } 643 606 644 unlock( &this.lock );607 unlock( this.lock ); 645 608 646 609 // make new owner -
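The pointer-based implementations removed from kernel.c above (?{}, try_lock, lock, lock_yield, unlock) are presumably re-homed next to __spinlock_t in the added files. A sketch of their reference-based equivalents, derived from the deleted bodies rather than from the new sources; lock_yield is identical to lock except that it calls yield() after each failed attempt:

    void ?{}( __spinlock_t & this ) { this.lock = 0; }

    bool try_lock( __spinlock_t & this DEBUG_CTX_PARAM2 ) {
        return this.lock == 0 && __sync_lock_test_and_set_4( &this.lock, 1 ) == 0;
    }

    void lock( __spinlock_t & this DEBUG_CTX_PARAM2 ) {
        for ( unsigned int i = 1;; i += 1 ) {    // spin until the test-and-set succeeds
            if ( this.lock == 0 && __sync_lock_test_and_set_4( &this.lock, 1 ) == 0 ) break;
        }
        LIB_DEBUG_DO(
            this.prev_name = caller;             // record acquirer for debugging
            this.prev_thrd = this_thread;
        )
    }

    void unlock( __spinlock_t & this ) {
        __sync_lock_release_4( &this.lock );
    }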
src/libcfa/concurrency/kernel_private.h
rc95b115 r490db327 45 45 //Block current thread and release/wake-up the following resources 46 46 void BlockInternal(void); 47 void BlockInternal( spinlock* lock);47 void BlockInternal(__spinlock_t * lock); 48 48 void BlockInternal(thread_desc * thrd); 49 void BlockInternal( spinlock* lock, thread_desc * thrd);50 void BlockInternal( spinlock* locks [], unsigned short count);51 void BlockInternal( spinlock* locks [], unsigned short count, thread_desc * thrds [], unsigned short thrd_count);52 void LeaveThread( spinlock* lock, thread_desc * thrd);49 void BlockInternal(__spinlock_t * lock, thread_desc * thrd); 50 void BlockInternal(__spinlock_t * locks [], unsigned short count); 51 void BlockInternal(__spinlock_t * locks [], unsigned short count, thread_desc * thrds [], unsigned short thrd_count); 52 void LeaveThread(__spinlock_t * lock, thread_desc * thrd); 53 53 54 54 //----------------------------------------------------------------------------- … … 66 66 struct event_kernel_t { 67 67 alarm_list_t alarms; 68 spinlocklock;68 __spinlock_t lock; 69 69 }; 70 70 -
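The BlockInternal overloads updated above are the kernel's block-and-release primitives: the caller passes the spinlock(s) it still holds (and, in some overloads, threads to schedule), and the processor releases or schedules them in finishRunning only after the blocking thread's context has been saved. A hypothetical call, where m is an illustrative monitor_desc not taken from this changeset:

    // release m->lock and block the current thread as one atomic step
    BlockInternal( &m->lock );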
src/libcfa/concurrency/monitor.c
rc95b115 r490db327 34 34 static inline bool is_accepted( monitor_desc * this, const __monitor_group_t & monitors ); 35 35 36 static inline void lock_all ( spinlock* locks [], __lock_size_t count );37 static inline void lock_all ( monitor_desc * source [], spinlock* /*out*/ locks [], __lock_size_t count );38 static inline void unlock_all( spinlock* locks [], __lock_size_t count );36 static inline void lock_all ( __spinlock_t * locks [], __lock_size_t count ); 37 static inline void lock_all ( monitor_desc * source [], __spinlock_t * /*out*/ locks [], __lock_size_t count ); 38 static inline void unlock_all( __spinlock_t * locks [], __lock_size_t count ); 39 39 static inline void unlock_all( monitor_desc * locks [], __lock_size_t count ); 40 40 41 static inline void save ( monitor_desc * ctx [], __lock_size_t count, spinlock* locks [], unsigned int /*out*/ recursions [], __waitfor_mask_t /*out*/ masks [] );42 static inline void restore( monitor_desc * ctx [], __lock_size_t count, spinlock* locks [], unsigned int /*in */ recursions [], __waitfor_mask_t /*in */ masks [] );41 static inline void save ( monitor_desc * ctx [], __lock_size_t count, __spinlock_t * locks [], unsigned int /*out*/ recursions [], __waitfor_mask_t /*out*/ masks [] ); 42 static inline void restore( monitor_desc * ctx [], __lock_size_t count, __spinlock_t * locks [], unsigned int /*in */ recursions [], __waitfor_mask_t /*in */ masks [] ); 43 43 44 44 static inline void init ( __lock_size_t count, monitor_desc * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] ); … … 53 53 static inline __lock_size_t count_max ( const __waitfor_mask_t & mask ); 54 54 static inline __lock_size_t aggregate ( monitor_desc * storage [], const __waitfor_mask_t & mask ); 55 56 #ifndef __CFA_LOCK_NO_YIELD 57 #define DO_LOCK lock_yield 58 #else 59 #define DO_LOCK lock 60 #endif 55 61 56 62 //----------------------------------------------------------------------------- … … 71 77 unsigned int recursions[ count ]; /* Save the current recursion levels to restore them later */ \ 72 78 __waitfor_mask_t masks [ count ]; /* Save the current waitfor masks to restore them later */ \ 73 spinlock * locks[ count ]; /* We need to pass-in an array of locks to BlockInternal */ \79 __spinlock_t * locks [ count ]; /* We need to pass-in an array of locks to BlockInternal */ \ 74 80 75 81 #define monitor_save save ( monitors, count, locks, recursions, masks ) … … 84 90 // Enter single monitor 85 91 static void __enter_monitor_desc( monitor_desc * this, const __monitor_group_t & group ) { 86 // Lock the monitor spinlock , lock_yield to reduce contention87 lock_yield( &this->lock DEBUG_CTX2 );92 // Lock the monitor spinlock 93 DO_LOCK( this->lock DEBUG_CTX2 ); 88 94 thread_desc * thrd = this_thread; 89 95 … … 127 133 128 134 // Release the lock and leave 129 unlock( &this->lock );135 unlock( this->lock ); 130 136 return; 131 137 } 132 138 133 139 static void __enter_monitor_dtor( monitor_desc * this, fptr_t func ) { 134 // Lock the monitor spinlock , lock_yield to reduce contention135 lock_yield( &this->lock DEBUG_CTX2 );140 // Lock the monitor spinlock 141 DO_LOCK( this->lock DEBUG_CTX2 ); 136 142 thread_desc * thrd = this_thread; 137 143 … … 145 151 set_owner( this, thrd ); 146 152 147 unlock( &this->lock );153 unlock( this->lock ); 148 154 return; 149 155 } … … 196 202 // Leave single monitor 197 203 void __leave_monitor_desc( monitor_desc * this ) { 198 // Lock the monitor spinlock, lock_yieldto reduce contention199 lock_yield( &this->lock 
DEBUG_CTX2 );204 // Lock the monitor spinlock, DO_LOCK to reduce contention 205 DO_LOCK( this->lock DEBUG_CTX2 ); 200 206 201 207 LIB_DEBUG_PRINT_SAFE("Kernel : %10p Leaving mon %p (%p)\n", this_thread, this, this->owner); … … 210 216 if( this->recursion != 0) { 211 217 LIB_DEBUG_PRINT_SAFE("Kernel : recursion still %d\n", this->recursion); 212 unlock( &this->lock );218 unlock( this->lock ); 213 219 return; 214 220 } … … 218 224 219 225 // We can now let other threads in safely 220 unlock( &this->lock );226 unlock( this->lock ); 221 227 222 228 //We need to wake-up the thread … … 243 249 244 250 // Lock the monitor now 245 lock_yield( &this->lock DEBUG_CTX2 );251 DO_LOCK( this->lock DEBUG_CTX2 ); 246 252 247 253 disable_interrupts(); … … 730 736 } 731 737 732 static inline void lock_all( spinlock* locks [], __lock_size_t count ) {738 static inline void lock_all( __spinlock_t * locks [], __lock_size_t count ) { 733 739 for( __lock_size_t i = 0; i < count; i++ ) { 734 lock_yield(locks[i] DEBUG_CTX2 );735 } 736 } 737 738 static inline void lock_all( monitor_desc * source [], spinlock* /*out*/ locks [], __lock_size_t count ) {740 DO_LOCK( *locks[i] DEBUG_CTX2 ); 741 } 742 } 743 744 static inline void lock_all( monitor_desc * source [], __spinlock_t * /*out*/ locks [], __lock_size_t count ) { 739 745 for( __lock_size_t i = 0; i < count; i++ ) { 740 spinlock* l = &source[i]->lock;741 lock_yield(l DEBUG_CTX2 );746 __spinlock_t * l = &source[i]->lock; 747 DO_LOCK( *l DEBUG_CTX2 ); 742 748 if(locks) locks[i] = l; 743 749 } 744 750 } 745 751 746 static inline void unlock_all( spinlock* locks [], __lock_size_t count ) {752 static inline void unlock_all( __spinlock_t * locks [], __lock_size_t count ) { 747 753 for( __lock_size_t i = 0; i < count; i++ ) { 748 unlock( locks[i] );754 unlock( *locks[i] ); 749 755 } 750 756 } … … 752 758 static inline void unlock_all( monitor_desc * locks [], __lock_size_t count ) { 753 759 for( __lock_size_t i = 0; i < count; i++ ) { 754 unlock( &locks[i]->lock );760 unlock( locks[i]->lock ); 755 761 } 756 762 } … … 759 765 monitor_desc * ctx [], 760 766 __lock_size_t count, 761 __attribute((unused)) spinlock* locks [],767 __attribute((unused)) __spinlock_t * locks [], 762 768 unsigned int /*out*/ recursions [], 763 769 __waitfor_mask_t /*out*/ masks [] … … 772 778 monitor_desc * ctx [], 773 779 __lock_size_t count, 774 spinlock* locks [],780 __spinlock_t * locks [], 775 781 unsigned int /*out*/ recursions [], 776 782 __waitfor_mask_t /*out*/ masks [] -
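The new DO_LOCK macro centralizes the choice of acquire routine for the monitor-internal locks: by default it expands to lock_yield, and a build that defines __CFA_LOCK_NO_YIELD falls back to the plain spinning lock. For illustration:

    // default build (__CFA_LOCK_NO_YIELD not defined):
    DO_LOCK( this->lock DEBUG_CTX2 );   // expands to lock_yield( this->lock DEBUG_CTX2 )

    // built with -D__CFA_LOCK_NO_YIELD:
    DO_LOCK( this->lock DEBUG_CTX2 );   // expands to lock( this->lock DEBUG_CTX2 )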
src/libcfa/concurrency/preemption.c
rc95b115 r490db327 355 355 case SI_KERNEL: 356 356 // LIB_DEBUG_PRINT_SAFE("Kernel : Preemption thread tick\n"); 357 lock( &event_kernel->lock DEBUG_CTX2 );357 lock( event_kernel->lock DEBUG_CTX2 ); 358 358 tick_preemption(); 359 unlock( &event_kernel->lock );359 unlock( event_kernel->lock ); 360 360 break; 361 361 // Signal was not sent by the kernel but by an other thread -
src/prelude/builtins.c
rc95b115 r490db327 80 80 } // ?\? 81 81 82 static inline forall( otype T | { void ?{}( T & this, one_t ); T ?*?( T, T ); double ?/?( double, T ); } ) 83 double ?\?( T x, signed long int y ) { 84 if ( y >= 0 ) return (double)(x \ (unsigned long int)y); 85 else return 1.0 / x \ (unsigned long int)(-y); 86 } // ?\? 82 // FIXME (x \ (unsigned long int)y) relies on X ?\?(T, unsigned long) a function that is neither 83 // defined, nor passed as an assertion parameter. Without user-defined conversions, cannot specify 84 // X as a type that casts to double, yet it doesn't make sense to write functions with that type 85 // signature where X is double. 86 87 // static inline forall( otype T | { void ?{}( T & this, one_t ); T ?*?( T, T ); double ?/?( double, T ); } ) 88 // double ?\?( T x, signed long int y ) { 89 // if ( y >= 0 ) return (double)(x \ (unsigned long int)y); 90 // else return 1.0 / x \ (unsigned long int)(-y); 91 // } // ?\? 87 92 88 93 static inline long int ?\=?( long int & x, unsigned long int y ) { x = x \ y; return x; }
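For context on the FIXME above: the disabled operator needs an exponentiation of T by an unsigned exponent whose result can be used as a double, and without user-defined conversions the assertion system cannot express "some type convertible to double". One workaround, shown purely as a hypothetical sketch and not part of this changeset, is to demand the operation explicitly with double as its result type; the FIXME's point is precisely that this signature is unnatural, since the integral ?\? overloads return their operand type:

    static inline forall( otype T | { void ?{}( T & this, one_t ); T ?*?( T, T );
                                      double ?/?( double, T );
                                      double ?\?( T, unsigned long int ); } )   // added assertion (hypothetical)
    double ?\?( T x, signed long int y ) {
        if ( y >= 0 ) return x \ (unsigned long int)y;
        else return 1.0 / x \ (unsigned long int)(-y);
    } // ?\?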