Changeset 911348cd


Timestamp:
Jul 15, 2016, 11:34:46 AM (8 years ago)
Author:
Thierry Delisle <tdelisle@…>
Branches:
ADT, aaron-thesis, arm-eh, ast-experimental, cleanup-dtors, ctor, deferred_resn, demangler, enum, forall-pointer-decay, jacob/cs343-translation, jenkins-sandbox, master, memory, new-ast, new-ast-unique-expr, new-env, no_list, persistent-indexer, pthread-emulation, qualifiedEnum, resolv-new, with_gc
Children:
52c97dd
Parents:
ed5ad08
Message:

added comments and options support for multi-threaded tests

File:
1 edited

  • src/tests/test.py

    red5ad08 r911348cd  
    1616################################################################################
    1717
     18# Test class that defines what a test is
    1819class Test:
    1920    def __init__(self, name, path):
    2021        self.name, self.path = name, path
    2122
     23# parses the Makefile to find the machine type (32-bit / 64-bit)
    2224def getMachineType():
    2325        with open('Makefile') as file:
     
    2628                return m.group(1)
    2729
     30# reads the directory ./.expect and identifies the tests
    2831def listTests():
    2932        machineType = getMachineType()
    3033
     34        # tests directly in the .expect folder will always be processed
    3135        generic_list = map(lambda fname: Test(fname, fname),
    3236                [splitext(f)[0] for f in listdir('./.expect')
     
    3438                ])
    3539
     40        # tests in the machineType folder will be run only for the corresponding compiler
    3641        typed_list = map(lambda fname: Test( fname, "%s/%s" % (machineType, fname) ),
    3742                [splitext(f)[0] for f in listdir("./.expect/%s" % machineType)
     
    3944                ])
    4045
     46        # append both lists to get the full list of tests
    4147        return generic_list + typed_list
    4248
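For illustration only, a minimal sketch (the file names are assumptions, not part of the changeset) of how the merged list from listTests() could be inspected:

    # Hypothetical example: with .expect/hello.txt and .expect/x64/vector.txt
    # present, this would print lines such as "hello (hello)" and
    # "vector (x64/vector)".
    for t in listTests():
            print("%s (%s)" % (t.name, t.path))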
     49# helper function to run terminal commands
    4350def sh(cmd, dry_run = False, print2stdout = True):
    44         if dry_run :
     51        if dry_run :    # if this is a dry_run, only print the commands that would be run
    4552                print("cmd: %s" % cmd)
    4653                return 0, None
    47         else :
     54        else :                  # otherwise create a pipe and run the desired command
    4855                proc = Popen(cmd, stdout=None if print2stdout else PIPE, stderr=STDOUT, shell=True)
    4956                out, err = proc.communicate()
    5057                return proc.returncode, out
    5158
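As a usage sketch (the command strings are assumptions), the sh() helper above can either echo the command in dry-run mode or capture its output through a pipe:

    # Hypothetical calls to the sh() helper defined above.
    ret, out = sh("echo hello", dry_run=True)          # only prints: cmd: echo hello
    ret, out = sh("echo hello", print2stdout=False)    # captures stdout via a pipe
    if ret != 0 :
            print("command failed: %s" % out)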
     59# helper function to replace patterns in a file
    5260def file_replace(fname, pat, s_after):
    5361    # first, see if the pattern is even in the file.
     
    6573        os.rename(out_fname, fname)
    6674
     75# test output may differ depending on the make recursion depth (MAKELEVEL)
    6776def fix_MakeLevel(file) :
    6877        if environ.get('MAKELEVEL') :
    6978                file_replace(file, "make\[%i\]" % int(environ.get('MAKELEVEL')), 'make' )
    7079
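A hedged sketch of the substitution performed by fix_MakeLevel(), assuming MAKELEVEL is 2 (the value and the sample line are assumptions):

    # Hypothetical example: with MAKELEVEL=2, a line such as
    #   make[2]: Entering directory '/tmp/tests'
    # is rewritten so the expected output does not depend on the nesting depth:
    #   make: Entering directory '/tmp/tests'
    import re
    print(re.sub(r"make\[2\]", "make", "make[2]: Entering directory '/tmp/tests'"))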
     80# helper function to check if a file contains only a specific string
    7181def fileContainsOnly(file, text) :
    7282        with open(file) as f:
    7383                ff = f.read().strip()
    7484                result = ff == text.strip()
    75                 #
    76                 # print("Comparing :\n\t'%s'\nWith:\n\t'%s'" % (ff, text))
    77                 # print("Result is : \n\t", end="")
    78                 # print(result)
    7985
    8086                return result;
    8187
     88# check whether or not a file is executable
    8289def fileIsExecutable(file) :
    8390        try :
     
    9097                return False
    9198
     99# find the test data for a given test name
    92100def filterTests(testname) :
    93101        found = [test for test in allTests if test.name == testname]
     
    99107def run_test_instance(test, generate, dry_run):
    100108
     109        # find the output file based on the test name and options flag
    101110        out_file = (".out/%s.log" % test.name) if not generate else (".expect/%s.txt" % test.path)
    102111
     112        # remove any outputs from the previous tests to prevent side effects
    103113        sh("rm -f %s" % out_file, dry_run)
    104114        sh("rm -f %s > /dev/null 2>&1" % test.name, dry_run)
     
    107117        make_ret, _ = sh("%s %s 2> %s 1> /dev/null" % (make_cmd, test.name, out_file), dry_run)
    108118
     119        # if the make command succeeds, continue; otherwise skip to the diff
    109120        if make_ret == 0 :
    110121                # fetch optional input
     
    121132        error = None
    122133
     134        # fix the output to prevent the make depth from causing issues
    123135        fix_MakeLevel(out_file)
    124136
    125137        if generate :
     138                # if we are only generating the output we still need to check that the test actually exists
    126139                if not dry_run and fileContainsOnly(out_file, "make: *** No rule to make target `%s'.  Stop." % test.name) :
    127140                        retcode = 1;
     
    145158                                        ".expect/%s.txt .out/%s.log")
    146159
     160                # fetch return code and error from the diff command
    147161                retcode, error = sh(diff_cmd % (test.path, test.name), dry_run, False)
    148162
     
    152166        return retcode, error
    153167
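A hedged usage sketch of the per-test driver above (the test name is an assumption; dry_run keeps the commands from actually running):

    # Hypothetical: exercise one test without regenerating its expected output.
    failed, err = run_test_instance(Test('vector', 'vector'), generate=False, dry_run=True)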
    154 def run_tests(tests, generate, dry_run) :
     168# run the given list of tests with the given parameters
     169def run_tests(tests, generate, dry_run, jobs) :
     170        # clean the sandbox of leftovers from previous runs
    155171        sh("%s clean > /dev/null 2>&1" % make_cmd, dry_run)
     172
     173        # make sure the required folders are present
    156174        sh('mkdir -p .out .expect', dry_run)
    157175
     
    160178
    161179        failed = False;
     180        # for each test to run
    162181        for t in tests:
     182                # print formatted name
    163183                print("%20s  " % t.name, end="")
    164184                sys.stdout.flush()
     185
     186                # run the test instance and collect the result
    165187                test_failed, error = run_test_instance(t, generate, dry_run)
     188
     189                # aggregate test suite result
    166190                failed = test_failed or failed
    167191
     192                # update output based on current action
    168193                if generate :
    169194                        failed_txt = "ERROR"
     
    173198                        success_txt = "PASSED"
    174199
     200                # print result with error if needed
    175201                print(failed_txt if test_failed else success_txt)
    176202                if error :
    177203                        print(error, file=sys.stderr)
    178204
     205        # clean the workspace
    179206        sh("%s clean > /dev/null 2>&1" % make_cmd, dry_run)
    180207
     
    184211#               main loop
    185212################################################################################
     213# create a parser with the arguments for the test script
    186214parser = argparse.ArgumentParser(description='Script which runs cforall tests')
    187215parser.add_argument('--dry-run', help='Don\'t run the tests, only output the commands', action='store_true')
     
    189217parser.add_argument('--all', help='Run all available tests', action='store_true')
    190218parser.add_argument('--regenerate-expected', help='Regenerate the .expect files by running the specified tests; can be used with the --all option', action='store_true')
     219parser.add_argument('-j', '--jobs', help='Number of tests to run simultaneously', type=int, default='1')
    191220parser.add_argument('tests', metavar='test', type=str, nargs='*', help='a list of tests to run')
    192221
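A small sketch (the sample command lines are assumptions) of how the parser above, including the new -j/--jobs flag, interprets its arguments:

    # Hypothetical invocations, equivalent to running the script as
    #   ./test.py --all -j 4        or        ./test.py vector --dry-run
    opts = parser.parse_args(['--all', '-j', '4'])
    print(opts.all, opts.jobs)            # True 4
    opts = parser.parse_args(['vector', '--dry-run'])
    print(opts.tests, opts.dry_run)       # ['vector'] True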
     222# parse the command line arguments
    193223options = parser.parse_args()
    194224
     225# the tests to run must be specified either explicitly or with --all, but not both
    195226if (len(options.tests) > 0  and     options.all and not options.list) \
    196227or (len(options.tests) == 0 and not options.all and not options.list) :
     
    199230        sys.exit(1)
    200231
     232# fetch the list of all valid tests
    201233allTests = listTests()
    202234
     235# if the user wants all tests then no other treatment of the test list is required
    203236if options.all or options.list :
    204237        tests = allTests
    205238
    206239else :
     240        # otherwise we need to validate the test list that was entered
    207241        tests = []
    208242
     243        # if we are regenerating the tests we need to find the information for the
     244        # already existing tests and create new info for the new tests
    209245        if options.regenerate_expected :
    210246                tests = map(filterTests, options.tests)
    211247
    212248        else :
     249                # otherwise we only need to validate that all tests are present in the complete list
    213250                for testname in options.tests:
    214251                        test = [t for t in allTests if t.name == testname]
     
    219256                                print('ERROR: No expected file for test %s, ignoring it' % testname, file=sys.stderr)
    220257
     258        # make sure we have at least one test to run
    221259        if len(tests) == 0 :
    222260                print('ERROR: No valid test to run', file=sys.stderr)
    223261                sys.exit(1)
    224262
     263# sort the tests alphabetically for convenience
    225264tests.sort(key=lambda t: t.name)
    226265
     266# check if the user already passed in a number of jobs for multi-threading
    227267make_flags = environ.get('MAKEFLAGS')
    228 make_cmd = "make" if make_flags and "-j" in make_flags else "make -j8"
    229 
     268make_has_max_jobs = make_flags and "-j" in make_flags
     269make_max_jobs = re.search("(-j|--jobs)\s*([0-9]+)", make_flags).group(2) if make_has_max_jobs else None
     270make_cmd = "make" if make_has_max_jobs else "make -j8"
     271
     272# make sure we have a valid number of jobs that corresponds to user input
     273options.jobs = int(make_max_jobs) if make_max_jobs else options.jobs
     274if options.jobs <= 0 :
     275        print('ERROR: Invalid number of jobs', file=sys.stderr)
     276        sys.exit(1)
     277
     278
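For reference, a hedged sketch (the MAKEFLAGS values are assumed) of how the -j extraction above behaves:

    # Hypothetical MAKEFLAGS values to illustrate the regex used above.
    import re
    for flags in ["-j8", "--jobs 4", "w -- -j16", ""]:
            m = re.search(r"(-j|--jobs)\s*([0-9]+)", flags) if flags and "-j" in flags else None
            print("%-10s -> %s" % (flags, m.group(2) if m else "fall back to the --jobs option"))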
     279# users may want to simply list the tests
    230280if options.list :
    231281        print("\n".join(map(lambda t: "%s (%s)" % (t.name, t.path), tests)))
    232282
    233283else :
     284        # otherwise run all tests and make sure to return the correct error code
    234285        sys.exit( run_tests(tests, options.regenerate_expected, options.dry_run) )