Changeset 31e46b8 for src/tests/test.py


Timestamp: Jul 22, 2016, 2:05:52 PM (9 years ago)
Author: Rob Schluntz <rschlunt@…>
Branches: ADT, aaron-thesis, arm-eh, ast-experimental, cleanup-dtors, ctor, deferred_resn, demangler, enum, forall-pointer-decay, jacob/cs343-translation, jenkins-sandbox, master, memory, new-ast, new-ast-unique-expr, new-env, no_list, persistent-indexer, pthread-emulation, qualifiedEnum, resolv-new, with_gc
Children: ccb447e, e4957e7
Parents: 956a9c77 (diff), ef3b335 (diff)
Note: this is a merge changeset, the changes displayed below correspond to the merge itself.
Use the (diff) links above to see all the changes relative to each parent.
Message: Merge branch 'master' of plg.uwaterloo.ca:/u/cforall/software/cfa/cfa-cc

File: 1 edited

Legend:

  ' '  Unmodified
  '+'  Added
  '-'  Removed
  • src/tests/test.py

r956a9c77 → r31e46b8

 #               help functions
 ################################################################################
+
+# Test class that defines what a test is
+class Test:
+    def __init__(self, name, path):
+        self.name, self.path = name, path
+
+# uses the Makefile to build a dummy executable and find the machine type (32-bit / 64-bit)
+def getMachineType():
+        sh('echo "int main() { return 0; }" > .dummy.c')
+        sh("make .dummy", print2stdout=False)
+        _, out = sh("file .dummy", print2stdout=False)
+        sh("rm -f .dummy.c > /dev/null 2>&1")
+        sh("rm -f .dummy > /dev/null 2>&1")
+        return re.search("ELF\s([0-9]+)-bit", out).group(1)
+
+# reads the directory ./.expect and identifies the tests
 def listTests():
-        list = [splitext(f)[0] for f in listdir('./.expect')
+        machineType = getMachineType()
+
+        print(machineType)
+
+        # tests directly in the .expect folder will always be processed
+        generic_list = map(lambda fname: Test(fname, fname),
+                [splitext(f)[0] for f in listdir('./.expect')
                 if not f.startswith('.') and f.endswith('.txt')
-                ]
-
-        return list
-
+                ])
+
+        # tests in the machineType folder will be run only for the corresponding compiler
+        typed_list = map(lambda fname: Test( fname, "%s/%s" % (machineType, fname) ),
+                [splitext(f)[0] for f in listdir("./.expect/%s" % machineType)
+                if not f.startswith('.') and f.endswith('.txt')
+                ])
+
+        # append both lists to get the complete list of tests
+        return generic_list + typed_list
+
+# helper function to run terminal commands
 def sh(cmd, dry_run = False, print2stdout = True):
-        if dry_run :
+        if dry_run :    # if this is a dry_run, only print the commands that would be run
                 print("cmd: %s" % cmd)
                 return 0, None
-        else :
+        else :                  # otherwise create a pipe and run the desired command
                 proc = Popen(cmd, stdout=None if print2stdout else PIPE, stderr=STDOUT, shell=True)
                 out, err = proc.communicate()
                 return proc.returncode, out

+# helper function to replace patterns in a file
 def file_replace(fname, pat, s_after):
     # first, see if the pattern is even in the file.
…
         os.rename(out_fname, fname)

+# test output may differ depending on the depth of the makefile
 def fix_MakeLevel(file) :
         if environ.get('MAKELEVEL') :
                 file_replace(file, "make\[%i\]" % int(environ.get('MAKELEVEL')), 'make' )

+# helper function to check if a file contains only a specific string
 def fileContainsOnly(file, text) :
         with open(file) as f:
                 ff = f.read().strip()
                 result = ff == text.strip()
-                #
-                # print("Comparing :\n\t'%s'\nWith:\n\t'%s'" % (ff, text))
-                # print("Result is : \n\t", end="")
-                # print(result)

                 return result;

+# check whether or not a file is executable
 def fileIsExecutable(file) :
         try :
…
                 return False

+# find the test data for a given test name
+def filterTests(testname) :
+        found = [test for test in allTests if test.name == testname]
+        return (found[0] if len(found) == 1 else Test(testname, testname) )
+
 ################################################################################
 #               running test functions
…
 def run_test_instance(test, generate, dry_run):

-        out_file = (".out/%s.log" % test) if not generate else (".expect/%s.txt" % test)
-
+        # find the output file based on the test name and options flag
+        out_file = (".out/%s.log" % test.name) if not generate else (".expect/%s.txt" % test.path)
+
+        # remove any outputs from the previous tests to prevent side effects
         sh("rm -f %s" % out_file, dry_run)
-        sh("rm -f %s > /dev/null 2>&1" % test, dry_run)
+        sh("rm -f %s > /dev/null 2>&1" % test.name, dry_run)

         # build, skipping to next test on error
-        make_ret, _ = sh("%s %s 2> %s 1> /dev/null" % (make_cmd, test, out_file), dry_run)
-
+        make_ret, _ = sh("%s %s 2> %s 1> /dev/null" % (make_cmd, test.name, out_file), dry_run)
+
+        # if the make command succeeds, continue; otherwise skip to the diff
         if make_ret == 0 :
                 # fetch optional input
-                stdinput = "< .in/%s.txt" % test if isfile(".in/%s.txt" % test) else ""
-
-                if fileIsExecutable(test) :
+                stdinput = "< .in/%s.txt" % test.name if isfile(".in/%s.txt" % test.path) else ""
+
+                if fileIsExecutable(test.name) :
                         # run test
-                        sh("./%s %s > %s 2>&1" % (test, stdinput, out_file), dry_run)
+                        sh("./%s %s > %s 2>&1" % (test.name, stdinput, out_file), dry_run)
                 else :
                         # simply cat the result into the output
-                        sh("cat %s > %s" % (test, out_file), dry_run)
+                        sh("cat %s > %s" % (test.name, out_file), dry_run)

         retcode = 0
         error = None

+        # fix output to prevent the make depth from causing issues
         fix_MakeLevel(out_file)

         if generate :
-                if not dry_run and fileContainsOnly(out_file, "make: *** No rule to make target `%s'.  Stop." % test) :
+                # if we are only generating the output we still need to check that the test actually exists
+                if not dry_run and fileContainsOnly(out_file, "make: *** No rule to make target `%s'.  Stop." % test.name) :
                         retcode = 1;
                         error = "\t\tNo make target for test %s!" % test
…
                                         ".expect/%s.txt .out/%s.log")

-                retcode, error = sh(diff_cmd % (test, test), dry_run, False)
+                # fetch return code and error from the diff command
+                retcode, error = sh(diff_cmd % (test.path, test.name), dry_run, False)

         # clean the executable
-        sh("rm -f %s > /dev/null 2>&1" % test, dry_run)
+        sh("rm -f %s > /dev/null 2>&1" % test.name, dry_run)

         return retcode, error

-def run_tests(tests, generate, dry_run) :
+# run the given list of tests with the given parameters
+def run_tests(tests, generate, dry_run, jobs) :
+        # clean the sandbox from previous commands
         sh("%s clean > /dev/null 2>&1" % make_cmd, dry_run)
+
+        # make sure the required folders are present
         sh('mkdir -p .out .expect', dry_run)

…

         failed = False;
+        # for each test to run
         for t in tests:
-                print("%20s  " % t, end="")
-                sys.stdout.flush()
+                # print the formatted name
+                name_txt = "%20s  " % t.name
+
+                # run the test instance and collect the result
                 test_failed, error = run_test_instance(t, generate, dry_run)
+
+                # aggregate test suite result
                 failed = test_failed or failed

+                # update output based on current action
                 if generate :
                         failed_txt = "ERROR"
…
                         success_txt = "PASSED"

-                print(failed_txt if test_failed else success_txt)
+                # print result with error if needed
+                text = name_txt + (failed_txt if test_failed else success_txt)
+                out = sys.stdout
                 if error :
-                        print(error, file=sys.stderr)
-
+                        text = text + "\n" + error
+                        out = sys.stderr
+
+                print(text, file = out);
+                sys.stdout.flush()
+                sys.stderr.flush()
+
+
+        # clean the workspace
         sh("%s clean > /dev/null 2>&1" % make_cmd, dry_run)

…
 #               main loop
 ################################################################################
+# create a parser with the arguments for the test script
 parser = argparse.ArgumentParser(description='Script which runs cforall tests')
 parser.add_argument('--dry-run', help='Don\'t run the tests, only output the commands', action='store_true')
…
 parser.add_argument('--all', help='Run all test available', action='store_true')
 parser.add_argument('--regenerate-expected', help='Regenerate the .expect by running the specified tets, can be used with --all option', action='store_true')
+parser.add_argument('-j', '--jobs', help='Number of tests to run simultaneously', type=int, default='8')
 parser.add_argument('tests', metavar='test', type=str, nargs='*', help='a list of tests to run')

+# parse the command line arguments
 options = parser.parse_args()

+# the script must have at least some tests to run
 if (len(options.tests) > 0  and     options.all and not options.list) \
 or (len(options.tests) == 0 and not options.all and not options.list) :
…
         sys.exit(1)

+# fetch the list of all valid tests
 allTests = listTests()

+# if the user wants all tests then no other treatment of the test list is required
 if options.all or options.list :
         tests = allTests

 else :
+        # otherwise we need to validate the test list that was entered
         tests = []
-        for test in options.tests:
-                if test in allTests or options.regenerate_expected :
-                        tests.append(test)
-                else :
-                        print('ERROR: No expected file for test %s, ignoring it' % test, file=sys.stderr)
-
+
+        # if we are regenerating the tests we need to find the information for the
+        # already existing tests and create new info for the new tests
+        if options.regenerate_expected :
+                tests = map(filterTests, options.tests)
+
+        else :
+                # otherwise we only need to validate that all tests are present in the complete list
+                for testname in options.tests:
+                        test = [t for t in allTests if t.name == testname]
+
+                        if len(test) != 0 :
+                                tests.append( test[0] )
+                        else :
+                                print('ERROR: No expected file for test %s, ignoring it' % testname, file=sys.stderr)
+
+        # make sure we have at least some tests to run
         if len(tests) == 0 :
                 print('ERROR: No valid test to run', file=sys.stderr)
                 sys.exit(1)

-tests.sort()
+# sort the tests alphabetically for convenience
+tests.sort(key=lambda t: t.name)
+
+# check if the user already passed in a number of jobs for multi-threading
 make_flags = environ.get('MAKEFLAGS')
+make_has_max_jobs = re.search("(-j|--jobs)\s*([0-9]+)", make_flags) if make_flags else None
+make_max_jobs = make_has_max_jobs.group(2) if make_has_max_jobs else None
 make_cmd = "make" if make_flags and "-j" in make_flags else "make -j8"

+# make sure we have a valid number of jobs that corresponds to user input
+options.jobs = int(make_max_jobs) if make_max_jobs else options.jobs
+if options.jobs <= 0 :
+        print('ERROR: Invalid number of jobs', file=sys.stderr)
+        sys.exit(1)
+
+# users may want to simply list the tests
 if options.list :
-        print("\n".join(tests))
+        print("\n".join(map(lambda t: "%s (%s)" % (t.name, t.path), tests)))

 else :
-        sys.exit( run_tests(tests, options.regenerate_expected, options.dry_run) )
+        # otherwise run all tests and make sure to return the correct error code
+        sys.exit( run_tests(tests, options.regenerate_expected, options.dry_run, options.jobs) )
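
For context on the new machine-type detection: getMachineType compiles a throwaway C file through the project Makefile and parses the output of the `file` utility for the ELF word size. Below is a minimal self-contained sketch of the same idea, assuming a plain C compiler instead of the test Makefile; the helper name and the use of subprocess/tempfile are illustrative only, not part of the changeset.

import os
import re
import subprocess
import tempfile

def detect_machine_type(cc="cc"):
    """Return '32' or '64' depending on the ELF class of a dummy executable."""
    with tempfile.TemporaryDirectory() as tmp:
        src = os.path.join(tmp, "dummy.c")
        exe = os.path.join(tmp, "dummy")
        with open(src, "w") as f:
            f.write("int main() { return 0; }\n")
        # build the dummy program with the default flags of the given compiler
        subprocess.check_call([cc, src, "-o", exe])
        # `file` reports something like "ELF 64-bit LSB executable, ..."
        out = subprocess.check_output(["file", exe]).decode()
        match = re.search(r"ELF\s+([0-9]+)-bit", out)
        return match.group(1) if match else None

if __name__ == "__main__":
    print(detect_machine_type())   # e.g. '64' on a 64-bit toolchain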
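
Similarly, the job-count handling added at the bottom of the script looks for an existing -j/--jobs value in MAKEFLAGS and lets it override the --jobs command-line option. A small sketch of how that regular expression behaves, using a hypothetical stand-alone helper (the function name and sample strings below are assumptions for illustration):

import re

def jobs_from_makeflags(make_flags, default=8):
    """Extract the -j/--jobs count from a MAKEFLAGS-style string, falling back to a default."""
    if not make_flags:
        return default
    match = re.search(r"(-j|--jobs)\s*([0-9]+)", make_flags)
    return int(match.group(2)) if match else default

assert jobs_from_makeflags(None) == 8          # MAKEFLAGS not set
assert jobs_from_makeflags("-j4") == 4         # explicit job count
assert jobs_from_makeflags("--jobs 16") == 16  # long option form
assert jobs_from_makeflags("-j") == 8          # bare -j carries no count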