Changeset 90152a4 for tests/test.py


Timestamp: Aug 27, 2018, 4:40:34 PM (7 years ago)
Author: Rob Schluntz <rschlunt@…>
Branches: ADT, arm-eh, ast-experimental, cleanup-dtors, enum, forall-pointer-decay, jacob/cs343-translation, jenkins-sandbox, master, new-ast, new-ast-unique-expr, pthread-emulation, qualifiedEnum
Children: b7c89aa
Parents: f9feab8 (diff), 305581d (diff)
Note: this is a merge changeset; the changes displayed below correspond to the merge itself. Use the (diff) links above to see all the changes relative to each parent.
Message: Merge branch 'master' into cleanup-dtors

File: 1 moved

Legend: lines prefixed with '+' were added, '-' removed, ' ' unmodified.

  • tests/test.py

--- tests/test.py (rf9feab8)
+++ tests/test.py (r90152a4)
@@ -9,4 +9,5 @@
 import re
 import sys
+import time
 
 ################################################################################
     
@@ -18,5 +19,5 @@
 
         def matchTest(path):
-                match = re.search("(\.[\w\/\-_]*)\/.expect\/([\w\-_]+)(\.[\w\-_]+)?\.txt", path)
+                match = re.search("%s\/([\w\/\-_]*).expect\/([\w\-_]+)(\.[\w\-_]+)?\.txt" % settings.SRCDIR, path)
                 if match :
                         test = Test()
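The rewritten pattern anchors expect files under settings.SRCDIR instead of accepting any dotted prefix. As a minimal sketch of what the three capture groups extract, with a made-up source directory and test path standing in for the real values:

    import re

    SRCDIR = "/home/user/cfa/tests"  # hypothetical stand-in for settings.SRCDIR

    pattern = "%s\/([\w\/\-_]*).expect\/([\w\-_]+)(\.[\w\-_]+)?\.txt" % SRCDIR
    match = re.search(pattern, "/home/user/cfa/tests/concurrent/.expect/coroutine.x64.txt")

    print(match.group(1))  # 'concurrent/' : subdirectory relative to SRCDIR
    print(match.group(2))  # 'coroutine'   : test name
    print(match.group(3))  # '.x64'        : optional architecture suffix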
     
@@ -42,5 +43,5 @@
         if includes:
                 test_list = [x for x in test_list if
-                        x.path.startswith( tuple(includes) )
+                        x.target().startswith( tuple(includes) )
                 ]
 
     
@@ -48,5 +49,5 @@
         if excludes:
                 test_list = [x for x in test_list if not
-                        x.path.startswith( tuple(excludes) )
+                        x.target().startswith( tuple(excludes) )
                 ]
 
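Both the include and exclude filters lean on the fact that str.startswith accepts a tuple of prefixes and succeeds if any one of them matches; a small illustration with made-up targets:

    # made-up test targets, for illustration only
    test_list = ["concurrent/coroutine", "concurrent/thread", "io/read", "math/sqrt"]
    includes = ["concurrent", "io"]

    # keep only the tests whose target starts with one of the include prefixes
    kept = [t for t in test_list if t.startswith( tuple(includes) )]
    print(kept)  # ['concurrent/coroutine', 'concurrent/thread', 'io/read']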
     
@@ -61,6 +62,7 @@
         if options.regenerate_expected :
                 for testname in options.tests :
+                        testname = canonicalPath( testname )
                         if Test.valid_name(testname):
-                                found = [test for test in allTests if test.target() == testname]
+                                found = [test for test in allTests if canonicalPath( test.target() ) == testname]
                                 tests.append( found[0] if len(found) == 1 else Test.from_target(testname) )
                         else :
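canonicalPath itself is not part of this changeset; judging from its use here, it normalizes both the user-supplied name and the test target so the equality check is insensitive to different spellings of the same path. A minimal sketch under that assumption:

    import os

    # hypothetical stand-in for the script's canonicalPath helper
    def canonicalPath(path):
        # resolve '.', '..', doubled and trailing separators against a fixed base
        return os.path.normpath(os.path.join("/some/src/dir", path))

    # './concurrent/coroutine' and 'concurrent/coroutine/' now compare equal
    print(canonicalPath("./concurrent/coroutine") == canonicalPath("concurrent/coroutine/"))  # True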
     
@@ -77,8 +79,3 @@
                                 print('ERROR: No expected file for test %s, ignoring it' % testname, file=sys.stderr)
 
-        # make sure we have at least some test to run
-        if not tests :
-                print('ERROR: No valid test to run', file=sys.stderr)
-                sys.exit(1)
-
         return tests
    8582
     
@@ -88,11 +85,14 @@
         # create a parser with the arguments for the tests script
         parser = argparse.ArgumentParser(description='Script which runs cforall tests')
-        parser.add_argument('--debug', help='Run all tests in debug or release', type=yes_no, default='no')
+        parser.add_argument('--debug', help='Run all tests in debug or release', type=yes_no, default='yes')
+        parser.add_argument('--install', help='Run all tests based on installed binaries or tree binaries', type=yes_no, default='no')
         parser.add_argument('--arch', help='Test for specific architecture', type=str, default='')
+        parser.add_argument('--timeout', help='Maximum duration in seconds after which a single test is considered to have timed out', type=int, default=60)
+        parser.add_argument('--global-timeout', help='Maximum cumulative duration in seconds after which ALL tests are considered to have timed out', type=int, default=7200)
         parser.add_argument('--dry-run', help='Don\'t run the tests, only output the commands', action='store_true')
         parser.add_argument('--list', help='List all tests available', action='store_true')
         parser.add_argument('--all', help='Run all tests available', action='store_true')
         parser.add_argument('--regenerate-expected', help='Regenerate the .expect files by running the specified tests, can be used with the --all option', action='store_true')
-        parser.add_argument('-j', '--jobs', help='Number of tests to run simultaneously', type=int, default='8')
+        parser.add_argument('-j', '--jobs', help='Number of tests to run simultaneously', type=int)
         parser.add_argument('--list-comp', help='List all valid arguments', action='store_true')
         parser.add_argument('-I','--include', help='Directory of tests to include, can be used multiple times, all if omitted', action='append')
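The two new timeout options feed the per-test and whole-run limits used further down. A trimmed-down sketch of how they parse (only these two arguments; the real parser has many more):

    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('--timeout', type=int, default=60)
    parser.add_argument('--global-timeout', type=int, default=7200)

    options = parser.parse_args(['--timeout', '90'])
    print(options.timeout)         # 90   : per-test limit in seconds
    print(options.global_timeout)  # 7200 : note the dash becomes an underscore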
     
@@ -100,5 +100,10 @@
         parser.add_argument('tests', metavar='test', type=str, nargs='*', help='a list of tests to run')
 
-        options = parser.parse_args()
+        try:
+                options = parser.parse_args()
+        except:
+                print('ERROR: invalid arguments', file=sys.stderr)
+                parser.print_help(sys.stderr)
+                sys.exit(1)
 
         # script must have at least some tests to run
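One subtlety: on bad arguments parse_args() does not raise an ordinary exception; it prints its own error and raises SystemExit. The bare except above works because bare except clauses catch SystemExit too, but catching it explicitly states the intent more precisely, as in this sketch:

    import argparse
    import sys

    parser = argparse.ArgumentParser()
    parser.add_argument('--jobs', type=int)

    try:
        options = parser.parse_args(['--jobs', 'not-a-number'])
    except SystemExit:
        # argparse has already printed its own message to stderr at this point
        print('ERROR: invalid arguments', file=sys.stderr)
        parser.print_help(sys.stderr)
        sys.exit(1)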
     
@@ -119,8 +124,17 @@
 #               running test functions
 ################################################################################
+# fix the absolute paths in the output
+def fixoutput( fname ):
+        if not is_ascii(fname):
+                return
+
+        file_replace(fname, "%s/" % settings.SRCDIR, "")
+
+
 # logic to run a single test and return the result (no handling of printing or other test framework logic)
 def run_single_test(test):
 
         # find the output file based on the test name and options flag
+        exe_file = test.target_executable()
         out_file = test.target_output()
         err_file = test.error_log()
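is_ascii and file_replace are framework helpers defined outside this changeset; a plausible minimal reading of what fixoutput relies on, with both helper implementations treated as assumptions, is:

    # hypothetical stand-ins for the framework's is_ascii / file_replace helpers
    def is_ascii(fname):
        # treat the file as ASCII if its bytes decode cleanly
        try:
            with open(fname, 'rb') as f:
                f.read().decode('ascii')
            return True
        except UnicodeDecodeError:
            return False

    def file_replace(fname, old, new):
        # rewrite the file in place with every occurrence of old replaced
        with open(fname) as f:
            text = f.read()
        with open(fname, 'w') as f:
            f.write(text.replace(old, new))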
     
@@ -131,21 +145,28 @@
         test.prepare()
 
-        # remove any outputs from the previous tests to prevent side effects
-        rm( (out_file, err_file, test.target()) )
-
         # build, skipping to next test on error
+        before = time.time()
         make_ret, _ = make( test.target(),
                 redirects  = "2> %s 1> /dev/null" % out_file,
                 error_file = err_file
         )
+        after = time.time()
+
+        comp_dur = after - before
+
+        run_dur = None
 
         # if the make command succeeds continue, otherwise skip to diff
         if make_ret == 0 or settings.dry_run:
-                if settings.dry_run or fileIsExecutable(test.target()) :
+                before = time.time()
+                if settings.dry_run or fileIsExecutable(exe_file) :
                         # run test
-                        retcode, _ = sh("timeout 60 %s > %s 2>&1" % (test.target(), out_file), input = in_file)
+                        retcode, _ = sh("timeout %d %s > %s 2>&1" % (settings.timeout.single, exe_file, out_file), input = in_file)
                 else :
                         # simply cat the result into the output
-                        retcode, _ = sh("cat %s > %s" % (test.target(), out_file))
+                        retcode, _ = sh("cat %s > %s" % (exe_file, out_file))
+
+                after = time.time()
+                run_dur = after - before
         else:
                 retcode, _ = sh("mv %s %s" % (err_file, out_file))
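The timing here is the usual sample-and-subtract pattern around each phase, and the per-test limit is enforced by the coreutils timeout wrapper, which kills the child and exits with status 124 on expiry. In pure Python the same effect can be had with subprocess, as in this sketch (the command is a placeholder):

    import subprocess
    import time

    before = time.time()
    try:
        # placeholder command; subprocess.run enforces the limit directly
        subprocess.run(["./some-test-binary"], timeout=60)
    except subprocess.TimeoutExpired:
        print("test timed out")
    run_dur = time.time() - before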
     
@@ -153,4 +174,5 @@
 
         if retcode == 0:
+                # fixoutput(out_file)
                 if settings.generating :
                         # if we are only generating the output we still need to check that the test actually exists
     
@@ -173,5 +195,5 @@
         sh("rm -f %s > /dev/null 2>&1" % test.target())
 
-        return retcode, error
+        return retcode, error, [comp_dur, run_dur]
 
 # run a single test and handle the errors, outputs, printing, exception handling, etc.
     
@@ -182,8 +204,8 @@
                 name_txt = "%20s  " % t.name
 
-                retcode, error = run_single_test(t)
+                retcode, error, duration = run_single_test(t)
 
                 # update output based on current action
-                result_txt = TestResult.toString( retcode )
+                result_txt = TestResult.toString( retcode, duration )
 
                 # print result with error if needed
     
@@ -214,5 +236,5 @@
                         tests,
                         chunksize = 1
-                ).get(7200)
+                ).get(settings.timeout.total)
         except KeyboardInterrupt:
                 pool.terminate()
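Replacing the hard-coded .get(7200) with settings.timeout.total makes the global limit configurable. The surrounding shape follows a well-known idiom: map_async plus a timed get() instead of a bare map(), since a timed wait both bounds the whole run and keeps KeyboardInterrupt deliverable on older CPython versions. A self-contained sketch:

    import multiprocessing

    def work(x):
        return x * x  # placeholder unit of work

    if __name__ == "__main__":
        pool = multiprocessing.Pool(4)
        try:
            # timed get(): global limit on the run, and Ctrl-C still works
            results = pool.map_async(work, range(100), chunksize=1).get(7200)
            pool.close()
        except KeyboardInterrupt:
            pool.terminate()
        pool.join()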
     
@@ -234,6 +256,4 @@
 ################################################################################
 if __name__ == "__main__":
-        # always run from same folder
-        chdir()
 
         # parse the command line arguments
     
@@ -254,4 +274,10 @@
                 tests = validTests( options )
 
+        # make sure we have at least some test to run
+        if not tests :
+                print('ERROR: No valid test to run', file=sys.stderr)
+                sys.exit(1)
+
+
         # sort the tests alphabetically for convenience
         tests.sort(key=lambda t: (t.arch if t.arch else '') + t.target())
     
@@ -259,12 +285,15 @@
         # users may want to simply list the tests
         if options.list_comp :
-                print("-h --help --debug --dry-run --list --arch --all --regenerate-expected -j --jobs ", end='')
+                print("-h --help --debug --dry-run --list --arch --all --regenerate-expected --install --timeout --global-timeout -j --jobs ", end='')
                 print(" ".join(map(lambda t: "%s" % (t.target()), tests)))
 
         elif options.list :
                 print("Listing for %s:%s" % (settings.arch.string, settings.debug.string))
-                print("\n".join(map(lambda t: "%s" % (t.toString()), tests)))
+                fancy_print("\n".join(map(lambda t: "%s" % (t.toString()), tests)))
 
         else :
+                # check that the build configuration works
+                settings.validate()
+
                 options.jobs, forceJobs = jobCount( options, tests )
                 settings.updateMakeCmd(forceJobs, options.jobs)
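Dropping the default from -j means options.jobs is None when the flag is omitted, leaving the decision to jobCount. That helper is not shown in this changeset; a plausible fallback it might implement is one job per CPU, sketched below with a hypothetical name:

    import multiprocessing

    # hypothetical fallback mirroring what a jobCount helper might do
    def default_jobs(requested):
        if requested is not None:
            return requested  # the user asked for a specific count
        return multiprocessing.cpu_count()  # otherwise one job per CPU

    print(default_jobs(None))  # e.g. 8 on an 8-core machine
    print(default_jobs(4))     # 4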