Changeset 933f32f for tests/test.py


Timestamp: May 24, 2019, 10:19:41 AM
Author: Thierry Delisle <tdelisle@…>
Branches: ADT, arm-eh, ast-experimental, cleanup-dtors, enum, forall-pointer-decay, jacob/cs343-translation, jenkins-sandbox, master, new-ast, new-ast-unique-expr, pthread-emulation, qualifiedEnum
Children: d908563
Parents: 6a9d4b4 (diff), 292642a (diff)
Message: Merge branch 'master' into cleanup-dtors

Note: this is a merge changeset; the changes displayed below correspond to the merge itself. Use the (diff) links above to see all the changes relative to each parent.

File: 1 edited

Legend: unchanged lines are shown with a leading space, removed lines with '-', added lines with '+'; '…' marks runs of unchanged lines that are not shown.
  • tests/test.py

--- tests/test.py (revision 6a9d4b4)
+++ tests/test.py (revision 933f32f)

-#!/usr/bin/python
-from __future__ import print_function
+#!/usr/bin/python3
 
 from pybin.tools import *
…
 import re
 import sys
+import tempfile
 import time
 
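The shebang now selects python3 directly, which is why the "from __future__ import print_function" line can go: under Python 3, print is already a function, so the print(..., file=sys.stderr) calls used throughout the script work without the import. For example:

    import sys

    # Python 3: print is a builtin function, so writing to stderr needs no __future__ import
    print('ERROR: invalid arguments', file=sys.stderr)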
     
 ################################################################################
 
-def findTests():
+def find_tests():
 	expected = []
 
-	def matchTest(path):
+	def match_test(path):
 		match = re.search("^%s\/([\w\/\-_]*).expect\/([\w\-_]+)(\.[\w\-_]+)?\.txt$" % settings.SRCDIR, path)
 		if match :
…
 				expected.append(test)
 
-	pathWalk( matchTest )
+	path_walk( match_test )
 
 	return expected
 
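find_tests (renamed from findTests) discovers tests by walking the source tree with the pybin.tools helper path_walk and matching each path against the regular expression above; the captured directory and stem identify a test. A rough standalone sketch of the same discovery step, using os.walk in place of path_walk (find_expected and its return shape are illustrative, not the project's API):

    import os
    import re

    def find_expected(srcdir):
        """Collect (directory, name) pairs for every .expect/<name>.txt file under srcdir."""
        pattern = re.compile(r"^%s/([\w/\-_]*)\.expect/([\w\-_]+)(\.[\w\-_]+)?\.txt$" % re.escape(srcdir))
        found = []
        for root, _, files in os.walk(srcdir):
            for name in files:
                match = pattern.search(os.path.join(root, name))
                if match:
                    found.append((match.group(1), match.group(2)))
        return found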
 # reads the directory ./.expect and indentifies the tests
-def listTests( includes, excludes ):
+def list_tests( includes, excludes ):
 	# tests directly in the .expect folder will always be processed
-	test_list = findTests()
+	test_list = find_tests()
 
 	# if we have a limited number of includes, filter by them
…
 
 # from the found tests, filter all the valid tests/desired tests
-def validTests( options ):
+def valid_tests( options ):
 	tests = []
 
…
 	if options.regenerate_expected :
 		for testname in options.tests :
-			testname = canonicalPath( testname )
+			testname = canonical_path( testname )
 			if Test.valid_name(testname):
-				found = [test for test in allTests if canonicalPath( test.target() ) == testname]
+				found = [test for test in all_tests if canonical_path( test.target() ) == testname]
 				tests.append( found[0] if len(found) == 1 else Test.from_target(testname) )
 			else :
…
 		# otherwise we only need to validate that all tests are present in the complete list
 		for testname in options.tests:
-			test = [t for t in allTests if pathCmp( t.target(), testname )]
+			test = [t for t in all_tests if path_cmp( t.target(), testname )]
 
 			if test :
…
 
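valid_tests resolves the test names given on the command line against the discovered list; canonical_path and path_cmp are pybin.tools helpers that, by the look of it, normalize paths before comparing so that spellings such as ./io/fstream and io/fstream refer to the same test. A minimal sketch of that kind of comparison (the helper names here are made up for illustration):

    import os

    def canonical(path):
        # collapse "./" prefixes, duplicate separators and other redundant components
        return os.path.normpath(path)

    def same_test(a, b):
        return canonical(a) == canonical(b)

    assert same_test("./io/fstream", "io/fstream")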
 # parses the option
-def getOptions():
+def parse_args():
 	# create a parser with the arguments for the tests script
 	parser = argparse.ArgumentParser(description='Script which runs cforall tests')
…
 		print('ERROR: invalid arguments', file=sys.stderr)
 		parser.print_help(sys.stderr)
-                sys.exit(1)
+		sys.exit(1)
 
 	# script must have at least some tests to run or be listing
…
 	# check that exactly one of the booleans is set to true
 	if not sum( (listing, all_tests, some_tests, some_dirs) ) > 0 :
-		print('ERROR: must have option \'--all\', \'--list\', \'--include\', \'-I\' or non-empty test list', file=sys.stderr)
+		print('''ERROR: must have option '--all', '--list', '--include', '-I' or non-empty test list''', file=sys.stderr)
 		parser.print_help()
 		sys.exit(1)
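parse_args (renamed from getOptions) builds the argparse parser and refuses to continue unless --all, --list, --include/-I or an explicit test list is supplied, printing the generated help on error. Reduced to a self-contained sketch with only the options named in that error message (the real script defines more):

    import argparse
    import sys

    def parse_args():
        parser = argparse.ArgumentParser(description='Script which runs cforall tests')
        parser.add_argument('--all', action='store_true', help='run all tests')
        parser.add_argument('--list', action='store_true', help='list valid tests')
        parser.add_argument('--include', '-I', action='append', help='only include tests from these directories')
        parser.add_argument('tests', nargs='*', help='individual tests to run')
        options = parser.parse_args()

        # at least one way of selecting tests must be given
        if not (options.all or options.list or options.include or options.tests):
            print("ERROR: must have option '--all', '--list', '--include', '-I' or non-empty test list", file=sys.stderr)
            parser.print_help(sys.stderr)
            sys.exit(1)

        return options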
     
 	return val == 0 or settings.dry_run
 
-def isExe(file):
-	return settings.dry_run or fileIsExecutable(file)
-
-def noRule(file, target):
-	return not settings.dry_run and fileContainsOnly(file, "make: *** No rule to make target `%s'.  Stop." % target)
+def no_rule(file, target):
+	return not settings.dry_run and file_contains_only(file, "make: *** No rule to make target `%s'.  Stop." % target)
 
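no_rule replaces the old isExe/noRule pair: it reports a missing make target when the captured output consists solely of make's "No rule to make target" message. file_contains_only is a pybin.tools helper; a plausible reading of it, as a sketch:

    def file_contains_only(path, text):
        """True when the file's entire (stripped) content is exactly the given text."""
        with open(path) as f:
            return f.read().strip() == text.strip()

    # usage mirroring no_rule:
    # file_contains_only(out_file, "make: *** No rule to make target `%s'.  Stop." % target)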
 # logic to run a single test and return the result (No handling of printing or other test framework logic)
…
 	# build, skipping to next test on error
 	with Timed() as comp_dur:
-		make_ret, _ = make( test.target(),      redirects  = ("2> %s 1> /dev/null" % out_file), error_file = err_file )
-
-	# if the make command succeds continue otherwise skip to diff
+		make_ret, _ = make( test.target(), output=subprocess.DEVNULL, error=out_file, error_file = err_file )
+
 	run_dur = None
-	if success(make_ret):
-		with Timed() as run_dur:
-			if isExe(exe_file):
-				# run test
-				retcode = run(exe_file, out_file, in_file)
+	# run everything in a temp directory to make sure core file are handled properly
+	with tempdir():
+		# if the make command succeds continue otherwise skip to diff
+		if success(make_ret):
+			with Timed() as run_dur:
+				if settings.dry_run or is_exe(exe_file):
+					# run test
+					retcode, _ = sh(exe_file, output=out_file, input=in_file, timeout=True)
+				else :
+					# simply cat the result into the output
+					retcode = cat(exe_file, out_file)
+		else:
+			retcode = mv(err_file, out_file)
+
+		if success(retcode):
+			if settings.generating :
+				# if we are ounly generating the output we still need to check that the test actually exists
+				if no_rule(out_file, test.target()) :
+					retcode = 1
+					error = "\t\tNo make target for test %s!" % test.target()
+					rm(out_file)
+				else:
+					error = None
 			else :
-				# simply cat the result into the output
-				retcode = cat(exe_file, out_file)
-	else:
-		retcode = mv(err_file, out_file)
-
-	if success(retcode):
-		if settings.generating :
-			# if we are ounly generating the output we still need to check that the test actually exists
-			if noRule(out_file, test.target()) :
-				retcode = 1
-				error = "\t\tNo make target for test %s!" % test.target()
-				rm(out_file)
-			else:
-				error = None
-		else :
-			# fetch return code and error from the diff command
-			retcode, error = diff(cmp_file, out_file)
-
-	else:
-		with open (out_file, "r") as myfile:
-			error = myfile.read()
-
-		ret, info = coreInfo(exe_file)
-		error = error + info
+				# fetch return code and error from the diff command
+				retcode, error = diff(cmp_file, out_file)
+
+		else:
+			with open (out_file, "r") as myfile:
+				error = myfile.read()
+
+			ret, info = core_info(exe_file)
+			error = error + info if error else info
 
 
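Two changes matter in run_single_test. First, the shell redirect string handed to make is replaced by explicit output/error arguments (subprocess.DEVNULL for stdout, the output file for stderr), presumably forwarded to the underlying subprocess call. Second, the run phase is wrapped in a tempdir() context manager so that core files dumped by a crashing test land in a disposable directory instead of the build tree. tempdir comes from pybin.tools; a sketch of what such a context manager usually looks like (an assumption, not the project's exact code):

    import contextlib
    import os
    import shutil
    import tempfile

    @contextlib.contextmanager
    def tempdir():
        """Run the enclosed block inside a fresh temporary directory, then remove it."""
        old_cwd = os.getcwd()
        path = tempfile.mkdtemp()
        try:
            os.chdir(path)
            yield path
        finally:
            os.chdir(old_cwd)
            shutil.rmtree(path, ignore_errors=True)

    # usage mirroring run_single_test:
    # with tempdir():
    #     ...run the test executable; any core dump lands in the temporary directory...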
     
 # run a single test and handle the errors, outputs, printing, exception handling, etc.
 def run_test_worker(t) :
-
-	with SignalHandling():
+	try :
 		# print formated name
-		name_txt = "%24s  " % t.name
+		name_txt = '{0:{width}}  '.format(t.target(), width=settings.output_width)
 
 		retcode, error, duration = run_single_test(t)
…
 
 		#print result with error if needed
-		text = name_txt + result_txt
+		text = '\t' + name_txt + result_txt
 		out = sys.stdout
 		if error :
-			text = text + "\n" + error
+			text = text + '\n' + error
 			out = sys.stderr
 
…
 		sys.stderr.flush()
 
-	return retcode != TestResult.SUCCESS
+		return retcode != TestResult.SUCCESS
+	except KeyboardInterrupt:
+		False
 
 # run the given list of tests with the given parameters
 def run_tests(tests, jobs) :
 	# clean the sandbox from previous commands
-	make('clean', redirects = '> /dev/null 2>&1')
+	make('clean', output=subprocess.DEVNULL, error=subprocess.DEVNULL)
 
 	# create the executor for our jobs and handle the signal properly
-	pool = setupPool(jobs)
+	pool = multiprocessing.Pool(jobs)
 
 	# for each test to run
…
 
 	# clean the workspace
-	make('clean', redirects = '> /dev/null 2>&1')
+	make('clean', output=subprocess.DEVNULL, error=subprocess.DEVNULL)
 
 	for failed in results:
…
 
 	# parse the command line arguments
-	options = getOptions()
+	options = parse_args()
 
 	# init global settings
…
 
 	# fetch the liest of all valid tests
-	allTests = listTests( options.include, options.exclude )
+	all_tests = list_tests( options.include, options.exclude )
 
 
 	# if user wants all tests than no other treatement of the test list is required
 	if options.all or options.list or options.list_comp or options.include :
-		tests = allTests
+		tests = all_tests
 
 	#otherwise we need to validate that the test list that was entered is valid
 	else :
-		tests = validTests( options )
+		tests = valid_tests( options )
 
 	# make sure we have at least some test to run
…
 	elif options.list :
 		print("Listing for %s:%s"% (settings.arch.string, settings.debug.string))
-		fancy_print("\n".join(map(lambda t: "%s" % (t.toString()), tests)))
+		fancy_print("\n".join(map(lambda t: t.toString(), tests)))
 
 	else :
 		# check the build configuration works
+		settings.prep_output(tests)
 		settings.validate()
 
-		options.jobs, forceJobs = jobCount( options, tests )
-		settings.updateMakeCmd(forceJobs, options.jobs)
-
-		print('%s (%s:%s) on %i cores' % (
-			'Regenerate tests' if settings.generating else 'Running',
+		options.jobs, forceJobs = job_count( options, tests )
+		settings.update_make_cmd(forceJobs, options.jobs)
+
+		print('%s %i tests on %i cores (%s:%s)' % (
+			'Regenerating' if settings.generating else 'Running',
+			len(tests),
+			options.jobs,
 			settings.arch.string,
-			settings.debug.string,
-			options.jobs
+			settings.debug.string
 		))
 
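Two related changes close out the file: run_test_worker now wraps its body in try/except KeyboardInterrupt so an interrupted worker exits quietly (note the handler's bare False is not returned, so the parent actually receives None for interrupted tests), and run_tests swaps the bespoke setupPool for a plain multiprocessing.Pool while silencing make with subprocess.DEVNULL instead of a shell redirect. A reduced, self-contained sketch of that worker/pool pattern, with the test logic itself stubbed out:

    import multiprocessing
    import sys

    def run_single_test(test):
        # stand-in for the real build/run/diff logic
        return False

    def run_test_worker(test):
        try:
            failed = run_single_test(test)
            print('%24s  %s' % (test, 'FAILED' if failed else 'passed'))
            sys.stdout.flush()
            return failed
        except KeyboardInterrupt:
            # swallow Ctrl-C in the worker so it does not print its own traceback
            return False

    def run_tests(tests, jobs):
        pool = multiprocessing.Pool(jobs)
        try:
            results = pool.map(run_test_worker, tests)
        finally:
            pool.close()
            pool.join()
        return 1 if any(results) else 0

    if __name__ == '__main__':
        sys.exit(run_tests(['array', 'io', 'math'], jobs=2))

Catching KeyboardInterrupt inside the worker matters because every pool process receives the Ctrl-C; without the handler each one prints a traceback before the parent gets a chance to shut the pool down.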