Changeset c13e8dc8 for src/tests/test.py


Timestamp:
Dec 5, 2017, 2:35:03 PM
Author:
Rob Schluntz <rschlunt@…>
Branches:
ADT, arm-eh, ast-experimental, cleanup-dtors, enum, forall-pointer-decay, jacob/cs343-translation, jenkins-sandbox, master, new-ast, new-ast-unique-expr, pthread-emulation, qualifiedEnum
Children:
f9feab8
Parents:
9c35431 (diff), 65197c2 (diff)
Note: this is a merge changeset; the changes displayed below correspond to the merge itself.
Use the (diff) links above to see all the changes relative to each parent.
Message:

Merge branch 'master' into cleanup-dtors

File:
1 edited

Legend:

' '  unchanged
'+'  added
'-'  removed
  • src/tests/test.py

--- src/tests/test.py (r9c35431)
+++ src/tests/test.py (rc13e8dc8)
@@ -2,15 +2,10 @@
 from __future__ import print_function
 
-from functools import partial
-from multiprocessing import Pool
-from os import listdir, environ
-from os.path import isfile, join, splitext
 from pybin.tools import *
+from pybin.test_run import *
+from pybin import settings
 
 import argparse
-import multiprocessing
-import os
 import re
-import signal
 import sys
 
     
@@ -19,56 +14,42 @@
 ################################################################################
 
-# Test class that defines what a test is
-class Test:
-        def __init__(self, name, path):
-                self.name, self.path = name, path
-
-class TestResult:
-        SUCCESS = 0
-        FAILURE = 1
-        TIMEOUT = 124
-
-# parses the Makefile to find the machine type (32-bit / 64-bit)
-def getMachineType():
-        sh('echo "void ?{}(int&a,int b){}int main(){return 0;}" > .dummy.c')
-        ret, out = sh("make .dummy -s", print2stdout=True)
-
-        if ret != 0:
-                print("Failed to identify architecture:")
-                print(out)
-                print("Stopping")
-                rm( (".dummy.c",".dummy") )
-                sys.exit(1)
-
-        _, out = sh("file .dummy", print2stdout=False)
-        rm( (".dummy.c",".dummy") )
-
-        return re.search("ELF\s([0-9]+)-bit", out).group(1)
-
-def listTestsFolder(folder) :
-        path = ('./.expect/%s/' % folder) if folder else './.expect/'
-        subpath = "%s/" % folder if folder else ""
+def findTests():
+        expected = []
+
+        def matchTest(path):
+                match = re.search("(\.[\w\/\-_]*)\/.expect\/([\w\-_]+)(\.[\w\-_]+)?\.txt", path)
+                if match :
+                        test = Test()
+                        test.name = match.group(2)
+                        test.path = match.group(1)
+                        test.arch = match.group(3)[1:] if match.group(3) else None
+                        if settings.arch.match(test.arch):
+                                expected.append(test)
+
+        pathWalk( matchTest )
+
+        return expected
+
+# reads the directory ./.expect and identifies the tests
+def listTests( includes, excludes ):
+        includes = [canonicalPath( i ) for i in includes] if includes else None
+        excludes = [canonicalPath( i ) for i in excludes] if excludes else None
 
         # tests directly in the .expect folder will always be processed
-        return map(lambda fname: Test(fname, subpath + fname),
-                [splitext(f)[0] for f in listdir( path )
-                if not f.startswith('.') and f.endswith('.txt')
-                ])
-
-# reads the directory ./.expect and identifies the tests
-def listTests( concurrent ):
-        machineType = getMachineType()
-
-        # tests directly in the .expect folder will always be processed
-        generic_list = listTestsFolder( "" )
-
-        # tests in the machineType folder will be run only for the corresponding compiler
-        typed_list = listTestsFolder( machineType )
-
-        # tests in the concurrent folder will be run only if concurrency is enabled
-        concurrent_list = listTestsFolder( "concurrent" ) if concurrent else []
-
-        # append the lists to get the full set
-        return generic_list + typed_list + concurrent_list;
+        test_list = findTests()
+
+        # if we have a limited number of includes, filter by them
+        if includes:
+                test_list = [x for x in test_list if
+                        x.path.startswith( tuple(includes) )
+                ]
+
+        # if we have folders to exclude, filter by them
+        if excludes:
+                test_list = [x for x in test_list if not
+                        x.path.startswith( tuple(excludes) )
+                ]
+
+        return test_list
 
 # from the found tests, filter all the valid tests/desired tests
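Aside: the discovery regex above can be exercised on its own. The sample path below is hypothetical, but the pattern and group layout are exactly the ones matchTest uses (a raw string is used here to avoid escape warnings):

    import re

    path = "./concurrent/.expect/coroutine.x64.txt"   # hypothetical sample path
    match = re.search(r"(\.[\w\/\-_]*)\/.expect\/([\w\-_]+)(\.[\w\-_]+)?\.txt", path)
    print(match.group(1))   # './concurrent' -> test.path
    print(match.group(2))   # 'coroutine'    -> test.name
    print(match.group(3))   # '.x64'         -> test.arch, after stripping the leading '.'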
     
@@ -80,16 +61,16 @@
         if options.regenerate_expected :
                 for testname in options.tests :
-                        if testname.endswith( (".c", ".cc", ".cpp") ):
+                        if Test.valid_name(testname):
+                                found = [test for test in allTests if test.target() == testname]
+                                tests.append( found[0] if len(found) == 1 else Test.from_target(testname) )
+                        else :
                                 print('ERROR: "%s", tests are not allowed to end with a C/C++/CFA extension, ignoring it' % testname, file=sys.stderr)
-                        else :
-                                found = [test for test in allTests if test.name == testname]
-                                tests.append( found[0] if len(found) == 1 else Test(testname, testname) )
 
         else :
                 # otherwise we only need to validate that all tests are present in the complete list
                 for testname in options.tests:
-                        test = [t for t in allTests if t.name == testname]
-
-                        if len(test) != 0 :
+                        test = [t for t in allTests if pathCmp( t.target(), testname )]
+
+                        if test :
                                 tests.append( test[0] )
                         else :
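Test.valid_name and Test.from_target are pybin.test_run helpers whose definitions are not part of this diff. Judging only from the inline check they replace (so this is an assumption, not the real implementation), valid_name is roughly:

    class Test:
            @staticmethod
            def valid_name(name):
                    # hypothetical reconstruction: reject names carrying a source extension
                    return not name.endswith( (".c", ".cc", ".cpp") )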
     
@@ -97,5 +78,5 @@
 
         # make sure we have at least some test to run
-        if len(tests) == 0 :
+        if not tests :
                 print('ERROR: No valid test to run', file=sys.stderr)
                 sys.exit(1)
     
@@ -108,5 +89,5 @@
         parser = argparse.ArgumentParser(description='Script which runs cforall tests')
         parser.add_argument('--debug', help='Run all tests in debug or release', type=yes_no, default='no')
-        parser.add_argument('--concurrent', help='Run concurrent tests', type=yes_no, default='yes')
+        parser.add_argument('--arch', help='Test for specific architecture', type=str, default='')
         parser.add_argument('--dry-run', help='Don\'t run the tests, only output the commands', action='store_true')
         parser.add_argument('--list', help='List all tests available', action='store_true')
     
@@ -115,4 +96,6 @@
         parser.add_argument('-j', '--jobs', help='Number of tests to run simultaneously', type=int, default='8')
         parser.add_argument('--list-comp', help='List all valid arguments', action='store_true')
+        parser.add_argument('-I','--include', help='Directory of tests to include, can be used multiple times; all if omitted', action='append')
+        parser.add_argument('-E','--exclude', help='Directory of tests to exclude, can be used multiple times; none if omitted', action='append')
         parser.add_argument('tests', metavar='test', type=str, nargs='*', help='a list of tests to run')
 
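The new -I/-E flags rely on argparse's append action: each occurrence of the flag appends to a list, and the attribute stays None when the flag is never given, which is why listTests guards with "if includes". A minimal standalone illustration:

    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('-I', '--include', action='append')

    print(parser.parse_args(['-I', 'concurrent', '-I', 'generic']).include)   # ['concurrent', 'generic']
    print(parser.parse_args([]).include)                                      # None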
     
@@ -123,8 +106,9 @@
         all_tests  = options.all
         some_tests = len(options.tests) > 0
+        some_dirs  = len(options.include) > 0 if options.include else 0
 
         # check that at least one of the booleans is set to true
-        if not sum( (listing, all_tests, some_tests) ) == 1 :
-                print('ERROR: must have option \'--all\', \'--list\' or non-empty test list', file=sys.stderr)
+        if not sum( (listing, all_tests, some_tests, some_dirs) ) > 0 :
+                print('ERROR: must have option \'--all\', \'--list\', \'--include\', \'-I\' or non-empty test list', file=sys.stderr)
                 parser.print_help()
                 sys.exit(1)
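The guard works because Python booleans behave as integers under sum(): the new check passes whenever at least one selector is truthy, whereas the old code demanded exactly one. For example:

    listing, all_tests, some_tests, some_dirs = False, False, True, True
    print(sum( (listing, all_tests, some_tests, some_dirs) ))   # 2, so the new guard passes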
     
@@ -132,69 +116,52 @@
         return options
 
-def jobCount( options ):
-        # check if the user already passed in a number of jobs for multi-threading
-        make_flags = environ.get('MAKEFLAGS')
-        make_jobs_fds = re.search("--jobserver-(auth|fds)=\s*([0-9]+),([0-9]+)", make_flags) if make_flags else None
-        if make_jobs_fds :
-                tokens = os.read(int(make_jobs_fds.group(2)), 1024)
-                options.jobs = len(tokens)
-                os.write(int(make_jobs_fds.group(3)), tokens)
-        else :
-                options.jobs = multiprocessing.cpu_count()
-
-        # make sure we have a valid number of jobs that corresponds to user input
-        if options.jobs <= 0 :
-                print('ERROR: Invalid number of jobs', file=sys.stderr)
-                sys.exit(1)
-
-        return min( options.jobs, len(tests) ), True if make_flags else False
-
 ################################################################################
 #               running test functions
 ################################################################################
 # logic to run a single test and return the result (no handling of printing or other test framework logic)
-def run_single_test(test, generate, dry_run, debug):
+def run_single_test(test):
 
         # find the output file based on the test name and options flag
-        out_file = (".out/%s.log" % test.name) if not generate else (".expect/%s.txt" % test.path)
-        err_file = ".err/%s.log" % test.name
+        out_file = test.target_output()
+        err_file = test.error_log()
+        cmp_file = test.expect()
+        in_file  = test.input()
+
+        # prepare the proper directories
+        test.prepare()
 
         # remove any outputs from the previous tests to prevent side effects
-        rm( (out_file, err_file, test.name), dry_run )
-
-        options = "-debug" if debug else "-nodebug"
+        rm( (out_file, err_file, test.target()) )
 
         # build, skipping to next test on error
-        make_ret, _ = sh("""%s test=yes DEBUG_FLAGS="%s" %s 2> %s 1> /dev/null""" % (make_cmd, options, test.name, out_file), dry_run)
-
-        retcode = 0
-        error = None
+        make_ret, _ = make( test.target(),
+                redirects  = "2> %s 1> /dev/null" % out_file,
+                error_file = err_file
+        )
 
         # if the make command succeeds continue otherwise skip to diff
-        if make_ret == 0 :
-                # fetch optional input
-                stdinput = "< .in/%s.txt" % test.name if isfile(".in/%s.txt" % test.name) else ""
-
-                if fileIsExecutable(test.name) :
+        if make_ret == 0 or settings.dry_run:
+                if settings.dry_run or fileIsExecutable(test.target()) :
                         # run test
-                        retcode, _ = sh("timeout 60 ./%s %s > %s 2>&1" % (test.name, stdinput, out_file), dry_run)
+                        retcode, _ = sh("timeout 60 %s > %s 2>&1" % (test.target(), out_file), input = in_file)
                 else :
                         # simply cat the result into the output
-                        sh("cat %s > %s" % (test.name, out_file), dry_run)
-
-        else :
-                # the command failed, save the log to a less temporary file
-                sh("mv %s %s" % (err_file, out_file), dry_run)
+                        retcode, _ = sh("cat %s > %s" % (test.target(), out_file))
+        else:
+                retcode, _ = sh("mv %s %s" % (err_file, out_file))
 
         if retcode == 0:
-                if generate :
+                if settings.generating :
                         # if we are only generating the output we still need to check that the test actually exists
-                        if not dry_run and fileContainsOnly(out_file, "make: *** No rule to make target `%s'.  Stop." % test.name) :
+                        if not settings.dry_run and fileContainsOnly(out_file, "make: *** No rule to make target `%s'.  Stop." % test.target()) :
                                 retcode = 1;
-                                error = "\t\tNo make target for test %s!" % test.name
+                                error = "\t\tNo make target for test %s!" % test.target()
                                 sh("rm %s" % out_file, False)
+                        else:
+                                error = None
                 else :
                         # fetch return code and error from the diff command
-                        retcode, error = diff(".expect/%s.txt" % test.path, ".out/%s.log" % test.name, dry_run)
+                        retcode, error = diff(cmp_file, out_file)
 
         else:
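The removed jobCount at the top of this hunk shows how the script cooperated with GNU make's jobserver: MAKEFLAGS advertises the two ends of a token pipe as --jobserver-auth=R,W (--jobserver-fds=R,W in older make), the script reads the available tokens to size its pool, then writes them back for make's other children. A standalone sketch of just the parsing step, with a hypothetical MAKEFLAGS value:

    import re

    make_flags = "-j --jobserver-auth=3,4"   # hypothetical value exported by a parent 'make -j'
    m = re.search(r"--jobserver-(auth|fds)=\s*([0-9]+),([0-9]+)", make_flags)
    if m:
            read_fd, write_fd = int(m.group(2)), int(m.group(3))
            print(read_fd, write_fd)   # 3 4: the read and write ends of the token pipe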
     
@@ -204,60 +171,48 @@
 
         # clean the executable
-        sh("rm -f %s > /dev/null 2>&1" % test.name, dry_run)
+        sh("rm -f %s > /dev/null 2>&1" % test.target())
 
         return retcode, error
 
 # run a single test and handle the errors, outputs, printing, exception handling, etc.
-def run_test_worker(t, generate, dry_run, debug) :
-
-        signal.signal(signal.SIGINT, signal.SIG_DFL)
-        # print formatted name
-        name_txt = "%20s  " % t.name
-
-        retcode, error = run_single_test(t, generate, dry_run, debug)
-
-        # update output based on current action
-        if generate :
-                if   retcode == TestResult.SUCCESS:     result_txt = "Done"
-                elif retcode == TestResult.TIMEOUT:     result_txt = "TIMEOUT"
-                else :                                  result_txt = "ERROR code %d" % retcode
-        else :
-                if   retcode == TestResult.SUCCESS:     result_txt = "PASSED"
-                elif retcode == TestResult.TIMEOUT:     result_txt = "TIMEOUT"
-                else :                                  result_txt = "FAILED with code %d" % retcode
-
-        # print result with error if needed
-        text = name_txt + result_txt
-        out = sys.stdout
-        if error :
-                text = text + "\n" + error
-                out = sys.stderr
-
-        print(text, file = out)
-        sys.stdout.flush()
-        sys.stderr.flush()
-        signal.signal(signal.SIGINT, signal.SIG_IGN)
+def run_test_worker(t) :
+
+        with SignalHandling():
+                # print formatted name
+                name_txt = "%20s  " % t.name
+
+                retcode, error = run_single_test(t)
+
+                # update output based on current action
+                result_txt = TestResult.toString( retcode )
+
+                # print result with error if needed
+                text = name_txt + result_txt
+                out = sys.stdout
+                if error :
+                        text = text + "\n" + error
+                        out = sys.stderr
+
+                print(text, file = out)
+                sys.stdout.flush()
+                sys.stderr.flush()
 
         return retcode != TestResult.SUCCESS
 
 # run the given list of tests with the given parameters
-def run_tests(tests, generate, dry_run, jobs, debug) :
+def run_tests(tests, jobs) :
         # clean the sandbox from previous commands
-        sh("%s clean > /dev/null 2>&1" % make_cmd, dry_run)
-
-        # make sure the required folders are present
-        sh('mkdir -p .out .expect .err', dry_run)
-
-        if generate :
-                print( "Regenerate tests for: " )
+        make('clean', redirects = '> /dev/null 2>&1')
 
         # create the executor for our jobs and handle the signal properly
-        original_sigint_handler = signal.signal(signal.SIGINT, signal.SIG_IGN)
-        pool = Pool(jobs)
-        signal.signal(signal.SIGINT, original_sigint_handler)
+        pool = setupPool(jobs)
 
         # for each test to run
         try :
-                results = pool.map_async(partial(run_test_worker, generate=generate, dry_run=dry_run, debug=debug), tests, chunksize = 1 ).get(7200)
+                results = pool.map_async(
+                        run_test_worker,
+                        tests,
+                        chunksize = 1
+                ).get(7200)
         except KeyboardInterrupt:
                 pool.terminate()
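SignalHandling and setupPool are pybin helpers whose definitions are not shown in this changeset. Based on the inlined signal logic removed above, plausible sketches (assumptions, not the real pybin code) are:

    import signal
    from multiprocessing import Pool

    class SignalHandling:
            # hypothetical reconstruction of the removed inline logic
            def __enter__(self):
                    # restore default Ctrl-C behaviour while the test runs
                    signal.signal(signal.SIGINT, signal.SIG_DFL)
            def __exit__(self, exc_type, exc_value, traceback):
                    # ignore Ctrl-C again once the test is done
                    signal.signal(signal.SIGINT, signal.SIG_IGN)

    def setupPool(jobs):
            # ignore SIGINT while forking so workers inherit the setting,
            # then restore the original handler in the parent
            original = signal.signal(signal.SIGINT, signal.SIG_IGN)
            pool = Pool(jobs)
            signal.signal(signal.SIGINT, original)
            return pool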
     
@@ -266,5 +221,5 @@
 
         # clean the workspace
-        sh("%s clean > /dev/null 2>&1" % make_cmd, dry_run)
+        make('clean', redirects = '> /dev/null 2>&1')
 
         for failed in results:
     
@@ -285,32 +240,41 @@
         options = getOptions()
 
+        # init global settings
+        settings.init( options )
+
         # fetch the list of all valid tests
-        allTests = listTests( options.concurrent )
+        allTests = listTests( options.include, options.exclude )
 
         # if the user wants all tests then no other treatment of the test list is required
-        if options.all or options.list or options.list_comp :
+        if options.all or options.list or options.list_comp or options.include :
                 tests = allTests
 
+        # otherwise we need to validate that the test list that was entered is valid
         else :
-                # otherwise we need to validate that the test list that was entered is valid
                 tests = validTests( options )
 
         # sort the tests alphabetically for convenience
-        tests.sort(key=lambda t: t.name)
+        tests.sort(key=lambda t: (t.arch if t.arch else '') + t.target())
 
         # users may want to simply list the tests
         if options.list_comp :
-                print("-h --help --debug --concurrent --dry-run --list --all --regenerate-expected -j --jobs ", end='')
-                print(" ".join(map(lambda t: "%s" % (t.name), tests)))
+                print("-h --help --debug --dry-run --list --arch --all --regenerate-expected -j --jobs ", end='')
+                print(" ".join(map(lambda t: "%s" % (t.target()), tests)))
 
         elif options.list :
-                print("\n".join(map(lambda t: "%s (%s)" % (t.name, t.path), tests)))
+                print("Listing for %s:%s" % (settings.arch.string, settings.debug.string))
+                print("\n".join(map(lambda t: "%s" % (t.toString()), tests)))
 
         else :
-                options.jobs, forceJobs = jobCount( options )
-
-                print('Running (%s) on %i cores' % ("debug" if options.debug else "no debug", options.jobs))
-                make_cmd = "make" if forceJobs else ("make -j%i" % options.jobs)
+                options.jobs, forceJobs = jobCount( options, tests )
+                settings.updateMakeCmd(forceJobs, options.jobs)
+
+                print('%s (%s:%s) on %i cores' % (
+                        'Regenerate tests' if settings.generating else 'Running',
+                        settings.arch.string,
+                        settings.debug.string,
+                        options.jobs
+                ))
 
                 # otherwise run all tests and make sure to return the correct error code
-                sys.exit( run_tests(tests, options.regenerate_expected, options.dry_run, options.jobs, options.debug) )
+                sys.exit( run_tests(tests, options.jobs) )
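The new sort key prefixes each target with its architecture string so arch-specific variants group together. A quick illustration with hypothetical stand-ins for the Test objects:

    class T:
            def __init__(self, name, arch=None):
                    self.name, self.arch = name, arch
            def target(self):
                    return self.name

    tests = [T('vector'), T('coroutine', 'x64'), T('array')]
    tests.sort(key=lambda t: (t.arch if t.arch else '') + t.target())
    print([t.name for t in tests])   # ['array', 'vector', 'coroutine']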