Changeset bacc36c for src/tests/test.py


Timestamp: Dec 4, 2017, 6:01:29 PM
Author: Thierry Delisle <tdelisle@…>
Branches: aaron-thesis, arm-eh, cleanup-dtors, deferred_resn, demangler, jacob/cs343-translation, jenkins-sandbox, master, new-ast, new-ast-unique-expr, new-env, no_list, persistent-indexer, resolv-new, with_gc
Children: 209383b
Parents: 0ad0c55
Message: Major test cleanup by modularizing further into pybin

File:
1 edited

Legend:
   (space)  Unmodified
   +        Added
   -        Removed

  • src/tests/test.py

--- r0ad0c55
+++ rbacc36c
 from __future__ import print_function
 
-from functools import partial
-from multiprocessing import Pool
-from os import listdir, environ
-from os.path import isfile, join, splitext
 from pybin.tools import *
 from pybin.test_run import *
+from pybin import settings
 
 import argparse
-import multiprocessing
-import os
+import functools
 import re
-import signal
 import sys
 
     
         expected = []
 
-        def step(_, dirname, names):
-                for name in names:
-                        path = os.path.join(dirname, name)
-
-                        match = re.search("(\.[\w\/\-_]*)\/.expect\/([\w\-_]+)(\.[\w\-_]+)?\.txt", path)
-                        if match :
-                                test = Test()
-                                test.name = match.group(2)
-                                test.path = match.group(1)
-                                test.arch = match.group(3)[1:] if match.group(3) else None
-                                expected.append(test)
-
-        # Start the walk
-        os.path.walk('.', step, '')
+        def findTest(path):
+                match = re.search("(\.[\w\/\-_]*)\/.expect\/([\w\-_]+)(\.[\w\-_]+)?\.txt", path)
+                if match :
+                        test = Test()
+                        test.name = match.group(2)
+                        test.path = match.group(1)
+                        test.arch = match.group(3)[1:] if match.group(3) else None
+                        expected.append(test)
+
+        pathWalk( findTest )
 
         return expected
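
The Python-2-only os.path.walk call and its step callback are replaced by a pathWalk helper from pybin.tools. That helper is not part of this changeset; a minimal sketch of what it presumably does, assuming it simply applies the given function to every file path under the current directory:

    # sketch of a pathWalk helper as it might appear in pybin/tools.py
    # (not part of this changeset; name taken from the call above)
    import os

    def pathWalk( op ):
            # apply 'op' to every file found below the current directory
            for dirname, _, names in os.walk('.'):
                    for name in names:
                            op( os.path.join(dirname, name) )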
     
 # reads the directory ./.expect and indentifies the tests
 def listTests( includes, excludes ):
-        includes = [os.path.normpath( os.path.join('.',i) ) for i in includes] if includes else None
-        excludes = [os.path.normpath( os.path.join('.',i) ) for i in excludes] if excludes else None
+        includes = [canonicalPath( i ) for i in includes] if includes else None
+        excludes = [canonicalPath( i ) for i in excludes] if excludes else None
 
         # tests directly in the .expect folder will always be processed
     
         if includes:
                 test_list = [x for x in test_list if
-                        os.path.normpath( x.path ).startswith( tuple(includes) )
+                        x.path.startswith( tuple(includes) )
                 ]
 
     
         if excludes:
                 test_list = [x for x in test_list if not
-                        os.path.normpath( x.path ).startswith( tuple(excludes) )
+                        x.path.startswith( tuple(excludes) )
                 ]
 
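
canonicalPath above, and pathCmp used further down, are new helpers from pybin.tools whose definitions are not shown in this diff. Judging from the os.path.normpath expressions they replace, they are presumably along these lines:

    # sketch of the path helpers as they might appear in pybin/tools.py
    # (not part of this changeset; derived from the code they replace)
    import os

    def canonicalPath( path ):
            # anchor at '.' and normalize, as the old inline list comprehensions did
            return os.path.normpath( os.path.join('.', path) )

    def pathCmp( lhs, rhs ):
            # compare two paths after normalization
            return os.path.normpath( lhs ) == os.path.normpath( rhs )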
     
         if options.regenerate_expected :
                 for testname in options.tests :
-                        if testname.endswith( (".c", ".cc", ".cpp") ):
+                        if Test.valid_name(testname):
+                                found = [test for test in allTests if test.target() == testname]
+                                tests.append( found[0] if len(found) == 1 else Test.from_target(testname) )
+                        else :
                                 print('ERROR: "%s", tests are not allowed to end with a C/C++/CFA extension, ignoring it' % testname, file=sys.stderr)
-                        else :
-                                found = [test for test in allTests if test.name == testname]
-                                tests.append( found[0] if len(found) == 1 else Test(testname, testname) )
 
         else :
                 # otherwise we only need to validate that all tests are present in the complete list
                 for testname in options.tests:
-                        test = [t for t in allTests if os.path.normpath( t.target() ) == os.path.normpath( testname )]
-
-                        if len(test) != 0 :
+                        test = [t for t in allTests if pathCmp( t.target(), testname )]
+
+                        if test :
                                 tests.append( test[0] )
                         else :
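
Test.valid_name and Test.from_target are new class methods on the Test type from pybin.test_run; their bodies are not visible here. Going by the endswith check and the Test(testname, testname) construction they replace, they likely look roughly like this (the extension tuple is copied from the old inline check, everything else is an assumption):

    # speculative sketch of the new Test class methods in pybin/test_run.py
    # (not part of this changeset; other members of Test elided)
    class Test:
            @classmethod
            def valid_name( cls, name ):
                    # a test name must not carry a source extension
                    return not name.endswith( (".c", ".cc", ".cpp") )

            @classmethod
            def from_target( cls, target ):
                    # build a Test whose name and path both come from the target string
                    test = cls()
                    test.name = target
                    test.path = target
                    return test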
     
 
         # make sure we have at least some test to run
-        if len(tests) == 0 :
+        if tests :
                 print('ERROR: No valid test to run', file=sys.stderr)
                 sys.exit(1)
 
         return tests
-
-class TestResult:
-        SUCCESS = 0
-        FAILURE = 1
-        TIMEOUT = 124
 
 # parses the option
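
The TestResult constants deleted above do not disappear: TestResult.toString(retcode) is used later in run_test_worker, so the class presumably moved into pybin and absorbed the PASSED/TIMEOUT/FAILED formatting that this changeset removes from run_test_worker. A sketch of that assumed version (the "Done"/"ERROR code" wording used when regenerating is omitted for brevity):

    # sketch of the relocated TestResult, e.g. in pybin/test_run.py
    # (not part of this changeset; strings copied from the removed run_test_worker code)
    class TestResult:
            SUCCESS = 0
            FAILURE = 1
            TIMEOUT = 124

            @classmethod
            def toString( cls, retcode ):
                    if   retcode == cls.SUCCESS : return "PASSED"
                    elif retcode == cls.TIMEOUT : return "TIMEOUT"
                    else                        : return "FAILED with code %d" % retcode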
     
         parser = argparse.ArgumentParser(description='Script which runs cforall tests')
         parser.add_argument('--debug', help='Run all tests in debug or release', type=yes_no, default='no')
-        parser.add_argument('--arch', help='Test for specific architecture', type=str, default=getMachineType())
+        parser.add_argument('--arch', help='Test for specific architecture', type=str, default='')
         parser.add_argument('--dry-run', help='Don\'t run the tests, only output the commands', action='store_true')
         parser.add_argument('--list', help='List all test available', action='store_true')
     
         return options
 
-def jobCount( options ):
-        # check if the user already passed in a number of jobs for multi-threading
-        make_flags = environ.get('MAKEFLAGS')
-        make_jobs_fds = re.search("--jobserver-(auth|fds)=\s*([0-9]+),([0-9]+)", make_flags) if make_flags else None
-        if make_jobs_fds :
-                tokens = os.read(int(make_jobs_fds.group(2)), 1024)
-                options.jobs = len(tokens)
-                os.write(int(make_jobs_fds.group(3)), tokens)
-        else :
-                options.jobs = multiprocessing.cpu_count()
-
-        # make sure we have a valid number of jobs that corresponds to user input
-        if options.jobs <= 0 :
-                print('ERROR: Invalid number of jobs', file=sys.stderr)
-                sys.exit(1)
-
-        return min( options.jobs, len(tests) ), True if make_flags else False
-
 ################################################################################
 #               running test functions
 ################################################################################
 # logic to run a single test and return the result (No handling of printing or other test framework logic)
-def run_single_test(test, generate, dry_run, debug):
+def run_single_test(test, debug):
 
         # find the output file based on the test name and options flag
-        out_file = test.output_file() if not generate else test.expect_file()
-        err_file = test.error_file()
-        cmp_file = test.expect_file()
-        in_file  = test.input_file()
+        out_file = test.target_output()
+        err_file = test.error_log()
+        cmp_file = test.expect()
+        in_file  = test.input()
 
         # prepare the proper directories
-        test.prepare( dry_run )
+        test.prepare()
 
         # remove any outputs from the previous tests to prevent side effects
-        rm( (out_file, err_file, test.target()), dry_run )
+        rm( (out_file, err_file, test.target()) )
 
         options = "-debug" if debug else "-nodebug"
 
-
         # build, skipping to next test on error
-        make_ret, _ = sh("""%s  DEBUG_FLAGS="%s" %s test="%s" 2> %s 1> /dev/null""" % (make_cmd, options, test.target(), err_file, out_file), dry_run)
+        make_ret, _ = make( test.target(),
+                flags      = """DEBUG_FLAGS="%s" """ % options,
+                redirects  = "2> %s 1> /dev/null" % out_file,
+                error_file = err_file
+        )
 
         retcode = 0
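
make(...) here is a new wrapper from pybin.tools around the shell invocation that the deleted sh(...) line spelled out by hand; the wrapper itself is not in this diff. Assuming it rebuilds the same command from the make command now stored in settings, it would look something like the sketch below (the exact role of error_file, which the old command passed as test="...", is not visible here):

    # sketch of a make() wrapper as it might appear in pybin/tools.py
    # (not part of this changeset; pieced together from the removed sh() call,
    #  and relying on the existing sh() helper from the same module)
    from pybin import settings

    def make( target, flags = '', redirects = '', error_file = None ):
            cmd = ' '.join( [
                    settings.make_cmd,                         # "make" or "make -jN"
                    flags,
                    target,
                    'test="%s"' % error_file if error_file else '',
                    redirects,
            ] )
            return sh( cmd )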
     
 
         # if the make command succeds continue otherwise skip to diff
-        if make_ret == 0 or dry_run:
-                # fetch optional input
-                stdinput = "< %s" % in_file if isfile(in_file) else ""
-
-                if dry_run or fileIsExecutable(test.target()) :
+        if make_ret == 0 or settings.dry_run:
+                if settings.dry_run or fileIsExecutable(test.target()) :
                         # run test
-                        retcode, _ = sh("timeout 60 ./%s %s > %s 2>&1" % (test.target(), stdinput, out_file), dry_run)
+                        retcode, _ = sh("timeout 60 %s > %s 2>&1" % (test.target(), out_file), input = in_file)
                 else :
                         # simply cat the result into the output
-                        sh("cat %s > %s" % (test.target(), out_file), dry_run)
+                        sh("cat %s > %s" % (test.target(), out_file))
         else:
-                sh("mv %s %s" % (err_file, out_file), dry_run)
+                sh("mv %s %s" % (err_file, out_file))
 
 
         if retcode == 0:
-                if generate :
+                if settings.generating :
                         # if we are ounly generating the output we still need to check that the test actually exists
-                        if not dry_run and fileContainsOnly(out_file, "make: *** No rule to make target `%s'.  Stop." % test.target()) :
+                        if not settings.dry_run and fileContainsOnly(out_file, "make: *** No rule to make target `%s'.  Stop." % test.target()) :
                                 retcode = 1;
                                 error = "\t\tNo make target for test %s!" % test.target()
     
                 else :
                         # fetch return code and error from the diff command
-                        retcode, error = diff(cmp_file, out_file, dry_run)
+                        retcode, error = diff(cmp_file, out_file)
 
         else:
     
 
         # clean the executable
-        sh("rm -f %s > /dev/null 2>&1" % test.target(), dry_run)
+        sh("rm -f %s > /dev/null 2>&1" % test.target())
 
         return retcode, error
 
 # run a single test and handle the errors, outputs, printing, exception handling, etc.
-def run_test_worker(t, generate, dry_run, debug) :
-
-        signal.signal(signal.SIGINT, signal.SIG_DFL)
-        # print formated name
-        name_txt = "%20s  " % t.name
-
-        retcode, error = run_single_test(t, generate, dry_run, debug)
-
-        # update output based on current action
-        if generate :
-                if   retcode == TestResult.SUCCESS:     result_txt = "Done"
-                elif retcode == TestResult.TIMEOUT:     result_txt = "TIMEOUT"
-                else :                                          result_txt = "ERROR code %d" % retcode
-        else :
-                if   retcode == TestResult.SUCCESS:     result_txt = "PASSED"
-                elif retcode == TestResult.TIMEOUT:     result_txt = "TIMEOUT"
-                else :                                          result_txt = "FAILED with code %d" % retcode
-
-        #print result with error if needed
-        text = name_txt + result_txt
-        out = sys.stdout
-        if error :
-                text = text + "\n" + error
-                out = sys.stderr
-
-        print(text, file = out)
-        sys.stdout.flush()
-        sys.stderr.flush()
-        signal.signal(signal.SIGINT, signal.SIG_IGN)
+def run_test_worker(t, debug) :
+
+        with SignalHandling():
+                # print formated name
+                name_txt = "%20s  " % t.name
+
+                retcode, error = run_single_test(t, debug)
+
+                # update output based on current action
+                result_txt = TestResult.toString( retcode )
+
+                #print result with error if needed
+                text = name_txt + result_txt
+                out = sys.stdout
+                if error :
+                        text = text + "\n" + error
+                        out = sys.stderr
+
+                print(text, file = out)
+                sys.stdout.flush()
+                sys.stderr.flush()
 
         return retcode != TestResult.SUCCESS
 
 # run the given list of tests with the given parameters
-def run_tests(tests, generate, dry_run, jobs, debug) :
+def run_tests(tests, jobs, debug) :
         # clean the sandbox from previous commands
-        sh("%s clean > /dev/null 2>&1" % make_cmd, dry_run)
-
-        if generate :
-                print( "Regenerate tests for: " )
+        make('clean', redirects = '> /dev/null 2>&1')
 
         # create the executor for our jobs and handle the signal properly
-        original_sigint_handler = signal.signal(signal.SIGINT, signal.SIG_IGN)
-        pool = Pool(jobs)
-        signal.signal(signal.SIGINT, original_sigint_handler)
+        pool = setupPool(jobs)
 
         # for each test to run
         try :
-                results = pool.map_async(partial(run_test_worker, generate=generate, dry_run=dry_run, debug=debug), tests, chunksize = 1 ).get(7200)
+                results = pool.map_async(
+                        functools.partial(run_test_worker, debug=debug),
+                        tests,
+                        chunksize = 1
+                ).get(7200)
         except KeyboardInterrupt:
                 pool.terminate()
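
SignalHandling and setupPool are new pybin.tools helpers that package up the signal.signal juggling this hunk deletes from run_test_worker and run_tests. Their definitions are not in this diff; reconstructed from the removed code, they would be roughly:

    # sketch of the signal/pool helpers as they might appear in pybin/tools.py
    # (not part of this changeset; behaviour copied from the removed inline code)
    import multiprocessing
    import signal

    class SignalHandling:
            # restore default Ctrl-C handling inside a worker, re-ignore it afterwards
            def __enter__(self):
                    signal.signal(signal.SIGINT, signal.SIG_DFL)

            def __exit__(self, exc_type, exc_value, traceback):
                    signal.signal(signal.SIGINT, signal.SIG_IGN)

    def setupPool( jobs ):
            # ignore SIGINT while the worker processes are created so only the
            # parent reacts to Ctrl-C, then restore the previous handler
            previous = signal.signal(signal.SIGINT, signal.SIG_IGN)
            pool = multiprocessing.Pool( jobs )
            signal.signal(signal.SIGINT, previous)
            return pool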
     
 
         # clean the workspace
-        sh("%s clean > /dev/null 2>&1" % make_cmd, dry_run)
+        make('clean', redirects = '> /dev/null 2>&1')
 
         for failed in results:
     
         options = getOptions()
 
+        # init global settings
+        settings.init( options )
+
         # fetch the liest of all valid tests
         allTests = listTests( options.include, options.exclude )
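
settings.init(options) initializes the new pybin/settings.py module, which is not included in this changeset. From the way it is consulted in this file (settings.dry_run, settings.generating, settings.arch.toString(), settings.make_cmd), it plausibly looks like the following; every name in the sketch is inferred, not taken from the real module:

    # speculative sketch of pybin/settings.py (not part of this changeset;
    # all fields are inferred from how test.py uses them)
    class Architecture:
            # wrapper guessed from the settings.arch.toString() calls
            def __init__(self, arch):
                    self.arch = arch
            def toString(self):
                    return self.arch if self.arch else "any"

    dry_run    = False
    generating = False
    arch       = None
    make_cmd   = "make"

    def init( options ):
            global dry_run, generating, arch
            dry_run    = options.dry_run
            generating = options.regenerate_expected
            arch       = Architecture( options.arch )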
     
 
         # sort the test alphabetically for convenience
-        tests.sort(key=lambda t: os.path.join(t.path, t.name))
+        tests.sort(key=lambda t: t.target())
 
         # users may want to simply list the tests
     
 
         elif options.list :
-                print("Listing for %s:%s"% (options.arch, "debug" if options.debug else "no debug"))
+                print("Listing for %s:%s"% (settings.arch.toString(), "debug" if options.debug else "no debug"))
                 print("\n".join(map(lambda t: "%s" % (t.toString()), tests)))
 
         else :
-                options.jobs, forceJobs = jobCount( options )
-
-                print('Running (%s:%s) on %i cores' % (options.arch, "debug" if options.debug else "no debug", options.jobs))
-                make_cmd = "make" if forceJobs else ("make -j%i" % options.jobs)
+                options.jobs, forceJobs = jobCount( options, tests )
+                settings.updateMakeCmd(forceJobs, options.jobs)
+
+                print('%s (%s:%s) on %i cores' % (
+                        'Regenerate tests' if settings.generating else 'Running',
+                        settings.arch.toString(),
+                        "debug" if options.debug else "no debug",
+                        options.jobs
+                ))
 
                 # otherwise run all tests and make sure to return the correct error code
-                sys.exit( run_tests(tests, options.regenerate_expected, options.dry_run, options.jobs, options.debug) )
+                sys.exit( run_tests(tests, options.jobs, options.debug) )
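
settings.updateMakeCmd(forceJobs, options.jobs) replaces the make_cmd global that the deleted line computed in place; the new function lives in pybin/settings.py and is not shown here. A guess at its body, carrying over the removed expression:

    # sketch of updateMakeCmd in pybin/settings.py (not part of this changeset;
    # expression copied from the removed "make_cmd = ..." line above)
    def updateMakeCmd( forceJobs, jobs ):
            global make_cmd
            make_cmd = "make" if forceJobs else ("make -j%i" % jobs)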