#!/usr/bin/python
from __future__ import print_function

from pybin.tools import *
from pybin.test_run import *
from pybin import settings

import argparse
import re
import sys
import time

################################################################################
#               help functions
################################################################################

# walks the tree and identifies tests by the presence of a .expect file
def findTests():
	expected = []

	def matchTest(path):
		match = re.search(r"(\.[\w\/\-_]*)\/\.expect\/([\w\-_]+)(\.[\w\-_]+)?\.txt", path)
		if match :
			test = Test()
			test.name = match.group(2)
			test.path = match.group(1)
			# the optional third group is an architecture suffix, e.g. "fmt.x64.txt"
			test.arch = match.group(3)[1:] if match.group(3) else None
			if settings.arch.match(test.arch):
				expected.append(test)

	pathWalk( matchTest )

	return expected

# reads the .expect directories to identify the tests, then filters by includes/excludes
def listTests( includes, excludes ):
	includes = [canonicalPath( i ) for i in includes] if includes else None
	excludes = [canonicalPath( i ) for i in excludes] if excludes else None

	# tests directly in the .expect folder will always be processed
	test_list = findTests()

	# if we have a limited number of includes, filter by them
	if includes:
		test_list = [x for x in test_list if x.target().startswith( tuple(includes) )]

	# if we have folders to exclude, filter by them
	if excludes:
		test_list = [x for x in test_list if not x.target().startswith( tuple(excludes) )]

	return test_list

# from the found tests, keep only the valid/desired tests
def validTests( options ):
	tests = []

	# if we are regenerating the tests we need to find the information of the
	# already existing tests and create new info for the new tests
	if options.regenerate_expected :
		for testname in options.tests :
			testname = canonicalPath( testname )
			if Test.valid_name(testname):
				found = [test for test in allTests if test.target() == testname]
				tests.append( found[0] if len(found) == 1 else Test.from_target(testname) )
			else :
				print('ERROR: "%s", tests are not allowed to end with a C/C++/CFA extension, ignoring it' % testname, file=sys.stderr)

	else :
		# otherwise we only need to validate that all tests are present in the complete list
		for testname in options.tests:
			test = [t for t in allTests if pathCmp( t.target(), testname )]

			if test :
				tests.append( test[0] )
			else :
				print('ERROR: No expected file for test %s, ignoring it' % testname, file=sys.stderr)

	# make sure we have at least one test to run
	if not tests :
		print('ERROR: No valid test to run', file=sys.stderr)
		sys.exit(1)

	return tests
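
# Usage sketch (assuming this script is invoked as ./test.py; the test and
# directory names below are illustrative, not part of the actual suite):
#     ./test.py --all --debug=yes      run every test, in debug mode
#     ./test.py -I io -E io/bad        run tests under io/ except under io/bad/
#     ./test.py --list                 list the selected tests without running them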

# parses the command line options
def getOptions():
	# create a parser with the arguments for the tests script
	parser = argparse.ArgumentParser(description='Script which runs cforall tests')
	parser.add_argument('--debug', help='Run all tests in debug or release', type=yes_no, default='no')
	parser.add_argument('--arch', help='Test for specific architecture', type=str, default='')
	parser.add_argument('--dry-run', help='Don\'t run the tests, only output the commands', action='store_true')
	parser.add_argument('--list', help='List all available tests', action='store_true')
	parser.add_argument('--all', help='Run all available tests', action='store_true')
	parser.add_argument('--regenerate-expected', help='Regenerate the .expect files by running the specified tests, can be used with the --all option', action='store_true')
	parser.add_argument('-j', '--jobs', help='Number of tests to run simultaneously', type=int, default=8)
	parser.add_argument('--list-comp', help='List all valid arguments', action='store_true')
	parser.add_argument('-I', '--include', help='Directory of tests to include, can be used multiple times, all tests if omitted', action='append')
	parser.add_argument('-E', '--exclude', help='Directory of tests to exclude, can be used multiple times, none if omitted', action='append')
	parser.add_argument('tests', metavar='test', type=str, nargs='*', help='a list of tests to run')

	options = parser.parse_args()

	# script must have at least some tests to run or be listing
	listing    = options.list or options.list_comp
	all_tests  = options.all
	some_tests = len(options.tests) > 0
	some_dirs  = len(options.include) > 0 if options.include else 0

	# check that at least one way of selecting tests was given
	if not sum( (listing, all_tests, some_tests, some_dirs) ) > 0 :
		print('ERROR: must have option \'--all\', \'--list\', \'--include\', \'-I\' or non-empty test list', file=sys.stderr)
		parser.print_help()
		sys.exit(1)

	return options

################################################################################
#               running test functions
################################################################################

# logic to run a single test and return the result (no handling of printing or other test framework logic)
def run_single_test(test):

	# find the output file based on the test name and options flag
	out_file = test.target_output()
	err_file = test.error_log()
	cmp_file = test.expect()
	in_file  = test.input()

	# prepare the proper directories
	test.prepare()

	# remove any outputs from the previous tests to prevent side effects
	rm( (out_file, err_file, test.target()) )

	# build the test, timing the compilation
	before = time.time()
	make_ret, _ = make( test.target(), redirects = "2> %s 1> /dev/null" % out_file, error_file = err_file )
	after = time.time()
	comp_dur = after - before
	run_dur = None

	# if the make command succeeds continue, otherwise skip to the diff
	if make_ret == 0 or settings.dry_run:
		before = time.time()
		if settings.dry_run or fileIsExecutable(test.target()) :
			# run the test with a timeout, feeding it its input file if any
			retcode, _ = sh("timeout 60 %s > %s 2>&1" % (test.target(), out_file), input = in_file)
		else :
			# simply cat the result into the output
			retcode, _ = sh("cat %s > %s" % (test.target(), out_file))
		after = time.time()
		run_dur = after - before
	else:
		retcode, _ = sh("mv %s %s" % (err_file, out_file))

	if retcode == 0:
		if settings.generating :
			# if we are only generating the output we still need to check that the test actually exists
			if not settings.dry_run and fileContainsOnly(out_file, "make: *** No rule to make target `%s'.  Stop." % test.target()) :
				retcode = 1
				error = "\t\tNo make target for test %s!" % test.target()
				sh("rm %s" % out_file, False)
			else:
				error = None
		else :
			# fetch return code and error from the diff command
			retcode, error = diff(cmp_file, out_file)
	else:
		with open(out_file, "r") as myfile:
			error = myfile.read()

	# clean the executable
	sh("rm -f %s > /dev/null 2>&1" % test.target())

	return retcode, error, [comp_dur, run_dur]
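
# Note on the result shape (mirrors the return statement above): the tuple is
# (retcode, error, [compile_time, run_time]); error is None on success, and
# run_time stays None when the build failed and the captured compiler output
# was compared against the .expect file instead of running an executable.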

# run a single test and handle the errors, outputs, printing, exception handling, etc.
def run_test_worker(t) :
	with SignalHandling():
		# print the formatted name
		name_txt = "%20s " % t.name

		retcode, error, duration = run_single_test(t)

		# update output based on current action
		result_txt = TestResult.toString( retcode, duration )

		# print the result, with the error if needed
		text = name_txt + result_txt
		out = sys.stdout
		if error :
			text = text + "\n" + error
			out = sys.stderr

		print(text, file = out)
		sys.stdout.flush()
		sys.stderr.flush()

		return retcode != TestResult.SUCCESS

# run the given list of tests with the given parameters
def run_tests(tests, jobs) :
	# clean the sandbox from previous commands
	make('clean', redirects = '> /dev/null 2>&1')

	# create the executor for our jobs and handle signals properly
	pool = setupPool(jobs)

	# run the tests in parallel, with a global two-hour timeout
	try :
		results = pool.map_async( run_test_worker, tests, chunksize = 1 ).get(7200)
	except KeyboardInterrupt:
		pool.terminate()
		print("Tests interrupted by user")
		sys.exit(1)

	# clean the workspace
	make('clean', redirects = '> /dev/null 2>&1')

	# report failure if any worker failed
	for failed in results:
		if failed :
			return 1

	return 0

################################################################################
#               main loop
################################################################################
if __name__ == "__main__":
	# always run from the same folder
	chdir()

	# parse the command line arguments
	options = getOptions()

	# init global settings
	settings.init( options )

	# fetch the list of all valid tests
	allTests = listTests( options.include, options.exclude )

	# if the user wants all tests then no further processing of the test list is required
	if options.all or options.list or options.list_comp or options.include :
		tests = allTests

	# otherwise we need to validate that the test list that was entered is valid
	else :
		tests = validTests( options )

	# sort the tests alphabetically for convenience
	tests.sort(key=lambda t: (t.arch if t.arch else '') + t.target())

	# users may want to simply list the tests
	if options.list_comp :
		print("-h --help --debug --dry-run --list --arch --all --regenerate-expected -j --jobs ", end='')
		print(" ".join(map(lambda t: "%s" % (t.target()), tests)))

	elif options.list :
		print("Listing for %s:%s" % (settings.arch.string, settings.debug.string))
		print("\n".join(map(lambda t: "%s" % (t.toString()), tests)))

	else :
		options.jobs, forceJobs = jobCount( options, tests )
		settings.updateMakeCmd(forceJobs, options.jobs)

		print('%s (%s:%s) on %i cores' % (
			'Regenerate tests' if settings.generating else 'Running',
			settings.arch.string,
			settings.debug.string,
			options.jobs
		))

		# otherwise run all tests and make sure to return the correct error code
		sys.exit( run_tests(tests, options.jobs) )
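
# Exit status, for reference: 0 when every selected test passed; 1 when any
# test failed, the arguments were invalid, or the run was interrupted.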