#!/usr/bin/python
from __future__ import print_function

from functools import partial
from multiprocessing import Pool
from os import listdir, environ
from os.path import isfile, join, splitext
from subprocess import Popen, PIPE, STDOUT

import argparse
import os
import re
import stat
import sys

################################################################################
# help functions
################################################################################

# Test class that defines what a test is
class Test:
    def __init__(self, name, path):
        self.name, self.path = name, path

# parses the Makefile to find the machine type (32-bit / 64-bit)
def getMachineType():
    sh('echo "int main() { return 0; }" > .dummy.c')
    sh("make .dummy", print2stdout=False)
    _, out = sh("file .dummy", print2stdout=False)
    sh("rm -f .dummy.c > /dev/null 2>&1")
    sh("rm -f .dummy > /dev/null 2>&1")
    return re.search(r"ELF\s([0-9]+)-bit", out).group(1)

# reads the directory ./.expect and identifies the tests
def listTests():
    machineType = getMachineType()

    # tests directly in the .expect folder will always be processed
    generic_list = map(lambda fname: Test(fname, fname),
        [splitext(f)[0] for f in listdir('./.expect')
        if not f.startswith('.') and f.endswith('.txt')
        ])

    # tests in the machineType folder will be run only for the corresponding compiler
    typed_list = map(lambda fname: Test(fname, "%s/%s" % (machineType, fname)),
        [splitext(f)[0] for f in listdir("./.expect/%s" % machineType)
        if not f.startswith('.') and f.endswith('.txt')
        ])

    # append both lists to get the full list of tests
    return generic_list + typed_list

# helper function to run terminal commands
def sh(cmd, dry_run = False, print2stdout = True):
    if dry_run:
        # if this is a dry run, only print the commands that would be run
        print("cmd: %s" % cmd)
        return 0, None
    else:
        # otherwise create a pipe and run the desired command
        proc = Popen(cmd, stdout=None if print2stdout else PIPE, stderr=STDOUT, shell=True)
        out, err = proc.communicate()
        return proc.returncode, out
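# Illustrative only (not part of the test flow): sh() returns a
# (returncode, output) pair, where output holds the captured text when
# print2stdout is False and None otherwise. A sketch of typical calls:
#     retcode, out = sh("ls .expect", print2stdout=False)
#     retcode, _   = sh("make foo", dry_run=True)   # only prints "cmd: make foo"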
# helper function to replace patterns in a file
def file_replace(fname, pat, s_after):
    # first, see if the pattern is even in the file.
    with open(fname) as f:
        if not any(re.search(pat, line) for line in f):
            return # pattern does not occur in file so we are done.

    # pattern is in the file, so perform the replace operation.
    with open(fname) as f:
        out_fname = fname + ".tmp"
        out = open(out_fname, "w")
        for line in f:
            out.write(re.sub(pat, s_after, line))
        out.close()
    os.rename(out_fname, fname)

# test output may differ depending on the depth of the makefile
def fix_MakeLevel(file):
    if environ.get('MAKELEVEL'):
        file_replace(file, r"make\[%i\]" % int(environ.get('MAKELEVEL')), 'make')

# helper function to check if a file contains only a specific string
def fileContainsOnly(file, text):
    with open(file) as f:
        ff = f.read().strip()
        return ff == text.strip()

# check whether or not a file is executable
def fileIsExecutable(file):
    try:
        fileinfo = os.stat(file)
        return bool(fileinfo.st_mode & stat.S_IXUSR)
    except Exception as inst:
        print(type(inst))   # the exception instance
        print(inst.args)    # arguments stored in .args
        print(inst)
        return False

################################################################################
# running test functions
################################################################################
def run_single_test(test, generate, dry_run):

    # find the output file based on the test name and options flag
    out_file = (".out/%s.log" % test.name) if not generate else (".expect/%s.txt" % test.path)

    # remove any outputs from the previous tests to prevent side effects
    sh("rm -f %s" % out_file, dry_run)
    sh("rm -f %s > /dev/null 2>&1" % test.name, dry_run)

    # build, skipping to the diff on error
    make_ret, _ = sh("%s %s 2> %s 1> /dev/null" % (make_cmd, test.name, out_file), dry_run)

    # if the make command succeeds, run the test; otherwise skip to the diff
    if make_ret == 0:
        # fetch optional input
        stdinput = "< .in/%s.txt" % test.name if isfile(".in/%s.txt" % test.name) else ""

        if fileIsExecutable(test.name):
            # run the test
            sh("./%s %s > %s 2>&1" % (test.name, stdinput, out_file), dry_run)
        else:
            # simply cat the result into the output
            sh("cat %s > %s" % (test.name, out_file), dry_run)

    retcode = 0
    error = None

    # fix the output to prevent the make depth from causing issues
    fix_MakeLevel(out_file)

    if generate:
        # if we are only generating the output we still need to check that the test actually exists
        if not dry_run and fileContainsOnly(out_file, "make: *** No rule to make target `%s'. Stop." % test.name):
            retcode = 1
            error = "\t\tNo make target for test %s!" % test.name
            sh("rm %s" % out_file, False)
    else:
        # diff the output of the files
        diff_cmd = ("diff --old-group-format='\t\tmissing lines :\n%%<' \\\n"
                    "--new-group-format='\t\tnew lines :\n%%>' \\\n"
                    "--unchanged-group-format='%%=' \\\n"
                    "--changed-group-format='\t\texpected :\n%%<\n\t\tgot :\n%%>' \\\n"
                    "--new-line-format='\t\t%%dn\t%%L' \\\n"
                    "--old-line-format='\t\t%%dn\t%%L' \\\n"
                    "--unchanged-line-format='' \\\n"
                    ".expect/%s.txt .out/%s.log")

        # fetch the return code and error text from the diff command
        retcode, error = sh(diff_cmd % (test.path, test.name), dry_run, False)

    # clean the executable
    sh("rm -f %s > /dev/null 2>&1" % test.name, dry_run)

    return retcode, error
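# Sketch of what the diff flags above might print on a mismatch (the exact
# content depends on the test; line numbers come from the %dn directive):
#         expected :
#         3    hello world
#         got :
#         3    hello wrold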
def run_test_instance(t, generate, dry_run):
    # format the test name
    name_txt = "%20s " % t.name

    # run the test instance and collect the result
    test_failed, error = run_single_test(t, generate, dry_run)

    # update the output based on the current action
    if generate:
        failed_txt  = "ERROR"
        success_txt = "Done"
    else:
        failed_txt  = "FAILED"
        success_txt = "PASSED"

    # print the result, with the error if needed
    text = name_txt + (failed_txt if test_failed else success_txt)
    out = sys.stdout
    if error:
        text = text + "\n" + error
        out = sys.stderr

    print(text, file=out)
    sys.stdout.flush()
    sys.stderr.flush()

    return test_failed

# run the given list of tests with the given parameters
def run_tests(tests, generate, dry_run, jobs):
    # clean the sandbox from previous commands
    sh("%s clean > /dev/null 2>&1" % make_cmd, dry_run)

    # make sure the required folders are present
    sh('mkdir -p .out .expect', dry_run)

    if generate:
        print("Regenerate tests for: ")

    # run each test on the thread pool
    pool = Pool(jobs)
    try:
        results = pool.map_async(
            partial(run_test_instance, generate=generate, dry_run=dry_run),
            tests
        ).get(99999999)
    except KeyboardInterrupt:
        pool.terminate()
        print("Tests interrupted by user")
        sys.exit(1)

    # clean the workspace
    sh("%s clean > /dev/null 2>&1" % make_cmd, dry_run)

    for failed in results:
        if failed:
            return 1

    return 0
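# Note on the map_async(...).get(<huge timeout>) idiom above: in Python 2 a
# bare Pool.map() can block in a way that never delivers KeyboardInterrupt to
# the parent process, so requesting the result with a very large timeout is a
# common workaround that keeps Ctrl-C (and the except clause above) working.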
################################################################################
# main loop
################################################################################

# create a parser with the arguments for the test script
parser = argparse.ArgumentParser(description='Script which runs cforall tests')
parser.add_argument('--dry-run', help='Don\'t run the tests, only output the commands', action='store_true')
parser.add_argument('--list', help='List all tests available', action='store_true')
parser.add_argument('--all', help='Run all tests available', action='store_true')
parser.add_argument('--regenerate-expected', help='Regenerate the .expect by running the specified tests, can be used with --all option', action='store_true')
parser.add_argument('-j', '--jobs', help='Number of tests to run simultaneously', type=int, default=8)
parser.add_argument('tests', metavar='test', type=str, nargs='*', help='a list of tests to run')

# parse the command line arguments
options = parser.parse_args()

# script must have at least some tests to run
if (len(options.tests) > 0 and options.all and not options.list) \
or (len(options.tests) == 0 and not options.all and not options.list):
    print('ERROR: must have option \'--all\' or a non-empty test list', file=sys.stderr)
    parser.print_help()
    sys.exit(1)

# fetch the list of all valid tests
allTests = listTests()

# if the user wants all tests then no other treatment of the test list is required
if options.all or options.list:
    tests = allTests
else:
    # otherwise we need to validate that the test list that was entered is valid
    tests = []

    # if we are regenerating the tests we need to find the information of the
    # already existing tests and create new info for the new tests
    if options.regenerate_expected:
        for testname in options.tests:
            if testname.endswith(".c") or testname.endswith(".cc") or testname.endswith(".cpp"):
                print('ERROR: "%s", tests are not allowed to end with a C/C++/CFA extension, ignoring it' % testname, file=sys.stderr)
            else:
                found = [test for test in allTests if test.name == testname]
                tests.append(found[0] if len(found) == 1 else Test(testname, testname))
    else:
        # otherwise we only need to validate that all tests are present in the complete list
        for testname in options.tests:
            test = [t for t in allTests if t.name == testname]
            if len(test) != 0:
                tests.append(test[0])
            else:
                print('ERROR: No expected file for test %s, ignoring it' % testname, file=sys.stderr)

# make sure we have at least one test to run
if len(tests) == 0:
    print('ERROR: No valid test to run', file=sys.stderr)
    sys.exit(1)

# sort the tests alphabetically for convenience
tests.sort(key=lambda t: t.name)

# check if the user already passed in a number of jobs for multi-threading
make_flags = environ.get('MAKEFLAGS')
make_jobs_fds = re.search(r"--jobserver-fds=\s*([0-9]+),([0-9]+)", make_flags) if make_flags else None
if make_jobs_fds:
    # the make jobserver advertises available job slots as tokens in a pipe:
    # read them to count the slots, then write them back so make keeps them
    tokens = os.read(int(make_jobs_fds.group(1)), 1024)
    options.jobs = len(tokens)
    os.write(int(make_jobs_fds.group(2)), tokens)

# make sure we have a valid number of jobs that corresponds to user input
if options.jobs <= 0:
    print('ERROR: Invalid number of jobs', file=sys.stderr)
    sys.exit(1)

print('Running on %i cores' % options.jobs)
make_cmd = "make" if make_flags else ("make -j%i" % options.jobs)

# users may want to simply list the tests
if options.list:
    print("\n".join(map(lambda t: "%s (%s)" % (t.name, t.path), tests)))
else:
    # otherwise run all tests and make sure to return the correct error code
    sys.exit(run_tests(tests, options.regenerate_expected, options.dry_run, options.jobs))
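# Illustrative invocations, assuming the script is saved as test.py (the
# actual file name is not fixed by the code above):
#     ./test.py --list                        # list every available test
#     ./test.py --all -j4                     # run the whole suite on 4 cores
#     ./test.py mytest --dry-run              # print commands without running
#     ./test.py mytest --regenerate-expected  # rebuild .expect/mytest.txt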