source: src/tests/test.py@ a2d4d1c

Last change on this file since a2d4d1c was 3eab308c, checked in by Peter A. Buhr <pabuhr@…>, 8 years ago

fix input-file bug

#!/usr/bin/python
from __future__ import print_function

from functools import partial
from multiprocessing import Pool
from os import listdir, environ
from os.path import isfile, join, splitext
from pybin.tools import *

import argparse
import multiprocessing
import os
import re
import signal
import sys

################################################################################
# help functions
################################################################################

# Test class that defines what a test is
class Test:
    def __init__(self, name, path):
        self.name, self.path = name, path

class TestResult:
    SUCCESS = 0
    FAILURE = 1
    TIMEOUT = 124

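# Note: 124 is the exit status that the coreutils `timeout` command returns when
# it has to kill a command, so a test exceeding the 60-second limit used in
# run_single_test() below is reported as TestResult.TIMEOUT.
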
# compiles a dummy program and inspects the resulting binary to find the machine type (32-bit / 64-bit)
def getMachineType():
    sh('echo "void ?{}(int&a,int b){}int main(){return 0;}" > .dummy.c')
    ret, out = sh("make .dummy -s", print2stdout=True)

    if ret != 0:
        print("Failed to identify architecture:")
        print(out)
        print("Stopping")
        rm( (".dummy.c",".dummy") )
        sys.exit(1)

    _, out = sh("file .dummy", print2stdout=False)
    rm( (".dummy.c",".dummy") )

    return re.search("ELF\s([0-9]+)-bit", out).group(1)
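
# Example: for a 64-bit build, `file .dummy` prints something like
#   ".dummy: ELF 64-bit LSB executable, x86-64, ..."
# so the regular expression above captures "64"; listTests() below uses that
# value to pick the matching ./.expect/64/ folder of expected outputs.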

def listTestsFolder(folder) :
    path = ('./.expect/%s/' % folder) if folder else './.expect/'
    subpath = "%s/" % folder if folder else ""

    # build a Test object for every .txt expect file in the folder
    return map(lambda fname: Test(fname, subpath + fname),
        [splitext(f)[0] for f in listdir( path )
            if not f.startswith('.') and f.endswith('.txt')
        ])

# reads the directory ./.expect and identifies the tests
def listTests( concurrent ):
    machineType = getMachineType()

    # tests directly in the .expect folder will always be processed
    generic_list = listTestsFolder( "" )

    # tests in the machineType folder will only be run on the matching architecture
    typed_list = listTestsFolder( machineType )

    # tests in the concurrent folder will only be run if concurrency is enabled
    concurrent_list = listTestsFolder( "concurrent" ) if concurrent else []

    # concatenate the lists to get the full set of tests
    return generic_list + typed_list + concurrent_list
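
# For illustration, given an .expect layout such as (test names hypothetical):
#   .expect/hello.txt                 always selected
#   .expect/64/vector_math.txt        selected only on a 64-bit build
#   .expect/concurrent/coroutine.txt  selected only when --concurrent=yes
# listTests() returns Test("hello", "hello"), Test("vector_math", "64/vector_math")
# and Test("coroutine", "concurrent/coroutine").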

# from the found tests, filter all the valid/desired tests
def validTests( options ):
    tests = []

    # if we are regenerating the tests we need to find the information of the
    # already existing tests and create new info for the new tests
    if options.regenerate_expected :
        for testname in options.tests :
            if testname.endswith( (".c", ".cc", ".cpp") ):
                print('ERROR: "%s", tests are not allowed to end with a C/C++/CFA extension, ignoring it' % testname, file=sys.stderr)
            else :
                found = [test for test in allTests if test.name == testname]
                tests.append( found[0] if len(found) == 1 else Test(testname, testname) )

    else :
        # otherwise we only need to validate that all tests are present in the complete list
        for testname in options.tests:
            test = [t for t in allTests if t.name == testname]

            if len(test) != 0 :
                tests.append( test[0] )
            else :
                print('ERROR: No expected file for test %s, ignoring it' % testname, file=sys.stderr)

    # make sure we have at least one test to run
    if len(tests) == 0 :
        print('ERROR: No valid test to run', file=sys.stderr)
        sys.exit(1)

    return tests

# parses the command-line options
def getOptions():
    # create a parser with the arguments for the tests script
    parser = argparse.ArgumentParser(description='Script which runs cforall tests')
    parser.add_argument('--debug', help='Run all tests in debug or release', type=yes_no, default='no')
    parser.add_argument('--concurrent', help='Run concurrent tests', type=yes_no, default='yes')
    parser.add_argument('--dry-run', help='Don\'t run the tests, only output the commands', action='store_true')
    parser.add_argument('--list', help='List all available tests', action='store_true')
    parser.add_argument('--all', help='Run all available tests', action='store_true')
    parser.add_argument('--regenerate-expected', help='Regenerate the .expect files by running the specified tests, can be used with the --all option', action='store_true')
    parser.add_argument('-j', '--jobs', help='Number of tests to run simultaneously', type=int, default='8')
    parser.add_argument('--list-comp', help='List all valid arguments', action='store_true')
    parser.add_argument('tests', metavar='test', type=str, nargs='*', help='a list of tests to run')

    options = parser.parse_args()

    # the script must have at least some tests to run or be listing
    listing = options.list or options.list_comp
    all_tests = options.all
    some_tests = len(options.tests) > 0

    # check that exactly one of the booleans is set to true
    if not sum( (listing, all_tests, some_tests) ) == 1 :
        print('ERROR: must have option \'--all\', \'--list\' or non-empty test list', file=sys.stderr)
        parser.print_help()
        sys.exit(1)

    return options
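
# Example invocations (the test names here are hypothetical):
#   ./test.py --all                          run every discovered test
#   ./test.py --all --debug=yes              run every test against a debug build
#   ./test.py -j 4 hello vector_math         run two named tests with 4 parallel jobs
#   ./test.py --regenerate-expected hello    rebuild .expect/hello.txt from the current output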

def jobCount( options ):
    # check if the user already passed in a number of jobs for multi-threading
    make_flags = environ.get('MAKEFLAGS')
    make_jobs_fds = re.search("--jobserver-(auth|fds)=\s*([0-9]+),([0-9]+)", make_flags) if make_flags else None
    if make_jobs_fds :
        tokens = os.read(int(make_jobs_fds.group(2)), 1024)
        options.jobs = len(tokens)
        os.write(int(make_jobs_fds.group(3)), tokens)
    else :
        options.jobs = multiprocessing.cpu_count()

    # make sure we have a valid number of jobs that corresponds to user input
    if options.jobs <= 0 :
        print('ERROR: Invalid number of jobs', file=sys.stderr)
        sys.exit(1)

    return min( options.jobs, len(tests) ), True if make_flags else False
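
# Note on the jobserver handshake above: when this script is launched from a
# parallel `make`, MAKEFLAGS carries `--jobserver-auth=R,W` (older versions use
# `--jobserver-fds=R,W`), where R and W are the file descriptors of a pipe
# holding roughly one token byte per available job slot. Reading the pipe and
# counting the bytes estimates how much parallelism the parent make allows, and
# the tokens are written straight back so make's own accounting is undisturbed.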

################################################################################
# running test functions
################################################################################
# logic to run a single test and return the result (no handling of printing or other test-framework logic)
def run_single_test(test, generate, dry_run, debug):

    # find the output file based on the test name and options flag
    out_file = (".out/%s.log" % test.name) if not generate else (".expect/%s.txt" % test.path)
    err_file = ".err/%s.log" % test.name

    # remove any outputs from the previous tests to prevent side effects
    rm( (out_file, err_file, test.name), dry_run )

    options = "-debug" if debug else "-nodebug"

    # build, skipping to the next test on error
    make_ret, _ = sh("""%s test=yes DEBUG_FLAGS="%s" %s 2> %s 1> /dev/null""" % (make_cmd, options, test.name, out_file), dry_run)

    retcode = 0
    error = None

    # if the make command succeeds continue, otherwise skip to the diff
    if make_ret == 0 :
        # fetch optional input
        stdinput = "< .in/%s.txt" % test.name if isfile(".in/%s.txt" % test.name) else ""

        if fileIsExecutable(test.name) :
            # run the test
            retcode, _ = sh("timeout 60 ./%s %s > %s 2>&1" % (test.name, stdinput, out_file), dry_run)
        else :
            # simply cat the result into the output
            sh("cat %s > %s" % (test.name, out_file), dry_run)

    else :
        # the build command failed, save its log as the test output
        sh("mv %s %s" % (err_file, out_file), dry_run)

    if retcode == 0:
        if generate :
            # if we are only generating the output we still need to check that the test actually exists
            if not dry_run and fileContainsOnly(out_file, "make: *** No rule to make target `%s'. Stop." % test.name) :
                retcode = 1
                error = "\t\tNo make target for test %s!" % test.name
                sh("rm %s" % out_file, False)
        else :
            # fetch the return code and error from the diff command
            retcode, error = diff(".expect/%s.txt" % test.path, ".out/%s.log" % test.name, dry_run)

    else:
        with open (out_file, "r") as myfile:
            error = myfile.read()

    # clean the executable
    sh("rm -f %s > /dev/null 2>&1" % test.name, dry_run)

    return retcode, error
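
# In a normal (non-generate) run the comparison is therefore between
# .expect/<path>.txt (the checked-in expected output) and .out/<name>.log
# (whatever the freshly built test printed on stdout and stderr); in generate
# mode the same output is written straight into .expect/<path>.txt instead.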

# run a single test and handle the errors, outputs, printing, exception handling, etc.
def run_test_worker(t, generate, dry_run, debug) :

    signal.signal(signal.SIGINT, signal.SIG_DFL)
    # print the formatted name
    name_txt = "%20s " % t.name

    retcode, error = run_single_test(t, generate, dry_run, debug)

    # update the output based on the current action
    if generate :
        if retcode == TestResult.SUCCESS   : result_txt = "Done"
        elif retcode == TestResult.TIMEOUT : result_txt = "TIMEOUT"
        else                               : result_txt = "ERROR code %d" % retcode
    else :
        if retcode == TestResult.SUCCESS   : result_txt = "PASSED"
        elif retcode == TestResult.TIMEOUT : result_txt = "TIMEOUT"
        else                               : result_txt = "FAILED with code %d" % retcode

    # print the result, with the error if needed
    text = name_txt + result_txt
    out = sys.stdout
    if error :
        text = text + "\n" + error
        out = sys.stderr

    print(text, file = out)
    sys.stdout.flush()
    sys.stderr.flush()
    signal.signal(signal.SIGINT, signal.SIG_IGN)

    return retcode != TestResult.SUCCESS

# run the given list of tests with the given parameters
def run_tests(tests, generate, dry_run, jobs, debug) :
    # clean the sandbox from previous commands
    sh("%s clean > /dev/null 2>&1" % make_cmd, dry_run)

    # make sure the required folders are present
    sh('mkdir -p .out .expect .err', dry_run)

    if generate :
        print( "Regenerate tests for: " )

    # create the executor for our jobs and handle the signals properly
    original_sigint_handler = signal.signal(signal.SIGINT, signal.SIG_IGN)
    pool = Pool(jobs)
    signal.signal(signal.SIGINT, original_sigint_handler)

    # for each test to run
    try :
        results = pool.map_async(partial(run_test_worker, generate=generate, dry_run=dry_run, debug=debug), tests, chunksize = 1 ).get(7200)
    except KeyboardInterrupt:
        pool.terminate()
        print("Tests interrupted by user")
        sys.exit(1)

    # clean the workspace
    sh("%s clean > /dev/null 2>&1" % make_cmd, dry_run)

    for failed in results:
        if failed :
            return 1

    return 0

################################################################################
# main loop
################################################################################
if __name__ == "__main__":
    # always run from the same folder
    chdir()

    # parse the command line arguments
    options = getOptions()

    # fetch the list of all valid tests
    allTests = listTests( options.concurrent )

    # if the user wants all tests then no other treatment of the test list is required
    if options.all or options.list or options.list_comp :
        tests = allTests

    else :
        # otherwise we need to validate that the entered test list is valid
        tests = validTests( options )

    # sort the tests alphabetically for convenience
    tests.sort(key=lambda t: t.name)

    # users may want to simply list the tests
    if options.list_comp :
        print("-h --help --debug --concurrent --dry-run --list --all --regenerate-expected -j --jobs ", end='')
        print(" ".join(map(lambda t: "%s" % (t.name), tests)))

    elif options.list :
        print("\n".join(map(lambda t: "%s (%s)" % (t.name, t.path), tests)))

    else :
        options.jobs, forceJobs = jobCount( options )

        print('Running (%s) on %i cores' % ("debug" if options.debug else "no debug", options.jobs))
        make_cmd = "make" if forceJobs else ("make -j%i" % options.jobs)

        # otherwise run all the tests and make sure to return the correct error code
        sys.exit( run_tests(tests, options.regenerate_expected, options.dry_run, options.jobs, options.debug) )