source: tests/test.py @ 8dbfb7e

Last change on this file since 8dbfb7e was 1bb2488, checked in by tdelisle <tdelisle@…>, 7 years ago

No longer need to use popen and signal handling in test.py

#!/usr/bin/python3

from pybin.tools import *
from pybin.test_run import *
from pybin import settings

import argparse
import multiprocessing
import re
import sys
import time

################################################################################
# help functions
################################################################################

def find_tests():
	expected = []

	def match_test(path):
		match = re.search(r"^%s\/([\w\/\-_]*).expect\/([\w\-_]+)(\.[\w\-_]+)?\.txt$" % settings.SRCDIR, path)
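		# illustrative (hypothetical path): with SRCDIR ".../tests", the path
		# ".../tests/concurrent/.expect/coroutine.x64.txt" would produce a Test
		# with name "coroutine", path "concurrent/" and arch "x64"; a file
		# without an architecture suffix leaves group(3) empty, so arch is None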
		if match :
			test = Test()
			test.name = match.group(2)
			test.path = match.group(1)
			test.arch = match.group(3)[1:] if match.group(3) else None
			if settings.arch.match(test.arch):
				expected.append(test)

	path_walk( match_test )

	return expected

# reads the .expect directories and identifies the tests
def list_tests( includes, excludes ):
	# tests directly in the .expect folder will always be processed
	test_list = find_tests()

	# if we have a limited number of includes, filter by them
	if includes:
		test_list = [x for x in test_list if
			x.target().startswith( tuple(includes) )
		]

	# if we have folders to exclude, filter by them
	if excludes:
		test_list = [x for x in test_list if not
			x.target().startswith( tuple(excludes) )
		]

	return test_list

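# illustrative (hypothetical folder names): list_tests( ['concurrent'], None )
# keeps only tests whose target starts with "concurrent", while
# list_tests( None, ['concurrent'] ) keeps everything else
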
# from the found tests, keep only the valid/desired tests
def valid_tests( options ):
	tests = []

	# if we are regenerating the tests, we need to find the information of the
	# already existing tests and create new info for the new tests
	if options.regenerate_expected :
		for testname in options.tests :
			testname = canonical_path( testname )
			if Test.valid_name(testname):
				found = [test for test in all_tests if canonical_path( test.target() ) == testname]
				tests.append( found[0] if len(found) == 1 else Test.from_target(testname) )
			else :
				print('ERROR: "%s", tests are not allowed to end with a C/C++/CFA extension, ignoring it' % testname, file=sys.stderr)
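				# e.g. (hypothetical name) "io1.cfa" is rejected here; pass the
				# bare test name "io1" instead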

	else :
		# otherwise we only need to validate that all tests are present in the complete list
		for testname in options.tests:
			test = [t for t in all_tests if path_cmp( t.target(), testname )]

			if test :
				tests.append( test[0] )
			else :
				print('ERROR: No expected file for test %s, ignoring it' % testname, file=sys.stderr)

	return tests

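# illustrative behaviour (hypothetical test name): with --regenerate-expected, a
# testname that matches no existing test gets a fresh Test created via
# Test.from_target; without it, unknown names are reported and skipped
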
# parse the command line options
def parse_args():
	# create a parser with the arguments for the tests script
	parser = argparse.ArgumentParser(description='Script which runs cforall tests')
	parser.add_argument('--debug', help='Run all tests in debug or release', type=yes_no, default='yes')
	parser.add_argument('--install', help='Run all tests based on installed binaries or tree binaries', type=yes_no, default='no')
	parser.add_argument('--arch', help='Test for specific architecture', type=str, default='')
	parser.add_argument('--timeout', help='Maximum duration in seconds after which a single test is considered to have timed out', type=int, default=60)
	parser.add_argument('--global-timeout', help='Maximum cumulative duration in seconds after which ALL tests are considered to have timed out', type=int, default=7200)
	parser.add_argument('--dry-run', help='Don\'t run the tests, only output the commands', action='store_true')
	parser.add_argument('--list', help='List all available tests', action='store_true')
	parser.add_argument('--all', help='Run all available tests', action='store_true')
	parser.add_argument('--regenerate-expected', help='Regenerate the .expect files by running the specified tests; can be used with the --all option', action='store_true')
	parser.add_argument('-j', '--jobs', help='Number of tests to run simultaneously', type=int)
	parser.add_argument('--list-comp', help='List all valid arguments', action='store_true')
	parser.add_argument('-I','--include', help='Directory of tests to include, can be used multiple times, all if omitted', action='append')
	parser.add_argument('-E','--exclude', help='Directory of tests to exclude, can be used multiple times, none if omitted', action='append')
	parser.add_argument('tests', metavar='test', type=str, nargs='*', help='a list of tests to run')

	try:
		options = parser.parse_args()
	except:
		print('ERROR: invalid arguments', file=sys.stderr)
		parser.print_help(sys.stderr)
		sys.exit(1)

	# script must have at least some tests to run or be listing
	listing = options.list or options.list_comp
	all_tests = options.all
	some_tests = len(options.tests) > 0
	some_dirs = len(options.include) > 0 if options.include else 0

	# check that at least one of the booleans is set to true
	if not sum( (listing, all_tests, some_tests, some_dirs) ) > 0 :
		print('''ERROR: must have option '--all', '--list', '--include', '-I' or non-empty test list''', file=sys.stderr)
		parser.print_help()
		sys.exit(1)

	return options

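# illustrative invocations (hypothetical test names):
#   ./test.py --all --debug=no           run every test in release mode
#   ./test.py -I concurrent -j 8         run the tests under "concurrent" on 8 cores
#   ./test.py --regenerate-expected io1  rebuild the .expect file for test "io1"
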
################################################################################
# running test functions
################################################################################
def success(val):
	return val == 0 or settings.dry_run

def no_rule(file, target):
	return not settings.dry_run and file_contains_only(file, "make: *** No rule to make target `%s'. Stop." % target)

# logic to run a single test and return the result (no handling of printing or other test framework logic)
def run_single_test(test):

	# find the output file based on the test name and options flag
	exe_file = test.target_executable()
	out_file = test.target_output()
	err_file = test.error_log()
	cmp_file = test.expect()
	in_file = test.input()

	# prepare the proper directories
	test.prepare()

	# build, skipping to next test on error
	with Timed() as comp_dur:
		make_ret, _ = make( test.target(), redirects = ("2> %s 1> /dev/null" % out_file), error_file = err_file )

	# if the make command succeeds continue, otherwise skip to diff
	run_dur = None
	if success(make_ret):
		with Timed() as run_dur:
			if settings.dry_run or is_exe(exe_file):
				# run test
				retcode, _ = sh(exe_file, output=out_file, input=in_file, timeout=True)
			else :
				# simply cat the result into the output
				retcode = cat(exe_file, out_file)
	else:
		retcode = mv(err_file, out_file)

	if success(retcode):
		if settings.generating :
			# if we are only generating the output, we still need to check that the test actually exists
			if no_rule(out_file, test.target()) :
				retcode = 1
				error = "\t\tNo make target for test %s!" % test.target()
				rm(out_file)
			else:
				error = None
		else :
			# fetch return code and error from the diff command
			retcode, error = diff(cmp_file, out_file)

	else:
		with open (out_file, "r") as myfile:
			error = myfile.read()

		ret, info = core_info(exe_file)
		error = error + info

	# clean the executable
	rm(exe_file)

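	# hand back the status code, any error text, and the two durations
	# (run duration is None when the build failed)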
	return retcode, error, [comp_dur.duration, run_dur.duration if run_dur else None]

# run a single test and handle the errors, outputs, printing, exception handling, etc.
def run_test_worker(t) :
	try :
		# print formatted name
		name_txt = '{0:{width}} '.format(t.target(), width=settings.output_width)

		retcode, error, duration = run_single_test(t)

		# update output based on current action
		result_txt = TestResult.toString( retcode, duration )

		# print result with error if needed
		text = name_txt + result_txt
		out = sys.stdout
		if error :
			text = text + "\n" + error
			out = sys.stderr

		print(text, file = out)
		sys.stdout.flush()
		sys.stderr.flush()

		return retcode != TestResult.SUCCESS
	except KeyboardInterrupt:
		return False

# run the given list of tests with the given parameters
def run_tests(tests, jobs) :
	# clean the sandbox from previous commands
	make('clean', redirects = '> /dev/null 2>&1')

	# create the executor for our jobs and handle the signals properly
	pool = multiprocessing.Pool(jobs)

	# for each test to run
	try :
		results = pool.map_async(
			run_test_worker,
			tests,
			chunksize = 1
		).get(settings.timeout.total)
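		# note: .get(timeout) enforces the global timeout; it raises
		# multiprocessing.TimeoutError if the tests do not all finish in time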
	except KeyboardInterrupt:
		pool.terminate()
		print("Tests interrupted by user")
		sys.exit(1)

	# clean the workspace
	make('clean', redirects = '> /dev/null 2>&1')

	for failed in results:
		if failed :
			return 1

	return 0


################################################################################
# main loop
################################################################################
if __name__ == "__main__":

	# parse the command line arguments
	options = parse_args()

	# init global settings
	settings.init( options )

	# fetch the list of all valid tests
	all_tests = list_tests( options.include, options.exclude )

	# if the user wants all tests, then no other treatment of the test list is required
	if options.all or options.list or options.list_comp or options.include :
		tests = all_tests

	# otherwise we need to validate that the test list that was entered is valid
	else :
		tests = valid_tests( options )

	# make sure we have at least some tests to run
	if not tests :
		print('ERROR: No valid test to run', file=sys.stderr)
		sys.exit(1)

	# sort the tests alphabetically for convenience
	tests.sort(key=lambda t: (t.arch if t.arch else '') + t.target())

	# users may want to simply list the tests
	if options.list_comp :
		print("-h --help --debug --dry-run --list --arch --all --regenerate-expected --install --timeout --global-timeout -j --jobs ", end='')
		print(" ".join(map(lambda t: "%s" % (t.target()), tests)))

	elif options.list :
		print("Listing for %s:%s" % (settings.arch.string, settings.debug.string))
		fancy_print("\n".join(map(lambda t: t.toString(), tests)))

	else :
		# check that the build configuration works
		settings.prep_output(tests)
		settings.validate()

		options.jobs, forceJobs = job_count( options, tests )
		settings.update_make_cmd(forceJobs, options.jobs)

		print('%s (%s:%s) on %i cores' % (
			'Regenerate tests' if settings.generating else 'Running',
			settings.arch.string,
			settings.debug.string,
			options.jobs
		))

		# run all tests and make sure to return the correct error code
		sys.exit( run_tests(tests, options.jobs) )