#!/usr/bin/python
from __future__ import print_function

from functools import partial
from multiprocessing import Pool
from os import listdir, environ
from os.path import isfile, join, splitext
from pybin.tools import *

import argparse
import multiprocessing
import os
import re
import signal
import sys

################################################################################
# helper functions
################################################################################

# describes a single test: its name and its path inside the .expect folder
class Test:
    def __init__(self, name, path):
        self.name, self.path = name, path

class TestResult:
    SUCCESS = 0
    FAILURE = 1
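    # 124 mirrors the exit status reported by timeout(1) when a test runs too long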
    TIMEOUT = 124

# builds a dummy program and inspects the binary to find the machine type (32-bit / 64-bit)
def getMachineType():
    sh('echo "void ?{}(int&a,int b){}int main(){return 0;}" > .dummy.c')
    ret, out = sh("make .dummy -s", print2stdout=True)

    if ret != 0:
        print("Failed to identify architecture:")
        print(out)
        print("Stopping")
        rm( (".dummy.c",".dummy") )
        sys.exit(1)

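    # 'file' prints a line such as "ELF 64-bit LSB executable"; extract the bit width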
    _, out = sh("file .dummy", print2stdout=False)
    rm( (".dummy.c",".dummy") )

    return re.search(r"ELF\s([0-9]+)-bit", out).group(1)

def listTestsFolder(folder) :
    path = ('./.expect/%s/' % folder) if folder else './.expect/'
    subpath = "%s/" % folder if folder else ""

    # each .txt file in the folder names a test; build a list (not an iterator)
    # so the caller can concatenate the results
    names = [splitext(f)[0] for f in listdir( path )
             if not f.startswith('.') and f.endswith('.txt')]
    return [Test(name, subpath + name) for name in names]

# reads the directory ./.expect and identifies the tests
def listTests( concurrent ):
    machineType = getMachineType()

    # tests directly in the .expect folder will always be processed
    generic_list = listTestsFolder( "" )

    # tests in the machineType folder will be run only on the matching architecture
    typed_list = listTestsFolder( machineType )

    # tests in the concurrent folder will be run only if concurrency is enabled
    concurrent_list = listTestsFolder( "concurrent" ) if concurrent else []

    # combine the three lists
    return generic_list + typed_list + concurrent_list

# from the discovered tests, keep only the valid/requested ones
def validTests( options ):
    tests = []

    # if we are regenerating the tests we need to reuse the information of the
    # already existing tests and create new info for the new tests
    if options.regenerate_expected :
        for testname in options.tests :
            if testname.endswith( (".c", ".cc", ".cpp") ):
                print('ERROR: "%s": test names may not end with a C/C++/CFA extension, ignoring it' % testname, file=sys.stderr)
            else :
                found = [test for test in allTests if test.name == testname]
                tests.append( found[0] if len(found) == 1 else Test(testname, testname) )

    else :
        # otherwise we only need to validate that every requested test is present in the complete list
        for testname in options.tests:
            test = [t for t in allTests if t.name == testname]

            if len(test) != 0 :
                tests.append( test[0] )
            else :
                print('ERROR: No expected file for test %s, ignoring it' % testname, file=sys.stderr)

    # make sure we have at least one test to run
    if len(tests) == 0 :
        print('ERROR: No valid test to run', file=sys.stderr)
        sys.exit(1)

    return tests

# parses the command-line options
def getOptions():
    # create a parser with the arguments for the test script
    parser = argparse.ArgumentParser(description='Script which runs cforall tests')
    parser.add_argument('--debug', help='Run all tests in debug or release mode', type=yes_no, default='no')
    parser.add_argument('--concurrent', help='Run concurrent tests', type=yes_no, default='yes')
    parser.add_argument('--dry-run', help='Do not run the tests, only output the commands', action='store_true')
    parser.add_argument('--list', help='List all available tests', action='store_true')
    parser.add_argument('--all', help='Run all available tests', action='store_true')
    parser.add_argument('--regenerate-expected', help='Regenerate the .expect files by running the specified tests; can be used with the --all option', action='store_true')
    parser.add_argument('-j', '--jobs', help='Number of tests to run simultaneously', type=int, default=8)
    parser.add_argument('--list-comp', help='List all valid arguments', action='store_true')
    parser.add_argument('tests', metavar='test', type=str, nargs='*', help='a list of tests to run')
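
    # typical invocations (script and test names below are illustrative):
    #   ./test.py --all
    #   ./test.py --debug=yes some_test
    #   ./test.py --regenerate-expected --all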

    options = parser.parse_args()

    # script must have at least some tests to run or be listing
    listing = options.list or options.list_comp
    all_tests = options.all
    some_tests = len(options.tests) > 0

    # check that exactly one of the booleans is set to true
    if sum( (listing, all_tests, some_tests) ) != 1 :
        print("ERROR: must have option '--all', '--list' or a non-empty test list", file=sys.stderr)
        parser.print_help()
        sys.exit(1)

    return options

def jobCount( options ):
    # check if the script was invoked from a make that passed a job count through its jobserver
    make_flags = environ.get('MAKEFLAGS')
    make_jobs_fds = re.search(r"--jobserver-(auth|fds)=\s*([0-9]+),([0-9]+)", make_flags) if make_flags else None
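    # every byte on the jobserver pipe is a job token: read them to count the
    # available jobs, then write them back so other sub-makes are not starved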
    if make_jobs_fds :
        tokens = os.read(int(make_jobs_fds.group(2)), 1024)
        options.jobs = len(tokens)
        os.write(int(make_jobs_fds.group(3)), tokens)
    else :
        options.jobs = multiprocessing.cpu_count()

    # make sure the resulting job count is valid
    if options.jobs <= 0 :
        print('ERROR: Invalid number of jobs', file=sys.stderr)
        sys.exit(1)

    return min( options.jobs, len(tests) ), bool(make_flags)

################################################################################
# running test functions
################################################################################
# logic to run a single test and return the result (no printing or other test framework logic)
def run_single_test(test, generate, dry_run, debug):

    # find the output file based on the test name and the option flags
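    # when regenerating, write the output straight into the expected-output file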
    out_file = (".out/%s.log" % test.name) if not generate else (".expect/%s.txt" % test.path)
    err_file = ".err/%s.log" % test.name

    # remove any outputs from previous tests to prevent side effects
    rm( (out_file, err_file, test.name), dry_run )

    options = "-debug" if debug else "-nodebug"

    # build the test; on failure the error log becomes the reported output
    make_ret, _ = sh("""%s test=yes DEBUG_FLAGS="%s" %s 2> %s 1> /dev/null""" % (make_cmd, options, test.name, err_file), dry_run)

    retcode = 0
    error = None

    # if the make command succeeds, run the test; otherwise fall through to the diff
    if make_ret == 0 :
        # fetch optional input
        stdinput = "< .in/%s.txt" % test.name if isfile(".in/%s.txt" % test.name) else ""

        if fileIsExecutable(test.name) :
            # run the test
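            # timeout(1) kills the test after 60 seconds and exits with status 124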
            retcode, _ = sh("timeout 60 ./%s %s > %s 2>&1" % (test.name, stdinput, out_file), dry_run)
        else :
            # the test is not executable, simply cat the result into the output
            sh("cat %s > %s" % (test.name, out_file), dry_run)

    else :
        # the build failed: make the error log the test output
        sh("mv %s %s" % (err_file, out_file), dry_run)

    if retcode == 0:
        if generate :
            # if we are only generating the output we still need to check that the test actually exists
            if not dry_run and fileContainsOnly(out_file, "make: *** No rule to make target `%s'. Stop." % test.name) :
                retcode = 1
                error = "\t\tNo make target for test %s!" % test.name
                sh("rm %s" % out_file, False)
        else :
            # fetch the return code and error from the diff command
            retcode, error = diff(".expect/%s.txt" % test.path, ".out/%s.log" % test.name, dry_run)

    else:
        with open (out_file, "r") as myfile:
            error = myfile.read()

    # clean the executable
    sh("rm -f %s > /dev/null 2>&1" % test.name, dry_run)

    return retcode, error

# run a single test and handle the errors, outputs, printing, exception handling, etc.
def run_test_worker(t, generate, dry_run, debug) :

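    # restore default SIGINT handling while the test runs so Ctrl-C is not swallowed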
    signal.signal(signal.SIGINT, signal.SIG_DFL)
    # format the test name for printing
    name_txt = "%20s " % t.name

    retcode, error = run_single_test(t, generate, dry_run, debug)

    # update the output based on the current action
    if generate :
        if retcode == TestResult.SUCCESS: result_txt = "Done"
        elif retcode == TestResult.TIMEOUT: result_txt = "TIMEOUT"
        else : result_txt = "ERROR code %d" % retcode
    else :
        if retcode == TestResult.SUCCESS: result_txt = "PASSED"
        elif retcode == TestResult.TIMEOUT: result_txt = "TIMEOUT"
        else : result_txt = "FAILED with code %d" % retcode

    # print the result, with the error if needed
    text = name_txt + result_txt
    out = sys.stdout
    if error :
        text = text + "\n" + error
        out = sys.stderr

    print(text, file = out)
    sys.stdout.flush()
    sys.stderr.flush()
    signal.signal(signal.SIGINT, signal.SIG_IGN)

    return retcode != TestResult.SUCCESS

# run the given list of tests with the given parameters
def run_tests(tests, generate, dry_run, jobs, debug) :
    # clean the sandbox from previous runs
    sh("%s clean > /dev/null 2>&1" % make_cmd, dry_run)

    # make sure the required folders are present
    sh('mkdir -p .out .expect .err', dry_run)

    if generate :
        print( "Regenerate tests for: " )

    # create the executor for our jobs and handle SIGINT properly
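    # ignore SIGINT while the pool forks so the workers inherit SIG_IGN;
    # the parent's handler is restored immediately afterwards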
    original_sigint_handler = signal.signal(signal.SIGINT, signal.SIG_IGN)
    pool = Pool(jobs)
    signal.signal(signal.SIGINT, original_sigint_handler)

    # for each test to run
    try :
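        # use map_async().get() with a timeout rather than a bare map() so the
        # main process can still receive KeyboardInterrupt while it waits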
        results = pool.map_async(partial(run_test_worker, generate=generate, dry_run=dry_run, debug=debug), tests, chunksize = 1 ).get(7200)
    except KeyboardInterrupt:
        pool.terminate()
        print("Tests interrupted by user")
        sys.exit(1)

    # clean the workspace
    sh("%s clean > /dev/null 2>&1" % make_cmd, dry_run)

    # report failure if any test failed
    return 1 if any(results) else 0


################################################################################
# main loop
################################################################################
if __name__ == "__main__":
    # always run from the same folder
    chdir()

    # parse the command line arguments
    options = getOptions()

    # fetch the list of all valid tests
    allTests = listTests( options.concurrent )

    # if the user wants all tests then no other treatment of the test list is required
    if options.all or options.list or options.list_comp :
        tests = allTests

    else :
        # otherwise validate that the entered test list is valid
        tests = validTests( options )

    # sort the tests alphabetically for convenience
    tests.sort(key=lambda t: t.name)

    # users may want to simply list the tests
    if options.list_comp :
        print("-h --help --debug --concurrent --dry-run --list --all --regenerate-expected -j --jobs ", end='')
        print(" ".join(t.name for t in tests))

    elif options.list :
        print("\n".join("%s (%s)" % (t.name, t.path) for t in tests))

    else :
        options.jobs, forceJobs = jobCount( options )

        print('Running (%s) on %i cores' % ("debug" if options.debug else "no debug", options.jobs))
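        # when launched from a parent make with a jobserver, let the sub-make
        # coordinate jobs through it; otherwise pass an explicit -j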
        make_cmd = "make" if forceJobs else ("make -j%i" % options.jobs)

        # run all tests and make sure to return the correct error code
        sys.exit( run_tests(tests, options.regenerate_expected, options.dry_run, options.jobs, options.debug) )