source: src/tests/test.py @ 240e1ff

Last change on this file since 240e1ff was c07d724, checked in by Thierry Delisle <tdelisle@…>, 7 years ago

Cleaned-up test script, notably by creating a pybin folder and a tools script inside it

#!/usr/bin/python
from __future__ import print_function

from functools import partial
from multiprocessing import Pool
from os import listdir, environ
from os.path import isfile, join, splitext
from pybin.tools import *

import argparse
import multiprocessing
import os
import re
import signal
import sys
################################################################################
#               helper functions
################################################################################

# Test class that defines what a test is
class Test:
    def __init__(self, name, path):
        self.name, self.path = name, path

# compiles a dummy program and inspects the resulting binary to find the machine type (32-bit / 64-bit)
def getMachineType():
        sh('echo "void ?{}(int*a,int b){}int main(){return 0;}" > .dummy.c')
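        # note: "?{}" is Cforall's constructor operator, so this dummy program
        # also exercises CFA-specific syntax rather than plain C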
        ret, out = sh("make .dummy -s", print2stdout=True)

        if ret != 0:
                print("Failed to identify architecture:")
                print(out)
                print("Stopping")
                rm( (".dummy.c",".dummy") )
                sys.exit(1)

        _, out = sh("file .dummy", print2stdout=False)
        rm( (".dummy.c",".dummy") )

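        # `file` typically prints a description like:
        #   .dummy: ELF 64-bit LSB executable, x86-64, version 1 (SYSV), ...
        # the regex below pulls the "64" (or "32") out of that description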
        return re.search(r"ELF\s([0-9]+)-bit", out).group(1)

def listTestsFolder(folder) :
        path = ('./.expect/%s/' % folder) if folder else './.expect/'
        subpath = "%s/" % folder if folder else ""

        # every non-hidden .txt file in the folder is an expected output, i.e. one test
        return map(lambda fname: Test(fname, subpath + fname),
                [splitext(f)[0] for f in listdir( path )
                if not f.startswith('.') and f.endswith('.txt')
                ])
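
# for example (hypothetical test names), listTestsFolder("concurrent") maps the file
#   .expect/concurrent/coroutine.txt  to  Test("coroutine", "concurrent/coroutine")
# while listTestsFolder("") maps .expect/vector.txt to Test("vector", "vector")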

# reads the directory ./.expect and identifies the tests
def listTests( concurrent ):
        machineType = getMachineType()

        # tests directly in the .expect folder will always be processed
        generic_list = listTestsFolder( "" )

        # tests in the machineType folder will be run only for the corresponding machine type
        typed_list = listTestsFolder( machineType )

        # tests in the concurrent folder will be run only if concurrency is enabled
        concurrent_list = listTestsFolder( "concurrent" ) if concurrent else []

        # append the three lists to get the full set of tests
        return generic_list + typed_list + concurrent_list
68
69# from the found tests, filter all the valid tests/desired tests
70def validTests( options ):
71        tests = []
72
73        # if we are regenerating the tests we need to find the information of the
74        # already existing tests and create new info for the new tests
75        if options.regenerate_expected :
76                for testname in options.tests :
77                        if testname.endswith( (".c", ".cc", ".cpp") ):
78                                print('ERROR: "%s", tests are not allowed to end with a C/C++/CFA extension, ignoring it' % testname, file=sys.stderr)
79                        else :
80                                found = [test for test in allTests if test.name == testname]
81                                tests.append( found[0] if len(found) == 1 else Test(testname, testname) )
82
83        else :
84                # otherwise we only need to validate that all tests are present in the complete list
85                for testname in options.tests:
86                        test = [t for t in allTests if t.name == testname]
87
88                        if len(test) != 0 :
89                                tests.append( test[0] )
90                        else :
91                                print('ERROR: No expected file for test %s, ignoring it' % testname, file=sys.stderr)
92
93        # make sure we have at least some test to run
94        if len(tests) == 0 :
95                print('ERROR: No valid test to run', file=sys.stderr)
96                sys.exit(1)
97
98        return tests

# parses the command line options
def getOptions():
        # create a parser with the arguments for the tests script
        parser = argparse.ArgumentParser(description='Script which runs cforall tests')
        parser.add_argument('--debug', help='Run the tests in debug (yes) or release (no) mode', type=yes_no, default='no')
        parser.add_argument('--concurrent', help='Run the concurrent tests', type=yes_no, default='yes')
        parser.add_argument('--dry-run', help='Don\'t run the tests, only output the commands', action='store_true')
        parser.add_argument('--list', help='List all available tests', action='store_true')
        parser.add_argument('--all', help='Run all available tests', action='store_true')
        parser.add_argument('--regenerate-expected', help='Regenerate the .expect files by running the specified tests, can be used with the --all option', action='store_true')
        parser.add_argument('-j', '--jobs', help='Number of tests to run simultaneously', type=int, default=8)
        parser.add_argument('--list-comp', help='List all valid arguments', action='store_true')
        parser.add_argument('tests', metavar='test', type=str, nargs='*', help='a list of tests to run')

        options = parser.parse_args()

        # the script must have at least some tests to run or be listing them
        listing    = options.list or options.list_comp
        all_tests  = options.all
        some_tests = len(options.tests) > 0

        # check that exactly one of the booleans is set to true
        if not sum( (listing, all_tests, some_tests) ) == 1 :
                print('ERROR: must have option \'--all\', \'--list\' or a non-empty test list', file=sys.stderr)
                parser.print_help()
                sys.exit(1)

        return options
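
# typical invocations (the test names here are hypothetical):
#   ./test.py --all                        run every available test
#   ./test.py --debug=yes vector math1     run two specific tests in debug mode
#   ./test.py --regenerate-expected --all  rebuild every .expect file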

def jobCount( options ):
        # check if a make jobserver is already controlling how many jobs run in parallel
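        # GNU make advertises its jobserver through MAKEFLAGS, e.g. "--jobserver-auth=3,4":
        # every token read from the first file descriptor is one available job slot, and
        # the tokens must be written back to the second descriptor so make does not lose them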
        make_flags = environ.get('MAKEFLAGS')
        make_jobs_fds = re.search(r"--jobserver-(auth|fds)=\s*([0-9]+),([0-9]+)", make_flags) if make_flags else None
        if make_jobs_fds :
                tokens = os.read(int(make_jobs_fds.group(2)), 1024)
                options.jobs = len(tokens)
                os.write(int(make_jobs_fds.group(3)), tokens)
        else :
                options.jobs = multiprocessing.cpu_count()

        # make sure we have a valid number of jobs that corresponds to user input
        if options.jobs <= 0 :
                print('ERROR: Invalid number of jobs', file=sys.stderr)
                sys.exit(1)

        # never use more jobs than there are tests; also report whether make controls the job count
        return min( options.jobs, len(tests) ), bool(make_flags)

################################################################################
#               running test functions
################################################################################
# runs a single test and returns the result (no printing or other test-framework logic here)
def run_single_test(test, generate, dry_run, debug):

        # find the output file based on the test name and the options flag
        out_file = (".out/%s.log" % test.name) if not generate else (".expect/%s.txt" % test.path)
        err_file = ".err/%s.log" % test.name

        # remove any output from the previous tests to prevent side effects
        rm( (out_file, test.name), dry_run )

        options = "-debug" if debug else "-nodebug"

        # build the test, redirecting build errors to the error log
        make_ret, _ = sh("""%s test=yes EXTRA_FLAGS="-quiet %s" %s 2> %s 1> /dev/null""" % (make_cmd, options, test.name, err_file), dry_run)

        # if the make command succeeds continue, otherwise skip to the diff
        if make_ret == 0 :
                # fetch optional input
                stdinput = "< .in/%s.txt" % test.path if isfile(".in/%s.txt" % test.path) else ""

                if fileIsExecutable(test.name) :
                        # run the test
                        sh("./%s %s > %s 2>&1" % (test.name, stdinput, out_file), dry_run)
                else :
                        # simply cat the result into the output
                        sh("cat %s > %s" % (test.name, out_file), dry_run)

        else :
                # the build failed, use the error log as the test output
                sh("mv %s %s" % (err_file, out_file), dry_run)

        retcode = 0
        error = None

        if generate :
                # when only generating the output we still need to check that the test actually exists
                if not dry_run and fileContainsOnly(out_file, "make: *** No rule to make target `%s'.  Stop." % test.name) :
                        retcode = 1
                        error = "\t\tNo make target for test %s!" % test.name
                        sh("rm %s" % out_file, False)

        else :
                # fetch the return code and error from the diff command
                retcode, error = diff(".expect/%s.txt" % test.path, ".out/%s.log" % test.name, dry_run)

        # clean the executable
        sh("rm -f %s > /dev/null 2>&1" % test.name, dry_run)

        return retcode, error

# run a single test and handle the errors, outputs, printing, exception handling, etc.
def run_test_worker(t, generate, dry_run, debug) :

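        # restore the default Ctrl-C behaviour, so an interrupt can stop the test while it runs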
        signal.signal(signal.SIGINT, signal.SIG_DFL)

        # print the formatted name
        name_txt = "%20s  " % t.name

        # run the test instance and collect the result
        test_failed, error = run_single_test(t, generate, dry_run, debug)

        # update the output based on the current action
        if generate :
                failed_txt = "ERROR"
                success_txt = "Done"
        else :
                failed_txt = "FAILED"
                success_txt = "PASSED"

        # print the result, with the error if needed
        text = name_txt + (failed_txt if test_failed else success_txt)
        out = sys.stdout
        if error :
                text = text + "\n" + error
                out = sys.stderr

        print(text, file = out)
        sys.stdout.flush()
        sys.stderr.flush()

        # go back to ignoring Ctrl-C while the worker is idle between tests
        signal.signal(signal.SIGINT, signal.SIG_IGN)

        return test_failed

# run the given list of tests with the given parameters
def run_tests(tests, generate, dry_run, jobs, debug) :
        # clean the sandbox from previous commands
        sh("%s clean > /dev/null 2>&1" % make_cmd, dry_run)

        # make sure the required folders are present
        sh('mkdir -p .out .expect .err', dry_run)

        if generate :
                print( "Regenerate tests for: " )

        # create the executor for our jobs and handle the signals properly
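        # SIGINT is ignored while the pool forks so the workers inherit SIG_IGN and
        # Ctrl-C is delivered only to this parent process, which can then clean up;
        # the original handler is restored immediately after the fork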
        original_sigint_handler = signal.signal(signal.SIGINT, signal.SIG_IGN)
        pool = Pool(jobs)
        signal.signal(signal.SIGINT, original_sigint_handler)

        # for each test to run
        try :
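                # the large explicit timeout on get() is a known workaround: a plain
                # blocking get() would never deliver KeyboardInterrupt to this process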
                results = pool.map_async(partial(run_test_worker, generate=generate, dry_run=dry_run, debug=debug), tests ).get(3600)
        except KeyboardInterrupt:
                pool.terminate()
                print("Tests interrupted by user")
                sys.exit(1)

        # clean the workspace
        sh("%s clean > /dev/null 2>&1" % make_cmd, dry_run)

        for failed in results:
                if failed :
                        return 1

        return 0


################################################################################
#               main loop
################################################################################
if __name__ == "__main__":
        # always run from the same folder
        chdir()

        # parse the command line arguments
        options = getOptions()

        # fetch the list of all valid tests
        allTests = listTests( options.concurrent )

        # if the user wants all tests then no other treatment of the test list is required
        if options.all or options.list or options.list_comp :
                tests = allTests

        else :
                # otherwise we need to validate that the test list that was entered is valid
                tests = validTests( options )

        # sort the tests alphabetically for convenience
        tests.sort(key=lambda t: t.name)

        # users may want to simply list the tests
        if options.list_comp :
                print("-h --help --debug --concurrent --dry-run --list --all --regenerate-expected -j --jobs ", end='')
                print(" ".join(map(lambda t: "%s" % (t.name), tests)))

        elif options.list :
                print("\n".join(map(lambda t: "%s (%s)" % (t.name, t.path), tests)))

        else :
                options.jobs, forceJobs = jobCount( options )

                print('Running (%s) on %i cores' % ("debug" if options.debug else "no debug", options.jobs))
                make_cmd = "make" if forceJobs else ("make -j%i" % options.jobs)

                # run all the tests and make sure to return the correct error code
                sys.exit( run_tests(tests, options.regenerate_expected, options.dry_run, options.jobs, options.debug) )