source: src/tests/test.py @ 02559df

Last change on this file since 02559df was 02559df, checked in by Thierry Delisle <tdelisle@…>, 3 years ago

Apparently automake doesn't actually work without dependency tracking, so disable them by hand in the tests instead

#!/usr/bin/python
from __future__ import print_function

from pybin.tools import *
from pybin.test_run import *
from pybin import settings

import argparse
import re
import sys
import time

################################################################################
#               help functions
################################################################################

def findTests():
	expected = []

	def matchTest(path):
		match = re.search(r"%s/([\w\/\-_]*)\.expect/([\w\-_]+)(\.[\w\-_]+)?\.txt" % settings.SRCDIR, path)
		if match :
			test = Test()
			test.name = match.group(2)
			test.path = match.group(1)
			test.arch = match.group(3)[1:] if match.group(3) else None
			if settings.arch.match(test.arch):
				expected.append(test)

	pathWalk( matchTest )

	return expected

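# for illustration (paths hypothetical), the pattern above maps
#   <SRCDIR>/concurrent/.expect/coroutine.txt     -> name 'coroutine', arch None
#   <SRCDIR>/concurrent/.expect/coroutine.x64.txt -> name 'coroutine', arch 'x64'
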
# reads the directory ./.expect and identifies the tests
def listTests( includes, excludes ):
	includes = [canonicalPath( i ) for i in includes] if includes else None
	excludes = [canonicalPath( i ) for i in excludes] if excludes else None

	# tests directly in the .expect folder will always be processed
	test_list = findTests()

	# if we have a limited number of includes, filter by them
	if includes:
		test_list = [x for x in test_list if
			x.target().startswith( tuple(includes) )
		]

	# if we have folders to exclude, filter by them
	if excludes:
		test_list = [x for x in test_list if not
			x.target().startswith( tuple(excludes) )
		]

	return test_list

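# for illustration (paths hypothetical): with includes=['concurrent'], a test
# whose target is 'concurrent/coroutine' is kept while 'errors/completeType' is
# dropped, since filtering is a simple path-prefix match on the canonical path
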
# from the found tests, filter the valid/desired tests
def validTests( options ):
	tests = []

	# if we are regenerating the tests we need to find the information of the
	# already existing tests and create new info for the new tests
	if options.regenerate_expected :
		for testname in options.tests :
			testname = canonicalPath( testname )
			if Test.valid_name(testname):
				found = [test for test in allTests if canonicalPath( test.target() ) == testname]
				tests.append( found[0] if len(found) == 1 else Test.from_target(testname) )
			else :
				print('ERROR: "%s", tests are not allowed to end with a C/C++/CFA extension, ignoring it' % testname, file=sys.stderr)

	else :
		# otherwise we only need to validate that all tests are present in the complete list
		for testname in options.tests:
			test = [t for t in allTests if pathCmp( t.target(), testname )]

			if test :
				tests.append( test[0] )
			else :
				print('ERROR: No expected file for test %s, ignoring it' % testname, file=sys.stderr)

	return tests

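# for illustration (test name hypothetical): './test.py --regenerate-expected foo'
# creates a fresh Test entry for 'foo' if none exists yet, while './test.py foo'
# is rejected unless an .expect file for 'foo' was found
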
# parse the options
def getOptions():
	# create a parser with the arguments for the tests script
	parser = argparse.ArgumentParser(description='Script which runs cforall tests')
	parser.add_argument('--debug', help='Run all tests in debug or release', type=yes_no, default='no')
	parser.add_argument('--arch', help='Test for specific architecture', type=str, default='')
	parser.add_argument('--dry-run', help='Don\'t run the tests, only output the commands', action='store_true')
	parser.add_argument('--list', help='List all available tests', action='store_true')
	parser.add_argument('--all', help='Run all available tests', action='store_true')
	parser.add_argument('--regenerate-expected', help='Regenerate the .expect files by running the specified tests, can be used with the --all option', action='store_true')
	parser.add_argument('-j', '--jobs', help='Number of tests to run simultaneously', type=int)
	parser.add_argument('--list-comp', help='List all valid arguments', action='store_true')
	parser.add_argument('-I', '--include', help='Directory of tests to include, can be used multiple times, all if omitted', action='append')
	parser.add_argument('-E', '--exclude', help='Directory of tests to exclude, can be used multiple times, none if omitted', action='append')
	parser.add_argument('tests', metavar='test', type=str, nargs='*', help='a list of tests to run')

	options = parser.parse_args()

	# script must have at least some tests to run or be listing
	listing    = options.list or options.list_comp
	all_tests  = options.all
	some_tests = len(options.tests) > 0
	some_dirs  = len(options.include) > 0 if options.include else False

	# check that at least one of these modes is selected
	if not sum( (listing, all_tests, some_tests, some_dirs) ) > 0 :
		print('ERROR: must have option \'--all\', \'--list\', \'--include\', \'-I\' or a non-empty test list', file=sys.stderr)
		parser.print_help()
		sys.exit(1)

	return options

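# typical invocations (illustrative only):
#   ./test.py --all --debug=yes          # run every test against the debug build
#   ./test.py -I concurrent -j 8         # run the tests under concurrent/ on 8 jobs
#   ./test.py --regenerate-expected foo  # regenerate the .expect file of test 'foo'
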
################################################################################
#               running test functions
################################################################################
# fix the absolute paths in the output
def fixoutput( fname ):
	if not is_ascii(fname):
		return

	file_replace(fname, "%s/" % settings.SRCDIR, "")

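# for illustration (path hypothetical): fixoutput turns a line such as
# "/u/someone/cforall/src/tests/io.cfa:3 error" into "io.cfa:3 error",
# keeping .expect files independent of where the tree is checked out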

# logic to run a single test and return the result (no handling of printing or other test framework logic)
def run_single_test(test):

	# find the output file based on the test name and options flag
	exe_file = test.target_executable()
	out_file = test.target_output()
	err_file = test.error_log()
	cmp_file = test.expect()
	in_file  = test.input()

	# prepare the proper directories
	test.prepare()

	# build, skipping to next test on error
	before = time.time()
	make_ret, _ = make( test.target(),
		redirects  = "2> %s 1> /dev/null" % out_file,
		error_file = err_file
	)
	after = time.time()

	comp_dur = after - before

	run_dur = None

	# if the make command succeeds continue, otherwise skip to diff
	if make_ret == 0 or settings.dry_run:
		before = time.time()
		if settings.dry_run or fileIsExecutable(exe_file) :
			# run test
			retcode, _ = sh("timeout 60 %s > %s 2>&1" % (exe_file, out_file), input = in_file)
		else :
			# simply cat the result into the output
			retcode, _ = sh("cat %s > %s" % (exe_file, out_file))

		after = time.time()
		run_dur = after - before
	else:
		retcode, _ = sh("mv %s %s" % (err_file, out_file))

	if retcode == 0:
		if settings.generating :
			# if we are only generating the output we still need to check that the test actually exists
			if not settings.dry_run and fileContainsOnly(out_file, "make: *** No rule to make target `%s'.  Stop." % test.target()) :
				retcode = 1
				error = "\t\tNo make target for test %s!" % test.target()
				sh("rm %s" % out_file, False)
			else:
				error = None
		else :
			# fetch return code and error from the diff command
			fixoutput(out_file)
			retcode, error = diff(cmp_file, out_file)

	else:
		with open (out_file, "r") as myfile:
			error = myfile.read()

	# clean the executable
	sh("rm -f %s > /dev/null 2>&1" % test.target())

	return retcode, error, [comp_dur, run_dur]

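# note: run_dur stays None when the build fails, since the test binary never runs
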
# run a single test and handle the errors, outputs, printing, exception handling, etc.
def run_test_worker(t) :

	with SignalHandling():
		# print formatted name
		name_txt = "%20s  " % t.name

		retcode, error, duration = run_single_test(t)

		# update output based on current action
		result_txt = TestResult.toString( retcode, duration )

		# print result with error if needed
		text = name_txt + result_txt
		out = sys.stdout
		if error :
			text = text + "\n" + error
			out = sys.stderr

		print(text, file = out)
		sys.stdout.flush()
		sys.stderr.flush()

	return retcode != TestResult.SUCCESS

# run the given list of tests with the given parameters
def run_tests(tests, jobs) :
	# clean the sandbox from previous commands
	make('clean', redirects = '> /dev/null 2>&1')

	# automake doesn't clean the dependencies so do it by hand
	sh("find %s -type d -name .deps -delete" % settings.BUILDDIR)

	# create the executor for our jobs and handle the signal properly
	pool = setupPool(jobs)

	# for each test to run; map_async + get(timeout) rather than a plain
	# blocking map so KeyboardInterrupt is delivered while the workers run
	try :
		results = pool.map_async(
			run_test_worker,
			tests,
			chunksize = 1
		).get(7200)
	except KeyboardInterrupt:
		pool.terminate()
		print("Tests interrupted by user")
		sys.exit(1)

	# clean the workspace
	make('clean', redirects = '> /dev/null 2>&1')

	for failed in results:
		if failed :
			return 1

	return 0


################################################################################
#               main loop
################################################################################
if __name__ == "__main__":

	# parse the command line arguments
	options = getOptions()

	# init global settings
	settings.init( options )

	# fetch the list of all valid tests
	allTests = listTests( options.include, options.exclude )

	# if the user wants all tests then no other treatment of the test list is required
	if options.all or options.list or options.list_comp or options.include :
		tests = allTests

	# otherwise we need to validate that the test list that was entered is valid
	else :
		tests = validTests( options )

	# make sure we have at least some tests to run
	if not tests :
		print('ERROR: No valid test to run', file=sys.stderr)
		sys.exit(1)

	# sort the tests alphabetically for convenience
	tests.sort(key=lambda t: (t.arch if t.arch else '') + t.target())

	# users may want to simply list the tests
	if options.list_comp :
		print("-h --help --debug --dry-run --list --arch --all --regenerate-expected -j --jobs ", end='')
		print(" ".join(map(lambda t: "%s" % (t.target()), tests)))

	elif options.list :
		print("Listing for %s:%s" % (settings.arch.string, settings.debug.string))
		fancy_print("\n".join(map(lambda t: "%s" % (t.toString()), tests)))

	else :
		options.jobs, forceJobs = jobCount( options, tests )
		settings.updateMakeCmd(forceJobs, options.jobs)

		print('%s (%s:%s) on %i cores' % (
			'Regenerate tests' if settings.generating else 'Running',
			settings.arch.string,
			settings.debug.string,
			options.jobs
		))

		# run all tests and make sure to return the correct error code
		sys.exit( run_tests(tests, options.jobs) )