source: tests/test.py @ 5307c33

Last change on this file since 5307c33 was f7d3215, checked in by Thierry Delisle <tdelisle@…>, 6 years ago

Hopefully more robust fix for relative vs absolute paths in tests

#!/usr/bin/python
from __future__ import print_function

from pybin.tools import *
from pybin.test_run import *
from pybin import settings

import argparse
import re
import sys
import time

################################################################################
#               helper functions
################################################################################

def findTests():
	expected = []

	def matchTest(path):
		match = re.search("%s\/([\w\/\-_]*).expect\/([\w\-_]+)(\.[\w\-_]+)?\.txt" % settings.SRCDIR, path)
		if match :
			test = Test()
			test.name = match.group(2)
			test.path = match.group(1)
			test.arch = match.group(3)[1:] if match.group(3) else None
			if settings.arch.match(test.arch):
				expected.append(test)

	pathWalk( matchTest )

	return expected

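# For illustration (hypothetical path): with settings.SRCDIR "/cfa/tests", the
# path "/cfa/tests/concurrency/.expect/coroutine.x64.txt" matches with the
# subdirectory "concurrency/" as group(1), the test name "coroutine" as
# group(2) and the architecture suffix ".x64" as group(3)
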
# reads the directory ./.expect and identifies the tests
def listTests( includes, excludes ):
	includes = [canonicalPath( i ) for i in includes] if includes else None
	excludes = [canonicalPath( i ) for i in excludes] if excludes else None

	# tests directly in the .expect folder will always be processed
	test_list = findTests()

	# if we have a limited number of includes, filter by them
	if includes:
		test_list = [x for x in test_list if
			x.target().startswith( tuple(includes) )
		]

	# if we have folders to exclude, filter by them
	if excludes:
		test_list = [x for x in test_list if not
			x.target().startswith( tuple(excludes) )
		]

	return test_list

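# For example (illustrative directory name): listTests( ['concurrency'], None )
# keeps only tests whose target starts with the canonical "concurrency" path,
# while listTests( None, ['concurrency'] ) keeps everything but those
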
# from the found tests, keep only the valid/desired tests
def validTests( options ):
	tests = []

	# if we are regenerating the tests we need to find the information of the
	# already existing tests and create new info for the new tests
	if options.regenerate_expected :
		for testname in options.tests :
			testname = canonicalPath( testname )
			if Test.valid_name(testname):
				found = [test for test in allTests if canonicalPath( test.target() ) == testname]
				tests.append( found[0] if len(found) == 1 else Test.from_target(testname) )
			else :
				print('ERROR: "%s", tests are not allowed to end with a C/C++/CFA extension, ignoring it' % testname, file=sys.stderr)

	else :
		# otherwise we only need to validate that all tests are present in the complete list
		for testname in options.tests:
			test = [t for t in allTests if pathCmp( t.target(), testname )]

			if test :
				tests.append( test[0] )
			else :
				print('ERROR: No expected file for test %s, ignoring it' % testname, file=sys.stderr)

	return tests

# parses the command line options
def getOptions():
	# create a parser with the arguments for the tests script
	parser = argparse.ArgumentParser(description='Script which runs cforall tests')
	parser.add_argument('--debug', help='Run all tests in debug or release', type=yes_no, default='yes')
	parser.add_argument('--install', help='Run all tests based on installed binaries or tree binaries', type=yes_no, default='no')
	parser.add_argument('--arch', help='Test for specific architecture', type=str, default='')
	parser.add_argument('--timeout', help='Maximum duration in seconds after which a single test is considered to have timed out', type=int, default=60)
	parser.add_argument('--global-timeout', help='Maximum cumulative duration in seconds after which ALL tests are considered to have timed out', type=int, default=7200)
	parser.add_argument('--dry-run', help='Don\'t run the tests, only output the commands', action='store_true')
	parser.add_argument('--list', help='List all available tests', action='store_true')
	parser.add_argument('--all', help='Run all available tests', action='store_true')
	parser.add_argument('--regenerate-expected', help='Regenerate the .expect files by running the specified tests, can be used with the --all option', action='store_true')
	parser.add_argument('-j', '--jobs', help='Number of tests to run simultaneously', type=int)
	parser.add_argument('--list-comp', help='List all valid arguments', action='store_true')
	parser.add_argument('-I','--include', help='Directory of tests to include, can be used multiple times, all if omitted', action='append')
	parser.add_argument('-E','--exclude', help='Directory of tests to exclude, can be used multiple times, none if omitted', action='append')
	parser.add_argument('tests', metavar='test', type=str, nargs='*', help='a list of tests to run')

	try:
		options = parser.parse_args()
	except:
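		# parse_args() raises SystemExit on bad arguments; the bare except
		# also catches that, so we can print our own error and the usage below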
		print('ERROR: invalid arguments', file=sys.stderr)
		parser.print_help(sys.stderr)
		sys.exit(1)

	# script must have at least some tests to run or be listing
	listing    = options.list or options.list_comp
	all_tests  = options.all
	some_tests = len(options.tests) > 0
	some_dirs  = len(options.include) > 0 if options.include else 0

	# check that at least one of the booleans is set to true
	if not sum( (listing, all_tests, some_tests, some_dirs) ) > 0 :
		print('ERROR: must have option \'--all\', \'--list\', \'--include\', \'-I\' or non-empty test list', file=sys.stderr)
		parser.print_help()
		sys.exit(1)

	return options

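# Example invocations (test and directory names are illustrative):
#   ./test.py --all --debug=no                run every test against the release build
#   ./test.py -I concurrency -j 4             run the concurrency directory on 4 cores
#   ./test.py --regenerate-expected vector    rebuild vector's .expect file
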
################################################################################
#               running test functions
################################################################################
# fix the absolute paths in the output
def fixoutput( fname ):
	if not is_ascii(fname):
		return

	file_replace(fname, "%s/" % settings.SRCDIR, "")


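# For example (hypothetical output line): with settings.SRCDIR "/cfa/tests",
# "error at /cfa/tests/io/fail.cfa:10" becomes "error at io/fail.cfa:10",
# keeping expected output independent of where the tree was checked out
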
# logic to run a single test and return the result (no handling of printing or other test framework logic)
def run_single_test(test):

	# find the output file based on the test name and options flag
	exe_file = test.target_executable()
	out_file = test.target_output()
	err_file = test.error_log()
	cmp_file = test.expect()
	in_file  = test.input()

	# prepare the proper directories
	test.prepare()

	# build, skipping to next test on error
	before = time.time()
	make_ret, _ = make( test.target(),
		redirects  = "2> %s 1> /dev/null" % out_file,
		error_file = err_file
	)
	after = time.time()

	comp_dur = after - before

	run_dur = None

	# if the make command succeeds continue, otherwise skip to diff
	if make_ret == 0 or settings.dry_run:
		before = time.time()
		if settings.dry_run or fileIsExecutable(exe_file) :
			# run test
			retcode, _ = sh("timeout %d %s > %s 2>&1" % (settings.timeout.single, exe_file, out_file), input = in_file)
		else :
			# simply cat the result into the output
			retcode, _ = sh("cat %s > %s" % (exe_file, out_file))

		after = time.time()
		run_dur = after - before
	else:
		retcode, _ = sh("mv %s %s" % (err_file, out_file))


	if retcode == 0:
		# fixoutput(out_file)
		if settings.generating :
			# if we are only generating the output we still need to check that the test actually exists
			if not settings.dry_run and fileContainsOnly(out_file, "make: *** No rule to make target `%s'.  Stop." % test.target()) :
				retcode = 1
				error = "\t\tNo make target for test %s!" % test.target()
				sh("rm %s" % out_file, False)
			else:
				error = None
		else :
			# fetch return code and error from the diff command
			retcode, error = diff(cmp_file, out_file)

	else:
		with open (out_file, "r") as myfile:
			error = myfile.read()


	# clean the executable
	sh("rm -f %s > /dev/null 2>&1" % test.target())

	return retcode, error, [comp_dur, run_dur]

# run a single test and handle the errors, outputs, printing, exception handling, etc.
def run_test_worker(t) :

	with SignalHandling():
		# print formatted name
		name_txt = "%20s  " % t.name

		retcode, error, duration = run_single_test(t)

		# update output based on current action
		result_txt = TestResult.toString( retcode, duration )

		# print result with error if needed
		text = name_txt + result_txt
		out = sys.stdout
		if error :
			text = text + "\n" + error
			out = sys.stderr

		print(text, file = out)
		sys.stdout.flush()
		sys.stderr.flush()

	return retcode != TestResult.SUCCESS

# run the given list of tests with the given parameters
def run_tests(tests, jobs) :
	# clean the sandbox from previous commands
	make('clean', redirects = '> /dev/null 2>&1')

	# create the executor for our jobs and handle the signal properly
	pool = setupPool(jobs)

	# for each test to run
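	# note: map_async(...).get(timeout) is used rather than a plain pool.map;
	# in Python 2 a bare map() can block in a way that swallows Ctrl-C, while
	# get() with a timeout lets the KeyboardInterrupt below be delivered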
	try :
		results = pool.map_async(
			run_test_worker,
			tests,
			chunksize = 1
		).get(settings.timeout.total)
	except KeyboardInterrupt:
		pool.terminate()
		print("Tests interrupted by user")
		sys.exit(1)

	# clean the workspace
	make('clean', redirects = '> /dev/null 2>&1')

	for failed in results:
		if failed :
			return 1

	return 0


################################################################################
#               main loop
################################################################################
if __name__ == "__main__":

	# parse the command line arguments
	options = getOptions()

	# init global settings
	settings.init( options )

	# fetch the list of all valid tests
	allTests = listTests( options.include, options.exclude )

	# if the user wants all tests then no other treatment of the test list is required
	if options.all or options.list or options.list_comp or options.include :
		tests = allTests

	# otherwise we need to validate that the test list that was entered is valid
	else :
		tests = validTests( options )

	# make sure we have at least some tests to run
	if not tests :
		print('ERROR: No valid test to run', file=sys.stderr)
		sys.exit(1)


	# sort the tests alphabetically for convenience
	tests.sort(key=lambda t: (t.arch if t.arch else '') + t.target())

	# users may want to simply list the tests
	if options.list_comp :
		print("-h --help --debug --dry-run --list --arch --all --regenerate-expected --install --timeout --global-timeout -j --jobs ", end='')
		print(" ".join(map(lambda t: "%s" % (t.target()), tests)))

	elif options.list :
		print("Listing for %s:%s" % (settings.arch.string, settings.debug.string))
		fancy_print("\n".join(map(lambda t: "%s" % (t.toString()), tests)))

	else :
		# check the build configuration works
		settings.validate()

		options.jobs, forceJobs = jobCount( options, tests )
		settings.updateMakeCmd(forceJobs, options.jobs)

		print('%s (%s:%s) on %i cores' % (
			'Regenerate tests' if settings.generating else 'Running',
			settings.arch.string,
			settings.debug.string,
			options.jobs
		))

		# run all the tests and make sure to return the correct error code
		sys.exit( run_tests(tests, options.jobs) )