source: tests/test.py @ a2f2fda

Last change on this file was a2f2fda, checked in by Thierry Delisle <tdelisle@…>, 3 years ago

Finished support for running tests for new and old ast

  • Property mode set to 100755
File size: 14.9 KB
#!/usr/bin/python3

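# Test driver for the Cforall test suite: discovers the .expect files in the
# source tree, builds and runs the corresponding tests, and diffs their output
# against the expected results.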
from pybin.tools import *
from pybin.test_run import *
from pybin import settings

import argparse
import itertools
import re
import sys
import tempfile
import time

import os
import psutil
import signal

################################################################################
#               help functions
################################################################################

def find_tests():
        expected = []

        def match_test(path):
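                # expect files live under <SRCDIR>/<path>/.expect/ and are named
                # <test>[.nast|.oast][.<arch>].txt, where the optional suffixes select
                # the AST version and the target architecture respectively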
                match = re.search("^%s\/([\w\/\-_]*).expect\/([\w\-_]+)(\.nast|\.oast)?(\.[\w\-_]+)?\.txt$" % settings.SRCDIR, path)
                if match :
                        test = Test()
                        test.name = match.group(2)
                        test.path = match.group(1)
                        test.arch = match.group(4)[1:] if match.group(4) else None

                        astv = match.group(3)[1:] if match.group(3) else None
                        if astv == 'oast':
                                test.astv = 'old'
                        elif astv == 'nast':
                                test.astv = 'new'
                        elif astv:
                                print('ERROR: "%s", expect file has an ast version but it is not "nast" or "oast"' % path, file=sys.stderr)
                                sys.exit(1)

                        expected.append(test)

        path_walk( match_test )

        return expected

# reads the directory ./.expect and identifies the tests
def list_tests( includes, excludes ):
        # tests directly in the .expect folder will always be processed
        test_list = find_tests()

        # if we have a limited number of includes, filter by them
        if includes:
                test_list = [x for x in test_list if
                        x.target().startswith( tuple(includes) )
                ]

        # if we have folders to exclude, filter by them
        if excludes:
                test_list = [x for x in test_list if not
                        x.target().startswith( tuple(excludes) )
                ]

        # sort the tests alphabetically for convenience
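        # the '~' prefix sorts after alphanumeric characters, pushing the
        # architecture-specific tests to the end of the list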
        test_list.sort(key=lambda t: ('~' if t.arch else '') + t.target() + (t.arch if t.arch else ''))

        return test_list
[efc15918]69
[c07d724]70# from the found tests, filter all the valid tests/desired tests
[5bf1f3e]71def valid_tests( options ):
[c07d724]72        tests = []
73
74        # if we are regenerating the tests we need to find the information of the
75        # already existing tests and create new info for the new tests
76        if options.regenerate_expected :
77                for testname in options.tests :
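                        # resolve the given name to a normalized path inside the source directory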
                        testname = os.path.normpath( os.path.join(settings.SRCDIR, testname) )

                        # first check if this is a valid name to regenerate
                        if Test.valid_name(testname):
                                # this is a valid name, let's check if it already exists
                                found = [test for test in all_tests if canonical_path( test.target() ) == testname]
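                                # enumerate every (arch, ast) combination requested on the command
                                # line; None stands for the unspecified (current host) configuration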
                                setup = itertools.product(settings.all_arch if options.arch else [None], settings.all_ast if options.ast else [None])
                                if not found:
                                        # it's a new name, create it according to the name and specified architecture/ast version
                                        tests.extend( [Test.new_target(testname, arch, ast) for arch, ast in setup] )
                                elif len(found) == 1 and not found[0].arch:
                                        # we found a single test, the user better be wanting to create a cross platform test
                                        if options.arch:
                                                print('ERROR: "%s", test has no specified architecture but --arch was specified, ignoring it' % testname, file=sys.stderr)
                                        elif options.ast:
                                                print('ERROR: "%s", test has no specified ast version but --ast was specified, ignoring it' % testname, file=sys.stderr)
                                        else:
                                                tests.append( found[0] )
                                else:
                                        # this test is already cross platform, just add a test for each platform the user asked for
                                        tests.extend( [Test.new_target(testname, arch, ast) for arch, ast in setup] )

                                        # print a warning if the user didn't ask for a specific architecture
                                        if not options.arch:
                                                print('WARNING: "%s", test has architecture specific expected files but --arch was not specified, regenerating only for current host' % testname, file=sys.stderr)

                                        # print a warning if the user didn't ask for a specific ast version
                                        if not options.ast:
                                                print('WARNING: "%s", test has ast version specific expected files but --ast was not specified, regenerating only for current ast' % testname, file=sys.stderr)

                        else :
                                print('ERROR: "%s", tests are not allowed to end with a C/C++/CFA extension, ignoring it' % testname, file=sys.stderr)

        else :
                # otherwise we only need to validate that all tests are present in the complete list
                for testname in options.tests:
                        test = [t for t in all_tests if path_cmp( t.target(), testname )]

                        if test :
                                tests.extend( test )
                        else :
                                print('ERROR: No expected file for test %s, ignoring it' % testname, file=sys.stderr)

        return tests

# parse the command line options
def parse_args():
        # create a parser with the arguments for the tests script
        parser = argparse.ArgumentParser(description='Script which runs cforall tests')
        parser.add_argument('--ast', help='Test for a specific ast version', type=comma_separated(str), default=None)
        parser.add_argument('--arch', help='Test for a specific architecture', type=comma_separated(str), default=None)
        parser.add_argument('--debug', help='Run all tests in debug or release', type=comma_separated(yes_no), default='yes')
        parser.add_argument('--install', help='Run all tests based on installed binaries or tree binaries', type=comma_separated(yes_no), default='no')
        parser.add_argument('--continue', help='When multiple specifications are passed (debug/install/arch), sets whether or not to continue if the last specification failed', type=yes_no, default='yes', dest='continue_')
        parser.add_argument('--timeout', help='Maximum duration in seconds after which a single test is considered to have timed out', type=int, default=120)
        parser.add_argument('--global-timeout', help='Maximum cumulative duration in seconds after which ALL tests are considered to have timed out', type=int, default=7200)
        parser.add_argument('--timeout-with-gdb', help='Instead of killing the command when it times out, orphan it and print the process id to allow gdb to attach', type=yes_no, default="no")
        parser.add_argument('--dry-run', help='Don\'t run the tests, only output the commands', action='store_true')
        parser.add_argument('--list', help='List all available tests', action='store_true')
        parser.add_argument('--all', help='Run all available tests', action='store_true')
        parser.add_argument('--regenerate-expected', help='Regenerate the .expect files by running the specified tests, can be used with the --all option', action='store_true')
        parser.add_argument('--archive-errors', help='If called with a valid path, on test crashes the test script will copy the core dump and the executable to the specified path', type=str, default='')
        parser.add_argument('-j', '--jobs', help='Number of tests to run simultaneously', type=int)
        parser.add_argument('--list-comp', help='List all valid arguments', action='store_true')
        parser.add_argument('-I','--include', help='Directory of tests to include, can be used multiple times, all if omitted', action='append')
        parser.add_argument('-E','--exclude', help='Directory of tests to exclude, can be used multiple times, none if omitted', action='append')
        parser.add_argument('tests', metavar='test', type=str, nargs='*', help='a list of tests to run')

        try:
                options = parser.parse_args()
        except:
                print('ERROR: invalid arguments', file=sys.stderr)
                parser.print_help(sys.stderr)
                sys.exit(1)

        # the script must have at least some tests to run or be listing
        listing    = options.list or options.list_comp
        all_tests  = options.all
        some_tests = len(options.tests) > 0
        some_dirs  = len(options.include) > 0 if options.include else 0

        # check that at least one of the booleans is set to true
        if not sum( (listing, all_tests, some_tests, some_dirs) ) > 0 :
                print('''ERROR: must have option '--all', '--list', '--include', '-I' or non-empty test list''', file=sys.stderr)
                parser.print_help()
                sys.exit(1)

        return options

################################################################################
#               running test functions
################################################################################
def success(val):
        return val == 0 or settings.dry_run

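# detect the case where make has no rule for the requested target, i.e. the
# test does not actually exist in the build system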
def no_rule(file, target):
        return not settings.dry_run and file_contains_only(file, "make: *** No rule to make target `%s'.  Stop." % target)

# logic to run a single test and return the result (no handling of printing or other test framework logic)
def run_single_test(test):

        # find the output file based on the test name and options flag
        exe_file = test.target_executable()
        out_file = test.target_output()
        err_file = test.error_log()
        cmp_file = test.expect()
        in_file  = test.input()

        # prepare the proper directories
        test.prepare()

        # ----------
        # MAKE
        # ----------
        # build, skipping to next test on error
        with Timed() as comp_dur:
                make_ret, _ = make( test.target(), output_file=subprocess.DEVNULL, error=out_file, error_file = err_file )

        # ----------
        # RUN
        # ----------
        # run everything in a temp directory to make sure core files are handled properly
        run_dur = None
        with tempdir():
                # if the make command succeeded, continue; otherwise skip to the diff
                if success(make_ret):
                        with Timed() as run_dur:
                                if settings.dry_run or is_exe(exe_file):
                                        # run the test
                                        retcode, _ = sh(exe_file, output_file=out_file, input_file=in_file, timeout=True)
                                else :
                                        # simply cat the result into the output
                                        retcode = cat(exe_file, out_file)
                else:
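                        # the build failed: use the compiler's error log as the output to diff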
                        retcode = mv(err_file, out_file)

                if success(retcode):
                        if settings.generating :
                                # if we are only generating the output we still need to check that the test actually exists
                                if no_rule(out_file, test.target()) :
                                        retcode = 1
                                        error = "\t\tNo make target for test %s!" % test.target()
                                        rm(out_file)
                                else:
                                        error = None
                        else :
                                # fetch return code and error from the diff command
                                retcode, error = diff(cmp_file, out_file)

                else:
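                        # the test failed: inline the output log, but only if it is under 1 MiB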
                        if os.stat(out_file).st_size < 1048576:
                                with open(out_file, "r", encoding='latin-1') as myfile:  # use latin-1 so all chars mean something.
                                        error = myfile.read()
                        else:
                                error = "Output log can't be read, file is bigger than 1MB, see {} for actual error\n".format(out_file)

                        ret, info = core_info(exe_file)
                        error = error + info if error else info

                        if settings.archive:
                                error = error + '\n' + core_archive(settings.archive, test.target(), exe_file)

        # clean the executable
        rm(exe_file)

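        # report the return code, the error text and the [compilation, run] durations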
        return retcode, error, [comp_dur.duration, run_dur.duration if run_dur else None]

# run a single test and handle the errors, outputs, printing, exception handling, etc.
def run_test_worker(t) :
        try :
                # print the formatted name
                name_txt = '{0:{width}}  '.format(t.target(), width=settings.output_width)

                retcode, error, duration = run_single_test(t)

                # update output based on current action
                result_txt = TestResult.toString( retcode, duration )

                # print the result with the error if needed
                text = '\t' + name_txt + result_txt
                out = sys.stdout
                if error :
                        text = text + '\n' + error

                return retcode == TestResult.SUCCESS, text
        except KeyboardInterrupt:
                return False, ""
        # except Exception as ex:
        #       print("Unexpected error in worker thread running {}: {}".format(t.target(), ex), file=sys.stderr)
        #       sys.stderr.flush()
        #       return False, ""

# run the given list of tests with the given parameters
def run_tests(tests, jobs) :
        # clean the sandbox from previous commands
        make('clean', output_file=subprocess.DEVNULL, error=subprocess.DEVNULL)

        # create the executor for our jobs
        pool = multiprocessing.Pool(jobs)

        failed = False

        # for each test to run
        try :
                num = len(tests)
                fancy = sys.stdout.isatty()
                results = pool.imap_unordered(
                        run_test_worker,
                        tests,
                        chunksize = 1
                )

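                # consume the results as workers finish them; the timed() wrapper
                # enforces the global timeout, raising TimeoutError (handled below)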
                for i, (succ, txt) in enumerate(timed(results, timeout = settings.timeout.total), 1) :
                        if not succ :
                                failed = True

                        print("       " + txt)

                        if fancy and i != num:
                                print("%d/%d" % (i, num), end='\r')
                                sys.stdout.flush()

        except KeyboardInterrupt:
                print("Tests interrupted by user", file=sys.stderr)
                pool.terminate()
                pool.join()
                failed = True
        except multiprocessing.TimeoutError:
                print("ERROR: Test suite timed out", file=sys.stderr)
                pool.terminate()
                pool.join()
                failed = True
                killgroup() # needed to cleanly kill all children

        # clean the workspace
        make('clean', output_file=subprocess.DEVNULL, error=subprocess.DEVNULL)

        return failed

################################################################################
#               main loop
################################################################################
if __name__ == "__main__":

        # parse the command line arguments
        options = parse_args()

        # init global settings
        settings.init( options )
333
[c07d724]334        # users may want to simply list the tests
335        if options.list_comp :
[2980667]336                # fetch the liest of all valid tests
337                tests = list_tests( None, None )
338
339                # print the possible options
[0f3d844]340                print("-h --help --debug --dry-run --list --arch --all --regenerate-expected --archive-errors --install --timeout --global-timeout --timeout-with-gdb -j --jobs -I --include -E --exclude --continue ", end='')
[0ad0c55]341                print(" ".join(map(lambda t: "%s" % (t.target()), tests)))
[911348cd]342
[c07d724]343        elif options.list :
[2980667]344                # fetch the liest of all valid tests
345                tests = list_tests( options.include, options.exclude )
346
347                # print the available tests
[5b993e0]348                fancy_print("\n".join(map(lambda t: t.toString(), tests)))
[911348cd]349
        else :
                # fetch the list of all valid tests
                all_tests = list_tests( options.include, options.exclude )

                # if the user wants all tests then no other treatment of the test list is required
                if options.all or options.include :
                        tests = all_tests

                # otherwise we need to validate that the test list that was entered is valid
                else :
                        tests = valid_tests( options )

                # make sure we have at least some tests to run
                if not tests :
                        print('ERROR: No valid test to run', file=sys.stderr)
                        sys.exit(1)

                # prep invariants
                settings.prep_output(tests)
                failed = 0

                # check that the expected files aren't empty
                if not options.regenerate_expected:
                        for t in tests:
                                if is_empty(t.expect()):
                                        print('WARNING: test "{}" has empty .expect file'.format(t.target()), file=sys.stderr)

                # for each build configuration, run the tests
                with Timed() as total_dur:
                        for ast, arch, debug, install in itertools.product(settings.all_ast, settings.all_arch, settings.all_debug, settings.all_install):
                                settings.ast     = ast
                                settings.arch    = arch
                                settings.debug   = debug
                                settings.install = install

                                # filter out the tests for a different architecture or ast version;
                                # tests are the same across debug/install
                                local_tests = settings.ast.filter( tests )
                                local_tests = settings.arch.filter( local_tests )
                                options.jobs, forceJobs = job_count( options, local_tests )
                                settings.update_make_cmd(forceJobs, options.jobs)

                                # check the build configuration works
                                settings.validate()

                                # print the configuration
                                print('%s %i tests on %i cores (%s:%s - %s)' % (
                                        'Regenerating' if settings.generating else 'Running',
                                        len(local_tests),
                                        options.jobs,
                                        settings.ast.string,
                                        settings.arch.string,
                                        settings.debug.string
                                ))
                                if not local_tests :
                                        print('WARNING: No tests for this configuration')
                                        continue

                                # otherwise run all the tests and make sure to return the correct error code
                                failed = run_tests(local_tests, options.jobs)
                                if failed:
                                        result = 1
                                        if not settings.continue_:
                                                break

                print('Tests took %s' % fmtDur( total_dur.duration ))
                sys.exit( failed )