source: tests/test.py @ 0e398ad

Last change on this file since 0e398ad was a51b8f6, checked in by Thierry Delisle <tdelisle@…>, 2 years ago

Fix bug with removal of old ast.

#!/usr/bin/python3

from pybin.tools import *
from pybin.test_run import *
from pybin import settings

import argparse
import itertools
import re
import sys
import tempfile
import time

import os
import signal

################################################################################
#               helper functions
################################################################################

def find_tests():
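        # scan the source tree and return the list of Test objects discovered,
        # one entry per expected-output file found under a .expect directory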
        expected = []

        def match_test(path):
                match = re.search(r"^%s\/([\w\/\-_]*).expect\/([\w\-_\+]+)(\.[\w\-_]+)?\.txt$" % settings.SRCDIR, path)
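                # illustrative example (hypothetical path): for
                # "<SRCDIR>/concurrent/.expect/coroutine.x64.txt" the groups give
                # roughly path='concurrent/', name='coroutine' and arch='x64'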
                if match :
                        test = Test()
                        test.name = match.group(2)
                        test.path = match.group(1)
                        test.arch = match.group(3)[1:] if match.group(3) else None

                        expected.append(test)

        path_walk( match_test )

        return expected

# reads the directory ./.expect and identifies the tests
def list_tests( includes, excludes ):
        # tests directly in the .expect folder will always be processed
        test_list = find_tests()

        # if we have a limited number of includes, filter by them
        if includes:
                test_list = [x for x in test_list if
                        x.target().startswith( tuple(includes) )
                ]

        # if we have folders to exclude, filter by them
        if excludes:
                test_list = [x for x in test_list if not
                        x.target().startswith( tuple(excludes) )
                ]

        # sort the tests alphabetically for convenience
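        # ('~' sorts after letters in ASCII, so architecture-specific tests come last)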
        test_list.sort(key=lambda t: ('~' if t.arch else '') + t.target() + (t.arch if t.arch else ''))

        return test_list

# from the found tests, keep only the valid/desired tests
def valid_tests( options ):
        tests = []

        # if we are regenerating the tests we need to find the information of the
        # already existing tests and create new info for the new tests
        if options.regenerate_expected :
                for testname in options.tests :
                        testname = os.path.normpath( os.path.join(settings.SRCDIR, testname) )

                        # first check if this is a valid name to regenerate
                        if Test.valid_name(testname):
                                # this is a valid name, let's check if it already exists
                                found = [test for test in all_tests if canonical_path( test.target() ) == testname]
                                setup = settings.all_arch if options.arch else [None]
                                if not found:
                                        # it's a new name, create it according to the name and specified architecture
                                        tests.extend( [Test.new_target(testname, arch) for arch in setup] )
                                elif len(found) == 1 and not found[0].arch:
                                        # we found a single test; the user presumably wants to make it cross-platform
                                        if options.arch:
                                                print('ERROR: "%s", test has no specified architecture but --arch was specified, ignoring it' % testname, file=sys.stderr)
                                        else:
                                                tests.append( found[0] )
                                else:
                                        # this test is already cross-platform, just add a test for each platform the user asked for
                                        tests.extend( [Test.new_target(testname, arch) for arch in setup] )

                                        # print a warning if the user didn't ask for a specific architecture
                                        found_arch = [f.arch for f in found if f.arch]
                                        if found_arch and not options.arch:
                                                print('WARNING: "%s", test has architecture specific expected files but --arch was not specified, regenerating only for current host' % testname, file=sys.stderr)

                        else :
                                print('ERROR: "%s", tests are not allowed to end with a C/C++/CFA extension, ignoring it' % testname, file=sys.stderr)

        else :
                # otherwise we only need to validate that all tests are present in the complete list
                for testname in options.tests:
                        test = [t for t in all_tests if path_cmp( t.target(), testname )]

                        if test :
                                tests.extend( test )
                        else :
                                print('ERROR: No expected file for test %s, ignoring it' % testname, file=sys.stderr)

        return tests

# parse the command-line options
def parse_args():
        # create a parser with the arguments for the tests script
        parser = argparse.ArgumentParser(description='Script which runs cforall tests')
        parser.add_argument('--arch', help='Test for specific architecture', type=comma_separated(str), default=None)
        parser.add_argument('--debug', help='Run all tests in debug or release', type=comma_separated(yes_no), default='yes')
        parser.add_argument('--install', help='Run all tests based on installed binaries or tree binaries', type=comma_separated(yes_no), default='no')
        parser.add_argument('--continue', help='When multiple specifications are passed (debug/install/arch), sets whether or not to continue if the last specification failed', type=yes_no, default='yes', dest='continue_')
        parser.add_argument('--timeout', help='Maximum duration in seconds after which a single test is considered to have timed out', type=int, default=180)
        parser.add_argument('--global-timeout', help='Maximum cumulative duration in seconds after which ALL tests are considered to have timed out', type=int, default=7200)
        parser.add_argument('--timeout-with-gdb', help='Instead of killing the command when it times out, orphan it and print process id to allow gdb to attach', type=yes_no, default="no")
        parser.add_argument('--dry-run', help='Don\'t run the tests, only output the commands', action='store_true')
        parser.add_argument('--list', help='List all available tests', action='store_true')
        parser.add_argument('--all', help='Run all available tests', action='store_true')
        parser.add_argument('--regenerate-expected', help='Regenerate the .expect files by running the specified tests; can be used with the --all option', action='store_true')
        parser.add_argument('--archive-errors', help='If called with a valid path, on test crashes the test script will copy the core dump and the executable to the specified path.', type=str, default='')
        parser.add_argument('-j', '--jobs', help='Number of tests to run simultaneously, 0 (default) for unlimited', nargs='?', const=0, type=int)
        parser.add_argument('--list-comp', help='List all valid arguments', action='store_true')
        parser.add_argument('--list-dist', help='List all tests for distribution', action='store_true')
        parser.add_argument('-I','--include', help='Directory of tests to include; can be used multiple times, all if omitted', action='append')
        parser.add_argument('-E','--exclude', help='Directory of tests to exclude; can be used multiple times, none if omitted', action='append')
        parser.add_argument('tests', metavar='test', type=str, nargs='*', help='a list of tests to run')

        try:
                options = parser.parse_args()
        except:
                print('ERROR: invalid arguments', file=sys.stderr)
                parser.print_help(sys.stderr)
                sys.exit(1)

        # script must have at least some tests to run or be listing
        listing    = options.list or options.list_comp or options.list_dist
        all_tests  = options.all
        some_tests = len(options.tests) > 0
        some_dirs  = len(options.include) > 0 if options.include else 0

        # check that at least one of the booleans is set to true
        if not sum( (listing, all_tests, some_tests, some_dirs) ) > 0 :
                print('''ERROR: must have option '--all', '--list', '--include', '-I' or non-empty test list''', file=sys.stderr)
                parser.print_help()
                sys.exit(1)

        return options

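# Example invocations (illustrative; directory and test names are hypothetical):
#   ./test.py --all
#   ./test.py -I concurrent --timeout 120
#   ./test.py --regenerate-expected --arch x64 sometest
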
################################################################################
#               running test functions
################################################################################
def success(val):
        return val == 0 or settings.dry_run

def no_rule(file, target):
        return not settings.dry_run and file_contains_only(file, "make: *** No rule to make target `%s'.  Stop." % target)

# logic to run a single test and return the result (no handling of printing or other test-framework logic)
def run_single_test(test):
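        # returns (retcode, error_text, [compile_duration, run_duration]);
        # run_duration is None when the build failed and the test never ran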

        # find the output file based on the test name and options flag
        exe_file = test.target_executable()
        out_file = test.target_output()
        err_file = test.error_log()
        cmp_file = test.expect()
        in_file  = test.input()

        # prepare the proper directories
        test.prepare()

        # ----------
        # MAKE
        # ----------
        # build, skipping to next test on error
        with Timed() as comp_dur:
                make_ret, _, _ = make( test.target(), output_file=subprocess.DEVNULL, error=out_file, error_file = err_file )

        # ----------
        # RUN
        # ----------
        # run everything in a temp directory to make sure core files are handled properly
        run_dur = None
        with tempdir():
                # if the make command succeeded continue, otherwise skip to the diff
                if success(make_ret):
                        with Timed() as run_dur:
                                if settings.dry_run or is_exe(exe_file):
                                        # run test
                                        retcode, _, _ = sh(exe_file, output_file=out_file, input_file=in_file, timeout=True, nice=True)
                                else :
                                        # simply cat the result into the output
                                        retcode = cat(exe_file, out_file)
                else:
                        retcode = mv(err_file, out_file)

                if success(retcode):
                        if settings.generating :
                                # if we are only generating the output we still need to check that the test actually exists
                                if no_rule(out_file, test.target()) :
                                        retcode = 1
                                        error = "\t\tNo make target for test %s!" % test.target()
                                        rm(out_file)
                                else:
                                        error = None
                        else :
                                # fetch return code and error from the diff command
                                retcode, error, _ = diff(cmp_file, out_file)

                else:
                        if os.stat(out_file).st_size < 1048576:
                                with open (out_file, "r", encoding='latin-1') as myfile:  # use latin-1 so all chars mean something.
                                        error = myfile.read()
                        else:
                                error = "Output log can't be read, file is bigger than 1MB, see {} for actual error\n".format(out_file)

                        ret, info = core_info(exe_file)
                        error = error + info if error else info

                        if settings.archive:
                                error = error + '\n' + core_archive(settings.archive, test.target(), exe_file)

        # clean the executable
        rm(exe_file)

        return retcode, error, [comp_dur.duration, run_dur.duration if run_dur else None]

# run a single test and handle the errors, outputs, printing, exception handling, etc.
def run_test_worker(t) :
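        # returns (success, result_key, text); result_key is tallied by the
        # result counters in run_tests and text is the formatted output line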
        try :
                # print formatted name
                name_txt = t.format_target(width=settings.output_width) + '  '

                retcode, error, duration = run_single_test(t)

                # update output based on current action
                result_key, result_txt = TestResult.toString( retcode, duration )

                # print result with error if needed
                text = '\t' + name_txt + result_txt
                out = sys.stdout
                if error :
                        text = text + '\n' + error

                return retcode == TestResult.SUCCESS, result_key, text
        except KeyboardInterrupt:
                return False, 'keybrd', ""
        # except Exception as ex:
        #       print("Unexpected error in worker thread running {}: {}".format(t.target(), ex), file=sys.stderr)
        #       sys.stderr.flush()
        #       return False, ""

# run the given list of tests with the given parameters
def run_tests(tests, jobs) :
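        # returns True if any test failed; the caller uses this as the exit status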
        # clean the sandbox from previous commands
        make('clean', output_file=subprocess.DEVNULL, error=subprocess.DEVNULL)

        # create the executor for our jobs
        pool = multiprocessing.Pool(jobs)

        failed = False
        rescnts = { 'pass': 0, 'fail': 0, 'time': 0, 'keybrd': 0 }
        other = 0

        # for each test to run
        try :
                num = len(tests)
                fancy = sys.stdout.isatty()
                results = pool.imap_unordered(
                        run_test_worker,
                        tests,
                        chunksize = 1
                )

                for i, (succ, code, txt) in enumerate(timed(results, timeout = settings.timeout.total), 1) :
                        if code in rescnts.keys():
                                rescnts[code] += 1
                        else:
                                other += 1

                        if not succ :
                                failed = True

                        print("       " + txt)

                        if fancy and i != num:
                                print("%d/%d" % (i, num), end='\r')
                                sys.stdout.flush()

        except KeyboardInterrupt:
                print("Tests interrupted by user", file=sys.stderr)
                pool.terminate()
                pool.join()
                failed = True
        except multiprocessing.TimeoutError:
                print("ERROR: Test suite timed out", file=sys.stderr)
                pool.terminate()
                pool.join()
                failed = True
                killgroup() # needed to cleanly kill all children

        # clean the workspace
        make('clean', output_file=subprocess.DEVNULL, error=subprocess.DEVNULL)

        print("{} passes, {} failures, {} timeouts, {} cancelled, {} other".format(rescnts['pass'], rescnts['fail'], rescnts['time'], rescnts['keybrd'], other))

        return failed


################################################################################
#               main loop
################################################################################
if __name__ == "__main__":

        # parse the command line arguments
        options = parse_args()

        # init global settings
        settings.init( options )

        # --------------------------------------------------
        # list all the tests for auto-completion programs
        # not pretty, single line, with the command line options
        if options.list_comp :
                # fetch the list of all valid tests
                tests = list_tests( None, None )

                # print the possible options
                print("-h --help --debug --dry-run --list --arch --all --regenerate-expected --archive-errors --install --timeout --global-timeout --timeout-with-gdb -j --jobs -I --include -E --exclude --continue ", end='')
                print(" ".join(map(lambda t: "%s" % (t.target()), tests)))

                # done
                sys.exit(0)

        # --------------------------------------------------
        # list all the tests needed for distribution
        if options.list_dist :
                # fetch the list of all valid tests
                tests = list_tests( None, None )

                for t in tests:
                        print(os.path.relpath(t.expect(), settings.SRCDIR), end=' ')
                        print(os.path.relpath(t.input() , settings.SRCDIR), end=' ')
                        code, out, err = make_recon(t.target())

                        if code != 0:
                                print('ERROR: recon failed for test {}: {} \'{}\''.format(t.target(), code, err), file=sys.stderr)
                                sys.exit(1)

                        print(' '.join(re.findall(r'([^\s]+\.cfa)', out)), end=' ')

                print('')

                # done
                sys.exit(0)


        # --------------------------------------------------
        # list all the tests for users, in a pretty format
        if options.list :
                # fetch the list of all valid tests
                tests = list_tests( options.include, options.exclude )

                # print the available tests
                fancy_print("\n".join(map(lambda t: t.toString(), tests)))

                # done
                sys.exit(0)

        # fetch the list of all valid tests
        all_tests = list_tests( options.include, options.exclude )

        # if the user wants all tests then no other treatment of the test list is required
        if options.all or options.include :
                tests = all_tests

        # otherwise we need to validate that the entered test list is valid
        else :
                tests = valid_tests( options )

        # make sure we have at least some tests to run
        if not tests :
                print('ERROR: No valid test to run', file=sys.stderr)
                sys.exit(1)

        # prep invariants
        settings.prep_output(tests)
        failed = 0

        # check that the expected files aren't empty
        if not options.regenerate_expected:
                for t in tests:
                        if is_empty(t.expect()):
                                print('WARNING: test "{}" has empty .expect file'.format(t.target()), file=sys.stderr)

        options.jobs = job_count( options )

        # for each build configuration, run the tests
        with Timed() as total_dur:
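                # e.g. (illustrative) --arch=x64,x86 --debug=yes,no yields four
                # configurations, run in sequence: x64/debug, x64/release,
                # x86/debug and x86/release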
                for arch, debug, install in itertools.product(settings.all_arch, settings.all_debug, settings.all_install):
                        settings.arch    = arch
                        settings.debug   = debug
                        settings.install = install

                        # filter out the tests for a different architecture
                        # tests are the same across debug/install
                        local_tests = settings.arch.filter( tests )

                        # check that the build configuration works
                        settings.validate()
                        jobs = min(options.jobs, len(local_tests))

                        # print configuration
                        print('%s %i tests on %i cores (%s - %s)' % (
                                'Regenerating' if settings.generating else 'Running',
                                len(local_tests),
                                jobs,
                                settings.arch.string,
                                settings.debug.string
                        ))
                        if not local_tests :
                                print('WARNING: No tests for this configuration')
                                continue

                        # otherwise run all tests and make sure to return the correct error code
                        failed = run_tests(local_tests, jobs)
                        if failed:
                                if not settings.continue_:
                                        break

        print('Tests took %s' % fmtDur( total_dur.duration ))
        sys.exit( failed )