source: tests/test.py @ 474d610

Last change on this file since 474d610 was a468e1e9, checked in by Thierry Delisle <tdelisle@…>, 10 months ago

Test script now supports --list-dist which lists all the files needed for tests.
Added dist-hook using --list-dist.

  • Property mode set to 100755
File size: 16.0 KB
#!/usr/bin/python3

from pybin.tools import *
from pybin.test_run import *
from pybin import settings

import argparse
import itertools
import re
import sys
import tempfile
import time

import os
import psutil
import signal

################################################################################
#               help functions
################################################################################

def find_tests():
        expected = []

        def match_test(path):
                match = re.search(r"^%s/([\w/\-_]*)\.expect/([\w\-_]+)(\.nast|\.oast)?(\.[\w\-_]+)?\.txt$" % settings.SRCDIR, path)
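                # illustrative decomposition of a hypothetical expect file name,
                # e.g. "<SRCDIR>/io/.expect/cat.nast.x86.txt":
                #   group(1) = directory part, group(2) = test name ("cat"),
                #   group(3) = optional AST version (".nast"/".oast"),
                #   group(4) = optional architecture suffix (".x86")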
                if match :
                        test = Test()
                        test.name = match.group(2)
                        test.path = match.group(1)
                        test.arch = match.group(4)[1:] if match.group(4) else None

                        astv = match.group(3)[1:] if match.group(3) else None
                        if astv == 'oast':
                                test.astv = 'old'
                        elif astv == 'nast':
                                test.astv = 'new'
                        elif astv:
                                print('ERROR: "%s", expect file has an AST version suffix but it is not "nast" or "oast"' % path, file=sys.stderr)
                                sys.exit(1)

                        expected.append(test)

        path_walk( match_test )

        return expected

# reads the directory ./.expect and identifies the tests
def list_tests( includes, excludes ):
        # tests directly in the .expect folder will always be processed
        test_list = find_tests()

        # if we have a limited number of includes, filter by them
        if includes:
                test_list = [x for x in test_list if
                        x.target().startswith( tuple(includes) )
                ]

        # if we have folders to exclude, filter by them
        if excludes:
                test_list = [x for x in test_list if not
                        x.target().startswith( tuple(excludes) )
                ]

        # sort the tests alphabetically for convenience
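        # note: the '~' prefix sorts after all word characters, so architecture-specific
        # tests are grouped at the end of the list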
        test_list.sort(key=lambda t: ('~' if t.arch else '') + t.target() + (t.arch if t.arch else ''))

        return test_list

# from the found tests, filter all the valid/desired tests
def valid_tests( options ):
        tests = []

        # if we are regenerating the tests we need to find the information of the
        # already existing tests and create new info for the new tests
        if options.regenerate_expected :
                for testname in options.tests :
                        testname = os.path.normpath( os.path.join(settings.SRCDIR, testname) )

                        # first check if this is a valid name to regenerate
                        if Test.valid_name(testname):
                                # this is a valid name, let's check if it already exists
                                found = [test for test in all_tests if canonical_path( test.target() ) == testname]
                                setup = itertools.product(settings.all_arch if options.arch else [None], settings.all_ast if options.ast else [None])
                                if not found:
                                        # it's a new name, create it according to the name and specified architecture/ast version
                                        tests.extend( [Test.new_target(testname, arch, ast) for arch, ast in setup] )
                                elif len(found) == 1 and not found[0].arch:
                                        # we found a single test, the user had better be creating a cross-platform test
                                        if options.arch:
                                                print('ERROR: "%s", test has no specified architecture but --arch was specified, ignoring it' % testname, file=sys.stderr)
                                        elif options.ast:
                                                print('ERROR: "%s", test has no specified ast version but --ast was specified, ignoring it' % testname, file=sys.stderr)
                                        else:
                                                tests.append( found[0] )
                                else:
                                        # this test is already cross-platform, just add a test for each platform the user asked for
                                        tests.extend( [Test.new_target(testname, arch, ast) for arch, ast in setup] )

                                        # print a warning if the user didn't ask for a specific architecture
                                        found_arch = [f.arch for f in found if f.arch]
                                        if found_arch and not options.arch:
                                                print('WARNING: "%s", test has architecture specific expected files but --arch was not specified, regenerating only for current host' % testname, file=sys.stderr)

                                        # print a warning if the user didn't ask for a specific ast version
                                        found_astv = [f.astv for f in found if f.astv]
                                        if found_astv and not options.ast:
                                                print('WARNING: "%s", test has ast version specific expected files but --ast was not specified, regenerating only for current ast' % testname, file=sys.stderr)

                        else :
                                print('ERROR: "%s", tests are not allowed to end with a C/C++/CFA extension, ignoring it' % testname, file=sys.stderr)

        else :
                # otherwise we only need to validate that all tests are present in the complete list
                for testname in options.tests:
                        test = [t for t in all_tests if path_cmp( t.target(), testname )]

                        if test :
                                tests.extend( test )
                        else :
                                print('ERROR: No expected file for test %s, ignoring it' % testname, file=sys.stderr)

        return tests

# parse the options
def parse_args():
        # create a parser with the arguments for the tests script
        parser = argparse.ArgumentParser(description='Script which runs cforall tests')
        parser.add_argument('--ast', help='Test with a specific AST version', type=comma_separated(str), default=None)
        parser.add_argument('--arch', help='Test for a specific architecture', type=comma_separated(str), default=None)
        parser.add_argument('--debug', help='Run all tests in debug or release', type=comma_separated(yes_no), default='yes')
        parser.add_argument('--install', help='Run all tests based on installed binaries or tree binaries', type=comma_separated(yes_no), default='no')
        parser.add_argument('--continue', help='When multiple specifications are passed (debug/install/arch), sets whether or not to continue if the last specification failed', type=yes_no, default='yes', dest='continue_')
        parser.add_argument('--timeout', help='Maximum duration in seconds after which a single test is considered to have timed out', type=int, default=120)
        parser.add_argument('--global-timeout', help='Maximum cumulative duration in seconds after which ALL tests are considered to have timed out', type=int, default=7200)
        parser.add_argument('--timeout-with-gdb', help='Instead of killing the command when it times out, orphan it and print the process id to allow gdb to attach', type=yes_no, default="no")
        parser.add_argument('--dry-run', help='Don\'t run the tests, only output the commands', action='store_true')
        parser.add_argument('--list', help='List all available tests', action='store_true')
        parser.add_argument('--all', help='Run all available tests', action='store_true')
        parser.add_argument('--regenerate-expected', help='Regenerate the .expect files by running the specified tests; can be used with the --all option', action='store_true')
        parser.add_argument('--archive-errors', help='If called with a valid path, on test crashes the test script will copy the core dump and the executable to the specified path.', type=str, default='')
        parser.add_argument('-j', '--jobs', help='Number of tests to run simultaneously', type=int)
        parser.add_argument('--list-comp', help='List all valid arguments', action='store_true')
        parser.add_argument('--list-dist', help='List all files needed to distribute the tests', action='store_true')
        parser.add_argument('-I','--include', help='Directory of tests to include, can be used multiple times; all if omitted', action='append')
        parser.add_argument('-E','--exclude', help='Directory of tests to exclude, can be used multiple times; none if omitted', action='append')
        parser.add_argument('tests', metavar='test', type=str, nargs='*', help='a list of tests to run')

        try:
                options = parser.parse_args()
        except:
                print('ERROR: invalid arguments', file=sys.stderr)
                parser.print_help(sys.stderr)
                sys.exit(1)

        # script must have at least some tests to run or be listing
        listing    = options.list or options.list_comp or options.list_dist
        all_tests  = options.all
        some_tests = len(options.tests) > 0
        some_dirs  = len(options.include) > 0 if options.include else 0

        # check that at least one of the booleans is set to true
        if not sum( (listing, all_tests, some_tests, some_dirs) ) > 0 :
                print('''ERROR: must have option '--all', '--list', '--include', '-I' or non-empty test list''', file=sys.stderr)
                parser.print_help()
                sys.exit(1)

        return options

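# illustrative invocations (test and directory names below are examples):
#   ./test.py --all                          run every available test
#   ./test.py -I io -E io/bad                run tests under io/, except io/bad/
#   ./test.py --arch=x86,x64 --all           run all tests for both architectures
#   ./test.py --regenerate-expected mytest   regenerate the .expect file for 'mytest'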
################################################################################
#               running test functions
################################################################################
def success(val):
        return val == 0 or settings.dry_run

def no_rule(file, target):
        return not settings.dry_run and file_contains_only(file, "make: *** No rule to make target `%s'.  Stop." % target)

# logic to run a single test and return the result (no handling of printing or other test framework logic)
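# returns (retcode, error text or None, [compile duration, run duration or None])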
def run_single_test(test):

        # find the output file based on the test name and option flags
        exe_file = test.target_executable()
        out_file = test.target_output()
        err_file = test.error_log()
        cmp_file = test.expect()
        in_file  = test.input()

        # prepare the proper directories
        test.prepare()

        # ----------
        # MAKE
        # ----------
        # build, skipping to next test on error
        with Timed() as comp_dur:
                make_ret, _ = make( test.target(), output_file=subprocess.DEVNULL, error=out_file, error_file = err_file )

        # ----------
        # RUN
        # ----------
        # run everything in a temp directory to make sure core files are handled properly
        run_dur = None
        with tempdir():
                # if the make command succeeded, continue; otherwise skip to diff
                if success(make_ret):
                        with Timed() as run_dur:
                                if settings.dry_run or is_exe(exe_file):
                                        # run test
                                        retcode, _ = sh(exe_file, output_file=out_file, input_file=in_file, timeout=True)
                                else :
                                        # simply cat the result into the output
                                        retcode = cat(exe_file, out_file)
                else:
                        retcode = mv(err_file, out_file)

                if success(retcode):
                        if settings.generating :
                                # if we are only generating the output we still need to check that the test actually exists
                                if no_rule(out_file, test.target()) :
                                        retcode = 1
                                        error = "\t\tNo make target for test %s!" % test.target()
                                        rm(out_file)
                                else:
                                        error = None
                        else :
                                # fetch return code and error from the diff command
                                retcode, error = diff(cmp_file, out_file)

                else:
                        if os.stat(out_file).st_size < 1048576:
                                with open (out_file, "r", encoding='latin-1') as myfile:  # use latin-1 so all chars mean something.
                                        error = myfile.read()
                        else:
                                error = "Output log can't be read, file is bigger than 1MB, see {} for actual error\n".format(out_file)

                        ret, info = core_info(exe_file)
                        error = error + info if error else info

                        if settings.archive:
                                error = error + '\n' + core_archive(settings.archive, test.target(), exe_file)

        # clean the executable
        rm(exe_file)

        return retcode, error, [comp_dur.duration, run_dur.duration if run_dur else None]

# run a single test and handle the errors, outputs, printing, exception handling, etc.
def run_test_worker(t) :
        try :
                # print the formatted name
                name_txt = '{0:{width}}  '.format(t.target(), width=settings.output_width)

                retcode, error, duration = run_single_test(t)

                # update output based on current action
                result_txt = TestResult.toString( retcode, duration )

                # print result with error if needed
                text = '\t' + name_txt + result_txt
                out = sys.stdout
                if error :
                        text = text + '\n' + error

                return retcode == TestResult.SUCCESS, text
        except KeyboardInterrupt:
                return False, ""
        # except Exception as ex:
        #       print("Unexpected error in worker thread running {}: {}".format(t.target(), ex), file=sys.stderr)
        #       sys.stderr.flush()
        #       return False, ""

# run the given list of tests with the given parameters
def run_tests(tests, jobs) :
        # clean the sandbox from previous commands
        make('clean', output_file=subprocess.DEVNULL, error=subprocess.DEVNULL)

        # create the executor for our jobs
        pool = multiprocessing.Pool(jobs)

        failed = False

        # for each test to run
        try :
                num = len(tests)
                fancy = sys.stdout.isatty()
                results = pool.imap_unordered(
                        run_test_worker,
                        tests,
                        chunksize = 1
                )

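                # 'timed' (from pybin) presumably wraps the result iterator with the
                # global timeout, raising multiprocessing.TimeoutError once exceeded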
                for i, (succ, txt) in enumerate(timed(results, timeout = settings.timeout.total), 1) :
                        if not succ :
                                failed = True

                        print("       " + txt)

                        if fancy and i != num:
                                print("%d/%d" % (i, num), end='\r')
                                sys.stdout.flush()

        except KeyboardInterrupt:
                print("Tests interrupted by user", file=sys.stderr)
                pool.terminate()
                pool.join()
                failed = True
        except multiprocessing.TimeoutError:
                print("ERROR: Test suite timed out", file=sys.stderr)
                pool.terminate()
                pool.join()
                failed = True
                killgroup() # needed to cleanly kill all children

        # clean the workspace
        make('clean', output_file=subprocess.DEVNULL, error=subprocess.DEVNULL)

        return failed


################################################################################
#               main loop
################################################################################
if __name__ == "__main__":

        # parse the command line arguments
        options = parse_args()

        # init global settings
        settings.init( options )

        # --------------------------------------------------
        # list all the tests for auto-completion programs
        # not pretty, single line, with the command line options
        if options.list_comp :
                # fetch the list of all valid tests
                tests = list_tests( None, None )

                # print the possible options
                print("-h --help --debug --dry-run --list --ast=new --ast=old --arch --all --regenerate-expected --archive-errors --install --timeout --global-timeout --timeout-with-gdb -j --jobs -I --include -E --exclude --continue ", end='')
                print(" ".join(map(lambda t: "%s" % (t.target()), tests)))

                # done
                sys.exit(0)

        # --------------------------------------------------
        # list all the files needed to distribute the tests
        if options.list_dist :
                # fetch the list of all valid tests
                tests = list_tests( None, None )

                for t in tests:
                        print(os.path.relpath(t.expect(), settings.SRCDIR), end=' ')
                        print(os.path.relpath(t.input() , settings.SRCDIR), end=' ')
                        code, out = make_recon(t.target())

                        if code != 0:
                                print('ERROR: recon failed for test {}'.format(t.target()), file=sys.stderr)
                                sys.exit(1)

                        print(' '.join(re.findall(r'([^\s]+\.cfa)', out)), end=' ')

                print('')

                # done
                sys.exit(0)

        # --------------------------------------------------
        # list all the tests for users, in a pretty format
        if options.list :
                # fetch the list of all valid tests
                tests = list_tests( options.include, options.exclude )

                # print the available tests
                fancy_print("\n".join(map(lambda t: t.toString(), tests)))

                # done
                sys.exit(0)

        # fetch the list of all valid tests
        all_tests = list_tests( options.include, options.exclude )

        # if the user wants all tests then no other treatment of the test list is required
        if options.all or options.include :
                tests = all_tests

        # otherwise we need to validate that the entered test list is valid
        else :
                tests = valid_tests( options )

        # make sure we have at least some tests to run
        if not tests :
                print('ERROR: No valid test to run', file=sys.stderr)
                sys.exit(1)

        # prep invariants
        settings.prep_output(tests)
        failed = 0

        # check that the expected files aren't empty
        if not options.regenerate_expected:
                for t in tests:
                        if is_empty(t.expect()):
                                print('WARNING: test "{}" has empty .expect file'.format(t.target()), file=sys.stderr)

        # for each build configuration, run the tests
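        # e.g. --debug=yes,no --install=no yields two configurations per AST/architecture pair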
        with Timed() as total_dur:
                for ast, arch, debug, install in itertools.product(settings.all_ast, settings.all_arch, settings.all_debug, settings.all_install):
                        settings.ast     = ast
                        settings.arch    = arch
                        settings.debug   = debug
                        settings.install = install

                        # filter out the tests for a different architecture
                        # tests are the same across debug/install
                        local_tests = settings.ast.filter( tests )
                        local_tests = settings.arch.filter( local_tests )
                        options.jobs, forceJobs = job_count( options, local_tests )
                        settings.update_make_cmd(forceJobs, options.jobs)

                        # check the build configuration works
                        settings.validate()

                        # print configuration
                        print('%s %i tests on %i cores (%s:%s - %s)' % (
                                'Regenerating' if settings.generating else 'Running',
                                len(local_tests),
                                options.jobs,
                                settings.ast.string,
                                settings.arch.string,
                                settings.debug.string
                        ))
                        if not local_tests :
                                print('WARNING: No tests for this configuration')
                                continue

                        # run the tests, accumulating failures across configurations so a
                        # later passing run does not mask an earlier failure
                        if run_tests(local_tests, options.jobs):
                                failed = 1
                                if not settings.continue_:
                                        break

        print('Tests took %s' % fmtDur( total_dur.duration ))
        sys.exit( failed )