Changeset 6a490b2 for tests/test.py


Ignore:
Timestamp:
May 11, 2020, 1:53:29 PM (6 years ago)
Author:
Thierry Delisle <tdelisle@…>
Branches:
ADT, arm-eh, ast-experimental, enum, forall-pointer-decay, jacob/cs343-translation, master, new-ast, new-ast-unique-expr, pthread-emulation, qualifiedEnum
Children:
504a7dc
Parents:
b7d6a36 (diff), a7b486b (diff)
Note: this is a merge changeset, the changes displayed below correspond to the merge itself.
Use the (diff) links above to see all the changes relative to each parent.
Message:

Merge branch 'master' into relaxed_ready

File:
1 edited

Legend:

Unmodified
Added
Removed
  • tests/test.py

    rb7d6a36 r6a490b2  
    66
    77import argparse
     8import itertools
    89import re
    910import sys
     
    2930                        test.path = match.group(1)
    3031                        test.arch = match.group(3)[1:] if match.group(3) else None
    31                         if settings.arch.match(test.arch):
    32                                 expected.append(test)
     32                        expected.append(test)
    3333
    3434        path_walk( match_test )
     
    5353                ]
    5454
     55        # sort the tests alphabetically for convenience
     56        test_list.sort(key=lambda t: ('~' if t.arch else '') + t.target() + (t.arch if t.arch else ''))
     57
    5558        return test_list
    5659
     
    6467                for testname in options.tests :
    6568                        testname = canonical_path( testname )
     69                        # first check if this is a valid name to regenerate
    6670                        if Test.valid_name(testname):
     71                                # this is a valid name, let's check if it already exists
    6772                                found = [test for test in all_tests if canonical_path( test.target() ) == testname]
    68                                 tests.append( found[0] if len(found) == 1 else Test.from_target(testname) )
     73                                if not found:
     74                                        # it's a new name, create it according to the name and specified architecture
     75                                        if options.arch:
     76                                                # user specified one or multiple architectures, assume the tests will have architecture specific results
     77                                                tests.extend( [Test.new_target(testname, arch) for arch in settings.all_arch] )
     78                                        else:
     79                                                # user didn't specify an architecture, just create a cross platform test
     80                                                tests.append( Test.new_target( testname, None ) )
     81                                elif len(found) == 1 and not found[0].arch:
     82                                        # we found a single test, the user better be wanting to create a cross platform test
     83                                        if options.arch:
     84                                                print('ERROR: "%s", test has no specified architecture but --arch was specified, ignoring it' % testname, file=sys.stderr)
     85                                        else:
     86                                                tests.append( found[0] )
     87                                else:
     88                                        # this test is already cross platform, just add a test for each platform the user asked
     89                                        tests.extend( [Test.new_target(testname, arch) for arch in settings.all_arch] )
     90
     91                                        # print a warning if the user didn't ask for a specific architecture
     92                                        if not options.arch:
     93                                                print('WARNING: "%s", test has architecture specific expected files but --arch was not specified, regenerating only for current host' % testname, file=sys.stderr)
     94
    6995                        else :
    7096                                print('ERROR: "%s", tests are not allowed to end with a C/C++/CFA extension, ignoring it' % testname, file=sys.stderr)
     
    76102
    77103                        if test :
    78                                 tests.append( test[0] )
     104                                tests.extend( test )
    79105                        else :
    80106                                print('ERROR: No expected file for test %s, ignoring it' % testname, file=sys.stderr)
     
    86112        # create a parser with the arguments for the tests script
    87113        parser = argparse.ArgumentParser(description='Script which runs cforall tests')
    88         parser.add_argument('--debug', help='Run all tests in debug or release', type=yes_no, default='yes')
    89         parser.add_argument('--install', help='Run all tests based on installed binaries or tree binaries', type=yes_no, default='no')
    90         parser.add_argument('--arch', help='Test for specific architecture', type=str, default='')
     114        parser.add_argument('--debug', help='Run all tests in debug or release', type=comma_separated(yes_no), default='yes')
     115        parser.add_argument('--install', help='Run all tests based on installed binaries or tree binaries', type=comma_separated(yes_no), default='no')
     116        parser.add_argument('--arch', help='Test for specific architecture', type=comma_separated(str), default=None)
     117        parser.add_argument('--continue', help='When multiple specifications are passed (debug/install/arch), sets whether or not to continue if the last specification failed', type=yes_no, default='yes', dest='continue_')
    91118        parser.add_argument('--timeout', help='Maximum duration in seconds after a single test is considered to have timed out', type=int, default=60)
    92119        parser.add_argument('--global-timeout', help='Maximum cumulative duration in seconds after the ALL tests are considered to have timed out', type=int, default=7200)
     120        parser.add_argument('--timeout-with-gdb', help='Instead of killing the command when it times out, orphan it and print process id to allow gdb to attach', type=yes_no, default="no")
    93121        parser.add_argument('--dry-run', help='Don\'t run the tests, only output the commands', action='store_true')
    94122        parser.add_argument('--list', help='List all test available', action='store_true')
     
    178206
    179207                else:
    180                         with open (out_file, "r") as myfile:
    181                                 error = myfile.read()
     208                        if os.stat(out_file).st_size < 1048576:
     209                                with open (out_file, "r") as myfile:
     210                                        error = myfile.read()
     211                        else:
     212                                error = "Output log can't be read, file is bigger than 1MB, see {} for actual error\n".format(out_file)
    182213
    183214                        ret, info = core_info(exe_file)
     
    215246                return False, ""
    216247        except Exception as ex:
    217                 print("Unexpected error in worker thread: %s" % ex, file=sys.stderr)
     248                print("Unexpected error in worker thread running {}: {}".format(t.target(), ex), file=sys.stderr)
    218249                sys.stderr.flush()
    219250                return False, ""
     
    278309        make('clean', output_file=subprocess.DEVNULL, error=subprocess.DEVNULL)
    279310
    280         return 1 if failed else 0
     311        return failed
    281312
    282313
     
    292323        settings.init( options )
    293324
    294         # fetch the list of all valid tests
    295         all_tests = list_tests( options.include, options.exclude )
    296 
    297 
    298         # if user wants all tests then no other treatment of the test list is required
    299         if options.all or options.list or options.list_comp or options.include :
    300                 tests = all_tests
    301 
    302         #otherwise we need to validate that the test list that was entered is valid
    303         else :
    304                 tests = valid_tests( options )
    305 
    306         # make sure we have at least some test to run
    307         if not tests :
    308                 print('ERROR: No valid test to run', file=sys.stderr)
    309                 sys.exit(1)
    310 
    311 
    312         # sort the test alphabetically for convenience
    313         tests.sort(key=lambda t: (t.arch if t.arch else '') + t.target())
    314 
    315325        # users may want to simply list the tests
    316326        if options.list_comp :
    317                 print("-h --help --debug --dry-run --list --arch --all --regenerate-expected --archive-errors --install --timeout --global-timeout -j --jobs ", end='')
     327                # fetch the list of all valid tests
     328                tests = list_tests( None, None )
     329
     330                # print the possible options
     331                print("-h --help --debug --dry-run --list --arch --all --regenerate-expected --archive-errors --install --timeout --global-timeout --timeout-with-gdb -j --jobs -I --include -E --exclude --continue ", end='')
    318332                print(" ".join(map(lambda t: "%s" % (t.target()), tests)))
    319333
    320334        elif options.list :
    321                 print("Listing for %s:%s"% (settings.arch.string, settings.debug.string))
     335                # fetch the list of all valid tests
     336                tests = list_tests( options.include, options.exclude )
     337
     338                # print the available tests
    322339                fancy_print("\n".join(map(lambda t: t.toString(), tests)))
    323340
    324341        else :
    325                 # check the build configuration works
     342                # fetch the list of all valid tests
     343                all_tests = list_tests( options.include, options.exclude )
     344
     345                # if user wants all tests then no other treatment of the test list is required
     346                if options.all or options.include :
     347                        tests = all_tests
     348
     349                #otherwise we need to validate that the test list that was entered is valid
     350                else :
     351                        tests = valid_tests( options )
     352
     353                # make sure we have at least some test to run
     354                if not tests :
     355                        print('ERROR: No valid test to run', file=sys.stderr)
     356                        sys.exit(1)
     357
     358                # prep invariants
    326359                settings.prep_output(tests)
    327                 settings.validate()
    328 
    329                 options.jobs, forceJobs = job_count( options, tests )
    330                 settings.update_make_cmd(forceJobs, options.jobs)
    331 
    332                 print('%s %i tests on %i cores (%s:%s)' % (
    333                         'Regenerating' if settings.generating else 'Running',
    334                         len(tests),
    335                         options.jobs,
    336                         settings.arch.string,
    337                         settings.debug.string
    338                 ))
    339 
    340                 # otherwise run all tests and make sure to return the correct error code
    341                 sys.exit( run_tests(tests, options.jobs) )
     360                failed = 0
     361
     362                # for each build configurations, run the test
     363                for arch, debug, install in itertools.product(settings.all_arch, settings.all_debug, settings.all_install):
     364                        settings.arch    = arch
     365                        settings.debug   = debug
     366                        settings.install = install
     367
     368                        # filter out the tests for a different architecture
     369                        # tests are the same across debug/install
     370                        local_tests = settings.arch.filter( tests )
     371                        options.jobs, forceJobs = job_count( options, local_tests )
     372                        settings.update_make_cmd(forceJobs, options.jobs)
     373
     374                        # check the build configuration works
     375                        settings.validate()
     376
     377                        # print configuration
     378                        print('%s %i tests on %i cores (%s:%s)' % (
     379                                'Regenerating' if settings.generating else 'Running',
     380                                len(local_tests),
     381                                options.jobs,
     382                                settings.arch.string,
     383                                settings.debug.string
     384                        ))
     385
     386                        # otherwise run all tests and make sure to return the correct error code
     387                        failed = run_tests(local_tests, options.jobs)
     388                        if failed:
     389                                result = 1
     390                                if not settings.continue_:
     391                                        break
     392
     393
     394                sys.exit( failed )
Note: See TracChangeset for help on using the changeset viewer.