Ignore:
File:
1 edited

Legend:

Unmodified
Added
Removed
  • tests/test.py

    r62cc231 r103c292  
    66
    77import argparse
    8 import itertools
    98import re
    109import sys
     
    3029                        test.path = match.group(1)
    3130                        test.arch = match.group(3)[1:] if match.group(3) else None
    32                         expected.append(test)
     31                        if settings.arch.match(test.arch):
     32                                expected.append(test)
    3333
    3434        path_walk( match_test )
     
    5353                ]
    5454
    55         # sort the tests alphabetically for convenience
    56         test_list.sort(key=lambda t: ('~' if t.arch else '') + t.target() + (t.arch if t.arch else ''))
    57 
    5855        return test_list
    5956
     
    6764                for testname in options.tests :
    6865                        testname = canonical_path( testname )
    69                         # first check if this is a valid name to regenerate
    7066                        if Test.valid_name(testname):
    71                                 # this is a valid name, let's check if it already exists
    7267                                found = [test for test in all_tests if canonical_path( test.target() ) == testname]
    73                                 if not found:
    74                                         # it's a new name, create it according to the name and specified architecture
    75                                         if options.arch:
    76                                                 # user specified one or multiple architectures, assume the tests will have architecture specific results
    77                                                 tests.extend( [Test.new_target(testname, arch) for arch in settings.all_arch] )
    78                                         else:
    79                                                 # user didn't specify an architecture, just create a cross platform test
    80                                                 tests.append( Test.new_target( testname, None ) )
    81                                 elif len(found) == 1 and not found[0].arch:
    82                                         # we found a single test, the user better be wanting to create a cross platform test
    83                                         if options.arch:
    84                                                 print('ERROR: "%s", test has no specified architecture but --arch was specified, ignoring it' % testname, file=sys.stderr)
    85                                         else:
    86                                                 tests.append( found[0] )
    87                                 else:
    88                                         # this test is already cross platform, just add a test for each platform the user asked
    89                                         tests.extend( [Test.new_target(testname, arch) for arch in settings.all_arch] )
    90 
    91                                         # print a warning if the user didn't ask for a specific architecture
    92                                         if not options.arch:
    93                                                 print('WARNING: "%s", test has architecture specific expected files but --arch was not specified, regenerating only for current host' % testname, file=sys.stderr)
    94 
     68                                tests.append( found[0] if len(found) == 1 else Test.from_target(testname) )
    9569                        else :
    9670                                print('ERROR: "%s", tests are not allowed to end with a C/C++/CFA extension, ignoring it' % testname, file=sys.stderr)
     
    10276
    10377                        if test :
    104                                 tests.extend( test )
     78                                tests.append( test[0] )
    10579                        else :
    10680                                print('ERROR: No expected file for test %s, ignoring it' % testname, file=sys.stderr)
     
    11286        # create a parser with the arguments for the tests script
    11387        parser = argparse.ArgumentParser(description='Script which runs cforall tests')
    114         parser.add_argument('--debug', help='Run all tests in debug or release', type=comma_separated(yes_no), default='yes')
    115         parser.add_argument('--install', help='Run all tests based on installed binaries or tree binaries', type=comma_separated(yes_no), default='no')
    116         parser.add_argument('--arch', help='Test for specific architecture', type=comma_separated(str), default=None)
    117         parser.add_argument('--continue', help='When multiple specifications are passed (debug/install/arch), sets whether or not to continue if the last specification failed', type=yes_no, default='yes', dest='continue_')
     88        parser.add_argument('--debug', help='Run all tests in debug or release', type=yes_no, default='yes')
     89        parser.add_argument('--install', help='Run all tests based on installed binaries or tree binaries', type=yes_no, default='no')
     90        parser.add_argument('--arch', help='Test for specific architecture', type=str, default='')
    11891        parser.add_argument('--timeout', help='Maximum duration in seconds after a single test is considered to have timed out', type=int, default=60)
    11992        parser.add_argument('--global-timeout', help='Maximum cumulative duration in seconds after the ALL tests are considered to have timed out', type=int, default=7200)
    120         parser.add_argument('--timeout-with-gdb', help='Instead of killing the command when it times out, orphan it and print process id to allow gdb to attach', type=yes_no, default="no")
    12193        parser.add_argument('--dry-run', help='Don\'t run the tests, only output the commands', action='store_true')
    12294        parser.add_argument('--list', help='List all test available', action='store_true')
     
    206178
    207179                else:
    208                         if os.stat(out_file).st_size < 1048576:
    209                                 with open (out_file, "r") as myfile:
    210                                         error = myfile.read()
    211                         else:
    212                                 error = "Output log can't be read, file is bigger than 1MB, see {} for actual error\n".format(out_file)
     180                        with open (out_file, "r") as myfile:
     181                                error = myfile.read()
    213182
    214183                        ret, info = core_info(exe_file)
     
    246215                return False, ""
    247216        except Exception as ex:
    248                 print("Unexpected error in worker thread running {}: {}".format(t.target(), ex), file=sys.stderr)
     217                print("Unexpected error in worker thread: %s" % ex, file=sys.stderr)
    249218                sys.stderr.flush()
    250219                return False, ""
     
    309278        make('clean', output_file=subprocess.DEVNULL, error=subprocess.DEVNULL)
    310279
    311         return failed
     280        return 1 if failed else 0
    312281
    313282
     
    323292        settings.init( options )
    324293
     294        # fetch the list of all valid tests
     295        all_tests = list_tests( options.include, options.exclude )
     296
     297
     298        # if user wants all tests then no other treatment of the test list is required
     299        if options.all or options.list or options.list_comp or options.include :
     300                tests = all_tests
     301
     302        #otherwise we need to validate that the test list that was entered is valid
     303        else :
     304                tests = valid_tests( options )
     305
     306        # make sure we have at least some test to run
     307        if not tests :
     308                print('ERROR: No valid test to run', file=sys.stderr)
     309                sys.exit(1)
     310
     311
     312        # sort the tests alphabetically for convenience
     313        tests.sort(key=lambda t: (t.arch if t.arch else '') + t.target())
     314
    325315        # users may want to simply list the tests
    326316        if options.list_comp :
    327                 # fetch the list of all valid tests
    328                 tests = list_tests( None, None )
    329 
    330                 # print the possible options
    331                 print("-h --help --debug --dry-run --list --arch --all --regenerate-expected --archive-errors --install --timeout --global-timeout --timeout-with-gdb -j --jobs -I --include -E --exclude --continue ", end='')
     317                print("-h --help --debug --dry-run --list --arch --all --regenerate-expected --archive-errors --install --timeout --global-timeout -j --jobs ", end='')
    332318                print(" ".join(map(lambda t: "%s" % (t.target()), tests)))
    333319
    334320        elif options.list :
    335                 # fetch the list of all valid tests
    336                 tests = list_tests( options.include, options.exclude )
    337 
    338                 # print the available tests
     321                print("Listing for %s:%s"% (settings.arch.string, settings.debug.string))
    339322                fancy_print("\n".join(map(lambda t: t.toString(), tests)))
    340323
    341324        else :
    342                 # fetch the list of all valid tests
    343                 all_tests = list_tests( options.include, options.exclude )
    344 
    345                 # if user wants all tests then no other treatment of the test list is required
    346                 if options.all or options.include :
    347                         tests = all_tests
    348 
    349                 #otherwise we need to validate that the test list that was entered is valid
    350                 else :
    351                         tests = valid_tests( options )
    352 
    353                 # make sure we have at least some test to run
    354                 if not tests :
    355                         print('ERROR: No valid test to run', file=sys.stderr)
    356                         sys.exit(1)
    357 
    358                 # prep invariants
     325                # check the build configuration works
    359326                settings.prep_output(tests)
    360                 failed = 0
    361 
    362                 # for each build configurations, run the test
    363                 for arch, debug, install in itertools.product(settings.all_arch, settings.all_debug, settings.all_install):
    364                         settings.arch    = arch
    365                         settings.debug   = debug
    366                         settings.install = install
    367 
    368                         # filter out the tests for a different architecture
    369                         # tests are the same across debug/install
    370                         local_tests = settings.arch.filter( tests )
    371                         options.jobs, forceJobs = job_count( options, local_tests )
    372                         settings.update_make_cmd(forceJobs, options.jobs)
    373 
    374                         # check the build configuration works
    375                         settings.validate()
    376 
    377                         # print configuration
    378                         print('%s %i tests on %i cores (%s:%s)' % (
    379                                 'Regenerating' if settings.generating else 'Running',
    380                                 len(local_tests),
    381                                 options.jobs,
    382                                 settings.arch.string,
    383                                 settings.debug.string
    384                         ))
    385 
    386                         # otherwise run all tests and make sure to return the correct error code
    387                         failed = run_tests(local_tests, options.jobs)
    388                         if failed:
    389                                 result = 1
    390                                 if not settings.continue_:
    391                                         break
    392 
    393 
    394                 sys.exit( failed )
     327                settings.validate()
     328
     329                options.jobs, forceJobs = job_count( options, tests )
     330                settings.update_make_cmd(forceJobs, options.jobs)
     331
     332                print('%s %i tests on %i cores (%s:%s)' % (
     333                        'Regenerating' if settings.generating else 'Running',
     334                        len(tests),
     335                        options.jobs,
     336                        settings.arch.string,
     337                        settings.debug.string
     338                ))
     339
     340                # otherwise run all tests and make sure to return the correct error code
     341                sys.exit( run_tests(tests, options.jobs) )
Note: See TracChangeset for help on using the changeset viewer.