#!/usr/bin/python3

from pybin.tools import *
from pybin.test_run import *
from pybin import settings

import argparse
import itertools
import re
import sys
import tempfile
import time

import os
import signal

################################################################################
# helper functions
################################################################################

def find_tests():
	expected = []

	def match_test(path):
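		# expected-output files are named <path>/.expect/<name>[.<arch>].txt;
		# group 1 is the test directory, group 2 the test name, group 3 the optional architecture suffix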
		match = re.search(r"^%s\/([\w\/\-_]*)\.expect\/([\w\-_\+]+)(\.[\w\-_]+)?\.txt$" % settings.SRCDIR, path)
		if match:
			test = Test()
			test.name = match.group(2)
			test.path = match.group(1)
			test.arch = match.group(3)[1:] if match.group(3) else None

			expected.append(test)

	path_walk( match_test )

	return expected

# reads the .expect directories and identifies the tests
def list_tests( includes, excludes ):
	# tests directly in the .expect folder will always be processed
	test_list = find_tests()

	# if we have a limited number of includes, filter by them
	if includes:
		test_list = [x for x in test_list if
			x.target().startswith( tuple(includes) )
		]

	# if we have folders to exclude, filter by them
	if excludes:
		test_list = [x for x in test_list if not
			x.target().startswith( tuple(excludes) )
		]

	# sort the tests alphabetically for convenience
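	# '~' sorts after all letters in ASCII, so architecture-specific tests end up last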
	test_list.sort(key=lambda t: ('~' if t.arch else '') + t.target() + (t.arch if t.arch else ''))

	return test_list

# from the found tests, keep only the valid/desired tests
def valid_tests( options ):
	tests = []

	# if we are regenerating the tests we need to find the information of the
	# already existing tests and create new info for the new tests
	if options.regenerate_expected:
		for testname in options.tests:
			testname = os.path.normpath( os.path.join(settings.SRCDIR, testname) )

			# first check if this is a valid name to regenerate
			if Test.valid_name(testname):
				# this is a valid name, let's check if it already exists
				found = [test for test in all_tests if canonical_path( test.target() ) == testname]
				setup = settings.all_arch if options.arch else [None]
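				# with --arch, regenerate one expect file per requested architecture;
				# otherwise regenerate only for the current host (arch None)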
				if not found:
					# it's a new name, create it according to the name and specified architectures
					tests.extend( [Test.new_target(testname, arch) for arch in setup] )
				elif len(found) == 1 and not found[0].arch:
					# we found a single test, the user had better be creating a cross-platform test
					if options.arch:
						print('ERROR: "%s", test has no specified architecture but --arch was specified, ignoring it' % testname, file=sys.stderr)
					else:
						tests.append( found[0] )
				else:
					# this test is already cross-platform, just add a test for each platform the user asked for
					tests.extend( [Test.new_target(testname, arch) for arch in setup] )

					# print a warning if the user didn't ask for a specific architecture
					found_arch = [f.arch for f in found if f.arch]
					if found_arch and not options.arch:
						print('WARNING: "%s", test has architecture specific expected files but --arch was not specified, regenerating only for current host' % testname, file=sys.stderr)

			else:
				print('ERROR: "%s", tests are not allowed to end with a C/C++/CFA extension, ignoring it' % testname, file=sys.stderr)

	else:
		# otherwise we only need to validate that all tests are present in the complete list
		for testname in options.tests:
			test = [t for t in all_tests if path_cmp( t.target(), testname )]

			if test:
				tests.extend( test )
			else:
				print('ERROR: No expected file for test %s, ignoring it' % testname, file=sys.stderr)

	return tests

# parses the command-line options
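# typical invocations (test names illustrative):
#   ./test.py --all                     run every test in the default configuration
#   ./test.py -I io -j 8 --debug=no     run the tests under 'io/' in release mode, 8 at a time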
def parse_args():
	# create a parser with the arguments for the tests script
	parser = argparse.ArgumentParser(description='Script which runs cforall tests')
	parser.add_argument('--arch', help='Test for specific architecture', type=comma_separated(str), default=None)
	parser.add_argument('--debug', help='Run all tests in debug or release', type=comma_separated(yes_no), default='yes')
	parser.add_argument('--install', help='Run all tests based on installed binaries or tree binaries', type=comma_separated(yes_no), default='no')
	parser.add_argument('--continue', help='When multiple specifications are passed (debug/install/arch), sets whether or not to continue if the last specification failed', type=yes_no, default='yes', dest='continue_')
	parser.add_argument('--no-invariant', help='Tell the compiler not to check invariants while running.', action='store_false')
	parser.add_argument('--timeout', help='Maximum duration in seconds after which a single test is considered to have timed out', type=int, default=180)
	parser.add_argument('--global-timeout', help='Maximum cumulative duration in seconds after which ALL tests are considered to have timed out', type=int, default=7200)
	parser.add_argument('--timeout-with-gdb', help='Instead of killing the command when it times out, orphan it and print the process id to allow gdb to attach', type=yes_no, default="no")
	parser.add_argument('--dry-run', help='Don\'t run the tests, only output the commands', action='store_true')
	parser.add_argument('--list', help='List all available tests', action='store_true')
	parser.add_argument('--all', help='Run all available tests', action='store_true')
	parser.add_argument('--regenerate-expected', help='Regenerate the .expect files by running the specified tests, can be used with the --all option', action='store_true')
	parser.add_argument('--archive-errors', help='If called with a valid path, on test crashes the test script will copy the core dump and the executable to the specified path.', type=str, default='')
	parser.add_argument('-j', '--jobs', help='Number of tests to run simultaneously, 0 (default) for unlimited', nargs='?', const=0, type=int)
	parser.add_argument('--list-comp', help='List all valid arguments', action='store_true')
	parser.add_argument('--list-dist', help='List all tests for distribution', action='store_true')
	parser.add_argument('-I','--include', help='Directory of tests to include, can be used multiple times; all if omitted', action='append')
	parser.add_argument('-E','--exclude', help='Directory of tests to exclude, can be used multiple times; none if omitted', action='append')
	parser.add_argument('tests', metavar='test', type=str, nargs='*', help='a list of tests to run')

	try:
		options = parser.parse_args()
	except SystemExit:
		print('ERROR: invalid arguments', file=sys.stderr)
		parser.print_help(sys.stderr)
		sys.exit(1)

	# script must have at least some tests to run or be listing
	listing = options.list or options.list_comp or options.list_dist
	all_tests = options.all
	some_tests = len(options.tests) > 0
	some_dirs = len(options.include) > 0 if options.include else 0

	# check that at least one of the booleans is set to true
	if not sum( (listing, all_tests, some_tests, some_dirs) ) > 0:
		print('''ERROR: must have option '--all', '--list', '--include', '-I' or non-empty test list''', file=sys.stderr)
		parser.print_help()
		sys.exit(1)

	return options

################################################################################
# running test functions
################################################################################
def success(val):
	return val == 0 or settings.dry_run

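# an output file containing only this make message means the requested test target does not exist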
def no_rule(file, target):
	return not settings.dry_run and file_contains_only(file, "make: *** No rule to make target `%s'. Stop." % target)

# logic to run a single test and return the result (no handling of printing or other test framework logic)
def run_single_test(test):

	# find the output file based on the test name and options flag
	exe_file = test.target_executable()
	out_file = test.target_output()
	err_file = test.error_log()
	cmp_file = test.expect()
	in_file = test.input()

	# prepare the proper directories
	test.prepare()

	# extra flags for cfa to pass through make
	cfa_flags = 'CFAFLAGS=--invariant' if settings.invariant else None

	# ----------
	# MAKE
	# ----------
	# build, skipping to the next test on error
	with Timed() as comp_dur:
		make_ret, _, _ = make(test.target(), flags=cfa_flags, output_file=subprocess.DEVNULL, error=out_file, error_file=err_file)

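	# note: when the build fails, the compiler output captured in err_file becomes the
	# test output below, so .expect files can also encode expected build errors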
	# ----------
	# RUN
	# ----------
	# run everything in a temp directory to make sure core files are handled properly
	run_dur = None
	with tempdir():
		# if the make command succeeded, continue; otherwise skip to the diff
		if success(make_ret):
			with Timed() as run_dur:
				if settings.dry_run or is_exe(exe_file):
					# run the test
					retcode, _, _ = sh(exe_file, output_file=out_file, input_file=in_file, timeout=True, nice=True)
				else:
					# simply cat the result into the output
					retcode = cat(exe_file, out_file)
		else:
			retcode = mv(err_file, out_file)

		if success(retcode):
			if settings.generating:
				# if we are only generating the output we still need to check that the test actually exists
				if no_rule(out_file, test.target()):
					retcode = 1
					error = "\t\tNo make target for test %s!" % test.target()
					rm(out_file)
				else:
					error = None
			else:
				# fetch the return code and error from the diff command
				retcode, error, _ = diff(cmp_file, out_file)

		else:
			if os.stat(out_file).st_size < 1048576:
				with open (out_file, "r", encoding='latin-1') as myfile: # use latin-1 so all chars mean something
					error = myfile.read()
			else:
				error = "Output log can't be read, file is bigger than 1MB, see {} for actual error\n".format(out_file)

			ret, info = core_info(exe_file)
			error = error + info if error else info

			if settings.archive:
				error = error + '\n' + core_archive(settings.archive, test.target(), exe_file)

	# clean the executable
	rm(exe_file)

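	# run_dur stays None when the build failed and the test never ran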
	return retcode, error, [comp_dur.duration, run_dur.duration if run_dur else None]

# run a single test and handle the errors, outputs, printing, exception handling, etc.
def run_test_worker(t):
	try:
		# print the formatted name
		name_txt = t.format_target(width=settings.output_width) + ' '

		retcode, error, duration = run_single_test(t)

		# update output based on current action
		result_key, result_txt = TestResult.toString( retcode, duration )

		# print result with error if needed
		text = '\t' + name_txt + result_txt
		out = sys.stdout
		if error:
			text = text + '\n' + error

		return retcode == TestResult.SUCCESS, result_key, text
	except KeyboardInterrupt:
		return False, 'keybrd', ""
	# except Exception as ex:
	# 	print("Unexpected error in worker thread running {}: {}".format(t.target(), ex), file=sys.stderr)
	# 	sys.stderr.flush()
	# 	return False, ""


# run the given list of tests with the given parameters
def run_tests(tests, jobs):
	# clean the sandbox from previous commands
	make('clean', output_file=subprocess.DEVNULL, error=subprocess.DEVNULL)

	# create the executor for our jobs
	pool = multiprocessing.Pool(jobs)

	failed = False
	rescnts = { 'pass': 0, 'fail': 0, 'time': 0, 'keybrd': 0 }
	other = 0

	# for each test to run
	try:
		num = len(tests)
		fancy = sys.stdout.isatty()
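		# imap_unordered yields each result as soon as its worker finishes;
		# chunksize=1 hands tests out one at a time so a slow test does not delay others in its batch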
		results = pool.imap_unordered(
			run_test_worker,
			tests,
			chunksize = 1
		)

		for i, (succ, code, txt) in enumerate(timed(results, timeout = settings.timeout.total), 1):
			if code in rescnts.keys():
				rescnts[code] += 1
			else:
				other += 1

			if not succ:
				failed = True

			print(" " + txt)

			if fancy and i != num:
				print("%d/%d" % (i, num), end='\r')
				sys.stdout.flush()

	except KeyboardInterrupt:
		print("Tests interrupted by user", file=sys.stderr)
		pool.terminate()
		pool.join()
		failed = True
	except multiprocessing.TimeoutError:
		print("ERROR: Test suite timed out", file=sys.stderr)
		pool.terminate()
		pool.join()
		failed = True
		killgroup() # needed to cleanly kill all children

	# clean the workspace
	make('clean', output_file=subprocess.DEVNULL, error=subprocess.DEVNULL)

	print("{} passes, {} failures, {} timeouts, {} cancelled, {} other".format(rescnts['pass'], rescnts['fail'], rescnts['time'], rescnts['keybrd'], other))

	return failed


################################################################################
# main loop
################################################################################
if __name__ == "__main__":

	# parse the command line arguments
	options = parse_args()

	# init global settings
	settings.init( options )

	# --------------------------------------------------
	# list all the tests for auto-completion programs
	# not pretty, single line, with the command line options
	if options.list_comp:
		# fetch the list of all valid tests
		tests = list_tests( None, None )

		# print the possible options
		print("-h --help --debug --dry-run --list --arch --all --regenerate-expected --archive-errors --install --timeout --global-timeout --timeout-with-gdb -j --jobs -I --include -E --exclude --continue ", end='')
		print(" ".join(map(lambda t: "%s" % (t.target()), tests)))

		# done
		sys.exit(0)

	# --------------------------------------------------
	# list all the tests for distribution
	if options.list_dist:
		# fetch the list of all valid tests
		tests = list_tests( None, None )

		for t in tests:
			print(os.path.relpath(t.expect(), settings.SRCDIR), end=' ')
			print(os.path.relpath(t.input() , settings.SRCDIR), end=' ')
			code, out, err = make_recon(t.target())

			if code != 0:
				print('ERROR: recon failed for test {}: {} \'{}\''.format(t.target(), code, err), file=sys.stderr)
				sys.exit(1)

			print(' '.join(re.findall(r'([^\s]+\.cfa)', out)), end=' ')

			print('')

		# done
		sys.exit(0)

	# --------------------------------------------------
	# list all the tests for users, in a pretty format
	if options.list:
		# fetch the list of all valid tests
		tests = list_tests( options.include, options.exclude )

		# print the available tests
		fancy_print("\n".join(map(lambda t: t.toString(), tests)))

		# done
		sys.exit(0)

	# fetch the list of all valid tests
	all_tests = list_tests( options.include, options.exclude )

	# if the user wants all tests then no other treatment of the test list is required
	if options.all or options.include:
		tests = all_tests

	# otherwise we need to validate that the test list that was entered is valid
	else:
		tests = valid_tests( options )

	# make sure we have at least some tests to run
	if not tests:
		print('ERROR: No valid test to run', file=sys.stderr)
		sys.exit(1)

	# prep invariants
	settings.prep_output(tests)
	failed = 0

	# check that the expected files aren't empty
	if not options.regenerate_expected:
		for t in tests:
			if is_empty(t.expect()):
				print('WARNING: test "{}" has an empty .expect file'.format(t.target()), file=sys.stderr)

	options.jobs = job_count( options )

	# for each build configuration, run the tests
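	# the cross product of the --arch/--debug/--install values forms the configuration
	# matrix; the same test list is rerun for each configuration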
	with Timed() as total_dur:
		for arch, debug, install in itertools.product(settings.all_arch, settings.all_debug, settings.all_install):
			settings.arch = arch
			settings.debug = debug
			settings.install = install

			# filter out the tests for a different architecture
			# tests are the same across debug/install
			local_tests = settings.arch.filter( tests )

			# check the build configuration works
			settings.validate()
			jobs = min(options.jobs, len(local_tests))

			# print the configuration
			print('%s %i tests on %i cores (%s - %s)' % (
				'Regenerating' if settings.generating else 'Running',
				len(local_tests),
				jobs,
				settings.arch.string,
				settings.debug.string
			))
			if not local_tests:
				print('WARNING: No tests for this configuration')
				continue

			# otherwise run all tests and make sure to return the correct error code
			failed = run_tests(local_tests, jobs)
			if failed:
				if not settings.continue_:
					break

	print('Tests took %s' % fmtDur( total_dur.duration ))
	sys.exit( failed )
---|