#!/usr/bin/python3

from pybin.tools import *
from pybin.test_run import *
from pybin import settings

import argparse
import itertools
import re
import sys
import tempfile
import time

import os
import psutil
import signal

################################################################################
#               helper functions
################################################################################

def find_tests():
	expected = []

	def match_test(path):
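		# expected-output files live under ".expect" directories; the regex below
		# recovers the test path, name, and optional architecture suffix, e.g.
		# (illustrative, assuming settings.SRCDIR is "."):
		#   ./array/.expect/fill.x64.txt  ->  path "array/", name "fill", arch "x64"
		#   ./.expect/io.txt              ->  path "",       name "io",   arch None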
		match = re.search(r"^%s\/([\w\/\-_]*)\.expect\/([\w\-_]+)(\.[\w\-_]+)?\.txt$" % settings.SRCDIR, path)
		if match :
			test = Test()
			test.name = match.group(2)
			test.path = match.group(1)
			test.arch = match.group(3)[1:] if match.group(3) else None
			expected.append(test)

	path_walk( match_test )

	return expected

# reads the directory ./.expect and identifies the tests
def list_tests( includes, excludes ):
	# tests directly in the .expect folder will always be processed
	test_list = find_tests()

	# if we have a limited number of includes, filter by them
	if includes:
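		# (str.startswith accepts a tuple and matches any one of the prefixes)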
		test_list = [x for x in test_list if
			x.target().startswith( tuple(includes) )
		]

	# if we have folders to exclude, filter by them
	if excludes:
		test_list = [x for x in test_list if not
			x.target().startswith( tuple(excludes) )
		]

	# sort the tests alphabetically for convenience
	test_list.sort(key=lambda t: ('~' if t.arch else '') + t.target() + (t.arch if t.arch else ''))
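	# ('~' compares greater than alphanumeric characters in ASCII, so
	#  architecture-specific tests sort after their cross-platform counterparts)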

	return test_list

# from the found tests, filter all the valid tests/desired tests
def valid_tests( options ):
	tests = []

	# if we are regenerating the tests we need to find the information of the
	# already existing tests and create new info for the new tests
	if options.regenerate_expected :
		for testname in options.tests :
			testname = canonical_path( testname )
			# first check if this is a valid name to regenerate
			if Test.valid_name(testname):
				# this is a valid name, let's check if it already exists
				found = [test for test in all_tests if canonical_path( test.target() ) == testname]
				if not found:
					# it's a new name, create it according to the name and specified architecture
					if options.arch:
						# user specified one or multiple architectures, assume the tests will have architecture specific results
						tests.extend( [Test.new_target(testname, arch) for arch in settings.all_arch] )
					else:
						# user didn't specify an architecture, just create a cross platform test
						tests.append( Test.new_target( testname, None ) )
				elif len(found) == 1 and not found[0].arch:
					# we found a single cross platform test, so --arch contradicts it
					if options.arch:
						print('ERROR: "%s", test has no specified architecture but --arch was specified, ignoring it' % testname, file=sys.stderr)
					else:
						tests.append( found[0] )
				else:
					# this test is architecture specific, add a test for each architecture the user asked for
					tests.extend( [Test.new_target(testname, arch) for arch in settings.all_arch] )

					# print a warning if the user didn't ask for a specific architecture
					if not options.arch:
						print('WARNING: "%s", test has architecture specific expected files but --arch was not specified, regenerating only for current host' % testname, file=sys.stderr)

			else :
				print('ERROR: "%s", tests are not allowed to end with a C/C++/CFA extension, ignoring it' % testname, file=sys.stderr)

	else :
		# otherwise we only need to validate that all tests are present in the complete list
		for testname in options.tests:
			test = [t for t in all_tests if path_cmp( t.target(), testname )]

			if test :
				tests.extend( test )
			else :
				print('ERROR: No expected file for test %s, ignoring it' % testname, file=sys.stderr)

	return tests

# parses the command line options
def parse_args():
	# create a parser with the arguments for the tests script
	parser = argparse.ArgumentParser(description='Script which runs cforall tests')
	parser.add_argument('--debug', help='Run all tests in debug or release', type=comma_separated(yes_no), default='yes')
	parser.add_argument('--install', help='Run all tests based on installed binaries or tree binaries', type=comma_separated(yes_no), default='no')
	parser.add_argument('--arch', help='Test for specific architecture', type=comma_separated(str), default=None)
	parser.add_argument('--continue', help='When multiple specifications are passed (debug/install/arch), sets whether or not to continue if the last specification failed', type=yes_no, default='yes', dest='continue_')
	parser.add_argument('--timeout', help='Maximum duration in seconds before a single test is considered to have timed out', type=int, default=120)
	parser.add_argument('--global-timeout', help='Maximum cumulative duration in seconds before ALL tests are considered to have timed out', type=int, default=7200)
	parser.add_argument('--timeout-with-gdb', help='Instead of killing the command when it times out, orphan it and print the process id to allow gdb to attach', type=yes_no, default="no")
	parser.add_argument('--dry-run', help='Don\'t run the tests, only output the commands', action='store_true')
	parser.add_argument('--list', help='List all available tests', action='store_true')
	parser.add_argument('--all', help='Run all available tests', action='store_true')
	parser.add_argument('--regenerate-expected', help='Regenerate the .expect files by running the specified tests, can be used with the --all option', action='store_true')
	parser.add_argument('--archive-errors', help='If called with a valid path, on test crashes the test script will copy the core dump and the executable to the specified path.', type=str, default='')
	parser.add_argument('-j', '--jobs', help='Number of tests to run simultaneously', type=int)
	parser.add_argument('--list-comp', help='List all valid arguments', action='store_true')
	parser.add_argument('-I', '--include', help='Directory of tests to include, can be used multiple times, all if omitted', action='append')
	parser.add_argument('-E', '--exclude', help='Directory of tests to exclude, can be used multiple times, none if omitted', action='append')
	parser.add_argument('tests', metavar='test', type=str, nargs='*', help='a list of tests to run')
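
	# example invocations (illustrative; the exact script name may differ):
	#   ./test.py --all                  run every test for the host configuration
	#   ./test.py -I concurrent -j 8     run the tests under one directory on 8 cores
	#   ./test.py --regenerate-expected mytest   rebuild the .expect file for one test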

	try:
		options = parser.parse_args()
	except SystemExit:
		print('ERROR: invalid arguments', file=sys.stderr)
		parser.print_help(sys.stderr)
		sys.exit(1)

	# script must have at least some tests to run or be listing
	listing    = options.list or options.list_comp
	all_tests  = options.all
	some_tests = len(options.tests) > 0
	some_dirs  = len(options.include) > 0 if options.include else 0

	# check that at least one way of selecting tests is used
	if not sum( (listing, all_tests, some_tests, some_dirs) ) > 0 :
		print('''ERROR: must have option '--all', '--list', '--include', '-I' or non-empty test list''', file=sys.stderr)
		parser.print_help()
		sys.exit(1)

	return options

################################################################################
#               running test functions
################################################################################
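# in dry-run mode commands are only printed, never run, so every step is treated as a success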
def success(val):
	return val == 0 or settings.dry_run

def no_rule(file, target):
	return not settings.dry_run and file_contains_only(file, "make: *** No rule to make target `%s'.  Stop." % target)

# logic to run a single test and return the result (no handling of printing or other test framework logic)
def run_single_test(test):

	# find the output file based on the test name and options flag
	exe_file = test.target_executable()
	out_file = test.target_output()
	err_file = test.error_log()
	cmp_file = test.expect()
	in_file  = test.input()

	# prepare the proper directories
	test.prepare()

	# build, skipping to next test on error
	with Timed() as comp_dur:
		make_ret, _ = make( test.target(), output_file=subprocess.DEVNULL, error=out_file, error_file = err_file )

	run_dur = None
	# run everything in a temp directory to make sure core files are handled properly
	with tempdir():
		# if the make command succeeded continue, otherwise skip to diff
		if success(make_ret):
			with Timed() as run_dur:
				if settings.dry_run or is_exe(exe_file):
					# run the test
					retcode, _ = sh(exe_file, output_file=out_file, input_file=in_file, timeout=True)
				else :
					# simply cat the result into the output
					retcode = cat(exe_file, out_file)
		else:
			retcode = mv(err_file, out_file)

		if success(retcode):
			if settings.generating :
				# if we are only generating the output we still need to check that the test actually exists
				if no_rule(out_file, test.target()) :
					retcode = 1
					error = "\t\tNo make target for test %s!" % test.target()
					rm(out_file)
				else:
					error = None
			else :
				# fetch return code and error from the diff command
				retcode, error = diff(cmp_file, out_file)

		else:
			# the test failed, recover the error message from the output log
			if os.stat(out_file).st_size < 1048576:
				with open(out_file, "r") as myfile:
					error = myfile.read()
			else:
				error = "Output log can't be read, file is bigger than 1MB, see {} for actual error\n".format(out_file)

			ret, info = core_info(exe_file)
			error = error + info if error else info

			if settings.archive:
				error = error + '\n' + core_archive(settings.archive, test.target(), exe_file)

	# clean the executable
	rm(exe_file)

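	# durations are reported as a [compile time, run time] pair, consumed by TestResult.toString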
	return retcode, error, [comp_dur.duration, run_dur.duration if run_dur else None]

# run a single test and handle the errors, outputs, printing, exception handling, etc.
def run_test_worker(t) :
	try :
		# print the formatted name
		name_txt = '{0:{width}}  '.format(t.target(), width=settings.output_width)

		retcode, error, duration = run_single_test(t)

		# update output based on current action
		result_txt = TestResult.toString( retcode, duration )

		# print the result with the error if needed
		text = '\t' + name_txt + result_txt
		if error :
			text = text + '\n' + error

		return retcode == TestResult.SUCCESS, text
	except KeyboardInterrupt:
		return False, ""
	except Exception as ex:
		print("Unexpected error in worker thread running {}: {}".format(t.target(), ex), file=sys.stderr)
		sys.stderr.flush()
		return False, ""


# run the given list of tests with the given parameters
def run_tests(tests, jobs) :
	# clean the sandbox from previous commands
	make('clean', output_file=subprocess.DEVNULL, error=subprocess.DEVNULL)

	# since python prints stack traces by default on an interrupt, redo the interrupt handling to be silent
	def worker_init():
		def sig_int(signal_num, frame):
			pass

		signal.signal(signal.SIGINT, sig_int)

	# create the executor for our jobs and handle the signal properly
	pool = multiprocessing.Pool(jobs, worker_init)

	failed = False

	def stop(signal_num, frame):
		print("Tests interrupted by user", file=sys.stderr)
		sys.exit(1)
	signal.signal(signal.SIGINT, stop)

	# for each test to run
	try :
		num = len(tests)
		fancy = sys.stdout.isatty()
		results = pool.imap_unordered(
			run_test_worker,
			tests,
			chunksize = 1
		)
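		# (chunksize=1 hands tests to workers one at a time, so a slow test
		#  does not hold back a prefetched batch of others)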

		for i, (succ, txt) in enumerate(timed(results, timeout = settings.timeout.total), 1) :
			if not succ :
				failed = True

			print("       " + txt)

			if fancy and i != num:
				print("%d/%d" % (i, num), end='\r')
				sys.stdout.flush()

	except KeyboardInterrupt:
		print("Tests interrupted by user", file=sys.stderr)
		pool.terminate()
		pool.join()
		failed = True
	except multiprocessing.TimeoutError:
		print("ERROR: Test suite timed out", file=sys.stderr)
		pool.terminate()
		pool.join()
		failed = True
		killgroup() # needed to cleanly kill all children

	# clean the workspace
	make('clean', output_file=subprocess.DEVNULL, error=subprocess.DEVNULL)

	return failed


################################################################################
#               main loop
################################################################################
if __name__ == "__main__":

	# parse the command line arguments
	options = parse_args()

	# init global settings
	settings.init( options )

	# users may want to simply list the tests
	if options.list_comp :
		# fetch the list of all valid tests
		tests = list_tests( None, None )

		# print the possible options
		print("-h --help --debug --dry-run --list --arch --all --regenerate-expected --archive-errors --install --timeout --global-timeout --timeout-with-gdb -j --jobs -I --include -E --exclude --continue ", end='')
		print(" ".join(map(lambda t: "%s" % (t.target()), tests)))

	elif options.list :
		# fetch the list of all valid tests
		tests = list_tests( options.include, options.exclude )

		# print the available tests
		fancy_print("\n".join(map(lambda t: t.toString(), tests)))

	else :
		# fetch the list of all valid tests
		all_tests = list_tests( options.include, options.exclude )

		# if the user wants all tests then no other treatment of the test list is required
		if options.all or options.include :
			tests = all_tests

		# otherwise we need to validate that the test list that was entered is valid
		else :
			tests = valid_tests( options )

		# make sure we have at least one test to run
		if not tests :
			print('ERROR: No valid test to run', file=sys.stderr)
			sys.exit(1)

		# prep invariants
		settings.prep_output(tests)
		failed = 0

		# for each build configuration, run the tests
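		# (itertools.product enumerates every arch/debug/install combination,
		#  e.g. "--debug=yes,no --install=yes,no" yields four runs per architecture)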
		with Timed() as total_dur:
			for arch, debug, install in itertools.product(settings.all_arch, settings.all_debug, settings.all_install):
				settings.arch    = arch
				settings.debug   = debug
				settings.install = install

				# filter out the tests for a different architecture
				# tests are the same across debug/install
				local_tests = settings.arch.filter( tests )
				options.jobs, forceJobs = job_count( options, local_tests )
				settings.update_make_cmd(forceJobs, options.jobs)

				# check that the build configuration works
				settings.validate()

				# print the configuration
				print('%s %i tests on %i cores (%s:%s)' % (
					'Regenerating' if settings.generating else 'Running',
					len(local_tests),
					options.jobs,
					settings.arch.string,
					settings.debug.string
				))

				# run all the tests and remember any failure for the final exit code
				if run_tests(local_tests, options.jobs):
					failed = 1
					if not settings.continue_:
						break

		print('Tests took %s' % fmtDur( total_dur.duration ))
		sys.exit( failed )
|---|