#!/usr/bin/python3

from pybin.tools import *
from pybin.test_run import *
from pybin import settings

import argparse
import itertools
import os
import psutil
import re
import signal
import sys
import tempfile
import time

# used below (multiprocessing.Pool, subprocess.DEVNULL); imported explicitly
# rather than relying on the wildcard imports above to re-export them
import multiprocessing
import subprocess
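
# NOTE: the helpers used throughout this script (Test, TestResult, path_walk,
# canonical_path, path_cmp, comma_separated, yes_no, job_count, make, sh, cat,
# mv, rm, diff, is_exe, file_contains_only, core_info, core_archive, tempdir,
# Timed, timed, fancy_print, killgroup, ...) are assumed to come from the
# wildcard imports of pybin.tools and pybin.test_run above.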

################################################################################
#                               helper functions
################################################################################

def find_tests():
    expected = []

    def match_test(path):
        # expected outputs live in '.expect' folders as '<name>[.<arch>].txt'
        match = re.search(r"^%s\/([\w\/\-_]*)\.expect\/([\w\-_]+)(\.[\w\-_]+)?\.txt$" % settings.SRCDIR, path)
        if match:
            test = Test()
            test.name = match.group(2)
            test.path = match.group(1)
            test.arch = match.group(3)[1:] if match.group(3) else None
            expected.append(test)

    path_walk( match_test )

    return expected
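
# For reference (illustrative path, not an actual test): with SRCDIR
# '/cfa/tests', the path '/cfa/tests/io/.expect/cat.x64.txt' is matched above
# as path='io/', name='cat', arch='x64', while '.../io/.expect/cat.txt'
# yields arch=None, i.e. a cross-platform expected file.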

# reads the directory ./.expect and identifies the tests
def list_tests( includes, excludes ):
    # tests directly in the .expect folder will always be processed
    test_list = find_tests()

    # if we have a limited set of includes, filter by them
    if includes:
        test_list = [x for x in test_list if
            x.target().startswith( tuple(includes) )
        ]

    # if we have folders to exclude, filter by them
    if excludes:
        test_list = [x for x in test_list if not
            x.target().startswith( tuple(excludes) )
        ]

    # sort the tests alphabetically for convenience;
    # the '~' prefix sorts architecture-specific tests after cross-platform ones
    test_list.sort(key=lambda t: ('~' if t.arch else '') + t.target() + (t.arch if t.arch else ''))

    return test_list
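
# Filtering is by path prefix on the test target; e.g. (hypothetical names)
# list_tests( ['io/'], None ) keeps 'io/cat' but drops 'concurrency/spin'.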

# from the found tests, filter all the valid/desired tests
def valid_tests( options ):
    tests = []

    # if we are regenerating the tests, we need to find the information of the
    # already existing tests and create new info for the new tests
    if options.regenerate_expected:
        for testname in options.tests:
            testname = canonical_path( testname )
            # first check if this is a valid name to regenerate
            if Test.valid_name(testname):
                # this is a valid name, let's check if it already exists
                # (all_tests is the module-level list built in the main block below)
                found = [test for test in all_tests if canonical_path( test.target() ) == testname]
                if not found:
                    # it's a new name, create it according to the name and specified architecture
                    if options.arch:
                        # the user specified one or more architectures, assume the test will have architecture-specific results
                        tests.extend( [Test.new_target(testname, arch) for arch in settings.all_arch] )
                    else:
                        # the user didn't specify an architecture, just create a cross-platform test
                        tests.append( Test.new_target( testname, None ) )
                elif len(found) == 1 and not found[0].arch:
                    # we found a single cross-platform test; the user had better not be asking for a specific architecture
                    if options.arch:
                        print('ERROR: "%s", test has no specified architecture but --arch was specified, ignoring it' % testname, file=sys.stderr)
                    else:
                        tests.append( found[0] )
                else:
                    # this test is already architecture-specific, just add a test for each platform the user asked for
                    tests.extend( [Test.new_target(testname, arch) for arch in settings.all_arch] )

                    # print a warning if the user didn't ask for a specific architecture
                    if not options.arch:
                        print('WARNING: "%s", test has architecture specific expected files but --arch was not specified, regenerating only for current host' % testname, file=sys.stderr)

            else:
                print('ERROR: "%s", tests are not allowed to end with a C/C++/CFA extension, ignoring it' % testname, file=sys.stderr)

    else:
        # otherwise we only need to validate that all tests are present in the complete list
        for testname in options.tests:
            test = [t for t in all_tests if path_cmp( t.target(), testname )]

            if test:
                tests.extend( test )
            else:
                print('ERROR: No expected file for test %s, ignoring it' % testname, file=sys.stderr)

    return tests
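
# Regeneration decision summary (restating the branches above):
#   new name             -> one target per architecture if --arch was given,
#                           otherwise a single cross-platform target
#   cross-platform test  -> reuse it, rejecting a contradictory --arch
#   arch-specific test   -> one target per architecture, with a warning when
#                           --arch was omitted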

# parse the command-line arguments
def parse_args():
    # create a parser with the arguments for the test script
    parser = argparse.ArgumentParser(description='Script which runs cforall tests')
    parser.add_argument('--debug', help='Run all tests in debug or release', type=comma_separated(yes_no), default='yes')
    parser.add_argument('--install', help='Run all tests based on installed binaries or tree binaries', type=comma_separated(yes_no), default='no')
    parser.add_argument('--arch', help='Test for specific architecture', type=comma_separated(str), default=None)
    parser.add_argument('--continue', help='When multiple specifications are passed (debug/install/arch), sets whether or not to continue if the last specification failed', type=yes_no, default='yes', dest='continue_')
    parser.add_argument('--timeout', help='Maximum duration in seconds after which a single test is considered to have timed out', type=int, default=60)
    parser.add_argument('--global-timeout', help='Maximum cumulative duration in seconds after which ALL tests are considered to have timed out', type=int, default=7200)
    parser.add_argument('--timeout-with-gdb', help='Instead of killing the command when it times out, orphan it and print the process id to allow gdb to attach', type=yes_no, default="no")
    parser.add_argument('--dry-run', help='Don\'t run the tests, only output the commands', action='store_true')
    parser.add_argument('--list', help='List all available tests', action='store_true')
    parser.add_argument('--all', help='Run all available tests', action='store_true')
    parser.add_argument('--regenerate-expected', help='Regenerate the .expect files by running the specified tests, can be used with the --all option', action='store_true')
    parser.add_argument('--archive-errors', help='If called with a valid path, on test crashes the test script will copy the core dump and the executable to the specified path', type=str, default='')
    parser.add_argument('-j', '--jobs', help='Number of tests to run simultaneously', type=int)
    parser.add_argument('--list-comp', help='List all valid arguments', action='store_true')
    parser.add_argument('-I', '--include', help='Directory of tests to include, can be used multiple times; all if omitted', action='append')
    parser.add_argument('-E', '--exclude', help='Directory of tests to exclude, can be used multiple times; none if omitted', action='append')
    parser.add_argument('tests', metavar='test', type=str, nargs='*', help='a list of tests to run')

    try:
        options = parser.parse_args()
    except SystemExit as error:
        # argparse exits on its own for --help; only report genuine parse errors
        if error.code == 0:
            raise
        print('ERROR: invalid arguments', file=sys.stderr)
        parser.print_help(sys.stderr)
        sys.exit(1)

    # the script must have at least some tests to run or be listing
    listing    = options.list or options.list_comp
    all_tests  = options.all
    some_tests = len(options.tests) > 0
    some_dirs  = bool(options.include)

    # check that at least one of the modes was requested
    if not sum( (listing, all_tests, some_tests, some_dirs) ) > 0:
        print('''ERROR: must have option '--all', '--list', '--include', '-I' or a non-empty test list''', file=sys.stderr)
        parser.print_help()
        sys.exit(1)

    return options
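
# Example invocations (script and test names are illustrative):
#   ./test.py --all                    # run every test for the host configuration
#   ./test.py -j 8 io/cat io/away      # run two named tests with 8 jobs
#   ./test.py --arch=x86,x64 --all     # run everything for two architectures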

################################################################################
#                            running test functions
################################################################################
def success(val):
    return val == 0 or settings.dry_run

def no_rule(file, target):
    return not settings.dry_run and file_contains_only(file, "make: *** No rule to make target `%s'. Stop." % target)

# logic to run a single test and return the result (no handling of printing or other test framework logic)
def run_single_test(test):

    # find the output file based on the test name and options flag
    exe_file = test.target_executable()
    out_file = test.target_output()
    err_file = test.error_log()
    cmp_file = test.expect()
    in_file  = test.input()

    # prepare the proper directories
    test.prepare()

    # build, skipping to the next test on error
    with Timed() as comp_dur:
        make_ret, _ = make( test.target(), output_file=subprocess.DEVNULL, error=out_file, error_file=err_file )

    run_dur = None
    # run everything in a temp directory to make sure core files are handled properly
    with tempdir():
        # if the make command succeeded, run the test; otherwise skip straight to the diff
        if success(make_ret):
            with Timed() as run_dur:
                if settings.dry_run or is_exe(exe_file):
                    # run the test
                    retcode, _ = sh(exe_file, output_file=out_file, input_file=in_file, timeout=True)
                else:
                    # simply cat the result into the output
                    retcode = cat(exe_file, out_file)
        else:
            retcode = mv(err_file, out_file)

        if success(retcode):
            if settings.generating:
                # if we are only generating the output we still need to check that the test actually exists
                if no_rule(out_file, test.target()):
                    retcode = 1
                    error = "\t\tNo make target for test %s!" % test.target()
                    rm(out_file)
                else:
                    error = None
            else:
                # fetch return code and error from the diff command
                retcode, error = diff(cmp_file, out_file)

        else:
            # the test failed: recover the error text, but cap it at 1 MB
            if os.stat(out_file).st_size < 1048576:
                with open(out_file, "r") as myfile:
                    error = myfile.read()
            else:
                error = "Output log can't be read, file is bigger than 1MB, see {} for actual error\n".format(out_file)

            _, info = core_info(exe_file)
            error = error + info if error else info

            if settings.archive:
                error = error + '\n' + core_archive(settings.archive, test.target(), exe_file)

    # clean the executable
    rm(exe_file)

    return retcode, error, [comp_dur.duration, run_dur.duration if run_dur else None]
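
# Pipeline summary for run_single_test (restating the steps above):
#   make the target -> run the executable (or cat a non-executable artifact) ->
#   either diff against the .expect file or, when --regenerate-expected is
#   active, keep the fresh output; on failure, attach the log, core info and,
#   with --archive-errors, an archived core dump to the error text.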

# run a single test and handle the errors, outputs, printing, exception handling, etc.
def run_test_worker(t):
    try:
        # format the test name to the configured column width
        name_txt = '{0:{width}} '.format(t.target(), width=settings.output_width)

        retcode, error, duration = run_single_test(t)

        # update the output based on the current action
        result_txt = TestResult.toString( retcode, duration )

        # print the result, with the error if needed
        text = '\t' + name_txt + result_txt
        if error:
            text = text + '\n' + error

        return retcode == TestResult.SUCCESS, text
    except KeyboardInterrupt:
        return False, ""
    except Exception as ex:
        print("Unexpected error in worker thread running {}: {}".format(t.target(), ex), file=sys.stderr)
        sys.stderr.flush()
        return False, ""
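
# Each worker returns a (success, text) pair; the pool loop in run_tests below
# prints the text and folds the successes into the suite-wide pass/fail result.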

# run the given list of tests with the given parameters
def run_tests(tests, jobs):
    # clean the sandbox from previous commands
    make('clean', output_file=subprocess.DEVNULL, error=subprocess.DEVNULL)

    # since python prints stack traces by default on an interrupt, redo the interrupt handling to be silent
    def worker_init():
        def sig_int(signal_num, frame):
            pass

        # workers ignore SIGINT so that Ctrl-C is handled only by the parent process
        signal.signal(signal.SIGINT, sig_int)

    # create the executor for our jobs, installing the signal handler in each worker
    pool = multiprocessing.Pool(jobs, worker_init)

    failed = False

    def stop(signal_num, frame):
        print("Tests interrupted by user", file=sys.stderr)
        sys.exit(1)
    signal.signal(signal.SIGINT, stop)

    # for each test to run
    try:
        num = len(tests)
        fancy = sys.stdout.isatty()
        results = pool.imap_unordered(
            run_test_worker,
            tests,
            chunksize = 1
        )

        for i, (succ, txt) in enumerate(timed(results, timeout = settings.timeout.total), 1):
            if not succ:
                failed = True

            print(" " + txt)

            if fancy and i != num:
                print("%d/%d" % (i, num), end='\r')
                sys.stdout.flush()

    except KeyboardInterrupt:
        print("Tests interrupted by user", file=sys.stderr)
        pool.terminate()
        pool.join()
        failed = True
    except multiprocessing.TimeoutError:
        print("ERROR: Test suite timed out", file=sys.stderr)
        pool.terminate()
        pool.join()
        failed = True
        killgroup() # needed to cleanly kill all children

    # clean the workspace
    make('clean', output_file=subprocess.DEVNULL, error=subprocess.DEVNULL)

    return failed
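
# Interrupt/timeout strategy (as set up above): workers install a no-op SIGINT
# handler so Ctrl-C reaches only the parent, which terminates the pool; the
# global timeout is applied by timed() around the result iterator, which is why
# multiprocessing.TimeoutError is handled in run_tests.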

################################################################################
#                                  main loop
################################################################################
if __name__ == "__main__":

    # parse the command line arguments
    options = parse_args()

    # init global settings
    settings.init( options )

    # users may want to simply list the tests
    if options.list_comp:
        # fetch the list of all valid tests
        tests = list_tests( None, None )

        # print the possible options
        print("-h --help --debug --dry-run --list --arch --all --regenerate-expected --archive-errors --install --timeout --global-timeout --timeout-with-gdb -j --jobs -I --include -E --exclude --continue ", end='')
        print(" ".join(map(lambda t: "%s" % (t.target()), tests)))

    elif options.list:
        # fetch the list of all valid tests
        tests = list_tests( options.include, options.exclude )

        # print the available tests
        fancy_print("\n".join(map(lambda t: t.toString(), tests)))

    else:
        # fetch the list of all valid tests
        all_tests = list_tests( options.include, options.exclude )

        # if the user wants all tests then no other treatment of the test list is required
        if options.all or options.include:
            tests = all_tests

        # otherwise we need to validate that the test list that was entered is valid
        else:
            tests = valid_tests( options )

        # make sure we have at least one test to run
        if not tests:
            print('ERROR: No valid test to run', file=sys.stderr)
            sys.exit(1)

        # prep invariants
        settings.prep_output(tests)
        failed = 0
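
        # Each comma-separated value of --arch/--debug/--install contributes one
        # configuration; e.g. (hypothetical values) '--arch=x86,x64 --debug=yes,no'
        # yields four (arch, debug, install) combinations below.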
        # for each build configuration, run the tests
        for arch, debug, install in itertools.product(settings.all_arch, settings.all_debug, settings.all_install):
            settings.arch = arch
            settings.debug = debug
            settings.install = install

            # filter out the tests for a different architecture;
            # tests are the same across debug/install
            local_tests = settings.arch.filter( tests )
            options.jobs, forceJobs = job_count( options, local_tests )
            settings.update_make_cmd(forceJobs, options.jobs)

            # check that the build configuration works
            settings.validate()

            # print the configuration
            print('%s %i tests on %i cores (%s:%s)' % (
                'Regenerating' if settings.generating else 'Running',
                len(local_tests),
                options.jobs,
                settings.arch.string,
                settings.debug.string
            ))

            # run all the tests, remembering whether any configuration failed so an
            # early failure is not masked by a later success
            if run_tests(local_tests, options.jobs):
                failed = 1
                if not settings.continue_:
                    break

        sys.exit( failed )