Changeset a468e1e9 for tests/test.py
- Timestamp: Dec 8, 2020, 11:03:42 AM
- Branches: ADT, arm-eh, ast-experimental, enum, forall-pointer-decay, jacob/cs343-translation, master, new-ast-unique-expr, pthread-emulation, qualifiedEnum
- Children: a778e8e
- Parents: 08ce416
- File: 1 edited (tests/test.py)
tests/test.py
--- tests/test.py (r08ce416)
+++ tests/test.py (ra468e1e9)
@@ -143,4 +143,5 @@
     parser.add_argument('-j', '--jobs', help='Number of tests to run simultaneously', type=int)
     parser.add_argument('--list-comp', help='List all valide arguments', action='store_true')
+    parser.add_argument('--list-dist', help='List all tests for distribution', action='store_true')
     parser.add_argument('-I','--include', help='Directory of test to include, can be used multiple time, All if omitted', action='append')
     parser.add_argument('-E','--exclude', help='Directory of test to exclude, can be used multiple time, None if omitted', action='append')
@@ -155,5 +156,5 @@
 
     # script must have at least some tests to run or be listing
-    listing = options.list or options.list_comp
+    listing = options.list or options.list_comp or options.list_dist
     all_tests = options.all
     some_tests = len(options.tests) > 0
@@ -334,5 +335,7 @@
     settings.init( options )
 
-    # users may want to simply list the tests
+    # --------------------------------------------------
+    # list all the test for auto completion programs
+    # not pretty, single line, with the command line options
     if options.list_comp :
         # fetch the liest of all valid tests
@@ -340,8 +343,36 @@
 
         # print the possible options
-        print("-h --help --debug --dry-run --list --arch --all --regenerate-expected --archive-errors --install --timeout --global-timeout --timeout-with-gdb -j --jobs -I --include -E --exclude --continue ", end='')
+        print("-h --help --debug --dry-run --list --ast=new --ast=old --arch --all --regenerate-expected --archive-errors --install --timeout --global-timeout --timeout-with-gdb -j --jobs -I --include -E --exclude --continue ", end='')
         print(" ".join(map(lambda t: "%s" % (t.target()), tests)))
 
-    elif options.list :
+        # done
+        sys.exit(0)
+
+    # --------------------------------------------------
+    # list all the test for auto completion programs
+    if options.list_dist :
+        # fetch the liest of all valid tests
+        tests = list_tests( None, None )
+
+        for t in tests:
+            print(os.path.relpath(t.expect(), settings.SRCDIR), end=' ')
+            print(os.path.relpath(t.input() , settings.SRCDIR), end=' ')
+            code, out = make_recon(t.target())
+
+            if code != 0:
+                print('ERROR: recond failed for test {}'.format(t.target()), file=sys.stderr)
+                sys.exit(1)
+
+            print(' '.join(re.findall('([^\s]+\.cfa)', out)), end=' ')
+
+            print('')
+
+        # done
+        sys.exit(0)
+
+
+    # --------------------------------------------------
+    # list all the tests for users, in a pretty format
+    if options.list :
         # fetch the liest of all valid tests
         tests = list_tests( options.include, options.exclude )
@@ -350,69 +381,71 @@
         fancy_print("\n".join(map(lambda t: t.toString(), tests)))
 
+        # done
+        sys.exit(0)
+
+    # fetch the liest of all valid tests
+    all_tests = list_tests( options.include, options.exclude )
+
+    # if user wants all tests than no other treatement of the test list is required
+    if options.all or options.include :
+        tests = all_tests
+
+    #otherwise we need to validate that the test list that was entered is valid
     else :
-        # fetch the liest of all valid tests
-        all_tests = list_tests( options.include, options.exclude )
-
-        # if user wants all tests than no other treatement of the test list is required
-        if options.all or options.include :
-            tests = all_tests
-
-        #otherwise we need to validate that the test list that was entered is valid
-        else :
-            tests = valid_tests( options )
-
-        # make sure we have at least some test to run
-        if not tests :
-            print('ERROR: No valid test to run', file=sys.stderr)
-            sys.exit(1)
-
-        # prep invariants
-        settings.prep_output(tests)
-        failed = 0
-
-        # check if the expected files aren't empty
-        if not options.regenerate_expected:
-            for t in tests:
-                if is_empty(t.expect()):
-                    print('WARNING: test "{}" has empty .expect file'.format(t.target()), file=sys.stderr)
-
-        # for each build configurations, run the test
-        with Timed() as total_dur:
-            for ast, arch, debug, install in itertools.product(settings.all_ast, settings.all_arch, settings.all_debug, settings.all_install):
-                settings.ast = ast
-                settings.arch = arch
-                settings.debug = debug
-                settings.install = install
-
-                # filter out the tests for a different architecture
-                # tests are the same across debug/install
-                local_tests = settings.ast.filter( tests )
-                local_tests = settings.arch.filter( local_tests )
-                options.jobs, forceJobs = job_count( options, local_tests )
-                settings.update_make_cmd(forceJobs, options.jobs)
-
-                # check the build configuration works
-                settings.validate()
-
-                # print configuration
-                print('%s %i tests on %i cores (%s:%s - %s)' % (
-                    'Regenerating' if settings.generating else 'Running',
-                    len(local_tests),
-                    options.jobs,
-                    settings.ast.string,
-                    settings.arch.string,
-                    settings.debug.string
-                ))
-                if not local_tests :
-                    print('WARNING: No tests for this configuration')
-                    continue
-
-                # otherwise run all tests and make sure to return the correct error code
-                failed = run_tests(local_tests, options.jobs)
-                if failed:
-                    result = 1
-                    if not settings.continue_:
-                        break
-
-        print('Tests took %s' % fmtDur( total_dur.duration ))
-        sys.exit( failed )
+        tests = valid_tests( options )
+
+    # make sure we have at least some test to run
+    if not tests :
+        print('ERROR: No valid test to run', file=sys.stderr)
+        sys.exit(1)
+
+    # prep invariants
+    settings.prep_output(tests)
+    failed = 0
+
+    # check if the expected files aren't empty
+    if not options.regenerate_expected:
+        for t in tests:
+            if is_empty(t.expect()):
+                print('WARNING: test "{}" has empty .expect file'.format(t.target()), file=sys.stderr)
+
+    # for each build configurations, run the test
+    with Timed() as total_dur:
+        for ast, arch, debug, install in itertools.product(settings.all_ast, settings.all_arch, settings.all_debug, settings.all_install):
+            settings.ast = ast
+            settings.arch = arch
+            settings.debug = debug
+            settings.install = install
+
+            # filter out the tests for a different architecture
+            # tests are the same across debug/install
+            local_tests = settings.ast.filter( tests )
+            local_tests = settings.arch.filter( local_tests )
+            options.jobs, forceJobs = job_count( options, local_tests )
+            settings.update_make_cmd(forceJobs, options.jobs)
+
+            # check the build configuration works
+            settings.validate()
+
+            # print configuration
+            print('%s %i tests on %i cores (%s:%s - %s)' % (
+                'Regenerating' if settings.generating else 'Running',
+                len(local_tests),
+                options.jobs,
+                settings.ast.string,
+                settings.arch.string,
+                settings.debug.string
+            ))
+            if not local_tests :
+                print('WARNING: No tests for this configuration')
+                continue
+
+            # otherwise run all tests and make sure to return the correct error code
+            failed = run_tests(local_tests, options.jobs)
+            if failed:
+                result = 1
+                if not settings.continue_:
+                    break
+
+    print('Tests took %s' % fmtDur( total_dur.duration ))
+    sys.exit( failed )
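In summary, the new --list-dist mode prints, for each test, its .expect and input files relative to settings.SRCDIR, followed by every .cfa source that make_recon reports for the test's target, pulling the file names out of the make output with a regular expression. A minimal sketch of that extraction step follows; the SRCDIR value and the 'out' string are made up for illustration (in the real script 'out' comes from make_recon, whose output format is assumed here):

    import os
    import re

    # Hypothetical values: the real script gets SRCDIR from settings
    # and 'out' from make_recon(t.target()).
    SRCDIR = '/build/cfa/tests'
    out = 'gcc -O2 array.cfa ../common/utils.cfa -o array'

    # Same regex as the changeset: every whitespace-free token ending in .cfa.
    print(' '.join(re.findall(r'([^\s]+\.cfa)', out)), end=' ')
    # -> array.cfa ../common/utils.cfa

    # Expect/input paths are printed relative to the source directory:
    print(os.path.relpath(SRCDIR + '/.expect/array.txt', SRCDIR))
    # -> .expect/array.txt

Note also the control-flow change in the changeset: the old if/elif/else chain over the listing options becomes three independent blocks that each sys.exit(0) when done, which is what allows the test-running code at the bottom to be dedented out of the else branch.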