Changes in tests/test.py [0f3d844:8364209]
File: 1 edited
Legend:
- Unmodified (no prefix)
- Added (prefixed with +)
- Removed (prefixed with -)
--- tests/test.py (r0f3d844)
+++ tests/test.py (r8364209)
@@ -6,13 +6,8 @@
 
 import argparse
-import itertools
 import re
 import sys
 import tempfile
 import time
-
-import os
-import psutil
-import signal
 
 ################################################################################
@@ -30,5 +25,6 @@
             test.path = match.group(1)
             test.arch = match.group(3)[1:] if match.group(3) else None
-            expected.append(test)
+            if settings.arch.match(test.arch):
+                expected.append(test)
 
     path_walk( match_test )
@@ -52,7 +48,4 @@
         x.target().startswith( tuple(excludes) )
     ]
-
-    # sort the test alphabetically for convenience
-    test_list.sort(key=lambda t: ('~' if t.arch else '') + t.target() + (t.arch if t.arch else ''))
 
     return test_list
@@ -79,5 +72,5 @@
 
         if test :
-            tests.extend( test )
+            tests.append( test[0] )
         else :
            print('ERROR: No expected file for test %s, ignoring it' % testname, file=sys.stderr)
@@ -89,11 +82,9 @@
     # create a parser with the arguments for the tests script
    parser = argparse.ArgumentParser(description='Script which runs cforall tests')
-    parser.add_argument('--debug', help='Run all tests in debug or release', type=comma_separated(yes_no), default='yes')
-    parser.add_argument('--install', help='Run all tests based on installed binaries or tree binaries', type=comma_separated(yes_no), default='no')
-    parser.add_argument('--arch', help='Test for specific architecture', type=comma_separated(str), default='')
-    parser.add_argument('--continue', help='When multiple specifications are passed (debug/install/arch), sets whether or not to continue if the last specification failed', type=yes_no, default='yes', dest='continue_')
+    parser.add_argument('--debug', help='Run all tests in debug or release', type=yes_no, default='yes')
+    parser.add_argument('--install', help='Run all tests based on installed binaries or tree binaries', type=yes_no, default='no')
+    parser.add_argument('--arch', help='Test for specific architecture', type=str, default='')
     parser.add_argument('--timeout', help='Maximum duration in seconds after a single test is considered to have timed out', type=int, default=60)
     parser.add_argument('--global-timeout', help='Maximum cumulative duration in seconds after the ALL tests are considered to have timed out', type=int, default=7200)
-    parser.add_argument('--timeout-with-gdb', help='Instead of killing the command when it times out, orphan it and print process id to allow gdb to attach', type=yes_no, default="no")
     parser.add_argument('--dry-run', help='Don\'t run the tests, only output the commands', action='store_true')
     parser.add_argument('--list', help='List all test available', action='store_true')
@@ -157,5 +148,5 @@
     # run everything in a temp directory to make sure core file are handled properly
     with tempdir():
-        # if the make command succeeds continue otherwise skip to diff
+        # if the make command succeds continue otherwise skip to diff
         if success(make_ret):
             with Timed() as run_dur:
@@ -230,20 +221,8 @@
     make('clean', output_file=subprocess.DEVNULL, error=subprocess.DEVNULL)
 
-    # since python prints stacks by default on a interrupt, redo the interrupt handling to be silent
-    def worker_init():
-        def sig_int(signal_num, frame):
-            pass
-
-        signal.signal(signal.SIGINT, sig_int)
-
     # create the executor for our jobs and handle the signal properly
-    pool = multiprocessing.Pool(jobs, worker_init)
+    pool = multiprocessing.Pool(jobs)
 
     failed = False
-
-    def stop(x, y):
-        print("Tests interrupted by user", file=sys.stderr)
-        sys.exit(1)
-    signal.signal(signal.SIGINT, stop)
 
     # for each test to run
@@ -283,5 +262,5 @@
     make('clean', output_file=subprocess.DEVNULL, error=subprocess.DEVNULL)
 
-    return failed
+    return 1 if failed else 0
 
 
@@ -297,72 +276,50 @@
     settings.init( options )
 
+    # fetch the liest of all valid tests
+    all_tests = list_tests( options.include, options.exclude )
+
+
+    # if user wants all tests than no other treatement of the test list is required
+    if options.all or options.list or options.list_comp or options.include :
+        tests = all_tests
+
+    #otherwise we need to validate that the test list that was entered is valid
+    else :
+        tests = valid_tests( options )
+
+    # make sure we have at least some test to run
+    if not tests :
+        print('ERROR: No valid test to run', file=sys.stderr)
+        sys.exit(1)
+
+
+    # sort the test alphabetically for convenience
+    tests.sort(key=lambda t: (t.arch if t.arch else '') + t.target())
+
     # users may want to simply list the tests
     if options.list_comp :
-        # fetch the liest of all valid tests
-        tests = list_tests( None, None )
-
-        # print the possible options
-        print("-h --help --debug --dry-run --list --arch --all --regenerate-expected --archive-errors --install --timeout --global-timeout --timeout-with-gdb -j --jobs -I --include -E --exclude --continue ", end='')
+        print("-h --help --debug --dry-run --list --arch --all --regenerate-expected --archive-errors --install --timeout --global-timeout -j --jobs ", end='')
         print(" ".join(map(lambda t: "%s" % (t.target()), tests)))
 
     elif options.list :
-        # fetch the liest of all valid tests
-        tests = list_tests( options.include, options.exclude )
-
-        # print the available tests
+        print("Listing for %s:%s"% (settings.arch.string, settings.debug.string))
         fancy_print("\n".join(map(lambda t: t.toString(), tests)))
 
     else :
-        # fetch the liest of all valid tests
-        all_tests = list_tests( options.include, options.exclude )
-
-        # if user wants all tests than no other treatement of the test list is required
-        if options.all or options.include :
-            tests = all_tests
-
-        #otherwise we need to validate that the test list that was entered is valid
-        else :
-            tests = valid_tests( options )
-
-        # make sure we have at least some test to run
-        if not tests :
-            print('ERROR: No valid test to run', file=sys.stderr)
-            sys.exit(1)
-
-        # prep invariants
+        # check the build configuration works
         settings.prep_output(tests)
-        failed = 0
-
-        # for each build configurations, run the test
-        for arch, debug, install in itertools.product(settings.all_arch, settings.all_debug, settings.all_install):
-            settings.arch = arch
-            settings.debug = debug
-            settings.install = install
-
-            # filter out the tests for a different architecture
-            # tests are the same across debug/install
-            local_tests = settings.arch.filter( tests )
-            options.jobs, forceJobs = job_count( options, local_tests )
-            settings.update_make_cmd(forceJobs, options.jobs)
-
-            # check the build configuration works
-            settings.validate()
-
-            # print configuration
-            print('%s %i tests on %i cores (%s:%s)' % (
-                'Regenerating' if settings.generating else 'Running',
-                len(local_tests),
-                options.jobs,
-                settings.arch.string,
-                settings.debug.string
-            ))
-
-            # otherwise run all tests and make sure to return the correct error code
-            failed = run_tests(local_tests, options.jobs)
-            if failed:
-                result = 1
-                if not settings.continue_:
-                    break
-
-
-    sys.exit( failed )
+        settings.validate()
+
+        options.jobs, forceJobs = job_count( options, tests )
+        settings.update_make_cmd(forceJobs, options.jobs)
+
+        print('%s %i tests on %i cores (%s:%s)' % (
+            'Regenerating' if settings.generating else 'Running',
+            len(tests),
+            options.jobs,
+            settings.arch.string,
+            settings.debug.string
+        ))
+
+        # otherwise run all tests and make sure to return the correct error code
+        sys.exit( run_tests(tests, options.jobs) )
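
The rewritten parser hunk passes a yes_no callable as the argparse type for --debug and --install; the converter itself is defined elsewhere in the test suite and is not part of this changeset. A minimal sketch of how such an argparse type converter behaves, with yes_no written here as an assumed stand-in:

import argparse

# Assumed stand-in for the suite's yes_no helper (not shown in this changeset):
# argparse calls it on the raw command-line string, and also on string defaults
# such as 'yes', so the parsed attribute ends up as a bool either way.
def yes_no(value):
    if value == 'yes':
        return True
    if value == 'no':
        return False
    raise argparse.ArgumentTypeError("'%s' is not a valid yes/no value" % value)

parser = argparse.ArgumentParser(description='Script which runs cforall tests')
parser.add_argument('--debug', help='Run all tests in debug or release', type=yes_no, default='yes')
options = parser.parse_args(['--debug', 'no'])
print(options.debug)   # False; omitting the flag would give True from the 'yes' default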
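
The last two hunks also change how the exit status is produced: run_tests now normalizes its result to 0/1, and the main block forwards that value straight to sys.exit instead of accumulating failures across build configurations. A minimal sketch of that convention (run_tests below is a hypothetical stand-in, not the suite's real implementation):

import sys

# Hypothetical stand-in for run_tests(); the real function builds, runs, and
# diffs each test.  Only the exit-code convention from the diff is modelled:
# return 1 if anything failed, 0 otherwise.
def run_tests(tests, jobs):
    failed = any(name.startswith('fail') for name in tests)
    return 1 if failed else 0

if __name__ == '__main__':
    tests = ['io1', 'io2']   # placeholder test names
    # main no longer loops over configurations; it forwards the integer result
    # directly, as in sys.exit( run_tests(tests, options.jobs) ) in the diff.
    sys.exit(run_tests(tests, jobs=4))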