Changeset 90152a4 for tests/test.py
- Timestamp:
- Aug 27, 2018, 4:40:34 PM (7 years ago)
- Branches:
- ADT, arm-eh, ast-experimental, cleanup-dtors, enum, forall-pointer-decay, jacob/cs343-translation, jenkins-sandbox, master, new-ast, new-ast-unique-expr, pthread-emulation, qualifiedEnum
- Children:
- b7c89aa
- Parents:
- f9feab8 (diff), 305581d (diff)
Note: this is a merge changeset, the changes displayed below correspond to the merge itself.
Use the (diff) links above to see all the changes relative to each parent. - File:
-
- 1 moved
-
tests/test.py (moved) (moved from src/tests/test.py) (17 diffs)
Legend:
- Unmodified
- Added
- Removed
-
tests/test.py
rf9feab8 r90152a4 9 9 import re 10 10 import sys 11 import time 11 12 12 13 ################################################################################ … … 18 19 19 20 def matchTest(path): 20 match = re.search(" (\.[\w\/\-_]*)\/.expect\/([\w\-_]+)(\.[\w\-_]+)?\.txt", path)21 match = re.search("%s\/([\w\/\-_]*).expect\/([\w\-_]+)(\.[\w\-_]+)?\.txt" % settings.SRCDIR, path) 21 22 if match : 22 23 test = Test() … … 42 43 if includes: 43 44 test_list = [x for x in test_list if 44 x. path.startswith( tuple(includes) )45 x.target().startswith( tuple(includes) ) 45 46 ] 46 47 … … 48 49 if excludes: 49 50 test_list = [x for x in test_list if not 50 x. path.startswith( tuple(excludes) )51 x.target().startswith( tuple(excludes) ) 51 52 ] 52 53 … … 61 62 if options.regenerate_expected : 62 63 for testname in options.tests : 64 testname = canonicalPath( testname ) 63 65 if Test.valid_name(testname): 64 found = [test for test in allTests if test.target() == testname]66 found = [test for test in allTests if canonicalPath( test.target() ) == testname] 65 67 tests.append( found[0] if len(found) == 1 else Test.from_target(testname) ) 66 68 else : … … 77 79 print('ERROR: No expected file for test %s, ignoring it' % testname, file=sys.stderr) 78 80 79 # make sure we have at least some test to run80 if not tests :81 print('ERROR: No valid test to run', file=sys.stderr)82 sys.exit(1)83 84 81 return tests 85 82 … … 88 85 # create a parser with the arguments for the tests script 89 86 parser = argparse.ArgumentParser(description='Script which runs cforall tests') 90 parser.add_argument('--debug', help='Run all tests in debug or release', type=yes_no, default='no') 87 parser.add_argument('--debug', help='Run all tests in debug or release', type=yes_no, default='yes') 88 parser.add_argument('--install', help='Run all tests based on installed binaries or tree binaries', type=yes_no, default='no') 91 89 parser.add_argument('--arch', help='Test for specific architecture', type=str, 
default='') 90 parser.add_argument('--timeout', help='Maximum duration in seconds after a single test is considered to have timed out', type=int, default=60) 91 parser.add_argument('--global-timeout', help='Maximum cumulative duration in seconds after the ALL tests are considered to have timed out', type=int, default=7200) 92 92 parser.add_argument('--dry-run', help='Don\'t run the tests, only output the commands', action='store_true') 93 93 parser.add_argument('--list', help='List all test available', action='store_true') 94 94 parser.add_argument('--all', help='Run all test available', action='store_true') 95 95 parser.add_argument('--regenerate-expected', help='Regenerate the .expect by running the specified tets, can be used with --all option', action='store_true') 96 parser.add_argument('-j', '--jobs', help='Number of tests to run simultaneously', type=int , default='8')96 parser.add_argument('-j', '--jobs', help='Number of tests to run simultaneously', type=int) 97 97 parser.add_argument('--list-comp', help='List all valide arguments', action='store_true') 98 98 parser.add_argument('-I','--include', help='Directory of test to include, can be used multiple time, All if omitted', action='append') … … 100 100 parser.add_argument('tests', metavar='test', type=str, nargs='*', help='a list of tests to run') 101 101 102 options = parser.parse_args() 102 try: 103 options = parser.parse_args() 104 except: 105 print('ERROR: invalid arguments', file=sys.stderr) 106 parser.print_help(sys.stderr) 107 sys.exit(1) 103 108 104 109 # script must have at least some tests to run or be listing … … 119 124 # running test functions 120 125 ################################################################################ 126 # fix the absolute paths in the output 127 def fixoutput( fname ): 128 if not is_ascii(fname): 129 return 130 131 file_replace(fname, "%s/" % settings.SRCDIR, "") 132 133 121 134 # logic to run a single test and return the result (No handling of printing or 
other test framework logic) 122 135 def run_single_test(test): 123 136 124 137 # find the output file based on the test name and options flag 138 exe_file = test.target_executable(); 125 139 out_file = test.target_output() 126 140 err_file = test.error_log() … … 131 145 test.prepare() 132 146 133 # remove any outputs from the previous tests to prevent side effects134 rm( (out_file, err_file, test.target()) )135 136 147 # build, skipping to next test on error 148 before = time.time() 137 149 make_ret, _ = make( test.target(), 138 150 redirects = "2> %s 1> /dev/null" % out_file, 139 151 error_file = err_file 140 152 ) 153 after = time.time() 154 155 comp_dur = after - before 156 157 run_dur = None 141 158 142 159 # if the make command succeds continue otherwise skip to diff 143 160 if make_ret == 0 or settings.dry_run: 144 if settings.dry_run or fileIsExecutable(test.target()) : 161 before = time.time() 162 if settings.dry_run or fileIsExecutable(exe_file) : 145 163 # run test 146 retcode, _ = sh("timeout 60 %s > %s 2>&1" % (test.target(), out_file), input = in_file)164 retcode, _ = sh("timeout %d %s > %s 2>&1" % (settings.timeout.single, exe_file, out_file), input = in_file) 147 165 else : 148 166 # simply cat the result into the output 149 retcode, _ = sh("cat %s > %s" % (test.target(), out_file)) 167 retcode, _ = sh("cat %s > %s" % (exe_file, out_file)) 168 169 after = time.time() 170 run_dur = after - before 150 171 else: 151 172 retcode, _ = sh("mv %s %s" % (err_file, out_file)) … … 153 174 154 175 if retcode == 0: 176 # fixoutput(out_file) 155 177 if settings.generating : 156 178 # if we are ounly generating the output we still need to check that the test actually exists … … 173 195 sh("rm -f %s > /dev/null 2>&1" % test.target()) 174 196 175 return retcode, error 197 return retcode, error, [comp_dur, run_dur] 176 198 177 199 # run a single test and handle the errors, outputs, printing, exception handling, etc. 
… … 182 204 name_txt = "%20s " % t.name 183 205 184 retcode, error = run_single_test(t)206 retcode, error, duration = run_single_test(t) 185 207 186 208 # update output based on current action 187 result_txt = TestResult.toString( retcode )209 result_txt = TestResult.toString( retcode, duration ) 188 210 189 211 #print result with error if needed … … 214 236 tests, 215 237 chunksize = 1 216 ).get( 7200)238 ).get(settings.timeout.total) 217 239 except KeyboardInterrupt: 218 240 pool.terminate() … … 234 256 ################################################################################ 235 257 if __name__ == "__main__": 236 #always run from same folder237 chdir()238 258 239 259 # parse the command line arguments … … 254 274 tests = validTests( options ) 255 275 276 # make sure we have at least some test to run 277 if not tests : 278 print('ERROR: No valid test to run', file=sys.stderr) 279 sys.exit(1) 280 281 256 282 # sort the test alphabetically for convenience 257 283 tests.sort(key=lambda t: (t.arch if t.arch else '') + t.target()) … … 259 285 # users may want to simply list the tests 260 286 if options.list_comp : 261 print("-h --help --debug --dry-run --list --arch --all --regenerate-expected - j --jobs ", end='')287 print("-h --help --debug --dry-run --list --arch --all --regenerate-expected --install --timeout --global-timeout -j --jobs ", end='') 262 288 print(" ".join(map(lambda t: "%s" % (t.target()), tests))) 263 289 264 290 elif options.list : 265 291 print("Listing for %s:%s"% (settings.arch.string, settings.debug.string)) 266 print("\n".join(map(lambda t: "%s" % (t.toString()), tests)))292 fancy_print("\n".join(map(lambda t: "%s" % (t.toString()), tests))) 267 293 268 294 else : 295 # check the build configuration works 296 settings.validate() 297 269 298 options.jobs, forceJobs = jobCount( options, tests ) 270 299 settings.updateMakeCmd(forceJobs, options.jobs)
Note:
See TracChangeset
for help on using the changeset viewer.