Changeset 933f32f for tests/test.py
- Timestamp: May 24, 2019, 10:19:41 AM
- Branches: ADT, arm-eh, ast-experimental, cleanup-dtors, enum, forall-pointer-decay, jacob/cs343-translation, jenkins-sandbox, master, new-ast, new-ast-unique-expr, pthread-emulation, qualifiedEnum
- Children: d908563
- Parents: 6a9d4b4 (diff), 292642a (diff)
Note: this is a merge changeset; the changes displayed below correspond to the merge itself. Use the (diff) links above to see all the changes relative to each parent.

- Files: 1 edited
tests/test.py
--- tests/test.py	(r6a9d4b4)
+++ tests/test.py	(r933f32f)

-#!/usr/bin/python
-from __future__ import print_function
+#!/usr/bin/python3
 
 from pybin.tools import *
…
 import re
 import sys
+import tempfile
 import time
 
…
 ################################################################################
 
-def findTests():
+def find_tests():
 	expected = []
 
-	def matchTest(path):
+	def match_test(path):
 		match = re.search("^%s\/([\w\/\-_]*).expect\/([\w\-_]+)(\.[\w\-_]+)?\.txt$" % settings.SRCDIR, path)
 		if match :
…
 			expected.append(test)
 
-	pathWalk( matchTest )
+	path_walk( match_test )
 
 	return expected
 
 # reads the directory ./.expect and indentifies the tests
-def listTests( includes, excludes ):
+def list_tests( includes, excludes ):
 	# tests directly in the .expect folder will always be processed
-	test_list = findTests()
+	test_list = find_tests()
 
 	# if we have a limited number of includes, filter by them
…
 
 # from the found tests, filter all the valid tests/desired tests
-def validTests( options ):
+def valid_tests( options ):
 	tests = []
 
…
 	if options.regenerate_expected :
 		for testname in options.tests :
-			testname = canonicalPath( testname )
+			testname = canonical_path( testname )
 			if Test.valid_name(testname):
-				found = [test for test in allTests if canonicalPath( test.target() ) == testname]
+				found = [test for test in all_tests if canonical_path( test.target() ) == testname]
 				tests.append( found[0] if len(found) == 1 else Test.from_target(testname) )
 			else :
…
 		# otherwise we only need to validate that all tests are present in the complete list
 		for testname in options.tests:
-			test = [t for t in allTests if pathCmp( t.target(), testname )]
+			test = [t for t in all_tests if path_cmp( t.target(), testname )]
 
 			if test :
…
 
 # parses the option
-def getOptions():
+def parse_args():
 	# create a parser with the arguments for the tests script
 	parser = argparse.ArgumentParser(description='Script which runs cforall tests')
…
 		print('ERROR: invalid arguments', file=sys.stderr)
 		parser.print_help(sys.stderr)
 		sys.exit(1)
 
 	# script must have at least some tests to run or be listing
…
 	# check that exactly one of the booleans is set to true
 	if not sum( (listing, all_tests, some_tests, some_dirs) ) > 0 :
-		print('ERROR: must have option \'--all\', \'--list\', \'--include\', \'-I\' or non-empty test list', file=sys.stderr)
+		print('''ERROR: must have option '--all', '--list', '--include', '-I' or non-empty test list''', file=sys.stderr)
 		parser.print_help()
 		sys.exit(1)
…
 	return val == 0 or settings.dry_run
 
-def isExe(file):
-	return settings.dry_run or fileIsExecutable(file)
-
-def noRule(file, target):
-	return not settings.dry_run and fileContainsOnly(file, "make: *** No rule to make target `%s'. Stop." % target)
+def no_rule(file, target):
+	return not settings.dry_run and file_contains_only(file, "make: *** No rule to make target `%s'. Stop." % target)
 
 # logic to run a single test and return the result (No handling of printing or other test framework logic)
…
 	# build, skipping to next test on error
 	with Timed() as comp_dur:
-		make_ret, _ = make( test.target(), redirects = ("2> %s 1> /dev/null" % out_file), error_file = err_file )
-
-	# if the make command succeds continue otherwise skip to diff
+		make_ret, _ = make( test.target(), output=subprocess.DEVNULL, error=out_file, error_file = err_file )
+
 	run_dur = None
-	if success(make_ret):
-		with Timed() as run_dur:
-			if isExe(exe_file):
-				# run test
-				retcode = run(exe_file, out_file, in_file)
+	# run everything in a temp directory to make sure core file are handled properly
+	with tempdir():
+		# if the make command succeds continue otherwise skip to diff
+		if success(make_ret):
+			with Timed() as run_dur:
+				if settings.dry_run or is_exe(exe_file):
+					# run test
+					retcode, _ = sh(exe_file, output=out_file, input=in_file, timeout=True)
+				else :
+					# simply cat the result into the output
+					retcode = cat(exe_file, out_file)
+		else:
+			retcode = mv(err_file, out_file)
+
+		if success(retcode):
+			if settings.generating :
+				# if we are ounly generating the output we still need to check that the test actually exists
+				if no_rule(out_file, test.target()) :
+					retcode = 1
+					error = "\t\tNo make target for test %s!" % test.target()
+					rm(out_file)
+				else:
+					error = None
 			else :
-				# simply cat the result into the output
-				retcode = cat(exe_file, out_file)
-	else:
-		retcode = mv(err_file, out_file)
-
-	if success(retcode):
-		if settings.generating :
-			# if we are ounly generating the output we still need to check that the test actually exists
-			if noRule(out_file, test.target()) :
-				retcode = 1
-				error = "\t\tNo make target for test %s!" % test.target()
-				rm(out_file)
-			else:
-				error = None
-		else :
-			# fetch return code and error from the diff command
-			retcode, error = diff(cmp_file, out_file)
-
-	else:
-		with open (out_file, "r") as myfile:
-			error = myfile.read()
-
-		ret, info = coreInfo(exe_file)
-		error = error + info
+				# fetch return code and error from the diff command
+				retcode, error = diff(cmp_file, out_file)
+
+		else:
+			with open (out_file, "r") as myfile:
+				error = myfile.read()
+
+			ret, info = core_info(exe_file)
+			error = error + info if error else info
 
 
…
 # run a single test and handle the errors, outputs, printing, exception handling, etc.
 def run_test_worker(t) :
-
-	with SignalHandling():
+	try :
 		# print formated name
-		name_txt = "%24s " % t.name
+		name_txt = '{0:{width}} '.format(t.target(), width=settings.output_width)
 
 		retcode, error, duration = run_single_test(t)
…
 
 		#print result with error if needed
-		text = name_txt + result_txt
+		text = '\t' + name_txt + result_txt
 		out = sys.stdout
 		if error :
-			text = text + "\n"+ error
+			text = text + '\n' + error
 			out = sys.stderr
 
…
 		sys.stderr.flush()
 
-	return retcode != TestResult.SUCCESS
+		return retcode != TestResult.SUCCESS
+	except KeyboardInterrupt:
+		False
 
 # run the given list of tests with the given parameters
 def run_tests(tests, jobs) :
 	# clean the sandbox from previous commands
-	make('clean', redirects = '> /dev/null 2>&1')
+	make('clean', output=subprocess.DEVNULL, error=subprocess.DEVNULL)
 
 	# create the executor for our jobs and handle the signal properly
-	pool = setupPool(jobs)
+	pool = multiprocessing.Pool(jobs)
 
 	# for each test to run
…
 
 	# clean the workspace
-	make('clean', redirects = '> /dev/null 2>&1')
+	make('clean', output=subprocess.DEVNULL, error=subprocess.DEVNULL)
 
 	for failed in results:
…
 
 	# parse the command line arguments
-	options = getOptions()
+	options = parse_args()
 
 	# init global settings
…
 
 	# fetch the liest of all valid tests
-	allTests = listTests( options.include, options.exclude )
+	all_tests = list_tests( options.include, options.exclude )
 
 
 	# if user wants all tests than no other treatement of the test list is required
 	if options.all or options.list or options.list_comp or options.include :
-		tests = allTests
+		tests = all_tests
 
 	#otherwise we need to validate that the test list that was entered is valid
 	else :
-		tests = validTests( options )
 
 	# make sure we have at least some test to run
…
 
 	elif options.list :
 		print("Listing for %s:%s"% (settings.arch.string, settings.debug.string))
-		fancy_print("\n".join(map(lambda t: "%s" % (t.toString()), tests)))
+		fancy_print("\n".join(map(lambda t: t.toString(), tests)))
 
 	else :
 		# check the build configuration works
+		settings.prep_output(tests)
 		settings.validate()
 
-		options.jobs, forceJobs = jobCount( options, tests )
-		settings.updateMakeCmd(forceJobs, options.jobs)
-
-		print('%s (%s:%s) on %i cores' % (
-			'Regenerate tests' if settings.generating else 'Running',
+		options.jobs, forceJobs = job_count( options, tests )
+		settings.update_make_cmd(forceJobs, options.jobs)
+
+		print('%s %i tests on %i cores (%s:%s)' % (
+			'Regenerating' if settings.generating else 'Running',
+			len(tests),
+			options.jobs,
 			settings.arch.string,
-			settings.debug.string,
-			options.jobs
+			settings.debug.string
 		))
 
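A note on the new "with tempdir():" block in run_single_test: the comment in the change says each test now runs from a scratch directory so core files from crashing tests land somewhere disposable. The helper itself is not part of this diff (it presumably comes from the star-imported pybin.tools, alongside the newly imported tempfile module), so the following is only a minimal sketch of a context manager compatible with that call site, not the project's actual implementation:

import contextlib
import os
import tempfile

@contextlib.contextmanager
def tempdir():
	# Hypothetical stand-in for the helper used by run_single_test above:
	# create a scratch directory, chdir into it for the duration of the
	# with-block, then restore the previous cwd and delete the scratch space.
	old_cwd = os.getcwd()
	with tempfile.TemporaryDirectory() as scratch:
		try:
			os.chdir(scratch)
			yield scratch
		finally:
			os.chdir(old_cwd)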
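Similarly, the make/sh wrappers that now take output=, error=, input= and timeout= keyword arguments (replacing the old shell-redirect strings such as redirects = '> /dev/null 2>&1') live in pybin.tools and are not shown in this changeset. Below is only an assumed sketch of what such a wrapper could look like on top of subprocess.run; the argument handling, the timeout fallback and the return values are guesses, not the project's actual code:

import subprocess

def sh(*cmd, output=None, error=None, input=None, timeout=False):
	# Assumed behaviour: a string argument is treated as a file name and
	# opened; subprocess.DEVNULL, open file objects and None pass through.
	def as_stream(arg, mode):
		return open(arg, mode) if isinstance(arg, str) else arg

	try:
		proc = subprocess.run(
			cmd,
			stdout  = as_stream(output, 'w'),
			stderr  = as_stream(error,  'w'),
			stdin   = as_stream(input,  'r'),
			# timeout=True picks an arbitrary default; a number is used as-is
			timeout = 120 if timeout is True else (timeout or None),
		)
		return proc.returncode, None
	except subprocess.TimeoutExpired:
		return 1, None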
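Finally, the SignalHandling() context manager and setupPool() are replaced by a plain multiprocessing.Pool(jobs) plus a try/except KeyboardInterrupt inside run_test_worker. The dispatching body of run_tests is elided above, so the sketch below only illustrates the general pattern this change points at: workers swallow Ctrl-C so the pool machinery cannot deadlock, while the parent stays interruptible by waiting on map_async(...).get() with a timeout instead of calling map() directly. The timeout value and the simplified worker body are illustrative, not the script's actual code:

import multiprocessing

def run_test_worker(t):
	try:
		# run_single_test and TestResult are test.py's own names; the real
		# worker also formats and prints the result, omitted here
		retcode, error, duration = run_single_test(t)
		return retcode != TestResult.SUCCESS
	except KeyboardInterrupt:
		# never let Ctrl-C escape a pool worker
		return False

def run_tests(tests, jobs):
	pool = multiprocessing.Pool(jobs)
	try:
		# get() with a timeout keeps the parent responsive to Ctrl-C
		results = pool.map_async(run_test_worker, tests).get(24 * 60 * 60)
	except KeyboardInterrupt:
		pool.terminate()
		pool.join()
		raise
	pool.close()
	pool.join()
	# True if any test failed
	return any(results)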