Changeset 31e46b8 for src/tests/test.py
- Timestamp:
- Jul 22, 2016, 2:05:52 PM (9 years ago)
- Branches:
- ADT, aaron-thesis, arm-eh, ast-experimental, cleanup-dtors, ctor, deferred_resn, demangler, enum, forall-pointer-decay, jacob/cs343-translation, jenkins-sandbox, master, memory, new-ast, new-ast-unique-expr, new-env, no_list, persistent-indexer, pthread-emulation, qualifiedEnum, resolv-new, with_gc
- Children:
- ccb447e, e4957e7
- Parents:
- 956a9c77 (diff), ef3b335 (diff)
Note: this is a merge changeset, the changes displayed below correspond to the merge itself.
Use the (diff)
links above to see all the changes relative to each parent. - File:
-
- 1 edited
Legend:
- Unmodified
- Added
- Removed
-
src/tests/test.py
r956a9c77 r31e46b8 15 15 # help functions 16 16 ################################################################################ 17 18 # Test class that defines what a test is 19 class Test: 20 def __init__(self, name, path): 21 self.name, self.path = name, path 22 23 # parses the Makefile to find the machine type (32-bit / 64-bit) 24 def getMachineType(): 25 sh('echo "int main() { return 0; }" > .dummy.c') 26 sh("make .dummy", print2stdout=False) 27 _, out = sh("file .dummy", print2stdout=False) 28 sh("rm -f .dummy.c > /dev/null 2>&1") 29 sh("rm -f .dummy > /dev/null 2>&1") 30 return re.search("ELF\s([0-9]+)-bit", out).group(1) 31 32 # reads the directory ./.expect and indentifies the tests 17 33 def listTests(): 18 list = [splitext(f)[0] for f in listdir('./.expect') 34 machineType = getMachineType() 35 36 print(machineType) 37 38 # tests directly in the .expect folder will always be processed 39 generic_list = map(lambda fname: Test(fname, fname), 40 [splitext(f)[0] for f in listdir('./.expect') 19 41 if not f.startswith('.') and f.endswith('.txt') 20 ] 21 22 return list 23 42 ]) 43 44 # tests in the machineType folder will be ran only for the corresponding compiler 45 typed_list = map(lambda fname: Test( fname, "%s/%s" % (machineType, fname) ), 46 [splitext(f)[0] for f in listdir("./.expect/%s" % machineType) 47 if not f.startswith('.') and f.endswith('.txt') 48 ]) 49 50 # append both lists to get 51 return generic_list + typed_list 52 53 # helper functions to run terminal commands 24 54 def sh(cmd, dry_run = False, print2stdout = True): 25 if dry_run : 55 if dry_run : # if this is a dry_run, only print the commands that would be ran 26 56 print("cmd: %s" % cmd) 27 57 return 0, None 28 else : 58 else : # otherwise create a pipe and run the desired command 29 59 proc = Popen(cmd, stdout=None if print2stdout else PIPE, stderr=STDOUT, shell=True) 30 60 out, err = proc.communicate() 31 61 return proc.returncode, out 32 62 63 # helper function to replace patterns in 
a file 33 64 def file_replace(fname, pat, s_after): 34 65 # first, see if the pattern is even in the file. … … 46 77 os.rename(out_fname, fname) 47 78 79 # tests output may differ depending on the depth of the makefile 48 80 def fix_MakeLevel(file) : 49 81 if environ.get('MAKELEVEL') : 50 82 file_replace(file, "make\[%i\]" % int(environ.get('MAKELEVEL')), 'make' ) 51 83 84 # helper function to check if a files contains only a spacific string 52 85 def fileContainsOnly(file, text) : 53 86 with open(file) as f: 54 87 ff = f.read().strip() 55 88 result = ff == text.strip() 56 #57 # print("Comparing :\n\t'%s'\nWith:\n\t'%s'" % (ff, text))58 # print("Result is : \n\t", end="")59 # print(result)60 89 61 90 return result; 62 91 92 # check whether or not a file is executable 63 93 def fileIsExecutable(file) : 64 94 try : … … 71 101 return False 72 102 103 # find the test data for a given test name 104 def filterTests(testname) : 105 found = [test for test in allTests if test.name == testname] 106 return (found[0] if len(found) == 1 else Test(testname, testname) ) 107 73 108 ################################################################################ 74 109 # running test functions … … 76 111 def run_test_instance(test, generate, dry_run): 77 112 78 out_file = (".out/%s.log" % test) if not generate else (".expect/%s.txt" % test) 79 113 # find the output file based on the test name and options flag 114 out_file = (".out/%s.log" % test.name) if not generate else (".expect/%s.txt" % test.path) 115 116 # remove any outputs from the previous tests to prevent side effects 80 117 sh("rm -f %s" % out_file, dry_run) 81 sh("rm -f %s > /dev/null 2>&1" % test , dry_run)118 sh("rm -f %s > /dev/null 2>&1" % test.name, dry_run) 82 119 83 120 # build, skipping to next test on error 84 make_ret, _ = sh("%s %s 2> %s 1> /dev/null" % (make_cmd, test, out_file), dry_run) 85 121 make_ret, _ = sh("%s %s 2> %s 1> /dev/null" % (make_cmd, test.name, out_file), dry_run) 122 123 # if the make 
command succeds continue otherwise skip to diff 86 124 if make_ret == 0 : 87 125 # fetch optional input 88 stdinput = "< .in/%s.txt" % test if isfile(".in/%s.txt" % test) else ""89 90 if fileIsExecutable(test ) :126 stdinput = "< .in/%s.txt" % test.name if isfile(".in/%s.txt" % test.path) else "" 127 128 if fileIsExecutable(test.name) : 91 129 # run test 92 sh("./%s %s > %s 2>&1" % (test , stdinput, out_file), dry_run)130 sh("./%s %s > %s 2>&1" % (test.name, stdinput, out_file), dry_run) 93 131 else : 94 132 # simply cat the result into the output 95 sh("cat %s > %s" % (test , out_file), dry_run)133 sh("cat %s > %s" % (test.name, out_file), dry_run) 96 134 97 135 retcode = 0 98 136 error = None 99 137 138 # fix output to prevent make depth to cause issues 100 139 fix_MakeLevel(out_file) 101 140 102 141 if generate : 103 if not dry_run and fileContainsOnly(out_file, "make: *** No rule to make target `%s'. Stop." % test) : 142 # if we are ounly generating the output we still need to check that the test actually exists 143 if not dry_run and fileContainsOnly(out_file, "make: *** No rule to make target `%s'. Stop." % test.name) : 104 144 retcode = 1; 105 145 error = "\t\tNo make target for test %s!" 
% test … … 122 162 ".expect/%s.txt .out/%s.log") 123 163 124 retcode, error = sh(diff_cmd % (test, test), dry_run, False) 164 # fetch return code and error from the diff command 165 retcode, error = sh(diff_cmd % (test.path, test.name), dry_run, False) 125 166 126 167 # clean the executable 127 sh("rm -f %s > /dev/null 2>&1" % test , dry_run)168 sh("rm -f %s > /dev/null 2>&1" % test.name, dry_run) 128 169 129 170 return retcode, error 130 171 131 def run_tests(tests, generate, dry_run) : 172 # run the given list of tests with the given parameters 173 def run_tests(tests, generate, dry_run, jobs) : 174 # clean the sandbox from previous commands 132 175 sh("%s clean > /dev/null 2>&1" % make_cmd, dry_run) 176 177 #make sure the required folder are present 133 178 sh('mkdir -p .out .expect', dry_run) 134 179 … … 137 182 138 183 failed = False; 184 # for eeach test to run 139 185 for t in tests: 140 print("%20s " % t, end="") 141 sys.stdout.flush() 186 # print formated name 187 name_txt = "%20s " % t.name 188 189 #run the test instance and collect the result 142 190 test_failed, error = run_test_instance(t, generate, dry_run) 191 192 # aggregate test suite result 143 193 failed = test_failed or failed 144 194 195 # update output based on current action 145 196 if generate : 146 197 failed_txt = "ERROR" … … 150 201 success_txt = "PASSED" 151 202 152 print(failed_txt if test_failed else success_txt) 203 #print result with error if needed 204 text = name_txt + (failed_txt if test_failed else success_txt) 205 out = sys.stdout 153 206 if error : 154 print(error, file=sys.stderr) 155 207 text = text + "\n" + error 208 out = sys.stderr 209 210 print(text, file = out); 211 sys.stdout.flush() 212 sys.stderr.flush() 213 214 215 #clean the workspace 156 216 sh("%s clean > /dev/null 2>&1" % make_cmd, dry_run) 157 217 … … 161 221 # main loop 162 222 ################################################################################ 223 # create a parser with the arguments for the tests 
script 163 224 parser = argparse.ArgumentParser(description='Script which runs cforall tests') 164 225 parser.add_argument('--dry-run', help='Don\'t run the tests, only output the commands', action='store_true') … … 166 227 parser.add_argument('--all', help='Run all test available', action='store_true') 167 228 parser.add_argument('--regenerate-expected', help='Regenerate the .expect by running the specified tets, can be used with --all option', action='store_true') 229 parser.add_argument('-j', '--jobs', help='Number of tests to run simultaneously', type=int, default='8') 168 230 parser.add_argument('tests', metavar='test', type=str, nargs='*', help='a list of tests to run') 169 231 232 # parse the command line arguments 170 233 options = parser.parse_args() 171 234 235 # script must have at least some tests to run 172 236 if (len(options.tests) > 0 and options.all and not options.list) \ 173 237 or (len(options.tests) == 0 and not options.all and not options.list) : … … 176 240 sys.exit(1) 177 241 242 # fetch the liest of all valid tests 178 243 allTests = listTests() 179 244 245 # if user wants all tests than no other treatement of the test list is required 180 246 if options.all or options.list : 181 247 tests = allTests 182 248 183 249 else : 250 #otherwise we need to validate that the test list that was entered is valid 184 251 tests = [] 185 for test in options.tests: 186 if test in allTests or options.regenerate_expected : 187 tests.append(test) 188 else : 189 print('ERROR: No expected file for test %s, ignoring it' % test, file=sys.stderr) 190 252 253 # if we are regenerating the tests we need to find the information of the 254 # already existing tests and create new info for the new tests 255 if options.regenerate_expected : 256 tests = map(filterTests, options.tests) 257 258 else : 259 # otherwise we only need to validate that all tests are present in the complete list 260 for testname in options.tests: 261 test = [t for t in allTests if t.name == 
testname] 262 263 if len(test) != 0 : 264 tests.append( test[0] ) 265 else : 266 print('ERROR: No expected file for test %s, ignoring it' % testname, file=sys.stderr) 267 268 # make sure we have at least some test to run 191 269 if len(tests) == 0 : 192 270 print('ERROR: No valid test to run', file=sys.stderr) 193 271 sys.exit(1) 194 272 195 tests.sort() 273 # sort the test alphabetically for convenience 274 tests.sort(key=lambda t: t.name) 275 276 # check if the user already passed in a number of jobs for multi-threading 196 277 make_flags = environ.get('MAKEFLAGS') 278 make_has_max_jobs = re.search("(-j|--jobs)\s*([0-9]+)", make_flags) if make_flags else None 279 make_max_jobs = make_has_max_jobs.group(2) if make_has_max_jobs else None 197 280 make_cmd = "make" if make_flags and "-j" in make_flags else "make -j8" 198 281 282 # make sure we have a valid number of jobs that corresponds to user input 283 options.jobs = int(make_max_jobs) if make_max_jobs else options.jobs 284 if options.jobs <= 0 : 285 print('ERROR: Invalid number of jobs', file=sys.stderr) 286 sys.exit(1) 287 288 # users may want to simply list the tests 199 289 if options.list : 200 print("\n".join( tests))290 print("\n".join(map(lambda t: "%s (%s)" % (t.name, t.path), tests))) 201 291 202 292 else : 203 sys.exit( run_tests(tests, options.regenerate_expected, options.dry_run) ) 293 # otherwise run all tests and make sure to return the correct error code 294 sys.exit( run_tests(tests, options.regenerate_expected, options.dry_run, options.jobs) )
Note:
See TracChangeset
for help on using the changeset viewer.