- Timestamp: Apr 25, 2017, 11:53:19 AM
- Branches: ADT, aaron-thesis, arm-eh, ast-experimental, cleanup-dtors, deferred_resn, demangler, enum, forall-pointer-decay, jacob/cs343-translation, jenkins-sandbox, master, new-ast, new-ast-unique-expr, new-env, no_list, persistent-indexer, pthread-emulation, qualifiedEnum, resolv-new, with_gc
- Children: cf67057
- Parents: 89a9be2
- File: 1 edited (src/tests/test.py)
src/tests/test.py
--- src/tests/test.py  (r89a9be2)
+++ src/tests/test.py  (r9fcb5e4)
@@ -9,9 +9,10 @@
 
 import argparse
+import multiprocessing
 import os
 import re
+import signal
 import stat
 import sys
-import multiprocessing
 
 ################################################################################
@@ -119,102 +120,103 @@
 def run_single_test(test, generate, dry_run, debug):
 
-    # find the output file based on the test name and options flag
-    out_file = (".out/%s.log" % test.name) if not generate else (".expect/%s.txt" % test.path)
-    err_file = ".err/%s.log" % test.name
-
-    # remove any outputs from the previous tests to prevent side effects
-    sh("rm -f %s" % out_file, dry_run)
-    sh("rm -f %s > /dev/null 2>&1" % test.name, dry_run)
-
-    options = "-debug" if debug else "-nodebug";
-
-    # build, skipping to next test on error
-    make_ret, _ = sh("""%s EXTRA_FLAGS="-quiet %s" %s 2> %s 1> /dev/null""" % (make_cmd, options, test.name, out_file), dry_run)
-
-    # if the make command succeds continue otherwise skip to diff
-    if make_ret == 0 :
-        # fetch optional input
-        stdinput = "< .in/%s.txt" % test.name if isfile(".in/%s.txt" % test.path) else ""
-
-        if fileIsExecutable(test.name) :
-            # run test
-            sh("./%s %s > %s 2>&1" % (test.name, stdinput, out_file), dry_run)
+    try :
+        # find the output file based on the test name and options flag
+        out_file = (".out/%s.log" % test.name) if not generate else (".expect/%s.txt" % test.path)
+        err_file = ".err/%s.log" % test.name
+
+        # remove any outputs from the previous tests to prevent side effects
+        sh("rm -f %s" % out_file, dry_run)
+        sh("rm -f %s > /dev/null 2>&1" % test.name, dry_run)
+
+        options = "-debug" if debug else "-nodebug";
+
+        # build, skipping to next test on error
+        make_ret, _ = sh("""%s EXTRA_FLAGS="-quiet %s" %s 2> %s 1> /dev/null""" % (make_cmd, options, test.name, out_file), dry_run)
+
+        # if the make command succeds continue otherwise skip to diff
+        if make_ret == 0 :
+            # fetch optional input
+            stdinput = "< .in/%s.txt" % test.name if isfile(".in/%s.txt" % test.path) else ""
+
+            if fileIsExecutable(test.name) :
+                # run test
+                sh("./%s %s > %s 2>&1" % (test.name, stdinput, out_file), dry_run)
+            else :
+                # simply cat the result into the output
+                sh("cat %s > %s" % (test.name, out_file), dry_run)
+
         else :
-            # simply cat the result into the output
-            sh("cat %s > %s" % (test.name, out_file), dry_run)
-
+            # command failed save the log to less temporary file
+            sh("mv %s %s" % (err_file, out_file), dry_run)
+
+        retcode = 0
+        error = None
+
+        # # fix output to prevent make depth to cause issues
+        # fix_MakeLevel(out_file)
+
+        if generate :
+            # if we are ounly generating the output we still need to check that the test actually exists
+            if not dry_run and fileContainsOnly(out_file, "make: *** No rule to make target `%s'. Stop." % test.name) :
+                retcode = 1;
+                error = "\t\tNo make target for test %s!" % test.name
+                sh("rm %s" % out_file, False)
+
+        else :
+            # diff the output of the files
+            diff_cmd = ("diff --ignore-all-space "
+                "--ignore-blank-lines "
+                "--old-group-format='\t\tmissing lines :\n"
+                "%%<' \\\n"
+                "--new-group-format='\t\tnew lines :\n"
+                "%%>' \\\n"
+                "--unchanged-group-format='%%=' \\"
+                "--changed-group-format='\t\texpected :\n"
+                "%%<\n"
+                "\t\tgot :\n"
+                "%%>' \\\n"
+                "--new-line-format='\t\t%%dn\t%%L' \\\n"
+                "--old-line-format='\t\t%%dn\t%%L' \\\n"
+                "--unchanged-line-format='' \\\n"
+                ".expect/%s.txt .out/%s.log")
+
+            # fetch return code and error from the diff command
+            retcode, error = sh(diff_cmd % (test.path, test.name), dry_run, False)
+    finally :
+        # clean the executable
+        sh("rm -f %s > /dev/null 2>&1" % test.name, dry_run)
+
+    return retcode, error
+
+def run_test_instance(t, generate, dry_run, debug) :
+
+    signal.signal(signal.SIGINT, signal.SIG_DFL)
+    # print formated name
+    name_txt = "%20s " % t.name
+
+    #run the test instance and collect the result
+    test_failed, error = run_single_test(t, generate, dry_run, debug)
+
+    # update output based on current action
+    if generate :
+        failed_txt = "ERROR"
+        success_txt = "Done"
     else :
-        # command failed save the log to less temporary file
-        sh("mv %s %s" % (err_file, out_file), dry_run)
-
-    retcode = 0
-    error = None
-
-    # # fix output to prevent make depth to cause issues
-    # fix_MakeLevel(out_file)
-
-    if generate :
-        # if we are ounly generating the output we still need to check that the test actually exists
-        if not dry_run and fileContainsOnly(out_file, "make: *** No rule to make target `%s'. Stop." % test.name) :
-            retcode = 1;
-            error = "\t\tNo make target for test %s!" % test.name
-            sh("rm %s" % out_file, False)
-
-    else :
-        # diff the output of the files
-        diff_cmd = ("diff --ignore-all-space "
-            "--ignore-blank-lines "
-            "--old-group-format='\t\tmissing lines :\n"
-            "%%<' \\\n"
-            "--new-group-format='\t\tnew lines :\n"
-            "%%>' \\\n"
-            "--unchanged-group-format='%%=' \\"
-            "--changed-group-format='\t\texpected :\n"
-            "%%<\n"
-            "\t\tgot :\n"
-            "%%>' \\\n"
-            "--new-line-format='\t\t%%dn\t%%L' \\\n"
-            "--old-line-format='\t\t%%dn\t%%L' \\\n"
-            "--unchanged-line-format='' \\\n"
-            ".expect/%s.txt .out/%s.log")
-
-        # fetch return code and error from the diff command
-        retcode, error = sh(diff_cmd % (test.path, test.name), dry_run, False)
-
-    # clean the executable
-    sh("rm -f %s > /dev/null 2>&1" % test.name, dry_run)
-
-    return retcode, error
-
-def run_test_instance(t, generate, dry_run, debug) :
-    try :
-        # print formated name
-        name_txt = "%20s " % t.name
-
-        #run the test instance and collect the result
-        test_failed, error = run_single_test(t, generate, dry_run, debug)
-
-        # update output based on current action
-        if generate :
-            failed_txt = "ERROR"
-            success_txt = "Done"
-        else :
-            failed_txt = "FAILED"
-            success_txt = "PASSED"
-
-        #print result with error if needed
-        text = name_txt + (failed_txt if test_failed else success_txt)
-        out = sys.stdout
-        if error :
-            text = text + "\n" + error
-            out = sys.stderr
-
-        print(text, file = out);
-        sys.stdout.flush()
-        sys.stderr.flush()
-        return test_failed
-
-    except KeyboardInterrupt:
-        test_failed = True
+        failed_txt = "FAILED"
+        success_txt = "PASSED"
+
+    #print result with error if needed
+    text = name_txt + (failed_txt if test_failed else success_txt)
+    out = sys.stdout
+    if error :
+        text = text + "\n" + error
+        out = sys.stderr
+
+    print(text, file = out);
+    sys.stdout.flush()
+    sys.stderr.flush()
+    signal.signal(signal.SIGINT, signal.SIG_IGN)
+
+    return test_failed
 
 
@@ -231,5 +233,7 @@
 
     # for each test to run
+    original_sigint_handler = signal.signal(signal.SIGINT, signal.SIG_IGN)
     pool = Pool(jobs)
+    signal.signal(signal.SIGINT, original_sigint_handler)
     try :
         results = pool.map_async(partial(run_test_instance, generate=generate, dry_run=dry_run, debug=debug), tests ).get(9999)
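The change replaces the old try/except KeyboardInterrupt in run_test_instance with explicit SIGINT management around the multiprocessing.Pool: the parent ignores SIGINT while the pool is created so the forked workers inherit SIG_IGN, the parent then restores its own handler, each worker resets SIGINT to the default handler at the start of a job and ignores it again once its result is printed, and results are collected with map_async(...).get(9999) so a Ctrl-C can still reach the parent. The sketch below is a minimal, self-contained illustration of that pattern, not the repository's test.py; the name run_one and the sleep-based workload are invented for the example, and it assumes a fork-based start method (the Linux default).

# Minimal sketch of the Ctrl-C / multiprocessing.Pool pattern above.
# run_one, the job count, and the sleep workload are illustrative only.
import multiprocessing
import signal
import time


def run_one(job):
    # The worker inherited SIG_IGN from the parent; restore the default
    # handler so the work itself can be interrupted, then ignore SIGINT
    # again before handing the result back.
    signal.signal(signal.SIGINT, signal.SIG_DFL)
    try:
        time.sleep(0.1)               # stand-in for building/running a test
        return (job, "PASSED")
    finally:
        signal.signal(signal.SIGINT, signal.SIG_IGN)


if __name__ == "__main__":
    # Ignore SIGINT while the pool is created so the forked workers start
    # with SIG_IGN, then restore the original handler in the parent.
    original_handler = signal.signal(signal.SIGINT, signal.SIG_IGN)
    pool = multiprocessing.Pool(4)
    signal.signal(signal.SIGINT, original_handler)

    try:
        # map_async(...).get(timeout) instead of a bare map(): the timed
        # get() is a common workaround so KeyboardInterrupt reaches this
        # block promptly.
        results = pool.map_async(run_one, range(8)).get(9999)
        print(results)
        pool.close()
    except KeyboardInterrupt:
        # Ctrl-C: stop the workers instead of finishing the run.
        pool.terminate()
    pool.join()

With this arrangement a single Ctrl-C interrupts the parent's timed get(), the except branch terminates the pool deliberately, and workers that are between jobs are ignoring SIGINT, so shutdown stays orderly.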