Changes in src/tests/test.py [b98c913:592b9fa]
File: 1 edited
Legend:
- Unmodified (shown as unprefixed context lines)
- Added (prefixed with +)
- Removed (prefixed with -)
src/tests/test.py
--- src/tests/test.py (b98c913)
+++ src/tests/test.py (592b9fa)
@@ -9,10 +9,9 @@
 
 import argparse
-import multiprocessing
 import os
 import re
-import signal
 import stat
 import sys
+import multiprocessing
 
 ################################################################################
@@ -28,14 +27,10 @@
 def getMachineType():
 	sh('echo "void ?{}(int*a,int b){}int main(){return 0;}" > .dummy.c')
-	ret, out = sh("make .dummy -s", print2stdout=False)
-
+	ret, out = sh("make .dummy", print2stdout=False)
 	if ret != 0:
 		print("Failed to identify architecture:")
 		print(out)
 		print("Stopping")
-		sh("rm -f .dummy.c > /dev/null 2>&1")
-		sh("rm -f .dummy > /dev/null 2>&1")
 		sys.exit(1)
-
 	_, out = sh("file .dummy", print2stdout=False)
 	sh("rm -f .dummy.c > /dev/null 2>&1")
@@ -124,101 +119,100 @@
 def run_single_test(test, generate, dry_run, debug):
 
-	try :
-		# find the output file based on the test name and options flag
-		out_file = (".out/%s.log" % test.name) if not generate else (".expect/%s.txt" % test.path)
-		err_file = ".err/%s.log" % test.name
-
-		# remove any outputs from the previous tests to prevent side effects
-		sh("rm -f %s" % out_file, dry_run)
-		sh("rm -f %s > /dev/null 2>&1" % test.name, dry_run)
-
-		options = "-debug" if debug else "-nodebug";
-
-		# build, skipping to next test on error
-		make_ret, _ = sh("""%s EXTRA_FLAGS="-quiet %s" %s 2> %s 1> /dev/null""" % (make_cmd, options, test.name, out_file), dry_run)
-
-		# if the make command succeds continue otherwise skip to diff
-		if make_ret == 0 :
-			# fetch optional input
-			stdinput = "< .in/%s.txt" % test.name if isfile(".in/%s.txt" % test.path) else ""
-
-			if fileIsExecutable(test.name) :
-				# run test
-				sh("./%s %s > %s 2>&1" % (test.name, stdinput, out_file), dry_run)
-			else :
-				# simply cat the result into the output
-				sh("cat %s > %s" % (test.name, out_file), dry_run)
-
-		else :
-			# command failed save the log to less temporary file
-			sh("mv %s %s" % (err_file, out_file), dry_run)
-
-		retcode = 0
-		error = None
-
-		# # fix output to prevent make depth to cause issues
-		# fix_MakeLevel(out_file)
-
-		if generate :
-			# if we are ounly generating the output we still need to check that the test actually exists
-			if not dry_run and fileContainsOnly(out_file, "make: *** No rule to make target `%s'. Stop." % test.name) :
-				retcode = 1;
-				error = "\t\tNo make target for test %s!" % test.name
-				sh("rm %s" % out_file, False)
-
-		else :
-			# diff the output of the files
-			diff_cmd = ("diff --ignore-all-space "
-				"--ignore-blank-lines "
-				"--old-group-format='\t\tmissing lines :\n"
-				"%%<' \\\n"
-				"--new-group-format='\t\tnew lines :\n"
-				"%%>' \\\n"
-				"--unchanged-group-format='%%=' \\"
-				"--changed-group-format='\t\texpected :\n"
-				"%%<\n"
-				"\t\tgot :\n"
-				"%%>' \\\n"
-				"--new-line-format='\t\t%%dn\t%%L' \\\n"
-				"--old-line-format='\t\t%%dn\t%%L' \\\n"
-				"--unchanged-line-format='' \\\n"
-				".expect/%s.txt .out/%s.log")
-
-			# fetch return code and error from the diff command
-			retcode, error = sh(diff_cmd % (test.path, test.name), dry_run, False)
-	finally :
-		# clean the executable
-		sh("rm -f %s > /dev/null 2>&1" % test.name, dry_run)
-
-	return retcode, error
+	# find the output file based on the test name and options flag
+	out_file = (".out/%s.log" % test.name) if not generate else (".expect/%s.txt" % test.path)
+	err_file = ".err/%s.log" % test.name
+
+	# remove any outputs from the previous tests to prevent side effects
+	sh("rm -f %s" % out_file, dry_run)
+	sh("rm -f %s > /dev/null 2>&1" % test.name, dry_run)
+
+	options = "-debug" if debug else "-nodebug";
+
+	# build, skipping to next test on error
+	make_ret, _ = sh("""%s test=yes EXTRA_FLAGS="-quiet %s" %s 2> %s 1> /dev/null""" % (make_cmd, options, test.name, out_file), dry_run)
+
+	# if the make command succeds continue otherwise skip to diff
+	if make_ret == 0 :
+		# fetch optional input
+		stdinput = "< .in/%s.txt" % test.name if isfile(".in/%s.txt" % test.path) else ""
+
+		if fileIsExecutable(test.name) :
+			# run test
+			sh("./%s %s > %s 2>&1" % (test.name, stdinput, out_file), dry_run)
+		else :
+			# simply cat the result into the output
+			sh("cat %s > %s" % (test.name, out_file), dry_run)
+
+	else :
+		# command failed save the log to less temporary file
+		sh("mv %s %s" % (err_file, out_file), dry_run)
+
+	retcode = 0
+	error = None
+
+	# # fix output to prevent make depth to cause issues
+	# fix_MakeLevel(out_file)
+
+	if generate :
+		# if we are ounly generating the output we still need to check that the test actually exists
+		if not dry_run and fileContainsOnly(out_file, "make: *** No rule to make target `%s'. Stop." % test.name) :
+			retcode = 1;
+			error = "\t\tNo make target for test %s!" % test.name
+			sh("rm %s" % out_file, False)
+
+	else :
+		# diff the output of the files
+		diff_cmd = ("diff --ignore-all-space "
+			"--ignore-blank-lines "
+			"--old-group-format='\t\tmissing lines :\n"
+			"%%<' \\\n"
+			"--new-group-format='\t\tnew lines :\n"
+			"%%>' \\\n"
+			"--unchanged-group-format='%%=' \\"
+			"--changed-group-format='\t\texpected :\n"
+			"%%<\n"
+			"\t\tgot :\n"
+			"%%>' \\\n"
+			"--new-line-format='\t\t%%dn\t%%L' \\\n"
+			"--old-line-format='\t\t%%dn\t%%L' \\\n"
+			"--unchanged-line-format='' \\\n"
+			".expect/%s.txt .out/%s.log")
+
+		# fetch return code and error from the diff command
+		retcode, error = sh(diff_cmd % (test.path, test.name), dry_run, False)
+
+	# clean the executable
+	sh("rm -f %s > /dev/null 2>&1" % test.name, dry_run)
+
+	return retcode, error
 
 def run_test_instance(t, generate, dry_run, debug) :
-
-	signal.signal(signal.SIGINT, signal.SIG_DFL)
-	# print formated name
-	name_txt = "%20s " % t.name
-
-	#run the test instance and collect the result
-	test_failed, error = run_single_test(t, generate, dry_run, debug)
-
-	# update output based on current action
-	if generate :
-		failed_txt = "ERROR"
-		success_txt = "Done"
-	else :
-		failed_txt = "FAILED"
-		success_txt = "PASSED"
-
-	#print result with error if needed
-	text = name_txt + (failed_txt if test_failed else success_txt)
-	out = sys.stdout
-	if error :
-		text = text + "\n" + error
-		out = sys.stderr
-
-	print(text, file = out);
-	sys.stdout.flush()
-	sys.stderr.flush()
-	signal.signal(signal.SIGINT, signal.SIG_IGN)
-
-	return test_failed
+	try :
+		# print formated name
+		name_txt = "%20s " % t.name
+
+		#run the test instance and collect the result
+		test_failed, error = run_single_test(t, generate, dry_run, debug)
+
+		# update output based on current action
+		if generate :
+			failed_txt = "ERROR"
+			success_txt = "Done"
+		else :
+			failed_txt = "FAILED"
+			success_txt = "PASSED"
+
+		#print result with error if needed
+		text = name_txt + (failed_txt if test_failed else success_txt)
+		out = sys.stdout
+		if error :
+			text = text + "\n" + error
+			out = sys.stderr
+
+		print(text, file = out);
+		sys.stdout.flush()
+		sys.stderr.flush()
+		return test_failed
+
+	except KeyboardInterrupt:
+		test_failed = True
@@ -237,7 +231,5 @@
 
	# for each test to run
-	original_sigint_handler = signal.signal(signal.SIGINT, signal.SIG_IGN)
 	pool = Pool(jobs)
-	signal.signal(signal.SIGINT, original_sigint_handler)
 	try :
 		results = pool.map_async(partial(run_test_instance, generate=generate, dry_run=dry_run, debug=debug), tests ).get(9999)
@@ -268,8 +260,4 @@
 # main loop
 ################################################################################
-abspath = os.path.abspath(__file__)
-dname = os.path.dirname(abspath)
-os.chdir(dname)
-
 # create a parser with the arguments for the tests script
 parser = argparse.ArgumentParser(description='Script which runs cforall tests')
@@ -281,15 +269,12 @@
 parser.add_argument('--regenerate-expected', help='Regenerate the .expect by running the specified tets, can be used with --all option', action='store_true')
 parser.add_argument('-j', '--jobs', help='Number of tests to run simultaneously', type=int, default='8')
-parser.add_argument('--list-comp', help='List all valide arguments', action='store_true')
 parser.add_argument('tests', metavar='test', type=str, nargs='*', help='a list of tests to run')
-
 
 # parse the command line arguments
 options = parser.parse_args()
-do_list = options.list or options.list_comp
 
 # script must have at least some tests to run
-if (len(options.tests) > 0 and options.all and not do_list) \
-	or (len(options.tests) == 0 and not options.all and not do_list) :
+if (len(options.tests) > 0 and options.all and not options.list) \
+	or (len(options.tests) == 0 and not options.all and not options.list) :
 	print('ERROR: must have option \'--all\' or non-empty test list', file=sys.stderr)
 	parser.print_help()
@@ -300,5 +285,5 @@
 
 # if user wants all tests than no other treatement of the test list is required
-if options.all or do_list :
+if options.all or options.list :
 	tests = allTests
 
@@ -335,33 +320,29 @@
 tests.sort(key=lambda t: t.name)
 
+# check if the user already passed in a number of jobs for multi-threading
+make_flags = environ.get('MAKEFLAGS')
+make_jobs_fds = re.search("--jobserver-(auth|fds)=\s*([0-9]+),([0-9]+)", make_flags) if make_flags else None
+if make_jobs_fds :
+	tokens = os.read(int(make_jobs_fds.group(2)), 1024)
+	options.jobs = len(tokens)
+	os.write(int(make_jobs_fds.group(3)), tokens)
+else :
+	options.jobs = multiprocessing.cpu_count()
+
+# make sure we have a valid number of jobs that corresponds to user input
+if options.jobs <= 0 :
+	print('ERROR: Invalid number of jobs', file=sys.stderr)
+	sys.exit(1)
+
+options.jobs = min( options.jobs, len(tests) )
+
+print('Running (%s) on %i cores' % ("debug" if options.debug else "no debug", options.jobs))
+make_cmd = "make" if make_flags else ("make -j%i" % options.jobs)
+
 # users may want to simply list the tests
-if options.list_comp :
-	print("-h --help --debug --concurrent --dry-run --list --all --regenerate-expected -j --jobs ", end='')
-	print(" ".join(map(lambda t: "%s" % (t.name), tests)))
-
-elif options.list :
+if options.list :
 	print("\n".join(map(lambda t: "%s (%s)" % (t.name, t.path), tests)))
 
 else :
-	# check if the user already passed in a number of jobs for multi-threading
-	make_flags = environ.get('MAKEFLAGS')
-	make_jobs_fds = re.search("--jobserver-(auth|fds)=\s*([0-9]+),([0-9]+)", make_flags) if make_flags else None
-	if make_jobs_fds :
-		tokens = os.read(int(make_jobs_fds.group(2)), 1024)
-		options.jobs = len(tokens)
-		os.write(int(make_jobs_fds.group(3)), tokens)
-	else :
-		options.jobs = multiprocessing.cpu_count()
-
-	# make sure we have a valid number of jobs that corresponds to user input
-	if options.jobs <= 0 :
-		print('ERROR: Invalid number of jobs', file=sys.stderr)
-		sys.exit(1)
-
-	options.jobs = min( options.jobs, len(tests) )
-
-	print('Running (%s) on %i cores' % ("debug" if options.debug else "no debug", options.jobs))
-	make_cmd = "make" if make_flags else ("make -j%i" % options.jobs)
-
	# otherwise run all tests and make sure to return the correct error code
 	sys.exit( run_tests(tests, options.regenerate_expected, options.dry_run, options.jobs, options.debug) )
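
A few notes on the techniques this changeset relies on, for readers unfamiliar with them.

The diff_cmd template uses GNU diff's group-format options to label removed-only hunks "missing lines", added-only hunks "new lines", and changed hunks "expected :"/"got :", each line prefixed with its number. Roughly the same report can be produced in pure Python with difflib. This is an illustrative approximation only, not code from test.py: it skips the whitespace-ignoring flags and collapses all group labels to expected/got.

import difflib

def report(expect_path, out_path):
	# report() is a hypothetical helper, not a function in test.py.
	with open(expect_path) as f:
		expected = f.readlines()
	with open(out_path) as f:
		got = f.readlines()
	out = []
	for op, e1, e2, g1, g2 in difflib.SequenceMatcher(None, expected, got).get_opcodes():
		if op == 'equal':
			continue
		if e2 > e1:
			# lines the .expect file promised but the test did not print
			out.append("\t\texpected :\n")
			out.extend("\t\t%d\t%s" % (n, line) for n, line in enumerate(expected[e1:e2], e1 + 1))
		if g2 > g1:
			# lines the test printed that the .expect file does not contain
			out.append("\t\tgot :\n")
			out.extend("\t\t%d\t%s" % (n, line) for n, line in enumerate(got[g1:g2], g1 + 1))
	return "".join(out)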
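The interrupt handling is the behavioural change worth noting: instead of masking SIGINT around Pool creation and re-arming handlers inside each worker, the new code simply catches KeyboardInterrupt in the worker body. A minimal standalone sketch of that pattern follows (hypothetical worker, not code from the changeset):

from multiprocessing import Pool

def worker(n):
	# Ctrl-C in the parent is delivered to pool workers as SIGINT, which
	# Python surfaces inside the worker as KeyboardInterrupt.
	try:
		return n * n
	except KeyboardInterrupt:
		# Exit quietly instead of printing one traceback per worker;
		# the parent still sees the result as a failure.
		return None

if __name__ == '__main__':
	with Pool(4) as pool:
		# Calling .get() with a timeout (the script uses .get(9999)) is the
		# usual workaround that keeps the parent's wait interruptible,
		# whereas a bare .get() can swallow Ctrl-C on older Pythons.
		print(pool.map_async(worker, range(8)).get(9999))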
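The job-count probe that moved to the top level talks to GNU make's jobserver: when MAKEFLAGS carries --jobserver-auth=R,W (spelled --jobserver-fds by older makes), R and W are inherited pipe descriptors holding one token per available job slot. Below is a rough standalone sketch of the same probe, under the assumption that the descriptors really were inherited from a parent make; run outside of make, the os.read would fail or block, just as in the script:

import multiprocessing
import os
import re

def available_jobs():
	# available_jobs() is a hypothetical helper, not part of test.py.
	make_flags = os.environ.get('MAKEFLAGS', '')
	fds = re.search(r"--jobserver-(auth|fds)=\s*([0-9]+),([0-9]+)", make_flags)
	if fds:
		read_fd, write_fd = int(fds.group(2)), int(fds.group(3))
		# Each byte in the pipe is one free job slot; read what is there...
		tokens = os.read(read_fd, 1024)
		# ...and put the tokens straight back so make's accounting stays intact.
		os.write(write_fd, tokens)
		return len(tokens)
	# Not under a jobserver-enabled make: fall back to one job per core.
	return multiprocessing.cpu_count()

if __name__ == '__main__':
	print("usable jobs:", available_jobs())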