Changeset c13e8dc8 for src/tests/test.py
- Timestamp: Dec 5, 2017, 2:35:03 PM
- Branches: ADT, arm-eh, ast-experimental, cleanup-dtors, enum, forall-pointer-decay, jacob/cs343-translation, jenkins-sandbox, master, new-ast, new-ast-unique-expr, pthread-emulation, qualifiedEnum
- Children: f9feab8
- Parents: 9c35431 (diff), 65197c2 (diff)
Note: this is a merge changeset; the changes displayed below correspond to the merge itself. Use the (diff) links above to see all the changes relative to each parent.

File: 1 edited
Legend: unmodified lines carry no prefix; added lines are prefixed with '+'; removed lines are prefixed with '-'.
src/tests/test.py
--- src/tests/test.py  (r9c35431)
+++ src/tests/test.py  (rc13e8dc8)

  from __future__ import print_function

- from functools import partial
- from multiprocessing import Pool
- from os import listdir, environ
- from os.path import isfile, join, splitext
  from pybin.tools import *
+ from pybin.test_run import *
+ from pybin import settings

  import argparse
- import multiprocessing
- import os
  import re
- import signal
  import sys

…

  ################################################################################

- # Test class that defines what a test is
- class Test:
-     def __init__(self, name, path):
-         self.name, self.path = name, path
-
- class TestResult:
-     SUCCESS = 0
-     FAILURE = 1
-     TIMEOUT = 124
-
- # parses the Makefile to find the machine type (32-bit / 64-bit)
- def getMachineType():
-     sh('echo "void ?{}(int&a,int b){}int main(){return 0;}" > .dummy.c')
-     ret, out = sh("make .dummy -s", print2stdout=True)
-
-     if ret != 0:
-         print("Failed to identify architecture:")
-         print(out)
-         print("Stopping")
-         rm( (".dummy.c",".dummy") )
-         sys.exit(1)
-
-     _, out = sh("file .dummy", print2stdout=False)
-     rm( (".dummy.c",".dummy") )
-
-     return re.search("ELF\s([0-9]+)-bit", out).group(1)
-
- def listTestsFolder(folder) :
-     path = ('./.expect/%s/' % folder) if folder else './.expect/'
-     subpath = "%s/" % folder if folder else ""
+ def findTests():
+     expected = []
+
+     def matchTest(path):
+         match = re.search("(\.[\w\/\-_]*)\/.expect\/([\w\-_]+)(\.[\w\-_]+)?\.txt", path)
+         if match :
+             test = Test()
+             test.name = match.group(2)
+             test.path = match.group(1)
+             test.arch = match.group(3)[1:] if match.group(3) else None
+             if settings.arch.match(test.arch):
+                 expected.append(test)
+
+     pathWalk( matchTest )
+
+     return expected
+
+ # reads the directory ./.expect and indentifies the tests
+ def listTests( includes, excludes ):
+     includes = [canonicalPath( i ) for i in includes] if includes else None
+     excludes = [canonicalPath( i ) for i in excludes] if excludes else None

      # tests directly in the .expect folder will always be processed
-     return map(lambda fname: Test(fname, subpath + fname),
-         [splitext(f)[0] for f in listdir( path )
-             if not f.startswith('.') and f.endswith('.txt')
-         ])
-
- # reads the directory ./.expect and indentifies the tests
- def listTests( concurrent ):
-     machineType = getMachineType()
-
-     # tests directly in the .expect folder will always be processed
-     generic_list = listTestsFolder( "" )
-
-     # tests in the machineType folder will be ran only for the corresponding compiler
-     typed_list = listTestsFolder( machineType )
-
-     # tests in the concurrent folder will be ran only if concurrency is enabled
-     concurrent_list = listTestsFolder( "concurrent" ) if concurrent else []
-
-     # append both lists to get
-     return generic_list + typed_list + concurrent_list;
+     test_list = findTests()
+
+     # if we have a limited number of includes, filter by them
+     if includes:
+         test_list = [x for x in test_list if
+             x.path.startswith( tuple(includes) )
+         ]
+
+     # # if we have a folders to excludes, filter by them
+     if excludes:
+         test_list = [x for x in test_list if not
+             x.path.startswith( tuple(excludes) )
+         ]
+
+     return test_list

  # from the found tests, filter all the valid tests/desired tests
…
      if options.regenerate_expected :
          for testname in options.tests :
-             if testname.endswith( (".c", ".cc", ".cpp") ):
+             if Test.valid_name(testname):
+                 found = [test for test in allTests if test.target() == testname]
+                 tests.append( found[0] if len(found) == 1 else Test.from_target(testname) )
+             else :
                  print('ERROR: "%s", tests are not allowed to end with a C/C++/CFA extension, ignoring it' % testname, file=sys.stderr)
-             else :
-                 found = [test for test in allTests if test.name == testname]
-                 tests.append( found[0] if len(found) == 1 else Test(testname, testname) )

      else :
          # otherwise we only need to validate that all tests are present in the complete list
          for testname in options.tests:
-             test = [t for t in allTests if t.name == testname]
-
-             if len(test) != 0:
+             test = [t for t in allTests if pathCmp( t.target(), testname )]
+
+             if test :
                  tests.append( test[0] )
              else :
…

      # make sure we have at least some test to run
-     if len(tests) == 0:
+     if not tests :
          print('ERROR: No valid test to run', file=sys.stderr)
          sys.exit(1)
…
      parser = argparse.ArgumentParser(description='Script which runs cforall tests')
      parser.add_argument('--debug', help='Run all tests in debug or release', type=yes_no, default='no')
-     parser.add_argument('--concurrent', help='Run concurrent tests', type=yes_no, default='yes')
+     parser.add_argument('--arch', help='Test for specific architecture', type=str, default='')
      parser.add_argument('--dry-run', help='Don\'t run the tests, only output the commands', action='store_true')
      parser.add_argument('--list', help='List all test available', action='store_true')
…
      parser.add_argument('-j', '--jobs', help='Number of tests to run simultaneously', type=int, default='8')
      parser.add_argument('--list-comp', help='List all valide arguments', action='store_true')
+     parser.add_argument('-I','--include', help='Directory of test to include, can be used multiple time, All if omitted', action='append')
+     parser.add_argument('-E','--exclude', help='Directory of test to exclude, can be used multiple time, None if omitted', action='append')
      parser.add_argument('tests', metavar='test', type=str, nargs='*', help='a list of tests to run')
…
      all_tests = options.all
      some_tests = len(options.tests) > 0
+     some_dirs = len(options.include) > 0 if options.include else 0

      # check that exactly one of the booleans is set to true
-     if not sum( (listing, all_tests, some_tests ) ) == 1 :
-         print('ERROR: must have option \'--all\', \'--list\' or non-empty test list', file=sys.stderr)
+     if not sum( (listing, all_tests, some_tests, some_dirs) ) > 0 :
+         print('ERROR: must have option \'--all\', \'--list\', \'--include\', \'-I\' or non-empty test list', file=sys.stderr)
          parser.print_help()
          sys.exit(1)
…
      return options

- def jobCount( options ):
-     # check if the user already passed in a number of jobs for multi-threading
-     make_flags = environ.get('MAKEFLAGS')
-     make_jobs_fds = re.search("--jobserver-(auth|fds)=\s*([0-9]+),([0-9]+)", make_flags) if make_flags else None
-     if make_jobs_fds :
-         tokens = os.read(int(make_jobs_fds.group(2)), 1024)
-         options.jobs = len(tokens)
-         os.write(int(make_jobs_fds.group(3)), tokens)
-     else :
-         options.jobs = multiprocessing.cpu_count()
-
-     # make sure we have a valid number of jobs that corresponds to user input
-     if options.jobs <= 0 :
-         print('ERROR: Invalid number of jobs', file=sys.stderr)
-         sys.exit(1)
-
-     return min( options.jobs, len(tests) ), True if make_flags else False
-
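The core of the change so far is the switch from fixed .expect subfolders (32-bit/64-bit, concurrent) to discovering tests by their expected-output files, .expect/<name>.txt or .expect/<name>.<arch>.txt, and then filtering them by include/exclude path prefixes. The following standalone sketch mirrors that logic with the standard library only: find_tests and filter_tests are illustrative names, os.walk stands in for the pybin pathWalk helper, plain tuples stand in for the Test class, and the arch-matching rule is an assumption rather than the actual settings.arch.match behaviour.

# Standalone sketch of the new discovery scheme (illustrative, not the pybin code).
import os
import re

# same pattern used by the new findTests()
EXPECT_RE = re.compile(r"(\.[\w\/\-_]*)\/.expect\/([\w\-_]+)(\.[\w\-_]+)?\.txt")

def find_tests(root=".", arch=None):
    tests = []
    for dirpath, _, filenames in os.walk(root):
        for filename in filenames:
            match = EXPECT_RE.search(os.path.join(dirpath, filename))
            if not match:
                continue
            path, name, suffix = match.group(1), match.group(2), match.group(3)
            test_arch = suffix[1:] if suffix else None
            # assumption: keep arch-neutral tests plus tests tagged for the requested arch
            if test_arch is None or test_arch == arch:
                tests.append((path, name, test_arch))
    return tests

def filter_tests(tests, includes=None, excludes=None):
    # same prefix filtering as the new listTests(): include first, then exclude
    if includes:
        tests = [t for t in tests if t[0].startswith(tuple(includes))]
    if excludes:
        tests = [t for t in tests if not t[0].startswith(tuple(excludes))]
    return tests

With this sketch, filter_tests(find_tests(".", arch="x64"), includes=["./concurrent"]) keeps only tests under ./concurrent whose expected output is either architecture-neutral (name.txt) or tagged for that architecture (name.x64.txt); the actual arch names accepted by --arch depend on the pybin settings module. The diff resumes below with the test-execution half of the script.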
  ################################################################################
  # running test functions
  ################################################################################
  # logic to run a single test and return the result (No handling of printing or other test framework logic)
- def run_single_test(test, generate, dry_run, debug):
+ def run_single_test(test):

      # find the output file based on the test name and options flag
-     out_file = (".out/%s.log" % test.name) if not generate else (".expect/%s.txt" % test.path)
-     err_file = ".err/%s.log" % test.name
+     out_file = test.target_output()
+     err_file = test.error_log()
+     cmp_file = test.expect()
+     in_file  = test.input()
+
+     # prepare the proper directories
+     test.prepare()

      # remove any outputs from the previous tests to prevent side effects
-     rm( (out_file, err_file, test.name), dry_run )
-
-     options = "-debug" if debug else "-nodebug"
+     rm( (out_file, err_file, test.target()) )

      # build, skipping to next test on error
-     make_ret, _ = sh("""%s test=yes DEBUG_FLAGS="%s" %s 2> %s 1> /dev/null""" % (make_cmd, options, test.name, out_file), dry_run)
-
-     retcode = 0
-     error = None
+     make_ret, _ = make( test.target(),
+         redirects  = "2> %s 1> /dev/null" % out_file,
+         error_file = err_file
+     )

      # if the make command succeds continue otherwise skip to diff
-     if make_ret == 0 :
-         # fetch optional input
-         stdinput = "< .in/%s.txt" % test.name if isfile(".in/%s.txt" % test.name) else ""
-
-         if fileIsExecutable(test.name) :
+     if make_ret == 0 or settings.dry_run:
+         if settings.dry_run or fileIsExecutable(test.target()) :
              # run test
-             retcode, _ = sh("timeout 60 ./%s %s > %s 2>&1" % (test.name, stdinput, out_file), dry_run)
+             retcode, _ = sh("timeout 60 %s > %s 2>&1" % (test.target(), out_file), input = in_file)
          else :
              # simply cat the result into the output
-             sh("cat %s > %s" % (test.name, out_file), dry_run)
-
-     else :
-         # command failed save the log to less temporary file
-         sh("mv %s %s" % (err_file, out_file), dry_run)
+             retcode, _ = sh("cat %s > %s" % (test.target(), out_file))
+     else:
+         retcode, _ = sh("mv %s %s" % (err_file, out_file))
+

      if retcode == 0:
-         if generate:
+         if settings.generating :
              # if we are ounly generating the output we still need to check that the test actually exists
-             if not dry_run and fileContainsOnly(out_file, "make: *** No rule to make target `%s'. Stop." % test.name) :
+             if not settings.dry_run and fileContainsOnly(out_file, "make: *** No rule to make target `%s'. Stop." % test.target()) :
                  retcode = 1;
-                 error = "\t\tNo make target for test %s!" % test.name
+                 error = "\t\tNo make target for test %s!" % test.target()
                  sh("rm %s" % out_file, False)
+             else:
+                 error = None
          else :
              # fetch return code and error from the diff command
-             retcode, error = diff( ".expect/%s.txt" % test.path, ".out/%s.log" % test.name, dry_run)
+             retcode, error = diff(cmp_file, out_file)

      else:
…

      # clean the executable
-     sh("rm -f %s > /dev/null 2>&1" % test.name, dry_run)
+     sh("rm -f %s > /dev/null 2>&1" % test.target())

      return retcode, error

  # run a single test and handle the errors, outputs, printing, exception handling, etc.
- def run_test_worker(t, generate, dry_run, debug) :
-
-     signal.signal(signal.SIGINT, signal.SIG_DFL)
-     # print formated name
-     name_txt = "%20s " % t.name
-
-     retcode, error = run_single_test(t, generate, dry_run, debug)
-
-     # update output based on current action
-     if generate :
-         if   retcode == TestResult.SUCCESS: result_txt = "Done"
-         elif retcode == TestResult.TIMEOUT: result_txt = "TIMEOUT"
-         else :                              result_txt = "ERROR code %d" % retcode
-     else :
-         if   retcode == TestResult.SUCCESS: result_txt = "PASSED"
-         elif retcode == TestResult.TIMEOUT: result_txt = "TIMEOUT"
-         else :                              result_txt = "FAILED with code %d" % retcode
-
-     #print result with error if needed
-     text = name_txt + result_txt
-     out = sys.stdout
-     if error :
-         text = text + "\n" + error
-         out = sys.stderr
-
-     print(text, file = out)
-     sys.stdout.flush()
-     sys.stderr.flush()
-     signal.signal(signal.SIGINT, signal.SIG_IGN)
+ def run_test_worker(t) :
+
+     with SignalHandling():
+         # print formated name
+         name_txt = "%20s " % t.name
+
+         retcode, error = run_single_test(t)
+
+         # update output based on current action
+         result_txt = TestResult.toString( retcode )
+
+         #print result with error if needed
+         text = name_txt + result_txt
+         out = sys.stdout
+         if error :
+             text = text + "\n" + error
+             out = sys.stderr
+
+         print(text, file = out)
+         sys.stdout.flush()
+         sys.stderr.flush()

      return retcode != TestResult.SUCCESS

  # run the given list of tests with the given parameters
- def run_tests(tests, generate, dry_run, jobs, debug) :
+ def run_tests(tests, jobs) :
      # clean the sandbox from previous commands
-     sh("%s clean > /dev/null 2>&1" % make_cmd, dry_run)
-
-     # make sure the required folder are present
-     sh('mkdir -p .out .expect .err', dry_run)
-
-     if generate :
-         print( "Regenerate tests for: " )
+     make('clean', redirects = '> /dev/null 2>&1')

      # create the executor for our jobs and handle the signal properly
-     original_sigint_handler = signal.signal(signal.SIGINT, signal.SIG_IGN)
-     pool = Pool(jobs)
-     signal.signal(signal.SIGINT, original_sigint_handler)
+     pool = setupPool(jobs)

      # for each test to run
      try :
-         results = pool.map_async(partial(run_test_worker, generate=generate, dry_run=dry_run, debug=debug), tests, chunksize = 1 ).get(7200)
+         results = pool.map_async(
+             run_test_worker,
+             tests,
+             chunksize = 1
+         ).get(7200)
      except KeyboardInterrupt:
          pool.terminate()
…

      # clean the workspace
-     sh("%s clean > /dev/null 2>&1" % make_cmd, dry_run)
+     make('clean', redirects = '> /dev/null 2>&1')

      for failed in results:
…

      options = getOptions()

+     # init global settings
+     settings.init( options )
+
      # fetch the liest of all valid tests
-     allTests = listTests( options.concurrent )
+     allTests = listTests( options.include, options.exclude )

      # if user wants all tests than no other treatement of the test list is required
-     if options.all or options.list or options.list_comp :
+     if options.all or options.list or options.list_comp or options.include :
          tests = allTests

+     #otherwise we need to validate that the test list that was entered is valid
      else :
-         #otherwise we need to validate that the test list that was entered is valid
          tests = validTests( options )

      # sort the test alphabetically for convenience
-     tests.sort(key=lambda t: t.name)
+     tests.sort(key=lambda t: (t.arch if t.arch else '') + t.target())

      # users may want to simply list the tests
      if options.list_comp :
-         print("-h --help --debug --concurrent --dry-run --list --all --regenerate-expected -j --jobs ", end='')
-         print(" ".join(map(lambda t: "%s" % (t.name), tests)))
+         print("-h --help --debug --dry-run --list --arch --all --regenerate-expected -j --jobs ", end='')
+         print(" ".join(map(lambda t: "%s" % (t.target()), tests)))

      elif options.list :
-         print("\n".join(map(lambda t: "%s (%s)" % (t.name, t.path), tests)))
+         print("Listing for %s:%s"% (settings.arch.string, settings.debug.string))
+         print("\n".join(map(lambda t: "%s" % (t.toString()), tests)))

      else :
-         options.jobs, forceJobs = jobCount( options )
-
-         print('Running (%s) on %i cores' % ("debug" if options.debug else "no debug", options.jobs))
-         make_cmd = "make" if forceJobs else ("make -j%i" % options.jobs)
+         options.jobs, forceJobs = jobCount( options, tests )
+         settings.updateMakeCmd(forceJobs, options.jobs)
+
+         print('%s (%s:%s) on %i cores' % (
+             'Regenerate tests' if settings.generating else 'Running',
+             settings.arch.string,
+             settings.debug.string,
+             options.jobs
+         ))

      # otherwise run all tests and make sure to return the correct error code
-     sys.exit( run_tests(tests, options.regenerate_expected, options.dry_run, options.jobs, options.debug) )
+     sys.exit( run_tests(tests, options.jobs) )
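Although run_single_test() now delegates to helpers moved into the pybin package, the overall flow is unchanged: build the target, run it under a 60-second timeout with its output captured, then either regenerate the expected file or diff the output against it. The sketch below reproduces only the run-and-compare step with standard-library stand-ins (subprocess and difflib instead of the pybin sh() and diff() helpers); run_and_compare is an illustrative name and the file-handling details are assumptions, not the project's actual code.

# Illustrative stand-in for the run/diff step of run_single_test(); not the pybin code.
import difflib
import os
import subprocess

def run_and_compare(command, expect_file, out_file, in_file=None, timeout=60):
    # run under the coreutils `timeout` command, as the script itself does
    with (open(in_file) if in_file else open(os.devnull)) as stdin, \
         open(out_file, "w") as out:
        retcode = subprocess.call("timeout %d %s" % (timeout, command), shell=True,
                                  stdin=stdin, stdout=out, stderr=subprocess.STDOUT)
    if retcode == 124:
        return retcode, "TIMEOUT"  # coreutils `timeout` exits with 124 when the limit is hit

    # compare the captured output against the expected file, like the diff() helper
    with open(expect_file) as expect, open(out_file) as actual:
        delta = list(difflib.unified_diff(expect.readlines(), actual.readlines(),
                                          fromfile=expect_file, tofile=out_file))
    return (0, None) if not delta else (1, "".join(delta))

The 124 return code is what coreutils timeout reports on expiry, which matches the TestResult.TIMEOUT = 124 constant visible in the old revision of the script.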