#!/usr/bin/python
from __future__ import print_function

from pybin.tools import *
from pybin.test_run import *
from pybin import settings

import argparse
import re
import sys
import time

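# Illustrative invocations (hypothetical test/directory names; the full flag
# list is defined in getOptions() below):
#   <this script> --all --debug=no                   # run every discovered test in release mode
#   <this script> -j 4 sometest othertest            # run two named tests on 4 parallel jobs
#   <this script> --regenerate-expected -I somedir   # rebuild the .expect files under somedir
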
################################################################################
#               helper functions
################################################################################

def findTests():
	expected = []

	def matchTest(path):
		# expected output files look like <dir>/.expect/<name>[.<arch>].txt
		match = re.search(r"(\.[\w\/\-_]*)\/\.expect\/([\w\-_]+)(\.[\w\-_]+)?\.txt", path)
		if match :
			test = Test()
			test.name = match.group(2)
			test.path = match.group(1)
			test.arch = match.group(3)[1:] if match.group(3) else None
			if settings.arch.match(test.arch):
				expected.append(test)

	pathWalk( matchTest )

	return expected

# reads the directory ./.expect and identifies the tests
def listTests( includes, excludes ):
	includes = [canonicalPath( i ) for i in includes] if includes else None
	excludes = [canonicalPath( i ) for i in excludes] if excludes else None

	# tests directly in the .expect folder will always be processed
	test_list = findTests()

	# if we have a limited number of includes, filter by them
	if includes:
		test_list = [x for x in test_list if
			x.target().startswith( tuple(includes) )
		]

	# if we have folders to exclude, filter by them
	if excludes:
		test_list = [x for x in test_list if not
			x.target().startswith( tuple(excludes) )
		]

	return test_list

# from the found tests, keep only the valid/desired tests
def validTests( options ):
	tests = []

	# if we are regenerating the tests we need to find the information of the
	# already existing tests and create new info for the new tests
	if options.regenerate_expected :
		for testname in options.tests :
			testname = canonicalPath( testname )
			if Test.valid_name(testname):
				found = [test for test in allTests if test.target() == testname]
				tests.append( found[0] if len(found) == 1 else Test.from_target(testname) )
			else :
				print('ERROR: "%s", tests are not allowed to end with a C/C++/CFA extension, ignoring it' % testname, file=sys.stderr)

	else :
		# otherwise we only need to validate that all tests are present in the complete list
		for testname in options.tests:
			test = [t for t in allTests if pathCmp( t.target(), testname )]

			if test :
				tests.append( test[0] )
			else :
				print('ERROR: No expected file for test %s, ignoring it' % testname, file=sys.stderr)

	return tests

# parses the command line options
def getOptions():
	# create a parser with the arguments for the tests script
	parser = argparse.ArgumentParser(description='Script which runs cforall tests')
	parser.add_argument('--debug', help='Run all tests in debug or release', type=yes_no, default='no')
	parser.add_argument('--arch', help='Test for specific architecture', type=str, default='')
	parser.add_argument('--dry-run', help='Don\'t run the tests, only output the commands', action='store_true')
	parser.add_argument('--list', help='List all available tests', action='store_true')
	parser.add_argument('--all', help='Run all available tests', action='store_true')
	parser.add_argument('--regenerate-expected', help='Regenerate the .expect files by running the specified tests, can be used with the --all option', action='store_true')
	parser.add_argument('-j', '--jobs', help='Number of tests to run simultaneously', type=int)
	parser.add_argument('--list-comp', help='List all valid arguments', action='store_true')
	parser.add_argument('-I','--include', help='Directory of tests to include, can be used multiple times, all tests are included if omitted', action='append')
	parser.add_argument('-E','--exclude', help='Directory of tests to exclude, can be used multiple times, no tests are excluded if omitted', action='append')
	parser.add_argument('tests', metavar='test', type=str, nargs='*', help='a list of tests to run')

	options = parser.parse_args()

	# script must have at least some tests to run or be listing
	listing    = options.list or options.list_comp
	all_tests  = options.all
	some_tests = len(options.tests) > 0
	some_dirs  = len(options.include) > 0 if options.include else 0

	# check that at least one of the booleans is set to true
	if not sum( (listing, all_tests, some_tests, some_dirs) ) > 0 :
		print('ERROR: must have option \'--all\', \'--list\', \'--include\', \'-I\' or non-empty test list', file=sys.stderr)
		parser.print_help()
		sys.exit(1)

	return options

################################################################################
#               running test functions
################################################################################
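# A single test goes through three stages: build the target with make, run the
# resulting executable under a timeout (non-executable targets are simply cat'd),
# and compare the output against the stored .expect file (unless regenerating).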
# logic to run a single test and return the result (no handling of printing or other test framework logic)
def run_single_test(test):

	# find the output files based on the test name and options flags
	out_file = test.target_output()
	err_file = test.error_log()
	cmp_file = test.expect()
	in_file  = test.input()

	# prepare the proper directories
	test.prepare()

	# remove any outputs from the previous tests to prevent side effects
	rm( (out_file, err_file, test.target()) )

	# build, skipping to next test on error
	before = time.time()
	make_ret, _ = make( test.target(),
		redirects  = "2> %s 1> /dev/null" % out_file,
		error_file = err_file
	)
	after = time.time()

	comp_dur = after - before

	run_dur = None

	# if the make command succeeds continue, otherwise skip to the diff
	if make_ret == 0 or settings.dry_run:
		before = time.time()
		if settings.dry_run or fileIsExecutable(test.target()) :
			# run the test, with a timeout to catch hangs
			retcode, _ = sh("timeout 60 %s > %s 2>&1" % (test.target(), out_file), input = in_file)
		else :
			# simply cat the result into the output
			retcode, _ = sh("cat %s > %s" % (test.target(), out_file))

		after = time.time()
		run_dur = after - before
	else:
		retcode, _ = sh("mv %s %s" % (err_file, out_file))

	if retcode == 0:
		if settings.generating :
			# if we are only generating the output we still need to check that the test actually exists
			if not settings.dry_run and fileContainsOnly(out_file, "make: *** No rule to make target `%s'.  Stop." % test.target()) :
				retcode = 1
				error = "\t\tNo make target for test %s!" % test.target()
				sh("rm %s" % out_file, False)
			else:
				error = None
		else :
			# fetch return code and error from the diff command
			retcode, error = diff(cmp_file, out_file)

	else:
		with open(out_file, "r") as myfile:
			error = myfile.read()

	# clean the executable
	sh("rm -f %s > /dev/null 2>&1" % test.target())

	return retcode, error, [comp_dur, run_dur]

# run a single test and handle the errors, outputs, printing, exception handling, etc.
def run_test_worker(t) :

	with SignalHandling():
		# print formatted name
		name_txt = "%20s  " % t.name

		retcode, error, duration = run_single_test(t)

		# update output based on current action
		result_txt = TestResult.toString( retcode, duration )

		# print result with error if needed
		text = name_txt + result_txt
		out = sys.stdout
		if error :
			text = text + "\n" + error
			out = sys.stderr

		print(text, file = out)
		sys.stdout.flush()
		sys.stderr.flush()

	return retcode != TestResult.SUCCESS

# run the given list of tests with the given parameters
def run_tests(tests, jobs) :
	# clean the sandbox from previous commands
	make('clean', redirects = '> /dev/null 2>&1')

	# create the executor for our jobs and handle the signals properly
	pool = setupPool(jobs)

	# for each test to run
	try :
		results = pool.map_async(
			run_test_worker,
			tests,
			chunksize = 1
		).get(7200)
	except KeyboardInterrupt:
		pool.terminate()
		print("Tests interrupted by user")
		sys.exit(1)

	# clean the workspace
	make('clean', redirects = '> /dev/null 2>&1')

	for failed in results:
		if failed :
			return 1

	return 0

################################################################################
#               main loop
################################################################################
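# Overall flow: move to the script's folder, parse the options, initialize the
# global settings, discover and filter the tests, then either list them or run
# them and exit with the resulting error code.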
if __name__ == "__main__":
	# always run from the same folder
	chdir()

	# parse the command line arguments
	options = getOptions()

	# init global settings
	settings.init( options )

	# fetch the list of all valid tests
	allTests = listTests( options.include, options.exclude )

	# if the user wants all tests then no other treatment of the test list is required
	if options.all or options.list or options.list_comp or options.include :
		tests = allTests

	# otherwise we need to validate that the test list that was entered is valid
	else :
		tests = validTests( options )

	# make sure we have at least some tests to run
	if not tests :
		print('ERROR: No valid test to run', file=sys.stderr)
		sys.exit(1)

	# sort the tests alphabetically for convenience
	tests.sort(key=lambda t: (t.arch if t.arch else '') + t.target())

	# users may want to simply list the tests
	if options.list_comp :
		print("-h --help --debug --dry-run --list --arch --all --regenerate-expected -j --jobs ", end='')
		print(" ".join(map(lambda t: "%s" % (t.target()), tests)))

	elif options.list :
		print("Listing for %s:%s" % (settings.arch.string, settings.debug.string))
		print("\n".join(map(lambda t: "%s" % (t.toString()), tests)))

	else :
		options.jobs, forceJobs = jobCount( options, tests )
		settings.updateMakeCmd(forceJobs, options.jobs)

		print('%s (%s:%s) on %i cores' % (
			'Regenerating tests' if settings.generating else 'Running tests',
			settings.arch.string,
			settings.debug.string,
			options.jobs
		))

		# run all tests and make sure to return the correct error code
		sys.exit( run_tests(tests, options.jobs) )