#!/usr/bin/python3

from pybin.tools import *
from pybin.test_run import *
from pybin import settings

import argparse
import multiprocessing
import re
import sys
import time

################################################################################
#               helper functions
################################################################################

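# A test is identified by its expected-output file.  The directory layout assumed by
# the regular expression below is
#     <SRCDIR>/<path>/.expect/<name>[.<arch>].txt
# For illustration only (hypothetical name): "io/.expect/read.x64.txt" would yield a
# test named "read" under "io/", restricted to the "x64" architecture.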
def find_tests():
	expected = []

	def match_test(path):
		match = re.search(r"^%s\/([\w\/\-_]*)\.expect\/([\w\-_]+)(\.[\w\-_]+)?\.txt$" % settings.SRCDIR, path)
		if match :
			test = Test()
			test.name = match.group(2)
			test.path = match.group(1)
			test.arch = match.group(3)[1:] if match.group(3) else None
			if settings.arch.match(test.arch):
				expected.append(test)

	path_walk( match_test )

	return expected

# reads the directory ./.expect and identifies the tests
def list_tests( includes, excludes ):
	# tests directly in the .expect folder will always be processed
	test_list = find_tests()

	# if we have a limited number of includes, filter by them
	if includes:
		test_list = [x for x in test_list if
			x.target().startswith( tuple(includes) )
		]

	# if we have folders to exclude, filter by them
	if excludes:
		test_list = [x for x in test_list if not
			x.target().startswith( tuple(excludes) )
		]

	return test_list

# from the found tests, keep only the valid/desired tests
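# note: this relies on the module-level 'all_tests' list that is built in the __main__ section below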
def valid_tests( options ):
	tests = []

	# if we are regenerating the tests we need to find the information of the
	# already existing tests and create new info for the new tests
	if options.regenerate_expected :
		for testname in options.tests :
			testname = canonical_path( testname )
			if Test.valid_name(testname):
				found = [test for test in all_tests if canonical_path( test.target() ) == testname]
				tests.append( found[0] if len(found) == 1 else Test.from_target(testname) )
			else :
				print('ERROR: "%s", tests are not allowed to end with a C/C++/CFA extension, ignoring it' % testname, file=sys.stderr)

	else :
		# otherwise we only need to validate that all tests are present in the complete list
		for testname in options.tests:
			test = [t for t in all_tests if path_cmp( t.target(), testname )]

			if test :
				tests.append( test[0] )
			else :
				print('ERROR: No expected file for test %s, ignoring it' % testname, file=sys.stderr)

	return tests

# parse the command-line options
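# exits with an error when neither a test list, '--all', '--list'/'--list-comp' nor '--include' is given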
def parse_args():
	# create a parser with the arguments for the tests script
	parser = argparse.ArgumentParser(description='Script which runs cforall tests')
	parser.add_argument('--debug', help='Run all tests in debug or release', type=yes_no, default='yes')
	parser.add_argument('--install', help='Run all tests based on installed binaries or tree binaries', type=yes_no, default='no')
	parser.add_argument('--arch', help='Test for specific architecture', type=str, default='')
	parser.add_argument('--timeout', help='Maximum duration in seconds after which a single test is considered to have timed out', type=int, default=60)
	parser.add_argument('--global-timeout', help='Maximum cumulative duration in seconds after which ALL tests are considered to have timed out', type=int, default=7200)
	parser.add_argument('--dry-run', help='Don\'t run the tests, only output the commands', action='store_true')
	parser.add_argument('--list', help='List all available tests', action='store_true')
	parser.add_argument('--all', help='Run all available tests', action='store_true')
	parser.add_argument('--regenerate-expected', help='Regenerate the .expect files by running the specified tests, can be used with the --all option', action='store_true')
	parser.add_argument('-j', '--jobs', help='Number of tests to run simultaneously', type=int)
	parser.add_argument('--list-comp', help='List all valid arguments', action='store_true')
	parser.add_argument('-I', '--include', help='Directory of tests to include, can be used multiple times, all if omitted', action='append')
	parser.add_argument('-E', '--exclude', help='Directory of tests to exclude, can be used multiple times, none if omitted', action='append')
	parser.add_argument('tests', metavar='test', type=str, nargs='*', help='a list of tests to run')

	try:
		options = parser.parse_args()
	except SystemExit:
		print('ERROR: invalid arguments', file=sys.stderr)
		parser.print_help(sys.stderr)
		sys.exit(1)

	# script must have at least some tests to run or be listing
	listing = options.list or options.list_comp
	all_tests = options.all
	some_tests = len(options.tests) > 0
	some_dirs = len(options.include) > 0 if options.include else False

	# check that at least one of the booleans is set to true
	if not sum( (listing, all_tests, some_tests, some_dirs) ) > 0 :
		print('''ERROR: must have option '--all', '--list', '--include', '-I' or a non-empty test list''', file=sys.stderr)
		parser.print_help()
		sys.exit(1)

	return options

################################################################################
#               running test functions
################################################################################
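# a zero return code counts as success; in dry-run mode nothing is actually executed,
# so every step is treated as successful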
def success(val):
	return val == 0 or settings.dry_run

def no_rule(file, target):
	return not settings.dry_run and file_contains_only(file, "make: *** No rule to make target `%s'. Stop." % target)

# logic to run a single test and return the result (no handling of printing or other test-framework logic)
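# returns a triple: (return code, error text or None, [compile duration, run duration or None])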
def run_single_test(test):

	# find the output file based on the test name and options flag
	exe_file = test.target_executable()
	out_file = test.target_output()
	err_file = test.error_log()
	cmp_file = test.expect()
	in_file  = test.input()

	# prepare the proper directories
	test.prepare()

	# build, skipping to next test on error
	with Timed() as comp_dur:
		make_ret, _ = make( test.target(), redirects = ("2> %s 1> /dev/null" % out_file), error_file = err_file )

	# if the make command succeeds continue, otherwise skip straight to the diff
	run_dur = None
	if success(make_ret):
		with Timed() as run_dur:
			if settings.dry_run or is_exe(exe_file):
				# run test
				retcode, _ = sh(exe_file, output=out_file, input=in_file, timeout=True)
			else :
				# simply cat the result into the output
				retcode = cat(exe_file, out_file)
	else:
		retcode = mv(err_file, out_file)

	if success(retcode):
		if settings.generating :
			# if we are only generating the output we still need to check that the test actually exists
			if no_rule(out_file, test.target()) :
				retcode = 1
				error = "\t\tNo make target for test %s!" % test.target()
				rm(out_file)
			else:
				error = None
		else :
			# fetch return code and error from the diff command
			retcode, error = diff(cmp_file, out_file)

	else:
		with open(out_file, "r") as myfile:
			error = myfile.read()

		_, info = core_info(exe_file)
		error = error + info

	# clean the executable
	rm(exe_file)

	return retcode, error, [comp_dur.duration, run_dur.duration if run_dur else None]

# run a single test and handle the errors, outputs, printing, exception handling, etc.
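# returns True when the test failed and False otherwise, so run_tests can compute the overall exit code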
def run_test_worker(t) :
	try :
		# print formatted name
		name_txt = '{0:{width}} '.format(t.target(), width=settings.output_width)

		retcode, error, duration = run_single_test(t)

		# update output based on current action
		result_txt = TestResult.toString( retcode, duration )

		# print result, with the error if needed
		text = name_txt + result_txt
		out = sys.stdout
		if error :
			text = text + "\n" + error
			out = sys.stderr

		print(text, file = out)
		sys.stdout.flush()
		sys.stderr.flush()

		return retcode != TestResult.SUCCESS
	except KeyboardInterrupt:
		return False

# run the given list of tests with the given parameters
def run_tests(tests, jobs) :
	# clean the sandbox from previous commands
	make('clean', redirects = '> /dev/null 2>&1')

	# create the executor for our jobs and handle the signal properly
	pool = multiprocessing.Pool(jobs)

	# for each test to run
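	# map_async + get(timeout) rather than a plain map: the global timeout bounds the
	# whole run and a KeyboardInterrupt can still reach this process while it waits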
	try :
		results = pool.map_async(
			run_test_worker,
			tests,
			chunksize = 1
		).get(settings.timeout.total)
	except KeyboardInterrupt:
		pool.terminate()
		print("Tests interrupted by user")
		sys.exit(1)

	# clean the workspace
	make('clean', redirects = '> /dev/null 2>&1')

	for failed in results:
		if failed :
			return 1

	return 0

################################################################################
#               main loop
################################################################################
if __name__ == "__main__":

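	# typical invocations (illustrative only; directory and test names are hypothetical):
	#     test.py --all                           run every test for the current configuration
	#     test.py -I io --debug=no                run only the tests under "io", in release mode
	#     test.py mytest --regenerate-expected    regenerate the .expect file of "mytest"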
	# parse the command line arguments
	options = parse_args()

	# init global settings
	settings.init( options )

	# fetch the list of all valid tests
	all_tests = list_tests( options.include, options.exclude )

	# if the user wants all tests then no other treatment of the test list is required
	if options.all or options.list or options.list_comp or options.include :
		tests = all_tests

	# otherwise we need to validate that the test list that was entered is valid
	else :
		tests = valid_tests( options )

	# make sure we have at least some test to run
	if not tests :
		print('ERROR: No valid test to run', file=sys.stderr)
		sys.exit(1)

	# sort the tests alphabetically for convenience
	tests.sort(key=lambda t: (t.arch if t.arch else '') + t.target())

	# users may want to simply list the tests
	if options.list_comp :
		print("-h --help --debug --dry-run --list --arch --all --regenerate-expected --install --timeout --global-timeout -j --jobs ", end='')
		print(" ".join(map(lambda t: "%s" % (t.target()), tests)))

	elif options.list :
		print("Listing for %s:%s" % (settings.arch.string, settings.debug.string))
		fancy_print("\n".join(map(lambda t: t.toString(), tests)))

	else :
		# check that the build configuration works
		settings.prep_output(tests)
		settings.validate()

		options.jobs, forceJobs = job_count( options, tests )
		settings.update_make_cmd(forceJobs, options.jobs)

		print('%s (%s:%s) on %i cores' % (
			'Regenerating tests' if settings.generating else 'Running',
			settings.arch.string,
			settings.debug.string,
			options.jobs
		))

		# run all tests and make sure to return the correct error code
		sys.exit( run_tests(tests, options.jobs) )