#!/usr/bin/python3

from pybin.tools import *
from pybin.test_run import *
from pybin import settings

import argparse
import itertools
import re
import sys
import tempfile
import time

import os
import signal

################################################################################
#               helper functions
################################################################################

def find_tests():
	expected = []

	def match_test(path):
		match = re.search(r"^%s\/([\w\/\-_]*).expect\/([\w\-_]+)(\.nast|\.oast)?(\.[\w\-_]+)?\.txt$" % settings.SRCDIR, path)
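		# a sketch of what the pattern matches, using hypothetical paths under SRCDIR:
		#   <SRCDIR>/io/.expect/cat.txt          -> path='io/', name='cat', no ast tag, no arch
		#   <SRCDIR>/io/.expect/cat.nast.x64.txt -> path='io/', name='cat', ast tag 'nast', arch 'x64'
		# group(1) is the directory prefix, group(2) the test name, group(3) an
		# optional ast-version tag and group(4) an optional architecture suffix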
		if match :
			test = Test()
			test.name = match.group(2)
			test.path = match.group(1)
			test.arch = match.group(4)[1:] if match.group(4) else None

			astv = match.group(3)[1:] if match.group(3) else None
			if astv == 'oast':
				test.astv = 'old'
			elif astv == 'nast':
				test.astv = 'new'
			elif astv:
				print('ERROR: "%s", expect file has astv but it is not "nast" or "oast"' % path, file=sys.stderr)
				sys.exit(1)

			expected.append(test)

	path_walk( match_test )

	return expected

# reads the directory ./.expect and identifies the tests
def list_tests( includes, excludes ):
	# tests directly in the .expect folder will always be processed
	test_list = find_tests()

	# if we have a limited number of includes, filter by them
	if includes:
		test_list = [x for x in test_list if
			x.target().startswith( tuple(includes) )
		]

	# if we have folders to exclude, filter by them
	if excludes:
		test_list = [x for x in test_list if not
			x.target().startswith( tuple(excludes) )
		]

	# sort the tests alphabetically for convenience
	test_list.sort(key=lambda t: ('~' if t.arch else '') + t.target() + (t.arch if t.arch else ''))
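	# ('~' sorts after every ASCII letter, so architecture-specific tests are
	# grouped together at the end of the list)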

	return test_list

# from the found tests, filter all the valid/desired tests
def valid_tests( options ):
	tests = []

	# if we are regenerating the tests we need to find the information of the
	# already existing tests and create new info for the new tests
	if options.regenerate_expected :
		for testname in options.tests :
			testname = os.path.normpath( os.path.join(settings.SRCDIR, testname) )

			# first check if this is a valid name to regenerate
			if Test.valid_name(testname):
				# this is a valid name, let's check if it already exists
				found = [test for test in all_tests if canonical_path( test.target() ) == testname]
				setup = itertools.product(settings.all_arch if options.arch else [None], settings.all_ast if options.ast else [None])
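				# setup holds one (arch, ast) pair per requested combination;
				# e.g. with --arch=x86,x64 and --ast unset it is presumably
				# [(x86, None), (x64, None)]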
				if not found:
					# it's a new name, create it according to the name and specified architecture/ast version
					tests.extend( [Test.new_target(testname, arch, ast) for arch, ast in setup] )
				elif len(found) == 1 and not found[0].arch:
					# we found a single test, the user should be creating a cross-platform test
					if options.arch:
						print('ERROR: "%s", test has no specified architecture but --arch was specified, ignoring it' % testname, file=sys.stderr)
					elif options.ast:
						print('ERROR: "%s", test has no specified ast version but --ast was specified, ignoring it' % testname, file=sys.stderr)
					else:
						tests.append( found[0] )
				else:
					# this test is already cross platform, just add a test for each platform the user asked for
					tests.extend( [Test.new_target(testname, arch, ast) for arch, ast in setup] )

					# print a warning if the user didn't ask for a specific architecture
					found_arch = [f.arch for f in found if f.arch]
					if found_arch and not options.arch:
						print('WARNING: "%s", test has architecture specific expected files but --arch was not specified, regenerating only for current host' % testname, file=sys.stderr)

					# print a warning if the user didn't ask for a specific ast version
					found_astv = [f.astv for f in found if f.astv]
					if found_astv and not options.ast:
						print('WARNING: "%s", test has ast version specific expected files but --ast was not specified, regenerating only for current ast' % testname, file=sys.stderr)

			else :
				print('ERROR: "%s", tests are not allowed to end with a C/C++/CFA extension, ignoring it' % testname, file=sys.stderr)

	else :
		# otherwise we only need to validate that all tests are present in the complete list
		for testname in options.tests:
			test = [t for t in all_tests if path_cmp( t.target(), testname )]

			if test :
				tests.extend( test )
			else :
				print('ERROR: No expected file for test %s, ignoring it' % testname, file=sys.stderr)

	return tests

# parses the options
def parse_args():
	# create a parser with the arguments for the tests script
	parser = argparse.ArgumentParser(description='Script which runs cforall tests')
	parser.add_argument('--ast', help='Test for specific ast', type=comma_separated(str), default=None)
	parser.add_argument('--arch', help='Test for specific architecture', type=comma_separated(str), default=None)
	parser.add_argument('--debug', help='Run all tests in debug or release', type=comma_separated(yes_no), default='yes')
	parser.add_argument('--install', help='Run all tests based on installed binaries or tree binaries', type=comma_separated(yes_no), default='no')
	parser.add_argument('--continue', help='When multiple specifications are passed (debug/install/arch), sets whether or not to continue if the last specification failed', type=yes_no, default='yes', dest='continue_')
	parser.add_argument('--timeout', help='Maximum duration in seconds after which a single test is considered to have timed out', type=int, default=120)
	parser.add_argument('--global-timeout', help='Maximum cumulative duration in seconds after which ALL tests are considered to have timed out', type=int, default=7200)
	parser.add_argument('--timeout-with-gdb', help='Instead of killing the command when it times out, orphan it and print the process id to allow gdb to attach', type=yes_no, default="no")
	parser.add_argument('--dry-run', help='Don\'t run the tests, only output the commands', action='store_true')
	parser.add_argument('--list', help='List all available tests', action='store_true')
	parser.add_argument('--all', help='Run all available tests', action='store_true')
	parser.add_argument('--regenerate-expected', help='Regenerate the .expect files by running the specified tests, can be used with the --all option', action='store_true')
	parser.add_argument('--archive-errors', help='If called with a valid path, on test crashes the test script will copy the core dump and the executable to the specified path.', type=str, default='')
	parser.add_argument('-j', '--jobs', help='Number of tests to run simultaneously', type=int)
	parser.add_argument('--list-comp', help='List all valid arguments', action='store_true')
	parser.add_argument('--list-dist', help='List all tests for distribution', action='store_true')
	parser.add_argument('-I','--include', help='Directory of tests to include, can be used multiple times, all if omitted', action='append')
	parser.add_argument('-E','--exclude', help='Directory of tests to exclude, can be used multiple times, none if omitted', action='append')
	parser.add_argument('tests', metavar='test', type=str, nargs='*', help='a list of tests to run')

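	# typical invocations, assuming the script is run from the test directory:
	#   ./test.py --all                          run every test for the host configuration
	#   ./test.py -I io -E io/bad                run tests under io/, excluding io/bad/
	#   ./test.py --regenerate-expected mytest   rebuild the .expect file(s) of 'mytest'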
	try:
		options = parser.parse_args()
	except:
		print('ERROR: invalid arguments', file=sys.stderr)
		parser.print_help(sys.stderr)
		sys.exit(1)

	# script must have at least some tests to run or be listing
	listing    = options.list or options.list_comp or options.list_dist
	all_tests  = options.all
	some_tests = len(options.tests) > 0
	some_dirs  = len(options.include) > 0 if options.include else 0

	# check that at least one of the booleans is set to true
	if not sum( (listing, all_tests, some_tests, some_dirs) ) > 0 :
		print('''ERROR: must have option '--all', '--list', '--include', '-I' or non-empty test list''', file=sys.stderr)
		parser.print_help()
		sys.exit(1)

	return options

################################################################################
#               running test functions
################################################################################
def success(val):
	return val == 0 or settings.dry_run

def no_rule(file, target):
	return not settings.dry_run and file_contains_only(file, "make: *** No rule to make target `%s'.  Stop." % target)

# logic to run a single test and return the result (no handling of printing or other test framework logic)
def run_single_test(test):

	# find the output file based on the test name and options flag
	exe_file = test.target_executable()
	out_file = test.target_output()
	err_file = test.error_log()
	cmp_file = test.expect()
	in_file  = test.input()

	# prepare the proper directories
	test.prepare()

	# ----------
	# MAKE
	# ----------
	# build, skipping to next test on error
	with Timed() as comp_dur:
		make_ret, _ = make( test.target(), output_file=subprocess.DEVNULL, error=out_file, error_file = err_file )

	# ----------
	# RUN
	# ----------
	# run everything in a temp directory to make sure core files are handled properly
	run_dur = None
	with tempdir():
		# if the make command succeeds continue, otherwise skip to diff
		if success(make_ret):
			with Timed() as run_dur:
				if settings.dry_run or is_exe(exe_file):
					# run test
					retcode, _ = sh(exe_file, output_file=out_file, input_file=in_file, timeout=True)
				else :
					# simply cat the result into the output
					retcode = cat(exe_file, out_file)
		else:
			retcode = mv(err_file, out_file)

		if success(retcode):
			if settings.generating :
				# if we are only generating the output we still need to check that the test actually exists
				if no_rule(out_file, test.target()) :
					retcode = 1
					error = "\t\tNo make target for test %s!" % test.target()
					rm(out_file)
				else:
					error = None
			else :
				# fetch return code and error from the diff command
				retcode, error = diff(cmp_file, out_file)

		else:
			if os.stat(out_file).st_size < 1048576:
				with open (out_file, "r", encoding='latin-1') as myfile:  # use latin-1 so all chars mean something.
					error = myfile.read()
			else:
				error = "Output log can't be read, file is bigger than 1MB, see {} for actual error\n".format(out_file)

			ret, info = core_info(exe_file)
			error = error + info if error else info

			if settings.archive:
				error = error + '\n' + core_archive(settings.archive, test.target(), exe_file)

	# clean the executable
	rm(exe_file)

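	# result triple: the final status (make/run/diff), the error text to
	# display, and the compile and (possibly absent) run durations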
	return retcode, error, [comp_dur.duration, run_dur.duration if run_dur else None]

# run a single test and handle the errors, outputs, printing, exception handling, etc.
def run_test_worker(t) :
	try :
		# print formatted name
		name_txt = '{0:{width}}  '.format(t.target(), width=settings.output_width)

		retcode, error, duration = run_single_test(t)

		# update output based on current action
		result_txt = TestResult.toString( retcode, duration )

		# print result with error if needed
		text = '\t' + name_txt + result_txt
		out = sys.stdout
		if error :
			text = text + '\n' + error

		return retcode == TestResult.SUCCESS, text
	except KeyboardInterrupt:
		return False, ""
	# except Exception as ex:
	#       print("Unexpected error in worker thread running {}: {}".format(t.target(), ex), file=sys.stderr)
	#       sys.stderr.flush()
	#       return False, ""


# run the given list of tests with the given parameters
def run_tests(tests, jobs) :
	# clean the sandbox from previous commands
	make('clean', output_file=subprocess.DEVNULL, error=subprocess.DEVNULL)

	# create the executor for our jobs
	pool = multiprocessing.Pool(jobs)
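	# NOTE: multiprocessing and subprocess are not imported in this file; they
	# are assumed to be re-exported by the star import from pybin.tools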

	failed = False

	# for each test to run
	try :
		num = len(tests)
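		# when stdout is a terminal, an in-place "i/num" progress counter is
		# printed below and overwritten by each subsequent result line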
		fancy = sys.stdout.isatty()
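		# chunksize=1 so each result is reported as soon as its test finishes,
		# rather than being batched with neighbouring tests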
		results = pool.imap_unordered(
			run_test_worker,
			tests,
			chunksize = 1
		)
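		# timed() presumably enforces the global timeout while iterating and
		# raises multiprocessing.TimeoutError when it expires (handled below)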
		for i, (succ, txt) in enumerate(timed(results, timeout = settings.timeout.total), 1) :
			if not succ :
				failed = True

			print("       " + txt)

			if fancy and i != num:
				print("%d/%d" % (i, num), end='\r')
				sys.stdout.flush()

	except KeyboardInterrupt:
		print("Tests interrupted by user", file=sys.stderr)
		pool.terminate()
		pool.join()
		failed = True
	except multiprocessing.TimeoutError:
		print("ERROR: Test suite timed out", file=sys.stderr)
		pool.terminate()
		pool.join()
		failed = True
		killgroup() # needed to cleanly kill all children

	# clean the workspace
	make('clean', output_file=subprocess.DEVNULL, error=subprocess.DEVNULL)

	return failed


################################################################################
#               main loop
################################################################################
if __name__ == "__main__":

	# parse the command line arguments
	options = parse_args()

	# init global settings
	settings.init( options )

	# --------------------------------------------------
	# list all the tests for auto completion programs
	# not pretty, single line, with the command line options
	if options.list_comp :
		# fetch the list of all valid tests
		tests = list_tests( None, None )

		# print the possible options
		print("-h --help --debug --dry-run --list --ast=new --ast=old --arch --all --regenerate-expected --archive-errors --install --timeout --global-timeout --timeout-with-gdb -j --jobs -I --include -E --exclude --continue ", end='')
		print(" ".join(map(lambda t: "%s" % (t.target()), tests)))

		# done
		sys.exit(0)

	# --------------------------------------------------
	# list all the tests needed for distribution
	if options.list_dist :
		# fetch the list of all valid tests
		tests = list_tests( None, None )

		for t in tests:
			print(os.path.relpath(t.expect(), settings.SRCDIR), end=' ')
			print(os.path.relpath(t.input() , settings.SRCDIR), end=' ')
			code, out = make_recon(t.target())
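			# make_recon presumably does a dry-run of the build; its output is
			# scanned below for the .cfa source files the test depends on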

			if code != 0:
				print('ERROR: recon failed for test {}'.format(t.target()), file=sys.stderr)
				sys.exit(1)

			print(' '.join(re.findall(r'([^\s]+\.cfa)', out)), end=' ')

		print('')

		# done
		sys.exit(0)

	# --------------------------------------------------
	# list all the tests for users, in a pretty format
	if options.list :
		# fetch the list of all valid tests
		tests = list_tests( options.include, options.exclude )

		# print the available tests
		fancy_print("\n".join(map(lambda t: t.toString(), tests)))

		# done
		sys.exit(0)

	# fetch the list of all valid tests
	all_tests = list_tests( options.include, options.exclude )

	# if the user wants all tests then no other treatment of the test list is required
	if options.all or options.include :
		tests = all_tests

	# otherwise we need to validate that the entered test list is valid
	else :
		tests = valid_tests( options )

	# make sure we have at least some tests to run
	if not tests :
		print('ERROR: No valid test to run', file=sys.stderr)
		sys.exit(1)

	# prep invariants
	settings.prep_output(tests)
	failed = 0

	# check that the expected files aren't empty
	if not options.regenerate_expected:
		for t in tests:
			if is_empty(t.expect()):
				print('WARNING: test "{}" has empty .expect file'.format(t.target()), file=sys.stderr)

	# for each build configuration, run the tests
	with Timed() as total_dur:
		for ast, arch, debug, install in itertools.product(settings.all_ast, settings.all_arch, settings.all_debug, settings.all_install):
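			# e.g. --ast=new,old --debug=yes,no expands to four configurations:
			# (new, yes), (new, no), (old, yes), (old, no), crossed with the
			# arch and install settings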
			settings.ast     = ast
			settings.arch    = arch
			settings.debug   = debug
			settings.install = install

			# filter out the tests for a different architecture
			# tests are the same across debug/install
			local_tests = settings.ast.filter( tests )
			local_tests = settings.arch.filter( local_tests )
			options.jobs, forceJobs = job_count( options, local_tests )
			settings.update_make_cmd(forceJobs, options.jobs)

			# check the build configuration works
			settings.validate()

			# print configuration
			print('%s %i tests on %i cores (%s:%s - %s)' % (
				'Regenerating' if settings.generating else 'Running',
				len(local_tests),
				options.jobs,
				settings.ast.string,
				settings.arch.string,
				settings.debug.string
			))
			if not local_tests :
				print('WARNING: No tests for this configuration')
				continue

			# otherwise run all tests, latching any failure so the exit code is correct
			if run_tests(local_tests, options.jobs):
				failed = 1
				if not settings.continue_:
					break

	print('Tests took %s' % fmtDur( total_dur.duration ))
	sys.exit( failed )