Ignore:
File:
1 edited

Legend:

Unmodified
Added
Removed
  • src/tests/test.py

    r7895d46 rc07d724  
    66from os import listdir, environ
    77from os.path import isfile, join, splitext
    8 from subprocess import Popen, PIPE, STDOUT
     8from pybin.tools import *
    99
    1010import argparse
     11import multiprocessing
    1112import os
    1213import re
    13 import stat
     14import signal
    1415import sys
    1516
     
    2627def getMachineType():
    2728        sh('echo "void ?{}(int*a,int b){}int main(){return 0;}" > .dummy.c')
    28         sh("make .dummy", print2stdout=False)
     29        ret, out = sh("make .dummy -s", print2stdout=True)
     30       
     31        if ret != 0:
     32                print("Failed to identify architecture:")
     33                print(out)
     34                print("Stopping")
     35                rm( (".dummy.c",".dummy") )
     36                sys.exit(1)
     37
    2938        _, out = sh("file .dummy", print2stdout=False)
    30         sh("rm -f .dummy.c > /dev/null 2>&1")
    31         sh("rm -f .dummy > /dev/null 2>&1")
     39        rm( (".dummy.c",".dummy") )
     40
    3241        return re.search("ELF\s([0-9]+)-bit", out).group(1)
    3342
     
    5867        return generic_list + typed_list + concurrent_list;
    5968
    60 # helper functions to run terminal commands
    61 def sh(cmd, dry_run = False, print2stdout = True):
    62         if dry_run :    # if this is a dry_run, only print the commands that would be ran
    63                 print("cmd: %s" % cmd)
    64                 return 0, None
    65         else :                  # otherwise create a pipe and run the desired command
    66                 proc = Popen(cmd, stdout=None if print2stdout else PIPE, stderr=STDOUT, shell=True)
    67                 out, err = proc.communicate()
    68                 return proc.returncode, out
    69 
    70 # helper function to replace patterns in a file
    71 def file_replace(fname, pat, s_after):
    72     # first, see if the pattern is even in the file.
    73     with open(fname) as f:
    74         if not any(re.search(pat, line) for line in f):
    75             return # pattern does not occur in file so we are done.
    76 
    77     # pattern is in the file, so perform replace operation.
    78     with open(fname) as f:
    79         out_fname = fname + ".tmp"
    80         out = open(out_fname, "w")
    81         for line in f:
    82             out.write(re.sub(pat, s_after, line))
    83         out.close()
    84         os.rename(out_fname, fname)
    85 
    86 # tests output may differ depending on the depth of the makefile
    87 def fix_MakeLevel(file) :
    88         if environ.get('MAKELEVEL') :
    89                 file_replace(file, "make\[%i\]" % int(environ.get('MAKELEVEL')), 'make' )
    90 
    91 # helper function to check if a files contains only a spacific string
    92 def fileContainsOnly(file, text) :
    93         with open(file) as f:
    94                 ff = f.read().strip()
    95                 result = ff == text.strip()
    96 
    97                 return result;
    98 
    99 # check whether or not a file is executable
    100 def fileIsExecutable(file) :
    101         try :
    102                 fileinfo = os.stat(file)
    103                 return bool(fileinfo.st_mode & stat.S_IXUSR)
    104         except Exception as inst:
    105                 print(type(inst))    # the exception instance
    106                 print(inst.args)     # arguments stored in .args
    107                 print(inst)
    108                 return False
     69# from the found tests, filter all the valid tests/desired tests
     70def validTests( options ):
     71        tests = []
     72
     73        # if we are regenerating the tests we need to find the information of the
     74        # already existing tests and create new info for the new tests
     75        if options.regenerate_expected :
     76                for testname in options.tests :
     77                        if testname.endswith( (".c", ".cc", ".cpp") ):
     78                                print('ERROR: "%s", tests are not allowed to end with a C/C++/CFA extension, ignoring it' % testname, file=sys.stderr)
     79                        else :
     80                                found = [test for test in allTests if test.name == testname]
     81                                tests.append( found[0] if len(found) == 1 else Test(testname, testname) )
     82
     83        else :
     84                # otherwise we only need to validate that all tests are present in the complete list
     85                for testname in options.tests:
     86                        test = [t for t in allTests if t.name == testname]
     87
     88                        if len(test) != 0 :
     89                                tests.append( test[0] )
     90                        else :
     91                                print('ERROR: No expected file for test %s, ignoring it' % testname, file=sys.stderr)
     92
     93        # make sure we have at least some test to run
     94        if len(tests) == 0 :
     95                print('ERROR: No valid test to run', file=sys.stderr)
     96                sys.exit(1)
     97
     98        return tests
     99
     100# parses the options
     101def getOptions():
     102        # create a parser with the arguments for the tests script
     103        parser = argparse.ArgumentParser(description='Script which runs cforall tests')
     104        parser.add_argument('--debug', help='Run all tests in debug or release', type=yes_no, default='no')
     105        parser.add_argument('--concurrent', help='Run concurrent tests', type=yes_no, default='yes')
     106        parser.add_argument('--dry-run', help='Don\'t run the tests, only output the commands', action='store_true')
     107        parser.add_argument('--list', help='List all test available', action='store_true')
     108        parser.add_argument('--all', help='Run all test available', action='store_true')
     109        parser.add_argument('--regenerate-expected', help='Regenerate the .expect by running the specified tets, can be used with --all option', action='store_true')
     110        parser.add_argument('-j', '--jobs', help='Number of tests to run simultaneously', type=int, default='8')
     111        parser.add_argument('--list-comp', help='List all valide arguments', action='store_true')
     112        parser.add_argument('tests', metavar='test', type=str, nargs='*', help='a list of tests to run')
     113
     114        options =  parser.parse_args()
     115
     116        # script must have at least some tests to run or be listing
     117        listing    = options.list or options.list_comp
     118        all_tests  = options.all
     119        some_tests = len(options.tests) > 0
     120
     121        # check that exactly one of the booleans is set to true
     122        if not sum( (listing, all_tests, some_tests) ) == 1 :
     123                print('ERROR: must have option \'--all\', \'--list\' or non-empty test list', file=sys.stderr)
     124                parser.print_help()
     125                sys.exit(1)
     126
     127        return options
     128
     129def jobCount( options ):
     130        # check if the user already passed in a number of jobs for multi-threading
     131        make_flags = environ.get('MAKEFLAGS')
     132        make_jobs_fds = re.search("--jobserver-(auth|fds)=\s*([0-9]+),([0-9]+)", make_flags) if make_flags else None
     133        if make_jobs_fds :
     134                tokens = os.read(int(make_jobs_fds.group(2)), 1024)
     135                options.jobs = len(tokens)
     136                os.write(int(make_jobs_fds.group(3)), tokens)
     137        else :
     138                options.jobs = multiprocessing.cpu_count()
     139
     140        # make sure we have a valid number of jobs that corresponds to user input
     141        if options.jobs <= 0 :
     142                print('ERROR: Invalid number of jobs', file=sys.stderr)
     143                sys.exit(1)
     144
     145        return min( options.jobs, len(tests) ), True if make_flags else False
    109146
    110147################################################################################
    111148#               running test functions
    112149################################################################################
     150# logic to run a single test and return the result (No handling of printing or other test framework logic)
    113151def run_single_test(test, generate, dry_run, debug):
    114152
    115153        # find the output file based on the test name and options flag
    116154        out_file = (".out/%s.log" % test.name) if not generate else (".expect/%s.txt" % test.path)
     155        err_file = ".err/%s.log" % test.name
    117156
    118157        # remove any outputs from the previous tests to prevent side effects
    119         sh("rm -f %s" % out_file, dry_run)
    120         sh("rm -f %s > /dev/null 2>&1" % test.name, dry_run)
    121 
    122         options = "-debug" if debug else "-nodebug";
     158        rm( (out_file, test.name), dry_run )
     159
     160        options = "-debug" if debug else "-nodebug"
    123161
    124162        # build, skipping to next test on error
    125         make_ret, _ = sh("""%s EXTRA_FLAGS="-quiet %s" %s 2> %s 1> /dev/null""" % (make_cmd, options, test.name, out_file), dry_run)
     163        make_ret, _ = sh("""%s test=yes EXTRA_FLAGS="-quiet %s" %s 2> %s 1> /dev/null""" % (make_cmd, options, test.name, out_file), dry_run)
    126164
    127165        # if the make command succeeds, continue; otherwise skip to diff
     
    137175                        sh("cat %s > %s" % (test.name, out_file), dry_run)
    138176
     177        else :
     178                # command failed, save the error log as the output file
     179                sh("mv %s %s" % (err_file, out_file), dry_run)
     180
    139181        retcode = 0
    140182        error = None
    141 
    142         # fix output to prevent make depth to cause issues
    143         fix_MakeLevel(out_file)
    144183
    145184        if generate :
     
    151190
    152191        else :
    153                 # diff the output of the files
    154                 diff_cmd = ("diff --old-group-format='\t\tmissing lines :\n"
    155                                         "%%<' \\\n"
    156                                         "--new-group-format='\t\tnew lines :\n"
    157                                         "%%>' \\\n"
    158                                         "--unchanged-group-format='%%=' \\"
    159                                         "--changed-group-format='\t\texpected :\n"
    160                                         "%%<\n"
    161                                         "\t\tgot :\n"
    162                                         "%%>' \\\n"
    163                                         "--new-line-format='\t\t%%dn\t%%L' \\\n"
    164                                         "--old-line-format='\t\t%%dn\t%%L' \\\n"
    165                                         "--unchanged-line-format='' \\\n"
    166                                         ".expect/%s.txt .out/%s.log")
    167 
    168192                # fetch return code and error from the diff command
    169                 retcode, error = sh(diff_cmd % (test.path, test.name), dry_run, False)
    170 
     193                retcode, error = diff(".expect/%s.txt" % test.path, ".out/%s.log" % test.name, dry_run)
     194       
    171195        # clean the executable
    172196        sh("rm -f %s > /dev/null 2>&1" % test.name, dry_run)
     
    174198        return retcode, error
    175199
    176 def run_test_instance(t, generate, dry_run, debug) :
    177         try :
    178                 # print formated name
    179                 name_txt = "%20s  " % t.name
    180 
    181                 #run the test instance and collect the result
    182                 test_failed, error = run_single_test(t, generate, dry_run, debug)
    183 
    184                 # update output based on current action
    185                 if generate :
    186                         failed_txt = "ERROR"
    187                         success_txt = "Done"
    188                 else :
    189                         failed_txt = "FAILED"
    190                         success_txt = "PASSED"
    191 
    192                 #print result with error if needed
    193                 text = name_txt + (failed_txt if test_failed else success_txt)
    194                 out = sys.stdout
    195                 if error :
    196                         text = text + "\n" + error
    197                         out = sys.stderr
    198 
    199                 print(text, file = out);
    200                 sys.stdout.flush()
    201                 sys.stderr.flush()
    202                 return test_failed
    203 
    204         except KeyboardInterrupt:
    205                 test_failed = True
    206 
     200# run a single test and handle the errors, outputs, printing, exception handling, etc.
     201def run_test_worker(t, generate, dry_run, debug) :
     202
     203        signal.signal(signal.SIGINT, signal.SIG_DFL)
     204        # print formated name
     205        name_txt = "%20s  " % t.name
     206
     207        #run the test instance and collect the result
     208        test_failed, error = run_single_test(t, generate, dry_run, debug)
     209
     210        # update output based on current action
     211        if generate :
     212                failed_txt = "ERROR"
     213                success_txt = "Done"
     214        else :
     215                failed_txt = "FAILED"
     216                success_txt = "PASSED"
     217
     218        #print result with error if needed
     219        text = name_txt + (failed_txt if test_failed else success_txt)
     220        out = sys.stdout
     221        if error :
     222                text = text + "\n" + error
     223                out = sys.stderr
     224
     225        print(text, file = out);
     226        sys.stdout.flush()
     227        sys.stderr.flush()
     228        signal.signal(signal.SIGINT, signal.SIG_IGN)
     229
     230        return test_failed
    207231
    208232# run the given list of tests with the given parameters
     
    211235        sh("%s clean > /dev/null 2>&1" % make_cmd, dry_run)
    212236
    213         #make sure the required folder are present
    214         sh('mkdir -p .out .expect', dry_run)
     237        # make sure the required folders are present
     238        sh('mkdir -p .out .expect .err', dry_run)
    215239
    216240        if generate :
    217241                print( "Regenerate tests for: " )
    218242
     243        # create the executor for our jobs and handle the signal properly
     244        original_sigint_handler = signal.signal(signal.SIGINT, signal.SIG_IGN)
     245        pool = Pool(jobs)
     246        signal.signal(signal.SIGINT, original_sigint_handler)
     247
    219248        # for each test to run
    220         pool = Pool(jobs)
    221249        try :
    222                 results = pool.map_async(partial(run_test_instance, generate=generate, dry_run=dry_run, debug=debug), tests ).get(9999)
     250                results = pool.map_async(partial(run_test_worker, generate=generate, dry_run=dry_run, debug=debug), tests ).get(3600)
    223251        except KeyboardInterrupt:
    224252                pool.terminate()
     
    226254                sys.exit(1)
    227255
    228         #clean the workspace
     256        # clean the workspace
    229257        sh("%s clean > /dev/null 2>&1" % make_cmd, dry_run)
    230258
     
    235263        return 0
    236264
    237 def yes_no(string):
    238         if string == "yes" :
    239                 return True
    240         if string == "no" :
    241                 return False
    242         raise argparse.ArgumentTypeError(msg)
    243         return False
    244 
    245265
    246266################################################################################
    247267#               main loop
    248268################################################################################
    249 # create a parser with the arguments for the tests script
    250 parser = argparse.ArgumentParser(description='Script which runs cforall tests')
    251 parser.add_argument('--debug', help='Run all tests in debug or release', type=yes_no, default='no')
    252 parser.add_argument('--concurrent', help='Run concurrent tests', type=yes_no, default='yes')
    253 parser.add_argument('--dry-run', help='Don\'t run the tests, only output the commands', action='store_true')
    254 parser.add_argument('--list', help='List all test available', action='store_true')
    255 parser.add_argument('--all', help='Run all test available', action='store_true')
    256 parser.add_argument('--regenerate-expected', help='Regenerate the .expect by running the specified tets, can be used with --all option', action='store_true')
    257 parser.add_argument('-j', '--jobs', help='Number of tests to run simultaneously', type=int, default='8')
    258 parser.add_argument('tests', metavar='test', type=str, nargs='*', help='a list of tests to run')
    259 
    260 # parse the command line arguments
    261 options = parser.parse_args()
    262 
    263 # script must have at least some tests to run
    264 if (len(options.tests) > 0  and     options.all and not options.list) \
    265 or (len(options.tests) == 0 and not options.all and not options.list) :
    266         print('ERROR: must have option \'--all\' or non-empty test list', file=sys.stderr)
    267         parser.print_help()
    268         sys.exit(1)
    269 
    270 # fetch the liest of all valid tests
    271 allTests = listTests( options.concurrent )
    272 
    273 # if user wants all tests than no other treatement of the test list is required
    274 if options.all or options.list :
    275         tests = allTests
    276 
    277 else :
    278         #otherwise we need to validate that the test list that was entered is valid
    279         tests = []
    280 
    281         # if we are regenerating the tests we need to find the information of the
    282         # already existing tests and create new info for the new tests
    283         if options.regenerate_expected :
    284                 for testname in options.tests :
    285                         if testname.endswith(".c") or testname.endswith(".cc") or testname.endswith(".cpp") :
    286                                 print('ERROR: "%s", tests are not allowed to end with a C/C++/CFA extension, ignoring it' % testname, file=sys.stderr)
    287                         else :
    288                                 found = [test for test in allTests if test.name == testname]
    289                                 tests.append( found[0] if len(found) == 1 else Test(testname, testname) )
    290 
    291         else :
    292                 # otherwise we only need to validate that all tests are present in the complete list
    293                 for testname in options.tests:
    294                         test = [t for t in allTests if t.name == testname]
    295 
    296                         if len(test) != 0 :
    297                                 tests.append( test[0] )
    298                         else :
    299                                 print('ERROR: No expected file for test %s, ignoring it' % testname, file=sys.stderr)
    300 
    301         # make sure we have at least some test to run
    302         if len(tests) == 0 :
    303                 print('ERROR: No valid test to run', file=sys.stderr)
    304                 sys.exit(1)
    305 
    306 # sort the test alphabetically for convenience
    307 tests.sort(key=lambda t: t.name)
    308 
    309 # check if the user already passed in a number of jobs for multi-threading
    310 make_flags = environ.get('MAKEFLAGS')
    311 make_jobs_fds = re.search("--jobserver-fds=\s*([0-9]+),([0-9]+)", make_flags) if make_flags else None
    312 if make_jobs_fds :
    313         tokens = os.read(int(make_jobs_fds.group(1)), 1024)
    314         options.jobs = len(tokens)
    315         os.write(int(make_jobs_fds.group(2)), tokens)
    316 
    317 # make sure we have a valid number of jobs that corresponds to user input
    318 if options.jobs <= 0 :
    319         print('ERROR: Invalid number of jobs', file=sys.stderr)
    320         sys.exit(1)
    321 
    322 print('Running (%s) on %i cores' % ("debug" if options.debug else "no debug", options.jobs))
    323 make_cmd = "make" if make_flags else ("make -j%i" % options.jobs)
    324 
    325 # users may want to simply list the tests
    326 if options.list :
    327         print("\n".join(map(lambda t: "%s (%s)" % (t.name, t.path), tests)))
    328 
    329 else :
    330         # otherwise run all tests and make sure to return the correct error code
    331         sys.exit( run_tests(tests, options.regenerate_expected, options.dry_run, options.jobs, options.debug) )
     269if __name__ == "__main__":
     270        #always run from same folder
     271        chdir()
     272       
     273        # parse the command line arguments
     274        options = getOptions()
     275
     276        # fetch the liest of all valid tests
     277        allTests = listTests( options.concurrent )
     278
      279        # if user wants all tests then no other treatment of the test list is required
     280        if options.all or options.list or options.list_comp :
     281                tests = allTests
     282
     283        else :
     284                #otherwise we need to validate that the test list that was entered is valid
     285                tests = validTests( options )
     286
     287        # sort the test alphabetically for convenience
     288        tests.sort(key=lambda t: t.name)
     289
     290        # users may want to simply list the tests
     291        if options.list_comp :
     292                print("-h --help --debug --concurrent --dry-run --list --all --regenerate-expected -j --jobs ", end='')
     293                print(" ".join(map(lambda t: "%s" % (t.name), tests)))
     294
     295        elif options.list :
     296                print("\n".join(map(lambda t: "%s (%s)" % (t.name, t.path), tests)))
     297
     298        else :
     299                options.jobs, forceJobs = jobCount( options )
     300
     301                print('Running (%s) on %i cores' % ("debug" if options.debug else "no debug", options.jobs))
     302                make_cmd = "make" if forceJobs else ("make -j%i" % options.jobs)
     303
     304                # otherwise run all tests and make sure to return the correct error code
     305                sys.exit( run_tests(tests, options.regenerate_expected, options.dry_run, options.jobs, options.debug) )
Note: See TracChangeset for help on using the changeset viewer.