Changeset c07d724


Timestamp: Apr 28, 2017, 11:16:50 AM
Author: Thierry Delisle <tdelisle@…>
Branches: ADT, aaron-thesis, arm-eh, ast-experimental, cleanup-dtors, deferred_resn, demangler, enum, forall-pointer-decay, jacob/cs343-translation, jenkins-sandbox, master, new-ast, new-ast-unique-expr, new-env, no_list, persistent-indexer, pthread-emulation, qualifiedEnum, resolv-new, with_gc
Children: 0f9bef3, 2e5ad9f, f621a148
Parents: 9c59cd4
Message: Cleaned-up test script, notably by creating a pybin folder and a tools script inside it

Files: 2 added, 2 edited
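
The contents of the added pybin files are not part of this diff view; the updated test.py, however, now does "from pybin.tools import *" and calls sh, rm, diff, chdir, fileIsExecutable and fileContainsOnly from it. The following is only a minimal sketch of what such a tools module could look like, with signatures inferred from the call sites in the new test.py below; it is not the committed file.

# Hypothetical sketch of src/tests/pybin/tools.py -- inferred, not the committed code
from __future__ import print_function

import os
import stat
import sys
from subprocess import Popen, PIPE, STDOUT

# helper function to run terminal commands (same behaviour as the old sh() in test.py)
def sh(cmd, dry_run = False, print2stdout = True):
    if dry_run:
        # on a dry run, only print the command that would be run
        print("cmd: %s" % cmd)
        return 0, None
    proc = Popen(cmd, stdout=None if print2stdout else PIPE, stderr=STDOUT, shell=True)
    out, _ = proc.communicate()
    return proc.returncode, out

# remove a file, or a tuple/list of files, ignoring missing ones
def rm(files, dry_run = False):
    if isinstance(files, str):
        files = [files]
    for f in files:
        sh("rm -f %s > /dev/null 2>&1" % f, dry_run)

# diff an expected file against an actual output, returning (retcode, error text);
# the committed helper presumably keeps the long --*-group-format options that
# used to be built inline in run_single_test
def diff(expected, actual, dry_run = False):
    return sh("diff --ignore-all-space --ignore-blank-lines %s %s" % (expected, actual), dry_run, False)

# change to the directory of the test script so tests always run from the same folder
def chdir(path = None):
    path = path if path else sys.argv[0]
    os.chdir(os.path.dirname(os.path.abspath(path)))

# check whether a file contains only a specific string
def fileContainsOnly(file, text):
    with open(file) as f:
        return f.read().strip() == text.strip()

# check whether a file is executable by its owner
def fileIsExecutable(file):
    try:
        return bool(os.stat(file).st_mode & stat.S_IXUSR)
    except OSError as inst:
        print(inst)
        return False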

  • .gitignore

    r9c59cd4 rc07d724  
    11# build files
    22*.[ao]
     3*.pyc
    34
    45# generated by configure
  • src/tests/test.py

    r9c59cd4 rc07d724  
    66from os import listdir, environ
    77from os.path import isfile, join, splitext
    8 from subprocess import Popen, PIPE, STDOUT
     8from pybin.tools import *
    99
    1010import argparse
     
    1313import re
    1414import signal
    15 import stat
    1615import sys
    1716
     
    2827def getMachineType():
    2928        sh('echo "void ?{}(int*a,int b){}int main(){return 0;}" > .dummy.c')
    30         ret, out = sh("make .dummy -s", print2stdout=False)
     29        ret, out = sh("make .dummy -s", print2stdout=True)
    3130       
    3231        if ret != 0:
     
    3433                print(out)
    3534                print("Stopping")
    36                 sh("rm -f .dummy.c > /dev/null 2>&1")
    37                 sh("rm -f .dummy > /dev/null 2>&1")
     35                rm( (".dummy.c",".dummy") )
    3836                sys.exit(1)
    3937
    4038        _, out = sh("file .dummy", print2stdout=False)
    41         sh("rm -f .dummy.c > /dev/null 2>&1")
    42         sh("rm -f .dummy > /dev/null 2>&1")
     39        rm( (".dummy.c",".dummy") )
     40
    4341        return re.search("ELF\s([0-9]+)-bit", out).group(1)
    4442
     
    6967        return generic_list + typed_list + concurrent_list;
    7068
    71 # helper functions to run terminal commands
    72 def sh(cmd, dry_run = False, print2stdout = True):
    73         if dry_run :    # if this is a dry_run, only print the commands that would be run
    74                 print("cmd: %s" % cmd)
    75                 return 0, None
    76         else :                  # otherwise create a pipe and run the desired command
    77                 proc = Popen(cmd, stdout=None if print2stdout else PIPE, stderr=STDOUT, shell=True)
    78                 out, err = proc.communicate()
    79                 return proc.returncode, out
    80 
    81 # helper function to replace patterns in a file
    82 def file_replace(fname, pat, s_after):
    83     # first, see if the pattern is even in the file.
    84     with open(fname) as f:
    85         if not any(re.search(pat, line) for line in f):
    86             return # pattern does not occur in file so we are done.
    87 
    88     # pattern is in the file, so perform replace operation.
    89     with open(fname) as f:
    90         out_fname = fname + ".tmp"
    91         out = open(out_fname, "w")
    92         for line in f:
    93             out.write(re.sub(pat, s_after, line))
    94         out.close()
    95         os.rename(out_fname, fname)
    96 
    97 # tests output may differ depending on the depth of the makefile
    98 def fix_MakeLevel(file) :
    99         if environ.get('MAKELEVEL') :
    100                 file_replace(file, "make\[%i\]" % int(environ.get('MAKELEVEL')), 'make' )
    101 
    102 # helper function to check if a file contains only a specific string
    103 def fileContainsOnly(file, text) :
    104         with open(file) as f:
    105                 ff = f.read().strip()
    106                 result = ff == text.strip()
    107 
    108                 return result;
    109 
    110 # check whether or not a file is executable
    111 def fileIsExecutable(file) :
    112         try :
    113                 fileinfo = os.stat(file)
    114                 return bool(fileinfo.st_mode & stat.S_IXUSR)
    115         except Exception as inst:
    116                 print(type(inst))    # the exception instance
    117                 print(inst.args)     # arguments stored in .args
    118                 print(inst)
    119                 return False
    120 
    121 ################################################################################
    122 #               running test functions
    123 ################################################################################
    124 def run_single_test(test, generate, dry_run, debug):
    125 
    126         try :
    127                 # find the output file based on the test name and options flag
    128                 out_file = (".out/%s.log" % test.name) if not generate else (".expect/%s.txt" % test.path)
    129                 err_file = ".err/%s.log" % test.name
    130 
    131                 # remove any outputs from the previous tests to prevent side effects
    132                 sh("rm -f %s" % out_file, dry_run)
    133                 sh("rm -f %s > /dev/null 2>&1" % test.name, dry_run)
    134 
    135                 options = "-debug" if debug else "-nodebug";
    136 
    137                 # build, skipping to next test on error
    138                 make_ret, _ = sh("""%s test=yes EXTRA_FLAGS="-quiet %s" %s 2> %s 1> /dev/null""" % (make_cmd, options, test.name, out_file), dry_run)
    139 
    140                 # if the make command succeeds, continue; otherwise skip to diff
    141                 if make_ret == 0 :
    142                         # fetch optional input
    143                         stdinput = "< .in/%s.txt" % test.name if isfile(".in/%s.txt" % test.path) else ""
    144 
    145                         if fileIsExecutable(test.name) :
    146                                 # run test
    147                                 sh("./%s %s > %s 2>&1" % (test.name, stdinput, out_file), dry_run)
    148                         else :
    149                                 # simply cat the result into the output
    150                                 sh("cat %s > %s" % (test.name, out_file), dry_run)
    151 
    152                 else :
    153                         # the command failed, save the error log as the output
    154                         sh("mv %s %s" % (err_file, out_file), dry_run)
    155 
    156                 retcode = 0
    157                 error = None
    158 
    159                 # # fix output to prevent make depth to cause issues
    160                 # fix_MakeLevel(out_file)
    161 
    162                 if generate :
    163                         # if we are only generating the output we still need to check that the test actually exists
    164                         if not dry_run and fileContainsOnly(out_file, "make: *** No rule to make target `%s'.  Stop." % test.name) :
    165                                 retcode = 1;
    166                                 error = "\t\tNo make target for test %s!" % test.name
    167                                 sh("rm %s" % out_file, False)
    168 
    169                 else :
    170                         # diff the output of the files
    171                         diff_cmd = ("diff --ignore-all-space "
    172                                                 "--ignore-blank-lines "
    173                                                 "--old-group-format='\t\tmissing lines :\n"
    174                                                 "%%<' \\\n"
    175                                                 "--new-group-format='\t\tnew lines :\n"
    176                                                 "%%>' \\\n"
    177                                                 "--unchanged-group-format='%%=' \\"
    178                                                 "--changed-group-format='\t\texpected :\n"
    179                                                 "%%<\n"
    180                                                 "\t\tgot :\n"
    181                                                 "%%>' \\\n"
    182                                                 "--new-line-format='\t\t%%dn\t%%L' \\\n"
    183                                                 "--old-line-format='\t\t%%dn\t%%L' \\\n"
    184                                                 "--unchanged-line-format='' \\\n"
    185                                                 ".expect/%s.txt .out/%s.log")
    186 
    187                         # fetch return code and error from the diff command
    188                         retcode, error = sh(diff_cmd % (test.path, test.name), dry_run, False) 
    189         finally :
    190                 # clean the executable
    191                 sh("rm -f %s > /dev/null 2>&1" % test.name, dry_run)
    192 
    193         return retcode, error
    194 
    195 def run_test_instance(t, generate, dry_run, debug) :
    196 
    197         signal.signal(signal.SIGINT, signal.SIG_DFL)
    198         # print formatted name
    199         name_txt = "%20s  " % t.name
    200 
    201         #run the test instance and collect the result
    202         test_failed, error = run_single_test(t, generate, dry_run, debug)
    203 
    204         # update output based on current action
    205         if generate :
    206                 failed_txt = "ERROR"
    207                 success_txt = "Done"
    208         else :
    209                 failed_txt = "FAILED"
    210                 success_txt = "PASSED"
    211 
    212         #print result with error if needed
    213         text = name_txt + (failed_txt if test_failed else success_txt)
    214         out = sys.stdout
    215         if error :
    216                 text = text + "\n" + error
    217                 out = sys.stderr
    218 
    219         print(text, file = out);
    220         sys.stdout.flush()
    221         sys.stderr.flush()
    222         signal.signal(signal.SIGINT, signal.SIG_IGN)
    223 
    224         return test_failed
    225 
    226 
    227 # run the given list of tests with the given parameters
    228 def run_tests(tests, generate, dry_run, jobs, debug) :
    229         # clean the sandbox from previous commands
    230         sh("%s clean > /dev/null 2>&1" % make_cmd, dry_run)
    231 
    232         # make sure the required folders are present
    233         sh('mkdir -p .out .expect .err', dry_run)
    234 
    235         if generate :
    236                 print( "Regenerate tests for: " )
    237 
    238         # for each test to run
    239         original_sigint_handler = signal.signal(signal.SIGINT, signal.SIG_IGN)
    240         pool = Pool(jobs)
    241         signal.signal(signal.SIGINT, original_sigint_handler)
    242         try :
    243                 results = pool.map_async(partial(run_test_instance, generate=generate, dry_run=dry_run, debug=debug), tests ).get(9999)
    244         except KeyboardInterrupt:
    245                 pool.terminate()
    246                 print("Tests interrupted by user")
    247                 sys.exit(1)
    248 
    249         #clean the workspace
    250         sh("%s clean > /dev/null 2>&1" % make_cmd, dry_run)
    251 
    252         for failed in results:
    253                 if failed :
    254                         return 1
    255 
    256         return 0
    257 
    258 def yes_no(string):
    259         if string == "yes" :
    260                 return True
    261         if string == "no" :
    262                 return False
    263         raise argparse.ArgumentTypeError("'%s' is not a valid yes/no value" % string)
    264         return False
    265 
    266 
    267 ################################################################################
    268 #               main loop
    269 ################################################################################
    270 abspath = os.path.abspath(__file__)
    271 dname = os.path.dirname(abspath)
    272 os.chdir(dname)
    273 
    274 # create a parser with the arguments for the tests script
    275 parser = argparse.ArgumentParser(description='Script which runs cforall tests')
    276 parser.add_argument('--debug', help='Run all tests in debug or release', type=yes_no, default='no')
    277 parser.add_argument('--concurrent', help='Run concurrent tests', type=yes_no, default='yes')
    278 parser.add_argument('--dry-run', help='Don\'t run the tests, only output the commands', action='store_true')
    279 parser.add_argument('--list', help='List all tests available', action='store_true')
    280 parser.add_argument('--all', help='Run all tests available', action='store_true')
    281 parser.add_argument('--regenerate-expected', help='Regenerate the .expect by running the specified tests, can be used with --all option', action='store_true')
    282 parser.add_argument('-j', '--jobs', help='Number of tests to run simultaneously', type=int, default='8')
    283 parser.add_argument('--list-comp', help='List all valid arguments', action='store_true')
    284 parser.add_argument('tests', metavar='test', type=str, nargs='*', help='a list of tests to run')
    285 
    286 
    287 # parse the command line arguments
    288 options = parser.parse_args()
    289 do_list = options.list or options.list_comp
    290 
    291 # script must have at least some tests to run
    292 if (len(options.tests) > 0  and     options.all and not do_list) \
    293 or (len(options.tests) == 0 and not options.all and not do_list) :
    294         print('ERROR: must have option \'--all\' or non-empty test list', file=sys.stderr)
    295         parser.print_help()
    296         sys.exit(1)
    297 
    298 # fetch the list of all valid tests
    299 allTests = listTests( options.concurrent )
    300 
    301 # if the user wants all tests then no other treatment of the test list is required
    302 if options.all or do_list :
    303         tests = allTests
    304 
    305 else :
    306         #otherwise we need to validate that the test list that was entered is valid
     69# from the found tests, filter all the valid tests/desired tests
     70def validTests( options ):
    30771        tests = []
    30872
     
    31175        if options.regenerate_expected :
    31276                for testname in options.tests :
    313                         if testname.endswith(".c") or testname.endswith(".cc") or testname.endswith(".cpp") :
     77                        if testname.endswith( (".c", ".cc", ".cpp") ):
    31478                                print('ERROR: "%s", tests are not allowed to end with a C/C++/CFA extension, ignoring it' % testname, file=sys.stderr)
    31579                        else :
     
    33296                sys.exit(1)
    33397
    334 # sort the tests alphabetically for convenience
    335 tests.sort(key=lambda t: t.name)
    336 
    337 # users may want to simply list the tests
    338 if options.list_comp :
    339         print("-h --help --debug --concurrent --dry-run --list --all --regenerate-expected -j --jobs ", end='')
    340         print(" ".join(map(lambda t: "%s" % (t.name), tests)))
    341 
    342 elif options.list :
    343         print("\n".join(map(lambda t: "%s (%s)" % (t.name, t.path), tests)))
    344 
    345 else :
     98        return tests
     99
     100# parse the command line options
     101def getOptions():
     102        # create a parser with the arguments for the tests script
     103        parser = argparse.ArgumentParser(description='Script which runs cforall tests')
     104        parser.add_argument('--debug', help='Run all tests in debug or release', type=yes_no, default='no')
     105        parser.add_argument('--concurrent', help='Run concurrent tests', type=yes_no, default='yes')
     106        parser.add_argument('--dry-run', help='Don\'t run the tests, only output the commands', action='store_true')
      107        parser.add_argument('--list', help='List all tests available', action='store_true')
      108        parser.add_argument('--all', help='Run all tests available', action='store_true')
      109        parser.add_argument('--regenerate-expected', help='Regenerate the .expect by running the specified tests, can be used with --all option', action='store_true')
      110        parser.add_argument('-j', '--jobs', help='Number of tests to run simultaneously', type=int, default='8')
      111        parser.add_argument('--list-comp', help='List all valid arguments', action='store_true')
     112        parser.add_argument('tests', metavar='test', type=str, nargs='*', help='a list of tests to run')
     113
     114        options =  parser.parse_args()
     115
     116        # script must have at least some tests to run or be listing
     117        listing    = options.list or options.list_comp
     118        all_tests  = options.all
     119        some_tests = len(options.tests) > 0
     120
     121        # check that exactly one of the booleans is set to true
     122        if not sum( (listing, all_tests, some_tests) ) == 1 :
     123                print('ERROR: must have option \'--all\', \'--list\' or non-empty test list', file=sys.stderr)
     124                parser.print_help()
     125                sys.exit(1)
     126
     127        return options
     128
     129def jobCount( options ):
    346130        # check if the user already passed in a number of jobs for multi-threading
    347131        make_flags = environ.get('MAKEFLAGS')
     
    359143                sys.exit(1)
    360144
    361         options.jobs = min( options.jobs, len(tests) )
    362 
    363         print('Running (%s) on %i cores' % ("debug" if options.debug else "no debug", options.jobs))
    364         make_cmd = "make" if make_flags else ("make -j%i" % options.jobs)
    365 
    366         # otherwise run all tests and make sure to return the correct error code
    367         sys.exit( run_tests(tests, options.regenerate_expected, options.dry_run, options.jobs, options.debug) )
     145        return min( options.jobs, len(tests) ), True if make_flags else False
     146
     147################################################################################
     148#               running test functions
     149################################################################################
     150# logic to run a single test and return the result (No handling of printing or other test framework logic)
     151def run_single_test(test, generate, dry_run, debug):
     152
     153        # find the output file based on the test name and options flag
     154        out_file = (".out/%s.log" % test.name) if not generate else (".expect/%s.txt" % test.path)
     155        err_file = ".err/%s.log" % test.name
     156
     157        # remove any outputs from the previous tests to prevent side effects
     158        rm( (out_file, test.name), dry_run )
     159
     160        options = "-debug" if debug else "-nodebug"
     161
     162        # build, skipping to next test on error
     163        make_ret, _ = sh("""%s test=yes EXTRA_FLAGS="-quiet %s" %s 2> %s 1> /dev/null""" % (make_cmd, options, test.name, out_file), dry_run)
     164
      165        # if the make command succeeds, continue; otherwise skip to diff
     166        if make_ret == 0 :
     167                # fetch optional input
     168                stdinput = "< .in/%s.txt" % test.name if isfile(".in/%s.txt" % test.path) else ""
     169
     170                if fileIsExecutable(test.name) :
     171                        # run test
     172                        sh("./%s %s > %s 2>&1" % (test.name, stdinput, out_file), dry_run)
     173                else :
     174                        # simply cat the result into the output
     175                        sh("cat %s > %s" % (test.name, out_file), dry_run)
     176
     177        else :
      178                # the command failed, save the error log as the output
     179                sh("mv %s %s" % (err_file, out_file), dry_run)
     180
     181        retcode = 0
     182        error = None
     183
     184        if generate :
      185                # if we are only generating the output we still need to check that the test actually exists
     186                if not dry_run and fileContainsOnly(out_file, "make: *** No rule to make target `%s'.  Stop." % test.name) :
     187                        retcode = 1;
     188                        error = "\t\tNo make target for test %s!" % test.name
     189                        sh("rm %s" % out_file, False)
     190
     191        else :
     192                # fetch return code and error from the diff command
     193                retcode, error = diff(".expect/%s.txt" % test.path, ".out/%s.log" % test.name, dry_run)
     194       
     195        # clean the executable
     196        sh("rm -f %s > /dev/null 2>&1" % test.name, dry_run)
     197
     198        return retcode, error
     199
     200# run a single test and handle the errors, outputs, printing, exception handling, etc.
     201def run_test_worker(t, generate, dry_run, debug) :
     202
     203        signal.signal(signal.SIGINT, signal.SIG_DFL)
      204        # print formatted name
     205        name_txt = "%20s  " % t.name
     206
     207        #run the test instance and collect the result
     208        test_failed, error = run_single_test(t, generate, dry_run, debug)
     209
     210        # update output based on current action
     211        if generate :
     212                failed_txt = "ERROR"
     213                success_txt = "Done"
     214        else :
     215                failed_txt = "FAILED"
     216                success_txt = "PASSED"
     217
     218        #print result with error if needed
     219        text = name_txt + (failed_txt if test_failed else success_txt)
     220        out = sys.stdout
     221        if error :
     222                text = text + "\n" + error
     223                out = sys.stderr
     224
     225        print(text, file = out);
     226        sys.stdout.flush()
     227        sys.stderr.flush()
     228        signal.signal(signal.SIGINT, signal.SIG_IGN)
     229
     230        return test_failed
     231
     232# run the given list of tests with the given parameters
     233def run_tests(tests, generate, dry_run, jobs, debug) :
     234        # clean the sandbox from previous commands
     235        sh("%s clean > /dev/null 2>&1" % make_cmd, dry_run)
     236
      237        # make sure the required folders are present
     238        sh('mkdir -p .out .expect .err', dry_run)
     239
     240        if generate :
     241                print( "Regenerate tests for: " )
     242
     243        # create the executor for our jobs and handle the signal properly
     244        original_sigint_handler = signal.signal(signal.SIGINT, signal.SIG_IGN)
     245        pool = Pool(jobs)
     246        signal.signal(signal.SIGINT, original_sigint_handler)
     247
     248        # for each test to run
     249        try :
     250                results = pool.map_async(partial(run_test_worker, generate=generate, dry_run=dry_run, debug=debug), tests ).get(3600)
     251        except KeyboardInterrupt:
     252                pool.terminate()
     253                print("Tests interrupted by user")
     254                sys.exit(1)
     255
     256        # clean the workspace
     257        sh("%s clean > /dev/null 2>&1" % make_cmd, dry_run)
     258
     259        for failed in results:
     260                if failed :
     261                        return 1
     262
     263        return 0
     264
     265
     266################################################################################
     267#               main loop
     268################################################################################
     269if __name__ == "__main__":
     270        #always run from same folder
     271        chdir()
     272       
     273        # parse the command line arguments
     274        options = getOptions()
     275
      276        # fetch the list of all valid tests
     277        allTests = listTests( options.concurrent )
     278
      279        # if the user wants all tests then no other treatment of the test list is required
     280        if options.all or options.list or options.list_comp :
     281                tests = allTests
     282
     283        else :
     284                #otherwise we need to validate that the test list that was entered is valid
     285                tests = validTests( options )
     286
     287        # sort the test alphabetically for convenience
     288        tests.sort(key=lambda t: t.name)
     289
     290        # users may want to simply list the tests
     291        if options.list_comp :
     292                print("-h --help --debug --concurrent --dry-run --list --all --regenerate-expected -j --jobs ", end='')
     293                print(" ".join(map(lambda t: "%s" % (t.name), tests)))
     294
     295        elif options.list :
     296                print("\n".join(map(lambda t: "%s (%s)" % (t.name, t.path), tests)))
     297
     298        else :
     299                options.jobs, forceJobs = jobCount( options )
     300
     301                print('Running (%s) on %i cores' % ("debug" if options.debug else "no debug", options.jobs))
     302                make_cmd = "make" if forceJobs else ("make -j%i" % options.jobs)
     303
     304                # otherwise run all tests and make sure to return the correct error code
     305                sys.exit( run_tests(tests, options.regenerate_expected, options.dry_run, options.jobs, options.debug) )
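
One detail worth calling out in run_tests above: SIGINT is switched to SIG_IGN before the Pool is created so that the forked workers inherit the ignore handler and Ctrl-C is delivered only to the parent; the original handler is then restored, and map_async(...).get() is given an explicit timeout because a bare get() can block in a way that never raises KeyboardInterrupt on older Pythons. A self-contained sketch of that pattern, with a made-up worker function:

# Standalone illustration of the Pool + SIGINT pattern used by run_tests;
# the worker and its arguments are invented for the example.
from __future__ import print_function

import signal
import sys
from functools import partial
from multiprocessing import Pool

def worker(item, factor):
    return item * factor

if __name__ == "__main__":
    # workers inherit the parent's signal handlers when forked, so ignore SIGINT
    # around Pool creation: only the parent will see Ctrl-C
    original_sigint_handler = signal.signal(signal.SIGINT, signal.SIG_IGN)
    pool = Pool(4)
    signal.signal(signal.SIGINT, original_sigint_handler)

    try:
        # the explicit (large) timeout keeps get() interruptible
        results = pool.map_async(partial(worker, factor=2), range(10)).get(3600)
    except KeyboardInterrupt:
        pool.terminate()
        print("interrupted by user", file=sys.stderr)
        sys.exit(1)

    pool.close()
    pool.join()
    print(results)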