#!/usr/bin/python
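"""Test driver for the Cforall test suite.

Discovers tests from the .expect/ directory, builds and runs each one in
parallel, and diffs its output against the expected .txt file.  See
getOptions() below for the supported command-line arguments.
"""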
from __future__ import print_function

from functools import partial
from multiprocessing import Pool
from os import listdir, environ
from os.path import isfile, join, splitext
from pybin.tools import *
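# pybin.tools provides the helpers used below: sh, rm, diff, chdir, yes_no,
# fileIsExecutable and fileContainsOnly, among others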

import argparse
import multiprocessing
import os
import re
import signal
import sys

################################################################################
#               help functions
################################################################################

# Test class that defines what a test is
class Test:
	def __init__(self, name, path):
		self.name, self.path = name, path

class TestResult:
	SUCCESS = 0
	FAILURE = 1
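	# 124 is the exit status the coreutils 'timeout' command returns when a test runs too long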
	TIMEOUT = 124

# builds a dummy program and inspects the resulting binary to find the machine type (32-bit / 64-bit)
def getMachineType():
	sh('echo "void ?{}(int&a,int b){}int main(){return 0;}" > .dummy.c')
	ret, out = sh("make .dummy -s", print2stdout=False)

	if ret != 0:
		print("Failed to identify architecture:")
		print(out)
		print("Stopping")
		rm( (".dummy.c",".dummy") )
		sys.exit(1)

	_, out = sh("file .dummy", print2stdout=False)
	rm( (".dummy.c",".dummy") )

	# 'file' reports e.g. "ELF 64-bit LSB executable"; extract the word size
	return re.search(r"ELF\s([0-9]+)-bit", out).group(1)

def listTestsFolder(folder) :
	path = ('./.expect/%s/' % folder) if folder else './.expect/'
	subpath = "%s/" % folder if folder else ""

	# tests are identified by the .txt files holding their expected output
	return [Test(name, subpath + name)
		for name in [splitext(f)[0] for f in listdir( path )
		if not f.startswith('.') and f.endswith('.txt')]
		]

# reads the directory ./.expect and identifies the tests
def listTests( concurrent ):
	machineType = getMachineType()

	# tests directly in the .expect folder will always be processed
	generic_list = listTestsFolder( "" )

	# tests in the machineType folder will be run only for the corresponding compiler
	typed_list = listTestsFolder( machineType )

	# tests in the concurrent folder will be run only if concurrency is enabled
	concurrent_list = listTestsFolder( "concurrent" ) if concurrent else []

	# concatenate the lists to get the full set of tests
	return generic_list + typed_list + concurrent_list

# from the discovered tests, keep only the valid/requested tests
def validTests( options ):
	tests = []

	# if we are regenerating the expected output we reuse the information of the
	# already existing tests and create new entries for the new tests
	if options.regenerate_expected :
		for testname in options.tests :
			if testname.endswith( (".c", ".cc", ".cpp") ):
				print('ERROR: "%s", tests are not allowed to end with a C/C++/CFA extension, ignoring it' % testname, file=sys.stderr)
			else :
				found = [test for test in allTests if test.name == testname]
				tests.append( found[0] if len(found) == 1 else Test(testname, testname) )

	else :
		# otherwise we only need to validate that all tests are present in the complete list
		for testname in options.tests:
			test = [t for t in allTests if t.name == testname]

			if len(test) != 0 :
				tests.append( test[0] )
			else :
				print('ERROR: No expected file for test %s, ignoring it' % testname, file=sys.stderr)

	# make sure we have at least one test to run
	if len(tests) == 0 :
		print('ERROR: No valid test to run', file=sys.stderr)
		sys.exit(1)

	return tests

# parses the command-line options
def getOptions():
	# create a parser with the arguments for the tests script
	parser = argparse.ArgumentParser(description='Script which runs cforall tests')
	parser.add_argument('--debug', help='Run all tests in debug or release', type=yes_no, default='no')
	parser.add_argument('--concurrent', help='Run concurrent tests', type=yes_no, default='yes')
	parser.add_argument('--dry-run', help='Don\'t run the tests, only output the commands', action='store_true')
	parser.add_argument('--list', help='List all available tests', action='store_true')
	parser.add_argument('--all', help='Run all available tests', action='store_true')
	parser.add_argument('--regenerate-expected', help='Regenerate the .expect files by running the specified tests; can be used with the --all option', action='store_true')
	parser.add_argument('-j', '--jobs', help='Number of tests to run simultaneously', type=int, default=8)
	parser.add_argument('--list-comp', help='List all valid arguments', action='store_true')
	parser.add_argument('tests', metavar='test', type=str, nargs='*', help='a list of tests to run')

	options = parser.parse_args()

	# script must have at least some tests to run or be listing
	listing    = options.list or options.list_comp
	all_tests  = options.all
	some_tests = len(options.tests) > 0

	# check that exactly one of the booleans is set to true
	if sum( (listing, all_tests, some_tests) ) != 1 :
		print('ERROR: must have option \'--all\', \'--list\' or non-empty test list', file=sys.stderr)
		parser.print_help()
		sys.exit(1)

	return options

def jobCount( options ):
	# check whether make passed us a jobserver, i.e. the user already chose a job count
	make_flags = environ.get('MAKEFLAGS')
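	# under "make -jN", MAKEFLAGS contains "--jobserver-auth=R,W" (older makes use
	# --jobserver-fds), where R and W are the file descriptors of the job-token pipe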
	make_jobs_fds = re.search(r"--jobserver-(auth|fds)=\s*([0-9]+),([0-9]+)", make_flags) if make_flags else None
	if make_jobs_fds :
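		# read the available job tokens (one byte each), count them, then hand them back to make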
		tokens = os.read(int(make_jobs_fds.group(2)), 1024)
		options.jobs = len(tokens)
		os.write(int(make_jobs_fds.group(3)), tokens)
	else :
		options.jobs = multiprocessing.cpu_count()

	# make sure we have a valid number of jobs that corresponds to user input
	if options.jobs <= 0 :
		print('ERROR: Invalid number of jobs', file=sys.stderr)
		sys.exit(1)

	return min( options.jobs, len(tests) ), bool(make_flags)

################################################################################
#               running test functions
################################################################################
# logic to run a single test and return the result (No handling of printing or other test framework logic)
def run_single_test(test, generate, dry_run, debug):

	# find the output file based on the test name and options flag
	out_file = (".out/%s.log" % test.name) if not generate else (".expect/%s.txt" % test.path)
	err_file = ".err/%s.log" % test.name

	# remove any outputs from the previous tests to prevent side effects
	rm( (out_file, err_file, test.name), dry_run )

	options = "-debug" if debug else "-nodebug"

	# build, skipping to next test on error
	make_ret, _ = sh("""%s test=yes DEBUG_FLAGS="%s" %s 2> %s 1> /dev/null""" % (make_cmd, options, test.name, out_file), dry_run)

	retcode = 0
	error = None

	# if the make command succeeds continue, otherwise skip to the diff
	if make_ret == 0 :
		# fetch optional input
		stdinput = "< .in/%s.txt" % test.name if isfile(".in/%s.txt" % test.name) else ""

		if fileIsExecutable(test.name) :
			# run test
			retcode, _ = sh("timeout 60 ./%s %s > %s 2>&1" % (test.name, stdinput, out_file), dry_run)
		else :
			# simply cat the result into the output
			sh("cat %s > %s" % (test.name, out_file), dry_run)

	else :
		# the build failed; move the error log into the output file so it gets reported
		sh("mv %s %s" % (err_file, out_file), dry_run)

	if retcode == 0:
		if generate :
			# if we are only generating the output we still need to check that the test actually exists
			if not dry_run and fileContainsOnly(out_file, "make: *** No rule to make target `%s'.  Stop." % test.name) :
				retcode = 1
				error = "\t\tNo make target for test %s!" % test.name
				sh("rm %s" % out_file, False)
		else :
			# fetch return code and error from the diff command
			retcode, error = diff(".expect/%s.txt" % test.path, ".out/%s.log" % test.name, dry_run)

	else:
		with open (out_file, "r") as myfile:
			error = myfile.read()


	# clean the executable
	sh("rm -f %s > /dev/null 2>&1" % test.name, dry_run)

	return retcode, error

# run a single test and handle the errors, outputs, printing, exception handling, etc.
def run_test_worker(t, generate, dry_run, debug) :

	# the worker inherits SIG_IGN from the pool; restore default SIGINT handling so an
	# interrupt can terminate the running test (it is ignored again before returning)
	signal.signal(signal.SIGINT, signal.SIG_DFL)
	# format the test name for printing
	name_txt = "%20s  " % t.name

	retcode, error = run_single_test(t, generate, dry_run, debug)

	# update output based on current action
	if generate :
		if   retcode == TestResult.SUCCESS: 	result_txt = "Done"
		elif retcode == TestResult.TIMEOUT: 	result_txt = "TIMEOUT"
		else :						result_txt = "ERROR code %d" % retcode
	else :
		if   retcode == TestResult.SUCCESS: 	result_txt = "PASSED"
		elif retcode == TestResult.TIMEOUT: 	result_txt = "TIMEOUT"
		else :						result_txt = "FAILED with code %d" % retcode

	# print the result, with the error text if any
	text = name_txt + result_txt
	out = sys.stdout
	if error :
		text = text + "\n" + error
		out = sys.stderr

	print(text, file = out)
	sys.stdout.flush()
	sys.stderr.flush()
	signal.signal(signal.SIGINT, signal.SIG_IGN)

	# report True on failure so the caller can tally failed tests
	return retcode != TestResult.SUCCESS

# run the given list of tests with the given parameters
def run_tests(tests, generate, dry_run, jobs, debug) :
	# clean the sandbox from previous commands
	sh("%s clean > /dev/null 2>&1" % make_cmd, dry_run)

	# make sure the required folders are present
	sh('mkdir -p .out .expect .err', dry_run)

	if generate :
		print( "Regenerate tests for: " )

	# create the executor for our jobs and handle the signal properly
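	# SIGINT is ignored while the pool forks so the worker processes inherit SIG_IGN;
	# the original handler is restored right after so Ctrl-C still interrupts the parent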
	original_sigint_handler = signal.signal(signal.SIGINT, signal.SIG_IGN)
	pool = Pool(jobs)
	signal.signal(signal.SIGINT, original_sigint_handler)

	# for each test to run
	try :
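		# map_async(...).get() with a timeout is used instead of a plain map so the
		# parent can still receive KeyboardInterrupt while waiting for the workers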
		results = pool.map_async(partial(run_test_worker, generate=generate, dry_run=dry_run, debug=debug), tests, chunksize = 1 ).get(7200)
	except KeyboardInterrupt:
		pool.terminate()
		print("Tests interrupted by user")
		sys.exit(1)

	# clean the workspace
	sh("%s clean > /dev/null 2>&1" % make_cmd, dry_run)

	# return a failing exit status if any test failed
	return 1 if any(results) else 0


################################################################################
#               main loop
################################################################################
if __name__ == "__main__":
	# always run from the same folder
	chdir()

	# parse the command line arguments
	options = getOptions()

	# fetch the list of all valid tests
	allTests = listTests( options.concurrent )

	# if the user wants all tests then no other treatment of the test list is required
	if options.all or options.list or options.list_comp :
		tests = allTests

	else :
		# otherwise we need to validate that the entered test list is valid
		tests = validTests( options )

	# sort the tests alphabetically for convenience
	tests.sort(key=lambda t: t.name)

	# users may want to simply list the tests
	if options.list_comp :
		print("-h --help --debug --concurrent --dry-run --list --all --regenerate-expected -j --jobs ", end='')
		print(" ".join(map(lambda t: "%s" % (t.name), tests)))

	elif options.list :
		print("\n".join(map(lambda t: "%s (%s)" % (t.name, t.path), tests)))

	else :
		options.jobs, forceJobs = jobCount( options )

		print('Running (%s) on %i cores' % ("debug" if options.debug else "no debug", options.jobs))
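		# when invoked from make, let sub-makes inherit its jobserver instead of passing -j ourselves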
		make_cmd = "make" if forceJobs else ("make -j%i" % options.jobs)

		# otherwise run all tests and make sure to return the correct error code
		sys.exit( run_tests(tests, options.regenerate_expected, options.dry_run, options.jobs, options.debug) )
