Mirror of https://github.com/dashpay/dash.git
Allow test cases to be skipped
Currently, functional test cases can only pass or fail. It is sometimes helpful to skip a test instead, for example when the system it runs on does not meet the test's requirements; the rest of the suite can then run without the skipped test being reported as a failure. This commit adds the framework for tests to skip themselves when their requirements aren't met.
parent 02d64bd929
commit 232b6665bc
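As a rough illustration of the intended flow (the requirement check and tool name below are hypothetical, not part of this commit), a test script signals a skip simply by exiting with the dedicated code:

#!/usr/bin/env python3
# Hypothetical sketch, not code from this commit: a functional test
# that exits with the skip code when its requirements aren't met.
import shutil
import sys

TEST_EXIT_SKIPPED = 77  # same value this commit introduces

def main():
    # Illustrative requirement: some external tool must be on PATH.
    if shutil.which("required-tool") is None:
        print("required-tool not found, skipping test")
        sys.exit(TEST_EXIT_SKIPPED)  # the runner reports "Skipped"
    # ... real test logic would run here ...
    sys.exit(0)  # TEST_EXIT_PASSED

if __name__ == '__main__':
    main()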
@@ -28,9 +28,12 @@ from .util import (
 )
 from .authproxy import JSONRPCException


 class BitcoinTestFramework(object):

+    TEST_EXIT_PASSED = 0
+    TEST_EXIT_FAILED = 1
+    TEST_EXIT_SKIPPED = 77

     def __init__(self):
         self.num_nodes = 4
         self.setup_clean_chain = False
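The value 77 matches the exit code that Automake's test driver conventionally treats as "skipped", which is presumably why it was chosen here. Because the codes are class attributes, an individual test reaches them through `self`; a minimal hypothetical subclass (the class name and requirement check are illustrative, not from this commit):

import sys

from test_framework.test_framework import BitcoinTestFramework

class ExampleTest(BitcoinTestFramework):
    def run_test(self):
        requirements_met = False  # stand-in for a real capability probe
        if not requirements_met:
            sys.exit(self.TEST_EXIT_SKIPPED)

if __name__ == '__main__':
    ExampleTest().main()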
@@ -183,11 +186,11 @@ class BitcoinTestFramework(object):
                 print("".join(deque(open(f), MAX_LINES_TO_PRINT)))
         if success:
             self.log.info("Tests successful")
-            sys.exit(0)
+            sys.exit(self.TEST_EXIT_PASSED)
         else:
             self.log.error("Test failed. Test logging available at %s/test_framework.log", self.options.tmpdir)
             logging.shutdown()
-            sys.exit(1)
+            sys.exit(self.TEST_EXIT_FAILED)

     def _start_logging(self):
         # Add logger and logging handlers
@@ -24,6 +24,9 @@ import subprocess
 import tempfile
 import re

+TEST_EXIT_PASSED = 0
+TEST_EXIT_SKIPPED = 77
+
 BASE_SCRIPTS= [
     # Scripts that are run by the travis build process.
     # Longest test should go first, to favor running tests in parallel
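The exit-code values are repeated here rather than imported from the framework module: the runner launches each test as a separate process and only ever observes its return code, so a local copy of the values is all it needs to compare against.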
@@ -245,20 +248,20 @@ def run_tests(test_list, src_dir, build_dir, exeext, jobs=1, enable_coverage=Fal
     job_queue = TestHandler(jobs, tests_dir, test_list, flags)

     max_len_name = len(max(test_list, key=len))
-    results = BOLD[1] + "%s | %s | %s\n\n" % ("TEST".ljust(max_len_name), "PASSED", "DURATION") + BOLD[0]
+    results = BOLD[1] + "%s | %s | %s\n\n" % ("TEST".ljust(max_len_name), "PASSED ", "DURATION") + BOLD[0]
     for _ in range(len(test_list)):
-        (name, stdout, stderr, passed, duration) = job_queue.get_next()
-        all_passed = all_passed and passed
+        (name, stdout, stderr, status, duration) = job_queue.get_next()
+        all_passed = all_passed and status != "Failed"
         time_sum += duration

         print('\n' + BOLD[1] + name + BOLD[0] + ":")
-        print('' if passed else stdout + '\n', end='')
+        print('' if status == "Passed" else stdout + '\n', end='')
         print('' if stderr == '' else 'stderr:\n' + stderr + '\n', end='')
-        print("Pass: %s%s%s, Duration: %s s\n" % (BOLD[1], passed, BOLD[0], duration))
+        print("Status: %s%s%s, Duration: %s s\n" % (BOLD[1], status, BOLD[0], duration))

-        results += "%s | %s | %s s\n" % (name.ljust(max_len_name), str(passed).ljust(6), duration)
+        results += "%s | %s | %s s\n" % (name.ljust(max_len_name), status.ljust(7), duration)

-    results += BOLD[1] + "\n%s | %s | %s s (accumulated)" % ("ALL".ljust(max_len_name), str(all_passed).ljust(6), time_sum) + BOLD[0]
+    results += BOLD[1] + "\n%s | %s | %s s (accumulated)" % ("ALL".ljust(max_len_name), str(all_passed).ljust(7), time_sum) + BOLD[0]
     print(results)
     print("\nRuntime: %s s" % (int(time.time() - time0)))

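The column header gains a trailing space and the ljust widths move from 6 to 7 because the new "Skipped" label is seven characters long, one more than "Passed" or "Failed"; without the wider padding the results table would no longer line up.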
@@ -315,10 +318,15 @@ class TestHandler:
                 log_out.seek(0), log_err.seek(0)
                 [stdout, stderr] = [l.read().decode('utf-8') for l in (log_out, log_err)]
                 log_out.close(), log_err.close()
-                passed = stderr == "" and proc.returncode == 0
+                if proc.returncode == TEST_EXIT_PASSED and stderr == "":
+                    status = "Passed"
+                elif proc.returncode == TEST_EXIT_SKIPPED:
+                    status = "Skipped"
+                else:
+                    status = "Failed"
                 self.num_running -= 1
                 self.jobs.remove(j)
-                return name, stdout, stderr, passed, int(time.time() - time0)
+                return name, stdout, stderr, status, int(time.time() - time0)
             print('.', end='', flush=True)

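The returncode-to-label mapping is easy to exercise on its own. A self-contained sketch of the same decision logic (the classify helper is ours, not the runner's):

import subprocess
import sys

TEST_EXIT_PASSED = 0
TEST_EXIT_SKIPPED = 77

def classify(returncode, stderr):
    # Same decision order as TestHandler.get_next(): any stderr output
    # demotes an otherwise-passing test to "Failed".
    if returncode == TEST_EXIT_PASSED and stderr == "":
        return "Passed"
    elif returncode == TEST_EXIT_SKIPPED:
        return "Skipped"
    else:
        return "Failed"

# Quick check against a child process that exits with the skip code
# (requires Python 3.7+ for capture_output/text).
proc = subprocess.run([sys.executable, "-c", "import sys; sys.exit(77)"],
                      capture_output=True, text=True)
print(classify(proc.returncode, proc.stderr))  # -> Skipped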