5db506ba5943868cc2c845f717508739b7f05714 tests: Add option --valgrind to run nodes under valgrind in the functional tests (practicalswift)

Pull request description:

What is better than fixing bugs? Fixing entire bug classes of course! :)

Add option `--valgrind` to run the functional tests under Valgrind.

Regular functional testing under Valgrind would have caught many of the uninitialized reads we've seen historically. Let's kill this bug class once and for all: let's never use an uninitialized value ever again. Or at least not one that would be triggered by running the functional tests! :)

My hope is that this addition will make it super-easy to run the functional tests under Valgrind and thus increase the probability of people making use of it :) Hopefully `test/functional/test_runner.py --valgrind` will become a natural part of the pre-release QA process.

**Usage:**

```
$ test/functional/test_runner.py --help
…
  --valgrind            run nodes under the valgrind memory error detector:
                        expect at least a ~10x slowdown, valgrind 3.14 or
                        later required
```

**Live demo:**

First, let's re-introduce a memory bug by reverting the recent P2P uninitialized read bug fix from PR #17624 ("net: Fix an uninitialized read in ProcessMessage(…, "tx", …) when receiving a transaction we already have").

```
$ git diff
diff --git a/src/consensus/validation.h b/src/consensus/validation.h
index 3401eb64c..940adea33 100644
--- a/src/consensus/validation.h
+++ b/src/consensus/validation.h
@@ -114,7 +114,7 @@ inline ValidationState::~ValidationState() {};
 class TxValidationState : public ValidationState {
 private:
-    TxValidationResult m_result = TxValidationResult::TX_RESULT_UNSET;
+    TxValidationResult m_result;
 public:
     bool Invalid(TxValidationResult result, const std::string &reject_reason="",
```

Second, let's test as normal without Valgrind:

```
$ test/functional/p2p_segwit.py -l INFO
2019-11-28T09:30:42.810000Z TestFramework (INFO): Initializing test directory /tmp/bitcoin_func_test__fc8q3qo
…
2019-11-28T09:31:57.187000Z TestFramework (INFO): Subtest: test_non_standard_witness_blinding (Segwit active = True)
…
2019-11-28T09:32:08.265000Z TestFramework (INFO): Tests successful
```

Third, let's test with `--valgrind` and see if the test fails (as we expect) when the uninitialized value is used:

```
$ test/functional/p2p_segwit.py -l INFO --valgrind
2019-11-28T09:32:33.018000Z TestFramework (INFO): Initializing test directory /tmp/bitcoin_func_test_gtjecx2l
…
2019-11-28T09:40:36.702000Z TestFramework (INFO): Subtest: test_non_standard_witness_blinding (Segwit active = True)
2019-11-28T09:40:37.813000Z TestFramework (ERROR): Assertion failed ConnectionRefusedError: [Errno 111] Connection refused
```

ACKs for top commit:
  MarcoFalke:
    ACK 5db506ba5943868cc2c845f717508739b7f05714
  jonatack:
    ACK 5db506ba5943868cc2c845f717508739b7f05714

Tree-SHA512: 2eaecacf4da166febad88b2a8ee6d7ac2bcd38d4c1892ca39516b6343e8f8c8814edf5eaf14c90f11a069a0389d24f0713076112ac284de987e72fc5f6cc3795
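For reference, the option works by prepending a valgrind invocation to each test node's argument list. A minimal sketch of the effect, with the flag set taken from the `test_node.py` changes below (the binary name and datadir path are illustrative):

```
# Sketch of what TestNode.__init__ does when use_valgrind=True (see below).
args = ["valgrind", "--suppressions=contrib/valgrind.supp",
        "--gen-suppressions=all", "--exit-on-first-error=yes",
        "--error-exitcode=1", "--quiet"] + ["dashd", "-datadir=/tmp/test_node0"]
```
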
#!/usr/bin/env python3
# Copyright (c) 2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Class for dashd node under test"""

import contextlib
import decimal
import errno
from enum import Enum
import http.client
import json
import logging
import os.path
import re
import subprocess
import tempfile
import time
import urllib.parse
import shlex
import sys
import collections

from .authproxy import JSONRPCException
from .util import (
    append_config,
    delete_cookie_file,
    get_rpc_proxy,
    rpc_url,
    wait_until,
    p2p_port,
    get_chain_folder,
    Options
)

BITCOIND_PROC_WAIT_TIMEOUT = 60


class FailedToStartError(Exception):
    """Raised when a node fails to start correctly."""


class ErrorMatch(Enum):
    FULL_TEXT = 1
    FULL_REGEX = 2
    PARTIAL_REGEX = 3


class TestNode():
    """A class for representing a dashd node under test.

    This class contains:

    - state about the node (whether it's running, etc)
    - a Python subprocess.Popen object representing the running process
    - an RPC connection to the node
    - one or more P2P connections to the node

    To make things easier for the test writer, any unrecognised messages will
    be dispatched to the RPC connection."""

    def __init__(self, i, datadir, extra_args_from_options, *, chain, rpchost, timewait, bitcoind, bitcoin_cli, mocktime, coverage_dir, cwd, extra_conf=None, extra_args=None, use_cli=False, start_perf=False, use_valgrind=False):
        """
        Kwargs:
            start_perf (bool): If True, begin profiling the node with `perf` as soon as
                the node starts.
        """

        self.index = i
        self.datadir = datadir
        self.chain = chain
        self.bitcoinconf = os.path.join(self.datadir, "dash.conf")
        self.stdout_dir = os.path.join(self.datadir, "stdout")
        self.stderr_dir = os.path.join(self.datadir, "stderr")
        self.rpchost = rpchost
        self.rpc_timeout = timewait
        self.rpc_timeout *= Options.timeout_scale
        self.binary = bitcoind
        self.coverage_dir = coverage_dir
        self.cwd = cwd
        self.mocktime = mocktime
        if extra_conf is not None:
            append_config(datadir, extra_conf)
        # Most callers will just need to add extra args to the standard list below.
        # For those callers that need more flexibility, they can just set the args property directly.
        # Note that common args are set in the config file (see initialize_datadir)
        self.extra_args = extra_args
        self.extra_args_from_options = extra_args_from_options
        # Configuration for logging is set as command-line args rather than in the bitcoin.conf file.
        # This means that starting a bitcoind using the temp dir to debug a failed test won't
        # spam debug.log.
        self.args = [
            self.binary,
            "-datadir=" + self.datadir,
            "-logtimemicros",
            "-logthreadnames",
            "-debug",
            "-debugexclude=libevent",
            "-debugexclude=leveldb",
            "-mocktime=" + str(mocktime),
            "-uacomment=testnode%d" % i
        ]
        if use_valgrind:
            default_suppressions_file = os.path.join(
                os.path.dirname(os.path.realpath(__file__)),
                "..", "..", "..", "contrib", "valgrind.supp")
            suppressions_file = os.getenv("VALGRIND_SUPPRESSIONS_FILE",
                                          default_suppressions_file)
            self.args = ["valgrind", "--suppressions={}".format(suppressions_file),
                         "--gen-suppressions=all", "--exit-on-first-error=yes",
                         "--error-exitcode=1", "--quiet"] + self.args

        self.cli = TestNodeCLI(bitcoin_cli, self.datadir)
        self.use_cli = use_cli
        self.start_perf = start_perf

        # Don't try auto backups (they fail a lot when running tests)
        self.args.append("-createwalletbackups=0")

        self.running = False
        self.process = None
        self.rpc_connected = False
        self.rpc = None
        self.url = None
        self.log = logging.getLogger('TestFramework.node%d' % i)
        self.cleanup_on_exit = True  # Whether to kill the node when this object goes away
        # Cache perf subprocesses here by their data output filename.
        self.perf_subprocesses = {}

        self.p2ps = []

    def get_deterministic_priv_key(self):
        """Return a deterministic priv key in base58, that only depends on the node's index"""
        AddressKeyPair = collections.namedtuple('AddressKeyPair', ['address', 'key'])
        PRIV_KEYS = [
            # address , privkey
            AddressKeyPair('yYdShjQSptFKitYLksFEUSwHe4hnbar5rf', 'cMfbiEsnG5b8Gwm6vEgfWvZLuXZNC4zsN2y7Es3An9xHRWRjmwgR'),
            AddressKeyPair('yfTFJgvq65UZsb9RBbpdYAAzsJoCGXqH2w', 'cStuFACUD1N6JjKQxNLUQ443qJUtSzLitKKEkA8x6utxTPZTLUtA'),
            AddressKeyPair('yU3w4VDjKhHiZpWszkUZVnFTS56AfgdfPV', 'cQb5yh2sTiG7dsxxbXHhWSBLMByYT7jY49A1kC7zKhgL9WNHysWW'),
            AddressKeyPair('yYhzix2R5LiYnDixsUnF8XwBYGYpyeTgB4', 'cW9Gu6uU4KoZJQcdyUvjULNRg4C8srPJw1adhgdTZMr9YQdKHtcn'),
            AddressKeyPair('yiQ3qLx5L1BW9XA6JAG7hC8UQDktcBCeYG', 'cSq7gHVC1QPsswyX2pE5C38UnWZXfCLr7XnkjnDwuZ68NkWp183T'),
            AddressKeyPair('yUL8h8mR7aNDRsU5zhcDbpp6YtA6ieUtK2', 'cTk7hiDKgxZX3JSb37vywdYYjjJows4DQjEaxBJDGF6LC6GXvPKo'),
            AddressKeyPair('yfy21e12jn3A3uDicNehCq486o9fMwJKMc', 'cMuko9rLDbtxCFWuBSrFgBDRSMxsLWKpJKScRGNuWKbhuQsnsjKT'),
            AddressKeyPair('yURgENB3b2YRMWnbhKF7iGs3KoaVRVXsJr', 'cQhdjTMh57MaHCDk9FsWGPtftRMBUuhaYAtouWnetcewmBuSrLSM'),
            AddressKeyPair('yYC9AxBEUs3ZZxfcQvj2LUF5PVxxtqaEs7', 'cQFueiiP13mfytV3Svoe4o4Ux79fRJvwuSgHapXsnBwrHod57EeL'),
        ]
        return PRIV_KEYS[self.index]

    def get_mem_rss_kilobytes(self):
        """Get the memory usage (RSS) per `ps`.

        If the process is stopped or `ps` is unavailable, return None.
        """
        if not (self.running and self.process):
            self.log.warning("Couldn't get memory usage; process isn't running.")
            return None

        try:
            return int(subprocess.check_output(
                "ps h -o rss {}".format(self.process.pid),
                shell=True, stderr=subprocess.DEVNULL).strip())

        # Catching `Exception` broadly to avoid failing on platforms where ps
        # isn't installed or doesn't work as expected, e.g. OpenBSD.
        #
        # We could later use something like `psutil` to work across platforms.
        except Exception:
            self.log.exception("Unable to get memory usage")
            return None

    def _node_msg(self, msg: str) -> str:
        """Return a modified msg that identifies this node by its index as a debugging aid."""
        return "[node %d] %s" % (self.index, msg)

    def _raise_assertion_error(self, msg: str):
        """Raise an AssertionError with msg modified to identify this node."""
        raise AssertionError(self._node_msg(msg))

    def __del__(self):
        # Ensure that we don't leave any dashd processes lying around after
        # the test ends
        if self.process and self.cleanup_on_exit:
            # Should only happen on test failure
            # Avoid using logger, as that may have already been shut down when
            # this destructor is called.
            print(self._node_msg("Cleaning up leftover process"))
            self.process.kill()

    def __getattr__(self, name):
        """Dispatches any unrecognised messages to the RPC connection or a CLI instance."""
        if self.use_cli:
            return getattr(self.cli, name)
        else:
            assert self.rpc_connected and self.rpc is not None, self._node_msg("Error: no RPC connection")
            return getattr(self.rpc, name)
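
    # Example of the dispatch above (hypothetical test code, not part of this file):
    #   node.getblockcount()   # TestNode has no such attribute, so __getattr__
    #                          # forwards the call to self.rpc (or self.cli)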

    def start(self, extra_args=None, *, cwd=None, stdout=None, stderr=None, **kwargs):
        """Start the node."""
        if extra_args is None:
            extra_args = self.extra_args

        # Add a new stdout and stderr file each time dashd is started
        if stderr is None:
            stderr = tempfile.NamedTemporaryFile(dir=self.stderr_dir, delete=False)
        if stdout is None:
            stdout = tempfile.NamedTemporaryFile(dir=self.stdout_dir, delete=False)
        self.stderr = stderr
        self.stdout = stdout

        if cwd is None:
            cwd = self.cwd

        all_args = self.args + self.extra_args_from_options + extra_args
        if self.mocktime != 0:
            all_args = all_args + ["-mocktime=%d" % self.mocktime]

        # Delete any existing cookie file -- if such a file exists (eg due to
        # unclean shutdown), it will get overwritten anyway by dashd, and
        # potentially interfere with our attempt to authenticate
        delete_cookie_file(self.datadir, self.chain)

        # add environment variable LIBC_FATAL_STDERR_=1 so that libc errors are written to stderr and not the terminal
        subp_env = dict(os.environ, LIBC_FATAL_STDERR_="1")

        self.process = subprocess.Popen(all_args, env=subp_env, stdout=stdout, stderr=stderr, cwd=cwd, **kwargs)

        self.running = True
        self.log.debug("dashd started, waiting for RPC to come up")

        if self.start_perf:
            self._start_perf()

    def wait_for_rpc_connection(self):
        """Sets up an RPC connection to the dashd process. Raises an AssertionError if unable to connect."""
        # Poll at a rate of four times per second
        poll_per_s = 4
        for _ in range(poll_per_s * self.rpc_timeout):
            if self.process.poll() is not None:
                raise FailedToStartError(self._node_msg(
                    'dashd exited with status {} during initialization'.format(self.process.returncode)))
            try:
                rpc = get_rpc_proxy(rpc_url(self.datadir, self.index, self.chain, self.rpchost), self.index, timeout=self.rpc_timeout, coveragedir=self.coverage_dir)
                rpc.getblockcount()
                # If the call to getblockcount() succeeds then the RPC connection is up
                self.log.debug("RPC successfully started")
                if self.use_cli:
                    return
                self.rpc = rpc
                self.rpc_connected = True
                self.url = self.rpc.url
                return
            except IOError as e:
                if e.errno != errno.ECONNREFUSED:  # Port not yet open?
                    raise  # unknown IO error
            except JSONRPCException as e:  # Initialization phase
                # -28 RPC in warmup
                # -342 Service unavailable, RPC server started but is shutting down due to error
                if e.error['code'] != -28 and e.error['code'] != -342:
                    raise  # unknown JSON RPC exception
            except ValueError as e:  # cookie file not found and no rpcuser or rpcpassword; dashd is still starting
                if "No RPC credentials" not in str(e):
                    raise
            time.sleep(1.0 / poll_per_s)
        self._raise_assertion_error("Unable to connect to dashd")
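
    # Note on the loop above: poll_per_s * rpc_timeout iterations with a
    # 1/poll_per_s second sleep between attempts means the connection is
    # retried for roughly rpc_timeout seconds in total.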

    def get_wallet_rpc(self, wallet_name):
        if self.use_cli:
            return self.cli("-rpcwallet={}".format(wallet_name))
        else:
            assert self.rpc_connected and self.rpc, self._node_msg("RPC not connected")
            wallet_path = "wallet/{}".format(urllib.parse.quote(wallet_name))
            return self.rpc / wallet_path

    def stop_node(self, expected_stderr='', wait=0):
        """Stop the node."""
        if not self.running:
            return
        self.log.debug("Stopping node")
        try:
            self.stop(wait=wait)
        except http.client.CannotSendRequest:
            self.log.exception("Unable to stop node.")

        # If there are any running perf processes, stop them.
        for profile_name in tuple(self.perf_subprocesses.keys()):
            self._stop_perf(profile_name)

        # Check that stderr is as expected
        self.stderr.seek(0)
        stderr = self.stderr.read().decode('utf-8').strip()
        if stderr != expected_stderr:
            raise AssertionError("Unexpected stderr {} != {}".format(stderr, expected_stderr))

        self.stdout.close()
        self.stderr.close()

        del self.p2ps[:]

    def is_node_stopped(self):
        """Checks whether the node has stopped.

        Returns True if the node has stopped. False otherwise.
        This method is responsible for freeing resources (self.process)."""
        if not self.running:
            return True
        return_code = self.process.poll()
        if return_code is None:
            return False

        # process has stopped. Assert that it didn't return an error code.
        assert return_code == 0, self._node_msg(
            "Node returned non-zero exit code (%d) when stopping" % return_code)
        self.running = False
        self.process = None
        self.rpc_connected = False
        self.rpc = None
        self.log.debug("Node stopped")
        return True

    def wait_until_stopped(self, timeout=BITCOIND_PROC_WAIT_TIMEOUT):
        wait_until(self.is_node_stopped, timeout=timeout)

    @contextlib.contextmanager
    def assert_debug_log(self, expected_msgs, timeout=2):
        time_end = time.time() + timeout
        chain = get_chain_folder(self.datadir, self.chain)
        debug_log = os.path.join(self.datadir, chain, 'debug.log')
        with open(debug_log, encoding='utf-8') as dl:
            dl.seek(0, 2)
            prev_size = dl.tell()

        yield

        while True:
            found = True
            with open(debug_log, encoding='utf-8') as dl:
                dl.seek(prev_size)
                log = dl.read()
            print_log = " - " + "\n - ".join(log.splitlines())
            for expected_msg in expected_msgs:
                if re.search(re.escape(expected_msg), log, flags=re.MULTILINE) is None:
                    found = False
            if found:
                return
            if time.time() >= time_end:
                break
            time.sleep(0.05)
        self._raise_assertion_error('Expected messages "{}" do not partially match log:\n\n{}\n\n'.format(str(expected_msgs), print_log))
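
    # Typical usage of assert_debug_log (hypothetical example; the message
    # text depends on what the node actually writes to debug.log):
    #   with node.assert_debug_log(expected_msgs=["Flushed fee estimates"]):
    #       node.some_rpc_call()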

    @contextlib.contextmanager
    def assert_memory_usage_stable(self, *, increase_allowed=0.03):
        """Context manager that allows the user to assert that a node's memory usage (RSS)
        hasn't increased beyond some threshold percentage.

        Args:
            increase_allowed (float): the fractional increase in memory allowed until failure;
                e.g. `0.12` for up to 12% increase allowed.
        """
        before_memory_usage = self.get_mem_rss_kilobytes()

        yield

        after_memory_usage = self.get_mem_rss_kilobytes()

        if not (before_memory_usage and after_memory_usage):
            self.log.warning("Unable to detect memory usage (RSS) - skipping memory check.")
            return

        perc_increase_memory_usage = 1 - (float(before_memory_usage) / after_memory_usage)

        if perc_increase_memory_usage > increase_allowed:
            self._raise_assertion_error(
                "Memory usage increased over threshold of {:.3f}% from {} to {} ({:.3f}%)".format(
                    increase_allowed * 100, before_memory_usage, after_memory_usage,
                    perc_increase_memory_usage * 100))
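
    # Worked example for the check above (hypothetical numbers): with the
    # default increase_allowed=0.03, growth from 100000 kB to 102000 kB gives
    # 1 - 100000/102000 ~= 0.0196 (about 2.0%), which passes; growth to
    # 104000 kB gives ~= 0.0385 (about 3.8%), which fails.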

    @contextlib.contextmanager
    def profile_with_perf(self, profile_name):
        """
        Context manager that allows easy profiling of node activity using `perf`.

        See `test/functional/README.md` for details on perf usage.

        Args:
            profile_name (str): This string will be appended to the
                profile data filename generated by perf.
        """
        subp = self._start_perf(profile_name)

        yield

        if subp:
            self._stop_perf(profile_name)
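
    # Typical usage (hypothetical):
    #   with node.profile_with_perf("generate"):
    #       node.generate(100)
    # The resulting profile can then be inspected with `perf report` as
    # logged by _stop_perf().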

    def _start_perf(self, profile_name=None):
        """Start a perf process to profile this node.

        Returns the subprocess running perf."""
        subp = None

        def test_success(cmd):
            return subprocess.call(
                # shell=True required for pipe use below
                cmd, shell=True,
                stderr=subprocess.DEVNULL, stdout=subprocess.DEVNULL) == 0

        if not sys.platform.startswith('linux'):
            self.log.warning("Can't profile with perf; only available on Linux platforms")
            return None

        if not test_success('which perf'):
            self.log.warning("Can't profile with perf; must install perf-tools")
            return None

        if not test_success('readelf -S {} | grep .debug_str'.format(shlex.quote(self.binary))):
            self.log.warning(
                "perf output won't be very useful without debug symbols compiled into bitcoind")

        output_path = tempfile.NamedTemporaryFile(
            dir=self.datadir,
            prefix="{}.perf.data.".format(profile_name or 'test'),
            delete=False,
        ).name

        cmd = [
            'perf', 'record',
            '-g',  # Record the callgraph.
            '--call-graph', 'dwarf',  # Compatibility for gcc's --fomit-frame-pointer.
            '-F', '101',  # Sampling frequency in Hz.
            '-p', str(self.process.pid),
            '-o', output_path,
        ]
        subp = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        self.perf_subprocesses[profile_name] = subp

        return subp

    def _stop_perf(self, profile_name):
        """Stop (and pop) a perf subprocess."""
        subp = self.perf_subprocesses.pop(profile_name)
        output_path = subp.args[subp.args.index('-o') + 1]

        subp.terminate()
        subp.wait(timeout=10)

        stderr = subp.stderr.read().decode()
        if 'Consider tweaking /proc/sys/kernel/perf_event_paranoid' in stderr:
            self.log.warning(
                "perf couldn't collect data! Try "
                "'sudo sysctl -w kernel.perf_event_paranoid=-1'")
        else:
            report_cmd = "perf report -i {}".format(output_path)
            self.log.info("See perf output by running '{}'".format(report_cmd))

    def assert_start_raises_init_error(self, extra_args=None, expected_msg=None, match=ErrorMatch.FULL_TEXT, *args, **kwargs):
        """Attempt to start the node and expect it to raise an error.

        extra_args: extra arguments to pass through to dashd
        expected_msg: regex that stderr should match when dashd fails

        Will throw if dashd starts without an error.
        Will throw if an expected_msg is provided and it does not match dashd's stderr."""
        with tempfile.NamedTemporaryFile(dir=self.stderr_dir, delete=False) as log_stderr, \
             tempfile.NamedTemporaryFile(dir=self.stdout_dir, delete=False) as log_stdout:
            try:
                self.start(extra_args, stdout=log_stdout, stderr=log_stderr, *args, **kwargs)
                self.wait_for_rpc_connection()
                self.stop_node()
                self.wait_until_stopped()
            except FailedToStartError as e:
                self.log.debug('dashd failed to start: %s', e)
                self.running = False
                self.process = None
                # Check stderr for expected message
                if expected_msg is not None:
                    log_stderr.seek(0)
                    stderr = log_stderr.read().decode('utf-8').strip()
                    if match == ErrorMatch.PARTIAL_REGEX:
                        if re.search(expected_msg, stderr, flags=re.MULTILINE) is None:
                            self._raise_assertion_error(
                                'Expected message "{}" does not partially match stderr:\n"{}"'.format(expected_msg, stderr))
                    elif match == ErrorMatch.FULL_REGEX:
                        if re.fullmatch(expected_msg, stderr) is None:
                            self._raise_assertion_error(
                                'Expected message "{}" does not fully match stderr:\n"{}"'.format(expected_msg, stderr))
                    elif match == ErrorMatch.FULL_TEXT:
                        if expected_msg != stderr:
                            self._raise_assertion_error(
                                'Expected message "{}" does not fully match stderr:\n"{}"'.format(expected_msg, stderr))
            else:
                if expected_msg is None:
                    assert_msg = "dashd should have exited with an error"
                else:
                    assert_msg = "dashd should have exited with expected error " + expected_msg
                self._raise_assertion_error(assert_msg)
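
    # Typical usage (hypothetical; the exact error text is illustrative):
    #   node.assert_start_raises_init_error(
    #       extra_args=['-nonexistentarg'],
    #       expected_msg='Error parsing command line arguments',
    #       match=ErrorMatch.PARTIAL_REGEX)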

    def add_p2p_connection(self, p2p_conn, *, wait_for_verack=True, **kwargs):
        """Add a p2p connection to the node.

        This method adds the p2p connection to the self.p2ps list and also
        returns the connection to the caller."""
        if 'dstport' not in kwargs:
            kwargs['dstport'] = p2p_port(self.index)
        if 'dstaddr' not in kwargs:
            kwargs['dstaddr'] = '127.0.0.1'

        p2p_conn.peer_connect(**kwargs, net=self.chain)()
        self.p2ps.append(p2p_conn)
        if wait_for_verack:
            p2p_conn.wait_for_verack()

        return p2p_conn

    @property
    def p2p(self):
        """Return the first p2p connection

        Convenience property - most tests only use a single p2p connection to each
        node, so this saves having to write node.p2ps[0] many times."""
        assert self.p2ps, self._node_msg("No p2p connection")
        return self.p2ps[0]

    def disconnect_p2ps(self):
        """Close all p2p connections to the node."""
        for p in self.p2ps:
            p.peer_disconnect()

        # wait for p2p connections to disappear from getpeerinfo()
        def check_peers():
            for p in self.getpeerinfo():
                for p2p in self.p2ps:
                    if p['subver'] == p2p.strSubVer.decode():
                        return False
            return True
        wait_until(check_peers, timeout=5)

        del self.p2ps[:]


class TestNodeCLIAttr:
    def __init__(self, cli, command):
        self.cli = cli
        self.command = command

    def __call__(self, *args, **kwargs):
        return self.cli.send_cli(self.command, *args, **kwargs)

    def get_request(self, *args, **kwargs):
        return lambda: self(*args, **kwargs)


def arg_to_cli(arg):
    if isinstance(arg, bool):
        return str(arg).lower()
    elif isinstance(arg, dict) or isinstance(arg, list):
        return json.dumps(arg)
    else:
        return str(arg)
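
# Conversion examples for arg_to_cli (hypothetical inputs):
#   True        -> "true"
#   {"n": 1}    -> '{"n": 1}'   (via json.dumps)
#   0.1         -> "0.1"        (via str)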


class TestNodeCLI():
    """Interface to dash-cli for an individual node"""

    def __init__(self, binary, datadir):
        self.options = []
        self.binary = binary
        self.datadir = datadir
        self.input = None
        self.log = logging.getLogger('TestFramework.dashcli')

    def __call__(self, *options, input=None):
        # TestNodeCLI is callable with dash-cli command-line options
        cli = TestNodeCLI(self.binary, self.datadir)
        cli.options = [str(o) for o in options]
        cli.input = input
        return cli

    def __getattr__(self, command):
        return TestNodeCLIAttr(self, command)

    def batch(self, requests):
        results = []
        for request in requests:
            try:
                results.append(dict(result=request()))
            except JSONRPCException as e:
                results.append(dict(error=e))
        return results
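
    # Example of batch() (hypothetical):
    #   reqs = [cli.getblockcount.get_request(),
    #           cli.getbestblockhash.get_request()]
    #   cli.batch(reqs)  # -> [{'result': 123}, {'result': '000000...'}],
    #                    #    with {'error': e} entries for failed calls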

    def send_cli(self, command=None, *args, **kwargs):
        """Run dash-cli command. Deserializes returned string as python object."""
        pos_args = [arg_to_cli(arg) for arg in args]
        named_args = [str(key) + "=" + arg_to_cli(value) for (key, value) in kwargs.items()]
        assert not (pos_args and named_args), "Cannot use positional arguments and named arguments in the same dash-cli call"
        p_args = [self.binary, "-datadir=" + self.datadir] + self.options
        if named_args:
            p_args += ["-named"]
        if command is not None:
            p_args += [command]
        p_args += pos_args + named_args
        self.log.debug("Running dash-cli command: %s" % command)
        process = subprocess.Popen(p_args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
        cli_stdout, cli_stderr = process.communicate(input=self.input)
        returncode = process.poll()
        if returncode:
            match = re.match(r'error code: ([-0-9]+)\nerror message:\n(.*)', cli_stderr)
            if match:
                code, message = match.groups()
                raise JSONRPCException(dict(code=int(code), message=message))
            # Ignore cli_stdout, raise with cli_stderr
            raise subprocess.CalledProcessError(returncode, self.binary, output=cli_stderr)
        try:
            return json.loads(cli_stdout, parse_float=decimal.Decimal)
        except (json.JSONDecodeError, decimal.InvalidOperation):
            return cli_stdout.rstrip("\n")
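
# Typical end-to-end use of TestNodeCLI (hypothetical; normally reached via
# TestNode with use_cli=True rather than instantiated directly):
#   cli = TestNodeCLI("dash-cli", "/tmp/test_node0")
#   cli.getblockcount()                  # -> int, via __getattr__ + send_cli
#   cli("-rpcwallet=w1").getbalance()    # extra options via __call__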