mirror of
https://github.com/dashpay/dash.git
synced 2024-12-24 19:42:46 +01:00
Merge pull request #5009 from UdjinM6/pr4999_u
backport: bitcoin#15419, #16042, #17288, #16681, #18873, #18617, #18247, #18986, #18474, #19208 (functional tests)
This commit is contained in:
commit
1c78d69cd4
@ -237,6 +237,10 @@ gdb /home/example/dashd <pid>
|
||||
Note: gdb attach step may require ptrace_scope to be modified, or `sudo` preceding the `gdb`.
|
||||
See this link for considerations: https://www.kernel.org/doc/Documentation/security/Yama.txt
|
||||
|
||||
Often while debugging rpc calls from functional tests, the test might reach timeout before
|
||||
process can return a response. Use `--timeout-factor 0` to disable all rpc timeouts for that particular
|
||||
functional test. Ex: `test/functional/wallet_hd.py --timeout-factor 0`.
|
||||
|
||||
##### Profiling
|
||||
|
||||
An easy way to profile node performance during functional tests is provided
|
||||
|
@ -99,6 +99,16 @@ P2PInterface object and override the callback methods.
|
||||
Examples tests are [p2p_unrequested_blocks.py](p2p_unrequested_blocks.py),
|
||||
[p2p_compactblocks.py](p2p_compactblocks.py).
|
||||
|
||||
#### Prototyping tests
|
||||
|
||||
The [`TestShell`](test-shell.md) class exposes the BitcoinTestFramework
|
||||
functionality to interactive Python3 environments and can be used to prototype
|
||||
tests. This may be especially useful in a REPL environment with session logging
|
||||
utilities, such as
|
||||
[IPython](https://ipython.readthedocs.io/en/stable/interactive/reference.html#session-logging-and-restoring).
|
||||
The logs of such interactive sessions can later be adapted into permanent test
|
||||
cases.
|
||||
|
||||
### Test framework modules
|
||||
The following are useful modules for test developers. They are located in
|
||||
[test/functional/test_framework/](test_framework).
|
||||
|
@ -29,7 +29,7 @@ class AbortNodeTest(BitcoinTestFramework):
|
||||
datadir = get_datadir_path(self.options.tmpdir, 0)
|
||||
|
||||
# Deleting the undo file will result in reorg failure
|
||||
os.unlink(os.path.join(datadir, 'regtest', 'blocks', 'rev00000.dat'))
|
||||
os.unlink(os.path.join(datadir, self.chain, 'blocks', 'rev00000.dat'))
|
||||
|
||||
# Connecting to a node with a more work chain will trigger a reorg
|
||||
# attempt.
|
||||
|
@ -39,7 +39,7 @@ class ConfArgsTest(BitcoinTestFramework):
|
||||
if self.is_wallet_compiled():
|
||||
with open(inc_conf_file_path, 'w', encoding='utf8') as conf:
|
||||
conf.write("wallet=foo\n")
|
||||
self.nodes[0].assert_start_raises_init_error(expected_msg='Error: Config setting for -wallet only applied on regtest network when in [regtest] section.')
|
||||
self.nodes[0].assert_start_raises_init_error(expected_msg='Error: Config setting for -wallet only applied on %s network when in [%s] section.' % (self.chain, self.chain))
|
||||
|
||||
with open(inc_conf_file_path, 'w', encoding='utf-8') as conf:
|
||||
conf.write('regtest=0\n') # mainnet
|
||||
|
@ -19,7 +19,7 @@ class FilelockTest(BitcoinTestFramework):
|
||||
self.nodes[0].wait_for_rpc_connection()
|
||||
|
||||
def run_test(self):
|
||||
datadir = os.path.join(self.nodes[0].datadir, 'regtest')
|
||||
datadir = os.path.join(self.nodes[0].datadir, self.chain)
|
||||
self.log.info("Using datadir {}".format(datadir))
|
||||
|
||||
self.log.info("Check that we can't start a second dashd instance using the same datadir")
|
||||
|
@ -18,7 +18,6 @@ from test_framework.util import (
|
||||
assert_equal,
|
||||
assert_greater_than_or_equal,
|
||||
connect_nodes,
|
||||
sync_blocks,
|
||||
wait_until,
|
||||
)
|
||||
|
||||
@ -126,7 +125,7 @@ class LLMQQuorumRotationTest(DashTestFramework):
|
||||
|
||||
mninfos_online = self.mninfo.copy()
|
||||
nodes = [self.nodes[0]] + [mn.node for mn in mninfos_online]
|
||||
sync_blocks(nodes)
|
||||
self.sync_blocks(nodes)
|
||||
quorum_list = self.nodes[0].quorum("list", llmq_type)
|
||||
quorum_blockhash = self.nodes[0].getbestblockhash()
|
||||
fallback_blockhash = self.nodes[0].generate(1)[0]
|
||||
|
@ -16,10 +16,8 @@ import sys
|
||||
import tempfile
|
||||
import urllib
|
||||
|
||||
from test_framework.test_framework import (
|
||||
BitcoinTestFramework,
|
||||
)
|
||||
from test_framework.util import assert_equal, wait_until
|
||||
from test_framework.test_framework import BitcoinTestFramework
|
||||
from test_framework.util import assert_equal
|
||||
|
||||
|
||||
class LoadblockTest(BitcoinTestFramework):
|
||||
@ -38,7 +36,7 @@ class LoadblockTest(BitcoinTestFramework):
|
||||
cfg_file = os.path.join(data_dir, "linearize.cfg")
|
||||
bootstrap_file = os.path.join(self.options.tmpdir, "bootstrap.dat")
|
||||
genesis_block = self.nodes[0].getblockhash(0)
|
||||
blocks_dir = os.path.join(data_dir, "regtest", "blocks")
|
||||
blocks_dir = os.path.join(data_dir, self.chain, "blocks")
|
||||
hash_list = tempfile.NamedTemporaryFile(dir=data_dir,
|
||||
mode='w',
|
||||
delete=False,
|
||||
@ -75,7 +73,7 @@ class LoadblockTest(BitcoinTestFramework):
|
||||
self.log.info("Restart second, unsynced node with bootstrap file")
|
||||
self.stop_node(1)
|
||||
self.start_node(1, ["-loadblock=" + bootstrap_file])
|
||||
wait_until(lambda: self.nodes[1].getblockcount() == 100)
|
||||
assert_equal(self.nodes[1].getblockcount(), 100) # start_node is blocking on all block files being imported
|
||||
|
||||
assert_equal(self.nodes[1].getblockchaininfo()['blocks'], 100)
|
||||
assert_equal(self.nodes[0].getbestblockhash(), self.nodes[1].getbestblockhash())
|
||||
|
@ -44,6 +44,7 @@ RANGE_BEGIN = PORT_MIN + 2 * PORT_RANGE # Start after p2p and rpc ports
|
||||
class ProxyTest(BitcoinTestFramework):
|
||||
def set_test_params(self):
|
||||
self.num_nodes = 4
|
||||
self.setup_clean_chain = True
|
||||
|
||||
def setup_nodes(self):
|
||||
self.have_ipv6 = test_ipv6_local()
|
||||
@ -198,4 +199,3 @@ class ProxyTest(BitcoinTestFramework):
|
||||
|
||||
if __name__ == '__main__':
|
||||
ProxyTest().main()
|
||||
|
||||
|
@ -10,10 +10,10 @@
|
||||
"""
|
||||
|
||||
from test_framework.test_framework import BitcoinTestFramework
|
||||
from test_framework.util import wait_until
|
||||
from test_framework.util import assert_equal
|
||||
|
||||
|
||||
class ReindexTest(BitcoinTestFramework):
|
||||
|
||||
def set_test_params(self):
|
||||
self.setup_clean_chain = True
|
||||
self.num_nodes = 1
|
||||
@ -24,7 +24,7 @@ class ReindexTest(BitcoinTestFramework):
|
||||
self.stop_nodes()
|
||||
extra_args = [["-reindex-chainstate" if justchainstate else "-reindex"]]
|
||||
self.start_nodes(extra_args)
|
||||
wait_until(lambda: self.nodes[0].getblockcount() == blockcount)
|
||||
assert_equal(self.nodes[0].getblockcount(), blockcount) # start_node is blocking on reindex
|
||||
self.log.info("Success")
|
||||
|
||||
def run_test(self):
|
||||
|
@ -33,7 +33,7 @@ class RPCInterfaceTest(BitcoinTestFramework):
|
||||
command = info['active_commands'][0]
|
||||
assert_equal(command['method'], 'getrpcinfo')
|
||||
assert_greater_than_or_equal(command['duration'], 0)
|
||||
assert_equal(info['logpath'], os.path.join(self.nodes[0].datadir, 'regtest', 'debug.log'))
|
||||
assert_equal(info['logpath'], os.path.join(self.nodes[0].datadir, self.chain, 'debug.log'))
|
||||
|
||||
def test_batch_request(self):
|
||||
self.log.info("Testing basic JSON-RPC batch request...")
|
||||
|
@ -28,7 +28,6 @@ from test_framework.util import (
|
||||
assert_equal,
|
||||
assert_raises_rpc_error,
|
||||
hex_str_to_bytes,
|
||||
wait_until,
|
||||
)
|
||||
|
||||
|
||||
@ -37,7 +36,6 @@ class MempoolAcceptanceTest(BitcoinTestFramework):
|
||||
self.num_nodes = 1
|
||||
self.extra_args = [[
|
||||
'-txindex',
|
||||
'-reindex', # Need reindex for txindex
|
||||
]] * self.num_nodes
|
||||
self.supports_cli = False
|
||||
|
||||
@ -55,7 +53,7 @@ class MempoolAcceptanceTest(BitcoinTestFramework):
|
||||
|
||||
self.log.info('Start with empty mempool, and 200 blocks')
|
||||
self.mempool_size = 0
|
||||
wait_until(lambda: node.getblockcount() == 200)
|
||||
assert_equal(node.getblockcount(), 200)
|
||||
assert_equal(node.getmempoolinfo()['size'], self.mempool_size)
|
||||
coins = node.listunspent()
|
||||
|
||||
|
@ -40,7 +40,7 @@ import os
|
||||
|
||||
from test_framework.test_framework import BitcoinTestFramework
|
||||
# from test_framework.mininode import P2PTxInvStore
|
||||
from test_framework.util import assert_equal, assert_raises_rpc_error, connect_nodes, disconnect_nodes, wait_until
|
||||
from test_framework.util import assert_equal, assert_raises_rpc_error, connect_nodes, disconnect_nodes
|
||||
|
||||
|
||||
class MempoolPersistTest(BitcoinTestFramework):
|
||||
@ -88,8 +88,8 @@ class MempoolPersistTest(BitcoinTestFramework):
|
||||
self.start_node(1, extra_args=["-persistmempool=0"])
|
||||
self.start_node(0)
|
||||
self.start_node(2)
|
||||
wait_until(lambda: self.nodes[0].getmempoolinfo()["loaded"], timeout=1)
|
||||
wait_until(lambda: self.nodes[2].getmempoolinfo()["loaded"], timeout=1)
|
||||
assert self.nodes[0].getmempoolinfo()["loaded"] # start_node is blocking on the mempool being loaded
|
||||
assert self.nodes[2].getmempoolinfo()["loaded"]
|
||||
assert_equal(len(self.nodes[0].getrawmempool()), 6)
|
||||
assert_equal(len(self.nodes[2].getrawmempool()), 5)
|
||||
# The others have loaded their mempool. If node_1 loaded anything, we'd probably notice by now:
|
||||
@ -107,13 +107,13 @@ class MempoolPersistTest(BitcoinTestFramework):
|
||||
self.log.debug("Stop-start node0 with -persistmempool=0. Verify that it doesn't load its mempool.dat file.")
|
||||
self.stop_nodes()
|
||||
self.start_node(0, extra_args=["-persistmempool=0", "-disablewallet"])
|
||||
wait_until(lambda: self.nodes[0].getmempoolinfo()["loaded"])
|
||||
assert self.nodes[0].getmempoolinfo()["loaded"]
|
||||
assert_equal(len(self.nodes[0].getrawmempool()), 0)
|
||||
|
||||
self.log.debug("Stop-start node0. Verify that it has the transactions in its mempool.")
|
||||
self.stop_nodes()
|
||||
self.start_node(0)
|
||||
wait_until(lambda: self.nodes[0].getmempoolinfo()["loaded"])
|
||||
assert self.nodes[0].getmempoolinfo()["loaded"]
|
||||
assert_equal(len(self.nodes[0].getrawmempool()), 6)
|
||||
|
||||
mempooldat0 = os.path.join(self.nodes[0].datadir, self.chain, 'mempool.dat')
|
||||
@ -127,7 +127,7 @@ class MempoolPersistTest(BitcoinTestFramework):
|
||||
os.rename(mempooldat0, mempooldat1)
|
||||
self.stop_nodes()
|
||||
self.start_node(1, extra_args=[])
|
||||
wait_until(lambda: self.nodes[1].getmempoolinfo()["loaded"])
|
||||
assert self.nodes[1].getmempoolinfo()["loaded"]
|
||||
assert_equal(len(self.nodes[1].getrawmempool()), 6)
|
||||
|
||||
self.log.debug("Prevent dashd from writing mempool.dat to disk. Verify that `savemempool` fails")
|
||||
|
@ -140,7 +140,6 @@ class InvalidMessagesTest(BitcoinTestFramework):
|
||||
|
||||
# Node is still up.
|
||||
conn = node.add_p2p_connection(P2PDataStore())
|
||||
conn.sync_with_ping()
|
||||
|
||||
def test_magic_bytes(self):
|
||||
conn = self.nodes[0].add_p2p_connection(P2PDataStore())
|
||||
|
@ -24,6 +24,7 @@ from test_framework.util import (
|
||||
|
||||
banscore = 10
|
||||
|
||||
|
||||
class CLazyNode(P2PInterface):
|
||||
def __init__(self):
|
||||
super().__init__()
|
||||
@ -113,7 +114,11 @@ class P2PLeakTest(BitcoinTestFramework):
|
||||
def run_test(self):
|
||||
no_version_bannode = self.nodes[0].add_p2p_connection(CNodeNoVersionBan(), send_version=False, wait_for_verack=False)
|
||||
no_version_idlenode = self.nodes[0].add_p2p_connection(CNodeNoVersionIdle(), send_version=False, wait_for_verack=False)
|
||||
no_verack_idlenode = self.nodes[0].add_p2p_connection(CNodeNoVerackIdle())
|
||||
no_verack_idlenode = self.nodes[0].add_p2p_connection(CNodeNoVerackIdle(), wait_for_verack=False)
|
||||
|
||||
# Wait until we got the verack in response to the version. Though, don't wait for the other node to receive the
|
||||
# verack, since we never sent one
|
||||
no_verack_idlenode.wait_for_verack()
|
||||
|
||||
wait_until(lambda: no_version_bannode.ever_connected, timeout=10, lock=mininode_lock)
|
||||
wait_until(lambda: no_version_idlenode.ever_connected, timeout=10, lock=mininode_lock)
|
||||
|
@ -11,7 +11,7 @@ and that it responds to getdata requests for blocks correctly:
|
||||
from test_framework.messages import CInv, msg_getdata, NODE_BLOOM, NODE_NETWORK_LIMITED, NODE_HEADERS_COMPRESSED, msg_verack
|
||||
from test_framework.mininode import P2PInterface, mininode_lock
|
||||
from test_framework.test_framework import BitcoinTestFramework
|
||||
from test_framework.util import assert_equal, disconnect_nodes, connect_nodes, sync_blocks, wait_until
|
||||
from test_framework.util import assert_equal, disconnect_nodes, connect_nodes, wait_until
|
||||
|
||||
class P2PIgnoreInv(P2PInterface):
|
||||
firstAddrnServices = 0
|
||||
@ -60,7 +60,7 @@ class NodeNetworkLimitedTest(BitcoinTestFramework):
|
||||
self.log.info("Mine enough blocks to reach the NODE_NETWORK_LIMITED range.")
|
||||
connect_nodes(self.nodes[0], 1)
|
||||
blocks = self.nodes[1].generatetoaddress(292, self.nodes[1].get_deterministic_priv_key().address)
|
||||
sync_blocks([self.nodes[0], self.nodes[1]])
|
||||
self.sync_blocks([self.nodes[0], self.nodes[1]])
|
||||
|
||||
self.log.info("Make sure we can max retrieve block at tip-288.")
|
||||
node.send_getdata_for_block(blocks[1]) # last block in valid range
|
||||
@ -86,7 +86,7 @@ class NodeNetworkLimitedTest(BitcoinTestFramework):
|
||||
# because node 2 is in IBD and node 0 is a NODE_NETWORK_LIMITED peer, sync must not be possible
|
||||
connect_nodes(self.nodes[0], 2)
|
||||
try:
|
||||
sync_blocks([self.nodes[0], self.nodes[2]], timeout=5)
|
||||
self.sync_blocks([self.nodes[0], self.nodes[2]], timeout=5)
|
||||
except:
|
||||
pass
|
||||
# node2 must remain at height 0
|
||||
@ -96,7 +96,7 @@ class NodeNetworkLimitedTest(BitcoinTestFramework):
|
||||
connect_nodes(self.nodes[1], 2)
|
||||
|
||||
# sync must be possible
|
||||
sync_blocks(self.nodes)
|
||||
self.sync_blocks()
|
||||
|
||||
# disconnect all peers
|
||||
self.disconnect_all()
|
||||
@ -108,7 +108,7 @@ class NodeNetworkLimitedTest(BitcoinTestFramework):
|
||||
connect_nodes(self.nodes[0], 1)
|
||||
|
||||
# sync must be possible, node 1 is no longer in IBD and should therefore connect to node 0 (NODE_NETWORK_LIMITED)
|
||||
sync_blocks([self.nodes[0], self.nodes[1]])
|
||||
self.sync_blocks([self.nodes[0], self.nodes[1]])
|
||||
self.stop_node(0, expected_stderr='Warning: You are starting with governance validation disabled. This is expected because you are running a pruned node.')
|
||||
|
||||
|
||||
|
@ -27,11 +27,13 @@ from test_framework.messages import msg_ping
|
||||
from test_framework.mininode import P2PInterface
|
||||
from test_framework.test_framework import BitcoinTestFramework
|
||||
|
||||
|
||||
class TestP2PConn(P2PInterface):
|
||||
def on_version(self, message):
|
||||
# Don't send a verack in response
|
||||
pass
|
||||
|
||||
|
||||
class TimeoutsTest(BitcoinTestFramework):
|
||||
def set_test_params(self):
|
||||
self.setup_clean_chain = True
|
||||
@ -41,10 +43,14 @@ class TimeoutsTest(BitcoinTestFramework):
|
||||
|
||||
def run_test(self):
|
||||
# Setup the p2p connections
|
||||
no_verack_node = self.nodes[0].add_p2p_connection(TestP2PConn())
|
||||
no_verack_node = self.nodes[0].add_p2p_connection(TestP2PConn(), wait_for_verack=False)
|
||||
no_version_node = self.nodes[0].add_p2p_connection(TestP2PConn(), send_version=False, wait_for_verack=False)
|
||||
no_send_node = self.nodes[0].add_p2p_connection(TestP2PConn(), send_version=False, wait_for_verack=False)
|
||||
|
||||
# Wait until we got the verack in response to the version. Though, don't wait for the other node to receive the
|
||||
# verack, since we never sent one
|
||||
no_verack_node.wait_for_verack()
|
||||
|
||||
sleep(1)
|
||||
|
||||
assert no_verack_node.is_connected
|
||||
@ -77,5 +83,6 @@ class TimeoutsTest(BitcoinTestFramework):
|
||||
assert not no_version_node.is_connected
|
||||
assert not no_send_node.is_connected
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
TimeoutsTest().main()
|
||||
|
@ -31,10 +31,12 @@ from test_framework.util import (
|
||||
assert_raises_rpc_error,
|
||||
assert_is_hex_string,
|
||||
assert_is_hash_string,
|
||||
set_node_times,
|
||||
)
|
||||
from test_framework.blocktools import (
|
||||
create_block,
|
||||
create_coinbase,
|
||||
TIME_GENESIS_BLOCK,
|
||||
)
|
||||
from test_framework.messages import (
|
||||
CBlockHeader,
|
||||
@ -48,10 +50,12 @@ from test_framework.mininode import (
|
||||
|
||||
class BlockchainTest(BitcoinTestFramework):
|
||||
def set_test_params(self):
|
||||
self.setup_clean_chain = True
|
||||
self.num_nodes = 1
|
||||
self.supports_cli = False
|
||||
|
||||
def run_test(self):
|
||||
self.mine_chain()
|
||||
self.restart_node(0, extra_args=['-stopatheight=207', '-prune=1', '-txindex=0']) # Set extra args with pruning after rescan is complete
|
||||
|
||||
# Actual tests
|
||||
@ -65,6 +69,15 @@ class BlockchainTest(BitcoinTestFramework):
|
||||
self._test_waitforblockheight()
|
||||
assert self.nodes[0].verifychain(4, 0)
|
||||
|
||||
def mine_chain(self):
|
||||
self.log.info('Create some old blocks')
|
||||
address = self.nodes[0].get_deterministic_priv_key().address
|
||||
for t in range(TIME_GENESIS_BLOCK, TIME_GENESIS_BLOCK + 200 * 156, 156):
|
||||
# 156 sec steps from genesis block time
|
||||
set_node_times(self.nodes, t)
|
||||
self.nodes[0].generatetoaddress(1, address)
|
||||
assert_equal(self.nodes[0].getblockchaininfo()['blocks'], 200)
|
||||
|
||||
def _test_getblockchaininfo(self):
|
||||
self.log.info("Test getblockchaininfo")
|
||||
|
||||
|
@ -7,7 +7,7 @@
|
||||
from test_framework.test_framework import BitcoinTestFramework
|
||||
from test_framework.util import (
|
||||
assert_equal, assert_is_hex_string, assert_raises_rpc_error,
|
||||
connect_nodes, disconnect_nodes, sync_blocks
|
||||
connect_nodes, disconnect_nodes
|
||||
)
|
||||
|
||||
FILTER_TYPES = ["basic"]
|
||||
@ -30,7 +30,7 @@ class GetBlockFilterTest(BitcoinTestFramework):
|
||||
|
||||
# Reorg node 0 to a new chain
|
||||
connect_nodes(self.nodes[0], 1)
|
||||
sync_blocks(self.nodes)
|
||||
self.sync_blocks()
|
||||
|
||||
assert_equal(self.nodes[0].getblockcount(), 4)
|
||||
chain1_hashes = [self.nodes[0].getblockhash(block_height) for block_height in range(4)]
|
||||
|
@ -8,7 +8,6 @@ from test_framework.test_framework import BitcoinTestFramework
|
||||
from test_framework.util import (
|
||||
assert_equal,
|
||||
connect_nodes,
|
||||
sync_blocks,
|
||||
)
|
||||
|
||||
def unidirectional_node_sync_via_rpc(node_src, node_dest):
|
||||
@ -73,7 +72,7 @@ class PreciousTest(BitcoinTestFramework):
|
||||
assert_equal(self.nodes[0].getbestblockhash(), hashC)
|
||||
self.log.info("Make Node1 prefer block C")
|
||||
self.nodes[1].preciousblock(hashC)
|
||||
sync_blocks(self.nodes[0:2]) # wait because node 1 may not have downloaded hashC
|
||||
self.sync_blocks(self.nodes[0:2]) # wait because node 1 may not have downloaded hashC
|
||||
assert_equal(self.nodes[1].getbestblockhash(), hashC)
|
||||
self.log.info("Make Node1 prefer block G again")
|
||||
self.nodes[1].preciousblock(hashG)
|
||||
|
@ -54,7 +54,7 @@ class ScantxoutsetTest(BitcoinTestFramework):
|
||||
|
||||
self.log.info("Stop node, remove wallet, mine again some blocks...")
|
||||
self.stop_node(0)
|
||||
shutil.rmtree(os.path.join(self.nodes[0].datadir, "regtest", 'wallets'))
|
||||
shutil.rmtree(os.path.join(self.nodes[0].datadir, self.chain, 'wallets'))
|
||||
self.start_node(0)
|
||||
self.nodes[0].generate(110)
|
||||
|
||||
|
188
test/functional/test-shell.md
Normal file
188
test/functional/test-shell.md
Normal file
@ -0,0 +1,188 @@
|
||||
Test Shell for Interactive Environments
|
||||
=========================================
|
||||
|
||||
This document describes how to use the `TestShell` submodule in the functional
|
||||
test suite.
|
||||
|
||||
The `TestShell` submodule extends the `BitcoinTestFramework` functionality to
|
||||
external interactive environments for prototyping and educational purposes. Just
|
||||
like `BitcoinTestFramework`, the `TestShell` allows the user to:
|
||||
|
||||
* Manage regtest bitcoind subprocesses.
|
||||
* Access RPC interfaces of the underlying bitcoind instances.
|
||||
* Log events to the functional test logging utility.
|
||||
|
||||
The `TestShell` can be useful in interactive environments where it is necessary
|
||||
to extend the object lifetime of the underlying `BitcoinTestFramework` between
|
||||
user inputs. Such environments include the Python3 command line interpreter or
|
||||
[Jupyter](https://jupyter.org/) notebooks running a Python3 kernel.
|
||||
|
||||
## 1. Requirements
|
||||
|
||||
* Python3
|
||||
* `bitcoind` built in the same repository as the `TestShell`.
|
||||
|
||||
## 2. Importing `TestShell` from the Bitcoin Core repository
|
||||
|
||||
We can import the `TestShell` by adding the path of the Bitcoin Core
|
||||
`test_framework` module to the beginning of the PATH variable, and then
|
||||
importing the `TestShell` class from the `test_shell` sub-package.
|
||||
|
||||
```
|
||||
>>> import sys
|
||||
>>> sys.path.insert(0, "/path/to/bitcoin/test/functional")
|
||||
>>> from test_framework.test_shell import TestShell
|
||||
```
|
||||
|
||||
The following `TestShell` methods manage the lifetime of the underlying bitcoind
|
||||
processes and logging utilities.
|
||||
|
||||
* `TestShell.setup()`
|
||||
* `TestShell.shutdown()`
|
||||
|
||||
The `TestShell` inherits all `BitcoinTestFramework` members and methods, such
|
||||
as:
|
||||
* `TestShell.nodes[index].rpc_method()`
|
||||
* `TestShell.log.info("Custom log message")`
|
||||
|
||||
The following sections demonstrate how to initialize, run, and shut down a
|
||||
`TestShell` object.
|
||||
|
||||
## 3. Initializing a `TestShell` object
|
||||
|
||||
```
|
||||
>>> test = TestShell()
|
||||
>>> test.setup(num_nodes=2, setup_clean_chain=True)
|
||||
20XX-XX-XXTXX:XX:XX.XXXXXXX TestFramework (INFO): Initializing test directory /path/to/bitcoin_func_test_XXXXXXX
|
||||
```
|
||||
The `TestShell` forwards all functional test parameters of the parent
|
||||
`BitcoinTestFramework` object. The full set of argument keywords which can be
|
||||
used to initialize the `TestShell` can be found in [section
|
||||
#6](#custom-testshell-parameters) of this document.
|
||||
|
||||
**Note: Running multiple instances of `TestShell` is not allowed.** Running a
|
||||
single process also ensures that logging remains consolidated in the same
|
||||
temporary folder. If you need more bitcoind nodes than set by default (1),
|
||||
simply increase the `num_nodes` parameter during setup.
|
||||
|
||||
```
|
||||
>>> test2 = TestShell()
|
||||
>>> test2.setup()
|
||||
TestShell is already running!
|
||||
```
|
||||
|
||||
## 4. Interacting with the `TestShell`
|
||||
|
||||
Unlike the `BitcoinTestFramework` class, the `TestShell` keeps the underlying
|
||||
Bitcoind subprocesses (nodes) and logging utilities running until the user
|
||||
explicitly shuts down the `TestShell` object.
|
||||
|
||||
During the time between the `setup` and `shutdown` calls, all `bitcoind` node
|
||||
processes and `BitcoinTestFramework` convenience methods can be accessed
|
||||
interactively.
|
||||
|
||||
**Example: Mining a regtest chain**
|
||||
|
||||
By default, the `TestShell` nodes are initialized with a clean chain. This means
|
||||
that each node of the `TestShell` is initialized with a block height of 0.
|
||||
|
||||
```
|
||||
>>> test.nodes[0].getblockchaininfo()["blocks"]
|
||||
0
|
||||
```
|
||||
|
||||
We now let the first node generate 101 regtest blocks, and direct the coinbase
|
||||
rewards to a wallet address owned by the mining node.
|
||||
|
||||
```
|
||||
>>> address = test.nodes[0].getnewaddress()
|
||||
>>> test.nodes[0].generatetoaddress(101, address)
|
||||
['2b98dd0044aae6f1cca7f88a0acf366a4bfe053c7f7b00da3c0d115f03d67efb', ...
|
||||
```
|
||||
Since the two nodes are both initialized by default to establish an outbound
|
||||
connection to each other during `setup`, the second node's chain will include
|
||||
the mined blocks as soon as they propagate.
|
||||
|
||||
```
|
||||
>>> test.nodes[1].getblockchaininfo()["blocks"]
|
||||
101
|
||||
```
|
||||
The block rewards from the first block are now spendable by the wallet of the
|
||||
first node.
|
||||
|
||||
```
|
||||
>>> test.nodes[0].getbalance()
|
||||
Decimal('50.00000000')
|
||||
```
|
||||
|
||||
We can also log custom events to the logger.
|
||||
|
||||
```
|
||||
>>> test.nodes[0].log.info("Successfully mined regtest chain!")
|
||||
20XX-XX-XXTXX:XX:XX.XXXXXXX TestFramework.node0 (INFO): Successfully mined regtest chain!
|
||||
```
|
||||
|
||||
**Note: Please also consider the functional test
|
||||
[readme](../test/functional/README.md), which provides an overview of the
|
||||
test-framework**. Modules such as
|
||||
[key.py](../test/functional/test_framework/key.py),
|
||||
[script.py](../test/functional/test_framework/script.py) and
|
||||
[messages.py](../test/functional/test_framework/messages.py) are particularly
|
||||
useful in constructing objects which can be passed to the bitcoind nodes managed
|
||||
by a running `TestShell` object.
|
||||
|
||||
## 5. Shutting the `TestShell` down
|
||||
|
||||
Shutting down the `TestShell` will safely tear down all running bitcoind
|
||||
instances and remove all temporary data and logging directories.
|
||||
|
||||
```
|
||||
>>> test.shutdown()
|
||||
20XX-XX-XXTXX:XX:XX.XXXXXXX TestFramework (INFO): Stopping nodes
|
||||
20XX-XX-XXTXX:XX:XX.XXXXXXX TestFramework (INFO): Cleaning up /path/to/bitcoin_func_test_XXXXXXX on exit
|
||||
20XX-XX-XXTXX:XX:XX.XXXXXXX TestFramework (INFO): Tests successful
|
||||
```
|
||||
To prevent the logs from being removed after a shutdown, simply set the
|
||||
`TestShell.options.nocleanup` member to `True`.
|
||||
```
|
||||
>>> test.options.nocleanup = True
|
||||
>>> test.shutdown()
|
||||
20XX-XX-XXTXX:XX:XX.XXXXXXX TestFramework (INFO): Stopping nodes
|
||||
20XX-XX-XXTXX:XX:XX.XXXXXXX TestFramework (INFO): Not cleaning up dir /path/to/bitcoin_func_test_XXXXXXX on exit
|
||||
20XX-XX-XXTXX:XX:XX.XXXXXXX TestFramework (INFO): Tests successful
|
||||
```
|
||||
|
||||
The following utility consolidates logs from the bitcoind nodes and the
|
||||
underlying `BitcoinTestFramework`:
|
||||
|
||||
* `/path/to/bitcoin/test/functional/combine_logs.py
|
||||
'/path/to/bitcoin_func_test_XXXXXXX'`
|
||||
|
||||
## 6. Custom `TestShell` parameters
|
||||
|
||||
The `TestShell` object initializes with the default settings inherited from the
|
||||
`BitcoinTestFramework` class. The user can override these in
|
||||
`TestShell.setup(key=value)`.
|
||||
|
||||
**Note:** `TestShell.reset()` will reset test parameters to default values and
|
||||
can be called after the TestShell is shut down.
|
||||
|
||||
| Test parameter key | Default Value | Description |
|
||||
|---|---|---|
|
||||
| `bind_to_localhost_only` | `True` | Binds bitcoind RPC services to `127.0.0.1` if set to `True`.|
|
||||
| `cachedir` | `"/path/to/bitcoin/test/cache"` | Sets the bitcoind datadir directory. |
|
||||
| `chain` | `"regtest"` | Sets the chain-type for the underlying test bitcoind processes. |
|
||||
| `configfile` | `"/path/to/bitcoin/test/config.ini"` | Sets the location of the test framework config file. |
|
||||
| `coveragedir` | `None` | Records bitcoind RPC test coverage into this directory if set. |
|
||||
| `loglevel` | `INFO` | Logs events at this level and higher. Can be set to `DEBUG`, `INFO`, `WARNING`, `ERROR` or `CRITICAL`. |
|
||||
| `nocleanup` | `False` | Does not clean up the temporary test directory during `shutdown` if set to `True`. |
|
||||
| `noshutdown` | `False` | Does not stop bitcoind instances after `shutdown` if set to `True`. |
|
||||
| `num_nodes` | `1` | Sets the number of initialized bitcoind processes. |
|
||||
| `perf` | False | Profiles running nodes with `perf` for the duration of the test if set to `True`. |
|
||||
| `rpc_timeout` | `60` | Sets the RPC server timeout for the underlying bitcoind processes. |
|
||||
| `setup_clean_chain` | `True` | Initializes an empty blockchain by default. A 199-block-long chain is initialized if set to `False`. |
|
||||
| `randomseed` | Random Integer | `TestShell.options.randomseed` is a member of `TestShell` which can be accessed during a test to seed a random generator. User can override default with a constant value for reproducible test runs. |
|
||||
| `supports_cli` | `False` | Whether the bitcoin-cli utility is compiled and available for the test. |
|
||||
| `tmpdir` | `"/var/folders/.../"` | Sets directory for test logs. Will be deleted upon a successful test run unless `nocleanup` is set to `True` |
|
||||
| `trace_rpc` | `False` | Logs all RPC calls if set to `True`. |
|
||||
| `usecli` | `False` | Uses the bitcoin-cli interface for all bitcoind commands instead of directly calling the RPC server. Requires `supports_cli`. |
|
@ -22,7 +22,6 @@ from io import BytesIO
|
||||
import logging
|
||||
import struct
|
||||
import sys
|
||||
import time
|
||||
import threading
|
||||
|
||||
from test_framework.messages import (
|
||||
@ -155,8 +154,9 @@ class P2PConnection(asyncio.Protocol):
|
||||
def is_connected(self):
|
||||
return self._transport is not None
|
||||
|
||||
def peer_connect(self, dstaddr, dstport, *, net, uacomment=None):
|
||||
def peer_connect(self, dstaddr, dstport, *, net, timeout_factor, uacomment=None):
|
||||
assert not self.is_connected
|
||||
self.timeout_factor = timeout_factor
|
||||
self.dstaddr = dstaddr
|
||||
self.dstport = dstport
|
||||
# The initial message to send after the connection was made:
|
||||
@ -440,11 +440,12 @@ class P2PInterface(P2PConnection):
|
||||
|
||||
# Connection helper methods
|
||||
|
||||
def wait_until(self, test_function, timeout):
|
||||
wait_until(test_function, timeout=timeout, lock=mininode_lock, timeout_factor=self.timeout_factor)
|
||||
|
||||
def wait_for_disconnect(self, timeout=60):
|
||||
test_function = lambda: not self.is_connected
|
||||
wait_until(test_function, timeout=timeout, lock=mininode_lock)
|
||||
# This is a hack. The related issues should be fixed by bitcoin 14119 and 14457.
|
||||
time.sleep(1)
|
||||
self.wait_until(test_function, timeout=timeout)
|
||||
|
||||
# Message receiving helper methods
|
||||
|
||||
@ -455,14 +456,14 @@ class P2PInterface(P2PConnection):
|
||||
return False
|
||||
return self.last_message['tx'].tx.rehash() == txid
|
||||
|
||||
wait_until(test_function, timeout=timeout, lock=mininode_lock)
|
||||
self.wait_until(test_function, timeout=timeout)
|
||||
|
||||
def wait_for_block(self, blockhash, timeout=60):
|
||||
def test_function():
|
||||
assert self.is_connected
|
||||
return self.last_message.get("block") and self.last_message["block"].block.rehash() == blockhash
|
||||
|
||||
wait_until(test_function, timeout=timeout, lock=mininode_lock)
|
||||
self.wait_until(test_function, timeout=timeout)
|
||||
|
||||
def wait_for_header(self, blockhash, timeout=60):
|
||||
def test_function():
|
||||
@ -472,7 +473,7 @@ class P2PInterface(P2PConnection):
|
||||
return False
|
||||
return last_headers.headers[0].rehash() == blockhash
|
||||
|
||||
wait_until(test_function, timeout=timeout, lock=mininode_lock)
|
||||
self.wait_until(test_function, timeout=timeout)
|
||||
|
||||
def wait_for_getdata(self, timeout=60):
|
||||
"""Waits for a getdata message.
|
||||
@ -486,7 +487,7 @@ class P2PInterface(P2PConnection):
|
||||
assert self.is_connected
|
||||
return self.last_message.get("getdata")
|
||||
|
||||
wait_until(test_function, timeout=timeout, lock=mininode_lock)
|
||||
self.wait_until(test_function, timeout=timeout)
|
||||
|
||||
def wait_for_getheaders(self, timeout=60):
|
||||
"""Waits for a getheaders message.
|
||||
@ -501,7 +502,7 @@ class P2PInterface(P2PConnection):
|
||||
return self.last_message.get("getheaders2") if self.nServices & NODE_HEADERS_COMPRESSED \
|
||||
else self.last_message.get("getheaders")
|
||||
|
||||
wait_until(test_function, timeout=timeout, lock=mininode_lock)
|
||||
self.wait_until(test_function, timeout=timeout)
|
||||
|
||||
# TODO: enable when p2p_filter.py is backported
|
||||
# def wait_for_inv(self, expected_inv, timeout=60):
|
||||
@ -515,13 +516,13 @@ class P2PInterface(P2PConnection):
|
||||
# self.last_message["inv"].inv[0].type == expected_inv[0].type and \
|
||||
# self.last_message["inv"].inv[0].hash == expected_inv[0].hash
|
||||
|
||||
# wait_until(test_function, timeout=timeout, lock=mininode_lock)
|
||||
# self.wait_until(test_function, timeout=timeout)
|
||||
|
||||
def wait_for_verack(self, timeout=60):
|
||||
def test_function():
|
||||
return "verack" in self.last_message
|
||||
|
||||
wait_until(test_function, timeout=timeout, lock=mininode_lock)
|
||||
self.wait_until(test_function, timeout=timeout)
|
||||
|
||||
# Message sending helper functions
|
||||
|
||||
@ -537,7 +538,7 @@ class P2PInterface(P2PConnection):
|
||||
assert self.is_connected
|
||||
return self.last_message.get("pong") and self.last_message["pong"].nonce == self.ping_counter
|
||||
|
||||
wait_until(test_function, timeout=timeout, lock=mininode_lock)
|
||||
self.wait_until(test_function, timeout=timeout)
|
||||
self.ping_counter += 1
|
||||
|
||||
|
||||
@ -569,7 +570,8 @@ class NetworkThread(threading.Thread):
|
||||
wait_until(lambda: not self.network_event_loop.is_running(), timeout=timeout)
|
||||
self.network_event_loop.close()
|
||||
self.join(timeout)
|
||||
|
||||
# Safe to remove event loop.
|
||||
NetworkThread.network_event_loop = None
|
||||
|
||||
class P2PDataStore(P2PInterface):
|
||||
"""A P2P data store class.
|
||||
@ -665,7 +667,7 @@ class P2PDataStore(P2PInterface):
|
||||
self.send_message(msg_block(block=b))
|
||||
else:
|
||||
self.send_message(msg_headers([CBlockHeader(block) for block in blocks]))
|
||||
wait_until(lambda: blocks[-1].sha256 in self.getdata_requests, timeout=timeout, lock=mininode_lock)
|
||||
self.wait_until(lambda: blocks[-1].sha256 in self.getdata_requests, timeout=timeout)
|
||||
|
||||
if expect_disconnect:
|
||||
self.wait_for_disconnect()
|
||||
@ -673,7 +675,7 @@ class P2PDataStore(P2PInterface):
|
||||
self.sync_with_ping()
|
||||
|
||||
if success:
|
||||
wait_until(lambda: node.getbestblockhash() == blocks[-1].hash, timeout=timeout)
|
||||
self.wait_until(lambda: node.getbestblockhash() == blocks[-1].hash, timeout=timeout)
|
||||
else:
|
||||
assert node.getbestblockhash() != blocks[-1].hash
|
||||
|
||||
|
@ -49,12 +49,11 @@ from .util import (
|
||||
set_node_times,
|
||||
set_timeout_scale,
|
||||
satoshi_round,
|
||||
sync_blocks,
|
||||
sync_mempools,
|
||||
wait_until,
|
||||
get_chain_folder,
|
||||
)
|
||||
|
||||
|
||||
class TestStatus(Enum):
|
||||
PASSED = 1
|
||||
FAILED = 2
|
||||
@ -121,12 +120,42 @@ class BitcoinTestFramework(metaclass=BitcoinTestMetaClass):
|
||||
self.bind_to_localhost_only = True
|
||||
self.extra_args_from_options = []
|
||||
self.set_test_params()
|
||||
|
||||
assert hasattr(self, "num_nodes"), "Test must set self.num_nodes in set_test_params()"
|
||||
self.parse_args()
|
||||
if self.options.timeout_factor == 0 :
|
||||
self.options.timeout_factor = 99999
|
||||
self.rpc_timeout = int(self.rpc_timeout * self.options.timeout_factor) # optionally, increase timeout by a factor
|
||||
|
||||
def main(self):
|
||||
"""Main function. This should not be overridden by the subclass test scripts."""
|
||||
|
||||
assert hasattr(self, "num_nodes"), "Test must set self.num_nodes in set_test_params()"
|
||||
|
||||
try:
|
||||
self.setup()
|
||||
self.run_test()
|
||||
except JSONRPCException:
|
||||
self.log.exception("JSONRPC error")
|
||||
self.success = TestStatus.FAILED
|
||||
except SkipTest as e:
|
||||
self.log.warning("Test Skipped: %s" % e.message)
|
||||
self.success = TestStatus.SKIPPED
|
||||
except AssertionError:
|
||||
self.log.exception("Assertion failed")
|
||||
self.success = TestStatus.FAILED
|
||||
except KeyError:
|
||||
self.log.exception("Key error")
|
||||
self.success = TestStatus.FAILED
|
||||
except Exception:
|
||||
self.log.exception("Unexpected exception caught during testing")
|
||||
self.success = TestStatus.FAILED
|
||||
except KeyboardInterrupt:
|
||||
self.log.warning("Exiting after keyboard interrupt")
|
||||
self.success = TestStatus.FAILED
|
||||
finally:
|
||||
exit_code = self.shutdown()
|
||||
sys.exit(exit_code)
|
||||
|
||||
def parse_args(self):
|
||||
parser = argparse.ArgumentParser(usage="%(prog)s [options]")
|
||||
parser.add_argument("--nocleanup", dest="nocleanup", default=False, action="store_true",
|
||||
help="Leave dashds and test.* datadir on exit or error")
|
||||
@ -160,6 +189,7 @@ class BitcoinTestFramework(metaclass=BitcoinTestMetaClass):
|
||||
help="run nodes under the valgrind memory error detector: expect at least a ~10x slowdown, valgrind 3.14 or later required")
|
||||
parser.add_argument("--randomseed", type=int,
|
||||
help="set a random seed for deterministically reproducing a previous test run")
|
||||
parser.add_argument('--timeout-factor', dest="timeout_factor", type=float, default=1.0, help='adjust test timeouts by a factor. Setting it to 0 disables all timeouts')
|
||||
self.add_options(parser)
|
||||
# Running TestShell in a Jupyter notebook causes an additional -f argument
|
||||
# To keep TestShell from failing with an "unrecognized argument" error, we add a dummy "-f" argument
|
||||
@ -167,6 +197,9 @@ class BitcoinTestFramework(metaclass=BitcoinTestMetaClass):
|
||||
parser.add_argument("-f", "--fff", help="a dummy argument to fool ipython", default="1")
|
||||
self.options = parser.parse_args()
|
||||
|
||||
def setup(self):
|
||||
"""Call this method to start up the test framework object with options set."""
|
||||
|
||||
if self.options.timeout_scale < 1:
|
||||
raise RuntimeError("--timeoutscale can't be less than 1")
|
||||
|
||||
@ -220,33 +253,20 @@ class BitcoinTestFramework(metaclass=BitcoinTestMetaClass):
|
||||
self.network_thread = NetworkThread()
|
||||
self.network_thread.start()
|
||||
|
||||
success = TestStatus.FAILED
|
||||
if self.options.usecli:
|
||||
if not self.supports_cli:
|
||||
raise SkipTest("--usecli specified but test does not support using CLI")
|
||||
self.skip_if_no_cli()
|
||||
self.skip_test_if_missing_module()
|
||||
self.setup_chain()
|
||||
self.setup_network()
|
||||
|
||||
try:
|
||||
if self.options.usecli:
|
||||
if not self.supports_cli:
|
||||
raise SkipTest("--usecli specified but test does not support using CLI")
|
||||
self.skip_if_no_cli()
|
||||
self.skip_test_if_missing_module()
|
||||
self.setup_chain()
|
||||
self.setup_network()
|
||||
self.run_test()
|
||||
success = TestStatus.PASSED
|
||||
except JSONRPCException:
|
||||
self.log.exception("JSONRPC error")
|
||||
except SkipTest as e:
|
||||
self.log.warning("Test Skipped: %s" % e.message)
|
||||
success = TestStatus.SKIPPED
|
||||
except AssertionError:
|
||||
self.log.exception("Assertion failed")
|
||||
except KeyError:
|
||||
self.log.exception("Key error")
|
||||
except Exception:
|
||||
self.log.exception("Unexpected exception caught during testing")
|
||||
except KeyboardInterrupt:
|
||||
self.log.warning("Exiting after keyboard interrupt")
|
||||
self.success = TestStatus.PASSED
|
||||
|
||||
if success == TestStatus.FAILED and self.options.pdbonfailure:
|
||||
def shutdown(self):
|
||||
"""Call this method to shut down the test framework object."""
|
||||
|
||||
if self.success == TestStatus.FAILED and self.options.pdbonfailure:
|
||||
print("Testcase failed. Attaching python debugger. Enter ? for help")
|
||||
pdb.set_trace()
|
||||
|
||||
@ -258,7 +278,7 @@ class BitcoinTestFramework(metaclass=BitcoinTestMetaClass):
|
||||
if self.nodes:
|
||||
self.stop_nodes()
|
||||
except BaseException:
|
||||
success = False
|
||||
self.success = TestStatus.FAILED
|
||||
self.log.exception("Unexpected exception caught during shutdown")
|
||||
else:
|
||||
for node in self.nodes:
|
||||
@ -268,7 +288,7 @@ class BitcoinTestFramework(metaclass=BitcoinTestMetaClass):
|
||||
should_clean_up = (
|
||||
not self.options.nocleanup and
|
||||
not self.options.noshutdown and
|
||||
success != TestStatus.FAILED and
|
||||
self.success != TestStatus.FAILED and
|
||||
not self.options.perf
|
||||
)
|
||||
if should_clean_up:
|
||||
@ -281,10 +301,10 @@ class BitcoinTestFramework(metaclass=BitcoinTestMetaClass):
|
||||
self.log.warning("Not cleaning up dir {}".format(self.options.tmpdir))
|
||||
cleanup_tree_on_exit = False
|
||||
|
||||
if success == TestStatus.PASSED:
|
||||
if self.success == TestStatus.PASSED:
|
||||
self.log.info("Tests successful")
|
||||
exit_code = TEST_EXIT_PASSED
|
||||
elif success == TestStatus.SKIPPED:
|
||||
elif self.success == TestStatus.SKIPPED:
|
||||
self.log.info("Test skipped")
|
||||
exit_code = TEST_EXIT_SKIPPED
|
||||
else:
|
||||
@ -296,10 +316,23 @@ class BitcoinTestFramework(metaclass=BitcoinTestMetaClass):
|
||||
self.log.error(self.config['environment']['PACKAGE_BUGREPORT'])
|
||||
self.log.error("")
|
||||
exit_code = TEST_EXIT_FAILED
|
||||
logging.shutdown()
|
||||
# Logging.shutdown will not remove stream- and filehandlers, so we must
|
||||
# do it explicitly. Handlers are removed so the next test run can apply
|
||||
# different log handler settings.
|
||||
# See: https://docs.python.org/3/library/logging.html#logging.shutdown
|
||||
for h in list(self.log.handlers):
|
||||
h.flush()
|
||||
h.close()
|
||||
self.log.removeHandler(h)
|
||||
rpc_logger = logging.getLogger("BitcoinRPC")
|
||||
for h in list(rpc_logger.handlers):
|
||||
h.flush()
|
||||
rpc_logger.removeHandler(h)
|
||||
if cleanup_tree_on_exit:
|
||||
shutil.rmtree(self.options.tmpdir)
|
||||
sys.exit(exit_code)
|
||||
|
||||
self.nodes.clear()
|
||||
return exit_code
|
||||
|
||||
# Methods to override in subclass test scripts.
|
||||
def set_test_params(self):
|
||||
@ -353,6 +386,20 @@ class BitcoinTestFramework(metaclass=BitcoinTestMetaClass):
|
||||
self.add_nodes(self.num_nodes, extra_args)
|
||||
self.start_nodes()
|
||||
self.import_deterministic_coinbase_privkeys()
|
||||
if not self.setup_clean_chain:
|
||||
for n in self.nodes:
|
||||
assert_equal(n.getblockchaininfo()["blocks"], 199)
|
||||
# To ensure that all nodes are out of IBD, the most recent block
|
||||
# must have a timestamp not too old (see IsInitialBlockDownload()).
|
||||
self.log.debug('Generate a block with current mocktime')
|
||||
self.bump_mocktime(156 * 200)
|
||||
block_hash = self.nodes[0].generate(1)[0]
|
||||
block = self.nodes[0].getblock(blockhash=block_hash, verbosity=0)
|
||||
for n in self.nodes:
|
||||
n.submitblock(block)
|
||||
chain_info = n.getblockchaininfo()
|
||||
assert_equal(chain_info["blocks"], 200)
|
||||
assert_equal(chain_info["initialblockdownload"], False)
|
||||
|
||||
def import_deterministic_coinbase_privkeys(self):
|
||||
for n in self.nodes:
|
||||
@ -395,6 +442,7 @@ class BitcoinTestFramework(metaclass=BitcoinTestMetaClass):
|
||||
chain=self.chain,
|
||||
rpchost=rpchost,
|
||||
timewait=self.rpc_timeout,
|
||||
timeout_factor=self.options.timeout_factor,
|
||||
bitcoind=binary[i],
|
||||
bitcoin_cli=self.options.bitcoincli,
|
||||
mocktime=self.mocktime,
|
||||
@ -477,21 +525,54 @@ class BitcoinTestFramework(metaclass=BitcoinTestMetaClass):
|
||||
connect_nodes(self.nodes[1], 2)
|
||||
self.sync_all()
|
||||
|
||||
def sync_blocks(self, nodes=None, **kwargs):
|
||||
sync_blocks(nodes or self.nodes, **kwargs)
|
||||
def sync_blocks(self, nodes=None, wait=1, timeout=60):
|
||||
"""
|
||||
Wait until everybody has the same tip.
|
||||
sync_blocks needs to be called with an rpc_connections set that has least
|
||||
one node already synced to the latest, stable tip, otherwise there's a
|
||||
chance it might return before all nodes are stably synced.
|
||||
"""
|
||||
rpc_connections = nodes or self.nodes
|
||||
timeout = int(timeout * self.options.timeout_factor)
|
||||
timeout *= self.options.timeout_scale
|
||||
stop_time = time.time() + timeout
|
||||
while time.time() <= stop_time:
|
||||
best_hash = [x.getbestblockhash() for x in rpc_connections]
|
||||
if best_hash.count(best_hash[0]) == len(rpc_connections):
|
||||
return
|
||||
# Check that each peer has at least one connection
|
||||
assert (all([len(x.getpeerinfo()) for x in rpc_connections]))
|
||||
time.sleep(wait)
|
||||
raise AssertionError("Block sync timed out:{}".format("".join("\n {!r}".format(b) for b in best_hash)))
|
||||
|
||||
def sync_mempools(self, nodes=None, **kwargs):
|
||||
if self.mocktime != 0:
|
||||
if 'wait' not in kwargs:
|
||||
kwargs['wait'] = 0.1
|
||||
if 'wait_func' not in kwargs:
|
||||
kwargs['wait_func'] = lambda: self.bump_mocktime(3, nodes=nodes)
|
||||
def sync_mempools(self, nodes=None, wait=1, timeout=60, flush_scheduler=True, wait_func=None):
|
||||
"""
|
||||
Wait until everybody has the same transactions in their memory
|
||||
pools
|
||||
"""
|
||||
rpc_connections = nodes or self.nodes
|
||||
timeout = int(timeout * self.options.timeout_factor)
|
||||
timeout *= self.options.timeout_scale
|
||||
stop_time = time.time() + timeout
|
||||
if self.mocktime != 0 and wait_func is None:
|
||||
wait_func = lambda: self.bump_mocktime(3, nodes=nodes)
|
||||
while time.time() <= stop_time:
|
||||
pool = [set(r.getrawmempool()) for r in rpc_connections]
|
||||
if pool.count(pool[0]) == len(rpc_connections):
|
||||
if flush_scheduler:
|
||||
for r in rpc_connections:
|
||||
r.syncwithvalidationinterfacequeue()
|
||||
return
|
||||
# Check that each peer has at least one connection
|
||||
assert (all([len(x.getpeerinfo()) for x in rpc_connections]))
|
||||
if wait_func is not None:
|
||||
wait_func()
|
||||
time.sleep(wait)
|
||||
raise AssertionError("Mempool sync timed out:{}".format("".join("\n {!r}".format(m) for m in pool)))
|
||||
|
||||
sync_mempools(nodes or self.nodes, **kwargs)
|
||||
|
||||
def sync_all(self, nodes=None, **kwargs):
|
||||
self.sync_blocks(nodes, **kwargs)
|
||||
self.sync_mempools(nodes, **kwargs)
|
||||
def sync_all(self, nodes=None):
|
||||
self.sync_blocks(nodes)
|
||||
self.sync_mempools(nodes)
|
||||
|
||||
def disable_mocktime(self):
|
||||
self.mocktime = 0
|
||||
@ -499,15 +580,13 @@ class BitcoinTestFramework(metaclass=BitcoinTestMetaClass):
|
||||
node.mocktime = 0
|
||||
|
||||
def bump_mocktime(self, t, update_nodes=True, nodes=None):
|
||||
self.mocktime += t
|
||||
if update_nodes:
|
||||
set_node_times(nodes or self.nodes, self.mocktime)
|
||||
if self.mocktime != 0:
|
||||
self.mocktime += t
|
||||
if update_nodes:
|
||||
set_node_times(nodes or self.nodes, self.mocktime)
|
||||
|
||||
def set_cache_mocktime(self):
|
||||
# For backwared compatibility of the python scripts
|
||||
# with previous versions of the cache, set MOCKTIME
|
||||
# to regtest genesis time + (201 * 156)
|
||||
self.mocktime = TIME_GENESIS_BLOCK + (201 * 156)
|
||||
self.mocktime = TIME_GENESIS_BLOCK + (199 * 156)
|
||||
for node in self.nodes:
|
||||
node.mocktime = self.mocktime
|
||||
|
||||
@ -546,87 +625,76 @@ class BitcoinTestFramework(metaclass=BitcoinTestMetaClass):
|
||||
rpc_handler.setLevel(logging.DEBUG)
|
||||
rpc_logger.addHandler(rpc_handler)
|
||||
|
||||
def _initialize_chain(self, extra_args=None):
|
||||
def _initialize_chain(self):
|
||||
"""Initialize a pre-mined blockchain for use by the test.
|
||||
|
||||
Create a cache of a 200-block-long chain (with wallet) for MAX_NODES
|
||||
Create a cache of a 199-block-long chain
|
||||
Afterward, create num_nodes copies from the cache."""
|
||||
|
||||
CACHE_NODE_ID = 0 # Use node 0 to create the cache for all other nodes
|
||||
cache_node_dir = get_datadir_path(self.options.cachedir, CACHE_NODE_ID)
|
||||
assert self.num_nodes <= MAX_NODES
|
||||
create_cache = False
|
||||
for i in range(MAX_NODES):
|
||||
if not os.path.isdir(get_datadir_path(self.options.cachedir, i)):
|
||||
create_cache = True
|
||||
break
|
||||
|
||||
if create_cache:
|
||||
self.log.debug("Creating data directories from cached datadir")
|
||||
if not os.path.isdir(cache_node_dir):
|
||||
self.log.debug("Creating cache directory {}".format(cache_node_dir))
|
||||
|
||||
# find and delete old cache directories if any exist
|
||||
for i in range(MAX_NODES):
|
||||
if os.path.isdir(get_datadir_path(self.options.cachedir, i)):
|
||||
shutil.rmtree(get_datadir_path(self.options.cachedir, i))
|
||||
|
||||
# Create cache directories, run dashds:
|
||||
self.set_genesis_mocktime()
|
||||
for i in range(MAX_NODES):
|
||||
datadir = initialize_datadir(self.options.cachedir, i, self.chain)
|
||||
args = [self.options.bitcoind, "-datadir=" + datadir, "-mocktime="+str(TIME_GENESIS_BLOCK), '-disablewallet']
|
||||
if i > 0:
|
||||
args.append("-connect=127.0.0.1:" + str(p2p_port(0)))
|
||||
if extra_args is not None:
|
||||
args.extend(extra_args)
|
||||
self.nodes.append(TestNode(i, get_datadir_path(self.options.cachedir, i), chain=self.chain, extra_conf=["bind=127.0.0.1"], extra_args=[], extra_args_from_options=self.extra_args_from_options, rpchost=None,
|
||||
initialize_datadir(self.options.cachedir, CACHE_NODE_ID, self.chain)
|
||||
self.nodes.append(
|
||||
TestNode(
|
||||
CACHE_NODE_ID,
|
||||
cache_node_dir,
|
||||
chain=self.chain,
|
||||
extra_conf=["bind=127.0.0.1"],
|
||||
extra_args=['-disablewallet', "-mocktime=%d" % TIME_GENESIS_BLOCK],
|
||||
extra_args_from_options=self.extra_args_from_options,
|
||||
rpchost=None,
|
||||
timewait=self.rpc_timeout,
|
||||
timeout_factor=self.options.timeout_factor,
|
||||
bitcoind=self.options.bitcoind,
|
||||
bitcoin_cli=self.options.bitcoincli,
|
||||
mocktime=self.mocktime,
|
||||
coverage_dir=None,
|
||||
cwd=self.options.tmpdir,
|
||||
))
|
||||
self.nodes[i].args = args
|
||||
self.start_node(i)
|
||||
self.start_node(CACHE_NODE_ID)
|
||||
|
||||
# Wait for RPC connections to be ready
|
||||
for node in self.nodes:
|
||||
node.wait_for_rpc_connection()
|
||||
self.nodes[CACHE_NODE_ID].wait_for_rpc_connection()
|
||||
|
||||
# Create a 200-block-long chain; each of the 4 first nodes
|
||||
# Create a 199-block-long chain; each of the 4 first nodes
|
||||
# gets 25 mature blocks and 25 immature.
|
||||
# Note: To preserve compatibility with older versions of
|
||||
# initialize_chain, only 4 nodes will generate coins.
|
||||
#
|
||||
# blocks are created with timestamps 10 minutes apart
|
||||
# starting from 2010 minutes in the past
|
||||
block_time = TIME_GENESIS_BLOCK
|
||||
for i in range(2):
|
||||
for peer in range(4):
|
||||
for j in range(25):
|
||||
set_node_times(self.nodes, block_time)
|
||||
self.nodes[peer].generatetoaddress(1, self.nodes[peer].get_deterministic_priv_key().address)
|
||||
block_time += 156
|
||||
# Must sync before next peer starts generating blocks
|
||||
self.sync_blocks()
|
||||
# The 4th node gets only 24 immature blocks so that the very last
|
||||
# block in the cache does not age too much (have an old tip age).
|
||||
# This is needed so that we are out of IBD when the test starts,
|
||||
# see the tip age check in IsInitialBlockDownload().
|
||||
self.set_genesis_mocktime()
|
||||
for i in range(8):
|
||||
self.bump_mocktime((25 if i != 7 else 24) * 156)
|
||||
self.nodes[CACHE_NODE_ID].generatetoaddress(
|
||||
nblocks=25 if i != 7 else 24,
|
||||
address=TestNode.PRIV_KEYS[i % 4].address,
|
||||
)
|
||||
|
||||
# Shut them down, and clean up cache directories:
|
||||
assert_equal(self.nodes[CACHE_NODE_ID].getblockchaininfo()["blocks"], 199)
|
||||
|
||||
# Shut it down, and clean up cache directories:
|
||||
self.stop_nodes()
|
||||
self.nodes = []
|
||||
self.disable_mocktime()
|
||||
|
||||
def cache_path(n, *paths):
|
||||
chain = get_chain_folder(get_datadir_path(self.options.cachedir, n), self.chain)
|
||||
return os.path.join(get_datadir_path(self.options.cachedir, n), chain, *paths)
|
||||
def cache_path(*paths):
|
||||
chain = get_chain_folder(cache_node_dir, self.chain)
|
||||
return os.path.join(cache_node_dir, chain, *paths)
|
||||
|
||||
for i in range(MAX_NODES):
|
||||
os.rmdir(cache_path(i, 'wallets')) # Remove empty wallets dir
|
||||
for entry in os.listdir(cache_path(i)):
|
||||
if entry not in ['chainstate', 'blocks', 'indexes', 'evodb', 'llmq', 'backups']:
|
||||
os.remove(cache_path(i, entry))
|
||||
os.rmdir(cache_path('wallets')) # Remove empty wallets dir
|
||||
for entry in os.listdir(cache_path()):
|
||||
if entry not in ['chainstate', 'blocks', 'indexes', 'evodb', 'llmq']: # Keep some folders
|
||||
os.remove(cache_path(entry))
|
||||
|
||||
for i in range(self.num_nodes):
|
||||
from_dir = get_datadir_path(self.options.cachedir, i)
|
||||
self.log.debug("Copy cache directory {} to node {}".format(cache_node_dir, i))
|
||||
to_dir = get_datadir_path(self.options.tmpdir, i)
|
||||
shutil.copytree(from_dir, to_dir)
|
||||
shutil.copytree(cache_node_dir, to_dir)
|
||||
initialize_datadir(self.options.tmpdir, i, self.chain) # Overwrite port/rpcport in dash.conf
|
||||
|
||||
def _initialize_chain_clean(self):
|
||||
@ -1220,7 +1288,7 @@ class DashTestFramework(BitcoinTestFramework):
|
||||
return True
|
||||
self.bump_mocktime(sleep, nodes=nodes)
|
||||
self.nodes[0].generate(1)
|
||||
sync_blocks(nodes)
|
||||
self.sync_blocks(nodes)
|
||||
return False
|
||||
wait_until(wait_func, timeout=timeout, sleep=sleep)
|
||||
|
||||
@ -1232,7 +1300,7 @@ class DashTestFramework(BitcoinTestFramework):
|
||||
return True
|
||||
self.bump_mocktime(sleep, nodes=nodes)
|
||||
self.nodes[0].generate(1)
|
||||
sync_blocks(nodes)
|
||||
self.sync_blocks(nodes)
|
||||
return False
|
||||
wait_until(wait_func, timeout=timeout, sleep=sleep)
|
||||
|
||||
@ -1240,7 +1308,7 @@ class DashTestFramework(BitcoinTestFramework):
|
||||
time.sleep(1)
|
||||
self.bump_mocktime(1, nodes=nodes)
|
||||
self.nodes[0].generate(num_blocks)
|
||||
sync_blocks(nodes)
|
||||
self.sync_blocks(nodes)
|
||||
|
||||
def mine_quorum(self, llmq_type_name="llmq_test", llmq_type=100, expected_connections=None, expected_members=None, expected_contributions=None, expected_complaints=0, expected_justifications=0, expected_commitments=None, mninfos_online=None, mninfos_valid=None):
|
||||
spork21_active = self.nodes[0].spork('show')['SPORK_21_QUORUM_ALL_CONNECTED'] <= 1
|
||||
@ -1270,7 +1338,7 @@ class DashTestFramework(BitcoinTestFramework):
|
||||
if skip_count != 0:
|
||||
self.bump_mocktime(1, nodes=nodes)
|
||||
self.nodes[0].generate(skip_count)
|
||||
sync_blocks(nodes)
|
||||
self.sync_blocks(nodes)
|
||||
|
||||
q = self.nodes[0].getbestblockhash()
|
||||
self.log.info("Expected quorum_hash:"+str(q))
|
||||
@ -1312,7 +1380,7 @@ class DashTestFramework(BitcoinTestFramework):
|
||||
self.bump_mocktime(1, nodes=nodes)
|
||||
self.nodes[0].getblocktemplate() # this calls CreateNewBlock
|
||||
self.nodes[0].generate(1)
|
||||
sync_blocks(nodes)
|
||||
self.sync_blocks(nodes)
|
||||
|
||||
self.log.info("Waiting for quorum to appear in the list")
|
||||
self.wait_for_quorum_list(q, nodes, llmq_type_name=llmq_type_name)
|
||||
@ -1324,7 +1392,7 @@ class DashTestFramework(BitcoinTestFramework):
|
||||
# Mine 8 (SIGN_HEIGHT_OFFSET) more blocks to make sure that the new quorum gets eligible for signing sessions
|
||||
self.nodes[0].generate(8)
|
||||
|
||||
sync_blocks(nodes)
|
||||
self.sync_blocks(nodes)
|
||||
|
||||
self.log.info("New quorum: height=%d, quorumHash=%s, quorumIndex=%d, minedBlock=%s" % (quorum_info["height"], new_quorum, quorum_info["quorumIndex"], quorum_info["minedBlock"]))
|
||||
|
||||
@ -1360,7 +1428,7 @@ class DashTestFramework(BitcoinTestFramework):
|
||||
# self.bump_mocktime(1, nodes=nodes)
|
||||
# self.nodes[0].generate(skip_count)
|
||||
# time.sleep(4)
|
||||
# sync_blocks(nodes)
|
||||
# self.sync_blocks(nodes)
|
||||
|
||||
self.move_blocks(nodes, skip_count)
|
||||
|
||||
@ -1442,7 +1510,7 @@ class DashTestFramework(BitcoinTestFramework):
|
||||
self.bump_mocktime(1, nodes=nodes)
|
||||
self.nodes[0].getblocktemplate() # this calls CreateNewBlock
|
||||
self.nodes[0].generate(1)
|
||||
sync_blocks(nodes)
|
||||
self.sync_blocks(nodes)
|
||||
|
||||
time.sleep(6)
|
||||
self.log.info("Waiting for quorum(s) to appear in the list")
|
||||
@ -1453,7 +1521,7 @@ class DashTestFramework(BitcoinTestFramework):
|
||||
# Mine 8 (SIGN_HEIGHT_OFFSET) more blocks to make sure that the new quorum gets eligible for signing sessions
|
||||
self.nodes[0].generate(8)
|
||||
|
||||
sync_blocks(nodes)
|
||||
self.sync_blocks(nodes)
|
||||
self.log.info("New quorum: height=%d, quorumHash=%s, quorumIndex=%d, minedBlock=%s" % (quorum_info_0["height"], q_0, quorum_info_0["quorumIndex"], quorum_info_0["minedBlock"]))
|
||||
self.log.info("New quorum: height=%d, quorumHash=%s, quorumIndex=%d, minedBlock=%s" % (quorum_info_1["height"], q_1, quorum_info_1["quorumIndex"], quorum_info_1["minedBlock"]))
|
||||
|
||||
@ -1478,7 +1546,7 @@ class DashTestFramework(BitcoinTestFramework):
|
||||
if skip_count != 0:
|
||||
self.bump_mocktime(1, nodes=nodes)
|
||||
self.nodes[0].generate(skip_count)
|
||||
sync_blocks(nodes)
|
||||
self.sync_blocks(nodes)
|
||||
time.sleep(1)
|
||||
self.log.info('Moved from block %d to %d' % (cur_block, self.nodes[0].getblockcount()))
|
||||
|
||||
|
@ -23,6 +23,7 @@ import collections
|
||||
|
||||
from .authproxy import JSONRPCException
|
||||
from .util import (
|
||||
MAX_NODES,
|
||||
append_config,
|
||||
delete_cookie_file,
|
||||
get_rpc_proxy,
|
||||
@ -61,7 +62,7 @@ class TestNode():
|
||||
To make things easier for the test writer, any unrecognised messages will
|
||||
be dispatched to the RPC connection."""
|
||||
|
||||
def __init__(self, i, datadir, extra_args_from_options, *, chain, rpchost, timewait, bitcoind, bitcoin_cli, mocktime, coverage_dir, cwd, extra_conf=None, extra_args=None, use_cli=False, start_perf=False, use_valgrind=False):
|
||||
def __init__(self, i, datadir, extra_args_from_options, *, chain, rpchost, timewait, timeout_factor, bitcoind, bitcoin_cli, mocktime, coverage_dir, cwd, extra_conf=None, extra_args=None, use_cli=False, start_perf=False, use_valgrind=False):
|
||||
"""
|
||||
Kwargs:
|
||||
start_perf (bool): If True, begin profiling the node with `perf` as soon as
|
||||
@ -130,11 +131,10 @@ class TestNode():
|
||||
self.perf_subprocesses = {}
|
||||
|
||||
self.p2ps = []
|
||||
self.timeout_factor = timeout_factor
|
||||
|
||||
def get_deterministic_priv_key(self):
|
||||
"""Return a deterministic priv key in base58, that only depends on the node's index"""
|
||||
AddressKeyPair = collections.namedtuple('AddressKeyPair', ['address', 'key'])
|
||||
PRIV_KEYS = [
|
||||
AddressKeyPair = collections.namedtuple('AddressKeyPair', ['address', 'key'])
|
||||
PRIV_KEYS = [
|
||||
# address , privkey
|
||||
AddressKeyPair('yYdShjQSptFKitYLksFEUSwHe4hnbar5rf', 'cMfbiEsnG5b8Gwm6vEgfWvZLuXZNC4zsN2y7Es3An9xHRWRjmwgR'),
|
||||
AddressKeyPair('yfTFJgvq65UZsb9RBbpdYAAzsJoCGXqH2w', 'cStuFACUD1N6JjKQxNLUQ443qJUtSzLitKKEkA8x6utxTPZTLUtA'),
|
||||
@ -145,8 +145,23 @@ class TestNode():
|
||||
AddressKeyPair('yfy21e12jn3A3uDicNehCq486o9fMwJKMc', 'cMuko9rLDbtxCFWuBSrFgBDRSMxsLWKpJKScRGNuWKbhuQsnsjKT'),
|
||||
AddressKeyPair('yURgENB3b2YRMWnbhKF7iGs3KoaVRVXsJr', 'cQhdjTMh57MaHCDk9FsWGPtftRMBUuhaYAtouWnetcewmBuSrLSM'),
|
||||
AddressKeyPair('yYC9AxBEUs3ZZxfcQvj2LUF5PVxxtqaEs7', 'cQFueiiP13mfytV3Svoe4o4Ux79fRJvwuSgHapXsnBwrHod57EeL'),
|
||||
]
|
||||
return PRIV_KEYS[self.index]
|
||||
AddressKeyPair('yVs9jXGyLWLLFbpESnoppk7F8DtXcuCCTf', 'cN55daf1HotwBAgAKWVgDcoppmUNDtQSfb7XLutTLeAgVc3u8hik'),
|
||||
AddressKeyPair('yV3eqNNshZJ4Pv6NCyYsbdJb1ERFFygFqf', 'cT7qK7g1wkYEMvKowd2ZrX1E5f6JQ7TM246UfqbCiyF7kZhorpX3'),
|
||||
AddressKeyPair('yfE8gZCiFW9Uqu21v3JGibr3WVSPQWmY8n', 'cPiRWE8KMjTRxH1MWkPerhfoHFn5iHPWVK5aPqjW8NxmdwenFinJ'),
|
||||
AddressKeyPair('yLLVXzya7GzmVkjQzsCG4iDpqYJyJFDSEV', 'cVLCocFyWxzyCwEknkWvDeWneTBsh9Jf3u4yiJCYjcy3gt8Jw1cM'),
|
||||
AddressKeyPair('yLNNR3HeJxgR669oRePksYmCqHuPUG79mF', 'cQawC3oUgoToGDJBw1Ub2PpDmf44kVtcaVaTcHyzXMRKGwdn9UYW'),
|
||||
AddressKeyPair('yLPKVwRTXME7Q3JfKAPJ4FHEaGdWgJuhpj', 'cVcFaWTbkCUZPFTHfDs8iHurPWns5QXc5rqcfkPMHUdmv17o8UYB'),
|
||||
AddressKeyPair('yLPUundzTpvjU8KYVyM4Zmnr4REf3FFvhZ', 'cRVeRmRaYuEYP9HbCZFsf1ifYYZ4KQD9rttRoTNb9wjPzhvRwqMb'),
|
||||
AddressKeyPair('yLRhHqau58AS1ALtnaowv1Pyztxi1Q6fXG', 'cNYFW52pJswYbfPR9fpiRpWHEQygg5tyMih2ASPsgMgPy9SUSSEV'),
|
||||
AddressKeyPair('yLRwHeMkXwYrkDzC4q12vej243AyTeWiPm', 'cRqfZ3dAp8BJUcGhSv7ueCXNGbki1bpcXEKk5dEJN344H52GuHQY'),
|
||||
AddressKeyPair('yLTMCXJhG1mpaWhbHcsr7zUt9wDWuQSPSk', 'cVWGbeCT5QcVGVTL5NuiLs9JfL8HFDb9PN5Gq2xudw6ZsDFeDy1V'),
|
||||
AddressKeyPair('yLU9vxiAWUdiKKxn6EazLDFq9WXrK2T7RP', 'cVCzrzfxMhUMxV34UhTmdmntAqHvosAuNo2KUZsiHZSKLm73g35o'),
|
||||
]
|
||||
|
||||
def get_deterministic_priv_key(self):
|
||||
"""Return a deterministic priv key in base58, that only depends on the node's index"""
|
||||
assert len(self.PRIV_KEYS) == MAX_NODES
|
||||
return self.PRIV_KEYS[self.index]
|
||||
|
||||
def _node_msg(self, msg: str) -> str:
|
||||
"""Return a modified msg that identifies this node by its index as a debugging aid."""
|
||||
@ -222,6 +237,24 @@ class TestNode():
|
||||
rpc = get_rpc_proxy(rpc_url(self.datadir, self.index, self.chain, self.rpchost), self.index, timeout=self.rpc_timeout, coveragedir=self.coverage_dir)
|
||||
rpc.getblockcount()
|
||||
# If the call to getblockcount() succeeds then the RPC connection is up
|
||||
wait_until(lambda: rpc.getmempoolinfo()['loaded'])
|
||||
# Wait for the node to finish reindex, block import, and
|
||||
# loading the mempool. Usually importing happens fast or
|
||||
# even "immediate" when the node is started. However, there
|
||||
# is no guarantee and sometimes ThreadImport might finish
|
||||
# later. This is going to cause intermittent test failures,
|
||||
# because generally the tests assume the node is fully
|
||||
# ready after being started.
|
||||
#
|
||||
# For example, the node will reject block messages from p2p
|
||||
# when it is still importing with the error "Unexpected
|
||||
# block message received"
|
||||
#
|
||||
# The wait is done here to make tests as robust as possible
|
||||
# and prevent racy tests and intermittent failures as much
|
||||
# as possible. Some tests might not need this, but the
|
||||
# overhead is trivial, and the added guarantees are worth
|
||||
# the minimal performance cost.
|
||||
self.log.debug("RPC successfully started")
|
||||
if self.use_cli:
|
||||
return
|
||||
@ -255,6 +288,9 @@ class TestNode():
|
||||
wallet_path = "wallet/{}".format(urllib.parse.quote(wallet_name))
|
||||
return self.rpc / wallet_path
|
||||
|
||||
def version_is_at_least(self, ver):
|
||||
return self.version is None or self.version >= ver
|
||||
|
||||
def stop_node(self, expected_stderr='', wait=0):
|
||||
"""Stop the node."""
|
||||
if not self.running:
|
||||
@ -302,13 +338,13 @@ class TestNode():
|
||||
return True
|
||||
|
||||
def wait_until_stopped(self, timeout=BITCOIND_PROC_WAIT_TIMEOUT):
|
||||
wait_until(self.is_node_stopped, timeout=timeout)
|
||||
wait_until(self.is_node_stopped, timeout=timeout, timeout_factor=self.timeout_factor)
|
||||
|
||||
@contextlib.contextmanager
|
||||
def assert_debug_log(self, expected_msgs, unexpected_msgs=None, timeout=2):
|
||||
if unexpected_msgs is None:
|
||||
unexpected_msgs = []
|
||||
time_end = time.time() + timeout
|
||||
time_end = time.time() + timeout * self.timeout_factor
|
||||
chain = get_chain_folder(self.datadir, self.chain)
|
||||
debug_log = os.path.join(self.datadir, chain, 'debug.log')
|
||||
with open(debug_log, encoding='utf-8') as dl:
|
||||
@ -466,10 +502,22 @@ class TestNode():
|
||||
if 'dstaddr' not in kwargs:
|
||||
kwargs['dstaddr'] = '127.0.0.1'
|
||||
|
||||
p2p_conn.peer_connect(**kwargs, net=self.chain)()
|
||||
p2p_conn.peer_connect(**kwargs, net=self.chain, timeout_factor=self.timeout_factor)()
|
||||
self.p2ps.append(p2p_conn)
|
||||
if wait_for_verack:
|
||||
# Wait for the node to send us the version and verack
|
||||
p2p_conn.wait_for_verack()
|
||||
# At this point we have sent our version message and received the version and verack, however the full node
|
||||
# has not yet received the verack from us (in reply to their version). So, the connection is not yet fully
|
||||
# established (fSuccessfullyConnected).
|
||||
#
|
||||
# This shouldn't lead to any issues when sending messages, since the verack will be in-flight before the
|
||||
# message we send. However, it might lead to races where we are expecting to receive a message. E.g. a
|
||||
# transaction that will be added to the mempool as soon as we return here.
|
||||
#
|
||||
# So syncing here is redundant when we only want to send a message, but the cost is low (a few milliseconds)
|
||||
# in comparison to the upside of making tests less fragile and unexpected intermittent errors less likely.
|
||||
p2p_conn.sync_with_ping()
|
||||
|
||||
return p2p_conn
|
||||
|
||||
@ -498,6 +546,7 @@ class TestNode():
|
||||
|
||||
del self.p2ps[:]
|
||||
|
||||
|
||||
class TestNodeCLIAttr:
|
||||
def __init__(self, cli, command):
|
||||
self.cli = cli
|
||||
@ -509,6 +558,7 @@ class TestNodeCLIAttr:
|
||||
def get_request(self, *args, **kwargs):
|
||||
return lambda: self(*args, **kwargs)
|
||||
|
||||
|
||||
def arg_to_cli(arg):
|
||||
if isinstance(arg, bool):
|
||||
return str(arg).lower()
|
||||
@ -517,6 +567,7 @@ def arg_to_cli(arg):
|
||||
else:
|
||||
return str(arg)
|
||||
|
||||
|
||||
class TestNodeCLI():
|
||||
"""Interface to dash-cli for an individual node"""
|
||||
|
||||
|
75
test/functional/test_framework/test_shell.py
Normal file
75
test/functional/test_framework/test_shell.py
Normal file
@ -0,0 +1,75 @@
|
||||
#!/usr/bin/env python3
|
||||
# Copyright (c) 2019 The Bitcoin Core developers
|
||||
# Distributed under the MIT software license, see the accompanying
|
||||
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
|
||||
|
||||
from test_framework.test_framework import BitcoinTestFramework
|
||||
|
||||
class TestShell:
|
||||
"""Wrapper Class for BitcoinTestFramework.
|
||||
|
||||
The TestShell class extends the BitcoinTestFramework
|
||||
rpc & daemon process management functionality to external
|
||||
python environments.
|
||||
|
||||
It is a singleton class, which ensures that users only
|
||||
start a single TestShell at a time."""
|
||||
|
||||
class __TestShell(BitcoinTestFramework):
|
||||
def set_test_params(self):
|
||||
pass
|
||||
|
||||
def run_test(self):
|
||||
pass
|
||||
|
||||
def setup(self, **kwargs):
|
||||
if self.running:
|
||||
print("TestShell is already running!")
|
||||
return
|
||||
|
||||
# Num_nodes parameter must be set
|
||||
# by BitcoinTestFramework child class.
|
||||
self.num_nodes = kwargs.get('num_nodes', 1)
|
||||
kwargs.pop('num_nodes', None)
|
||||
|
||||
# User parameters override default values.
|
||||
for key, value in kwargs.items():
|
||||
if hasattr(self, key):
|
||||
setattr(self, key, value)
|
||||
elif hasattr(self.options, key):
|
||||
setattr(self.options, key, value)
|
||||
else:
|
||||
raise KeyError(key + " not a valid parameter key!")
|
||||
|
||||
super().setup()
|
||||
self.running = True
|
||||
|
||||
def shutdown(self):
|
||||
if not self.running:
|
||||
print("TestShell is not running!")
|
||||
else:
|
||||
super().shutdown()
|
||||
self.running = False
|
||||
|
||||
def reset(self):
|
||||
if self.running:
|
||||
print("Shutdown TestWrapper before resetting!")
|
||||
else:
|
||||
self.num_nodes = None
|
||||
super().__init__()
|
||||
|
||||
instance = None
|
||||
|
||||
def __new__(cls):
|
||||
# This implementation enforces singleton pattern, and will return the
|
||||
# previously initialized instance if available
|
||||
if not TestShell.instance:
|
||||
TestShell.instance = TestShell.__TestShell()
|
||||
TestShell.instance.running = False
|
||||
return TestShell.instance
|
||||
|
||||
def __getattr__(self, name):
|
||||
return getattr(self.instance, name)
|
||||
|
||||
def __setattr__(self, name, value):
|
||||
return setattr(self.instance, name, value)
|
@ -227,9 +227,10 @@ def str_to_b64str(string):
|
||||
def satoshi_round(amount):
|
||||
return Decimal(amount).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
|
||||
|
||||
def wait_until(predicate, *, attempts=float('inf'), timeout=float('inf'), sleep=0.05, lock=None, do_assert=True, allow_exception=False):
|
||||
def wait_until(predicate, *, attempts=float('inf'), timeout=float('inf'), sleep=0.05, timeout_factor=1.0, lock=None, do_assert=True, allow_exception=False):
|
||||
if attempts == float('inf') and timeout == float('inf'):
|
||||
timeout = 60
|
||||
timeout = timeout * timeout_factor
|
||||
attempt = 0
|
||||
timeout *= Options.timeout_scale
|
||||
time_end = time.time() + timeout
|
||||
@ -292,7 +293,7 @@ def get_rpc_proxy(url, node_number, *, timeout=None, coveragedir=None):
|
||||
"""
|
||||
proxy_kwargs = {}
|
||||
if timeout is not None:
|
||||
proxy_kwargs['timeout'] = timeout
|
||||
proxy_kwargs['timeout'] = int(timeout)
|
||||
|
||||
proxy = AuthServiceProxy(url, **proxy_kwargs)
|
||||
proxy.url = url # store URL on proxy for info
|
||||
@ -473,43 +474,6 @@ def reconnect_isolated_node(node, node_num):
|
||||
node.setnetworkactive(True)
|
||||
connect_nodes(node, node_num)
|
||||
|
||||
def sync_blocks(rpc_connections, *, wait=1, timeout=60):
|
||||
"""
|
||||
Wait until everybody has the same tip.
|
||||
|
||||
sync_blocks needs to be called with an rpc_connections set that has least
|
||||
one node already synced to the latest, stable tip, otherwise there's a
|
||||
chance it might return before all nodes are stably synced.
|
||||
"""
|
||||
timeout *= Options.timeout_scale
|
||||
|
||||
stop_time = time.time() + timeout
|
||||
while time.time() <= stop_time:
|
||||
best_hash = [x.getbestblockhash() for x in rpc_connections]
|
||||
if best_hash.count(best_hash[0]) == len(rpc_connections):
|
||||
return
|
||||
time.sleep(wait)
|
||||
raise AssertionError("Block sync timed out:{}".format("".join("\n {!r}".format(b) for b in best_hash)))
|
||||
|
||||
def sync_mempools(rpc_connections, *, wait=1, timeout=60, flush_scheduler=True, wait_func=None):
|
||||
"""
|
||||
Wait until everybody has the same transactions in their memory
|
||||
pools
|
||||
"""
|
||||
timeout *= Options.timeout_scale
|
||||
stop_time = time.time() + timeout
|
||||
while time.time() <= stop_time:
|
||||
pool = [set(r.getrawmempool()) for r in rpc_connections]
|
||||
if pool.count(pool[0]) == len(rpc_connections):
|
||||
if flush_scheduler:
|
||||
for r in rpc_connections:
|
||||
r.syncwithvalidationinterfacequeue()
|
||||
return
|
||||
if wait_func is not None:
|
||||
wait_func()
|
||||
time.sleep(wait)
|
||||
raise AssertionError("Mempool sync timed out:{}".format("".join("\n {!r}".format(m) for m in pool)))
|
||||
|
||||
def force_finish_mnsync(node):
|
||||
"""
|
||||
Masternodes won't accept incoming connections while IsSynced is false.
|
||||
|
@ -223,7 +223,7 @@ class ToolWalletTest(BitcoinTestFramework):
|
||||
self.assert_tool_output('', '-wallet=salvage', 'salvage')
|
||||
|
||||
def run_test(self):
|
||||
self.wallet_path = os.path.join(self.nodes[0].datadir, 'regtest', 'wallets', 'wallet.dat')
|
||||
self.wallet_path = os.path.join(self.nodes[0].datadir, self.chain, 'wallets', 'wallet.dat')
|
||||
self.test_invalid_tool_commands_and_args()
|
||||
# Warning: The following tests are order-dependent.
|
||||
self.test_tool_wallet_info()
|
||||
|
@ -18,7 +18,6 @@ from test_framework.util import (
|
||||
assert_raises_rpc_error,
|
||||
connect_nodes,
|
||||
disconnect_nodes,
|
||||
wait_until,
|
||||
)
|
||||
|
||||
|
||||
@ -98,7 +97,7 @@ class AbandonConflictTest(BitcoinTestFramework):
|
||||
# TODO: redo with eviction
|
||||
self.stop_node(0)
|
||||
self.start_node(0, extra_args=["-minrelaytxfee=0.0001"])
|
||||
wait_until(lambda: self.nodes[0].getmempoolinfo()['loaded'])
|
||||
assert self.nodes[0].getmempoolinfo()['loaded']
|
||||
|
||||
# Verify txs no longer in either node's mempool
|
||||
assert_equal(len(self.nodes[0].getrawmempool()), 0)
|
||||
@ -134,7 +133,7 @@ class AbandonConflictTest(BitcoinTestFramework):
|
||||
# Verify that even with a low min relay fee, the tx is not reaccepted from wallet on startup once abandoned
|
||||
self.stop_node(0)
|
||||
self.start_node(0, extra_args=["-minrelaytxfee=0.00001"])
|
||||
wait_until(lambda: self.nodes[0].getmempoolinfo()['loaded'])
|
||||
assert self.nodes[0].getmempoolinfo()['loaded']
|
||||
|
||||
assert_equal(len(self.nodes[0].getrawmempool()), 0)
|
||||
assert_equal(self.nodes[0].getbalance(), balance)
|
||||
@ -156,7 +155,7 @@ class AbandonConflictTest(BitcoinTestFramework):
|
||||
# Remove using high relay fee again
|
||||
self.stop_node(0)
|
||||
self.start_node(0, extra_args=["-minrelaytxfee=0.0001"])
|
||||
wait_until(lambda: self.nodes[0].getmempoolinfo()['loaded'])
|
||||
assert self.nodes[0].getmempoolinfo()['loaded']
|
||||
assert_equal(len(self.nodes[0].getrawmempool()), 0)
|
||||
newbalance = self.nodes[0].getbalance()
|
||||
assert_equal(newbalance, balance - Decimal("24.9996"))
|
||||
|
@ -12,7 +12,6 @@ from test_framework.util import (
|
||||
assert_equal,
|
||||
assert_raises_rpc_error,
|
||||
connect_nodes,
|
||||
sync_blocks,
|
||||
)
|
||||
|
||||
|
||||
@ -203,7 +202,7 @@ class WalletTest(BitcoinTestFramework):
|
||||
self.restart_node(1, ['-persistmempool=0', '-checklevel=0'])
|
||||
connect_nodes(self.nodes[0], 1)
|
||||
connect_nodes(self.nodes[1], 0)
|
||||
sync_blocks(self.nodes)
|
||||
self.sync_blocks()
|
||||
self.nodes[1].sendrawtransaction(tx_orig)
|
||||
self.nodes[1].generatetoaddress(1, ADDRESS_WATCHONLY)
|
||||
self.sync_all()
|
||||
|
@ -4,7 +4,6 @@
|
||||
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
|
||||
"""Test the wallet."""
|
||||
from decimal import Decimal
|
||||
import time
|
||||
|
||||
from test_framework.test_framework import BitcoinTestFramework
|
||||
from test_framework.util import (
|
||||
@ -455,12 +454,8 @@ class WalletTest(BitcoinTestFramework):
|
||||
self.stop_node(0)
|
||||
self.start_node(0, extra_args=["-walletrejectlongchains", "-limitancestorcount=" + str(2 * chainlimit)])
|
||||
|
||||
# wait for loadmempool
|
||||
timeout = 10
|
||||
while (timeout > 0 and len(self.nodes[0].getrawmempool()) < chainlimit * 2):
|
||||
time.sleep(0.5)
|
||||
timeout -= 0.5
|
||||
assert_equal(len(self.nodes[0].getrawmempool()), chainlimit * 2)
|
||||
# wait until the wallet has submitted all transactions to the mempool
|
||||
wait_until(lambda: len(self.nodes[0].getrawmempool()) == chainlimit * 2)
|
||||
|
||||
# Prevent potential race condition when calling wallet RPCs right after restart
|
||||
self.nodes[0].syncwithvalidationinterfacequeue()
|
||||
|
@ -10,7 +10,6 @@ from test_framework.util import (
|
||||
assert_array_result,
|
||||
assert_equal,
|
||||
assert_raises_rpc_error,
|
||||
sync_blocks,
|
||||
)
|
||||
|
||||
|
||||
@ -25,7 +24,7 @@ class ReceivedByTest(BitcoinTestFramework):
|
||||
def run_test(self):
|
||||
# Generate block to get out of IBD
|
||||
self.nodes[0].generate(1)
|
||||
sync_blocks(self.nodes)
|
||||
self.sync_blocks()
|
||||
|
||||
# save the number of coinbase reward addresses so far
|
||||
num_cb_reward_addresses = len(self.nodes[1].listreceivedbyaddress(minconf=0, include_empty=True, include_watchonly=True))
|
||||
|
@ -11,7 +11,6 @@ from test_framework.util import assert_array_result, assert_equal
|
||||
class ListTransactionsTest(BitcoinTestFramework):
|
||||
def set_test_params(self):
|
||||
self.num_nodes = 2
|
||||
self.set_cache_mocktime()
|
||||
|
||||
def skip_test_if_missing_module(self):
|
||||
self.skip_if_no_wallet()
|
||||
|
@ -90,7 +90,7 @@ class ReorgsRestoreTest(BitcoinTestFramework):
|
||||
# Node0 wallet file is loaded on longest sync'ed node1
|
||||
self.stop_node(1)
|
||||
self.nodes[0].backupwallet(os.path.join(self.nodes[0].datadir, 'wallet.bak'))
|
||||
shutil.copyfile(os.path.join(self.nodes[0].datadir, 'wallet.bak'), os.path.join(self.nodes[1].datadir, 'regtest', 'wallet.dat'))
|
||||
shutil.copyfile(os.path.join(self.nodes[0].datadir, 'wallet.bak'), os.path.join(self.nodes[1].datadir, self.chain, 'wallet.dat'))
|
||||
self.start_node(1)
|
||||
tx_after_reorg = self.nodes[1].gettransaction(txid)
|
||||
# Check that normal confirmed tx is confirmed again but with different blockhash
|
||||
|
@ -34,6 +34,14 @@ class TxnMallTest(BitcoinTestFramework):
|
||||
def run_test(self):
|
||||
# All nodes should start with 12,500 DASH:
|
||||
starting_balance = 12500
|
||||
|
||||
# All nodes should be out of IBD.
|
||||
# If the nodes are not all out of IBD, that can interfere with
|
||||
# blockchain sync later in the test when nodes are connected, due to
|
||||
# timing issues.
|
||||
for n in self.nodes:
|
||||
assert n.getblockchaininfo()["initialblockdownload"] == False
|
||||
|
||||
for i in range(4):
|
||||
assert_equal(self.nodes[i].getbalance(), starting_balance)
|
||||
self.nodes[i].getnewaddress("") # bug workaround, coins generated assigned to first getnewaddress!
|
||||
|
Loading…
Reference in New Issue
Block a user