Mirror of https://github.com/dashpay/dash.git (synced 2024-12-24 11:32:46 +01:00)
Merge #19752: test: Update wait_until usage in tests not to use the one from utils
d841301010914203fb5ef02627c76fad99cb11f1 test: Add docstring to wait_until() in util.py to warn about its usage (Seleme Topuz)
1343c86c7cc1fc896696b3ed87c12039e4ef3a0c test: Update wait_until usage in tests not to use the one from utils (Seleme Topuz)

Pull request description:

  Replace global `wait_until()` usages (from [test_framework/util.py](https://github.com/bitcoin/bitcoin/blob/master/test/functional/test_framework/util.py#L228)) with the ones provided by the `BitcoinTestFramework` and `P2PInterface` classes.

  The motivation behind this change is that `util.wait_until()` expects a timeout, timeout_factor and lock, and it is not aware of the context of the test framework. `BitcoinTestFramework` offers a `wait_until()` with a sensible default `timeout` and a shared `timeout_factor`. On top of these, `mininode.wait_until()` also has a shared lock.

  closes #19080

ACKs for top commit:
  MarcoFalke:
    ACK d841301010914203fb5ef02627c76fad99cb11f1 🦆
  kallewoof:
    utACK d841301010914203fb5ef02627c76fad99cb11f1

Tree-SHA512: 81604f4cfa87fed98071a80e4afe940b3897fe65cf680a69619a93e97d45f25b313c12227de7040e19517fa9c003291b232f1b40b2567aba0148f22c23c47a88
This commit is contained in:
parent 4171afe54e
commit 4c8e77a48d
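The change below is mechanical at each call site: drop the `wait_until` import from `test_framework.util` and call the method on the test object (`self.wait_until`) or on the peer object (`node.p2p.wait_until` / `test_node.wait_until`), which supplies the default timeout, the shared `timeout_factor`, and, for `P2PInterface`, the `p2p_lock`. The snippet below is a simplified, standalone sketch of that polling behaviour and of the call-site pattern; the helper name and defaults are illustrative and are not code from this commit.

```python
# Simplified, hypothetical sketch of the polling behaviour behind wait_until().
import time


def wait_until_sketch(predicate, *, timeout=60, timeout_factor=1.0, sleep=0.5, lock=None):
    """Poll `predicate` until it returns True or the (scaled) timeout expires."""
    deadline = time.time() + timeout * timeout_factor  # shared factor scales every wait
    while time.time() < deadline:
        if lock is not None:
            with lock:  # e.g. p2p_lock when reading P2PInterface state
                if predicate():
                    return
        elif predicate():
            return
        time.sleep(sleep)
    raise AssertionError("predicate not satisfied within {}s".format(timeout * timeout_factor))


# Call-site pattern the PR moves the tests to (illustrative):
#   self.wait_until(lambda: node.getblockcount() == 1000)        # BitcoinTestFramework
#   peer.wait_until(lambda: "cmpctblock" in peer.last_message)   # P2PInterface, holds p2p_lock
# instead of importing wait_until from test_framework.util and passing
# timeout, timeout_factor and lock explicitly at every call.
```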
@@ -25,7 +25,6 @@ from test_framework.p2p import (
 from test_framework.test_framework import BitcoinTestFramework
 from test_framework.util import (
 assert_equal,
-wait_until,
 )

 # P2PInterface is a class containing callbacks to be executed when a P2P

@@ -205,7 +204,7 @@ class ExampleTest(BitcoinTestFramework):

 # wait_until() will loop until a predicate condition is met. Use it to test properties of the
 # P2PInterface objects.
-wait_until(lambda: sorted(blocks) == sorted(list(self.nodes[2].p2p.block_receive_map.keys())), timeout=5, lock=p2p_lock)
+self.nodes[2].p2p.wait_until(lambda: sorted(blocks) == sorted(list(self.nodes[2].p2p.block_receive_map.keys())), timeout=5)

 self.log.info("Check that each block was received only once")
 # The network thread uses a global lock on data access to the P2PConnection objects when sending and receiving

@@ -11,7 +11,7 @@
 """

 from test_framework.test_framework import BitcoinTestFramework
-from test_framework.util import wait_until, get_datadir_path
+from test_framework.util import get_datadir_path
 import os

@@ -41,7 +41,7 @@ class AbortNodeTest(BitcoinTestFramework):

 # Check that node0 aborted
 self.log.info("Waiting for crash")
-wait_until(lambda: self.nodes[0].is_node_stopped(), timeout=200)
+self.nodes[0].wait_until_stopped(timeout=200)
 self.log.info("Node crashed - now verifying restart fails")
 self.nodes[0].assert_start_raises_init_error()
@@ -48,7 +48,7 @@ from test_framework.messages import (
 from test_framework.p2p import P2PInterface
 from test_framework.script import (CScript, OP_TRUE)
 from test_framework.test_framework import BitcoinTestFramework
-from test_framework.util import (assert_equal, set_node_times, wait_until)
+from test_framework.util import (assert_equal, set_node_times)


 class BaseNode(P2PInterface):

@@ -172,7 +172,7 @@ class AssumeValidTest(BitcoinTestFramework):

 # Send blocks to node0. Block 102 will be rejected.
 self.send_blocks_until_disconnected(p2p0)
-wait_until(lambda: self.nodes[0].getblockcount() >= COINBASE_MATURITY + 1)
+self.wait_until(lambda: self.nodes[0].getblockcount() >= COINBASE_MATURITY + 1)
 assert_equal(self.nodes[0].getblockcount(), COINBASE_MATURITY + 1)

 # Send 200 blocks to node1. All blocks, including block 102, will be accepted.

@@ -184,7 +184,7 @@ class AssumeValidTest(BitcoinTestFramework):

 # Send blocks to node2. Block 102 will be rejected.
 self.send_blocks_until_disconnected(p2p2)
-wait_until(lambda: self.nodes[2].getblockcount() >= COINBASE_MATURITY + 1)
+self.wait_until(lambda: self.nodes[2].getblockcount() >= COINBASE_MATURITY + 1)
 assert_equal(self.nodes[2].getblockcount(), COINBASE_MATURITY + 1)
@@ -8,7 +8,6 @@ from test_framework.util import (
 assert_equal,
 assert_greater_than,
 assert_raises_rpc_error,
-wait_until
 )

@@ -19,7 +18,7 @@ class FeatureBlockfilterindexPruneTest(BitcoinTestFramework):

 def sync_index(self, height):
 expected = {'basic block filter index': {'synced': True, 'best_block_height': height}}
-wait_until(lambda: self.nodes[0].getindexinfo() == expected)
+self.wait_until(lambda: self.nodes[0].getindexinfo() == expected)

 def run_test(self):
 self.log.info("check if we can access a blockfilter when pruning is enabled but no blocks are actually pruned")
@@ -16,7 +16,7 @@ from test_framework.messages import CBlock, CBlockHeader, CCbTx, CMerkleBlock, F
 QuorumId, ser_uint256
 from test_framework.test_framework import DashTestFramework
 from test_framework.util import (
-assert_equal, wait_until
+assert_equal
 )

@@ -31,7 +31,7 @@ class TestP2PConn(P2PInterface):
 def wait_for_mnlistdiff(self, timeout=30):
 def received_mnlistdiff():
 return self.last_mnlistdiff is not None
-return wait_until(received_mnlistdiff, timeout=timeout)
+return self.wait_until(received_mnlistdiff, timeout=timeout)

 def getmnlistdiff(self, base_block_hash, block_hash):
 msg = msg_getmnlistd(base_block_hash, block_hash)

@@ -15,9 +15,10 @@ from io import BytesIO
 from test_framework.messages import CBlock, CBlockHeader, CCbTx, CMerkleBlock, FromHex, hash256, msg_getmnlistd, QuorumId, ser_uint256
 from test_framework.p2p import P2PInterface
 from test_framework.test_framework import DashTestFramework
-from test_framework.util import assert_equal, wait_until
+from test_framework.util import assert_equal

 # TODO: this helper used in many tests, find a new home for it
 class TestP2PConn(P2PInterface):
 def __init__(self):
 super().__init__()

@@ -29,7 +30,7 @@ class TestP2PConn(P2PInterface):
 def wait_for_mnlistdiff(self, timeout=30):
 def received_mnlistdiff():
 return self.last_mnlistdiff is not None
-return wait_until(received_mnlistdiff, timeout=timeout)
+self.wait_until(received_mnlistdiff, timeout=timeout)

 def getmnlistdiff(self, baseBlockHash, blockHash):
 msg = msg_getmnlistd(baseBlockHash, blockHash)
@@ -214,9 +214,9 @@ class DashGovernanceTest (DashTestFramework):
 isolated.generate(1)
 self.bump_mocktime(1)
 # The isolated "winner" should submit new trigger and vote for it
-wait_until(lambda: len(isolated.gobject("list", "valid", "triggers")) == 1, timeout=5)
+self.wait_until(lambda: len(isolated.gobject("list", "valid", "triggers")) == 1, timeout=5)
 isolated_trigger_hash = list(isolated.gobject("list", "valid", "triggers").keys())[0]
-wait_until(lambda: list(isolated.gobject("list", "valid", "triggers").values())[0]['YesCount'] == 1, timeout=5)
+self.wait_until(lambda: list(isolated.gobject("list", "valid", "triggers").values())[0]['YesCount'] == 1, timeout=5)
 more_votes = wait_until(lambda: list(isolated.gobject("list", "valid", "triggers").values())[0]['YesCount'] > 1, timeout=5, do_assert=False)
 assert_equal(more_votes, False)

@@ -236,9 +236,9 @@ class DashGovernanceTest (DashTestFramework):
 self.bump_mocktime(1)

 # There is now new "winner" who should submit new trigger and vote for it
-wait_until(lambda: len(self.nodes[0].gobject("list", "valid", "triggers")) == 1, timeout=5)
+self.wait_until(lambda: len(self.nodes[0].gobject("list", "valid", "triggers")) == 1, timeout=5)
 winning_trigger_hash = list(self.nodes[0].gobject("list", "valid", "triggers").keys())[0]
-wait_until(lambda: list(self.nodes[0].gobject("list", "valid", "triggers").values())[0]['YesCount'] == 1, timeout=5)
+self.wait_until(lambda: list(self.nodes[0].gobject("list", "valid", "triggers").values())[0]['YesCount'] == 1, timeout=5)
 more_votes = wait_until(lambda: list(self.nodes[0].gobject("list", "valid", "triggers").values())[0]['YesCount'] > 1, timeout=5, do_assert=False)
 assert_equal(more_votes, False)

@@ -254,7 +254,7 @@ class DashGovernanceTest (DashTestFramework):
 self.bump_mocktime(1)

 # Every non-isolated MN should vote for the same trigger now, no new triggers should be created
-wait_until(lambda: list(self.nodes[0].gobject("list", "valid", "triggers").values())[0]['YesCount'] == self.mn_count - 1, timeout=5)
+self.wait_until(lambda: list(self.nodes[0].gobject("list", "valid", "triggers").values())[0]['YesCount'] == self.mn_count - 1, timeout=5)
 more_triggers = wait_until(lambda: len(self.nodes[0].gobject("list", "valid", "triggers")) > 1, timeout=5, do_assert=False)
 assert_equal(more_triggers, False)

@@ -272,11 +272,11 @@ class DashGovernanceTest (DashTestFramework):
 node.mnsync("reset")
 # fast-forward to governance sync
 node.mnsync("next")
-wait_until(lambda: sync_gov(node))
+self.wait_until(lambda: sync_gov(node))

 # Should see two triggers now
-wait_until(lambda: len(isolated.gobject("list", "valid", "triggers")) == 2, timeout=5)
-wait_until(lambda: len(self.nodes[0].gobject("list", "valid", "triggers")) == 2, timeout=5)
+self.wait_until(lambda: len(isolated.gobject("list", "valid", "triggers")) == 2, timeout=5)
+self.wait_until(lambda: len(self.nodes[0].gobject("list", "valid", "triggers")) == 2, timeout=5)
 more_triggers = wait_until(lambda: len(self.nodes[0].gobject("list", "valid", "triggers")) > 2, timeout=5, do_assert=False)
 assert_equal(more_triggers, False)

@@ -286,8 +286,8 @@ class DashGovernanceTest (DashTestFramework):
 self.sync_blocks()

 # Should see NO votes on both triggers now
-wait_until(lambda: self.nodes[0].gobject("list", "valid", "triggers")[winning_trigger_hash]['NoCount'] == 1, timeout=5)
-wait_until(lambda: self.nodes[0].gobject("list", "valid", "triggers")[isolated_trigger_hash]['NoCount'] == self.mn_count - 1, timeout=5)
+self.wait_until(lambda: self.nodes[0].gobject("list", "valid", "triggers")[winning_trigger_hash]['NoCount'] == 1, timeout=5)
+self.wait_until(lambda: self.nodes[0].gobject("list", "valid", "triggers")[isolated_trigger_hash]['NoCount'] == self.mn_count - 1, timeout=5)

 block_count = self.nodes[0].getblockcount()
 n = sb_cycle - block_count % sb_cycle
@@ -13,7 +13,7 @@ Checks intra quorum connections
 import time

 from test_framework.test_framework import DashTestFramework
-from test_framework.util import assert_greater_than_or_equal, wait_until
+from test_framework.util import assert_greater_than_or_equal

 class LLMQConnections(DashTestFramework):
 def set_test_params(self):

@@ -53,17 +53,17 @@ class LLMQConnections(DashTestFramework):

 self.log.info("checking that all MNs got probed")
 for mn in self.get_quorum_masternodes(q):
-wait_until(lambda: self.get_mn_probe_count(mn.node, q, False) == 4)
+self.wait_until(lambda: self.get_mn_probe_count(mn.node, q, False) == 4)

 self.log.info("checking that probes age")
 self.bump_mocktime(self.MAX_AGE)
 for mn in self.get_quorum_masternodes(q):
-wait_until(lambda: self.get_mn_probe_count(mn.node, q, False) == 0)
+self.wait_until(lambda: self.get_mn_probe_count(mn.node, q, False) == 0)

 self.log.info("mine a new quorum and re-check probes")
 q = self.mine_quorum()
 for mn in self.get_quorum_masternodes(q):
-wait_until(lambda: self.get_mn_probe_count(mn.node, q, True) == 4)
+self.wait_until(lambda: self.get_mn_probe_count(mn.node, q, True) == 4)

 self.log.info("Activating SPORK_21_QUORUM_ALL_CONNECTED")
 self.nodes[0].sporkupdate("SPORK_21_QUORUM_ALL_CONNECTED", 0)

@@ -121,7 +121,7 @@ class LLMQConnections(DashTestFramework):
 for mn in self.mninfo:
 mn.node.setnetworkactive(False)
 for mn in self.mninfo:
-wait_until(lambda: len(mn.node.getpeerinfo()) == 0)
+self.wait_until(lambda: len(mn.node.getpeerinfo()) == 0)
 for mn in self.mninfo:
 mn.node.setnetworkactive(True)
 self.bump_mocktime(60)

@@ -138,7 +138,7 @@ class LLMQConnections(DashTestFramework):
 # wait for ping/pong so that we can be sure that spork propagation works
 time.sleep(1) # needed to make sure we don't check before the ping is actually sent (fPingQueued might be true but SendMessages still not called)
 for i in range(1, len(self.nodes)):
-wait_until(lambda: all('pingwait' not in peer for peer in self.nodes[i].getpeerinfo()))
+self.wait_until(lambda: all('pingwait' not in peer for peer in self.nodes[i].getpeerinfo()))

 def get_mn_connection_count(self, node):
 peers = node.getpeerinfo()
@@ -6,7 +6,7 @@
 import time
 from test_framework.p2p import logger
 from test_framework.test_framework import DashTestFramework
-from test_framework.util import force_finish_mnsync, wait_until
+from test_framework.util import force_finish_mnsync

 '''
 feature_llmq_data_recovery.py

@@ -39,7 +39,7 @@ class QuorumDataRecoveryTest(DashTestFramework):
 args.append('-reindex')
 bb_hash = mn.node.getbestblockhash()
 self.restart_node(mn.node.index, args)
-wait_until(lambda: mn.node.getbestblockhash() == bb_hash)
+self.wait_until(lambda: mn.node.getbestblockhash() == bb_hash)
 else:
 self.restart_node(mn.node.index, args)
 force_finish_mnsync(mn.node)
@@ -17,7 +17,7 @@ from test_framework.messages import CBlock, CBlockHeader, CCbTx, CMerkleBlock, F
 QuorumId, ser_uint256
 from test_framework.test_framework import DashTestFramework
 from test_framework.util import (
-assert_equal, p2p_port, wait_until
+assert_equal, p2p_port
 )

@@ -35,7 +35,7 @@ class TestP2PConn(P2PInterface):
 def wait_for_mnlistdiff(self, timeout=30):
 def received_mnlistdiff():
 return self.last_mnlistdiff is not None
-return wait_until(received_mnlistdiff, timeout=timeout)
+return self.wait_until(received_mnlistdiff, timeout=timeout)

 def getmnlistdiff(self, baseBlockHash, blockHash):
 msg = msg_getmnlistd(baseBlockHash, blockHash)

@@ -41,7 +41,7 @@ class TestP2PConn(P2PInterface):
 def wait_for_mnlistdiff(self, timeout=30):
 def received_mnlistdiff():
 return self.last_mnlistdiff is not None
-return wait_until(received_mnlistdiff, timeout=timeout)
+return self.wait_until(received_mnlistdiff, timeout=timeout)

 def getmnlistdiff(self, baseBlockHash, blockHash):
 msg = msg_getmnlistd(baseBlockHash, blockHash)
@@ -52,7 +52,7 @@ class LLMQSigningTest(DashTestFramework):
 return True

 def wait_for_sigs(hasrecsigs, isconflicting1, isconflicting2, timeout):
-wait_until(lambda: check_sigs(hasrecsigs, isconflicting1, isconflicting2), timeout = timeout)
+self.wait_until(lambda: check_sigs(hasrecsigs, isconflicting1, isconflicting2), timeout = timeout)

 def assert_sigs_nochange(hasrecsigs, isconflicting1, isconflicting2, timeout):
 assert not wait_until(lambda: not check_sigs(hasrecsigs, isconflicting1, isconflicting2), timeout = timeout, do_assert = False)

@@ -178,7 +178,7 @@ class LLMQSigningTest(DashTestFramework):
 q = self.nodes[0].quorum('selectquorum', 104, id)
 mn = self.get_mninfo(q['recoveryMembers'][0])
 mn.node.setnetworkactive(False)
-wait_until(lambda: mn.node.getconnectioncount() == 0)
+self.wait_until(lambda: mn.node.getconnectioncount() == 0)
 for i in range(4):
 self.mninfo[i].node.quorum("sign", 104, id, msgHash)
 assert_sigs_nochange(False, False, False, 3)

@@ -190,7 +190,7 @@ class LLMQSigningTest(DashTestFramework):
 self.bump_mocktime(1) # need this to bypass quorum connection retry timeout
 wait_until(lambda: mn.node.getconnectioncount() == self.llmq_size, timeout=10, sleep=2)
 mn.node.ping()
-wait_until(lambda: all('pingwait' not in peer for peer in mn.node.getpeerinfo()))
+self.wait_until(lambda: all('pingwait' not in peer for peer in mn.node.getpeerinfo()))
 # Let 2 seconds pass so that the next node is used for recovery, which should succeed
 self.bump_mocktime(2)
 wait_for_sigs(True, False, True, 2)
@@ -13,7 +13,7 @@ Checks simple PoSe system based on LLMQ commitments
 import time

 from test_framework.test_framework import DashTestFramework
-from test_framework.util import assert_equal, force_finish_mnsync, p2p_port, wait_until
+from test_framework.util import assert_equal, force_finish_mnsync, p2p_port


 class LLMQSimplePoSeTest(DashTestFramework):

@@ -65,7 +65,7 @@ class LLMQSimplePoSeTest(DashTestFramework):

 def isolate_mn(self, mn):
 mn.node.setnetworkactive(False)
-wait_until(lambda: mn.node.getconnectioncount() == 0)
+self.wait_until(lambda: mn.node.getconnectioncount() == 0)
 return True, True

 def close_mn_port(self, mn):

@@ -205,7 +205,7 @@ class LLMQSimplePoSeTest(DashTestFramework):
 # Isolate and re-connect all MNs (otherwise there might be open connections with no MNAUTH for MNs which were banned before)
 for mn in self.mninfo:
 mn.node.setnetworkactive(False)
-wait_until(lambda: mn.node.getconnectioncount() == 0)
+self.wait_until(lambda: mn.node.getconnectioncount() == 0)
 mn.node.setnetworkactive(True)
 force_finish_mnsync(mn.node)
 self.connect_nodes(mn.node.index, 0)
@@ -11,7 +11,6 @@ from test_framework.test_framework import DashTestFramework
 from test_framework.util import (
 assert_equal,
 force_finish_mnsync,
-wait_until,
 )

 # Linux allow all characters other than \x00

@@ -70,7 +69,7 @@ class NotificationsTest(DashTestFramework):
 blocks = self.nodes[1].generatetoaddress(block_count, self.nodes[1].getnewaddress() if self.is_wallet_compiled() else ADDRESS_BCRT1_UNSPENDABLE)

 # wait at most 10 seconds for expected number of files before reading the content
-wait_until(lambda: len(os.listdir(self.blocknotify_dir)) == block_count, timeout=10)
+self.wait_until(lambda: len(os.listdir(self.blocknotify_dir)) == block_count, timeout=10)

 # directory content should equal the generated blocks hashes
 assert_equal(sorted(blocks), sorted(os.listdir(self.blocknotify_dir)))

@@ -78,7 +77,7 @@ class NotificationsTest(DashTestFramework):
 if self.is_wallet_compiled():
 self.log.info("test -walletnotify")
 # wait at most 10 seconds for expected number of files before reading the content
-wait_until(lambda: len(os.listdir(self.walletnotify_dir)) == block_count, timeout=10)
+self.wait_until(lambda: len(os.listdir(self.walletnotify_dir)) == block_count, timeout=10)

 # directory content should equal the generated transaction hashes
 txids_rpc = list(map(lambda t: notify_outputname(self.wallet, t['txid']), self.nodes[1].listtransactions("*", block_count)))

@@ -94,7 +93,7 @@ class NotificationsTest(DashTestFramework):
 force_finish_mnsync(self.nodes[1])
 self.connect_nodes(0, 1)

-wait_until(lambda: len(os.listdir(self.walletnotify_dir)) == block_count, timeout=10)
+self.wait_until(lambda: len(os.listdir(self.walletnotify_dir)) == block_count, timeout=10)

 # directory content should equal the generated transaction hashes
 txids_rpc = list(map(lambda t: notify_outputname(self.wallet, t['txid']), self.nodes[1].listtransactions("*", block_count)))

@@ -137,7 +136,7 @@ class NotificationsTest(DashTestFramework):
 self.wait_for_instantlock(txid, self.nodes[1])

 # wait at most 10 seconds for expected number of files before reading the content
-wait_until(lambda: len(os.listdir(self.instantsendnotify_dir)) == tx_count, timeout=10)
+self.wait_until(lambda: len(os.listdir(self.instantsendnotify_dir)) == tx_count, timeout=10)

 # directory content should equal the generated transaction hashes
 txids_rpc = list(map(lambda t: notify_outputname(self.wallet, t['txid']), self.nodes[1].listtransactions("*", tx_count)))
@@ -14,7 +14,11 @@ from test_framework.blocktools import create_coinbase
 from test_framework.messages import CBlock, ToHex
 from test_framework.script import CScript, OP_RETURN, OP_NOP
 from test_framework.test_framework import BitcoinTestFramework
-from test_framework.util import assert_equal, assert_greater_than, assert_raises_rpc_error, wait_until
+from test_framework.util import (
+assert_equal,
+assert_greater_than,
+assert_raises_rpc_error,
+)

 # Rescans start at the earliest block up to 2 hours before a key timestamp, so
 # the manual prune RPC avoids pruning blocks in the same window to be

@@ -156,7 +160,7 @@ class PruneTest(BitcoinTestFramework):
 mine_large_blocks(self.nodes[0], 25)

 # Wait for blk00000.dat to be pruned
-wait_until(lambda: not os.path.isfile(os.path.join(self.prunedir, "blk00000.dat")), timeout=30)
+self.wait_until(lambda: not os.path.isfile(os.path.join(self.prunedir, "blk00000.dat")), timeout=30)

 self.log.info("Success")
 usage = calc_usage(self.prunedir)

@@ -272,7 +276,7 @@ class PruneTest(BitcoinTestFramework):

 self.log.info("Verify node 2 reorged back to the main chain, some blocks of which it had to redownload")
 # Wait for Node 2 to reorg to proper height
-wait_until(lambda: self.nodes[2].getblockcount() >= goalbestheight, timeout=900)
+self.wait_until(lambda: self.nodes[2].getblockcount() >= goalbestheight, timeout=900)
 assert_equal(self.nodes[2].getbestblockhash(), goalbesthash)
 # Verify we can now have the data for a block previously pruned
 assert_equal(self.nodes[2].getblock(self.forkhash)["height"], self.forkheight)
@@ -5,7 +5,7 @@
 """Test dashd shutdown."""

 from test_framework.test_framework import BitcoinTestFramework
-from test_framework.util import assert_equal, get_rpc_proxy, wait_until
+from test_framework.util import assert_equal, get_rpc_proxy
 from threading import Thread

 def test_long_call(node):

@@ -25,7 +25,7 @@ class ShutdownTest(BitcoinTestFramework):
 node.getblockcount()
 Thread(target=test_long_call, args=(node,)).start()
 # Wait until the server is executing the above `waitfornewblock`.
-wait_until(lambda: len(self.nodes[0].getrpcinfo()['active_commands']) == 2)
+self.wait_until(lambda: len(self.nodes[0].getrpcinfo()['active_commands']) == 2)
 # Wait 1 second after requesting shutdown but not before the `stop` call
 # finishes. This is to ensure event loop waits for current connections
 # to close.
|
@ -14,7 +14,6 @@ from test_framework.blocktools import create_block, create_coinbase
|
||||
from test_framework.messages import msg_block
|
||||
from test_framework.p2p import p2p_lock, P2PInterface
|
||||
from test_framework.test_framework import BitcoinTestFramework
|
||||
from test_framework.util import wait_until
|
||||
|
||||
VB_PERIOD = 144 # versionbits period length for regtest
|
||||
VB_THRESHOLD = 108 # versionbits activation threshold for regtest
|
||||
@ -91,14 +90,14 @@ class VersionBitsWarningTest(BitcoinTestFramework):
|
||||
|
||||
# Generating one block guarantees that we'll get out of IBD
|
||||
node.generatetoaddress(1, node_deterministic_address)
|
||||
wait_until(lambda: not node.getblockchaininfo()['initialblockdownload'], timeout=10, lock=p2p_lock)
|
||||
self.wait_until(lambda: not node.getblockchaininfo()['initialblockdownload'], timeout=10, lock=p2p_lock)
|
||||
# Generating one more block will be enough to generate an error.
|
||||
node.generatetoaddress(1, node_deterministic_address)
|
||||
# Check that get*info() shows the versionbits unknown rules warning
|
||||
assert WARN_UNKNOWN_RULES_ACTIVE in node.getmininginfo()["warnings"]
|
||||
assert WARN_UNKNOWN_RULES_ACTIVE in node.getnetworkinfo()["warnings"]
|
||||
# Check that the alert file shows the versionbits unknown rules warning
|
||||
wait_until(lambda: self.versionbits_in_alert_file(), timeout=60)
|
||||
self.wait_until(lambda: self.versionbits_in_alert_file())
|
||||
|
||||
if __name__ == '__main__':
|
||||
VersionBitsWarningTest().main()
|
||||
|
@ -14,7 +14,6 @@ from test_framework.util import (
|
||||
assert_equal,
|
||||
assert_raises_rpc_error,
|
||||
satoshi_round,
|
||||
wait_until,
|
||||
)
|
||||
|
||||
# default limits
|
||||
@ -265,8 +264,8 @@ class MempoolPackagesTest(BitcoinTestFramework):
|
||||
# - txs from previous ancestor test (-> custom ancestor limit)
|
||||
# - parent tx for descendant test
|
||||
# - txs chained off parent tx (-> custom descendant limit)
|
||||
wait_until(lambda: len(self.nodes[1].getrawmempool(False)) ==
|
||||
MAX_ANCESTORS_CUSTOM + 1 + MAX_DESCENDANTS_CUSTOM, timeout=10)
|
||||
self.wait_until(lambda: len(self.nodes[1].getrawmempool(False)) ==
|
||||
MAX_ANCESTORS_CUSTOM + 1 + MAX_DESCENDANTS_CUSTOM, timeout=10)
|
||||
mempool0 = self.nodes[0].getrawmempool(False)
|
||||
mempool1 = self.nodes[1].getrawmempool(False)
|
||||
assert set(mempool1).issubset(set(mempool0))
|
||||
|
@@ -173,7 +173,7 @@ class MempoolPersistTest(BitcoinTestFramework):
 # check that txn gets broadcast due to unbroadcast logic
 # conn = node0.add_p2p_connection(P2PTxInvStore())
 # node0.mockscheduler(16*60) # 15 min + 1 for buffer
-# wait_until(lambda: len(conn.get_invs()) == 1)
+# self.wait_until(lambda: len(conn.get_invs()) == 1)

 if __name__ == '__main__':
 MempoolPersistTest().main()
@@ -13,7 +13,7 @@ from test_framework.messages import (
 )
 from test_framework.p2p import P2PInterface
 from test_framework.test_framework import BitcoinTestFramework
-from test_framework.util import assert_equal, wait_until
+from test_framework.util import assert_equal


 class AddrReceiver(P2PInterface):

@@ -30,7 +30,7 @@ class AddrReceiver(P2PInterface):
 self.addrv2_received_and_checked = True

 def wait_for_addrv2(self):
-wait_until(lambda: "addrv2" in self.last_message)
+self.wait_until(lambda: "addrv2" in self.last_message)


 class AddrTest(BitcoinTestFramework):
@@ -22,7 +22,6 @@ from test_framework.p2p import P2PInterface
 from test_framework.test_framework import BitcoinTestFramework
 from test_framework.util import (
 assert_equal,
-wait_until,
 )

 class CFiltersClient(P2PInterface):

@@ -63,11 +62,11 @@ class CompactFiltersTest(BitcoinTestFramework):
 self.disconnect_nodes(0, 1)

 self.nodes[0].generate(1)
-wait_until(lambda: self.nodes[0].getblockcount() == 1000)
+self.wait_until(lambda: self.nodes[0].getblockcount() == 1000)
 stale_block_hash = self.nodes[0].getblockhash(1000)

 self.nodes[1].generate(1001)
-wait_until(lambda: self.nodes[1].getblockcount() == 2000)
+self.wait_until(lambda: self.nodes[1].getblockcount() == 2000)

 # Check that nodes have signalled NODE_COMPACT_FILTERS correctly.
 assert node0.nServices & NODE_COMPACT_FILTERS != 0
@@ -12,7 +12,7 @@ from test_framework.messages import BlockTransactions, BlockTransactionsRequest,
 from test_framework.p2p import p2p_lock, P2PInterface
 from test_framework.script import CScript, OP_TRUE, OP_DROP
 from test_framework.test_framework import BitcoinTestFramework
-from test_framework.util import assert_equal, wait_until
+from test_framework.util import assert_equal

 # TestP2PConn: A peer we use to send messages to dashd, and store responses.
 class TestP2PConn(P2PInterface):

@@ -71,7 +71,7 @@ class TestP2PConn(P2PInterface):
 def request_headers_and_sync(self, locator, hashstop=0):
 self.clear_block_announcement()
 self.get_headers(locator, hashstop)
-wait_until(self.received_block_announcement, timeout=30, lock=p2p_lock)
+self.wait_until(self.received_block_announcement, timeout=30)
 self.clear_block_announcement()

 # Block until a block announcement for a particular block hash is

@@ -79,7 +79,7 @@ class TestP2PConn(P2PInterface):
 def wait_for_block_announcement(self, block_hash, timeout=30):
 def received_hash():
 return (block_hash in self.announced_blockhashes)
-wait_until(received_hash, timeout=timeout, lock=p2p_lock)
+self.wait_until(received_hash, timeout=timeout)

 def send_await_disconnect(self, message, timeout=30):
 """Sends a message to the node and wait for disconnect.

@@ -87,7 +87,7 @@ class TestP2PConn(P2PInterface):
 This is used when we want to send a message into the node that we expect
 will get us disconnected, eg an invalid block."""
 self.send_message(message)
-wait_until(lambda: not self.is_connected, timeout=timeout, lock=p2p_lock)
+self.wait_for_disconnect(timeout)

 class CompactBlocksTest(BitcoinTestFramework):
 def set_test_params(self):

@@ -149,7 +149,7 @@ class CompactBlocksTest(BitcoinTestFramework):
 # Make sure we get a SENDCMPCT message from our peer
 def received_sendcmpct():
 return (len(test_node.last_sendcmpct) > 0)
-wait_until(received_sendcmpct, timeout=30, lock=p2p_lock)
+test_node.wait_until(received_sendcmpct, timeout=30)
 with p2p_lock:
 # Check that the first version received is the preferred one
 assert_equal(test_node.last_sendcmpct[0].version, preferred_version)

@@ -272,7 +272,7 @@ class CompactBlocksTest(BitcoinTestFramework):
 block.rehash()

 # Wait until the block was announced (via compact blocks)
-wait_until(lambda: "cmpctblock" in test_node.last_message, timeout=30, lock=p2p_lock)
+test_node.wait_until(lambda: "cmpctblock" in test_node.last_message, timeout=30)

 # Now fetch and check the compact block
 header_and_shortids = None

@@ -287,7 +287,7 @@ class CompactBlocksTest(BitcoinTestFramework):
 inv = CInv(MSG_CMPCT_BLOCK, block_hash) # 20 == "CompactBlock"
 test_node.send_message(msg_getdata([inv]))

-wait_until(lambda: "cmpctblock" in test_node.last_message, timeout=30, lock=p2p_lock)
+test_node.wait_until(lambda: "cmpctblock" in test_node.last_message, timeout=30)

 # Now fetch and check the compact block
 header_and_shortids = None

@@ -345,7 +345,7 @@ class CompactBlocksTest(BitcoinTestFramework):
 if announce == "inv":
 test_node.send_message(msg_inv([CInv(MSG_BLOCK, block.sha256)]))
 getheaders_key = "getheaders2" if test_node.nServices & NODE_HEADERS_COMPRESSED else "getheaders"
-wait_until(lambda: getheaders_key in test_node.last_message, timeout=30, lock=p2p_lock)
+test_node.wait_until(lambda: getheaders_key in test_node.last_message, timeout=30)
 test_node.send_header_for_blocks([block])
 else:
 test_node.send_header_for_blocks([block])

@@ -538,7 +538,7 @@ class CompactBlocksTest(BitcoinTestFramework):
 num_to_request = random.randint(1, len(block.vtx))
 msg.block_txn_request.from_absolute(sorted(random.sample(range(len(block.vtx)), num_to_request)))
 test_node.send_message(msg)
-wait_until(lambda: "blocktxn" in test_node.last_message, timeout=10, lock=p2p_lock)
+test_node.wait_until(lambda: "blocktxn" in test_node.last_message, timeout=10)

 [tx.calc_sha256() for tx in block.vtx]
 with p2p_lock:

@@ -572,20 +572,20 @@ class CompactBlocksTest(BitcoinTestFramework):
 for _ in range(MAX_CMPCTBLOCK_DEPTH + 1):
 test_node.clear_block_announcement()
 new_blocks.append(node.generate(1)[0])
-wait_until(test_node.received_block_announcement, timeout=30, lock=p2p_lock)
+test_node.wait_until(test_node.received_block_announcement, timeout=30)

 test_node.clear_block_announcement()
 test_node.send_message(msg_getdata([CInv(MSG_CMPCT_BLOCK, int(new_blocks[0], 16))]))
-wait_until(lambda: "cmpctblock" in test_node.last_message, timeout=30, lock=p2p_lock)
+test_node.wait_until(lambda: "cmpctblock" in test_node.last_message, timeout=30)

 test_node.clear_block_announcement()
 node.generate(1)
-wait_until(test_node.received_block_announcement, timeout=30, lock=p2p_lock)
+test_node.wait_until(test_node.received_block_announcement, timeout=30)
 test_node.clear_block_announcement()
 with p2p_lock:
 test_node.last_message.pop("block", None)
 test_node.send_message(msg_getdata([CInv(MSG_CMPCT_BLOCK, int(new_blocks[0], 16))]))
-wait_until(lambda: "block" in test_node.last_message, timeout=30, lock=p2p_lock)
+test_node.wait_until(lambda: "block" in test_node.last_message, timeout=30)
 with p2p_lock:
 test_node.last_message["block"].block.calc_sha256()
 assert_equal(test_node.last_message["block"].block.sha256, int(new_blocks[0], 16))

@@ -631,7 +631,7 @@ class CompactBlocksTest(BitcoinTestFramework):
 node.submitblock(ToHex(block))

 for l in listeners:
-wait_until(lambda: "cmpctblock" in l.last_message, timeout=30, lock=p2p_lock)
+l.wait_until(lambda: "cmpctblock" in l.last_message, timeout=30)
 with p2p_lock:
 for l in listeners:
 assert "cmpctblock" in l.last_message
@@ -8,7 +8,6 @@ from test_framework.test_framework import BitcoinTestFramework
 from test_framework.util import (
 assert_equal,
 assert_raises_rpc_error,
-wait_until,
 )

 class DisconnectBanTest(BitcoinTestFramework):

@@ -26,7 +25,7 @@ class DisconnectBanTest(BitcoinTestFramework):
 self.log.info("setban: successfully ban single IP address")
 assert_equal(len(self.nodes[1].getpeerinfo()), 2) # node1 should have 2 connections to node0 at this point
 self.nodes[1].setban(subnet="127.0.0.1", command="add")
-wait_until(lambda: len(self.nodes[1].getpeerinfo()) == 0, timeout=10)
+self.wait_until(lambda: len(self.nodes[1].getpeerinfo()) == 0, timeout=10)
 assert_equal(len(self.nodes[1].getpeerinfo()), 0) # all nodes must be disconnected at this point
 assert_equal(len(self.nodes[1].listbanned()), 1)

@@ -61,7 +60,7 @@ class DisconnectBanTest(BitcoinTestFramework):
 listBeforeShutdown = self.nodes[1].listbanned()
 assert_equal("192.168.0.1/32", listBeforeShutdown[2]['address'])
 self.bump_mocktime(2)
-wait_until(lambda: len(self.nodes[1].listbanned()) == 3, timeout=10)
+self.wait_until(lambda: len(self.nodes[1].listbanned()) == 3, timeout=10)

 self.restart_node(1)

@@ -89,7 +88,7 @@ class DisconnectBanTest(BitcoinTestFramework):
 self.log.info("disconnectnode: successfully disconnect node by address")
 address1 = self.nodes[0].getpeerinfo()[0]['addr']
 self.nodes[0].disconnectnode(address=address1)
-wait_until(lambda: len(self.nodes[0].getpeerinfo()) == 1, timeout=10)
+self.wait_until(lambda: len(self.nodes[0].getpeerinfo()) == 1, timeout=10)
 assert not [node for node in self.nodes[0].getpeerinfo() if node['addr'] == address1]

 self.log.info("disconnectnode: successfully reconnect node")

@@ -100,7 +99,7 @@ class DisconnectBanTest(BitcoinTestFramework):
 self.log.info("disconnectnode: successfully disconnect node by node id")
 id1 = self.nodes[0].getpeerinfo()[0]['id']
 self.nodes[0].disconnectnode(nodeid=id1)
-wait_until(lambda: len(self.nodes[0].getpeerinfo()) == 1, timeout=10)
+self.wait_until(lambda: len(self.nodes[0].getpeerinfo()) == 1, timeout=10)
 assert not [node for node in self.nodes[0].getpeerinfo() if node['id'] == id1]

 if __name__ == '__main__':
@@ -19,7 +19,7 @@ from test_framework.blocktools import COINBASE_MATURITY, create_block, create_co
 from test_framework.messages import CTransaction, FromHex, msg_pong, msg_tx
 from test_framework.p2p import P2PDataStore, P2PInterface
 from test_framework.test_framework import BitcoinTestFramework
-from test_framework.util import assert_equal, wait_until
+from test_framework.util import assert_equal


 class SlowP2PDataStore(P2PDataStore):

@@ -92,7 +92,7 @@ class P2PEvict(BitcoinTestFramework):
 for _ in range(8):
 fastpeer = node.add_p2p_connection(P2PInterface())
 current_peer += 1
-wait_until(lambda: "ping" in fastpeer.last_message, timeout=10)
+self.wait_until(lambda: "ping" in fastpeer.last_message, timeout=10)

 # Make sure by asking the node what the actual min pings are
 peerinfo = node.getpeerinfo()
@@ -21,9 +21,9 @@ from test_framework.p2p import (
 from test_framework.test_framework import BitcoinTestFramework
 from test_framework.util import (
 assert_equal,
-wait_until,
 )

 class P2PFingerprintTest(BitcoinTestFramework):

 def set_test_params(self):

@@ -102,12 +102,12 @@ class P2PFingerprintTest(BitcoinTestFramework):
 # Check that getdata request for stale block succeeds
 self.send_block_request(stale_hash, node0)
 test_function = lambda: self.last_block_equals(stale_hash, node0)
-wait_until(test_function, timeout=3)
+self.wait_until(test_function, timeout=3)

 # Check that getheader request for stale block header succeeds
 self.send_header_request(stale_hash, node0)
 test_function = lambda: self.last_header_equals(stale_hash, node0)
-wait_until(test_function, timeout=3)
+self.wait_until(test_function, timeout=3)

 # Longest chain is extended so stale is much older than chain tip
 self.nodes[0].setmocktime(0)

@@ -138,11 +138,11 @@ class P2PFingerprintTest(BitcoinTestFramework):

 self.send_block_request(block_hash, node0)
 test_function = lambda: self.last_block_equals(block_hash, node0)
-wait_until(test_function, timeout=3)
+self.wait_until(test_function, timeout=3)

 self.send_header_request(block_hash, node0)
 test_function = lambda: self.last_header_equals(block_hash, node0)
-wait_until(test_function, timeout=3)
+self.wait_until(test_function, timeout=3)


 if __name__ == '__main__':
@@ -21,7 +21,6 @@ from test_framework.p2p import (
 from test_framework.test_framework import BitcoinTestFramework
 from test_framework.util import (
 assert_equal,
-wait_until,
 )

 MSG_LIMIT = 3 * 1024 * 1024 # 3MB, per MAX_PROTOCOL_MESSAGE_LENGTH

@@ -66,7 +65,7 @@ class InvalidMessagesTest(BitcoinTestFramework):
 before = int(self.nodes[0].getnettotals()['totalbytesrecv'])
 conn.send_raw_message(msg[:cut_pos])
 # Wait until node has processed the first half of the message
-wait_until(lambda: int(self.nodes[0].getnettotals()['totalbytesrecv']) != before)
+self.wait_until(lambda: int(self.nodes[0].getnettotals()['totalbytesrecv']) != before)
 middle = int(self.nodes[0].getnettotals()['totalbytesrecv'])
 # If this assert fails, we've hit an unlikely race
 # where the test framework sent a message in between the two halves
@@ -18,7 +18,6 @@ from test_framework.p2p import P2PDataStore
 from test_framework.test_framework import BitcoinTestFramework
 from test_framework.util import (
 assert_equal,
-wait_until,
 )
 from data import invalid_txs

@@ -182,7 +181,7 @@ class InvalidTxRequestTest(BitcoinTestFramework):
 # This TX has appeared in a block instead of being broadcasted via the mempool
 expected_mempool.remove(tx_withhold.hash)

-wait_until(lambda: 1 == len(node.getpeerinfo()), timeout=12) # p2ps[1] is no longer connected
+self.wait_until(lambda: 1 == len(node.getpeerinfo()), timeout=12) # p2ps[1] is no longer connected
 assert_equal(expected_mempool, set(node.getrawmempool()))

 self.log.info('Test orphan pool overflow')
@@ -24,7 +24,6 @@ from test_framework.test_framework import BitcoinTestFramework
 from test_framework.util import (
 assert_equal,
 assert_greater_than_or_equal,
-wait_until,
 )

 banscore = 10

@@ -121,9 +120,9 @@ class P2PLeakTest(BitcoinTestFramework):
 # verack, since we never sent one
 no_verack_idlenode.wait_for_verack()

-wait_until(lambda: no_version_bannode.ever_connected, timeout=10, lock=p2p_lock)
-wait_until(lambda: no_version_idlenode.ever_connected, timeout=10, lock=p2p_lock)
-wait_until(lambda: no_verack_idlenode.version_received, timeout=10, lock=p2p_lock)
+self.wait_until(lambda: no_version_bannode.ever_connected, timeout=10, lock=p2p_lock)
+self.wait_until(lambda: no_version_idlenode.ever_connected, timeout=10, lock=p2p_lock)
+self.wait_until(lambda: no_verack_idlenode.version_received, timeout=10, lock=p2p_lock)

 # Mine a block and make sure that it's not sent to the connected nodes
 self.nodes[0].generatetoaddress(1, self.nodes[0].get_deterministic_priv_key().address)

@@ -156,7 +155,7 @@ class P2PLeakTest(BitcoinTestFramework):
 p2p_old_node = self.nodes[0].add_p2p_connection(P2PInterface(), send_version=False, wait_for_verack=False)
 old_version_msg = msg_version()
 old_version_msg.nVersion = 31799
-wait_until(lambda: p2p_old_node.is_connected)
+self.wait_until(lambda: p2p_old_node.is_connected)
 with self.nodes[0].assert_debug_log(['peer=4 using obsolete version 31799; disconnecting']):
 p2p_old_node.send_message(old_version_msg)
 p2p_old_node.wait_for_disconnect()
@@ -9,9 +9,9 @@ and that it responds to getdata requests for blocks correctly:
 - send a block within 288 + 2 of the tip
 - disconnect peers who request blocks older than that."""
 from test_framework.messages import CInv, MSG_BLOCK, msg_getdata, NODE_BLOOM, NODE_NETWORK_LIMITED, NODE_HEADERS_COMPRESSED, msg_verack
-from test_framework.p2p import P2PInterface, p2p_lock
+from test_framework.p2p import P2PInterface
 from test_framework.test_framework import BitcoinTestFramework
-from test_framework.util import assert_equal, wait_until
+from test_framework.util import assert_equal

 class P2PIgnoreInv(P2PInterface):
 firstAddrnServices = 0

@@ -22,7 +22,7 @@ class P2PIgnoreInv(P2PInterface):
 self.firstAddrnServices = message.addrs[0].nServices
 def wait_for_addr(self, timeout=5):
 test_function = lambda: self.last_message.get("addr")
-wait_until(test_function, timeout=timeout, lock=p2p_lock)
+self.wait_until(test_function, timeout=timeout)
 def send_getdata_for_block(self, blockhash):
 getdata_request = msg_getdata()
 getdata_request.inv.append(CInv(MSG_BLOCK, int(blockhash, 16)))
@@ -22,7 +22,6 @@ from test_framework.test_framework import BitcoinTestFramework
 from test_framework.util import (
 assert_equal,
 p2p_port,
-wait_until,
 )


@@ -139,7 +138,7 @@ class P2PPermissionsTests(BitcoinTestFramework):

 with self.nodes[1].assert_debug_log(["Force relaying tx {} from peer=0".format(txid)]):
 p2p_rebroadcast_wallet.send_txs_and_test([tx], self.nodes[1])
-wait_until(in_mempool)
+self.wait_until(in_mempool)

 self.log.debug("Check that node[1] will not send an invalid tx to node[0]")
 tx.vout[0].nValue += 1

@@ -338,7 +338,7 @@ class QuorumDataMessagesTest(DashTestFramework):
 # mn1 should still have a score of 75
 wait_for_banscore(mn3.node, id_p2p_mn3_1, 75)
 # mn2 should be "banned" now
-wait_until(lambda: not p2p_mn3_2.is_connected, timeout=10)
+self.wait_until(lambda: not p2p_mn3_2.is_connected, timeout=10)
 mn3.node.disconnect_p2ps()

 # Test that QWATCH connections are also allowed to query data but all
@@ -103,7 +103,6 @@ from test_framework.p2p import (
 from test_framework.test_framework import BitcoinTestFramework
 from test_framework.util import (
 assert_equal,
-wait_until,
 )

 DIRECT_FETCH_RESPONSE_TIME = 0.05

@@ -146,7 +145,7 @@ class BaseNode(P2PInterface):

 def wait_for_block_announcement(self, block_hash, timeout=60):
 test_function = lambda: self.last_blockhash_announced == block_hash
-wait_until(test_function, timeout=timeout, lock=p2p_lock)
+self.wait_until(test_function, timeout=timeout)

 def on_inv(self, message):
 self.block_announced = True

@@ -173,7 +172,7 @@ class BaseNode(P2PInterface):
 """Test whether the last headers announcements received are right.
 Headers may be announced across more than one message."""
 test_function = lambda: (len(self.recent_headers_announced) >= len(headers))
-wait_until(test_function, timeout=60, lock=p2p_lock)
+self.wait_until(test_function)
 with p2p_lock:
 assert_equal(self.recent_headers_announced, headers)
 self.block_announced = False

@@ -185,7 +184,7 @@ class BaseNode(P2PInterface):
 inv should be a list of block hashes."""

 test_function = lambda: self.block_announced
-wait_until(test_function, timeout=60, lock=p2p_lock)
+self.wait_until(test_function)

 with p2p_lock:
 compare_inv = []

@@ -297,7 +296,7 @@ class SendHeadersTest(BitcoinTestFramework):
 test_node.send_header_for_blocks([new_block])
 test_node.wait_for_getdata([new_block.sha256])
 test_node.send_and_ping(msg_block(new_block)) # make sure this block is processed
-wait_until(lambda: inv_node.block_announced, timeout=60, lock=p2p_lock)
+inv_node.wait_until(lambda: inv_node.block_announced)
 inv_node.clear_block_announcements()
 test_node.clear_block_announcements()
@@ -30,7 +30,6 @@ from test_framework.p2p import (
 from test_framework.test_framework import BitcoinTestFramework
 from test_framework.util import (
 assert_equal,
-wait_until,
 )

 DIRECT_FETCH_RESPONSE_TIME = 0.05

@@ -77,11 +76,11 @@ class BaseNode(P2PInterface):
 return

 test_function = lambda: "getdata" in self.last_message and [inv.hash for inv in self.last_message["getdata"].inv] == hash_list
-wait_until(test_function, timeout=timeout, lock=p2p_lock)
+self.wait_until(test_function, timeout=timeout)

 def wait_for_block_announcement(self, block_hash, timeout=60):
 test_function = lambda: self.last_blockhash_announced == block_hash
-wait_until(test_function, timeout=timeout, lock=p2p_lock)
+self.wait_until(test_function, timeout=timeout)

 def on_inv(self, message):
 self.block_announced = True

@@ -107,7 +106,7 @@ class BaseNode(P2PInterface):
 """Test whether the last headers announcements received are right.
 Headers may be announced across more than one message."""
 test_function = lambda: (len(self.recent_headers_announced) >= len(headers))
-wait_until(test_function, timeout=60, lock=p2p_lock)
+self.wait_until(test_function, timeout=60)
 with p2p_lock:
 assert_equal(self.recent_headers_announced, headers)
 self.block_announced = False

@@ -119,7 +118,7 @@ class BaseNode(P2PInterface):
 inv should be a list of block hashes."""

 test_function = lambda: self.block_announced
-wait_until(test_function, timeout=60, lock=p2p_lock)
+self.wait_until(test_function, timeout=60)

 with p2p_lock:
 compare_inv = []

@@ -304,7 +303,7 @@ class SendHeadersTest(BitcoinTestFramework):
 test_node.send_header_for_blocks([new_block])
 test_node.wait_for_getdata([new_block.sha256])
 test_node.send_and_ping(msg_block(new_block)) # make sure this block is processed
-wait_until(lambda: inv_node.block_announced, timeout=60, lock=p2p_lock)
+self.wait_until(lambda: inv_node.block_announced, timeout=60)
 inv_node.clear_block_announcements()
 test_node.clear_block_announcements()
@@ -22,7 +22,6 @@ from test_framework.p2p import (
 from test_framework.test_framework import BitcoinTestFramework
 from test_framework.util import (
 assert_equal,
-wait_until,
 )
 from test_framework.address import ADDRESS_BCRT1_UNSPENDABLE

@@ -73,7 +72,7 @@ class TxDownloadTest(BitcoinTestFramework):

 while outstanding_peer_index:
 self.bump_mocktime(MAX_GETDATA_INBOUND_WAIT)
-wait_until(lambda: any(getdata_found(i) for i in outstanding_peer_index))
+self.wait_until(lambda: any(getdata_found(i) for i in outstanding_peer_index))
 for i in outstanding_peer_index:
 if getdata_found(i):
 outstanding_peer_index.remove(i)

@@ -136,21 +135,21 @@ class TxDownloadTest(BitcoinTestFramework):
 self.bump_mocktime(1)
 return p.tx_getdata_count >= target

-wait_until(lambda: wait_for_tx_getdata(MAX_GETDATA_IN_FLIGHT), lock=p2p_lock)
+p.wait_until(lambda: wait_for_tx_getdata(MAX_GETDATA_IN_FLIGHT))

 with p2p_lock:
 assert_equal(p.tx_getdata_count, MAX_GETDATA_IN_FLIGHT)

 self.log.info("Now check that if we send a NOTFOUND for a transaction, we'll get one more request")
 p.send_message(msg_notfound(vec=[CInv(t=1, h=txids[0])]))
-wait_until(lambda: wait_for_tx_getdata(MAX_GETDATA_IN_FLIGHT + 1), timeout=10, lock=p2p_lock)
+p.wait_until(lambda: wait_for_tx_getdata(MAX_GETDATA_IN_FLIGHT + 1), timeout=10)
 with p2p_lock:
 assert_equal(p.tx_getdata_count, MAX_GETDATA_IN_FLIGHT + 1)

 WAIT_TIME = TX_EXPIRY_INTERVAL // 2 + TX_EXPIRY_INTERVAL
 self.log.info("if we wait about {} minutes, we should eventually get more requests".format(WAIT_TIME / 60))
 self.bump_mocktime(WAIT_TIME)
-wait_until(lambda: wait_for_tx_getdata(MAX_GETDATA_IN_FLIGHT + 2))
+p.wait_until(lambda: wait_for_tx_getdata(MAX_GETDATA_IN_FLIGHT + 2))

 def test_spurious_notfound(self):
 self.log.info('Check that spurious notfound is ignored')
@@ -8,7 +8,6 @@ from test_framework.test_framework import BitcoinTestFramework
 from test_framework.address import ADDRESS_BCRT1_UNSPENDABLE_DESCRIPTOR
 from test_framework.util import (
 assert_equal,
-wait_until,
 )


@@ -56,9 +55,9 @@ class InvalidateTest(BitcoinTestFramework):
 self.log.info("..and then mine a block")
 self.nodes[2].generatetoaddress(1, self.nodes[2].get_deterministic_priv_key().address)
 self.log.info("Verify all nodes are at the right height")
-wait_until(lambda: self.nodes[2].getblockcount() == 3, timeout=5)
-wait_until(lambda: self.nodes[0].getblockcount() == 4, timeout=5)
-wait_until(lambda: self.nodes[1].getblockcount() == 4, timeout=5)
+self.wait_until(lambda: self.nodes[2].getblockcount() == 3, timeout=5)
+self.wait_until(lambda: self.nodes[0].getblockcount() == 4, timeout=5)
+self.wait_until(lambda: self.nodes[1].getblockcount() == 4, timeout=5)

 self.log.info("Make sure ResetBlockFailureFlags does the job correctly")
 self.restart_node(0, extra_args=["-checkblocks=5"])

@@ -83,7 +82,7 @@ class InvalidateTest(BitcoinTestFramework):

 assert_equal(self.nodes[1].getblockcount(), newheight)
 self.restart_node(1, extra_args=["-checkblocks=5"])
-wait_until(lambda: self.nodes[1].getblockcount() == newheight + 20)
+self.wait_until(lambda: self.nodes[1].getblockcount() == newheight + 20)
 assert_equal(tip, self.nodes[1].getbestblockhash())

 self.log.info("Verify that we reconsider all ancestors as well")
@@ -11,7 +11,6 @@ from test_framework.util import (
 assert_equal,
 assert_greater_than,
 assert_greater_than_or_equal,
-wait_until
 )

 from test_framework.authproxy import JSONRPCException

@@ -83,7 +82,7 @@ class RpcMiscTest(BitcoinTestFramework):

 # Restart the node with indices and wait for them to sync
 self.restart_node(0, ["-txindex", "-blockfilterindex", "-coinstatsindex"])
-wait_until(lambda: all(i["synced"] for i in node.getindexinfo().values()))
+self.wait_until(lambda: all(i["synced"] for i in node.getindexinfo().values()))

 # Returns a list of all running indices by default
 values = {"synced": True, "best_block_height": 200}
@@ -19,7 +19,6 @@ from test_framework.util import (
 assert_greater_than,
 assert_raises_rpc_error,
 p2p_port,
-wait_until,
 )


@@ -45,7 +44,7 @@ class NetTest(DashTestFramework):
 # Wait for one ping/pong to finish so that we can be sure that there is no chatter between nodes for some time
 # Especially the exchange of messages like getheaders and friends causes test failures here
 self.nodes[0].ping()
-wait_until(lambda: all(['pingtime' in n for n in self.nodes[0].getpeerinfo()]))
+self.wait_until(lambda: all(['pingtime' in n for n in self.nodes[0].getpeerinfo()]))
 self.log.info('Connect nodes both way')
 self.connect_nodes(0, 1)
 self.connect_nodes(1, 0)

@@ -87,8 +86,8 @@ class NetTest(DashTestFramework):
 # the bytes sent/received should change
 # note ping and pong are 32 bytes each
 self.nodes[0].ping()
-wait_until(lambda: (self.nodes[0].getnettotals()['totalbytessent'] >= net_totals_after['totalbytessent'] + 32 * 2), timeout=1)
-wait_until(lambda: (self.nodes[0].getnettotals()['totalbytesrecv'] >= net_totals_after['totalbytesrecv'] + 32 * 2), timeout=1)
+self.wait_until(lambda: (self.nodes[0].getnettotals()['totalbytessent'] >= net_totals_after['totalbytessent'] + 32 * 2), timeout=1)
+self.wait_until(lambda: (self.nodes[0].getnettotals()['totalbytesrecv'] >= net_totals_after['totalbytesrecv'] + 32 * 2), timeout=1)

 peer_info_after_ping = self.nodes[0].getpeerinfo()
 for before, after in zip(peer_info, peer_info_after_ping):

@@ -102,8 +101,8 @@ class NetTest(DashTestFramework):
 self.nodes[0].setnetworkactive(state=False)
 assert_equal(self.nodes[0].getnetworkinfo()['networkactive'], False)
 # Wait a bit for all sockets to close
-wait_until(lambda: self.nodes[0].getnetworkinfo()['connections'] == 0, timeout=3)
-wait_until(lambda: self.nodes[1].getnetworkinfo()['connections'] == 0, timeout=3)
+self.wait_until(lambda: self.nodes[0].getnetworkinfo()['connections'] == 0, timeout=3)
+self.wait_until(lambda: self.nodes[1].getnetworkinfo()['connections'] == 0, timeout=3)

 self.nodes[0].setnetworkactive(state=True)
 self.log.info('Connect nodes both way')

@@ -124,7 +123,7 @@ class NetTest(DashTestFramework):
 self.log.info('Test extended connections info')
 self.connect_nodes(1, 2)
 self.nodes[1].ping()
-wait_until(lambda: all(['pingtime' in n for n in self.nodes[1].getpeerinfo()]))
+self.wait_until(lambda: all(['pingtime' in n for n in self.nodes[1].getpeerinfo()]))
 assert_equal(self.nodes[1].getnetworkinfo()['connections'], 3)
 assert_equal(self.nodes[1].getnetworkinfo()['inboundconnections'], 1)
 assert_equal(self.nodes[1].getnetworkinfo()['outboundconnections'], 2)
@@ -5,7 +5,7 @@

 from test_framework.messages import CTransaction, FromHex, hash256, ser_compact_size, ser_string
 from test_framework.test_framework import DashTestFramework
-from test_framework.util import assert_raises_rpc_error, satoshi_round, wait_until
+from test_framework.util import assert_raises_rpc_error, satoshi_round

 '''
 rpc_verifyislock.py

@@ -51,7 +51,7 @@ class RPCVerifyISLockTest(DashTestFramework):
 self.wait_for_instantlock(txid, node)

 request_id = self.get_request_id(self.nodes[0].getrawtransaction(txid))
-wait_until(lambda: node.quorum("hasrecsig", 103, request_id, txid))
+self.wait_until(lambda: node.quorum("hasrecsig", 103, request_id, txid))

 rec_sig = node.quorum("getrecsig", 103, request_id, txid)['sig']
 assert node.verifyislock(request_id, txid, rec_sig)
@@ -229,6 +229,14 @@ def satoshi_round(amount):


 def wait_until(predicate, *, attempts=float('inf'), timeout=float('inf'), sleep=0.5, timeout_factor=1.0, lock=None, do_assert=True, allow_exception=False):
+"""Sleep until the predicate resolves to be True.
+
+Warning: Note that this method is not recommended to be used in tests as it is
+not aware of the context of the test framework. Using `wait_until()` counterpart
+from `BitcoinTestFramework` or `P2PInterface` class ensures an understandable
+amount of timeout and a common shared timeout_factor. Furthermore, `wait_until()`
+from `P2PInterface` class in `mininode.py` has a preset lock.
+"""
 if attempts == float('inf') and timeout == float('inf'):
 timeout = 60
 timeout = timeout * timeout_factor
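For reference, a minimal, hypothetical test fragment (not part of this commit) showing the usage that the docstring above recommends: call `self.wait_until()` on the test object, or `wait_until()` on a `P2PInterface` peer, instead of the helper in `test_framework.util`. The class and addresses here are illustrative only.

```python
#!/usr/bin/env python3
# Hypothetical example, not part of this diff.
from test_framework.test_framework import BitcoinTestFramework


class WaitUntilUsageExample(BitcoinTestFramework):
    def set_test_params(self):
        self.num_nodes = 1

    def run_test(self):
        node = self.nodes[0]
        node.generatetoaddress(1, node.get_deterministic_priv_key().address)
        # Framework-level wait: default timeout and the shared timeout_factor apply.
        self.wait_until(lambda: node.getblockcount() >= 1)
        # Peer-level wait (sketch): P2PInterface.wait_until() additionally holds
        # p2p_lock while evaluating the predicate, e.g.
        #   peer = node.add_p2p_connection(P2PInterface())
        #   peer.wait_until(lambda: peer.last_message.get("pong"))


if __name__ == '__main__':
    WaitUntilUsageExample().main()
```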
@@ -7,9 +7,9 @@ import time

 from test_framework.blocktools import create_block, create_coinbase
 from test_framework.messages import ToHex
-from test_framework.p2p import P2PTxInvStore, p2p_lock
+from test_framework.p2p import P2PTxInvStore
 from test_framework.test_framework import BitcoinTestFramework
-from test_framework.util import assert_equal, wait_until
+from test_framework.util import assert_equal

 class ResendWalletTransactionsTest(BitcoinTestFramework):
 def set_test_params(self):

@@ -24,7 +24,7 @@ class ResendWalletTransactionsTest(BitcoinTestFramework):
 node.add_p2p_connection(P2PTxInvStore())

 self.log.info("Create a new transaction and wait until it's broadcast")
-txid = int(node.sendtoaddress(node.getnewaddress(), 1), 16)
+txid = node.sendtoaddress(node.getnewaddress(), 1)

 # Wallet rebroadcast is first scheduled 1 sec after startup (see
 # nNextResend in ResendWalletTransactions()). Sleep for just over a

@@ -35,8 +35,8 @@ class ResendWalletTransactionsTest(BitcoinTestFramework):
 # Can take a few seconds due to transaction trickling
 def wait_p2p():
 self.bump_mocktime(1)
-return node.p2p.tx_invs_received[txid] >= 1
-wait_until(wait_p2p, lock=p2p_lock)
+return node.p2p.tx_invs_received[int(txid, 16)] >= 1
+self.wait_until(wait_p2p)

 # Add a second peer since txs aren't rebroadcast to the same peer (see filterInventoryKnown)
 node.add_p2p_connection(P2PTxInvStore())

@@ -62,7 +62,7 @@ class ResendWalletTransactionsTest(BitcoinTestFramework):
 node.setmocktime(self.mocktime + twelve_hrs - two_min)
 self.mocktime = self.mocktime + twelve_hrs - two_min
 time.sleep(2) # ensure enough time has passed for rebroadcast attempt to occur
-assert_equal(txid in node.p2ps[1].get_invs(), False)
+assert_equal(int(txid, 16) in node.p2ps[1].get_invs(), False)

 self.log.info("Bump time & check that transaction is rebroadcast")
 # Transaction should be rebroadcast approximately 2 hours in the future,

@@ -71,10 +71,9 @@ class ResendWalletTransactionsTest(BitcoinTestFramework):
 node.setmocktime(rebroadcast_time)
 self.mocktime = rebroadcast_time

-def wait_p2p_1():
-self.bump_mocktime(1)
-return node.p2ps[1].tx_invs_received[txid] >= 1
-wait_until(wait_p2p_1, lock=p2p_lock)
+# Transaction should be rebroadcast approximately 24 hours in the future,
+# but can range from 12-36. So bump 36 hours to be sure.
+node.p2p.wait_for_broadcast([txid])


 if __name__ == '__main__':