Merge #19816: test: Rename wait until helper to wait_until_helper

fa1cd9e1ddc6918c3d600d36eadea71eebb242b6 test: Remove unused lock arg from BitcoinTestFramework.wait_until (MarcoFalke)
fad2794e93b4f5976e81793a4a63aa03a2c8c686 test: Rename wait until helper to wait_until_helper (MarcoFalke)
facb41bf1d1b7ee552c627f9829b4494b817ce28 test: Remove unused p2p_lock in VersionBitsWarningTest (MarcoFalke)

Pull request description:

  This avoids confusion with the `wait_until` member functions, which should be preferred because they take the appropriate locks and scale the timeout automatically.
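
  A minimal sketch of the distinction (hypothetical test class; only the framework names and parameters come from this change): the member applies the test's configured timeout factor on its own, while the bare helper only scales the timeout if the caller passes timeout_factor explicitly.

      from test_framework.test_framework import BitcoinTestFramework
      from test_framework.util import wait_until_helper

      class WaitUntilExample(BitcoinTestFramework):
          def set_test_params(self):
              self.num_nodes = 1

          def run_test(self):
              node = self.nodes[0]
              # Preferred: the member scales the timeout by self.options.timeout_factor internally.
              self.wait_until(lambda: node.getblockcount() >= 0, timeout=10)
              # Discouraged: the bare helper needs the factor passed explicitly and
              # takes no lock unless one is supplied (the predicate is a placeholder).
              wait_until_helper(lambda: node.getblockcount() >= 0, timeout=10,
                                timeout_factor=self.options.timeout_factor)

      if __name__ == '__main__':
          WaitUntilExample().main()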

ACKs for top commit:
  laanwj:
    Code review ACK fa1cd9e1ddc6918c3d600d36eadea71eebb242b6
  hebasto:
    ACK fa1cd9e1ddc6918c3d600d36eadea71eebb242b6, I have reviewed the code and it looks OK, I agree it can be merged.

Tree-SHA512: 319d400085606a4c738e314824037f72998e6657d8622b363726842aba968744f23c56d27275dfe506b8cbbb6e97fc39ca1d325db05d4d67df0e8b35f2244d5c
Authored by fanquake on 2020-09-03 11:47:08 +08:00; committed by Konstantin Akimov
parent 063c9b744d
commit 8368bd795e
15 changed files with 83 additions and 84 deletions


@ -216,10 +216,11 @@ class ExampleTest(BitcoinTestFramework):
self.log.info("Check that each block was received only once")
# The network thread uses a global lock on data access to the P2PConnection objects when sending and receiving
# messages. The test thread should acquire the global lock before accessing any P2PConnection data to avoid locking
# and synchronization issues. Note wait_until() acquires this global lock when testing the predicate.
# and synchronization issues. Note p2p.wait_until() acquires this global lock internally when testing the predicate.
with p2p_lock:
for block in peer_receiving.block_receive_map.values():
assert_equal(block, 1)
if __name__ == '__main__':
ExampleTest().main()
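
The locking convention spelled out in the comment above can be summarized as follows. Illustrative sketch only: the peer's block_receive_map mirrors the example test, while the wrapper function itself is hypothetical.

    from test_framework.p2p import p2p_lock

    def block_received_once(peer, blockhash):
        # peer.wait_until() acquires p2p_lock around each evaluation of the predicate.
        peer.wait_until(lambda: blockhash in peer.block_receive_map, timeout=30)
        # Direct reads of P2PConnection data must hold the global lock explicitly.
        with p2p_lock:
            return peer.block_receive_map[blockhash] == 1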


@ -8,7 +8,7 @@ import json
from test_framework.messages import uint256_to_string
from test_framework.test_framework import DashTestFramework
from test_framework.util import assert_equal, satoshi_round, set_node_times, wait_until
from test_framework.util import assert_equal, satoshi_round, set_node_times, wait_until_helper
class DashGovernanceTest (DashTestFramework):
def set_test_params(self):
@ -229,7 +229,7 @@ class DashGovernanceTest (DashTestFramework):
self.wait_until(lambda: len(isolated.gobject("list", "valid", "triggers")) == 1, timeout=5)
isolated_trigger_hash = list(isolated.gobject("list", "valid", "triggers").keys())[0]
self.wait_until(lambda: list(isolated.gobject("list", "valid", "triggers").values())[0]['YesCount'] == 1, timeout=5)
more_votes = wait_until(lambda: list(isolated.gobject("list", "valid", "triggers").values())[0]['YesCount'] > 1, timeout=5, do_assert=False)
more_votes = wait_until_helper(lambda: list(isolated.gobject("list", "valid", "triggers").values())[0]['YesCount'] > 1, timeout=5, do_assert=False)
assert_equal(more_votes, False)
# Move 1 block enabling the Superblock maturity window on non-isolated nodes
@ -240,7 +240,7 @@ class DashGovernanceTest (DashTestFramework):
self.check_superblockbudget(False)
# The "winner" should submit new trigger and vote for it, but it's isolated so no triggers should be found
has_trigger = wait_until(lambda: len(self.nodes[0].gobject("list", "valid", "triggers")) >= 1, timeout=5, do_assert=False)
has_trigger = wait_until_helper(lambda: len(self.nodes[0].gobject("list", "valid", "triggers")) >= 1, timeout=5, do_assert=False)
assert_equal(has_trigger, False)
# Move 1 block inside the Superblock maturity window on non-isolated nodes
@ -251,7 +251,7 @@ class DashGovernanceTest (DashTestFramework):
self.wait_until(lambda: len(self.nodes[0].gobject("list", "valid", "triggers")) == 1, timeout=5)
winning_trigger_hash = list(self.nodes[0].gobject("list", "valid", "triggers").keys())[0]
self.wait_until(lambda: list(self.nodes[0].gobject("list", "valid", "triggers").values())[0]['YesCount'] == 1, timeout=5)
more_votes = wait_until(lambda: list(self.nodes[0].gobject("list", "valid", "triggers").values())[0]['YesCount'] > 1, timeout=5, do_assert=False)
more_votes = wait_until_helper(lambda: list(self.nodes[0].gobject("list", "valid", "triggers").values())[0]['YesCount'] > 1, timeout=5, do_assert=False)
assert_equal(more_votes, False)
# Make sure amounts aren't trimmed
@ -267,7 +267,7 @@ class DashGovernanceTest (DashTestFramework):
# Every non-isolated MN should vote for the same trigger now, no new triggers should be created
self.wait_until(lambda: list(self.nodes[0].gobject("list", "valid", "triggers").values())[0]['YesCount'] == self.mn_count - 1, timeout=5)
more_triggers = wait_until(lambda: len(self.nodes[0].gobject("list", "valid", "triggers")) > 1, timeout=5, do_assert=False)
more_triggers = wait_until_helper(lambda: len(self.nodes[0].gobject("list", "valid", "triggers")) > 1, timeout=5, do_assert=False)
assert_equal(more_triggers, False)
self.reconnect_isolated_node(payee_idx, 0)
@ -294,7 +294,7 @@ class DashGovernanceTest (DashTestFramework):
# Should see two triggers now
self.wait_until(lambda: len(isolated.gobject("list", "valid", "triggers")) == 2, timeout=5)
self.wait_until(lambda: len(self.nodes[0].gobject("list", "valid", "triggers")) == 2, timeout=5)
more_triggers = wait_until(lambda: len(self.nodes[0].gobject("list", "valid", "triggers")) > 2, timeout=5, do_assert=False)
more_triggers = wait_until_helper(lambda: len(self.nodes[0].gobject("list", "valid", "triggers")) > 2, timeout=5, do_assert=False)
assert_equal(more_triggers, False)
# Move another block inside the Superblock maturity window


@ -16,7 +16,7 @@ from test_framework.blocktools import create_block_with_mnpayments
from test_framework.messages import CInv, CTransaction, FromHex, hash256, msg_clsig, msg_inv, ser_string, ToHex, uint256_from_str
from test_framework.p2p import P2PInterface
from test_framework.test_framework import DashTestFramework
from test_framework.util import assert_equal, assert_raises_rpc_error, hex_str_to_bytes, wait_until
from test_framework.util import assert_equal, assert_raises_rpc_error, hex_str_to_bytes
class TestP2PConn(P2PInterface):
@ -243,7 +243,7 @@ class LLMQ_IS_CL_Conflicts(DashTestFramework):
# even though the nodes don't know the locked transaction yet
self.test_node.send_isdlock(isdlock)
for node in self.nodes:
wait_until(lambda: node.getbestblockhash() == good_tip, timeout=10, sleep=0.5)
self.wait_until(lambda: node.getbestblockhash() == good_tip, timeout=10)
# islock for tx2 is incomplete, tx1 should return in mempool now that blocks are disconnected
assert rawtx1_txid in set(node.getrawmempool())


@ -18,7 +18,7 @@ from test_framework.p2p import P2PInterface
from test_framework.util import (
assert_equal,
assert_greater_than_or_equal,
wait_until, assert_greater_than, get_bip9_details,
assert_greater_than, get_bip9_details,
)
@ -244,7 +244,7 @@ class LLMQQuorumRotationTest(DashTestFramework):
self.nodes[0].sporkupdate("SPORK_19_CHAINLOCKS_ENABLED", 0)
self.wait_for_sporks_same()
self.nodes[0].reconsiderblock(fallback_blockhash)
wait_until(lambda: self.nodes[0].getbestblockhash() == new_quorum_blockhash, sleep=1)
self.wait_until(lambda: self.nodes[0].getbestblockhash() == new_quorum_blockhash)
assert_equal(self.nodes[0].quorum("list", llmq_type), new_quorum_list)
def test_getmnlistdiff_quorums(self, baseBlockHash, blockHash, baseQuorumList, expectedDeleted, expectedNew, testQuorumsCLSigs = True):


@ -13,7 +13,7 @@ Checks LLMQs signing sessions
from test_framework.messages import CSigShare, msg_qsigshare, uint256_to_string
from test_framework.p2p import P2PInterface
from test_framework.test_framework import DashTestFramework
from test_framework.util import assert_equal, assert_raises_rpc_error, force_finish_mnsync, hex_str_to_bytes, wait_until
from test_framework.util import assert_equal, assert_raises_rpc_error, force_finish_mnsync, hex_str_to_bytes, wait_until_helper
class LLMQSigningTest(DashTestFramework):
@ -55,7 +55,7 @@ class LLMQSigningTest(DashTestFramework):
self.wait_until(lambda: check_sigs(hasrecsigs, isconflicting1, isconflicting2), timeout = timeout)
def assert_sigs_nochange(hasrecsigs, isconflicting1, isconflicting2, timeout):
assert not wait_until(lambda: not check_sigs(hasrecsigs, isconflicting1, isconflicting2), timeout = timeout, do_assert = False)
assert not wait_until_helper(lambda: not check_sigs(hasrecsigs, isconflicting1, isconflicting2), timeout = timeout, do_assert = False)
# Initial state
wait_for_sigs(False, False, False, 1)
@ -188,7 +188,7 @@ class LLMQSigningTest(DashTestFramework):
force_finish_mnsync(mn.node)
# Make sure intra-quorum connections were also restored
self.bump_mocktime(1) # need this to bypass quorum connection retry timeout
wait_until(lambda: mn.node.getconnectioncount() == self.llmq_size, timeout=10, sleep=2)
self.wait_until(lambda: mn.node.getconnectioncount() == self.llmq_size, timeout=10)
mn.node.ping()
self.wait_until(lambda: all('pingwait' not in peer for peer in mn.node.getpeerinfo()))
# Let 2 seconds pass so that the next node is used for recovery, which should succeed


@ -5,7 +5,6 @@
import time
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import wait_until
'''
feature_multikeysporks.py
@ -101,7 +100,7 @@ class MultiKeySporkTest(BitcoinTestFramework):
self.nodes[2].sporkupdate(spork_name, 1)
# now spork state is changed
for node in self.nodes:
wait_until(lambda: self.get_test_spork_value(node, spork_name) == 1, sleep=0.1, timeout=10)
self.wait_until(lambda: self.get_test_spork_value(node, spork_name) == 1, timeout=10)
# restart with no extra args to trigger CheckAndRemove, should reset the spork back to its default
self.restart_node(0)
@ -112,7 +111,7 @@ class MultiKeySporkTest(BitcoinTestFramework):
for i in range(1, 5):
self.connect_nodes(0, i)
wait_until(lambda: self.get_test_spork_value(self.nodes[0], spork_name) == 1, sleep=0.1, timeout=10)
self.wait_until(lambda: self.get_test_spork_value(self.nodes[0], spork_name) == 1, timeout=10)
self.bump_mocktime(1)
# now set the spork again with other signers to test
@ -121,7 +120,7 @@ class MultiKeySporkTest(BitcoinTestFramework):
self.nodes[3].sporkupdate(spork_name, final_value)
self.nodes[4].sporkupdate(spork_name, final_value)
for node in self.nodes:
wait_until(lambda: self.get_test_spork_value(node, spork_name) == final_value, sleep=0.1, timeout=10)
self.wait_until(lambda: self.get_test_spork_value(node, spork_name) == final_value, timeout=10)
def run_test(self):
self.test_spork('SPORK_2_INSTANTSEND_ENABLED', 2)


@ -4,7 +4,6 @@
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import wait_until
'''
'''
@ -43,7 +42,7 @@ class SporkTest(BitcoinTestFramework):
# check spork propagation for connected nodes
spork_new_state = not spork_default_state
self.set_test_spork_state(self.nodes[0], spork_new_state)
wait_until(lambda: self.get_test_spork_state(self.nodes[1]), sleep=0.1, timeout=10)
self.wait_until(lambda: self.get_test_spork_state(self.nodes[1]), timeout=10)
# restart nodes to check spork persistence
self.stop_node(0)
@ -58,7 +57,7 @@ class SporkTest(BitcoinTestFramework):
# connect new node and check spork propagation after restoring from cache
self.connect_nodes(1, 2)
wait_until(lambda: self.get_test_spork_state(self.nodes[2]), sleep=0.1, timeout=10)
self.wait_until(lambda: self.get_test_spork_state(self.nodes[2]), timeout=10)
if __name__ == '__main__':
SporkTest().main()


@ -12,7 +12,7 @@ import re
from test_framework.blocktools import create_block, create_coinbase
from test_framework.messages import msg_block
from test_framework.p2p import p2p_lock, P2PInterface
from test_framework.p2p import P2PInterface
from test_framework.test_framework import BitcoinTestFramework
VB_PERIOD = 144 # versionbits period length for regtest
@ -90,7 +90,7 @@ class VersionBitsWarningTest(BitcoinTestFramework):
# Generating one block guarantees that we'll get out of IBD
node.generatetoaddress(1, node_deterministic_address)
self.wait_until(lambda: not node.getblockchaininfo()['initialblockdownload'], timeout=10, lock=p2p_lock)
self.wait_until(lambda: not node.getblockchaininfo()['initialblockdownload'])
# Generating one more block will be enough to generate an error.
node.generatetoaddress(1, node_deterministic_address)
# Check that get*info() shows the versionbits unknown rules warning


@ -9,7 +9,7 @@ import random
import threading
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import get_rpc_proxy, wait_until
from test_framework.util import get_rpc_proxy
from test_framework.wallet import MiniWallet
@ -78,7 +78,7 @@ class GetBlockTemplateLPTest(BitcoinTestFramework):
def check():
self.bump_mocktime(1)
return not thr.is_alive()
wait_until(check, timeout=60 + 20, sleep=1)
self.wait_until(check, timeout=60 + 20)
if __name__ == '__main__':
GetBlockTemplateLPTest().main()


@ -18,7 +18,7 @@ from test_framework.messages import (
msg_ping,
msg_version,
)
from test_framework.p2p import p2p_lock, P2PInterface
from test_framework.p2p import P2PInterface
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
@ -118,9 +118,9 @@ class P2PLeakTest(BitcoinTestFramework):
# verack, since we never sent one
no_verack_idle_peer.wait_for_verack()
self.wait_until(lambda: no_version_ban_peer.ever_connected, timeout=10, lock=p2p_lock)
self.wait_until(lambda: no_version_idle_peer.ever_connected, timeout=10, lock=p2p_lock)
self.wait_until(lambda: no_verack_idle_peer.version_received, timeout=10, lock=p2p_lock)
no_version_ban_peer.wait_until(lambda: no_version_ban_peer.ever_connected, check_connected=False)
no_version_idle_peer.wait_until(lambda: no_version_idle_peer.ever_connected)
no_verack_idle_peer.wait_until(lambda: no_verack_idle_peer.version_received)
# Mine a block and make sure that it's not sent to the connected peers
self.nodes[0].generate(nblocks=1)


@ -15,7 +15,7 @@ from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
force_finish_mnsync,
wait_until,
wait_until_helper,
)
'''
@ -65,14 +65,14 @@ def wait_for_banscore(node, peer_id, expected_score):
time.sleep(1)
return peer["banscore"]
return None
wait_until(lambda: get_score() == expected_score, timeout=6)
wait_until_helper(lambda: get_score() == expected_score, timeout=6)
def p2p_connection(node, uacomment=None):
return node.add_p2p_connection(QuorumDataInterface(), uacomment=uacomment)
def get_mininode_id(node, uacomment=None):
def get_p2p_id(node, uacomment=None):
def get_id():
for p in node.getpeerinfo():
for p2p in node.p2ps:
@ -81,7 +81,7 @@ def get_mininode_id(node, uacomment=None):
if p["subver"] == p2p.strSubVer.decode():
return p["id"]
return None
wait_until(lambda: get_id() is not None, timeout=10)
wait_until_helper(lambda: get_id() is not None, timeout=10)
return get_id()
@ -106,7 +106,7 @@ class QuorumDataInterface(P2PInterface):
assert_qdata(self.get_qdata(), qgetdata, expected_error, len_vvec, len_contributions)
def wait_for_qmessage(self, message=None, timeout=3, message_expected=True):
wait_until(lambda: self.message_count[message] > 0, timeout=timeout, lock=p2p_lock, do_assert=message_expected)
wait_until_helper(lambda: self.message_count[message] > 0, timeout=timeout, lock=p2p_lock, do_assert=message_expected)
if not message_expected:
assert self.message_count[message] == 0
self.message_count[message] = 0
@ -143,8 +143,8 @@ class QuorumDataMessagesTest(DashTestFramework):
force_request_expire()
p2p_node0 = p2p_connection(node0)
p2p_mn2 = p2p_connection(mn2.node)
id_p2p_node0 = get_mininode_id(node0)
id_p2p_mn2 = get_mininode_id(mn2.node)
id_p2p_node0 = get_p2p_id(node0)
id_p2p_mn2 = get_p2p_id(mn2.node)
# Ensure that both nodes start with zero ban score
wait_for_banscore(node0, id_p2p_node0, 0)
@ -166,7 +166,7 @@ class QuorumDataMessagesTest(DashTestFramework):
node0.disconnect_p2ps()
mn2.node.disconnect_p2ps()
p2p_mn2 = p2p_connection(mn2.node)
id_p2p_mn2 = get_mininode_id(mn2.node)
id_p2p_mn2 = get_p2p_id(mn2.node)
mnauth(mn2.node, id_p2p_mn2, fake_mnauth_2[0], fake_mnauth_2[1])
# The masternode should now respond to qgetdata requests
self.log.info("Request verification vector")
@ -188,8 +188,8 @@ class QuorumDataMessagesTest(DashTestFramework):
self.log.info("Test ban score increase for invalid / unexpected QDATA")
p2p_mn1 = p2p_connection(mn1.node)
p2p_mn2 = p2p_connection(mn2.node)
id_p2p_mn1 = get_mininode_id(mn1.node)
id_p2p_mn2 = get_mininode_id(mn2.node)
id_p2p_mn1 = get_p2p_id(mn1.node)
id_p2p_mn2 = get_p2p_id(mn2.node)
mnauth(mn1.node, id_p2p_mn1, fake_mnauth_1[0], fake_mnauth_1[1])
mnauth(mn2.node, id_p2p_mn2, fake_mnauth_2[0], fake_mnauth_2[1])
wait_for_banscore(mn1.node, id_p2p_mn1, 0)
@ -236,7 +236,7 @@ class QuorumDataMessagesTest(DashTestFramework):
self.log.info("Test all available error codes")
p2p_mn2 = p2p_connection(mn2.node)
id_p2p_mn2 = get_mininode_id(mn2.node)
id_p2p_mn2 = get_p2p_id(mn2.node)
mnauth(mn2.node, id_p2p_mn2, fake_mnauth_2[0], fake_mnauth_2[1])
qgetdata_invalid_type = msg_qgetdata(quorum_hash_int, 105, 0x01, protx_hash_int)
qgetdata_invalid_block = msg_qgetdata(protx_hash_int, 100, 0x01, protx_hash_int)
@ -252,8 +252,8 @@ class QuorumDataMessagesTest(DashTestFramework):
# Re-connect to the masternode
p2p_mn1 = p2p_connection(mn1.node)
p2p_mn2 = p2p_connection(mn2.node)
id_p2p_mn1 = get_mininode_id(mn1.node)
id_p2p_mn2 = get_mininode_id(mn2.node)
id_p2p_mn1 = get_p2p_id(mn1.node)
id_p2p_mn2 = get_p2p_id(mn2.node)
assert id_p2p_mn1 is not None
assert id_p2p_mn2 is not None
mnauth(mn1.node, id_p2p_mn1, fake_mnauth_1[0], fake_mnauth_1[1])
@ -304,7 +304,7 @@ class QuorumDataMessagesTest(DashTestFramework):
self.log.info("Test request limiting / banscore increases")
p2p_mn1 = p2p_connection(mn1.node)
id_p2p_mn1 = get_mininode_id(mn1.node)
id_p2p_mn1 = get_p2p_id(mn1.node)
mnauth(mn1.node, id_p2p_mn1, fake_mnauth_1[0], fake_mnauth_1[1])
p2p_mn1.test_qgetdata(qgetdata_vvec, 0, self.llmq_threshold, 0)
wait_for_banscore(mn1.node, id_p2p_mn1, 0)
@ -319,8 +319,8 @@ class QuorumDataMessagesTest(DashTestFramework):
# in banscore increase for either of both.
p2p_mn3_1 = p2p_connection(mn3.node, uacomment_m3_1)
p2p_mn3_2 = p2p_connection(mn3.node, uacomment_m3_2)
id_p2p_mn3_1 = get_mininode_id(mn3.node, uacomment_m3_1)
id_p2p_mn3_2 = get_mininode_id(mn3.node, uacomment_m3_2)
id_p2p_mn3_1 = get_p2p_id(mn3.node, uacomment_m3_1)
id_p2p_mn3_2 = get_p2p_id(mn3.node, uacomment_m3_2)
assert id_p2p_mn3_1 != id_p2p_mn3_2
mnauth(mn3.node, id_p2p_mn3_1, fake_mnauth_1[0], fake_mnauth_1[1])
mnauth(mn3.node, id_p2p_mn3_2, fake_mnauth_2[0], fake_mnauth_2[1])
@ -348,8 +348,8 @@ class QuorumDataMessagesTest(DashTestFramework):
force_request_expire()
p2p_mn3_1 = p2p_connection(mn3.node, uacomment_m3_1)
p2p_mn3_2 = p2p_connection(mn3.node, uacomment_m3_2)
id_p2p_mn3_1 = get_mininode_id(mn3.node, uacomment_m3_1)
id_p2p_mn3_2 = get_mininode_id(mn3.node, uacomment_m3_2)
id_p2p_mn3_1 = get_p2p_id(mn3.node, uacomment_m3_1)
id_p2p_mn3_2 = get_p2p_id(mn3.node, uacomment_m3_2)
assert id_p2p_mn3_1 != id_p2p_mn3_2
wait_for_banscore(mn3.node, id_p2p_mn3_1, 0)
@ -377,8 +377,8 @@ class QuorumDataMessagesTest(DashTestFramework):
self.connect_nodes(0, i + 1)
p2p_node0 = p2p_connection(node0)
p2p_mn2 = p2p_connection(mn2.node)
id_p2p_node0 = get_mininode_id(node0)
id_p2p_mn2 = get_mininode_id(mn2.node)
id_p2p_node0 = get_p2p_id(node0)
id_p2p_mn2 = get_p2p_id(mn2.node)
mnauth(node0, id_p2p_node0, fake_mnauth_1[0], fake_mnauth_1[1])
mnauth(mn2.node, id_p2p_mn2, fake_mnauth_2[0], fake_mnauth_2[1])
p2p_mn2.test_qgetdata(qgetdata_vvec, 0, self.llmq_threshold)


@ -82,7 +82,7 @@ from test_framework.messages import (
NODE_NETWORK,
sha256,
)
from test_framework.util import wait_until
from test_framework.util import wait_until_helper
logger = logging.getLogger("TestFramework.p2p")
@ -343,7 +343,7 @@ class P2PInterface(P2PConnection):
# Track the most recent message of each type.
# To wait for a message to be received, pop that message from
# this and use wait_until.
# this and use self.wait_until.
self.last_message = {}
# A count of the number of ping messages we've sent to the node
@ -463,7 +463,7 @@ class P2PInterface(P2PConnection):
assert self.is_connected
return test_function_in()
wait_until(test_function, timeout=timeout, lock=p2p_lock, timeout_factor=self.timeout_factor)
wait_until_helper(test_function, timeout=timeout, lock=p2p_lock, timeout_factor=self.timeout_factor)
def wait_for_disconnect(self, timeout=60):
test_function = lambda: not self.is_connected
@ -501,7 +501,7 @@ class P2PInterface(P2PConnection):
return False
return last_filtered_block.merkleblock.header.rehash() == int(blockhash, 16)
wait_until(test_function, timeout=timeout, lock=p2p_lock)
self.wait_until(test_function, timeout=timeout)
def wait_for_getdata(self, hash_list, timeout=60):
@ -589,7 +589,7 @@ class NetworkThread(threading.Thread):
def close(self, timeout=10):
"""Close the connections and network event loop."""
self.network_event_loop.call_soon_threadsafe(self.network_event_loop.stop)
wait_until(lambda: not self.network_event_loop.is_running(), timeout=timeout)
wait_until_helper(lambda: not self.network_event_loop.is_running(), timeout=timeout)
self.network_event_loop.close()
self.join(timeout)
# Safe to remove event loop.
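
The last_message comment earlier in this file describes how tests wait for a specific message type; roughly like this (sketch only: 'headers' is just an example message type, the helper name is hypothetical, and peer is assumed to be a P2PInterface):

    from test_framework.p2p import p2p_lock

    def wait_for_fresh_headers(peer, timeout=60):
        # Drop any previously recorded headers message, then wait for a new one.
        with p2p_lock:
            peer.last_message.pop("headers", None)
        peer.wait_until(lambda: "headers" in peer.last_message, timeout=timeout)
        with p2p_lock:
            return peer.last_message["headers"]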


@ -52,7 +52,7 @@ from .util import (
set_node_times,
satoshi_round,
softfork_active,
wait_until,
wait_until_helper,
get_chain_folder, rpc_port,
)
@ -681,8 +681,8 @@ class BitcoinTestFramework(metaclass=BitcoinTestMetaClass):
# See comments in net_processing:
# * Must have a version message before anything else
# * Must have a verack message before anything else
wait_until(lambda: all(peer['version'] != 0 for peer in from_connection.getpeerinfo()))
wait_until(lambda: all(peer['bytesrecv_per_msg'].pop('verack', 0) == 24 for peer in from_connection.getpeerinfo()))
wait_until_helper(lambda: all(peer['version'] != 0 for peer in from_connection.getpeerinfo()))
wait_until_helper(lambda: all(peer['bytesrecv_per_msg'].pop('verack', 0) == 24 for peer in from_connection.getpeerinfo()))
connect_nodes_helper(self.nodes[a], b)
@ -713,13 +713,13 @@ class BitcoinTestFramework(metaclass=BitcoinTestMetaClass):
raise
# wait to disconnect
wait_until(lambda: not get_peer_ids(), timeout=5)
wait_until_helper(lambda: not get_peer_ids(), timeout=5)
disconnect_nodes_helper(self.nodes[a], b)
def isolate_node(self, node_num, timeout=5):
self.nodes[node_num].setnetworkactive(False)
wait_until(lambda: self.nodes[node_num].getconnectioncount() == 0, timeout=timeout)
wait_until_helper(lambda: self.nodes[node_num].getconnectioncount() == 0, timeout=timeout)
def reconnect_isolated_node(self, a, b):
self.nodes[a].setnetworkactive(True)
@ -816,7 +816,7 @@ class BitcoinTestFramework(metaclass=BitcoinTestMetaClass):
node.mocktime = self.mocktime
def wait_until(self, test_function, timeout=60, lock=None):
return wait_until(test_function, timeout=timeout, lock=lock, timeout_factor=self.options.timeout_factor)
return wait_until_helper(test_function, timeout=timeout, lock=lock, timeout_factor=self.options.timeout_factor)
# Private helper methods. These should not be accessed by the subclass test scripts.
@ -1551,7 +1551,7 @@ class DashTestFramework(BitcoinTestFramework):
return node.getrawtransaction(txid)
except:
return False
if wait_until(check_tx, timeout=timeout, sleep=1, do_assert=expected) and not expected:
if wait_until_helper(check_tx, timeout=timeout, sleep=1, do_assert=expected) and not expected:
raise AssertionError("waiting unexpectedly succeeded")
def create_isdlock(self, hextx):
@ -1583,7 +1583,7 @@ class DashTestFramework(BitcoinTestFramework):
return node.getrawtransaction(txid, True)["instantlock"]
except:
return False
if wait_until(check_instantlock, timeout=timeout, sleep=1, do_assert=expected) and not expected:
if wait_until_helper(check_instantlock, timeout=timeout, sleep=1, do_assert=expected) and not expected:
raise AssertionError("waiting unexpectedly succeeded")
def wait_for_chainlocked_block(self, node, block_hash, expected=True, timeout=15):
@ -1593,7 +1593,7 @@ class DashTestFramework(BitcoinTestFramework):
return block["confirmations"] > 0 and block["chainlock"]
except:
return False
if wait_until(check_chainlocked_block, timeout=timeout, sleep=0.1, do_assert=expected) and not expected:
if wait_until_helper(check_chainlocked_block, timeout=timeout, sleep=0.1, do_assert=expected) and not expected:
raise AssertionError("waiting unexpectedly succeeded")
def wait_for_chainlocked_block_all_nodes(self, block_hash, timeout=15, expected=True):
@ -1601,14 +1601,14 @@ class DashTestFramework(BitcoinTestFramework):
self.wait_for_chainlocked_block(node, block_hash, expected=expected, timeout=timeout)
def wait_for_best_chainlock(self, node, block_hash, timeout=15):
wait_until(lambda: node.getbestchainlock()["blockhash"] == block_hash, timeout=timeout, sleep=0.1)
wait_until_helper(lambda: node.getbestchainlock()["blockhash"] == block_hash, timeout=timeout, sleep=0.1)
def wait_for_sporks_same(self, timeout=30):
def check_sporks_same():
self.bump_mocktime(1)
sporks = self.nodes[0].spork('show')
return all(node.spork('show') == sporks for node in self.nodes[1:])
wait_until(check_sporks_same, timeout=timeout, sleep=0.5)
wait_until_helper(check_sporks_same, timeout=timeout, sleep=0.5)
def wait_for_quorum_connections(self, quorum_hash, expected_connections, mninfos, llmq_type_name="llmq_test", timeout = 60, wait_proc=None):
def check_quorum_connections():
@ -1647,7 +1647,7 @@ class DashTestFramework(BitcoinTestFramework):
# no sessions at all - not ok
return ret()
wait_until(check_quorum_connections, timeout=timeout, sleep=1)
wait_until_helper(check_quorum_connections, timeout=timeout, sleep=1)
def wait_for_masternode_probes(self, quorum_hash, mninfos, timeout = 30, wait_proc=None, llmq_type_name="llmq_test"):
def check_probes():
@ -1685,7 +1685,7 @@ class DashTestFramework(BitcoinTestFramework):
return ret()
return True
wait_until(check_probes, timeout=timeout, sleep=1)
wait_until_helper(check_probes, timeout=timeout, sleep=1)
def wait_for_quorum_phase(self, quorum_hash, phase, expected_member_count, check_received_messages, check_received_messages_count, mninfos, llmq_type_name="llmq_test", timeout=30, sleep=0.5):
def check_dkg_session():
@ -1707,7 +1707,7 @@ class DashTestFramework(BitcoinTestFramework):
break
return member_count >= expected_member_count
wait_until(check_dkg_session, timeout=timeout, sleep=sleep)
wait_until_helper(check_dkg_session, timeout=timeout, sleep=sleep)
def wait_for_quorum_commitment(self, quorum_hash, nodes, llmq_type=100, timeout=15):
def check_dkg_comitments():
@ -1728,7 +1728,7 @@ class DashTestFramework(BitcoinTestFramework):
return False
return True
wait_until(check_dkg_comitments, timeout=timeout, sleep=1)
wait_until_helper(check_dkg_comitments, timeout=timeout, sleep=1)
def wait_for_quorum_list(self, quorum_hash, nodes, timeout=15, sleep=2, llmq_type_name="llmq_test"):
def wait_func():
@ -1739,7 +1739,7 @@ class DashTestFramework(BitcoinTestFramework):
self.nodes[0].generate(1)
self.sync_blocks(nodes)
return False
wait_until(wait_func, timeout=timeout, sleep=sleep)
wait_until_helper(wait_func, timeout=timeout, sleep=sleep)
def wait_for_quorums_list(self, quorum_hash_0, quorum_hash_1, nodes, llmq_type_name="llmq_test", timeout=15, sleep=2):
def wait_func():
@ -1751,7 +1751,7 @@ class DashTestFramework(BitcoinTestFramework):
self.nodes[0].generate(1)
self.sync_blocks(nodes)
return False
wait_until(wait_func, timeout=timeout, sleep=sleep)
wait_until_helper(wait_func, timeout=timeout, sleep=sleep)
def move_blocks(self, nodes, num_blocks):
time.sleep(1)
@ -2008,7 +2008,7 @@ class DashTestFramework(BitcoinTestFramework):
if not mn.node.quorum("hasrecsig", llmq_type, rec_sig_id, rec_sig_msg_hash):
return False
return True
wait_until(check_recovered_sig, timeout=timeout, sleep=1)
wait_until_helper(check_recovered_sig, timeout=timeout, sleep=1)
def get_recovered_sig(self, rec_sig_id, rec_sig_msg_hash, llmq_type=100):
# Note: recsigs aren't relayed to regular nodes by default,
@ -2067,7 +2067,7 @@ class DashTestFramework(BitcoinTestFramework):
(valid, len(mns), quorum_type_in, quorum_hash_in))
return valid == len(mns)
wait_until(test_mns, timeout=timeout, sleep=0.5)
wait_until_helper(test_mns, timeout=timeout, sleep=0.5)
def wait_for_mnauth(self, node, count, timeout=10):
def test():
@ -2077,4 +2077,4 @@ class DashTestFramework(BitcoinTestFramework):
if "verified_proregtx_hash" in p and p["verified_proregtx_hash"] != "":
c += 1
return c >= count
wait_until(test, timeout=timeout)
wait_until_helper(test, timeout=timeout)


@ -30,7 +30,7 @@ from .util import (
get_auth_cookie,
get_rpc_proxy,
rpc_url,
wait_until,
wait_until_helper,
p2p_port,
get_chain_folder,
EncodeDecimal,
@ -248,7 +248,7 @@ class TestNode():
if self.version_is_at_least(180000):
# getmempoolinfo.loaded is available since commit
# 71e38b9ebcb78b3a264a4c25c7c4e373317f2a40 (version 0.18.0)
wait_until(lambda: rpc.getmempoolinfo()['loaded'])
wait_until_helper(lambda: rpc.getmempoolinfo()['loaded'])
# Wait for the node to finish reindex, block import, and
# loading the mempool. Usually importing happens fast or
# even "immediate" when the node is started. However, there
@ -379,7 +379,7 @@ class TestNode():
return True
def wait_until_stopped(self, timeout=BITCOIND_PROC_WAIT_TIMEOUT):
wait_until(self.is_node_stopped, timeout=timeout, timeout_factor=self.timeout_factor)
wait_until_helper(self.is_node_stopped, timeout=timeout, timeout_factor=self.timeout_factor)
@contextlib.contextmanager
def assert_debug_log(self, expected_msgs, unexpected_msgs=None, timeout=2):
@ -580,10 +580,10 @@ class TestNode():
if p['subver'] == p2p.strSubVer.decode():
return False
return True
wait_until(check_peers, timeout=5)
wait_until_helper(check_peers, timeout=5)
del self.p2ps[:]
wait_until(lambda: self.num_test_p2p_connections() == 0)
wait_until_helper(lambda: self.num_test_p2p_connections() == 0, timeout_factor=self.timeout_factor)
class TestNodeCLIAttr:


@ -226,14 +226,14 @@ def satoshi_round(amount):
return Decimal(amount).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
def wait_until(predicate, *, attempts=float('inf'), timeout=float('inf'), sleep=0.5, timeout_factor=1.0, lock=None, do_assert=True, allow_exception=False):
def wait_until_helper(predicate, *, attempts=float('inf'), timeout=float('inf'), sleep=0.5, timeout_factor=1.0, lock=None, do_assert=True, allow_exception=False):
"""Sleep until the predicate resolves to be True.
Warning: Note that this method is not recommended to be used in tests as it is
not aware of the context of the test framework. Using `wait_until()` counterpart
from `BitcoinTestFramework` or `P2PInterface` class ensures an understandable
amount of timeout and a common shared timeout_factor. Furthermore, `wait_until()`
from `P2PInterface` class in `mininode.py` has a preset lock.
not aware of the context of the test framework. Using the `wait_until()` members
from `BitcoinTestFramework` or `P2PInterface` class ensures the timeout is
properly scaled. Furthermore, `wait_until()` from `P2PInterface` class in
`p2p.py` has a preset lock.
"""
if attempts == float('inf') and timeout == float('inf'):
timeout = 60
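
The do_assert=False form retained by this helper is what several Dash tests above use to check that a condition does not become true within a window; schematically (hypothetical wrapper; node stands for any masternode-aware test node):

    from test_framework.util import wait_until_helper

    def assert_no_extra_triggers(node, timeout=5):
        # With do_assert=False the helper returns False on timeout instead of raising,
        # so the assertion below only fires if an extra trigger actually appeared.
        appeared = wait_until_helper(
            lambda: len(node.gobject("list", "valid", "triggers")) > 1,
            timeout=timeout,
            do_assert=False,
        )
        assert not appeared, "unexpected extra trigger appeared"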