fix(tests): various fixes (#4840)
* tests: extend "age" period in `feature_llmq_connections.py`, see `NOTE`
* tests: sleep more in `wait_until` by default
  Avoid overloading RPC with 20+ requests per second; 2 per second should be enough.
* tests: various fixes in `activate_dip0024`
  - lower batch size
  - no fast mode
  - disable spork17 while mining
  - bump mocktime on every generate call
* tests: bump mocktime on generate in `activate_dip8`
* tests: fix `reindex` option in `restart_mn`
  Make sure nodes have actually finished reindexing before moving any further.
* tests: trigger recovery threads and wait on mn restarts
* tests: sync blocks in `wait_for_quorum_data`
* tests: bump disconnect timeouts in `p2p_invalid_messages.py`
  A timeout of 1 second is too low for busy nodes.
* tests: wait for addrv2 processing before bumping mocktime in `p2p_addrv2_relay.py`
* tests: use the `timeout_scale` option in `get_recovered_sig` and `isolate_node`
* tests: fix `wait_for...`s
* tests: fix the `close_mn_port` banning test
* Bump MASTERNODE_SYNC_RESET_SECONDS to 900
  This helps to avoid issues with 10m+ `bump_mocktime` calls on isolated nodes in
  feature_llmq_is_retroactive.py and feature_llmq_simplepose.py.
* style: fix extra whitespace

Co-authored-by: PastaPastaPasta <6443210+PastaPastaPasta@users.noreply.github.com>
parent c74a30af9f
commit 5b334b35e1
@@ -22,7 +22,7 @@ static constexpr int MASTERNODE_SYNC_FINISHED = 999;
 
 static constexpr int MASTERNODE_SYNC_TICK_SECONDS = 6;
 static constexpr int MASTERNODE_SYNC_TIMEOUT_SECONDS = 30; // our blocks are 2.5 minutes so 30 seconds should be fine
-static constexpr int MASTERNODE_SYNC_RESET_SECONDS = 600; // Reset fReachedBestHeader in CMasternodeSync::Reset if UpdateBlockTip hasn't been called for this seconds
+static constexpr int MASTERNODE_SYNC_RESET_SECONDS = 900; // Reset fReachedBestHeader in CMasternodeSync::Reset if UpdateBlockTip hasn't been called for this seconds
 
 extern std::unique_ptr<CMasternodeSync> masternodeSync;
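The size of the bump matters here: feature_llmq_is_retroactive.py and feature_llmq_simplepose.py fast-forward mocktime by 10 minutes or more while a node is isolated, which is exactly the old 600-second threshold. A minimal sketch of the arithmetic (not Dash code; `sync_would_reset` is a hypothetical stand-in for the reset check in CMasternodeSync):

    MASTERNODE_SYNC_RESET_SECONDS_OLD = 600
    MASTERNODE_SYNC_RESET_SECONDS_NEW = 900
    ISOLATION_BUMP = 10 * 60  # tests bump mocktime by 10 minutes or more

    def sync_would_reset(seconds_since_tip_update, reset_after):
        # simplified model: fReachedBestHeader is reset when UpdateBlockTip
        # hasn't been called for reset_after seconds
        return seconds_since_tip_update >= reset_after

    assert sync_would_reset(ISOLATION_BUMP, MASTERNODE_SYNC_RESET_SECONDS_OLD)      # spurious reset
    assert not sync_would_reset(ISOLATION_BUMP, MASTERNODE_SYNC_RESET_SECONDS_NEW)  # survives the bump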
@@ -13,8 +13,11 @@ Checks intra quorum connections
 import time
 
 from test_framework.test_framework import DashTestFramework
-from test_framework.util import assert_greater_than_or_equal, connect_nodes, wait_until
+from test_framework.util import assert_greater_than_or_equal, connect_nodes, Options, wait_until
+
+# Probes should age after this many seconds.
+# NOTE: mine_quorum() can bump mocktime quite often internally so make sure this number is high enough.
+MAX_AGE = 120 * Options.timeout_scale
 
 
 class LLMQConnections(DashTestFramework):
     def set_test_params(self):
@@ -54,7 +57,7 @@ class LLMQConnections(DashTestFramework):
             wait_until(lambda: self.get_mn_probe_count(mn.node, q, False) == 4)
 
         self.log.info("checking that probes age")
-        self.bump_mocktime(60)
+        self.bump_mocktime(MAX_AGE)
         for mn in self.get_quorum_masternodes(q):
             wait_until(lambda: self.get_mn_probe_count(mn.node, q, False) == 0)
 
@@ -144,7 +147,7 @@ class LLMQConnections(DashTestFramework):
             peerMap[p['verified_proregtx_hash']] = p
         for mn in self.get_quorum_masternodes(q):
             pi = mnMap[mn.proTxHash]
-            if pi['metaInfo']['lastOutboundSuccessElapsed'] < 60:
+            if pi['metaInfo']['lastOutboundSuccessElapsed'] < MAX_AGE:
                 count += 1
             elif check_peers and mn.proTxHash in peerMap:
                 count += 1
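The `MAX_AGE` constant follows a general rule in this patch: any wall-clock threshold a test compares against should scale with the harness's `timeout_scale` option, otherwise slow CI runs fail spuriously. A standalone sketch of the pattern (the `Options` holder below is a hypothetical stand-in for the framework's parsed options):

    class Options:
        # hypothetical stand-in; a slow CI runner might set this to 3
        timeout_scale = 1

    # a threshold that must outlive the mocktime bumps mine_quorum() performs internally
    MAX_AGE = 120 * Options.timeout_scale

    def probe_is_fresh(last_outbound_success_elapsed):
        # same comparison the test now performs against MAX_AGE instead of
        # a hard-coded 60, so it stays correct when timing is scaled
        return last_outbound_success_elapsed < MAX_AGE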
@@ -6,7 +6,7 @@
 import time
 from test_framework.mininode import logger
 from test_framework.test_framework import DashTestFramework
-from test_framework.util import force_finish_mnsync, connect_nodes
+from test_framework.util import force_finish_mnsync, connect_nodes, wait_until
 
 '''
 feature_llmq_data_recovery.py
@@ -31,13 +31,22 @@ class QuorumDataRecoveryTest(DashTestFramework):
     def restart_mn(self, mn, reindex=False, qvvec_sync=[], qdata_recovery_enabled=True):
         args = self.extra_args[mn.nodeIdx] + ['-masternodeblsprivkey=%s' % mn.keyOperator,
                                               '-llmq-data-recovery=%d' % qdata_recovery_enabled]
-        if reindex:
-            args.append('-reindex')
         for llmq_sync in qvvec_sync:
             args.append('-llmq-qvvec-sync=%s:%d' % (llmq_type_strings[llmq_sync[0]], llmq_sync[1]))
-        self.restart_node(mn.nodeIdx, args)
+        if reindex:
+            args.append('-reindex')
+            bb_hash = mn.node.getbestblockhash()
+            self.restart_node(mn.nodeIdx, args)
+            wait_until(lambda: mn.node.getbestblockhash() == bb_hash)
+        else:
+            self.restart_node(mn.nodeIdx, args)
         force_finish_mnsync(mn.node)
         connect_nodes(mn.node, 0)
         if qdata_recovery_enabled:
+            # trigger recovery threads and wait for them to start
+            self.nodes[0].generate(1)
+            self.bump_mocktime(self.quorum_data_thread_request_timeout_seconds + 1)
+            time.sleep(self.quorum_data_thread_request_timeout_seconds + 1)
             self.sync_blocks()
 
     def restart_mns(self, mns=None, exclude=[], reindex=False, qvvec_sync=[], qdata_recovery_enabled=True):
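The `reindex` branch deserves a note: `restart_node` returns as soon as RPC is back up, but a node started with `-reindex` rebuilds the chain from block 0, so anything height-sensitive must wait until it reaches its old tip again. A self-contained sketch of the pattern, with hypothetical `test`/`node` parameters standing in for the framework objects:

    import time

    def restart_and_wait_for_reindex(test, node_index, node, extra_args, timeout=60):
        # remember the tip before restarting with -reindex
        bb_hash = node.getbestblockhash()
        test.restart_node(node_index, extra_args + ['-reindex'])
        # RPC comes back long before reindexing finishes; poll until the
        # node has replayed the whole chain back to the same tip
        stop_time = time.time() + timeout
        while time.time() < stop_time:
            if node.getbestblockhash() == bb_hash:
                return
            time.sleep(0.5)
        raise AssertionError("node %d did not finish reindexing in time" % node_index)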
@@ -13,7 +13,7 @@ Checks simple PoSe system based on LLMQ commitments
 import time
 
 from test_framework.test_framework import DashTestFramework
-from test_framework.util import connect_nodes, force_finish_mnsync, p2p_port, wait_until
+from test_framework.util import assert_equal, connect_nodes, force_finish_mnsync, p2p_port, wait_until
 
 
 class LLMQSimplePoSeTest(DashTestFramework):
@@ -92,6 +92,67 @@ class LLMQSimplePoSeTest(DashTestFramework):
         for mn in self.mninfo:
             assert not self.check_punished(mn) and not self.check_banned(mn)
 
+    def mine_quorum_no_check(self, expected_good_nodes, mninfos_online):
+        # Unlike in mine_quorum we skip most of the checks and only care about
+        # nodes moving forward from phase to phase and the fact that the quorum is actually mined.
+        self.log.info("Mining a quorum with no checks")
+        nodes = [self.nodes[0]] + [mn.node for mn in mninfos_online]
+
+        # move forward to next DKG
+        skip_count = 24 - (self.nodes[0].getblockcount() % 24)
+        if skip_count != 0:
+            self.bump_mocktime(1, nodes=nodes)
+            self.nodes[0].generate(skip_count)
+        self.sync_blocks(nodes)
+
+        q = self.nodes[0].getbestblockhash()
+        self.log.info("Expected quorum_hash: "+str(q))
+        self.log.info("Waiting for phase 1 (init)")
+        self.wait_for_quorum_phase(q, 1, expected_good_nodes, None, 0, mninfos_online)
+        self.move_blocks(nodes, 2)
+
+        self.log.info("Waiting for phase 2 (contribute)")
+        self.wait_for_quorum_phase(q, 2, expected_good_nodes, None, 0, mninfos_online)
+        self.move_blocks(nodes, 2)
+
+        self.log.info("Waiting for phase 3 (complain)")
+        self.wait_for_quorum_phase(q, 3, expected_good_nodes, None, 0, mninfos_online)
+        self.move_blocks(nodes, 2)
+
+        self.log.info("Waiting for phase 4 (justify)")
+        self.wait_for_quorum_phase(q, 4, expected_good_nodes, None, 0, mninfos_online)
+        self.move_blocks(nodes, 2)
+
+        self.log.info("Waiting for phase 5 (commit)")
+        self.wait_for_quorum_phase(q, 5, expected_good_nodes, None, 0, mninfos_online)
+        self.move_blocks(nodes, 2)
+
+        self.log.info("Waiting for phase 6 (mining)")
+        self.wait_for_quorum_phase(q, 6, expected_good_nodes, None, 0, mninfos_online)
+
+        self.log.info("Waiting for final commitment")
+        self.wait_for_quorum_commitment(q, nodes)
+
+        self.log.info("Mining final commitment")
+        self.bump_mocktime(1, nodes=nodes)
+        self.nodes[0].getblocktemplate() # this calls CreateNewBlock
+        self.nodes[0].generate(1)
+        self.sync_blocks(nodes)
+
+        self.log.info("Waiting for quorum to appear in the list")
+        self.wait_for_quorum_list(q, nodes)
+
+        new_quorum = self.nodes[0].quorum("list", 1)["llmq_test"][0]
+        assert_equal(q, new_quorum)
+        quorum_info = self.nodes[0].quorum("info", 100, new_quorum)
+
+        # Mine 8 (SIGN_HEIGHT_OFFSET) more blocks to make sure that the new quorum gets eligible for signing sessions
+        self.nodes[0].generate(8)
+        self.sync_blocks(nodes)
+        self.log.info("New quorum: height=%d, quorumHash=%s, quorumIndex=%d, minedBlock=%s" % (quorum_info["height"], new_quorum, quorum_info["quorumIndex"], quorum_info["minedBlock"]))
+
+        return new_quorum
+
     def test_banning(self, invalidate_proc, expected_connections):
         mninfos_online = self.mninfo.copy()
         mninfos_valid = self.mninfo.copy()
@@ -105,13 +166,15 @@ class LLMQSimplePoSeTest(DashTestFramework):
 
         # NOTE: Min PoSe penalty is 100 (see CDeterministicMNList::CalcMaxPoSePenalty()),
         # so nodes are PoSe-banned in the same DKG they misbehave without being PoSe-punished first.
-        if not instant_ban:
-            # it's ok to miss probes/quorum connections up to 5 times
-            for i in range(5):
+        if instant_ban:
+            self.reset_probe_timeouts()
+            self.mine_quorum(expected_connections=expected_connections, expected_members=expected_contributors, expected_contributions=expected_contributors, expected_complaints=expected_contributors-1, expected_commitments=expected_contributors, mninfos_online=mninfos_online, mninfos_valid=mninfos_valid)
+        else:
+            # It's ok to miss probes/quorum connections up to 5 times.
+            # 6th time is when it should be banned for sure.
+            for i in range(6):
                 self.reset_probe_timeouts()
-                self.mine_quorum(expected_connections=expected_connections, expected_members=expected_contributors, expected_contributions=expected_contributors, expected_complaints=0, expected_commitments=expected_contributors, mninfos_online=mninfos_online, mninfos_valid=mninfos_valid)
-        self.reset_probe_timeouts()
-        self.mine_quorum(expected_connections=expected_connections, expected_members=expected_contributors, expected_contributions=expected_contributors, expected_complaints=expected_contributors-1, expected_commitments=expected_contributors, mninfos_online=mninfos_online, mninfos_valid=mninfos_valid)
+                self.mine_quorum(expected_connections=expected_connections, expected_members=expected_contributors, expected_contributions=expected_contributors, expected_complaints=0, expected_commitments=expected_contributors, mninfos_online=mninfos_online, mninfos_valid=mninfos_valid)
+            self.mine_quorum_no_check(expected_contributors - 1, mninfos_online)
 
         assert self.check_banned(mn)
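The control flow of the fixed banning test, paraphrased as a sketch (most of `mine_quorum`'s keyword arguments elided): while the misbehaving MN is merely PoSe-punished the strict expectations still hold, but in the DKG where the ban lands the member counts change mid-session, so the final round is mined with the relaxed helper.

    def ban_misbehaving_mn(test, expected_contributors, mninfos_online, instant_ban):
        if instant_ban:
            # penalty reaches 100 at once: banned within a single DKG
            test.reset_probe_timeouts()
            test.mine_quorum(expected_complaints=expected_contributors - 1,
                             mninfos_online=mninfos_online)
        else:
            # misses 1-5 only punish; the 6th is guaranteed to ban
            for _ in range(6):
                test.reset_probe_timeouts()
                test.mine_quorum(expected_complaints=0, mninfos_online=mninfos_online)
            # counts are unreliable once the ban kicks in, so skip the checks
            test.mine_quorum_no_check(expected_contributors - 1, mninfos_online)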
@@ -68,9 +68,13 @@ class AddrTest(BitcoinTestFramework):
         with self.nodes[0].assert_debug_log([
                 'Added 10 addresses from 127.0.0.1: 0 tried',
                 'received: addrv2 (131 bytes) peer=1',
-                'sending addrv2 (131 bytes) peer=2',
         ]):
             addr_source.send_and_ping(msg)
-            self.bump_mocktime(30 * 60)
-            addr_receiver.wait_for_addrv2()
+
+        # Wait until "Added ..." before bumping mocktime to make sure addrv2 is (almost) fully processed
+        with self.nodes[0].assert_debug_log([
+                'sending addrv2 (131 bytes) peer=2',
+        ]):
+            self.bump_mocktime(30 * 60)
+            addr_receiver.wait_for_addrv2()
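Splitting the single `assert_debug_log` scope in two is what makes the test deterministic: the first scope proves the addresses were fully processed before the clock moves, the second proves the relay happens after the 30-minute jump. The same shape as a helper (a sketch; the framework objects are passed in as parameters rather than imported):

    def relay_addrv2_deterministically(node, addr_source, addr_receiver, msg, bump_mocktime):
        # scope 1: payload must be fully processed before touching the clock
        with node.assert_debug_log(['Added 10 addresses from 127.0.0.1: 0 tried',
                                    'received: addrv2 (131 bytes) peer=1']):
            addr_source.send_and_ping(msg)
        # scope 2: only now fast-forward past the relay timer and demand
        # the outbound addrv2 to show up
        with node.assert_debug_log(['sending addrv2 (131 bytes) peer=2']):
            bump_mocktime(30 * 60)
            addr_receiver.wait_for_addrv2()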
@@ -152,7 +152,7 @@ class InvalidMessagesTest(BitcoinTestFramework):
         # modify magic bytes
         msg = b'\xff' * 4 + msg[4:]
         conn.send_raw_message(msg)
-        conn.wait_for_disconnect(timeout=1)
+        conn.wait_for_disconnect(timeout=5)
         self.nodes[0].disconnect_p2ps()
 
     def test_checksum(self):
@@ -178,7 +178,7 @@ class InvalidMessagesTest(BitcoinTestFramework):
         # modify len to MAX_SIZE + 1
         msg = msg[:cut_len] + struct.pack("<I", 0x02000000 + 1) + msg[cut_len + 4:]
         self.nodes[0].p2p.send_raw_message(msg)
-        conn.wait_for_disconnect(timeout=1)
+        conn.wait_for_disconnect(timeout=5)
         self.nodes[0].disconnect_p2ps()
 
     def test_command(self):
@@ -815,32 +815,48 @@ class DashTestFramework(BitcoinTestFramework):
         # (MAX_BLOCKS_IN_TRANSIT_PER_PEER = 16 blocks) reorg error.
         self.log.info("Wait for dip0008 activation")
         while self.nodes[0].getblockcount() < self.dip8_activation_height:
+            self.bump_mocktime(10)
             self.nodes[0].generate(10)
             if slow_mode:
                 self.sync_blocks()
         self.sync_blocks()
 
-    def activate_dip0024(self, slow_mode=False, expected_activation_height=None):
+    def activate_dip0024(self, expected_activation_height=None):
         self.log.info("Wait for dip0024 activation")
 
+        # disable spork17 while mining blocks to activate dip0024 to prevent accidental quorum formation
+        spork17_value = self.nodes[0].spork('show')['SPORK_17_QUORUM_DKG_ENABLED']
+        self.bump_mocktime(1)
+        self.nodes[0].sporkupdate("SPORK_17_QUORUM_DKG_ENABLED", 4070908800)
+        self.wait_for_sporks_same()
+
+        # mine blocks in batches
+        batch_size = 10
         if expected_activation_height is not None:
             height = self.nodes[0].getblockcount()
-            batch_size = 100
             while height - expected_activation_height > batch_size:
+                self.bump_mocktime(batch_size)
                 self.nodes[0].generate(batch_size)
                 height += batch_size
                 self.sync_blocks()
             assert height - expected_activation_height < batch_size
-            self.nodes[0].generate(height - expected_activation_height - 1)
+            blocks_left = height - expected_activation_height - 1
+            self.bump_mocktime(blocks_left)
+            self.nodes[0].generate(blocks_left)
             self.sync_blocks()
             assert self.nodes[0].getblockchaininfo()['bip9_softforks']['dip0024']['status'] != 'active'
 
         while self.nodes[0].getblockchaininfo()['bip9_softforks']['dip0024']['status'] != 'active':
-            self.nodes[0].generate(10)
-            if slow_mode:
-                self.sync_blocks()
+            self.bump_mocktime(batch_size)
+            self.nodes[0].generate(batch_size)
+            self.sync_blocks()
         self.sync_blocks()
 
+        # revert spork17 changes
+        self.bump_mocktime(1)
+        self.nodes[0].sporkupdate("SPORK_17_QUORUM_DKG_ENABLED", spork17_value)
+        self.wait_for_sporks_same()
+
     def set_dash_llmq_test_params(self, llmq_size, llmq_threshold):
         self.llmq_size = llmq_size
         self.llmq_threshold = llmq_threshold
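The batching discipline in the new `activate_dip0024` generalizes: mine toward a target height in small batches and bump mocktime by one second per block, so node clocks keep pace with chain height and no timers fire at odd moments between batches. A minimal sketch with illustrative names:

    def mine_batches(node, sync_blocks, bump_mocktime, target_height, batch_size=10):
        # small batches plus a mocktime bump per block keep all nodes' clocks
        # roughly in sync with the chain while it grows
        while node.getblockcount() < target_height:
            blocks = min(batch_size, target_height - node.getblockcount())
            bump_mocktime(blocks)
            node.generate(blocks)
            sync_blocks()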
@@ -1155,40 +1171,46 @@ class DashTestFramework(BitcoinTestFramework):
             return all(node.spork('show') == sporks for node in self.nodes[1:])
         wait_until(check_sporks_same, timeout=timeout, sleep=0.5)
 
-    def wait_for_quorum_connections(self, quorum_hash, expected_connections, nodes, llmq_type_name="llmq_test", timeout = 60, wait_proc=None):
+    def wait_for_quorum_connections(self, quorum_hash, expected_connections, mninfos, llmq_type_name="llmq_test", timeout = 60, wait_proc=None):
         def check_quorum_connections():
-            all_ok = True
-            for node in nodes:
-                s = node.quorum("dkgstatus")
-                mn_ok = True
-                for qs in s:
-                    if "llmqType" not in qs:
-                        continue
-                    if "quorumConnections" not in qs:
-                        continue
-                    qconnections = qs["quorumConnections"]
-                    if qconnections["quorumHash"] != quorum_hash:
-                        mn_ok = False
-                        continue
-                    cnt = 0
-                    for c in qconnections["quorumConnections"]:
-                        if c["connected"]:
-                            cnt += 1
-                    if cnt < expected_connections:
-                        mn_ok = False
-                        break
-                    break
-                if not mn_ok:
-                    all_ok = False
-                    break
-            if not all_ok and wait_proc is not None:
-                wait_proc()
-            return all_ok
+            def ret():
+                if wait_proc is not None:
+                    wait_proc()
+                return False
+
+            for mn in mninfos:
+                s = mn.node.quorum("dkgstatus")
+                for qs in s["session"]:
+                    if qs["llmqType"] != llmq_type_name:
+                        continue
+                    if qs["status"]["quorumHash"] != quorum_hash:
+                        continue
+                    for qc in s["quorumConnections"]:
+                        if "quorumConnections" not in qc:
+                            continue
+                        if qc["llmqType"] != llmq_type_name:
+                            continue
+                        if qc["quorumHash"] != quorum_hash:
+                            continue
+                        if len(qc["quorumConnections"]) == 0:
+                            continue
+                        cnt = 0
+                        for c in qc["quorumConnections"]:
+                            if c["connected"]:
+                                cnt += 1
+                        if cnt < expected_connections:
+                            return ret()
+                        return True
+                    # a session with no matching connections - not ok
+                    return ret()
+                # a node with no sessions - ok
+                pass
+            # no sessions at all - not ok
+            return ret()
 
         wait_until(check_quorum_connections, timeout=timeout, sleep=1)
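The rewritten predicate funnels every "not ready yet" exit through a single `ret()` helper that first runs `wait_proc` (in practice a one-second mocktime bump), so node timers keep advancing while `wait_until` polls. The pattern extracted into a reusable wrapper (a sketch, not framework code):

    def make_polling_predicate(check, wait_proc=None):
        # wrap a boolean check so every failed poll first runs wait_proc,
        # e.g. lambda: bump_mocktime(1), keeping node timers moving
        def predicate():
            if check():
                return True
            if wait_proc is not None:
                wait_proc()
            return False
        return predicate

Used as `wait_until(make_polling_predicate(cond, wait_proc=lambda: self.bump_mocktime(1)), timeout=60)`, this behaves like the inline `ret()` idiom above.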
-    def wait_for_masternode_probes(self, mninfos, timeout = 30, wait_proc=None, llmq_type_name="llmq_test"):
+    def wait_for_masternode_probes(self, quorum_hash, mninfos, timeout = 30, wait_proc=None, llmq_type_name="llmq_test"):
         def check_probes():
             def ret():
                 if wait_proc is not None:
@@ -1197,75 +1219,63 @@ class DashTestFramework(BitcoinTestFramework):
                     wait_proc()
                 return False
 
             for mn in mninfos:
                 s = mn.node.quorum('dkgstatus')
-                if llmq_type_name not in s["session"]:
-                    continue
-                if "quorumConnections" not in s:
-                    return ret()
-                s = s["quorumConnections"]
-                if llmq_type_name not in s:
-                    return ret()
-
-                for c in s[llmq_type_name]:
-                    if c["proTxHash"] == mn.proTxHash:
+                for qs in s["session"]:
+                    if qs["llmqType"] != llmq_type_name:
                         continue
-                    if not c["outbound"]:
-                        mn2 = mn.node.protx('info', c["proTxHash"])
-                        if [m for m in mninfos if c["proTxHash"] == m.proTxHash]:
-                            # MN is expected to be online and functioning, so let's verify that the last successful
-                            # probe is not too old. Probes are retried after 50 minutes, while DKGs consider a probe
-                            # as failed after 60 minutes
-                            if mn2['metaInfo']['lastOutboundSuccessElapsed'] > 55 * 60:
-                                return ret()
-                        else:
-                            # MN is expected to be offline, so let's only check that the last probe is not too long ago
-                            if mn2['metaInfo']['lastOutboundAttemptElapsed'] > 55 * 60 and mn2['metaInfo']['lastOutboundSuccessElapsed'] > 55 * 60:
-                                return ret()
+                    if qs["status"]["quorumHash"] != quorum_hash:
+                        continue
+                    for qc in s["quorumConnections"]:
+                        if qc["llmqType"] != llmq_type_name:
+                            continue
+                        if qc["quorumHash"] != quorum_hash:
+                            continue
+                        for c in qc["quorumConnections"]:
+                            if c["proTxHash"] == mn.proTxHash:
+                                continue
+                            if not c["outbound"]:
+                                mn2 = mn.node.protx('info', c["proTxHash"])
+                                if [m for m in mninfos if c["proTxHash"] == m.proTxHash]:
+                                    # MN is expected to be online and functioning, so let's verify that the last successful
+                                    # probe is not too old. Probes are retried after 50 minutes, while DKGs consider a probe
+                                    # as failed after 60 minutes
+                                    if mn2['metaInfo']['lastOutboundSuccessElapsed'] > 55 * 60:
+                                        return ret()
+                                else:
+                                    # MN is expected to be offline, so let's only check that the last probe is not too long ago
+                                    if mn2['metaInfo']['lastOutboundAttemptElapsed'] > 55 * 60 and mn2['metaInfo']['lastOutboundSuccessElapsed'] > 55 * 60:
+                                        return ret()
             return True
 
         wait_until(check_probes, timeout=timeout, sleep=1)
     def wait_for_quorum_phase(self, quorum_hash, phase, expected_member_count, check_received_messages, check_received_messages_count, mninfos, llmq_type_name="llmq_test", timeout=30, sleep=1):
         def check_dkg_session():
-            all_ok = True
             member_count = 0
             for mn in mninfos:
                 s = mn.node.quorum("dkgstatus")["session"]
-                mn_ok = True
                 for qs in s:
                     if qs["llmqType"] != llmq_type_name:
                         continue
                     qstatus = qs["status"]
                     if qstatus["quorumHash"] != quorum_hash:
                         continue
-                    member_count += 1
-                    if "phase" not in qstatus:
-                        mn_ok = False
-                        break
                     if qstatus["phase"] != phase:
-                        mn_ok = False
-                        break
+                        return False
                     if check_received_messages is not None:
                         if qstatus[check_received_messages] < check_received_messages_count:
-                            mn_ok = False
-                            break
+                            return False
+                    member_count += 1
                     break
-                if not mn_ok:
-                    all_ok = False
-                    break
-            if all_ok and member_count != expected_member_count:
-                return False
-            return all_ok
+            return member_count >= expected_member_count
 
         wait_until(check_dkg_session, timeout=timeout, sleep=sleep)
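The shape of this refactor is the point: flag variables (`all_ok`, `mn_ok`) plus `break` made it easy to count a member before its checks had passed, while early returns make the predicate's meaning obvious. A miniature of the before/after (illustrative only, not the framework code):

    # before: flag-based, counts a session even when its checks fail later
    def check_flags(sessions, phase, expected):
        all_ok, count = True, 0
        for s in sessions:
            count += 1
            if s["phase"] != phase:
                all_ok = False
                break
        return all_ok and count == expected

    # after: return early on any mismatch; count only what actually passed
    def check_early_return(sessions, phase, expected):
        count = 0
        for s in sessions:
            if s["phase"] != phase:
                return False
            count += 1
        return count >= expected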
     def wait_for_quorum_commitment(self, quorum_hash, nodes, llmq_type=100, timeout=15):
         def check_dkg_comitments():
-            time.sleep(2)
-            all_ok = True
             for node in nodes:
                 s = node.quorum("dkgstatus")
                 if "minableCommitments" not in s:
-                    all_ok = False
-                    break
+                    return False
                 commits = s["minableCommitments"]
                 c_ok = False
                 for c in commits:
@@ -1276,9 +1286,9 @@ class DashTestFramework(BitcoinTestFramework):
                     c_ok = True
                     break
                 if not c_ok:
-                    all_ok = False
-                    break
-            return all_ok
+                    return False
+            return True
 
         wait_until(check_dkg_comitments, timeout=timeout, sleep=1)
     def wait_for_quorum_list(self, quorum_hash, nodes, timeout=15, sleep=2, llmq_type_name="llmq_test"):
@@ -1344,9 +1354,9 @@ class DashTestFramework(BitcoinTestFramework):
         self.log.info("Expected quorum_hash:"+str(q))
         self.log.info("Waiting for phase 1 (init)")
         self.wait_for_quorum_phase(q, 1, expected_members, None, 0, mninfos_online, llmq_type_name=llmq_type_name)
-        self.wait_for_quorum_connections(q, expected_connections, nodes, wait_proc=lambda: self.bump_mocktime(1, nodes=nodes), llmq_type_name=llmq_type_name)
+        self.wait_for_quorum_connections(q, expected_connections, mninfos_online, wait_proc=lambda: self.bump_mocktime(1, nodes=nodes), llmq_type_name=llmq_type_name)
         if spork23_active:
-            self.wait_for_masternode_probes(mninfos_valid, wait_proc=lambda: self.bump_mocktime(1, nodes=nodes))
+            self.wait_for_masternode_probes(q, mninfos_online, wait_proc=lambda: self.bump_mocktime(1, nodes=nodes))
 
         self.move_blocks(nodes, 2)
@@ -1440,9 +1450,9 @@ class DashTestFramework(BitcoinTestFramework):
         self.log.info("quorumIndex 0: Waiting for phase 1 (init)")
         self.wait_for_quorum_phase(q_0, 1, expected_members, None, 0, mninfos_online, llmq_type_name)
         self.log.info("quorumIndex 0: Waiting for quorum connections (init)")
-        self.wait_for_quorum_connections(q_0, expected_connections, nodes, llmq_type_name, wait_proc=lambda: self.bump_mocktime(1, nodes=nodes))
+        self.wait_for_quorum_connections(q_0, expected_connections, mninfos_online, llmq_type_name, wait_proc=lambda: self.bump_mocktime(1, nodes=nodes))
         if spork23_active:
-            self.wait_for_masternode_probes(mninfos_valid, wait_proc=lambda: self.bump_mocktime(1, nodes=nodes))
+            self.wait_for_masternode_probes(q_0, mninfos_online, wait_proc=lambda: self.bump_mocktime(1, nodes=nodes), llmq_type_name=llmq_type_name)
 
         self.move_blocks(nodes, 1)
 
@@ -1454,7 +1464,9 @@ class DashTestFramework(BitcoinTestFramework):
         self.log.info("quorumIndex 1: Waiting for phase 1 (init)")
         self.wait_for_quorum_phase(q_1, 1, expected_members, None, 0, mninfos_online, llmq_type_name)
         self.log.info("quorumIndex 1: Waiting for quorum connections (init)")
-        self.wait_for_quorum_connections(q_1, expected_connections, nodes, llmq_type_name, wait_proc=lambda: self.bump_mocktime(1, nodes=nodes))
+        self.wait_for_quorum_connections(q_1, expected_connections, mninfos_online, llmq_type_name, wait_proc=lambda: self.bump_mocktime(1, nodes=nodes))
+        if spork23_active:
+            self.wait_for_masternode_probes(q_1, mninfos_online, wait_proc=lambda: self.bump_mocktime(1, nodes=nodes), llmq_type_name=llmq_type_name)
 
         self.move_blocks(nodes, 1)
@@ -1554,8 +1566,8 @@ class DashTestFramework(BitcoinTestFramework):
         # Note: recsigs aren't relayed to regular nodes by default,
         # make sure to pick a mn as a node to query for recsigs.
         node = self.mninfo[0].node if node is None else node
-        time_start = time.time()
-        while time.time() - time_start < 10:
+        stop_time = time.time() + 10 * self.options.timeout_scale
+        while time.time() < stop_time:
             try:
                 return node.quorum('getrecsig', llmq_type, rec_sig_id, rec_sig_msg_hash)
             except JSONRPCException:
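Computing a scaled deadline once, instead of comparing elapsed time against an unscaled constant on every iteration, is the same `timeout_scale` fix applied to a retry loop. As a standalone helper (a sketch; `fetch` stands in for the `quorum getrecsig` call):

    import time

    def poll_with_deadline(fetch, timeout=10, timeout_scale=1, sleep=0.1):
        # the deadline is computed once, already scaled for slow machines
        stop_time = time.time() + timeout * timeout_scale
        while time.time() < stop_time:
            try:
                return fetch()
            except Exception:  # e.g. JSONRPCException while the recsig is still unknown
                time.sleep(sleep)
        raise AssertionError("gave up after %s seconds" % (timeout * timeout_scale))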
@@ -1596,6 +1608,7 @@ class DashTestFramework(BitcoinTestFramework):
             if self.mocktime % 2:
                 self.bump_mocktime(self.quorum_data_request_expiration_timeout + 1)
                 self.nodes[0].generate(1)
+                self.sync_blocks()
             else:
                 self.bump_mocktime(self.quorum_data_thread_request_timeout_seconds + 1)
@@ -227,7 +227,7 @@ def str_to_b64str(string):
 def satoshi_round(amount):
     return Decimal(amount).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
 
-def wait_until(predicate, *, attempts=float('inf'), timeout=float('inf'), sleep=0.05, timeout_factor=1.0, lock=None, do_assert=True, allow_exception=False):
+def wait_until(predicate, *, attempts=float('inf'), timeout=float('inf'), sleep=0.5, timeout_factor=1.0, lock=None, do_assert=True, allow_exception=False):
     if attempts == float('inf') and timeout == float('inf'):
         timeout = 60
     timeout = timeout * timeout_factor
@@ -470,8 +470,9 @@ def connect_nodes(from_connection, node_num):
 
 def isolate_node(node, timeout=5):
     node.setnetworkactive(False)
-    st = time.time()
-    while time.time() < st + timeout:
+    timeout *= Options.timeout_scale
+    stop_time = time.time() + timeout
+    while time.time() < stop_time:
        if node.getconnectioncount() == 0:
            return
        time.sleep(0.5)
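The default-sleep change is simple arithmetic: `sleep=0.05` lets a single waiter issue up to 20 RPC round-trips per second against nodes that are already busy, while `sleep=0.5` caps it at 2 per second and still allows up to 120 polls within the default 60-second timeout. A simplified mirror of the helper (the real one also handles `attempts`, `lock`, and `timeout_factor`):

    import time

    def wait_until(predicate, *, timeout=60, sleep=0.5):
        # 0.05 meant up to 1/0.05 = 20 polls/s per waiter; 0.5 caps it at 2/s
        stop_time = time.time() + timeout
        while time.time() < stop_time:
            if predicate():
                return
            time.sleep(sleep)
        raise AssertionError("wait_until() timed out")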