tests: Fix feature_llmq_simplepose.py and feature_llmq_signing.py (#3781)
* tests: Use lower mocktime bump value to reset masternode probes in feature_llmq_simplepose.py

  Bumping `50 * 60 + 1` should be enough, see https://github.com/dashpay/dash/blob/master/src/llmq/quorums_utils.cpp#L218-L222. Bumping `60 * 60 + 1` interferes with mnsync reset, see https://github.com/dashpay/dash/blob/master/src/masternode/masternode-sync.cpp#L112-L119.

* Fix expected connection count in llmq-signing.py and llmq-simplepose.py
* Sleep a couple of seconds to let mn sync tick to happen
* Move helper functions out of run_test
* Let helper functions return expect_contribution_to_fail
* No need to check for "punished" state in test_banning
* Split mninfos in test_banning and mine_quorum into "online" and "valid" sets

  Needed for wait_for_masternode_probes in mine_quorum. Also, refactor and fix test_banning while at it.
This commit is contained in:
parent b42f2d3c9c
commit ac6878ffa2
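A quick sketch of the timing constraint behind the first bullet. The real thresholds live in the linked C++ sources, so the two constants below are assumptions standing in for them, not values taken from the patch:

# Illustrative sketch only: PROBE_STALE_AFTER and MNSYNC_RESET_AFTER are assumed
# stand-ins for the constants referenced in quorums_utils.cpp and masternode-sync.cpp.
# The mocktime bump used by the tests has to land between the two: late enough to
# force fresh masternode probes, early enough not to trigger a mnsync reset.
PROBE_STALE_AFTER = 50 * 60    # assumed: probes older than this are re-done
MNSYNC_RESET_AFTER = 60 * 60   # assumed: bumping past this also resets mnsync

bump = 50 * 60 + 1             # the value the tests now use
assert PROBE_STALE_AFTER < bump < MNSYNC_RESET_AFTER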
@@ -26,11 +26,13 @@ class LLMQSigningTest(DashTestFramework):
     def run_test(self):
 
         self.nodes[0].spork("SPORK_17_QUORUM_DKG_ENABLED", 0)
+        expected_connections = 2
         if self.options.spork21:
             self.nodes[0].spork("SPORK_21_QUORUM_ALL_CONNECTED", 0)
+            expected_connections = 4
         self.wait_for_sporks_same()
 
-        self.mine_quorum()
+        self.mine_quorum(expected_connections=expected_connections)
 
         id = "0000000000000000000000000000000000000000000000000000000000000001"
         msgHash = "0000000000000000000000000000000000000000000000000000000000000002"
@@ -67,12 +69,12 @@ class LLMQSigningTest(DashTestFramework):
         recsig_time = self.mocktime
 
         # Mine one more quorum, so that we have 2 active ones, nothing should change
-        self.mine_quorum()
+        self.mine_quorum(expected_connections=expected_connections)
         assert_sigs_nochange(True, False, True, 3)
 
         # Mine 2 more quorums, so that the one used for the the recovered sig should become inactive, nothing should change
-        self.mine_quorum()
-        self.mine_quorum()
+        self.mine_quorum(expected_connections=expected_connections)
+        self.mine_quorum(expected_connections=expected_connections)
         assert_sigs_nochange(True, False, True, 3)
 
         # fast forward until 0.5 days before cleanup is expected, recovered sig should still be valid
@@ -26,13 +26,10 @@ class LLMQSimplePoSeTest(DashTestFramework):
         self.wait_for_sporks_same()
 
         # check if mining quorums with all nodes being online succeeds without punishment/banning
-        self.test_no_banning()
+        self.test_no_banning(expected_connections=2)
 
         # Now lets isolate MNs one by one and verify that punishment/banning happens
-        def isolate_mn(mn):
-            mn.node.setnetworkactive(False)
-            wait_until(lambda: mn.node.getconnectioncount() == 0)
-        self.test_banning(isolate_mn, True)
+        self.test_banning(self.isolate_mn, 1)
 
         self.repair_masternodes(False)
@@ -45,50 +42,66 @@ class LLMQSimplePoSeTest(DashTestFramework):
         self.test_no_banning(expected_connections=4)
 
         # Lets restart masternodes with closed ports and verify that they get banned even though they are connected to other MNs (via outbound connections)
-        def close_mn_port(mn):
-            self.stop_node(mn.node.index)
-            self.start_masternode(mn, ["-listen=0", "-nobind"])
-            connect_nodes(mn.node, 0)
-            # Make sure the to-be-banned node is still connected well via outbound connections
-            for mn2 in self.mninfo:
-                if mn2 is not mn:
-                    connect_nodes(mn.node, mn2.node.index)
-            self.reset_probe_timeouts()
-        self.test_banning(close_mn_port, False)
+        self.test_banning(self.close_mn_port, 3)
 
         self.repair_masternodes(True)
         self.reset_probe_timeouts()
 
-        def force_old_mn_proto(mn):
-            self.stop_node(mn.node.index)
-            self.start_masternode(mn, ["-pushversion=70216"])
-            connect_nodes(mn.node, 0)
-            self.reset_probe_timeouts()
-        self.test_banning(force_old_mn_proto, False)
+        self.test_banning(self.force_old_mn_proto, 3)
 
-    def test_no_banning(self, expected_connections=1):
+    def isolate_mn(self, mn):
+        mn.node.setnetworkactive(False)
+        wait_until(lambda: mn.node.getconnectioncount() == 0)
+        return True
+
+    def close_mn_port(self, mn):
+        self.stop_node(mn.node.index)
+        self.start_masternode(mn, ["-listen=0", "-nobind"])
+        connect_nodes(mn.node, 0)
+        # Make sure the to-be-banned node is still connected well via outbound connections
+        for mn2 in self.mninfo:
+            if mn2 is not mn:
+                connect_nodes(mn.node, mn2.node.index)
+        self.reset_probe_timeouts()
+        return False
+
+    def force_old_mn_proto(self, mn):
+        self.stop_node(mn.node.index)
+        self.start_masternode(mn, ["-pushversion=70216"])
+        connect_nodes(mn.node, 0)
+        self.reset_probe_timeouts()
+        return False
+
+    def test_no_banning(self, expected_connections):
         for i in range(3):
             self.mine_quorum(expected_connections=expected_connections)
         for mn in self.mninfo:
             assert(not self.check_punished(mn) and not self.check_banned(mn))
 
-    def test_banning(self, invalidate_proc, expect_contribution_to_fail):
-        online_mninfos = self.mninfo.copy()
+    def test_banning(self, invalidate_proc, expected_connections):
+        mninfos_online = self.mninfo.copy()
+        mninfos_valid = self.mninfo.copy()
+        expected_contributors = len(mninfos_online)
         for i in range(2):
-            mn = online_mninfos[len(online_mninfos) - 1]
-            online_mninfos.remove(mn)
-            invalidate_proc(mn)
+            mn = mninfos_valid.pop()
+            went_offline = invalidate_proc(mn)
+            if went_offline:
+                mninfos_online.remove(mn)
+                expected_contributors -= 1
 
             t = time.time()
-            while (not self.check_punished(mn) or not self.check_banned(mn)) and (time.time() - t) < 120:
-                expected_contributors = len(online_mninfos) + 1
-                if expect_contribution_to_fail:
-                    expected_contributors -= 1
+            while (not self.check_banned(mn)) and (time.time() - t) < 120:
                 # Make sure we do fresh probes
-                self.bump_mocktime(60 * 60)
-                self.mine_quorum(expected_connections=1, expected_members=len(online_mninfos), expected_contributions=expected_contributors, expected_complaints=expected_contributors-1, expected_commitments=expected_contributors, mninfos=online_mninfos)
+                self.bump_mocktime(50 * 60 + 1)
+                # Sleep a couple of seconds to let mn sync tick to happen
+                time.sleep(2)
+                self.mine_quorum(expected_connections=expected_connections, expected_members=expected_contributors, expected_contributions=expected_contributors, expected_complaints=expected_contributors-1, expected_commitments=expected_contributors, mninfos_online=mninfos_online, mninfos_valid=mninfos_valid)
 
-            assert(self.check_punished(mn) and self.check_banned(mn))
+            assert(self.check_banned(mn))
+
+            if not went_offline:
+                # we do not include PoSe banned mns in quorums, so the next one should have 1 contributor less
+                expected_contributors -= 1
 
     def repair_masternodes(self, restart):
         # Repair all nodes
@@ -118,7 +131,9 @@ class LLMQSimplePoSeTest(DashTestFramework):
 
     def reset_probe_timeouts(self):
         # Make sure all masternodes will reconnect/re-probe
-        self.bump_mocktime(60 * 60 + 1)
+        self.bump_mocktime(50 * 60 + 1)
+        # Sleep a couple of seconds to let mn sync tick to happen
+        time.sleep(2)
         self.sync_all()
 
     def check_punished(self, mn):
@@ -891,21 +891,23 @@ class DashTestFramework(BitcoinTestFramework):
            return all_ok
        wait_until(check_dkg_comitments, timeout=timeout, sleep=0.1)
 
-    def mine_quorum(self, expected_members=None, expected_connections=2, expected_contributions=None, expected_complaints=0, expected_justifications=0, expected_commitments=None, mninfos=None):
+    def mine_quorum(self, expected_connections=2, expected_members=None, expected_contributions=None, expected_complaints=0, expected_justifications=0, expected_commitments=None, mninfos_online=None, mninfos_valid=None):
         if expected_members is None:
             expected_members = self.llmq_size
         if expected_contributions is None:
             expected_contributions = self.llmq_size
         if expected_commitments is None:
             expected_commitments = self.llmq_size
-        if mninfos is None:
-            mninfos = self.mninfo
+        if mninfos_online is None:
+            mninfos_online = self.mninfo.copy()
+        if mninfos_valid is None:
+            mninfos_valid = self.mninfo.copy()
 
         self.log.info("Mining quorum: expected_members=%d, expected_connections=%d, expected_contributions=%d, expected_complaints=%d, expected_justifications=%d, "
                       "expected_commitments=%d" % (expected_members, expected_connections, expected_contributions, expected_complaints,
                                                    expected_justifications, expected_commitments))
 
-        nodes = [self.nodes[0]] + [mn.node for mn in mninfos]
+        nodes = [self.nodes[0]] + [mn.node for mn in mninfos_online]
 
         quorums = self.nodes[0].quorum("list")
@@ -919,40 +921,40 @@ class DashTestFramework(BitcoinTestFramework):
         q = self.nodes[0].getbestblockhash()
 
         self.log.info("Waiting for phase 1 (init)")
-        self.wait_for_quorum_phase(q, 1, expected_members, None, 0, mninfos)
+        self.wait_for_quorum_phase(q, 1, expected_members, None, 0, mninfos_online)
         self.wait_for_quorum_connections(expected_connections, nodes, wait_proc=lambda: self.bump_mocktime(1, nodes=nodes))
         if self.nodes[0].spork('show')['SPORK_21_QUORUM_ALL_CONNECTED'] == 0:
-            self.wait_for_masternode_probes(mninfos, wait_proc=lambda: self.bump_mocktime(1, nodes=nodes))
+            self.wait_for_masternode_probes(mninfos_valid, wait_proc=lambda: self.bump_mocktime(1, nodes=nodes))
         self.bump_mocktime(1, nodes=nodes)
         self.nodes[0].generate(2)
         sync_blocks(nodes)
 
         self.log.info("Waiting for phase 2 (contribute)")
-        self.wait_for_quorum_phase(q, 2, expected_members, "receivedContributions", expected_contributions, mninfos)
+        self.wait_for_quorum_phase(q, 2, expected_members, "receivedContributions", expected_contributions, mninfos_online)
         self.bump_mocktime(1, nodes=nodes)
         self.nodes[0].generate(2)
         sync_blocks(nodes)
 
         self.log.info("Waiting for phase 3 (complain)")
-        self.wait_for_quorum_phase(q, 3, expected_members, "receivedComplaints", expected_complaints, mninfos)
+        self.wait_for_quorum_phase(q, 3, expected_members, "receivedComplaints", expected_complaints, mninfos_online)
         self.bump_mocktime(1, nodes=nodes)
         self.nodes[0].generate(2)
         sync_blocks(nodes)
 
         self.log.info("Waiting for phase 4 (justify)")
-        self.wait_for_quorum_phase(q, 4, expected_members, "receivedJustifications", expected_justifications, mninfos)
+        self.wait_for_quorum_phase(q, 4, expected_members, "receivedJustifications", expected_justifications, mninfos_online)
         self.bump_mocktime(1, nodes=nodes)
         self.nodes[0].generate(2)
         sync_blocks(nodes)
 
         self.log.info("Waiting for phase 5 (commit)")
-        self.wait_for_quorum_phase(q, 5, expected_members, "receivedPrematureCommitments", expected_commitments, mninfos)
+        self.wait_for_quorum_phase(q, 5, expected_members, "receivedPrematureCommitments", expected_commitments, mninfos_online)
         self.bump_mocktime(1, nodes=nodes)
         self.nodes[0].generate(2)
         sync_blocks(nodes)
 
         self.log.info("Waiting for phase 6 (mining)")
-        self.wait_for_quorum_phase(q, 6, expected_members, None, 0, mninfos)
+        self.wait_for_quorum_phase(q, 6, expected_members, None, 0, mninfos_online)
 
         self.log.info("Waiting final commitment")
         self.wait_for_quorum_commitment(q, nodes)
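As a footnote on the interface change in the last two hunks: test_banning now tracks two masternode sets instead of one. Below is a standalone Python illustration of that bookkeeping, not part of the patch; the helper name and the toy data are made up for the example.

# Every invalidated MN drops out of the "valid" set (it should no longer contribute
# to the DKG), but only MNs whose invalidation actually took them offline drop out
# of the "online" set that mine_quorum keeps driving.
def split_mninfos(mninfos, invalidated):
    # invalidated maps an MN to True if invalidating it took the node offline
    online = [mn for mn in mninfos if not invalidated.get(mn, False)]
    valid = [mn for mn in mninfos if mn not in invalidated]
    return online, valid

online, valid = split_mninfos(["mn1", "mn2", "mn3"], {"mn2": False, "mn3": True})
assert online == ["mn1", "mn2"]   # mn3 went offline (like isolate_mn)
assert valid == ["mn1"]           # mn2 and mn3 both stop contributing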