dash/test/functional/feature_llmq_simplepose.py
pasta 9b21aef29e
Merge #6269: feat: improvements for v19, v20 and dip3 - fire up test chains by first block 5/n
603061141f style: apply clang format for new changes (Konstantin Akimov)
f01338f607 feat: drop requirement of v20 for Asset Unlock transactions (Konstantin Akimov)
b3e9e5c981 feat: drop v20 requirement for special EHF transaction (Konstantin Akimov)
8639298e16 refactor: drop fast_dip3_enforcement=True from functional tests. (Konstantin Akimov)
0add6bc823 feat: removed 2 checkpoints: TestChainDIP3Setup and TestChainV19Setup from unit tests (Konstantin Akimov)
3fffb0cab9 refactor: moves evo-deterministicmns_tests specific code from header (Konstantin Akimov)
1d96fbf091 feat: let asset-unlock transactions be available since v20 on all networks (Konstantin Akimov)
4b4001bbe7 perf: activate v20 on the same block as v19 for Reg Test (Konstantin Akimov)
e0d97cf7ac feat: let asset locks be mined before v20 (Konstantin Akimov)

Pull request description:

  ## Issue being fixed or feature implemented
  This PR is the 5th in a series working toward the ultimate goal of activating old forks from block 1.
  It helps unit and functional tests run faster and lets the platform's dev environment start faster.

  ## What was done?
   - v20 on RegTest is now activated at the same block as v19 (height changed from 1200 to 900)
   - relaxed the condition for Asset Lock special transactions (they can be mined in any block, since v20 was activated a long time ago)
   - unified the Asset Unlock validation code across regtest, mainnet and testnet
   - removed 2 checkpoints (TestChainDIP3Setup and TestChainV19Setup) from unit tests, which makes further fork changes easier
   - dropped the flag `fast_dip3_enforcement=True` from functional tests, as it is now always true

  ## How Has This Been Tested?
  Run unit and functional tests

  The `tsan` job runs 500 seconds faster in real time and 2000 seconds faster in "accumulated time":
  https://gitlab.com/dashpay/dash/-/jobs/7817453421 - this PR
  https://gitlab.com/dashpay/dash/-/jobs/7805625816 - some old PR for reference

  No per-test breakdown is given here, because the tests affect each other and run in parallel.

  ## Breaking Changes
  Regtest now has v20 activated at the same block as v19 unless specified otherwise with `-testactivationheight=v20@1200`
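
  For example, a functional test could opt back into the old schedule roughly like this (a minimal sketch: the class name is made up and it is an assumption that `set_dash_test_params` pre-populates `self.extra_args`; only the `-testactivationheight=v20@1200` flag comes from this PR):

  ```python
  from test_framework.test_framework import DashTestFramework

  class RestoreOldV20HeightExample(DashTestFramework):
      def set_test_params(self):
          self.set_dash_test_params(6, 5)
          # re-pin v20 to height 1200 instead of the new default (the v19 height)
          self.extra_args = [args + ["-testactivationheight=v20@1200"] for args in self.extra_args]
  ```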

  ## Checklist:
  - [x] I have performed a self-review of my own code
  - [x] I have commented my code, particularly in hard-to-understand areas
  - [x] I have added or updated relevant unit/integration/functional/e2e tests
  - [x] I have made corresponding changes to the documentation
  - [x] I have assigned this pull request to a milestone

ACKs for top commit:
  UdjinM6:
    utACK 603061141f
  PastaPastaPasta:
    utACK 603061141f

Tree-SHA512: 5a1e15a32931682240ecd8e5bab8a0bba67eebf0409ea7b7556018240c48d59ec8daab8859a2fb883154aac95813553c2835a0527269fcf4e81f1edb1b2ed0ac
2024-09-26 20:12:40 -05:00


#!/usr/bin/env python3
# Copyright (c) 2015-2024 The Dash Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
feature_llmq_simplepose.py
Checks simple PoSe system based on LLMQ commitments
'''

import time

from test_framework.test_framework import DashTestFramework
from test_framework.util import assert_equal, force_finish_mnsync, p2p_port


class LLMQSimplePoSeTest(DashTestFramework):
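    # 6 nodes in total, 5 of them masternodes; the llmq_test quorum is resized to 5 members
    # with a threshold of 3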
    def set_test_params(self):
        self.set_dash_test_params(6, 5)
        self.set_dash_llmq_test_params(5, 3)

    def run_test(self):
        self.deaf_mns = []
        self.nodes[0].sporkupdate("SPORK_17_QUORUM_DKG_ENABLED", 0)
        self.wait_for_sporks_same()

        # check if mining quorums with all nodes being online succeeds without punishment/banning
        self.test_no_banning()

        # Now let's isolate MNs one by one and verify that punishment/banning happens
        self.test_banning(self.isolate_mn, 2)

        self.repair_masternodes(False)

        self.nodes[0].sporkupdate("SPORK_21_QUORUM_ALL_CONNECTED", 0)
        self.nodes[0].sporkupdate("SPORK_23_QUORUM_POSE", 0)
        self.wait_for_sporks_same()
        self.reset_probe_timeouts()

        # Make sure no banning happens with spork21 enabled
        self.test_no_banning()

        # Let's restart masternodes with closed ports and verify that they get banned even though they are connected to other MNs (via outbound connections)
        self.test_banning(self.close_mn_port)

        self.deaf_mns.clear()
        self.repair_masternodes(True)
        self.reset_probe_timeouts()
        self.test_banning(self.force_old_mn_proto, 3)

        # With PoSe off there should be no punishing for non-reachable and outdated nodes
        self.nodes[0].sporkupdate("SPORK_23_QUORUM_POSE", 4070908800)
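        # (4070908800 is a timestamp far in the future, which effectively turns the spork off)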
        self.wait_for_sporks_same()

        self.repair_masternodes(True)
        self.force_old_mn_proto(self.mninfo[0])
        self.test_no_banning(3)

        self.repair_masternodes(True)
        self.close_mn_port(self.mninfo[0])
        self.deaf_mns.clear()
        self.test_no_banning(3)
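
    # Each invalidate_proc below returns a (went_offline, instant_ban) tuple consumed by test_banning():
    # went_offline drops the MN from the set of online nodes, instant_ban means the MN is expected to be
    # PoSe-banned within a single DKG instead of accumulating penalties over several DKGs.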
    def isolate_mn(self, mn):
        mn.node.setnetworkactive(False)
        self.wait_until(lambda: mn.node.getconnectioncount() == 0)
        return True, True

    def close_mn_port(self, mn):
        self.deaf_mns.append(mn)
        self.stop_node(mn.node.index)
        self.start_masternode(mn, ["-listen=0", "-nobind"])
        self.connect_nodes(mn.node.index, 0)
        # Make sure the to-be-banned node is still connected well via outbound connections
        for mn2 in self.mninfo:
            if self.deaf_mns.count(mn2) == 0:
                self.connect_nodes(mn.node.index, mn2.node.index)
        self.reset_probe_timeouts()
        return False, False
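
    # -pushversion=70216 makes the restarted MN announce an obsolete protocol version,
    # so the other quorum members treat it as outdated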
    def force_old_mn_proto(self, mn):
        self.stop_node(mn.node.index)
        self.start_masternode(mn, ["-pushversion=70216"])
        self.connect_nodes(mn.node.index, 0)
        self.reset_probe_timeouts()
        return False, True

    def test_no_banning(self, expected_connections=None):
        for i in range(3):
            self.log.info(f"Testing no PoSe banning in normal conditions {i + 1}/3")
            self.mine_quorum(expected_connections=expected_connections)
            for mn in self.mninfo:
                assert not self.check_punished(mn) and not self.check_banned(mn)

    def mine_quorum_less_checks(self, expected_good_nodes, mninfos_online):
        # Unlike in mine_quorum we skip most of the checks and only care about
        # nodes moving forward from phase to phase correctly and the fact that the quorum is actually mined.
        self.log.info("Mining a quorum with less checks")
        nodes = [self.nodes[0]] + [mn.node for mn in mninfos_online]

        # move forward to next DKG
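        # (24 blocks is the DKG interval of the llmq_test quorum on regtest)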
        skip_count = 24 - (self.nodes[0].getblockcount() % 24)
        if skip_count != 0:
            self.bump_mocktime(skip_count, nodes=nodes)
            self.nodes[0].generate(skip_count)
        self.sync_blocks(nodes)

        q = self.nodes[0].getbestblockhash()
        self.log.info("Expected quorum_hash: " + str(q))

        self.log.info("Waiting for phase 1 (init)")
        self.wait_for_quorum_phase(q, 1, expected_good_nodes, None, 0, mninfos_online)
        self.move_blocks(nodes, 2)

        self.log.info("Waiting for phase 2 (contribute)")
        self.wait_for_quorum_phase(q, 2, expected_good_nodes, "receivedContributions", expected_good_nodes, mninfos_online)
        self.move_blocks(nodes, 2)

        self.log.info("Waiting for phase 3 (complain)")
        self.wait_for_quorum_phase(q, 3, expected_good_nodes, None, 0, mninfos_online)
        self.move_blocks(nodes, 2)

        self.log.info("Waiting for phase 4 (justify)")
        self.wait_for_quorum_phase(q, 4, expected_good_nodes, None, 0, mninfos_online)
        self.move_blocks(nodes, 2)

        self.log.info("Waiting for phase 5 (commit)")
        self.wait_for_quorum_phase(q, 5, expected_good_nodes, "receivedPrematureCommitments", expected_good_nodes, mninfos_online)
        self.move_blocks(nodes, 2)

        self.log.info("Waiting for phase 6 (mining)")
        self.wait_for_quorum_phase(q, 6, expected_good_nodes, None, 0, mninfos_online)

        self.log.info("Waiting final commitment")
        self.wait_for_quorum_commitment(q, nodes)

        self.log.info("Mining final commitment")
        self.bump_mocktime(1, nodes=nodes)
        self.nodes[0].getblocktemplate()  # this calls CreateNewBlock
        self.nodes[0].generate(1)
        self.sync_blocks(nodes)

        self.log.info("Waiting for quorum to appear in the list")
        self.wait_for_quorum_list(q, nodes)

        new_quorum = self.nodes[0].quorum("list", 1)["llmq_test"][0]
        assert_equal(q, new_quorum)
        quorum_info = self.nodes[0].quorum("info", 100, new_quorum)

        # Mine 8 (SIGN_HEIGHT_OFFSET) more blocks to make sure that the new quorum gets eligible for signing sessions
        self.bump_mocktime(8)
        self.nodes[0].generate(8)
        self.sync_blocks(nodes)

        self.log.info("New quorum: height=%d, quorumHash=%s, quorumIndex=%d, minedBlock=%s" % (quorum_info["height"], new_quorum, quorum_info["quorumIndex"], quorum_info["minedBlock"]))

        return new_quorum

    def test_banning(self, invalidate_proc, expected_connections=None):
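        # mninfos_online tracks MNs whose nodes are still running, mninfos_valid tracks MNs
        # that are not expected to be PoSe-banned yet; both start out as the full MN set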
        mninfos_online = self.mninfo.copy()
        mninfos_valid = self.mninfo.copy()
        expected_contributors = len(mninfos_online)
        for i in range(2):
            self.log.info(f"Testing PoSe banning due to {invalidate_proc.__name__} {i + 1}/2")
            mn = mninfos_valid.pop()
            went_offline, instant_ban = invalidate_proc(mn)
            expected_complaints = expected_contributors - 1
            if went_offline:
                mninfos_online.remove(mn)
                expected_contributors -= 1

            # NOTE: Min PoSe penalty is 100 (see CDeterministicMNList::CalcMaxPoSePenalty()),
            # so nodes are PoSe-banned in the same DKG they misbehave in, without being PoSe-punished first.
            if instant_ban:
                assert expected_connections is not None
                self.log.info("Expecting instant PoSe banning")
                self.reset_probe_timeouts()
                self.mine_quorum(expected_connections=expected_connections, expected_members=expected_contributors, expected_contributions=expected_contributors, expected_complaints=expected_complaints, expected_commitments=expected_contributors, mninfos_online=mninfos_online, mninfos_valid=mninfos_valid)
            else:
                # It's ok to miss probes/quorum connections up to 5 times.
                # The 6th time is when it should be banned for sure.
                assert expected_connections is None
                for j in range(6):
                    self.log.info(f"Accumulating PoSe penalty {j + 1}/6")
                    self.reset_probe_timeouts()
                    self.mine_quorum_less_checks(expected_contributors - 1, mninfos_online)

            assert self.check_banned(mn)

            if not went_offline:
                # we do not include PoSe-banned MNs in quorums, so the next one should have 1 contributor less
                expected_contributors -= 1

    def repair_masternodes(self, restart):
        self.log.info("Repairing all banned and punished masternodes")
        for mn in self.mninfo:
            if self.check_banned(mn) or self.check_punished(mn):
                addr = self.nodes[0].getnewaddress()
                self.nodes[0].sendtoaddress(addr, 0.1)
                self.nodes[0].protx('update_service', mn.proTxHash, '127.0.0.1:%d' % p2p_port(mn.node.index), mn.keyOperator, "", addr)
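                # the ProUpServTx created above lifts the PoSe ban once it is mined;
                # addr was funded just before so it can pay the transaction fee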
                if restart:
                    self.stop_node(mn.node.index)
                    self.start_masternode(mn)
                else:
                    mn.node.setnetworkactive(True)
                self.connect_nodes(mn.node.index, 0)

        # syncing blocks only since node 0 has txes waiting to be mined
        self.sync_blocks()

        # Make sure protxes are "safe" to mine even when InstantSend and ChainLocks are no longer functional
        self.bump_mocktime(60 * 10 + 1)
        self.nodes[0].generate(1)
        self.sync_all()

        # Isolate and re-connect all MNs (otherwise there might be open connections with no MNAUTH for MNs which were banned before)
        for mn in self.mninfo:
            assert not self.check_banned(mn)
            mn.node.setnetworkactive(False)
            self.wait_until(lambda: mn.node.getconnectioncount() == 0)
            mn.node.setnetworkactive(True)
            force_finish_mnsync(mn.node)
            self.connect_nodes(mn.node.index, 0)

    def reset_probe_timeouts(self):
        # Make sure all masternodes will reconnect/re-probe
        self.bump_mocktime(10 * 60 + 1)
        # Sleep a couple of seconds to let mn sync tick to happen
        time.sleep(2)

    def check_punished(self, mn):
        info = self.nodes[0].protx('info', mn.proTxHash)
        if info['state']['PoSePenalty'] > 0:
            return True
        return False
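
    # PoSeBanHeight is -1 in protx info while the masternode is not PoSe-banned
    # and is set to the ban height once it is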
    def check_banned(self, mn):
        info = self.nodes[0].protx('info', mn.proTxHash)
        if info['state']['PoSeBanHeight'] != -1:
            return True
        return False


if __name__ == '__main__':
    LLMQSimplePoSeTest().main()