#!/usr/bin/env python3
# Copyright (c) 2015-2024 The Dash Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
feature_dip3_v19.py
Checks DIP3 for v19
'''
from io import BytesIO

from test_framework.p2p import P2PInterface
from test_framework.messages import CBlock, CBlockHeader, CCbTx, CMerkleBlock, FromHex, hash256, msg_getmnlistd, \
    QuorumId, ser_uint256
from test_framework.test_framework import DashTestFramework
from test_framework.util import (
    assert_equal
)


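# P2P connection that records the last MNLISTDIFF message received, so the test
# can request masternode list diffs directly over the wire and inspect them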
class TestP2PConn(P2PInterface):
    def __init__(self):
        super().__init__()
        self.last_mnlistdiff = None

    def on_mnlistdiff(self, message):
        self.last_mnlistdiff = message

    def wait_for_mnlistdiff(self, timeout=30):
        def received_mnlistdiff():
            return self.last_mnlistdiff is not None
        return self.wait_until(received_mnlistdiff, timeout=timeout)

    def getmnlistdiff(self, base_block_hash, block_hash):
        msg = msg_getmnlistd(base_block_hash, block_hash)
        self.last_mnlistdiff = None
        self.send_message(msg)
        self.wait_for_mnlistdiff()
        return self.last_mnlistdiff


class DIP3V19Test(DashTestFramework):
    def set_test_params(self):
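        # 6 nodes, 5 of them masternodes; evo_count=2 accounts for the EvoNodes
        # that are registered dynamically further down in run_test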
        self.set_dash_test_params(6, 5, fast_dip3_enforcement=True, evo_count=2)

    def run_test(self):
        # Connect all nodes to node0 so that we always have the whole network connected;
        # otherwise only masternode connections would be established between nodes,
        # which won't propagate TXs/blocks
        self.test_node = self.nodes[0].add_p2p_connection(TestP2PConn())
        null_hash = format(0, "064x")

        for i in range(len(self.nodes)):
            if i != 0:
                self.connect_nodes(i, 0)

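        # Activate DIP8 and turn on DKG sessions, then wait until all nodes agree on the spork state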
        self.activate_dip8()
        self.nodes[0].sporkupdate("SPORK_17_QUORUM_DKG_ENABLED", 0)
        self.wait_for_sporks_same()

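        # A diff from the null hash (empty base list) must report every registered masternode as updated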
        expected_updated = [mn.proTxHash for mn in self.mninfo]
        b_0 = self.nodes[0].getbestblockhash()
        self.test_getmnlistdiff(null_hash, b_0, {}, [], expected_updated)

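        # Snapshot the operator pubkeys before the fork: v19 introduces the basic BLS
        # scheme, but the RPC should keep reporting these keys in legacy form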
        mn_list_before = self.nodes[0].masternodelist()
        pubkeyoperator_list_before = set([mn_list_before[e]["pubkeyoperator"] for e in mn_list_before])

        self.activate_v19(expected_activation_height=900)
        self.log.info("Activated v19 at height:" + str(self.nodes[0].getblockcount()))

        mn_list_after = self.nodes[0].masternodelist()
        pubkeyoperator_list_after = set([mn_list_after[e]["pubkeyoperator"] for e in mn_list_after])

        self.log.info("pubkeyoperator should still be shown using legacy scheme")
        assert_equal(pubkeyoperator_list_before, pubkeyoperator_list_after)

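        # Walk through three DKG cycle boundaries so a rotated (DIP24) quorum can be mined post-fork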
        self.move_to_next_cycle()
        self.log.info("Cycle H height:" + str(self.nodes[0].getblockcount()))
        self.move_to_next_cycle()
        self.log.info("Cycle H+C height:" + str(self.nodes[0].getblockcount()))
        self.move_to_next_cycle()
        self.log.info("Cycle H+2C height:" + str(self.nodes[0].getblockcount()))

        self.mine_cycle_quorum(llmq_type_name='llmq_test_dip0024', llmq_type=103)

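        # Register the first EvoNode; the same rnd value is deliberately reused below,
        # which should make the duplicate platform fields rejected at broadcast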
        evo_info_0 = self.dynamically_add_masternode(evo=True, rnd=7)
        assert evo_info_0 is not None
        self.nodes[0].generate(8)
        self.sync_blocks(self.nodes)

self.log.info("Checking that protxs with duplicate EvoNodes fields are rejected")
evo_info_1 = self.dynamically_add_masternode(evo=True, rnd=7, should_be_rejected=True)
assert evo_info_1 is None
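        # Move evo_info_0 to the rnd=8 fields; a fresh registration with rnd=8 should then collide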
        self.dynamically_evo_update_service(evo_info_0, 8)
        evo_info_2 = self.dynamically_add_masternode(evo=True, rnd=8, should_be_rejected=True)
        assert evo_info_2 is None
        evo_info_3 = self.dynamically_add_masternode(evo=True, rnd=9)
        assert evo_info_3 is not None
        self.nodes[0].generate(8)
        self.sync_blocks(self.nodes)

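        # evo_info_3 already uses the rnd=9 fields, so updating evo_info_0 to them must be rejected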
        self.dynamically_evo_update_service(evo_info_0, 9, should_be_rejected=True)

        revoke_protx = self.mninfo[-1].proTxHash
        revoke_keyoperator = self.mninfo[-1].keyOperator
        self.log.info(f"Trying to revoke proTx:{revoke_protx}")
        self.test_revoke_protx(evo_info_3.nodeIdx, revoke_protx, revoke_keyoperator)

        self.mine_quorum(llmq_type_name='llmq_test', llmq_type=100)

self.log.info("Checking that adding more regular MNs after v19 doesn't break DKGs and IS/CLs")
for i in range(6):
new_mn = self.dynamically_add_masternode(evo=False, rnd=(10 + i))
assert new_mn is not None
        # Mine more quorums and make sure everything still works
        prev_quorum = None
        for _ in range(5):
            quorum = self.mine_quorum()
            assert prev_quorum != quorum
            prev_quorum = quorum

        self.wait_for_chainlocked_block_all_nodes(self.nodes[0].getbestblockhash())

    def test_revoke_protx(self, node_idx, revoke_protx, revoke_keyoperator):
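        # Fund a fresh address first: 'protx revoke' pays its fee from this address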
        funds_address = self.nodes[0].getnewaddress()
        fund_txid = self.nodes[0].sendtoaddress(funds_address, 1)
        self.wait_for_instantlock(fund_txid, self.nodes[0])
        tip = self.nodes[0].generate(1)[0]
        assert_equal(self.nodes[0].getrawtransaction(fund_txid, 1, tip)['confirmations'], 1)
        self.sync_all(self.nodes)

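        # Broadcast the ProUpRevTx (reason 1), signed with the operator key, and wait for it to confirm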
        protx_result = self.nodes[0].protx('revoke', revoke_protx, revoke_keyoperator, 1, funds_address)
        self.wait_for_instantlock(protx_result, self.nodes[0])
        tip = self.nodes[0].generate(1)[0]
        assert_equal(self.nodes[0].getrawtransaction(protx_result, 1, tip)['confirmations'], 1)
        # Revoking a MN results in disconnects. Wait for disconnects to actually happen
        # and then reconnect the corresponding node back to let sync_blocks finish correctly.
        self.wait_until(lambda: self.nodes[node_idx].getconnectioncount() == 0)
        self.connect_nodes(node_idx, 0)
        self.sync_all(self.nodes)
self.log.info(f"Successfully revoked={revoke_protx}")
for mn in self.mninfo:
if mn.proTxHash == revoke_protx:
self.mninfo.remove(mn)
return

    def test_getmnlistdiff(self, base_block_hash, block_hash, base_mn_list, expected_deleted, expected_updated):
        d = self.test_getmnlistdiff_base(base_block_hash, block_hash)

        # Assert that the deletedMNs and mnList fields are what we expected
        assert_equal(set(d.deletedMNs), set([int(e, 16) for e in expected_deleted]))
        assert_equal(set([e.proRegTxHash for e in d.mnList]), set(int(e, 16) for e in expected_updated))

        # Build a new list based on the old list and the info from the diff
        new_mn_list = base_mn_list.copy()
        for e in d.deletedMNs:
            new_mn_list.pop(format(e, '064x'))
        for e in d.mnList:
            new_mn_list[format(e.proRegTxHash, '064x')] = e

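        # The CbTx in the diff's coinbase payload carries the on-chain merkle root of the masternode list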
        cbtx = CCbTx()
        cbtx.deserialize(BytesIO(d.cbTx.vExtraPayload))

        # Verify that the merkle root matches what we locally calculate
        hashes = []
        for mn in sorted(new_mn_list.values(), key=lambda mn: ser_uint256(mn.proRegTxHash)):
            hashes.append(hash256(mn.serialize(with_version=False)))
        merkle_root = CBlock.get_merkle_root(hashes)
        assert_equal(merkle_root, cbtx.merkleRootMNList)

        return new_mn_list

    def test_getmnlistdiff_base(self, base_block_hash, block_hash):
        hexstr = self.nodes[0].getblockheader(block_hash, False)
        header = FromHex(CBlockHeader(), hexstr)

        d = self.test_node.getmnlistdiff(int(base_block_hash, 16), int(block_hash, 16))
        assert_equal(d.baseBlockHash, int(base_block_hash, 16))
        assert_equal(d.blockHash, int(block_hash, 16))

        # Check that the merkle proof is valid
        proof = CMerkleBlock(header, d.merkleProof)
        proof = proof.serialize().hex()
        assert_equal(self.nodes[0].verifytxoutproof(proof), [d.cbTx.hash])

        # Check if P2P messages match with RPCs
        d2 = self.nodes[0].protx("diff", base_block_hash, block_hash)
        assert_equal(d2["baseBlockHash"], base_block_hash)
        assert_equal(d2["blockHash"], block_hash)
        assert_equal(d2["cbTxMerkleTree"], d.merkleProof.serialize().hex())
        assert_equal(d2["cbTx"], d.cbTx.serialize().hex())
        assert_equal(set([int(e, 16) for e in d2["deletedMNs"]]), set(d.deletedMNs))
        assert_equal(set([int(e["proRegTxHash"], 16) for e in d2["mnList"]]), set([e.proRegTxHash for e in d.mnList]))
        assert_equal(set([QuorumId(e["llmqType"], int(e["quorumHash"], 16)) for e in d2["deletedQuorums"]]), set(d.deletedQuorums))
        assert_equal(set([QuorumId(e["llmqType"], int(e["quorumHash"], 16)) for e in d2["newQuorums"]]), set([QuorumId(e.llmqType, e.quorumHash) for e in d.newQuorums]))

        return d

if __name__ == '__main__':
    DIP3V19Test().main()