From 22cfddaf120d6ec4f65f05ec5e83f2e63b236691 Mon Sep 17 00:00:00 2001 From: Alexander Block Date: Fri, 6 Dec 2019 10:05:58 +0100 Subject: [PATCH] Allow re-signing of IS locks when performing retroactive signing (#3219) * Implement re-signing of InstantSend inputs when TXs come in via blocks * Use GetAdjustedTime instead of GetTimeMillis in CSigSharesManager This allows use of mocktime in tests. * Expose verifiedProRegTxHash in getpeerinfo and implement wait_for_mnauth * Allow to wait for IS and CL to NOT happen * Bump timeout for wait_for_instantlock * Implement tests for retroactive signing of IS and CLs * Add wait_for_tx function to DashTestFramework * Add -whitelist=127.0.0.1 to node0 * Use node3 for isolated block generation * Don't test for non-receival of TXs on node4/node5 --- qa/pull-tester/rpc-tests.py | 1 + qa/rpc-tests/llmq-is-retroactive.py | 178 ++++++++++++++++++ qa/rpc-tests/test_framework/test_framework.py | 24 ++- src/llmq/quorums_instantsend.cpp | 18 +- src/llmq/quorums_instantsend.h | 2 +- src/llmq/quorums_signing.cpp | 19 +- src/llmq/quorums_signing.h | 2 +- src/llmq/quorums_signing_shares.cpp | 42 ++++- src/llmq/quorums_signing_shares.h | 6 +- src/net.cpp | 5 + src/net.h | 2 + src/rpc/net.cpp | 6 + 12 files changed, 283 insertions(+), 22 deletions(-) create mode 100755 qa/rpc-tests/llmq-is-retroactive.py diff --git a/qa/pull-tester/rpc-tests.py b/qa/pull-tester/rpc-tests.py index 1522df7ba4..b27d7e047a 100755 --- a/qa/pull-tester/rpc-tests.py +++ b/qa/pull-tester/rpc-tests.py @@ -48,6 +48,7 @@ BASE_SCRIPTS= [ 'llmq-chainlocks.py', # NOTE: needs dash_hash to pass 'llmq-simplepose.py', # NOTE: needs dash_hash to pass 'llmq-is-cl-conflicts.py', # NOTE: needs dash_hash to pass + 'llmq-is-retroactive.py', # NOTE: needs dash_hash to pass 'llmq-dkgerrors.py', # NOTE: needs dash_hash to pass 'dip4-coinbasemerkleroots.py', # NOTE: needs dash_hash to pass # vv Tests less than 60s vv diff --git a/qa/rpc-tests/llmq-is-retroactive.py 
b/qa/rpc-tests/llmq-is-retroactive.py new file mode 100755 index 0000000000..7bdd7b4019 --- /dev/null +++ b/qa/rpc-tests/llmq-is-retroactive.py @@ -0,0 +1,178 @@ +#!/usr/bin/env python3 +# Copyright (c) 2015-2018 The Dash Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. + +from test_framework.mininode import * +from test_framework.test_framework import DashTestFramework +from test_framework.util import sync_blocks, set_node_times, \ + isolate_node, reconnect_isolated_node + +''' +llmq-is-retroactive.py + +Tests retroactive signing + +We have 6 nodes where node 0 is the control node, nodes 1-5 are masternodes. +Mempool inconsistencies are simulated via disconnecting/reconnecting node 3 +and by having a higher relay fee on nodes 4 and 5. +''' + +class LLMQ_IS_RetroactiveSigning(DashTestFramework): + def set_test_params(self): + # -whitelist is needed to avoid the trickling logic on node0 + self.set_dash_test_params(6, 5, [["-whitelist=127.0.0.1"], [], [], [], ["-minrelaytxfee=0.001"], ["-minrelaytxfee=0.001"]], fast_dip3_enforcement=True) + + def run_test(self): + while self.nodes[0].getblockchaininfo()["bip9_softforks"]["dip0008"]["status"] != "active": + self.nodes[0].generate(10) + sync_blocks(self.nodes, timeout=60*5) + + self.nodes[0].spork("SPORK_17_QUORUM_DKG_ENABLED", 0) + self.nodes[0].spork("SPORK_19_CHAINLOCKS_ENABLED", 0) + self.nodes[0].spork("SPORK_2_INSTANTSEND_ENABLED", 0) + self.nodes[0].spork("SPORK_3_INSTANTSEND_BLOCK_FILTERING", 0) + self.wait_for_sporks_same() + + self.mine_quorum() + self.mine_quorum() + + # Make sure that all nodes are chainlocked at the same height before starting actual tests + self.wait_for_chainlocked_block_all_nodes(self.nodes[0].getbestblockhash()) + + self.log.info("trying normal IS lock") + txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1) + # 3 nodes should be enough to create an IS lock even if 
nodes 4 and 5 (which have no tx itself) + # are the only "neighbours" in intra-quorum connections for one of them. + self.wait_for_instantlock(txid, self.nodes[0]) + self.bump_mocktime(1) + set_node_times(self.nodes, self.mocktime) + block = self.nodes[0].generate(1)[0] + self.wait_for_chainlocked_block_all_nodes(block) + + self.log.info("testing normal signing with partially known TX") + isolate_node(self.nodes[3]) + txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1) + # Make sure nodes 1 and 2 received the TX before we continue, + # otherwise it might announce the TX to node 3 when reconnecting + self.wait_for_tx(txid, self.nodes[1]) + self.wait_for_tx(txid, self.nodes[2]) + reconnect_isolated_node(self.nodes[3], 0) + self.wait_for_mnauth(self.nodes[3], 2) + # node 3 fully reconnected but the TX wasn't relayed to it, so there should be no IS lock + self.wait_for_instantlock(txid, self.nodes[0], False, 5) + # push the tx directly via rpc + self.nodes[3].sendrawtransaction(self.nodes[0].getrawtransaction(txid)) + # node 3 should vote on a tx now since it became aware of it via sendrawtransaction + # and this should be enough to complete an IS lock + self.wait_for_instantlock(txid, self.nodes[0]) + + self.log.info("testing retroactive signing with unknown TX") + isolate_node(self.nodes[3]) + rawtx = self.nodes[0].createrawtransaction([], {self.nodes[0].getnewaddress(): 1}) + rawtx = self.nodes[0].fundrawtransaction(rawtx)['hex'] + rawtx = self.nodes[0].signrawtransaction(rawtx)['hex'] + txid = self.nodes[3].sendrawtransaction(rawtx) + # Make node 3 consider the TX as safe + self.bump_mocktime(10 * 60 + 1) + set_node_times(self.nodes, self.mocktime) + block = self.nodes[3].generatetoaddress(1, self.nodes[0].getnewaddress())[0] + reconnect_isolated_node(self.nodes[3], 0) + self.wait_for_chainlocked_block_all_nodes(block) + self.nodes[0].setmocktime(self.mocktime) + + self.log.info("testing retroactive signing with partially known TX") + 
isolate_node(self.nodes[3]) + txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1) + # Make sure nodes 1 and 2 received the TX before we continue, + # otherwise it might announce the TX to node 3 when reconnecting + self.wait_for_tx(txid, self.nodes[1]) + self.wait_for_tx(txid, self.nodes[2]) + reconnect_isolated_node(self.nodes[3], 0) + self.wait_for_mnauth(self.nodes[3], 2) + # node 3 fully reconnected but the TX wasn't relayed to it, so there should be no IS lock + self.wait_for_instantlock(txid, self.nodes[0], False, 5) + # Make node0 consider the TX as safe + self.bump_mocktime(10 * 60 + 1) + set_node_times(self.nodes, self.mocktime) + block = self.nodes[0].generate(1)[0] + self.wait_for_chainlocked_block_all_nodes(block) + + self.log.info("testing retroactive signing with partially known TX and all nodes session timeout") + self.test_all_nodes_session_timeout(False) + self.log.info("repeating test, but with cycled LLMQs") + self.test_all_nodes_session_timeout(True) + + self.log.info("testing retroactive signing with partially known TX and single node session timeout") + self.test_single_node_session_timeout(False) + self.log.info("repeating test, but with cycled LLMQs") + self.test_single_node_session_timeout(True) + + def cycle_llmqs(self): + self.mine_quorum() + self.mine_quorum() + self.wait_for_chainlocked_block_all_nodes(self.nodes[0].getbestblockhash()) + + def test_all_nodes_session_timeout(self, do_cycle_llmqs): + set_node_times(self.nodes, self.mocktime) + isolate_node(self.nodes[3]) + rawtx = self.nodes[0].createrawtransaction([], {self.nodes[0].getnewaddress(): 1}) + rawtx = self.nodes[0].fundrawtransaction(rawtx)['hex'] + rawtx = self.nodes[0].signrawtransaction(rawtx)['hex'] + txid = self.nodes[0].sendrawtransaction(rawtx) + txid = self.nodes[3].sendrawtransaction(rawtx) + # Make sure nodes 1 and 2 received the TX before we continue + self.wait_for_tx(txid, self.nodes[1]) + self.wait_for_tx(txid, self.nodes[2]) + # Make sure 
signing is done on nodes 1 and 2 (it's async) + time.sleep(5) + # Make the signing session for the IS lock timeout on nodes 1-3 + self.bump_mocktime(61) + set_node_times(self.nodes, self.mocktime) + time.sleep(2) # make sure Cleanup() is called + reconnect_isolated_node(self.nodes[3], 0) + self.wait_for_mnauth(self.nodes[3], 2) + # node 3 fully reconnected but the signing session is already timed out on all nodes, so no IS lock + self.wait_for_instantlock(txid, self.nodes[0], False, 5) + if do_cycle_llmqs: + self.cycle_llmqs() + self.wait_for_instantlock(txid, self.nodes[0], False, 5) + # Make node 0 consider the TX as safe + self.bump_mocktime(10 * 60 + 1) + self.nodes[0].setmocktime(self.mocktime) + block = self.nodes[0].generate(1)[0] + self.wait_for_chainlocked_block_all_nodes(block) + + def test_single_node_session_timeout(self, do_cycle_llmqs): + set_node_times(self.nodes, self.mocktime) + isolate_node(self.nodes[3]) + rawtx = self.nodes[0].createrawtransaction([], {self.nodes[0].getnewaddress(): 1}) + rawtx = self.nodes[0].fundrawtransaction(rawtx)['hex'] + rawtx = self.nodes[0].signrawtransaction(rawtx)['hex'] + txid = self.nodes[3].sendrawtransaction(rawtx) + time.sleep(2) # make sure signing is done on node 2 (it's async) + # Make the signing session for the IS lock timeout on node 3 + self.bump_mocktime(61) + set_node_times(self.nodes, self.mocktime) + time.sleep(2) # make sure Cleanup() is called + reconnect_isolated_node(self.nodes[3], 0) + self.wait_for_mnauth(self.nodes[3], 2) + self.nodes[0].sendrawtransaction(rawtx) + # Make sure nodes 1 and 2 received the TX + self.wait_for_tx(txid, self.nodes[1]) + self.wait_for_tx(txid, self.nodes[2]) + # Make sure signing is done on nodes 1 and 2 (it's async) + time.sleep(5) + # node 3 fully reconnected but the signing session is already timed out on it, so no IS lock + self.wait_for_instantlock(txid, self.nodes[0], False, 1) + if do_cycle_llmqs: + self.cycle_llmqs() + self.wait_for_instantlock(txid, 
self.nodes[0], False, 5) + # Make node 0 consider the TX as safe + self.bump_mocktime(10 * 60 + 1) + self.nodes[0].setmocktime(self.mocktime) + block = self.nodes[0].generate(1)[0] + self.wait_for_chainlocked_block_all_nodes(block) + +if __name__ == '__main__': + LLMQ_IS_RetroactiveSigning().main() diff --git a/qa/rpc-tests/test_framework/test_framework.py b/qa/rpc-tests/test_framework/test_framework.py index 4e80a8c669..2cefe8dbf5 100755 --- a/qa/rpc-tests/test_framework/test_framework.py +++ b/qa/rpc-tests/test_framework/test_framework.py @@ -555,13 +555,23 @@ class DashTestFramework(BitcoinTestFramework): self.sync_all() return self.wait_for_instantlock(txid, sender) - def wait_for_instantlock(self, txid, node): + def wait_for_tx(self, txid, node, expected=True, timeout=15): + def check_tx(): + try: + return node.getrawtransaction(txid) + except: + return False + if wait_until(check_tx, timeout=timeout, sleep=0.5, do_assert=expected) and not expected: + raise AssertionError("waiting unexpectedly succeeded") + + def wait_for_instantlock(self, txid, node, expected=True, timeout=15): def check_instantlock(): try: return node.getrawtransaction(txid, True)["instantlock"] except: return False - return wait_until(check_instantlock, timeout=10, sleep=0.5) + if wait_until(check_instantlock, timeout=timeout, sleep=0.5, do_assert=expected) and not expected: + raise AssertionError("waiting unexpectedly succeeded") def wait_for_chainlocked_block(self, node, block_hash, expected=True, timeout=15): def check_chainlocked_block(): @@ -712,6 +722,16 @@ class DashTestFramework(BitcoinTestFramework): return new_quorum + def wait_for_mnauth(self, node, count, timeout=10): + def test(): + pi = node.getpeerinfo() + c = 0 + for p in pi: + if "verified_proregtx_hash" in p and p["verified_proregtx_hash"] != "": + c += 1 + return c >= count + wait_until(test, timeout=timeout) + # Test framework for doing p2p comparison testing, which sets up some bitcoind # binaries: # 1 binary: test 
binary diff --git a/src/llmq/quorums_instantsend.cpp b/src/llmq/quorums_instantsend.cpp index c5da3e8f98..6fd96826e0 100644 --- a/src/llmq/quorums_instantsend.cpp +++ b/src/llmq/quorums_instantsend.cpp @@ -374,7 +374,7 @@ void CInstantSendManager::InterruptWorkerThread() workInterrupt(); } -bool CInstantSendManager::ProcessTx(const CTransaction& tx, const Consensus::Params& params) +bool CInstantSendManager::ProcessTx(const CTransaction& tx, bool allowReSigning, const Consensus::Params& params) { if (!IsNewInstantSendEnabled()) { return true; @@ -444,7 +444,7 @@ bool CInstantSendManager::ProcessTx(const CTransaction& tx, const Consensus::Par return false; } } - if (alreadyVotedCount == ids.size()) { + if (!allowReSigning && alreadyVotedCount == ids.size()) { LogPrint("instantsend", "CInstantSendManager::%s -- txid=%s: already voted on all inputs, bailing out\n", __func__, tx.GetHash().ToString()); return true; @@ -457,9 +457,9 @@ bool CInstantSendManager::ProcessTx(const CTransaction& tx, const Consensus::Par auto& in = tx.vin[i]; auto& id = ids[i]; inputRequestIds.emplace(id); - LogPrint("instantsend", "CInstantSendManager::%s -- txid=%s: trying to vote on input %s with id %s\n", __func__, - tx.GetHash().ToString(), in.prevout.ToStringShort(), id.ToString()); - if (quorumSigningManager->AsyncSignIfMember(llmqType, id, tx.GetHash())) { + LogPrint("instantsend", "CInstantSendManager::%s -- txid=%s: trying to vote on input %s with id %s. 
allowReSigning=%d\n", __func__, + tx.GetHash().ToString(), in.prevout.ToStringShort(), id.ToString(), allowReSigning); + if (quorumSigningManager->AsyncSignIfMember(llmqType, id, tx.GetHash(), allowReSigning)) { LogPrint("instantsend", "CInstantSendManager::%s -- txid=%s: voted on input %s with id %s\n", __func__, tx.GetHash().ToString(), in.prevout.ToStringShort(), id.ToString()); } @@ -1015,6 +1015,10 @@ void CInstantSendManager::SyncTransaction(const CTransaction& tx, const CBlockIn return; } + // This is different on develop as allowReSigning is passed in from the caller. In 0.14.0.x, we have to figure this out + // here to mimic develop. + bool allowReSigning = !inMempool && !isDisconnect; + uint256 islockHash; { LOCK(cs); @@ -1037,7 +1041,7 @@ void CInstantSendManager::SyncTransaction(const CTransaction& tx, const CBlockIn bool chainlocked = pindex && chainLocksHandler->HasChainLock(pindex->nHeight, pindex->GetBlockHash()); if (islockHash.IsNull() && !chainlocked) { - ProcessTx(tx, Params().GetConsensus()); + ProcessTx(tx, allowReSigning, Params().GetConsensus()); } LOCK(cs); @@ -1421,7 +1425,7 @@ bool CInstantSendManager::ProcessPendingRetryLockTxs() tx->GetHash().ToString()); } - ProcessTx(*tx, Params().GetConsensus()); + ProcessTx(*tx, false, Params().GetConsensus()); retryCount++; } diff --git a/src/llmq/quorums_instantsend.h b/src/llmq/quorums_instantsend.h index bb696b4ade..11ff8583d1 100644 --- a/src/llmq/quorums_instantsend.h +++ b/src/llmq/quorums_instantsend.h @@ -120,7 +120,7 @@ public: void InterruptWorkerThread(); public: - bool ProcessTx(const CTransaction& tx, const Consensus::Params& params); + bool ProcessTx(const CTransaction& tx, bool allowReSigning, const Consensus::Params& params); bool CheckCanLock(const CTransaction& tx, bool printDebug, const Consensus::Params& params); bool CheckCanLock(const COutPoint& outpoint, bool printDebug, const uint256& txHash, CAmount* retValue, const Consensus::Params& params); bool IsLocked(const uint256& 
txHash); diff --git a/src/llmq/quorums_signing.cpp b/src/llmq/quorums_signing.cpp index 2940e05198..d8a0f9e07e 100644 --- a/src/llmq/quorums_signing.cpp +++ b/src/llmq/quorums_signing.cpp @@ -743,7 +743,7 @@ void CSigningManager::UnregisterRecoveredSigsListener(CRecoveredSigsListener* l) recoveredSigsListeners.erase(itRem, recoveredSigsListeners.end()); } -bool CSigningManager::AsyncSignIfMember(Consensus::LLMQType llmqType, const uint256& id, const uint256& msgHash) +bool CSigningManager::AsyncSignIfMember(Consensus::LLMQType llmqType, const uint256& id, const uint256& msgHash, bool allowReSign) { auto& params = Params().GetConsensus().llmqs.at(llmqType); @@ -754,24 +754,31 @@ bool CSigningManager::AsyncSignIfMember(Consensus::LLMQType llmqType, const uint { LOCK(cs); - if (db.HasVotedOnId(llmqType, id)) { + bool hasVoted = db.HasVotedOnId(llmqType, id); + if (hasVoted) { uint256 prevMsgHash; db.GetVoteForId(llmqType, id, prevMsgHash); if (msgHash != prevMsgHash) { LogPrintf("CSigningManager::%s -- already voted for id=%s and msgHash=%s. Not voting on conflicting msgHash=%s\n", __func__, id.ToString(), prevMsgHash.ToString(), msgHash.ToString()); + return false; + } else if (allowReSign) { + LogPrint("llmq", "CSigningManager::%s -- already voted for id=%s and msgHash=%s. Resigning!\n", __func__, + id.ToString(), prevMsgHash.ToString()); } else { LogPrint("llmq", "CSigningManager::%s -- already voted for id=%s and msgHash=%s. 
Not voting again.\n", __func__, id.ToString(), prevMsgHash.ToString()); + return false; } - return false; } if (db.HasRecoveredSigForId(llmqType, id)) { // no need to sign it if we already have a recovered sig return true; } - db.WriteVoteForId(llmqType, id, msgHash); + if (!hasVoted) { + db.WriteVoteForId(llmqType, id, msgHash); + } } int tipHeight; @@ -796,6 +803,10 @@ bool CSigningManager::AsyncSignIfMember(Consensus::LLMQType llmqType, const uint return false; } + if (allowReSign) { + // make us re-announce all known shares (other nodes might have run into a timeout) + quorumSigSharesManager->ForceReAnnouncement(quorum, llmqType, id, msgHash); + } quorumSigSharesManager->AsyncSign(quorum, id, msgHash); return true; diff --git a/src/llmq/quorums_signing.h b/src/llmq/quorums_signing.h index c4c5343032..92d18e4af0 100644 --- a/src/llmq/quorums_signing.h +++ b/src/llmq/quorums_signing.h @@ -167,7 +167,7 @@ public: void RegisterRecoveredSigsListener(CRecoveredSigsListener* l); void UnregisterRecoveredSigsListener(CRecoveredSigsListener* l); - bool AsyncSignIfMember(Consensus::LLMQType llmqType, const uint256& id, const uint256& msgHash); + bool AsyncSignIfMember(Consensus::LLMQType llmqType, const uint256& id, const uint256& msgHash, bool allowReSign = false); bool HasRecoveredSig(Consensus::LLMQType llmqType, const uint256& id, const uint256& msgHash); bool HasRecoveredSigForId(Consensus::LLMQType llmqType, const uint256& id); bool HasRecoveredSigForSession(const uint256& signHash); diff --git a/src/llmq/quorums_signing_shares.cpp b/src/llmq/quorums_signing_shares.cpp index b5f7284605..c1632500ce 100644 --- a/src/llmq/quorums_signing_shares.cpp +++ b/src/llmq/quorums_signing_shares.cpp @@ -82,6 +82,13 @@ void CSigSharesInv::Set(uint16_t quorumMember, bool v) inv[quorumMember] = v; } +void CSigSharesInv::SetAll(bool v) +{ + for (size_t i = 0; i < inv.size(); i++) { + inv[i] = v; + } +} + std::string CBatchedSigShares::ToInvString() const { CSigSharesInv inv; @@ 
-679,7 +686,7 @@ void CSigSharesManager::ProcessSigShare(NodeId nodeId, const CSigShare& sigShare sigSharesToAnnounce.Add(sigShare.GetKey(), true); // Update the time we've seen the last sigShare - timeSeenForSessions[sigShare.GetSignHash()] = GetTimeMillis(); + timeSeenForSessions[sigShare.GetSignHash()] = GetAdjustedTime(); if (!quorumNodes.empty()) { // don't announce and wait for other nodes to request this share and directly send it to them @@ -778,7 +785,7 @@ void CSigSharesManager::CollectSigSharesToRequest(std::unordered_mapqc.quorumHash, id, msgHash); + auto sigs = sigShares.GetAllForSignHash(signHash); + if (sigs) { + for (auto& p : *sigs) { + // re-announce every sigshare to every node + sigSharesToAnnounce.Add(std::make_pair(signHash, p.first), true); + } + } + for (auto& p : nodeStates) { + CSigSharesNodeState& nodeState = p.second; + auto session = nodeState.GetSessionBySignHash(signHash); + if (!session) { + continue; + } + // pretend that the other node doesn't know about any shares so that we re-announce everything + session->knows.SetAll(false); + // we need to use a new session id as we don't know if the other node has run into a timeout already + session->sendSessionId = (uint32_t)-1; + } +} + void CSigSharesManager::HandleNewRecoveredSig(const llmq::CRecoveredSig& recoveredSig) { LOCK(cs); diff --git a/src/llmq/quorums_signing_shares.h b/src/llmq/quorums_signing_shares.h index 654f88268f..340c8ea07b 100644 --- a/src/llmq/quorums_signing_shares.h +++ b/src/llmq/quorums_signing_shares.h @@ -104,6 +104,7 @@ public: void Init(size_t size); bool IsSet(uint16_t quorumMember) const; void Set(uint16_t quorumMember, bool v); + void SetAll(bool v); void Merge(const CSigSharesInv& inv2); size_t CountSet() const; @@ -329,8 +330,8 @@ public: class CSigSharesManager : public CRecoveredSigsListener { - static const int64_t SESSION_NEW_SHARES_TIMEOUT = 60 * 1000; - static const int64_t SIG_SHARE_REQUEST_TIMEOUT = 5 * 1000; + static const int64_t 
SESSION_NEW_SHARES_TIMEOUT = 60; + static const int64_t SIG_SHARE_REQUEST_TIMEOUT = 5; // we try to keep total message size below 10k const size_t MAX_MSGS_CNT_QSIGSESANN = 100; @@ -377,6 +378,7 @@ public: void AsyncSign(const CQuorumCPtr& quorum, const uint256& id, const uint256& msgHash); void Sign(const CQuorumCPtr& quorum, const uint256& id, const uint256& msgHash); + void ForceReAnnouncement(const CQuorumCPtr& quorum, Consensus::LLMQType llmqType, const uint256& id, const uint256& msgHash); void HandleNewRecoveredSig(const CRecoveredSig& recoveredSig); diff --git a/src/net.cpp b/src/net.cpp index 78b20eda18..0f6346a3c7 100644 --- a/src/net.cpp +++ b/src/net.cpp @@ -691,6 +691,11 @@ void CNode::copyStats(CNodeStats &stats) // Leave string empty if addrLocal invalid (not filled in yet) CService addrLocalUnlocked = GetAddrLocal(); stats.addrLocal = addrLocalUnlocked.IsValid() ? addrLocalUnlocked.ToString() : ""; + + { + LOCK(cs_mnauth); + X(verifiedProRegTxHash); + } } #undef X diff --git a/src/net.h b/src/net.h index 4bf1a8206f..b90a585cdf 100644 --- a/src/net.h +++ b/src/net.h @@ -660,6 +660,8 @@ public: double dMinPing; std::string addrLocal; CAddress addr; + // In case this is a verified MN, this value is the proTx of the MN + uint256 verifiedProRegTxHash; }; diff --git a/src/rpc/net.cpp b/src/rpc/net.cpp index 1828a8a965..e2efac8121 100644 --- a/src/rpc/net.cpp +++ b/src/rpc/net.cpp @@ -79,6 +79,9 @@ UniValue getpeerinfo(const JSONRPCRequest& request) " \"addr\":\"host:port\", (string) The ip address and port of the peer\n" " \"addrlocal\":\"ip:port\", (string) local address\n" " \"services\":\"xxxxxxxxxxxxxxxx\", (string) The services offered\n" + " \"verified_proregtx_hash\": h, (hex) Only present when the peer is a masternode and successfully\n" + " authenticated via MNAUTH. 
In this case, this field contains the\n" + " protx hash of the masternode\n" " \"relaytxes\":true|false, (boolean) Whether peer has asked us to relay transactions to it\n" " \"lastsend\": ttt, (numeric) The time in seconds since epoch (Jan 1 1970 GMT) of the last send\n" " \"lastrecv\": ttt, (numeric) The time in seconds since epoch (Jan 1 1970 GMT) of the last receive\n" @@ -135,6 +138,9 @@ UniValue getpeerinfo(const JSONRPCRequest& request) if (!(stats.addrLocal.empty())) obj.push_back(Pair("addrlocal", stats.addrLocal)); obj.push_back(Pair("services", strprintf("%016x", stats.nServices))); + if (!stats.verifiedProRegTxHash.IsNull()) { + obj.push_back(Pair("verified_proregtx_hash", stats.verifiedProRegTxHash.ToString())); + } obj.push_back(Pair("relaytxes", stats.fRelayTxes)); obj.push_back(Pair("lastsend", stats.nLastSend)); obj.push_back(Pair("lastrecv", stats.nLastRecv));