Merge #5982: backport: merge bitcoin#20769, #19499, #23575, #23695, #21160, #24692, partial bitcoin#20196, #25176, merge bitcoin-core/gui#526 (networking backports: part 4)

ab7ac1b85b partial bitcoin#25176: Fix frequent -netinfo JSON errors from null getpeerinfo#relaytxes (Kittywhiskers Van Gogh)
c89799d46c merge bitcoin#24692: Follow-ups to #21160 (Kittywhiskers Van Gogh)
33098aefff merge bitcoin#21160: Move tx inventory into net_processing (Kittywhiskers Van Gogh)
24205d94fe partial bitcoin#20196: fix GetListenPort() to derive the proper port (Kittywhiskers Van Gogh)
7f7200986b merge bitcoin-core/gui#526: Add address relay/processed/rate-limited fields to peer details (Kittywhiskers Van Gogh)
a9114f1ce8 merge bitcoin#23695: Always serialize local timestamp for version msg (Kittywhiskers Van Gogh)
d936c28b4e merge bitcoin#23575: Rework FillNode (Kittywhiskers Van Gogh)
6f8c730f35 merge bitcoin#19499: Make timeout mockable and type safe, speed up test (Kittywhiskers Van Gogh)
43a82bdd29 merge bitcoin#20769: fixes "advertised address where nobody is listening" (Kittywhiskers Van Gogh)

Pull request description:

  ## Additional Information

  * Dependent on https://github.com/dashpay/dash/pull/5978
  * Dependent on https://github.com/dashpay/dash/pull/5977

  ## Breaking Changes

  None observed.

  ## Checklist:

  - [x] I have performed a self-review of my own code
  - [x] I have commented my code, particularly in hard-to-understand areas **(note: N/A)**
  - [x] I have added or updated relevant unit/integration/functional/e2e tests
  - [x] I have made corresponding changes to the documentation **(note: N/A)**
  - [x] I have assigned this pull request to a milestone _(for repository code-owners and collaborators only)_

ACKs for top commit:
  PastaPastaPasta:
    utACK ab7ac1b85b

Tree-SHA512: 87bf5108bb80576c5bff8cd577add7800044da252fd18590e06a727f0bf70de94e2e9294b4412cdd9f1f6676472b0359902af361aaffc4c9ee299ad07d6af009
pasta 2024-04-28 12:39:33 -05:00
commit c3f34dcd98
GPG Key ID: 52527BEDABE87984
42 changed files with 542 additions and 327 deletions

View File

@@ -525,7 +525,7 @@ public:
             const int8_t network_id{NetworkStringToId(network)};
             if (network_id == UNKNOWN_NETWORK) continue;
             const bool is_outbound{!peer["inbound"].get_bool()};
-            const bool is_block_relay{!peer["relaytxes"].get_bool()};
+            const bool is_block_relay{peer["relaytxes"].isNull() ? false : !peer["relaytxes"].get_bool()};
             ++m_counts.at(is_outbound).at(network_id);        // in/out by network
             ++m_counts.at(is_outbound).at(m_networks.size()); // in/out overall
             ++m_counts.at(2).at(network_id);                  // total by network
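
Note: the `-netinfo` fix above treats a missing or null `relaytxes` field as "still relaying" rather than throwing a JSON error. A minimal sketch of that defensive UniValue pattern (the helper name is illustrative, not part of the PR):

```cpp
#include <univalue.h>

// Hypothetical helper: read an optional boolean field from a getpeerinfo
// entry. UniValue::operator[] returns a null value for absent keys, so we
// must check isNull() before calling get_bool(), which throws on null.
static bool IsBlockRelayPeer(const UniValue& peer)
{
    const UniValue& relaytxes{peer["relaytxes"]};
    if (relaytxes.isNull()) return false; // field absent: assume tx relay
    return !relaytxes.get_bool();
}
```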

View File

@@ -17,6 +17,7 @@
 #include <masternode/meta.h>
 #include <masternode/node.h>
 #include <masternode/sync.h>
+#include <net_processing.h>
 #include <netfulfilledman.h>
 #include <netmessagemaker.h>
 #include <protocol.h>
@@ -141,9 +142,9 @@ PeerMsgRet CGovernanceManager::ProcessMessage(CNode& peer, CConnman& connman, Pe
         LogPrint(BCLog::GOBJECT, "MNGOVERNANCESYNC -- syncing governance objects to our peer %s\n", peer.GetLogString());
         if (nProp == uint256()) {
-            return SyncObjects(peer, connman);
+            return SyncObjects(peer, peerman, connman);
         } else {
-            SyncSingleObjVotes(peer, nProp, filter, connman);
+            SyncSingleObjVotes(peer, peerman, nProp, filter, connman);
         }
     }
@@ -858,7 +859,7 @@ bool CGovernanceManager::ConfirmInventoryRequest(const CInv& inv)
     return true;
 }
-void CGovernanceManager::SyncSingleObjVotes(CNode& peer, const uint256& nProp, const CBloomFilter& filter, CConnman& connman)
+void CGovernanceManager::SyncSingleObjVotes(CNode& peer, PeerManager& peerman, const uint256& nProp, const CBloomFilter& filter, CConnman& connman)
 {
     // do not provide any data until our node is synced
     if (!Assert(m_mn_sync)->IsSynced()) return;
@@ -899,7 +900,7 @@ void CGovernanceManager::SyncSingleObjVotes(CNode& peer, const uint256& nProp, c
         if (filter.contains(nVoteHash) || !vote.IsValid(tip_mn_list, onlyVotingKeyAllowed)) {
             continue;
         }
-        peer.PushInventory(CInv(MSG_GOVERNANCE_OBJECT_VOTE, nVoteHash));
+        peerman.PushInventory(peer.GetId(), CInv(MSG_GOVERNANCE_OBJECT_VOTE, nVoteHash));
         ++nVoteCount;
     }
@@ -908,7 +909,7 @@ void CGovernanceManager::SyncSingleObjVotes(CNode& peer, const uint256& nProp, c
     LogPrint(BCLog::GOBJECT, "CGovernanceManager::%s -- sent %d votes to peer=%d\n", __func__, nVoteCount, peer.GetId());
 }
-PeerMsgRet CGovernanceManager::SyncObjects(CNode& peer, CConnman& connman) const
+PeerMsgRet CGovernanceManager::SyncObjects(CNode& peer, PeerManager& peerman, CConnman& connman) const
 {
     assert(m_netfulfilledman.IsValid());
@@ -959,7 +960,7 @@ PeerMsgRet CGovernanceManager::SyncObjects(CNode& peer, CConnman& connman) const
         // Push the inventory budget proposal message over to the other client
         LogPrint(BCLog::GOBJECT, "CGovernanceManager::%s -- syncing govobj: %s, peer=%d\n", __func__, strHash, peer.GetId());
-        peer.PushInventory(CInv(MSG_GOVERNANCE_OBJECT, nHash));
+        peerman.PushInventory(peer.GetId(), CInv(MSG_GOVERNANCE_OBJECT, nHash));
         ++nObjCount;
     }
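
Note: the governance changes above show the general shape of bitcoin#21160 applied to Dash-specific message types: subsystems no longer touch `CNode::PushInventory()` and instead announce objects through `PeerManager` by `NodeId`. A minimal sketch of the new call shape (the wrapper function below is hypothetical, not from the PR):

```cpp
// Hypothetical wrapper: announce a governance object to one peer through
// PeerManager, which now owns the per-peer inventory queues.
void AnnounceGovObject(PeerManager& peerman, const CNode& peer, const uint256& hash)
{
    peerman.PushInventory(peer.GetId(), CInv(MSG_GOVERNANCE_OBJECT, hash));
}
```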

View File

@@ -292,8 +292,8 @@ public:
      */
     bool ConfirmInventoryRequest(const CInv& inv);
-    void SyncSingleObjVotes(CNode& peer, const uint256& nProp, const CBloomFilter& filter, CConnman& connman);
-    PeerMsgRet SyncObjects(CNode& peer, CConnman& connman) const;
+    void SyncSingleObjVotes(CNode& peer, PeerManager& peerman, const uint256& nProp, const CBloomFilter& filter, CConnman& connman);
+    PeerMsgRet SyncObjects(CNode& peer, PeerManager& peerman, CConnman& connman) const;
     PeerMsgRet ProcessMessage(CNode& peer, CConnman& connman, PeerManager& peerman, std::string_view msg_type, CDataStream& vRecv);

View File

@@ -1256,6 +1256,11 @@ bool AppInitParameterInteraction(const ArgsManager& args)
         return InitError(Untranslated("Cannot set -bind or -whitebind together with -listen=0"));
     }
+    // if listen=0, then disallow listenonion=1
+    if (!args.GetBoolArg("-listen", DEFAULT_LISTEN) && args.GetBoolArg("-listenonion", DEFAULT_LISTEN_ONION)) {
+        return InitError(Untranslated("Cannot set -listen=0 together with -listenonion=1"));
+    }
     // Make sure enough file descriptors are available
     int nBind = std::max(nUserBind, size_t(1));
     nUserMaxConnections = args.GetArg("-maxconnections", DEFAULT_MAX_PEER_CONNECTIONS);

View File

@@ -29,13 +29,13 @@ LLMQContext::LLMQContext(CChainState& chainstate, CConnman& connman, CDeterminis
         llmq::quorumBlockProcessor = std::make_unique<llmq::CQuorumBlockProcessor>(chainstate, dmnman, evo_db, peerman);
         return llmq::quorumBlockProcessor.get();
     }()},
-    qdkgsman{std::make_unique<llmq::CDKGSessionManager>(*bls_worker, chainstate, connman, dmnman, *dkg_debugman, mn_metaman, *quorum_block_processor, mn_activeman, sporkman, unit_tests, wipe)},
+    qdkgsman{std::make_unique<llmq::CDKGSessionManager>(*bls_worker, chainstate, connman, dmnman, *dkg_debugman, mn_metaman, *quorum_block_processor, mn_activeman, sporkman, peerman, unit_tests, wipe)},
     qman{[&]() -> llmq::CQuorumManager* const {
         assert(llmq::quorumManager == nullptr);
         llmq::quorumManager = std::make_unique<llmq::CQuorumManager>(*bls_worker, chainstate, connman, dmnman, *qdkgsman, evo_db, *quorum_block_processor, mn_activeman, mn_sync, sporkman);
         return llmq::quorumManager.get();
     }()},
-    sigman{std::make_unique<llmq::CSigningManager>(connman, mn_activeman, *llmq::quorumManager, unit_tests, wipe)},
+    sigman{std::make_unique<llmq::CSigningManager>(connman, mn_activeman, *llmq::quorumManager, peerman, unit_tests, wipe)},
     shareman{std::make_unique<llmq::CSigSharesManager>(connman, *sigman, mn_activeman, *llmq::quorumManager, sporkman, peerman)},
     clhandler{[&]() -> llmq::CChainLocksHandler* const {
         assert(llmq::chainLocksHandler == nullptr);

View File

@@ -20,6 +20,7 @@
 #include <logging.h>
 #include <masternode/meta.h>
 #include <masternode/node.h>
+#include <net_processing.h>
 #include <netmessagemaker.h>
 #include <validation.h>
 #include <util/irange.h>
@@ -1326,10 +1327,10 @@ void CDKGSession::RelayInvToParticipants(const CInv& inv) const
             myProTxHash.ToString().substr(0, 4), ss.str());
     std::stringstream ss2;
-    connman.ForEachNode([&](CNode* pnode) {
+    connman.ForEachNode([&](const CNode* pnode) {
         if (pnode->qwatch ||
                 (!pnode->GetVerifiedProRegTxHash().IsNull() && (relayMembers.count(pnode->GetVerifiedProRegTxHash()) != 0))) {
-            pnode->PushInventory(inv);
+            Assert(m_peerman)->PushInventory(pnode->GetId(), inv);
         }
         if (pnode->GetVerifiedProRegTxHash().IsNull()) {

View File

@@ -22,6 +22,7 @@ class CDeterministicMN;
 class CMasternodeMetaMan;
 class CSporkManager;
 class UniValue;
+class PeerManager;
 using CDeterministicMNCPtr = std::shared_ptr<const CDeterministicMN>;
@@ -280,6 +281,7 @@ private:
     CMasternodeMetaMan& m_mn_metaman;
     const CActiveMasternodeManager* const m_mn_activeman;
     const CSporkManager& m_sporkman;
+    const std::unique_ptr<PeerManager>& m_peerman;
     const CBlockIndex* m_quorum_base_block_index{nullptr};
     int quorumIndex{0};
@@ -322,9 +324,10 @@ public:
     CDKGSession(const Consensus::LLMQParams& _params, CBLSWorker& _blsWorker, CConnman& _connman,
                 CDeterministicMNManager& dmnman, CDKGSessionManager& _dkgManager, CDKGDebugManager& _dkgDebugManager,
                 CMasternodeMetaMan& mn_metaman, const CActiveMasternodeManager* const mn_activeman,
-                const CSporkManager& sporkman) :
+                const CSporkManager& sporkman, const std::unique_ptr<PeerManager>& peerman) :
         params(_params), blsWorker(_blsWorker), cache(_blsWorker), connman(_connman), m_dmnman(dmnman), dkgManager(_dkgManager),
-        dkgDebugManager(_dkgDebugManager), m_mn_metaman(mn_metaman), m_mn_activeman(mn_activeman), m_sporkman(sporkman) {}
+        dkgDebugManager(_dkgDebugManager), m_mn_metaman(mn_metaman), m_mn_activeman(mn_activeman), m_sporkman(sporkman),
+        m_peerman(peerman) {}
     bool Init(gsl::not_null<const CBlockIndex*> pQuorumBaseBlockIndex, Span<CDeterministicMNCPtr> mns, const uint256& _myProTxHash, int _quorumIndex);

View File

@@ -27,7 +27,7 @@ namespace llmq
 CDKGSessionHandler::CDKGSessionHandler(CBLSWorker& _blsWorker, CChainState& chainstate, CConnman& _connman, CDeterministicMNManager& dmnman,
                                        CDKGDebugManager& _dkgDebugManager, CDKGSessionManager& _dkgManager, CMasternodeMetaMan& mn_metaman,
                                        CQuorumBlockProcessor& _quorumBlockProcessor, const CActiveMasternodeManager* const mn_activeman,
-                                       const CSporkManager& sporkman, const Consensus::LLMQParams& _params, int _quorumIndex) :
+                                       const CSporkManager& sporkman, const std::unique_ptr<PeerManager>& peerman, const Consensus::LLMQParams& _params, int _quorumIndex) :
     blsWorker(_blsWorker),
     m_chainstate(chainstate),
     connman(_connman),
@@ -38,9 +38,10 @@ CDKGSessionHandler::CDKGSessionHandler(CBLSWorker& _blsWorker, CChainState& chai
     quorumBlockProcessor(_quorumBlockProcessor),
     m_mn_activeman(mn_activeman),
     m_sporkman(sporkman),
+    m_peerman(peerman),
     params(_params),
     quorumIndex(_quorumIndex),
-    curSession(std::make_unique<CDKGSession>(_params, _blsWorker, _connman, dmnman, _dkgManager, _dkgDebugManager, m_mn_metaman, m_mn_activeman, sporkman)),
+    curSession(std::make_unique<CDKGSession>(_params, _blsWorker, _connman, dmnman, _dkgManager, _dkgDebugManager, m_mn_metaman, m_mn_activeman, sporkman, peerman)),
     pendingContributions((size_t)_params.size * 2, MSG_QUORUM_CONTRIB), // we allow size*2 messages as we need to make sure we see bad behavior (double messages)
     pendingComplaints((size_t)_params.size * 2, MSG_QUORUM_COMPLAINT),
     pendingJustifications((size_t)_params.size * 2, MSG_QUORUM_JUSTIFICATION),
@@ -188,7 +189,7 @@ void CDKGSessionHandler::StopThread()
 bool CDKGSessionHandler::InitNewQuorum(const CBlockIndex* pQuorumBaseBlockIndex)
 {
-    curSession = std::make_unique<CDKGSession>(params, blsWorker, connman, m_dmnman, dkgManager, dkgDebugManager, m_mn_metaman, m_mn_activeman, m_sporkman);
+    curSession = std::make_unique<CDKGSession>(params, blsWorker, connman, m_dmnman, dkgManager, dkgDebugManager, m_mn_metaman, m_mn_activeman, m_sporkman, m_peerman);
     if (!DeploymentDIP0003Enforced(pQuorumBaseBlockIndex->nHeight, Params().GetConsensus())) {
         return false;

View File

@@ -130,6 +130,7 @@ private:
     CQuorumBlockProcessor& quorumBlockProcessor;
     const CActiveMasternodeManager* const m_mn_activeman;
     const CSporkManager& m_sporkman;
+    const std::unique_ptr<PeerManager>& m_peerman;
     const Consensus::LLMQParams params;
     const int quorumIndex;
@@ -151,7 +152,7 @@ public:
     CDKGSessionHandler(CBLSWorker& _blsWorker, CChainState& chainstate, CConnman& _connman, CDeterministicMNManager& dmnman,
                        CDKGDebugManager& _dkgDebugManager, CDKGSessionManager& _dkgManager, CMasternodeMetaMan& mn_metaman,
                        CQuorumBlockProcessor& _quorumBlockProcessor, const CActiveMasternodeManager* const mn_activeman,
-                       const CSporkManager& sporkman, const Consensus::LLMQParams& _params, int _quorumIndex);
+                       const CSporkManager& sporkman, const std::unique_ptr<PeerManager>& peerman, const Consensus::LLMQParams& _params, int _quorumIndex);
     ~CDKGSessionHandler() = default;
     void UpdatedBlockTip(const CBlockIndex *pindexNew);

View File

@@ -26,7 +26,8 @@ static const std::string DB_ENC_CONTRIB = "qdkg_E";
 CDKGSessionManager::CDKGSessionManager(CBLSWorker& _blsWorker, CChainState& chainstate, CConnman& _connman, CDeterministicMNManager& dmnman,
                                        CDKGDebugManager& _dkgDebugManager, CMasternodeMetaMan& mn_metaman, CQuorumBlockProcessor& _quorumBlockProcessor,
-                                       const CActiveMasternodeManager* const mn_activeman, const CSporkManager& sporkman, bool unitTests, bool fWipe) :
+                                       const CActiveMasternodeManager* const mn_activeman, const CSporkManager& sporkman,
+                                       const std::unique_ptr<PeerManager>& peerman, bool unitTests, bool fWipe) :
     db(std::make_unique<CDBWrapper>(unitTests ? "" : (GetDataDir() / "llmq/dkgdb"), 1 << 20, unitTests, fWipe)),
     blsWorker(_blsWorker),
     m_chainstate(chainstate),
@@ -49,7 +50,8 @@ CDKGSessionManager::CDKGSessionManager(CBLSWorker& _blsWorker, CChainState& chai
         for (const auto i : irange::range(session_count)) {
             dkgSessionHandlers.emplace(std::piecewise_construct,
                                        std::forward_as_tuple(params.type, i),
-                                       std::forward_as_tuple(blsWorker, m_chainstate, connman, dmnman, dkgDebugManager, *this, mn_metaman, quorumBlockProcessor, mn_activeman, spork_manager, params, i));
+                                       std::forward_as_tuple(blsWorker, m_chainstate, connman, dmnman, dkgDebugManager, *this, mn_metaman,
+                                                             quorumBlockProcessor, mn_activeman, spork_manager, peerman, params, i));
         }
     }
 }

View File

@@ -69,7 +69,8 @@ private:
 public:
     CDKGSessionManager(CBLSWorker& _blsWorker, CChainState& chainstate, CConnman& _connman, CDeterministicMNManager& dmnman,
                        CDKGDebugManager& _dkgDebugManager, CMasternodeMetaMan& mn_metaman, CQuorumBlockProcessor& _quorumBlockProcessor,
-                       const CActiveMasternodeManager* const mn_activeman, const CSporkManager& sporkman, bool unitTests, bool fWipe);
+                       const CActiveMasternodeManager* const mn_activeman, const CSporkManager& sporkman,
+                       const std::unique_ptr<PeerManager>& peerman, bool unitTests, bool fWipe);
     ~CDKGSessionManager() = default;
     void StartThreads();

View File

@@ -1452,12 +1452,9 @@ void CInstantSendManager::AskNodesForLockedTx(const uint256& txid, const CConnma
         if (nodesToAskFor.size() >= 4) {
             return;
         }
-        if (peerman.CanRelayAddrs(pnode->GetId())) {
-            LOCK(pnode->m_tx_relay->cs_tx_inventory);
-            if (pnode->m_tx_relay->filterInventoryKnown.contains(txid)) {
-                pnode->AddRef();
-                nodesToAskFor.emplace_back(pnode);
-            }
+        if (peerman.IsInvInFilter(pnode->GetId(), txid)) {
+            pnode->AddRef();
+            nodesToAskFor.emplace_back(pnode);
         }
     };

View File

@@ -540,8 +540,8 @@ void CRecoveredSigsDb::CleanupOldVotes(int64_t maxAge)
 //////////////////
 CSigningManager::CSigningManager(CConnman& _connman, const CActiveMasternodeManager* const mn_activeman, const CQuorumManager& _qman,
-                                 bool fMemory, bool fWipe) :
-    db(fMemory, fWipe), connman(_connman), m_mn_activeman(mn_activeman), qman(_qman)
+                                 const std::unique_ptr<PeerManager>& peerman, bool fMemory, bool fWipe) :
+    db(fMemory, fWipe), connman(_connman), m_mn_activeman(mn_activeman), qman(_qman), m_peerman(peerman)
 {
 }
@@ -572,18 +572,18 @@ bool CSigningManager::GetRecoveredSigForGetData(const uint256& hash, CRecoveredS
     return true;
 }
-PeerMsgRet CSigningManager::ProcessMessage(const CNode& pfrom, gsl::not_null<PeerManager*> peerman, const std::string& msg_type, CDataStream& vRecv)
+PeerMsgRet CSigningManager::ProcessMessage(const CNode& pfrom, const std::string& msg_type, CDataStream& vRecv)
 {
     if (msg_type == NetMsgType::QSIGREC) {
         auto recoveredSig = std::make_shared<CRecoveredSig>();
         vRecv >> *recoveredSig;
-        return ProcessMessageRecoveredSig(pfrom, peerman, recoveredSig);
+        return ProcessMessageRecoveredSig(pfrom, recoveredSig);
     }
     return {};
 }
-PeerMsgRet CSigningManager::ProcessMessageRecoveredSig(const CNode& pfrom, gsl::not_null<PeerManager*> peerman, const std::shared_ptr<const CRecoveredSig>& recoveredSig)
+PeerMsgRet CSigningManager::ProcessMessageRecoveredSig(const CNode& pfrom, const std::shared_ptr<const CRecoveredSig>& recoveredSig)
 {
     {
         LOCK(cs_main);
@@ -615,12 +615,6 @@ PeerMsgRet CSigningManager::ProcessMessageRecoveredSig(const CNode& pfrom, gsl::
         return {};
     }
-    if (m_peerman == nullptr) {
-        m_peerman = peerman;
-    }
-    // we should never use one CSigningManager with different PeerManager
-    assert(m_peerman == peerman);
     pendingRecoveredSigs[pfrom.GetId()].emplace_back(recoveredSig);
     return {};
 }
@@ -776,7 +770,7 @@ bool CSigningManager::ProcessPendingRecoveredSigs()
         if (batchVerifier.badSources.count(nodeId)) {
             LogPrint(BCLog::LLMQ, "CSigningManager::%s -- invalid recSig from other node, banning peer=%d\n", __func__, nodeId);
-            m_peerman.load()->Misbehaving(nodeId, 100);
+            Assert(m_peerman)->Misbehaving(nodeId, 100);
             continue;
         }
@@ -840,9 +834,9 @@ void CSigningManager::ProcessRecoveredSig(const std::shared_ptr<const CRecovered
     if (m_mn_activeman != nullptr) {
         CInv inv(MSG_QUORUM_RECOVERED_SIG, recoveredSig->GetHash());
-        connman.ForEachNode([&](CNode* pnode) {
+        connman.ForEachNode([&](const CNode* pnode) {
             if (pnode->fSendRecSigs) {
-                pnode->PushInventory(inv);
+                Assert(m_peerman)->PushInventory(pnode->GetId(), inv);
             }
         });
     }

View File

@@ -165,8 +165,7 @@ private:
     CConnman& connman;
     const CActiveMasternodeManager* const m_mn_activeman;
    const CQuorumManager& qman;
-    std::atomic<PeerManager*> m_peerman{nullptr};
+    const std::unique_ptr<PeerManager>& m_peerman;
     // Incoming and not verified yet
     std::unordered_map<NodeId, std::list<std::shared_ptr<const CRecoveredSig>>> pendingRecoveredSigs GUARDED_BY(cs);
@@ -179,12 +178,13 @@ private:
     std::vector<CRecoveredSigsListener*> recoveredSigsListeners GUARDED_BY(cs);
 public:
-    CSigningManager(CConnman& _connman, const CActiveMasternodeManager* const mn_activeman, const CQuorumManager& _qman, bool fMemory, bool fWipe);
+    CSigningManager(CConnman& _connman, const CActiveMasternodeManager* const mn_activeman, const CQuorumManager& _qman,
+                    const std::unique_ptr<PeerManager>& peerman, bool fMemory, bool fWipe);
     bool AlreadyHave(const CInv& inv) const;
     bool GetRecoveredSigForGetData(const uint256& hash, CRecoveredSig& ret) const;
-    PeerMsgRet ProcessMessage(const CNode& pnode, gsl::not_null<PeerManager*> peerman, const std::string& msg_type, CDataStream& vRecv);
+    PeerMsgRet ProcessMessage(const CNode& pnode, const std::string& msg_type, CDataStream& vRecv);
     // This is called when a recovered signature was was reconstructed from another P2P message and is known to be valid
     // This is the case for example when a signature appears as part of InstantSend or ChainLocks
@@ -197,7 +197,7 @@ public:
     void TruncateRecoveredSig(Consensus::LLMQType llmqType, const uint256& id);
 private:
-    PeerMsgRet ProcessMessageRecoveredSig(const CNode& pfrom, gsl::not_null<PeerManager*> peerman, const std::shared_ptr<const CRecoveredSig>& recoveredSig);
+    PeerMsgRet ProcessMessageRecoveredSig(const CNode& pfrom, const std::shared_ptr<const CRecoveredSig>& recoveredSig);
     static bool PreVerifyRecoveredSig(const CQuorumManager& quorum_manager, const CRecoveredSig& recoveredSig, bool& retBan);
     void CollectPendingRecoveredSigsToVerify(size_t maxUniqueSessions,

View File

@@ -654,14 +654,8 @@ void CNode::copyStats(CNodeStats &stats, const std::vector<bool> &m_asmap)
     X(addrBind);
     stats.m_network = ConnectedThroughNetwork();
     stats.m_mapped_as = addr.GetMappedAS(m_asmap);
-    if (!IsBlockOnlyConn()) {
-        LOCK(m_tx_relay->cs_filter);
-        stats.fRelayTxes = m_tx_relay->fRelayTxes;
-    } else {
-        stats.fRelayTxes = false;
-    }
-    X(nLastSend);
-    X(nLastRecv);
+    X(m_last_send);
+    X(m_last_recv);
     X(nLastTXTime);
     X(nLastBlockTime);
     X(nTimeConnected);
@@ -710,7 +704,7 @@ bool CNode::ReceiveMsgBytes(Span<const uint8_t> msg_bytes, bool& complete)
     // TODO: use mocktime here after bitcoin#19499 is backported
     const auto time = std::chrono::microseconds(GetTimeMicros());
     LOCK(cs_vRecv);
-    nLastRecv = std::chrono::duration_cast<std::chrono::seconds>(time).count();
+    m_last_recv = std::chrono::duration_cast<std::chrono::seconds>(time);
     nRecvBytes += msg_bytes.size();
     while (msg_bytes.size() > 0) {
         // absorb network data
@@ -881,7 +875,7 @@ size_t CConnman::SocketSendData(CNode& node)
             nBytes = send(node.hSocket, reinterpret_cast<const char*>(data.data()) + node.nSendOffset, data.size() - node.nSendOffset, MSG_NOSIGNAL | MSG_DONTWAIT);
         }
         if (nBytes > 0) {
-            node.nLastSend = GetTimeSeconds();
+            node.m_last_send = GetTime<std::chrono::seconds>();
             node.nSendBytes += nBytes;
             node.nSendOffset += nBytes;
             nSentSize += nBytes;
@@ -957,7 +951,7 @@ static bool CompareNodeTXTime(const NodeEvictionCandidate &a, const NodeEviction
 {
     // There is a fall-through here because it is common for a node to have more than a few peers that have not yet relayed txn.
     if (a.nLastTXTime != b.nLastTXTime) return a.nLastTXTime < b.nLastTXTime;
-    if (a.fRelayTxes != b.fRelayTxes) return b.fRelayTxes;
+    if (a.m_relay_txs != b.m_relay_txs) return b.m_relay_txs;
     if (a.fBloomFilter != b.fBloomFilter) return a.fBloomFilter;
     return a.nTimeConnected > b.nTimeConnected;
 }
@@ -965,7 +959,7 @@ static bool CompareNodeTXTime(const NodeEvictionCandidate &a, const NodeEviction
 // Pick out the potential block-relay only peers, and sort them by last block time.
 static bool CompareNodeBlockRelayOnlyTime(const NodeEvictionCandidate &a, const NodeEvictionCandidate &b)
 {
-    if (a.fRelayTxes != b.fRelayTxes) return a.fRelayTxes;
+    if (a.m_relay_txs != b.m_relay_txs) return a.m_relay_txs;
     if (a.nLastBlockTime != b.nLastBlockTime) return a.nLastBlockTime < b.nLastBlockTime;
     if (a.fRelevantServices != b.fRelevantServices) return b.fRelevantServices;
     return a.nTimeConnected > b.nTimeConnected;
@@ -1034,7 +1028,7 @@ void ProtectEvictionCandidatesByRatio(std::vector<NodeEvictionCandidate>& vEvict
     // Protect up to 8 non-tx-relay peers that have sent us novel blocks.
     const size_t erase_size = std::min(size_t(8), vEvictionCandidates.size());
     EraseLastKElements(vEvictionCandidates, CompareNodeBlockRelayOnlyTime, erase_size,
-                       [](const NodeEvictionCandidate& n) { return !n.fRelayTxes && n.fRelevantServices; });
+                       [](const NodeEvictionCandidate& n) { return !n.m_relay_txs && n.fRelevantServices; });
     // Protect 4 nodes that most recently sent us novel blocks.
     // An attacker cannot manipulate this metric without performing useful work.
@@ -1120,18 +1114,11 @@ bool CConnman::AttemptToEvictConnection()
                 }
             }
-            bool peer_relay_txes = false;
-            bool peer_filter_not_null = false;
-            if (!node->IsBlockOnlyConn()) {
-                LOCK(node->m_tx_relay->cs_filter);
-                peer_relay_txes = node->m_tx_relay->fRelayTxes;
-                peer_filter_not_null = node->m_tx_relay->pfilter != nullptr;
-            }
             NodeEvictionCandidate candidate = {node->GetId(), node->nTimeConnected, node->m_min_ping_time,
                                                node->nLastBlockTime, node->nLastTXTime,
                                                HasAllDesirableServiceFlags(node->nServices),
-                                               peer_relay_txes, peer_filter_not_null, node->nKeyedNetGroup,
-                                               node->m_prefer_evict, node->addr.IsLocal(),
+                                               node->m_relays_txs.load(), node->m_bloom_filter_loaded.load(),
+                                               node->nKeyedNetGroup, node->m_prefer_evict, node->addr.IsLocal(),
                                                node->m_inbound_onion};
             vEvictionCandidates.push_back(candidate);
         }
@@ -1524,31 +1511,33 @@ void CConnman::CalculateNumConnectionsChangedStats()
     statsClient.gauge("peers.torConnections", torNodes, 1.0f);
 }
-bool CConnman::ShouldRunInactivityChecks(const CNode& node, int64_t now) const
+bool CConnman::ShouldRunInactivityChecks(const CNode& node, std::chrono::seconds now) const
 {
-    return node.nTimeConnected + m_peer_connect_timeout < now;
+    return std::chrono::seconds{node.nTimeConnected} + m_peer_connect_timeout < now;
 }
 bool CConnman::InactivityCheck(const CNode& node) const
 {
-    // Use non-mockable system time (otherwise these timers will pop when we
-    // use setmocktime in the tests).
-    int64_t now = GetTimeSeconds();
+    // Tests that see disconnects after using mocktime can start nodes with a
+    // large timeout. For example, -peertimeout=999999999.
+    const auto now{GetTime<std::chrono::seconds>()};
+    const auto last_send{node.m_last_send.load()};
+    const auto last_recv{node.m_last_recv.load()};
     if (!ShouldRunInactivityChecks(node, now)) return false;
-    if (node.nLastRecv == 0 || node.nLastSend == 0) {
-        LogPrint(BCLog::NET, "socket no message in first %i seconds, %d %d peer=%d\n", m_peer_connect_timeout, node.nLastRecv != 0, node.nLastSend != 0, node.GetId());
+    if (last_recv.count() == 0 || last_send.count() == 0) {
+        LogPrint(BCLog::NET, "socket no message in first %i seconds, %d %d peer=%d\n", count_seconds(m_peer_connect_timeout), last_recv.count() != 0, last_send.count() != 0, node.GetId());
         return true;
     }
-    if (now > node.nLastSend + TIMEOUT_INTERVAL) {
-        LogPrint(BCLog::NET, "socket sending timeout: %is peer=%d\n", now - node.nLastSend, node.GetId());
+    if (now > last_send + TIMEOUT_INTERVAL) {
+        LogPrint(BCLog::NET, "socket sending timeout: %is peer=%d\n", count_seconds(now - last_send), node.GetId());
        return true;
     }
-    if (now > node.nLastRecv + TIMEOUT_INTERVAL) {
-        LogPrint(BCLog::NET, "socket receive timeout: %is peer=%d\n", now - node.nLastRecv, node.GetId());
+    if (now > last_recv + TIMEOUT_INTERVAL) {
+        LogPrint(BCLog::NET, "socket receive timeout: %is peer=%d\n", count_seconds(now - last_recv), node.GetId());
        return true;
    }
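
Note: the `InactivityCheck` rework above is the core of bitcoin#19499: last-send/last-receive timestamps become `std::chrono` types, so timeout comparisons are unit-safe and testable against a mockable clock. A minimal standalone sketch of that pattern (names and types below are illustrative, not the real `CConnman` members):

```cpp
#include <atomic>
#include <chrono>

using namespace std::chrono_literals;

// Illustrative constant mirroring TIMEOUT_INTERVAL{20min} from the diff.
static constexpr std::chrono::minutes INACTIVITY_TIMEOUT{20};

struct LastActivity {
    std::atomic<std::chrono::seconds> last_send{0s};
    std::atomic<std::chrono::seconds> last_recv{0s};
};

// Returns true if the peer should be dropped for inactivity. Because every
// operand is a chrono duration, seconds and minutes mix without manual
// conversion, and a mocked "now" can be injected directly in unit tests.
bool ShouldDisconnect(const LastActivity& a, std::chrono::seconds now)
{
    const auto last_send{a.last_send.load()};
    const auto last_recv{a.last_recv.load()};
    if (last_send == 0s || last_recv == 0s) return true; // never exchanged a message
    return now > last_send + INACTIVITY_TIMEOUT || now > last_recv + INACTIVITY_TIMEOUT;
}
```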
@@ -4266,7 +4255,11 @@ void CConnman::UnregisterEvents(CNode *pnode)
     }
 #endif
 }
-void CaptureMessage(const CAddress& addr, const std::string& msg_type, const Span<const unsigned char>& data, bool is_incoming)
+void CaptureMessageToFile(const CAddress& addr,
+                          const std::string& msg_type,
+                          Span<const unsigned char> data,
+                          bool is_incoming)
 {
     // Note: This function captures the message at the time of processing,
     // not at socket receive/send time.
@@ -4293,3 +4286,9 @@ void CaptureMessage(const CAddress& addr, const std::string& msg_type, const Spa
     ser_writedata32(f, size);
     f.write(AsBytes(data));
 }
+std::function<void(const CAddress& addr,
+                   const std::string& msg_type,
+                   Span<const unsigned char> data,
+                   bool is_incoming)>
+    CaptureMessage = CaptureMessageToFile;
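
Note: turning `CaptureMessage` into a `std::function` that defaults to `CaptureMessageToFile` lets unit tests swap out the disk writer. A hedged sketch of how a test could install a hook (the recording lambda is illustrative, not code from the PR):

```cpp
#include <string>
#include <vector>

// Illustrative test hook: record captured message types in memory instead of
// writing captured messages to disk.
static std::vector<std::string> g_captured_msg_types;

static void InstallTestCaptureHook()
{
    CaptureMessage = [](const CAddress& addr, const std::string& msg_type,
                        Span<const unsigned char> data, bool is_incoming) {
        // addr, data and is_incoming are ignored in this sketch.
        g_captured_msg_types.push_back(msg_type);
    };
}
```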

View File

@@ -36,6 +36,7 @@
 #include <condition_variable>
 #include <cstdint>
 #include <deque>
+#include <functional>
 #include <map>
 #include <memory>
 #include <thread>
@@ -63,7 +64,7 @@ static const bool DEFAULT_WHITELISTRELAY = true;
 static const bool DEFAULT_WHITELISTFORCERELAY = false;
 /** Time after which to disconnect, after waiting for a ping response (or inactivity). */
-static const int TIMEOUT_INTERVAL = 20 * 60;
+static constexpr std::chrono::minutes TIMEOUT_INTERVAL{20};
 /** Time to wait since nTimeConnected before disconnecting a probe node. **/
 static const int PROBE_WAIT_INTERVAL = 5;
 /** Minimum time between warnings printed to log. */
@@ -274,9 +275,8 @@ class CNodeStats
 public:
     NodeId nodeid;
     ServiceFlags nServices;
-    bool fRelayTxes;
-    int64_t nLastSend;
-    int64_t nLastRecv;
+    std::chrono::seconds m_last_send;
+    std::chrono::seconds m_last_recv;
     int64_t nLastTXTime;
     int64_t nLastBlockTime;
     int64_t nTimeConnected;
@@ -459,8 +459,8 @@ public:
     uint64_t nRecvBytes GUARDED_BY(cs_vRecv){0};
-    std::atomic<int64_t> nLastSend{0};
-    std::atomic<int64_t> nLastRecv{0};
+    std::atomic<std::chrono::seconds> m_last_send{0s};
+    std::atomic<std::chrono::seconds> m_last_recv{0s};
     //! Unix epoch time at peer connection, in seconds.
     const int64_t nTimeConnected;
     std::atomic<int64_t> nTimeOffset{0};
@@ -580,34 +580,16 @@ public:
         assert(false);
     }
-    struct TxRelay {
-        mutable RecursiveMutex cs_filter;
-        // We use fRelayTxes for two purposes -
-        // a) it allows us to not relay tx invs before receiving the peer's version message
-        // b) the peer may tell us in its version message that we should not relay tx invs
-        //    unless it loads a bloom filter.
-        bool fRelayTxes GUARDED_BY(cs_filter){false};
-        std::unique_ptr<CBloomFilter> pfilter PT_GUARDED_BY(cs_filter) GUARDED_BY(cs_filter){nullptr};
-        mutable RecursiveMutex cs_tx_inventory;
-        // inventory based relay
-        CRollingBloomFilter filterInventoryKnown GUARDED_BY(cs_tx_inventory){50000, 0.000001};
-        // Set of transaction ids we still have to announce.
-        // They are sorted by the mempool before relay, so the order is not important.
-        std::set<uint256> setInventoryTxToSend GUARDED_BY(cs_tx_inventory);
-        // List of non-tx/non-block inventory items
-        std::vector<CInv> vInventoryOtherToSend GUARDED_BY(cs_tx_inventory);
-        // Used for BIP35 mempool sending, also protected by cs_tx_inventory
-        bool fSendMempool GUARDED_BY(cs_tx_inventory){false};
-        // Last time a "MEMPOOL" request was serviced.
-        std::atomic<std::chrono::seconds> m_last_mempool_req{0s};
-        std::chrono::microseconds nNextInvSend{0};
-    };
-    // in bitcoin: m_tx_relay == nullptr if we're not relaying transactions with this peer
-    // in dash: m_tx_relay should never be nullptr, we don't relay transactions if
-    // `IsBlockOnlyConn() == true` is instead
-    std::unique_ptr<TxRelay> m_tx_relay{std::make_unique<TxRelay>()};
+public:
+    /** Whether we should relay transactions to this peer (their version
+     *  message did not include fRelay=false and this is not a block-relay-only
+     *  connection). This only changes from false to true. It will never change
+     *  back to false. Used only in inbound eviction logic. */
+    std::atomic_bool m_relays_txs{false};
+    /** Whether this peer has loaded a bloom filter. Used only in inbound
+     *  eviction logic. */
+    std::atomic_bool m_bloom_filter_loaded{false};
     /** UNIX epoch time of the last block received from this peer that we had
      *  not yet seen (e.g. not already received from another peer), that passed
@@ -694,33 +676,6 @@ public:
         nRefCount--;
     }
-    void AddKnownInventory(const uint256& hash)
-    {
-        LOCK(m_tx_relay->cs_tx_inventory);
-        m_tx_relay->filterInventoryKnown.insert(hash);
-    }
-    void PushInventory(const CInv& inv)
-    {
-        ASSERT_IF_DEBUG(inv.type != MSG_BLOCK);
-        if (inv.type == MSG_BLOCK) {
-            LogPrintf("%s -- WARNING: using PushInventory for BLOCK inv, peer=%d\n", __func__, id);
-            return;
-        }
-        LOCK(m_tx_relay->cs_tx_inventory);
-        if (m_tx_relay->filterInventoryKnown.contains(inv.hash)) {
-            LogPrint(BCLog::NET, "%s -- skipping known inv: %s peer=%d\n", __func__, inv.ToString(), id);
-            return;
-        }
-        LogPrint(BCLog::NET, "%s -- adding new inv: %s peer=%d\n", __func__, inv.ToString(), id);
-        if (inv.type == MSG_TX || inv.type == MSG_DSTX) {
-            m_tx_relay->setInventoryTxToSend.insert(inv.hash);
-            return;
-        }
-        m_tx_relay->vInventoryOtherToSend.push_back(inv);
-    }
-
     void CloseSocketDisconnect(CConnman* connman);
     void copyStats(CNodeStats &stats, const std::vector<bool> &m_asmap);
@@ -926,7 +881,7 @@ public:
         m_msgproc = connOptions.m_msgproc;
         nSendBufferMaxSize = connOptions.nSendBufferMaxSize;
         nReceiveFloodSize = connOptions.nReceiveFloodSize;
-        m_peer_connect_timeout = connOptions.m_peer_connect_timeout;
+        m_peer_connect_timeout = std::chrono::seconds{connOptions.m_peer_connect_timeout};
         {
             LOCK(cs_totalBytesSent);
             nMaxOutboundLimit = connOptions.nMaxOutboundLimit;
@@ -1235,7 +1190,7 @@ public:
     void SetAsmap(std::vector<bool> asmap) { addrman.m_asmap = std::move(asmap); }
     /** Return true if we should disconnect the peer for failing an inactivity check. */
-    bool ShouldRunInactivityChecks(const CNode& node, int64_t secs_now) const;
+    bool ShouldRunInactivityChecks(const CNode& node, std::chrono::seconds now) const;
 private:
     struct ListenSocket {
@@ -1351,7 +1306,7 @@ private:
     uint64_t nMaxOutboundLimit GUARDED_BY(cs_totalBytesSent);
     // P2P timeout in seconds
-    int64_t m_peer_connect_timeout;
+    std::chrono::seconds m_peer_connect_timeout;
     // Whitelisted ranges. Any node connecting from these is automatically
     // whitelisted (as well as those connecting to whitelisted binds).
@@ -1528,7 +1483,17 @@ private:
 std::chrono::microseconds PoissonNextSend(std::chrono::microseconds now, std::chrono::seconds average_interval);
 /** Dump binary message to file, with timestamp */
-void CaptureMessage(const CAddress& addr, const std::string& msg_type, const Span<const unsigned char>& data, bool is_incoming);
+void CaptureMessageToFile(const CAddress& addr,
+                          const std::string& msg_type,
+                          Span<const unsigned char> data,
+                          bool is_incoming);
+/** Defaults to `CaptureMessageToFile()`, but can be overridden by unit tests. */
+extern std::function<void(const CAddress& addr,
+                          const std::string& msg_type,
+                          Span<const unsigned char> data,
+                          bool is_incoming)>
+    CaptureMessage;
 struct NodeEvictionCandidate
 {
@@ -1538,7 +1503,7 @@ struct NodeEvictionCandidate
     int64_t nLastBlockTime;
     int64_t nLastTXTime;
     bool fRelevantServices;
-    bool fRelayTxes;
+    bool m_relay_txs;
     bool fBloomFilter;
     uint64_t nKeyedNetGroup;
     bool prefer_evict;

View File

@@ -262,6 +262,35 @@ struct Peer {
     /** Whether a ping has been requested by the user */
     std::atomic<bool> m_ping_queued{false};
+    struct TxRelay {
+        mutable RecursiveMutex m_bloom_filter_mutex;
+        // We use m_relay_txs for two purposes -
+        // a) it allows us to not relay tx invs before receiving the peer's version message
+        // b) the peer may tell us in its version message that we should not relay tx invs
+        //    unless it loads a bloom filter.
+        bool m_relay_txs GUARDED_BY(m_bloom_filter_mutex){false};
+        std::unique_ptr<CBloomFilter> m_bloom_filter PT_GUARDED_BY(m_bloom_filter_mutex) GUARDED_BY(m_bloom_filter_mutex){nullptr};
+        mutable RecursiveMutex m_tx_inventory_mutex;
+        // inventory based relay
+        CRollingBloomFilter m_tx_inventory_known_filter GUARDED_BY(m_tx_inventory_mutex){50000, 0.000001};
+        // Set of transaction ids we still have to announce.
+        // They are sorted by the mempool before relay, so the order is not important.
+        std::set<uint256> m_tx_inventory_to_send GUARDED_BY(m_tx_inventory_mutex);
+        // List of non-tx/non-block inventory items
+        std::vector<CInv> vInventoryOtherToSend GUARDED_BY(m_tx_inventory_mutex);
+        // Used for BIP35 mempool sending, also protected by m_tx_inventory_mutex
+        bool m_send_mempool GUARDED_BY(m_tx_inventory_mutex){false};
+        // Last time a "MEMPOOL" request was serviced.
+        std::atomic<std::chrono::seconds> m_last_mempool_req{0s};
+        std::chrono::microseconds m_next_inv_send_time{0};
+    };
+    // in bitcoin: m_tx_relay == nullptr if we're not relaying transactions with this peer
+    // in dash: m_tx_relay should never be nullptr, we don't relay transactions if
+    // `IsBlockOnlyConn() == true` is instead
+    std::unique_ptr<TxRelay> m_tx_relay{std::make_unique<TxRelay>()};
     /** A vector of addresses to send to the peer, limited to MAX_ADDR_TO_SEND. */
     std::vector<CAddress> m_addrs_to_send;
     /** Probabilistic filter to track recent addr messages relayed with this
@@ -289,6 +318,8 @@ struct Peer {
      *  This field must correlate with whether m_addr_known has been
      *  initialized.*/
     std::atomic_bool m_addr_relay_enabled{false};
+    /** Whether a Peer can only be relayed blocks */
+    const bool m_block_relay_only{false};
     /** Whether a getaddr request to this peer is outstanding. */
     bool m_getaddr_sent{false};
     /** Guards address sending timers. */
@@ -320,8 +351,10 @@ struct Peer {
     /** Work queue of items requested by this peer **/
     std::deque<CInv> m_getdata_requests GUARDED_BY(m_getdata_requests_mutex);
-    explicit Peer(NodeId id)
+    explicit Peer(NodeId id, bool block_relay_only)
         : m_id(id)
+        , m_tx_relay(std::make_unique<TxRelay>())
+        , m_block_relay_only{block_relay_only}
     {}
 };
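
Note: with tx-relay state owned by `Peer`, readers take the Peer-level mutexes instead of locking anything on `CNode`; the `GetNodeStateStats()` hunk further down shows the real pattern. A minimal hedged sketch (the helper below is illustrative, not part of the PR):

```cpp
// Illustrative helper: check whether a peer signalled tx relay, taking the
// bloom-filter mutex that guards m_relay_txs. Dash always allocates
// m_tx_relay, so no null check is needed.
static bool PeerRelaysTxs(const Peer& peer)
{
    LOCK(peer.m_tx_relay->m_bloom_filter_mutex);
    return peer.m_tx_relay->m_relay_txs;
}
```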
@@ -358,6 +391,7 @@ public:
     bool GetNodeStateStats(NodeId nodeid, CNodeStateStats& stats) const override;
     bool IgnoresIncomingTxs() override { return m_ignore_incoming_txs; }
     void SendPings() override;
+    void PushInventory(NodeId nodeid, const CInv& inv) override;
     void RelayInv(CInv &inv, const int minProtoVersion) override;
     void RelayInvFiltered(CInv &inv, const CTransaction &relatedTx, const int minProtoVersion) override;
     void RelayInvFiltered(CInv &inv, const uint256 &relatedTxHash, const int minProtoVersion) override;
@@ -367,7 +401,7 @@ public:
     void ProcessMessage(CNode& pfrom, const std::string& msg_type, CDataStream& vRecv,
                         const std::chrono::microseconds time_received, const std::atomic<bool>& interruptMsgProc) override;
     bool IsBanned(NodeId pnode) override EXCLUSIVE_LOCKS_REQUIRED(cs_main);
-    bool CanRelayAddrs(NodeId pnode) const override;
+    bool IsInvInFilter(NodeId nodeid, const uint256& hash) const override;
 private:
     /** Helper to process result of external handlers of message */
@@ -430,7 +464,7 @@ private:
     void SendBlockTransactions(CNode& pfrom, const CBlock& block, const BlockTransactionsRequest& req);
     /** Send a version message to a peer */
-    void PushNodeVersion(CNode& pnode, int64_t nTime);
+    void PushNodeVersion(CNode& pnode, const Peer& peer);
     /** Send a ping message every PING_INTERVAL or if requested via RPC. May
      *  mark the peer to be disconnected if a ping has timed out.
@@ -902,6 +936,39 @@ static void PushAddress(Peer& peer, const CAddress& addr, FastRandomContext& ins
     }
 }
+static void AddKnownInv(Peer& peer, const uint256& hash)
+{
+    // Dash always initializes m_tx_relay
+    assert(peer.m_tx_relay != nullptr);
+    LOCK(peer.m_tx_relay->m_tx_inventory_mutex);
+    peer.m_tx_relay->m_tx_inventory_known_filter.insert(hash);
+}
+static void PushInv(Peer& peer, const CInv& inv)
+{
+    // Dash always initializes m_tx_relay
+    assert(peer.m_tx_relay != nullptr);
+    ASSERT_IF_DEBUG(inv.type != MSG_BLOCK);
+    if (inv.type == MSG_BLOCK) {
+        LogPrintf("%s -- WARNING: using PushInv for BLOCK inv, peer=%d\n", __func__, peer.m_id);
+        return;
+    }
+    LOCK(peer.m_tx_relay->m_tx_inventory_mutex);
+    if (peer.m_tx_relay->m_tx_inventory_known_filter.contains(inv.hash)) {
+        LogPrint(BCLog::NET, "%s -- skipping known inv: %s peer=%d\n", __func__, inv.ToString(), peer.m_id);
+        return;
+    }
+    LogPrint(BCLog::NET, "%s -- adding new inv: %s peer=%d\n", __func__, inv.ToString(), peer.m_id);
+    if (inv.type == MSG_TX || inv.type == MSG_DSTX) {
+        peer.m_tx_relay->m_tx_inventory_to_send.insert(inv.hash);
+        return;
+    }
+    peer.m_tx_relay->vInventoryOtherToSend.push_back(inv);
+}
 static void UpdatePreferredDownload(const CNode& node, CNodeState* state) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
 {
     nPreferredDownload -= state->fPreferredDownload;
@@ -1148,7 +1215,7 @@ void PeerManagerImpl::FindNextBlocksToDownload(NodeId nodeid, unsigned int count
     }
 } // namespace
-void PeerManagerImpl::PushNodeVersion(CNode& pnode, int64_t nTime)
+void PeerManagerImpl::PushNodeVersion(CNode& pnode, const Peer& peer)
 {
     const auto& params = Params();
@@ -1156,6 +1223,7 @@ void PeerManagerImpl::PushNodeVersion(CNode& pnode, int64_t nTime)
     // services we were offering when the CNode object was created for this
     // peer.
     ServiceFlags nLocalNodeServices = pnode.GetLocalServices();
+    const int64_t nTime{count_seconds(GetTime<std::chrono::seconds>())};
     uint64_t nonce = pnode.GetLocalNonce();
     const int nNodeStartingHeight{m_best_height};
     NodeId nodeid = pnode.GetId();
@@ -1342,13 +1410,13 @@ void PeerManagerImpl::InitializeNode(CNode *pnode) {
         LOCK(cs_main);
         mapNodeState.emplace_hint(mapNodeState.end(), std::piecewise_construct, std::forward_as_tuple(nodeid), std::forward_as_tuple(pnode->IsInboundConn()));
     }
+    PeerRef peer = std::make_shared<Peer>(nodeid, /* block_relay_only = */ pnode->IsBlockOnlyConn());
     {
-        PeerRef peer = std::make_shared<Peer>(nodeid);
         LOCK(m_peer_mutex);
-        m_peer_map.emplace_hint(m_peer_map.end(), nodeid, std::move(peer));
+        m_peer_map.emplace_hint(m_peer_map.end(), nodeid, peer);
     }
     if (!pnode->IsInboundConn()) {
-        PushNodeVersion(*pnode, GetTime());
+        PushNodeVersion(*pnode, *peer);
     }
 }
@@ -1473,6 +1541,12 @@ bool PeerManagerImpl::GetNodeStateStats(NodeId nodeid, CNodeStateStats& stats) c
         ping_wait = GetTime<std::chrono::microseconds>() - peer->m_ping_start.load();
     }
+    if (!peer->m_block_relay_only) {
+        stats.m_relay_txs = WITH_LOCK(peer->m_tx_relay->m_bloom_filter_mutex, return peer->m_tx_relay->m_relay_txs);
+    } else {
+        stats.m_relay_txs = false;
+    }
     stats.m_ping_wait = ping_wait;
     stats.m_addr_processed = peer->m_addr_processed.load();
     stats.m_addr_rate_limited = peer->m_addr_rate_limited.load();
@@ -1661,14 +1735,6 @@ bool PeerManagerImpl::IsBanned(NodeId pnode)
     return false;
 }
-bool PeerManagerImpl::CanRelayAddrs(NodeId pnode) const
-{
-    PeerRef peer = GetPeerRef(pnode);
-    if (peer == nullptr)
-        return false;
-    return peer->m_addr_relay_enabled;
-}
 bool PeerManagerImpl::MaybePunishNodeForBlock(NodeId nodeid, const BlockValidationState& state,
                                               bool via_compact_block, const std::string& message)
 {
@@ -2112,59 +2178,95 @@ void PeerManagerImpl::SendPings()
     for(auto& it : m_peer_map) it.second->m_ping_queued = true;
 }
-void PeerManagerImpl::RelayInv(CInv &inv, const int minProtoVersion) {
+bool PeerManagerImpl::IsInvInFilter(NodeId nodeid, const uint256& hash) const
+{
+    PeerRef peer = GetPeerRef(nodeid);
+    if (peer == nullptr)
+        return false;
+    if (peer->m_block_relay_only)
+        return false;
+    LOCK(peer->m_tx_relay->m_tx_inventory_mutex);
+    return peer->m_tx_relay->m_tx_inventory_known_filter.contains(hash);
+}
+void PeerManagerImpl::PushInventory(NodeId nodeid, const CInv& inv)
+{
+    // TODO: Get rid of this function at some point
+    PeerRef peer = GetPeerRef(nodeid);
+    if (peer == nullptr)
+        return;
+    PushInv(*peer, inv);
+}
+void PeerManagerImpl::RelayInv(CInv &inv, const int minProtoVersion)
+{
+    // TODO: Migrate to iteration through m_peer_map
     m_connman.ForEachNode([&](CNode* pnode) {
         if (pnode->nVersion < minProtoVersion || !pnode->CanRelay())
             return;
-        pnode->PushInventory(inv);
+        PeerRef peer = GetPeerRef(pnode->GetId());
+        if (peer == nullptr) return;
+        PushInv(*peer, inv);
     });
 }
 void PeerManagerImpl::RelayInvFiltered(CInv &inv, const CTransaction& relatedTx, const int minProtoVersion)
 {
+    // TODO: Migrate to iteration through m_peer_map
     m_connman.ForEachNode([&](CNode* pnode) {
         if (pnode->nVersion < minProtoVersion || !pnode->CanRelay() || pnode->IsBlockOnlyConn()) {
             return;
         }
+        PeerRef peer = GetPeerRef(pnode->GetId());
+        if (peer == nullptr) return;
         {
-            LOCK(pnode->m_tx_relay->cs_filter);
-            if (!pnode->m_tx_relay->fRelayTxes) {
+            LOCK(peer->m_tx_relay->m_bloom_filter_mutex);
+            if (!peer->m_tx_relay->m_relay_txs) {
                 return;
             }
-            if (pnode->m_tx_relay->pfilter && !pnode->m_tx_relay->pfilter->IsRelevantAndUpdate(relatedTx)) {
+            if (peer->m_tx_relay->m_bloom_filter && !peer->m_tx_relay->m_bloom_filter->IsRelevantAndUpdate(relatedTx)) {
                 return;
             }
        }
-        pnode->PushInventory(inv);
+        PushInv(*peer, inv);
    });
 }
 void PeerManagerImpl::RelayInvFiltered(CInv &inv, const uint256& relatedTxHash, const int minProtoVersion)
 {
+    // TODO: Migrate to iteration through m_peer_map
     m_connman.ForEachNode([&](CNode* pnode) {
         if (pnode->nVersion < minProtoVersion || !pnode->CanRelay() || pnode->IsBlockOnlyConn()) {
             return;
         }
+        PeerRef peer = GetPeerRef(pnode->GetId());
+        if (peer == nullptr) return;
        {
-            LOCK(pnode->m_tx_relay->cs_filter);
-            if (!pnode->m_tx_relay->fRelayTxes) {
+            LOCK(peer->m_tx_relay->m_bloom_filter_mutex);
+            if (!peer->m_tx_relay->m_relay_txs) {
                 return;
             }
-            if (pnode->m_tx_relay->pfilter && !pnode->m_tx_relay->pfilter->contains(relatedTxHash)) {
+            if (peer->m_tx_relay->m_bloom_filter && !peer->m_tx_relay->m_bloom_filter->contains(relatedTxHash)) {
                 return;
             }
        }
-        pnode->PushInventory(inv);
+        PushInv(*peer, inv);
    });
 }
 void PeerManagerImpl::RelayTransaction(const uint256& txid)
 {
-    CInv inv(m_cj_ctx->dstxman->GetDSTX(txid) ? MSG_DSTX : MSG_TX, txid);
-    m_connman.ForEachNode([&inv](CNode* pnode)
-    {
-        pnode->PushInventory(inv);
-    });
+    LOCK(m_peer_mutex);
+    for(auto& it : m_peer_map) {
+        Peer& peer = *it.second;
+        if (!peer.m_tx_relay) continue;
+        const CInv inv{m_cj_ctx->dstxman->GetDSTX(txid) ? MSG_DSTX : MSG_TX, txid};
+        PushInv(peer, inv);
+    };
 }
 void PeerManagerImpl::RelayAddress(NodeId originator,
@@ -2298,10 +2400,10 @@ void PeerManagerImpl::ProcessGetBlockData(CNode& pfrom, Peer& peer, const CInv&
     bool sendMerkleBlock = false;
     CMerkleBlock merkleBlock;
     if (!pfrom.IsBlockOnlyConn()) {
-        LOCK(pfrom.m_tx_relay->cs_filter);
-        if (pfrom.m_tx_relay->pfilter) {
+        LOCK(peer.m_tx_relay->m_bloom_filter_mutex);
+        if (peer.m_tx_relay->m_bloom_filter) {
             sendMerkleBlock = true;
-            merkleBlock = CMerkleBlock(*pblock, *pfrom.m_tx_relay->pfilter);
+            merkleBlock = CMerkleBlock(*pblock, *peer.m_tx_relay->m_bloom_filter);
         }
     }
     if (sendMerkleBlock) {
@@ -2400,7 +2502,7 @@ void PeerManagerImpl::ProcessGetData(CNode& pfrom, Peer& peer, const std::atomic
     const std::chrono::seconds now = GetTime<std::chrono::seconds>();
     // Get last mempool request time
-    const std::chrono::seconds mempool_req = !pfrom.IsBlockOnlyConn() ? pfrom.m_tx_relay->m_last_mempool_req.load()
+    const std::chrono::seconds mempool_req = !pfrom.IsBlockOnlyConn() ? peer.m_tx_relay->m_last_mempool_req.load()
: std::chrono::seconds::min(); : std::chrono::seconds::min();
// Process as many TX items from the front of the getdata queue as // Process as many TX items from the front of the getdata queue as
@ -2462,7 +2564,7 @@ void PeerManagerImpl::ProcessGetData(CNode& pfrom, Peer& peer, const std::atomic
for (const uint256& parent_txid : parent_ids_to_add) { for (const uint256& parent_txid : parent_ids_to_add) {
// Relaying a transaction with a recent but unconfirmed parent. // Relaying a transaction with a recent but unconfirmed parent.
if (WITH_LOCK(pfrom.m_tx_relay->cs_tx_inventory, return !pfrom.m_tx_relay->filterInventoryKnown.contains(parent_txid))) { if (WITH_LOCK(peer.m_tx_relay->m_tx_inventory_mutex, return !peer.m_tx_relay->m_tx_inventory_known_filter.contains(parent_txid))) {
LOCK(cs_main); LOCK(cs_main);
State(pfrom.GetId())->m_recently_announced_invs.insert(parent_txid); State(pfrom.GetId())->m_recently_announced_invs.insert(parent_txid);
} }
@ -3220,8 +3322,9 @@ void PeerManagerImpl::ProcessMessage(
} }
// Be shy and don't send version until we hear // Be shy and don't send version until we hear
if (pfrom.IsInboundConn()) if (pfrom.IsInboundConn()) {
PushNodeVersion(pfrom, GetAdjustedTime()); PushNodeVersion(pfrom, *peer);
}
if (Params().NetworkIDString() == CBaseChainParams::DEVNET) { if (Params().NetworkIDString() == CBaseChainParams::DEVNET) {
if (cleanSubVer.find(strprintf("devnet.%s", gArgs.GetDevNetName())) == std::string::npos) { if (cleanSubVer.find(strprintf("devnet.%s", gArgs.GetDevNetName())) == std::string::npos) {
@ -3266,8 +3369,11 @@ void PeerManagerImpl::ProcessMessage(
pfrom.m_limited_node = (!(nServices & NODE_NETWORK) && (nServices & NODE_NETWORK_LIMITED)); pfrom.m_limited_node = (!(nServices & NODE_NETWORK) && (nServices & NODE_NETWORK_LIMITED));
if (!pfrom.IsBlockOnlyConn()) { if (!pfrom.IsBlockOnlyConn()) {
LOCK(pfrom.m_tx_relay->cs_filter); {
pfrom.m_tx_relay->fRelayTxes = fRelay; // set to true after we get the first filter* message LOCK(peer->m_tx_relay->m_bloom_filter_mutex);
peer->m_tx_relay->m_relay_txs = fRelay; // set to true after we get the first filter* message
}
if (fRelay) pfrom.m_relays_txs = true;
} }
// Potentially mark this peer as a preferred download peer. // Potentially mark this peer as a preferred download peer.
@ -3656,7 +3762,7 @@ void PeerManagerImpl::ProcessMessage(
MSG_SPORK MSG_SPORK
}; };
pfrom.AddKnownInventory(inv.hash); AddKnownInv(*peer, inv.hash);
if (fBlocksOnly && NetMessageViolatesBlocksOnly(inv.GetCommand())) { if (fBlocksOnly && NetMessageViolatesBlocksOnly(inv.GetCommand())) {
LogPrint(BCLog::NET, "%s (%s) inv sent in violation of protocol, disconnecting peer=%d\n", inv.GetCommand(), inv.hash.ToString(), pfrom.GetId()); LogPrint(BCLog::NET, "%s (%s) inv sent in violation of protocol, disconnecting peer=%d\n", inv.GetCommand(), inv.hash.ToString(), pfrom.GetId());
pfrom.fDisconnect = true; pfrom.fDisconnect = true;
@ -3932,7 +4038,7 @@ void PeerManagerImpl::ProcessMessage(
const CTransaction& tx = *ptx; const CTransaction& tx = *ptx;
const uint256& txid = ptx->GetHash(); const uint256& txid = ptx->GetHash();
pfrom.AddKnownInventory(txid); AddKnownInv(*peer, txid);
CInv inv(nInvType, tx.GetHash()); CInv inv(nInvType, tx.GetHash());
{ {
@ -4026,11 +4132,11 @@ void PeerManagerImpl::ProcessMessage(
for (const uint256& parent_txid : unique_parents) { for (const uint256& parent_txid : unique_parents) {
CInv _inv(MSG_TX, parent_txid); CInv _inv(MSG_TX, parent_txid);
pfrom.AddKnownInventory(_inv.hash); AddKnownInv(*peer, _inv.hash);
if (!AlreadyHave(_inv)) RequestObject(State(pfrom.GetId()), _inv, current_time, is_masternode); if (!AlreadyHave(_inv)) RequestObject(State(pfrom.GetId()), _inv, current_time, is_masternode);
// We don't know if the previous tx was a regular or a mixing one, try both // We don't know if the previous tx was a regular or a mixing one, try both
CInv _inv2(MSG_DSTX, parent_txid); CInv _inv2(MSG_DSTX, parent_txid);
pfrom.AddKnownInventory(_inv2.hash); AddKnownInv(*peer, _inv2.hash);
if (!AlreadyHave(_inv2)) RequestObject(State(pfrom.GetId()), _inv2, current_time, is_masternode); if (!AlreadyHave(_inv2)) RequestObject(State(pfrom.GetId()), _inv2, current_time, is_masternode);
} }
AddOrphanTx(ptx, pfrom.GetId()); AddOrphanTx(ptx, pfrom.GetId());
@ -4488,8 +4594,8 @@ void PeerManagerImpl::ProcessMessage(
} }
if (!pfrom.IsBlockOnlyConn()) { if (!pfrom.IsBlockOnlyConn()) {
LOCK(pfrom.m_tx_relay->cs_tx_inventory); LOCK(peer->m_tx_relay->m_tx_inventory_mutex);
pfrom.m_tx_relay->fSendMempool = true; peer->m_tx_relay->m_send_mempool = true;
} }
return; return;
} }
@ -4583,9 +4689,13 @@ void PeerManagerImpl::ProcessMessage(
} }
else if (!pfrom.IsBlockOnlyConn()) else if (!pfrom.IsBlockOnlyConn())
{ {
LOCK(pfrom.m_tx_relay->cs_filter); {
pfrom.m_tx_relay->pfilter.reset(new CBloomFilter(filter)); LOCK(peer->m_tx_relay->m_bloom_filter_mutex);
pfrom.m_tx_relay->fRelayTxes = true; peer->m_tx_relay->m_bloom_filter.reset(new CBloomFilter(filter));
peer->m_tx_relay->m_relay_txs = true;
}
pfrom.m_bloom_filter_loaded = true;
pfrom.m_relays_txs = true;
} }
return; return;
} }
@ -4605,9 +4715,9 @@ void PeerManagerImpl::ProcessMessage(
if (vData.size() > MAX_SCRIPT_ELEMENT_SIZE) { if (vData.size() > MAX_SCRIPT_ELEMENT_SIZE) {
bad = true; bad = true;
} else if (!pfrom.IsBlockOnlyConn()) { } else if (!pfrom.IsBlockOnlyConn()) {
LOCK(pfrom.m_tx_relay->cs_filter); LOCK(peer->m_tx_relay->m_bloom_filter_mutex);
if (pfrom.m_tx_relay->pfilter) { if (peer->m_tx_relay->m_bloom_filter) {
pfrom.m_tx_relay->pfilter->insert(vData); peer->m_tx_relay->m_bloom_filter->insert(vData);
} else { } else {
bad = true; bad = true;
} }
@ -4627,9 +4737,14 @@ void PeerManagerImpl::ProcessMessage(
if (pfrom.IsBlockOnlyConn()) { if (pfrom.IsBlockOnlyConn()) {
return; return;
} }
LOCK(pfrom.m_tx_relay->cs_filter);
pfrom.m_tx_relay->pfilter = nullptr; {
pfrom.m_tx_relay->fRelayTxes = true; LOCK(peer->m_tx_relay->m_bloom_filter_mutex);
peer->m_tx_relay->m_bloom_filter = nullptr;
peer->m_tx_relay->m_relay_txs = true;
}
pfrom.m_bloom_filter_loaded = false;
pfrom.m_relays_txs = true;
return; return;
} }
@ -4747,7 +4862,7 @@ void PeerManagerImpl::ProcessMessage(
ProcessPeerMsgRet(m_llmq_ctx->qdkgsman->ProcessMessage(pfrom, this, is_masternode, msg_type, vRecv), pfrom); ProcessPeerMsgRet(m_llmq_ctx->qdkgsman->ProcessMessage(pfrom, this, is_masternode, msg_type, vRecv), pfrom);
ProcessPeerMsgRet(m_llmq_ctx->qman->ProcessMessage(pfrom, msg_type, vRecv), pfrom); ProcessPeerMsgRet(m_llmq_ctx->qman->ProcessMessage(pfrom, msg_type, vRecv), pfrom);
m_llmq_ctx->shareman->ProcessMessage(pfrom, m_sporkman, msg_type, vRecv); m_llmq_ctx->shareman->ProcessMessage(pfrom, m_sporkman, msg_type, vRecv);
ProcessPeerMsgRet(m_llmq_ctx->sigman->ProcessMessage(pfrom, this, msg_type, vRecv), pfrom); ProcessPeerMsgRet(m_llmq_ctx->sigman->ProcessMessage(pfrom, msg_type, vRecv), pfrom);
ProcessPeerMsgRet(m_llmq_ctx->clhandler->ProcessMessage(pfrom, msg_type, vRecv), pfrom); ProcessPeerMsgRet(m_llmq_ctx->clhandler->ProcessMessage(pfrom, msg_type, vRecv), pfrom);
ProcessPeerMsgRet(m_llmq_ctx->isman->ProcessMessage(pfrom, msg_type, vRecv), pfrom); ProcessPeerMsgRet(m_llmq_ctx->isman->ProcessMessage(pfrom, msg_type, vRecv), pfrom);
return; return;
@ -5063,9 +5178,10 @@ void PeerManagerImpl::CheckForStaleTipAndEvictPeers()
void PeerManagerImpl::MaybeSendPing(CNode& node_to, Peer& peer, std::chrono::microseconds now) void PeerManagerImpl::MaybeSendPing(CNode& node_to, Peer& peer, std::chrono::microseconds now)
{ {
if (m_connman.ShouldRunInactivityChecks(node_to, std::chrono::duration_cast<std::chrono::seconds>(now).count()) && if (m_connman.ShouldRunInactivityChecks(node_to, std::chrono::duration_cast<std::chrono::seconds>(now)) &&
peer.m_ping_nonce_sent && peer.m_ping_nonce_sent &&
now > peer.m_ping_start.load() + std::chrono::seconds{TIMEOUT_INTERVAL}) { now > peer.m_ping_start.load() + TIMEOUT_INTERVAL)
{
// The ping timeout is using mocktime. To disable the check during // The ping timeout is using mocktime. To disable the check during
// testing, increase -peertimeout. // testing, increase -peertimeout.
LogPrint(BCLog::NET, "ping timeout: %fs peer=%d\n", 0.000001 * count_microseconds(now - peer.m_ping_start.load()), peer.m_id); LogPrint(BCLog::NET, "ping timeout: %fs peer=%d\n", 0.000001 * count_microseconds(now - peer.m_ping_start.load()), peer.m_id);
@ -5431,8 +5547,8 @@ bool PeerManagerImpl::SendMessages(CNode* pto)
size_t reserve = INVENTORY_BROADCAST_MAX_PER_1MB_BLOCK * MaxBlockSize() / 1000000; size_t reserve = INVENTORY_BROADCAST_MAX_PER_1MB_BLOCK * MaxBlockSize() / 1000000;
if (!pto->IsBlockOnlyConn()) { if (!pto->IsBlockOnlyConn()) {
LOCK(pto->m_tx_relay->cs_tx_inventory); LOCK(peer->m_tx_relay->m_tx_inventory_mutex);
reserve = std::min<size_t>(pto->m_tx_relay->setInventoryTxToSend.size(), reserve); reserve = std::min<size_t>(peer->m_tx_relay->m_tx_inventory_to_send.size(), reserve);
} }
reserve = std::max<size_t>(reserve, peer->m_blocks_for_inv_relay.size()); reserve = std::max<size_t>(reserve, peer->m_blocks_for_inv_relay.size());
reserve = std::min<size_t>(reserve, MAX_INV_SZ); reserve = std::min<size_t>(reserve, MAX_INV_SZ);
@ -5448,9 +5564,9 @@ bool PeerManagerImpl::SendMessages(CNode* pto)
} }
peer->m_blocks_for_inv_relay.clear(); peer->m_blocks_for_inv_relay.clear();
auto queueAndMaybePushInv = [this, pto, &vInv, &msgMaker](const CInv& invIn) { auto queueAndMaybePushInv = [this, pto, peer, &vInv, &msgMaker](const CInv& invIn) {
AssertLockHeld(pto->m_tx_relay->cs_tx_inventory); AssertLockHeld(peer->m_tx_relay->m_tx_inventory_mutex);
pto->m_tx_relay->filterInventoryKnown.insert(invIn.hash); peer->m_tx_relay->m_tx_inventory_known_filter.insert(invIn.hash);
LogPrint(BCLog::NET, "SendMessages -- queued inv: %s index=%d peer=%d\n", invIn.ToString(), vInv.size(), pto->GetId()); LogPrint(BCLog::NET, "SendMessages -- queued inv: %s index=%d peer=%d\n", invIn.ToString(), vInv.size(), pto->GetId());
// Responses to MEMPOOL requests bypass the m_recently_announced_invs filter. // Responses to MEMPOOL requests bypass the m_recently_announced_invs filter.
vInv.push_back(invIn); vInv.push_back(invIn);
@ -5462,41 +5578,41 @@ bool PeerManagerImpl::SendMessages(CNode* pto)
}; };
if (!pto->IsBlockOnlyConn()) { if (!pto->IsBlockOnlyConn()) {
LOCK(pto->m_tx_relay->cs_tx_inventory); LOCK(peer->m_tx_relay->m_tx_inventory_mutex);
// Check whether periodic sends should happen // Check whether periodic sends should happen
// Note: If this node is running in a Masternode mode, it makes no sense to delay outgoing txes // Note: If this node is running in a Masternode mode, it makes no sense to delay outgoing txes
// because we never produce any txes ourselves i.e. no privacy is lost in this case. // because we never produce any txes ourselves i.e. no privacy is lost in this case.
bool fSendTrickle = pto->HasPermission(NetPermissionFlags::NoBan) || is_masternode; bool fSendTrickle = pto->HasPermission(NetPermissionFlags::NoBan) || is_masternode;
if (pto->m_tx_relay->nNextInvSend < current_time) { if (peer->m_tx_relay->m_next_inv_send_time < current_time) {
fSendTrickle = true; fSendTrickle = true;
if (pto->IsInboundConn()) { if (pto->IsInboundConn()) {
pto->m_tx_relay->nNextInvSend = m_connman.PoissonNextSendInbound(current_time, INBOUND_INVENTORY_BROADCAST_INTERVAL); peer->m_tx_relay->m_next_inv_send_time = m_connman.PoissonNextSendInbound(current_time, INBOUND_INVENTORY_BROADCAST_INTERVAL);
} else { } else {
// Use half the delay for Masternode outbound peers, as there is less privacy concern for them. // Use half the delay for Masternode outbound peers, as there is less privacy concern for them.
pto->m_tx_relay->nNextInvSend = pto->GetVerifiedProRegTxHash().IsNull() ? peer->m_tx_relay->m_next_inv_send_time = pto->GetVerifiedProRegTxHash().IsNull() ?
PoissonNextSend(current_time, OUTBOUND_INVENTORY_BROADCAST_INTERVAL) : PoissonNextSend(current_time, OUTBOUND_INVENTORY_BROADCAST_INTERVAL) :
PoissonNextSend(current_time, OUTBOUND_INVENTORY_BROADCAST_INTERVAL / 2); PoissonNextSend(current_time, OUTBOUND_INVENTORY_BROADCAST_INTERVAL / 2);
} }
} }
// Time to send but the peer has requested we not relay transactions. // Time to send but the peer has requested we not relay transactions.
if (fSendTrickle) { if (fSendTrickle) {
LOCK(pto->m_tx_relay->cs_filter); LOCK(peer->m_tx_relay->m_bloom_filter_mutex);
if (!pto->m_tx_relay->fRelayTxes) pto->m_tx_relay->setInventoryTxToSend.clear(); if (!peer->m_tx_relay->m_relay_txs) peer->m_tx_relay->m_tx_inventory_to_send.clear();
} }
// Respond to BIP35 mempool requests // Respond to BIP35 mempool requests
if (fSendTrickle && pto->m_tx_relay->fSendMempool) { if (fSendTrickle && peer->m_tx_relay->m_send_mempool) {
auto vtxinfo = m_mempool.infoAll(); auto vtxinfo = m_mempool.infoAll();
pto->m_tx_relay->fSendMempool = false; peer->m_tx_relay->m_send_mempool = false;
LOCK(pto->m_tx_relay->cs_filter); LOCK(peer->m_tx_relay->m_bloom_filter_mutex);
// Send invs for txes and corresponding IS-locks // Send invs for txes and corresponding IS-locks
for (const auto& txinfo : vtxinfo) { for (const auto& txinfo : vtxinfo) {
const uint256& hash = txinfo.tx->GetHash(); const uint256& hash = txinfo.tx->GetHash();
pto->m_tx_relay->setInventoryTxToSend.erase(hash); peer->m_tx_relay->m_tx_inventory_to_send.erase(hash);
if (pto->m_tx_relay->pfilter && !pto->m_tx_relay->pfilter->IsRelevantAndUpdate(*txinfo.tx)) continue; if (peer->m_tx_relay->m_bloom_filter && !peer->m_tx_relay->m_bloom_filter->IsRelevantAndUpdate(*txinfo.tx)) continue;
int nInvType = m_cj_ctx->dstxman->GetDSTX(hash) ? MSG_DSTX : MSG_TX; int nInvType = m_cj_ctx->dstxman->GetDSTX(hash) ? MSG_DSTX : MSG_TX;
queueAndMaybePushInv(CInv(nInvType, hash)); queueAndMaybePushInv(CInv(nInvType, hash));
@ -5513,17 +5629,17 @@ bool PeerManagerImpl::SendMessages(CNode* pto)
uint256 chainlockHash = ::SerializeHash(clsig); uint256 chainlockHash = ::SerializeHash(clsig);
queueAndMaybePushInv(CInv(MSG_CLSIG, chainlockHash)); queueAndMaybePushInv(CInv(MSG_CLSIG, chainlockHash));
} }
pto->m_tx_relay->m_last_mempool_req = std::chrono::duration_cast<std::chrono::seconds>(current_time); peer->m_tx_relay->m_last_mempool_req = std::chrono::duration_cast<std::chrono::seconds>(current_time);
} }
// Determine transactions to relay // Determine transactions to relay
if (fSendTrickle) { if (fSendTrickle) {
LOCK(pto->m_tx_relay->cs_filter); LOCK(peer->m_tx_relay->m_bloom_filter_mutex);
// Produce a vector with all candidates for sending // Produce a vector with all candidates for sending
std::vector<std::set<uint256>::iterator> vInvTx; std::vector<std::set<uint256>::iterator> vInvTx;
vInvTx.reserve(pto->m_tx_relay->setInventoryTxToSend.size()); vInvTx.reserve(peer->m_tx_relay->m_tx_inventory_to_send.size());
for (std::set<uint256>::iterator it = pto->m_tx_relay->setInventoryTxToSend.begin(); it != pto->m_tx_relay->setInventoryTxToSend.end(); it++) { for (std::set<uint256>::iterator it = peer->m_tx_relay->m_tx_inventory_to_send.begin(); it != peer->m_tx_relay->m_tx_inventory_to_send.end(); it++) {
vInvTx.push_back(it); vInvTx.push_back(it);
} }
// Topologically and fee-rate sort the inventory we send for privacy and priority reasons. // Topologically and fee-rate sort the inventory we send for privacy and priority reasons.
@ -5540,9 +5656,9 @@ bool PeerManagerImpl::SendMessages(CNode* pto)
vInvTx.pop_back(); vInvTx.pop_back();
uint256 hash = *it; uint256 hash = *it;
// Remove it from the to-be-sent set // Remove it from the to-be-sent set
pto->m_tx_relay->setInventoryTxToSend.erase(it); peer->m_tx_relay->m_tx_inventory_to_send.erase(it);
// Check if not in the filter already // Check if not in the filter already
if (pto->m_tx_relay->filterInventoryKnown.contains(hash)) { if (peer->m_tx_relay->m_tx_inventory_known_filter.contains(hash)) {
continue; continue;
} }
// Not in the mempool anymore? don't bother sending it. // Not in the mempool anymore? don't bother sending it.
@ -5550,7 +5666,7 @@ bool PeerManagerImpl::SendMessages(CNode* pto)
if (!txinfo.tx) { if (!txinfo.tx) {
continue; continue;
} }
if (pto->m_tx_relay->pfilter && !pto->m_tx_relay->pfilter->IsRelevantAndUpdate(*txinfo.tx)) continue; if (peer->m_tx_relay->m_bloom_filter && !peer->m_tx_relay->m_bloom_filter->IsRelevantAndUpdate(*txinfo.tx)) continue;
// Send // Send
State(pto->GetId())->m_recently_announced_invs.insert(hash); State(pto->GetId())->m_recently_announced_invs.insert(hash);
nRelayedTransactions++; nRelayedTransactions++;
@ -5575,15 +5691,15 @@ bool PeerManagerImpl::SendMessages(CNode* pto)
{ {
// Send non-tx/non-block inventory items // Send non-tx/non-block inventory items
LOCK2(pto->m_tx_relay->cs_tx_inventory, pto->m_tx_relay->cs_filter); LOCK2(peer->m_tx_relay->m_tx_inventory_mutex, peer->m_tx_relay->m_bloom_filter_mutex);
bool fSendIS = pto->m_tx_relay->fRelayTxes && !pto->IsBlockRelayOnly(); bool fSendIS = peer->m_tx_relay->m_relay_txs && !pto->IsBlockRelayOnly();
for (const auto& inv : pto->m_tx_relay->vInventoryOtherToSend) { for (const auto& inv : peer->m_tx_relay->vInventoryOtherToSend) {
if (!pto->m_tx_relay->fRelayTxes && NetMessageViolatesBlocksOnly(inv.GetCommand())) { if (!peer->m_tx_relay->m_relay_txs && NetMessageViolatesBlocksOnly(inv.GetCommand())) {
continue; continue;
} }
if (pto->m_tx_relay->filterInventoryKnown.contains(inv.hash)) { if (peer->m_tx_relay->m_tx_inventory_known_filter.contains(inv.hash)) {
continue; continue;
} }
if (!fSendIS && inv.type == MSG_ISDLOCK) { if (!fSendIS && inv.type == MSG_ISDLOCK) {
@ -5591,7 +5707,7 @@ bool PeerManagerImpl::SendMessages(CNode* pto)
} }
queueAndMaybePushInv(inv); queueAndMaybePushInv(inv);
} }
pto->m_tx_relay->vInventoryOtherToSend.clear(); peer->m_tx_relay->vInventoryOtherToSend.clear();
} }
} }
if (!vInv.empty()) if (!vInv.empty())
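For orientation while reading the hunks above: with bitcoin#21160 backported, the per-peer transaction-relay state that used to hang off CNode now lives on the net_processing Peer object. A minimal sketch of that structure, using only the member names that appear in this diff (the struct name, types, initial values and lock annotations are approximations, not copied from the tree):

    #include <bloom.h>      // CBloomFilter, CRollingBloomFilter
    #include <protocol.h>   // CInv
    #include <sync.h>       // Mutex, GUARDED_BY
    #include <uint256.h>
    #include <atomic>
    #include <chrono>
    #include <memory>
    #include <set>
    #include <vector>

    // Sketch only: member names match the hunks above, everything else is approximated.
    struct TxRelaySketch {
        Mutex m_bloom_filter_mutex;
        // Whether the peer wants txs relayed (version fRelay, later filter* messages).
        bool m_relay_txs GUARDED_BY(m_bloom_filter_mutex){false};
        std::unique_ptr<CBloomFilter> m_bloom_filter GUARDED_BY(m_bloom_filter_mutex);

        Mutex m_tx_inventory_mutex;
        // Inventory the peer already knows about; announced items are inserted here.
        CRollingBloomFilter m_tx_inventory_known_filter GUARDED_BY(m_tx_inventory_mutex){50000, 0.000001};
        std::set<uint256> m_tx_inventory_to_send GUARDED_BY(m_tx_inventory_mutex);
        // Dash-specific: non-tx/non-block inventory (islocks, clsigs, governance objects, ...).
        std::vector<CInv> vInventoryOtherToSend GUARDED_BY(m_tx_inventory_mutex);
        bool m_send_mempool GUARDED_BY(m_tx_inventory_mutex){false}; // pending BIP35 mempool request
        std::chrono::microseconds m_next_inv_send_time GUARDED_BY(m_tx_inventory_mutex){0};
        std::atomic<std::chrono::seconds> m_last_mempool_req{std::chrono::seconds{0}};
    };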


@ -47,6 +47,7 @@ struct CNodeStateStats {
int m_starting_height = -1; int m_starting_height = -1;
std::chrono::microseconds m_ping_wait; std::chrono::microseconds m_ping_wait;
std::vector<int> vHeightInFlight; std::vector<int> vHeightInFlight;
bool m_relay_txs;
uint64_t m_addr_processed = 0; uint64_t m_addr_processed = 0;
uint64_t m_addr_rate_limited = 0; uint64_t m_addr_rate_limited = 0;
bool m_addr_relay_enabled{false}; bool m_addr_relay_enabled{false};
@ -74,6 +75,12 @@ public:
/** Send ping message to all peers */ /** Send ping message to all peers */
virtual void SendPings() = 0; virtual void SendPings() = 0;
/** Is an inventory in the known inventory filter. Used by InstantSend. */
virtual bool IsInvInFilter(NodeId nodeid, const uint256& hash) const = 0;
/** Broadcast inventory message to a specific peer. */
virtual void PushInventory(NodeId nodeid, const CInv& inv) = 0;
/** Relay inventories to all peers */ /** Relay inventories to all peers */
virtual void RelayInv(CInv &inv, const int minProtoVersion = MIN_PEER_PROTO_VERSION) = 0; virtual void RelayInv(CInv &inv, const int minProtoVersion = MIN_PEER_PROTO_VERSION) = 0;
virtual void RelayInvFiltered(CInv &inv, const CTransaction &relatedTx, virtual void RelayInvFiltered(CInv &inv, const CTransaction &relatedTx,
@ -110,9 +117,6 @@ public:
virtual bool IsBanned(NodeId pnode) = 0; virtual bool IsBanned(NodeId pnode) = 0;
/* Can we send addr messages to a peer. Used by InstantSend. */
virtual bool CanRelayAddrs(NodeId pnode) const = 0;
/** Whether we've completed initial sync yet, for determining when to turn /** Whether we've completed initial sync yet, for determining when to turn
* on extra block-relay-only peers. */ * on extra block-relay-only peers. */
bool m_initial_sync_finished{false}; bool m_initial_sync_finished{false};
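The two interface additions above replace direct pokes at CNode's inventory state from Dash-specific relay code such as InstantSend. A hypothetical caller, with the function name and surrounding logic invented purely for illustration, might look like:

    #include <net_processing.h>
    #include <protocol.h>
    #include <uint256.h>

    // Illustrative only: announce an InstantSend lock to a single peer unless the
    // peer's known-inventory filter says it has already seen it.
    void MaybePushIsdLock(PeerManager& peerman, NodeId nodeid, const uint256& islock_hash)
    {
        if (peerman.IsInvInFilter(nodeid, islock_hash)) return;         // peer already knows this hash
        peerman.PushInventory(nodeid, CInv{MSG_ISDLOCK, islock_hash});  // queued for the next inv trickle
    }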


@ -1270,10 +1270,10 @@
<item row="14" column="0"> <item row="14" column="0">
<widget class="QLabel" name="peerLastTxLabel"> <widget class="QLabel" name="peerLastTxLabel">
<property name="toolTip"> <property name="toolTip">
<string>Elapsed time since a novel transaction accepted into our mempool was received from this peer.</string> <string extracomment="Tooltip text for the Last Transaction field in the peer details area.">Elapsed time since a novel transaction accepted into our mempool was received from this peer.</string>
</property> </property>
<property name="text"> <property name="text">
<string>Last Tx</string> <string>Last Transaction</string>
</property> </property>
</widget> </widget>
</item> </item>
@ -1507,6 +1507,84 @@
</widget> </widget>
</item> </item>
<item row="24" column="0"> <item row="24" column="0">
<widget class="QLabel" name="peerAddrRelayEnabledLabel">
<property name="toolTip">
<string extracomment="Tooltip text for the Address Relay field in the peer details area.">Whether we relay addresses to this peer.</string>
</property>
<property name="text">
<string>Address Relay</string>
</property>
</widget>
</item>
<item row="24" column="2">
<widget class="QLabel" name="peerAddrRelayEnabled">
<property name="cursor">
<cursorShape>IBeamCursor</cursorShape>
</property>
<property name="text">
<string>N/A</string>
</property>
<property name="textFormat">
<enum>Qt::PlainText</enum>
</property>
<property name="textInteractionFlags">
<set>Qt::LinksAccessibleByMouse|Qt::TextSelectableByKeyboard|Qt::TextSelectableByMouse</set>
</property>
</widget>
</item>
<item row="25" column="0">
<widget class="QLabel" name="peerAddrProcessedLabel">
<property name="toolTip">
<string extracomment="Tooltip text for the Addresses Processed field in the peer details area.">Total number of addresses processed, excluding those dropped due to rate-limiting.</string>
</property>
<property name="text">
<string>Addresses Processed</string>
</property>
</widget>
</item>
<item row="25" column="2">
<widget class="QLabel" name="peerAddrProcessed">
<property name="cursor">
<cursorShape>IBeamCursor</cursorShape>
</property>
<property name="text">
<string>N/A</string>
</property>
<property name="textFormat">
<enum>Qt::PlainText</enum>
</property>
<property name="textInteractionFlags">
<set>Qt::LinksAccessibleByMouse|Qt::TextSelectableByKeyboard|Qt::TextSelectableByMouse</set>
</property>
</widget>
</item>
<item row="26" column="0">
<widget class="QLabel" name="peerAddrRateLimitedLabel">
<property name="toolTip">
<string extracomment="Tooltip text for the Addresses Rate-Limited field in the peer details area.">Total number of addresses dropped due to rate-limiting.</string>
</property>
<property name="text">
<string>Addresses Rate-Limited</string>
</property>
</widget>
</item>
<item row="26" column="2">
<widget class="QLabel" name="peerAddrRateLimited">
<property name="cursor">
<cursorShape>IBeamCursor</cursorShape>
</property>
<property name="text">
<string>N/A</string>
</property>
<property name="textFormat">
<enum>Qt::PlainText</enum>
</property>
<property name="textInteractionFlags">
<set>Qt::LinksAccessibleByMouse|Qt::TextSelectableByKeyboard|Qt::TextSelectableByMouse</set>
</property>
</widget>
</item>
<item row="27" column="0">
<spacer name="verticalSpacer_3"> <spacer name="verticalSpacer_3">
<property name="orientation"> <property name="orientation">
<enum>Qt::Vertical</enum> <enum>Qt::Vertical</enum>


@ -23,6 +23,7 @@
#include <script/script.h> #include <script/script.h>
#include <script/standard.h> #include <script/standard.h>
#include <util/system.h> #include <util/system.h>
#include <util/time.h>
#include <cmath> #include <cmath>
@ -1673,8 +1674,9 @@ QString ConnectionTypeToQString(ConnectionType conn_type)
assert(false); assert(false);
} }
QString formatDurationStr(int secs) QString formatDurationStr(std::chrono::seconds dur)
{ {
const auto secs = count_seconds(dur);
QStringList strList; QStringList strList;
int days = secs / 86400; int days = secs / 86400;
int hours = (secs % 86400) / 3600; int hours = (secs % 86400) / 3600;


@ -401,7 +401,7 @@ namespace GUIUtil
QString ConnectionTypeToQString(ConnectionType conn_type); QString ConnectionTypeToQString(ConnectionType conn_type);
/** Convert seconds into a QString with days, hours, mins, secs */ /** Convert seconds into a QString with days, hours, mins, secs */
QString formatDurationStr(int secs); QString formatDurationStr(std::chrono::seconds dur);
/** Format CNodeStats.nServices bitmask into a user-readable string */ /** Format CNodeStats.nServices bitmask into a user-readable string */
QString formatServicesStr(quint64 mask); QString formatServicesStr(quint64 mask);


@ -1152,7 +1152,7 @@ void RPCConsole::on_sldGraphRange_valueChanged(int value)
void RPCConsole::setTrafficGraphRange(TrafficGraphData::GraphRange range) void RPCConsole::setTrafficGraphRange(TrafficGraphData::GraphRange range)
{ {
ui->trafficGraph->setGraphRangeMins(range); ui->trafficGraph->setGraphRangeMins(range);
ui->lblGraphRange->setText(GUIUtil::formatDurationStr(TrafficGraphData::RangeMinutes[range] * 60)); ui->lblGraphRange->setText(GUIUtil::formatDurationStr(std::chrono::minutes{TrafficGraphData::RangeMinutes[range]}));
} }
void RPCConsole::peerLayoutAboutToChange() void RPCConsole::peerLayoutAboutToChange()
@ -1236,13 +1236,12 @@ void RPCConsole::updateDetailWidget()
peerAddrDetails += "<br />" + tr("via %1").arg(QString::fromStdString(stats->nodeStats.addrLocal)); peerAddrDetails += "<br />" + tr("via %1").arg(QString::fromStdString(stats->nodeStats.addrLocal));
ui->peerHeading->setText(peerAddrDetails); ui->peerHeading->setText(peerAddrDetails);
ui->peerServices->setText(GUIUtil::formatServicesStr(stats->nodeStats.nServices)); ui->peerServices->setText(GUIUtil::formatServicesStr(stats->nodeStats.nServices));
ui->peerRelayTxes->setText(stats->nodeStats.fRelayTxes ? "Yes" : "No"); const auto time_now{GetTime<std::chrono::seconds>()};
const int64_t time_now{GetTimeSeconds()}; ui->peerConnTime->setText(GUIUtil::formatDurationStr(time_now - std::chrono::seconds{stats->nodeStats.nTimeConnected}));
ui->peerConnTime->setText(GUIUtil::formatDurationStr(time_now - stats->nodeStats.nTimeConnected)); ui->peerLastBlock->setText(TimeDurationField(time_now, std::chrono::seconds{stats->nodeStats.nLastBlockTime}));
ui->peerLastBlock->setText(TimeDurationField(time_now, stats->nodeStats.nLastBlockTime)); ui->peerLastTx->setText(TimeDurationField(time_now, std::chrono::seconds{stats->nodeStats.nLastTXTime}));
ui->peerLastTx->setText(TimeDurationField(time_now, stats->nodeStats.nLastTXTime)); ui->peerLastSend->setText(TimeDurationField(time_now, stats->nodeStats.m_last_send));
ui->peerLastSend->setText(TimeDurationField(time_now, stats->nodeStats.nLastSend)); ui->peerLastRecv->setText(TimeDurationField(time_now, stats->nodeStats.m_last_recv));
ui->peerLastRecv->setText(TimeDurationField(time_now, stats->nodeStats.nLastRecv));
ui->peerBytesSent->setText(GUIUtil::formatBytes(stats->nodeStats.nSendBytes)); ui->peerBytesSent->setText(GUIUtil::formatBytes(stats->nodeStats.nSendBytes));
ui->peerBytesRecv->setText(GUIUtil::formatBytes(stats->nodeStats.nRecvBytes)); ui->peerBytesRecv->setText(GUIUtil::formatBytes(stats->nodeStats.nRecvBytes));
ui->peerPingTime->setText(GUIUtil::formatPingTime(stats->nodeStats.m_last_ping_time)); ui->peerPingTime->setText(GUIUtil::formatPingTime(stats->nodeStats.m_last_ping_time));
@ -1292,6 +1291,10 @@ void RPCConsole::updateDetailWidget()
ui->peerHeight->setText(QString::number(stats->nodeStateStats.m_starting_height)); ui->peerHeight->setText(QString::number(stats->nodeStateStats.m_starting_height));
ui->peerPingWait->setText(GUIUtil::formatPingTime(stats->nodeStateStats.m_ping_wait)); ui->peerPingWait->setText(GUIUtil::formatPingTime(stats->nodeStateStats.m_ping_wait));
ui->peerAddrRelayEnabled->setText(stats->nodeStateStats.m_addr_relay_enabled ? "Yes" : "No");
ui->peerAddrProcessed->setText(QString::number(stats->nodeStateStats.m_addr_processed));
ui->peerAddrRateLimited->setText(QString::number(stats->nodeStateStats.m_addr_rate_limited));
ui->peerRelayTxes->setText(stats->nodeStateStats.m_relay_txs ? "Yes" : "No");
} }
ui->detailWidget->show(); ui->detailWidget->show();


@ -193,8 +193,9 @@ private:
void updateNetworkState(); void updateNetworkState();
/** Helper for the output of a time duration field. Inputs are UNIX epoch times. */ /** Helper for the output of a time duration field. Inputs are UNIX epoch times. */
QString TimeDurationField(uint64_t time_now, uint64_t time_at_event) const { QString TimeDurationField(std::chrono::seconds time_now, std::chrono::seconds time_at_event) const
return time_at_event ? GUIUtil::formatDurationStr(time_now - time_at_event) : tr("Never"); {
return time_at_event.count() ? GUIUtil::formatDurationStr(time_now - time_at_event) : tr("Never");
} }
private Q_SLOTS: private Q_SLOTS:


@ -219,9 +219,8 @@ static RPCHelpMan getpeerinfo()
obj.pushKV("verified_pubkey_hash", stats.verifiedPubKeyHash.ToString()); obj.pushKV("verified_pubkey_hash", stats.verifiedPubKeyHash.ToString());
} }
obj.pushKV("servicesnames", GetServicesNames(stats.nServices)); obj.pushKV("servicesnames", GetServicesNames(stats.nServices));
obj.pushKV("relaytxes", stats.fRelayTxes); obj.pushKV("lastsend", count_seconds(stats.m_last_send));
obj.pushKV("lastsend", stats.nLastSend); obj.pushKV("lastrecv", count_seconds(stats.m_last_recv));
obj.pushKV("lastrecv", stats.nLastRecv);
obj.pushKV("last_transaction", stats.nLastTXTime); obj.pushKV("last_transaction", stats.nLastTXTime);
obj.pushKV("last_block", stats.nLastBlockTime); obj.pushKV("last_block", stats.nLastBlockTime);
obj.pushKV("bytessent", stats.nSendBytes); obj.pushKV("bytessent", stats.nSendBytes);
@ -258,6 +257,7 @@ static RPCHelpMan getpeerinfo()
heights.push_back(height); heights.push_back(height);
} }
obj.pushKV("inflight", heights); obj.pushKV("inflight", heights);
obj.pushKV("relaytxes", statestats.m_relay_txs);
obj.pushKV("addr_relay_enabled", statestats.m_addr_relay_enabled); obj.pushKV("addr_relay_enabled", statestats.m_addr_relay_enabled);
obj.pushKV("addr_processed", statestats.m_addr_processed); obj.pushKV("addr_processed", statestats.m_addr_processed);
obj.pushKV("addr_rate_limited", statestats.m_addr_rate_limited); obj.pushKV("addr_rate_limited", statestats.m_addr_rate_limited);


@ -66,7 +66,7 @@ BOOST_AUTO_TEST_CASE(outbound_slow_chain_eviction)
const CChainParams& chainparams = Params(); const CChainParams& chainparams = Params();
auto connman = std::make_unique<CConnman>(0x1337, 0x1337, *m_node.addrman); auto connman = std::make_unique<CConnman>(0x1337, 0x1337, *m_node.addrman);
// Disable inactivity checks for this test to avoid interference // Disable inactivity checks for this test to avoid interference
static_cast<ConnmanTestMsg*>(connman.get())->SetPeerConnectTimeout(99999); static_cast<ConnmanTestMsg*>(connman.get())->SetPeerConnectTimeout(99999s);
auto peerLogic = PeerManager::make(chainparams, *connman, *m_node.addrman, nullptr, *m_node.scheduler, auto peerLogic = PeerManager::make(chainparams, *connman, *m_node.addrman, nullptr, *m_node.scheduler,
*m_node.chainman, *m_node.mempool, *m_node.mn_metaman, *m_node.mn_sync, *m_node.chainman, *m_node.mempool, *m_node.mn_metaman, *m_node.mn_sync,
*m_node.govman, *m_node.sporkman, /* mn_activeman = */ nullptr, m_node.dmnman, *m_node.govman, *m_node.sporkman, /* mn_activeman = */ nullptr, m_node.dmnman,


@ -60,20 +60,6 @@ FUZZ_TARGET_INIT(net, initialize_net)
node.Release(); node.Release();
} }
}, },
[&] {
const std::optional<CInv> inv_opt = ConsumeDeserializable<CInv>(fuzzed_data_provider);
if (!inv_opt) {
return;
}
node.AddKnownInventory(inv_opt->hash);
},
[&] {
const std::optional<CInv> inv_opt = ConsumeDeserializable<CInv>(fuzzed_data_provider);
if (!inv_opt) {
return;
}
node.PushInventory(*inv_opt);
},
[&] { [&] {
const std::optional<CService> service_opt = ConsumeDeserializable<CService>(fuzzed_data_provider); const std::optional<CService> service_opt = ConsumeDeserializable<CService>(fuzzed_data_provider);
if (!service_opt) { if (!service_opt) {


@ -26,7 +26,7 @@ FUZZ_TARGET(node_eviction)
/* nLastBlockTime */ fuzzed_data_provider.ConsumeIntegral<int64_t>(), /* nLastBlockTime */ fuzzed_data_provider.ConsumeIntegral<int64_t>(),
/* nLastTXTime */ fuzzed_data_provider.ConsumeIntegral<int64_t>(), /* nLastTXTime */ fuzzed_data_provider.ConsumeIntegral<int64_t>(),
/* fRelevantServices */ fuzzed_data_provider.ConsumeBool(), /* fRelevantServices */ fuzzed_data_provider.ConsumeBool(),
/* fRelayTxes */ fuzzed_data_provider.ConsumeBool(), /* m_relay_txs */ fuzzed_data_provider.ConsumeBool(),
/* fBloomFilter */ fuzzed_data_provider.ConsumeBool(), /* fBloomFilter */ fuzzed_data_provider.ConsumeBool(),
/* nKeyedNetGroup */ fuzzed_data_provider.ConsumeIntegral<uint64_t>(), /* nKeyedNetGroup */ fuzzed_data_provider.ConsumeIntegral<uint64_t>(),
/* prefer_evict */ fuzzed_data_provider.ConsumeBool(), /* prefer_evict */ fuzzed_data_provider.ConsumeBool(),


@ -86,11 +86,9 @@ void fuzz_target(FuzzBufferType buffer, const std::string& LIMIT_TO_MESSAGE_TYPE
} }
CNode& p2p_node = *ConsumeNodeAsUniquePtr(fuzzed_data_provider).release(); CNode& p2p_node = *ConsumeNodeAsUniquePtr(fuzzed_data_provider).release();
const bool successfully_connected{fuzzed_data_provider.ConsumeBool()};
p2p_node.fSuccessfullyConnected = successfully_connected;
connman.AddTestNode(p2p_node); connman.AddTestNode(p2p_node);
g_setup->m_node.peerman->InitializeNode(&p2p_node); g_setup->m_node.peerman->InitializeNode(&p2p_node);
FillNode(fuzzed_data_provider, p2p_node, /* init_version */ successfully_connected); FillNode(fuzzed_data_provider, connman, *g_setup->m_node.peerman, p2p_node);
const auto mock_time = ConsumeTime(fuzzed_data_provider); const auto mock_time = ConsumeTime(fuzzed_data_provider);
SetMockTime(mock_time); SetMockTime(mock_time);


@ -45,11 +45,8 @@ FUZZ_TARGET_INIT(process_messages, initialize_process_messages)
peers.push_back(ConsumeNodeAsUniquePtr(fuzzed_data_provider, i).release()); peers.push_back(ConsumeNodeAsUniquePtr(fuzzed_data_provider, i).release());
CNode& p2p_node = *peers.back(); CNode& p2p_node = *peers.back();
const bool successfully_connected{fuzzed_data_provider.ConsumeBool()};
p2p_node.fSuccessfullyConnected = successfully_connected;
p2p_node.fPauseSend = false;
g_setup->m_node.peerman->InitializeNode(&p2p_node); g_setup->m_node.peerman->InitializeNode(&p2p_node);
FillNode(fuzzed_data_provider, p2p_node, /* init_version */ successfully_connected); FillNode(fuzzed_data_provider, connman, *g_setup->m_node.peerman, p2p_node);
connman.AddTestNode(p2p_node); connman.AddTestNode(p2p_node);
} }


@ -2,8 +2,9 @@
// Distributed under the MIT software license, see the accompanying // Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php. // file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include <net_processing.h>
#include <netmessagemaker.h>
#include <test/fuzz/util.h> #include <test/fuzz/util.h>
#include <test/util/script.h> #include <test/util/script.h>
#include <util/overflow.h> #include <util/overflow.h>
#include <util/time.h> #include <util/time.h>
@ -199,22 +200,56 @@ bool FuzzedSock::IsConnected(std::string& errmsg) const
return false; return false;
} }
void FillNode(FuzzedDataProvider& fuzzed_data_provider, CNode& node, bool init_version) noexcept void FillNode(FuzzedDataProvider& fuzzed_data_provider, ConnmanTestMsg& connman, PeerManager& peerman, CNode& node) noexcept
{ {
const bool successfully_connected{fuzzed_data_provider.ConsumeBool()};
const ServiceFlags remote_services = ConsumeWeakEnum(fuzzed_data_provider, ALL_SERVICE_FLAGS); const ServiceFlags remote_services = ConsumeWeakEnum(fuzzed_data_provider, ALL_SERVICE_FLAGS);
const NetPermissionFlags permission_flags = ConsumeWeakEnum(fuzzed_data_provider, ALL_NET_PERMISSION_FLAGS); const NetPermissionFlags permission_flags = ConsumeWeakEnum(fuzzed_data_provider, ALL_NET_PERMISSION_FLAGS);
const int32_t version = fuzzed_data_provider.ConsumeIntegralInRange<int32_t>(MIN_PEER_PROTO_VERSION, std::numeric_limits<int32_t>::max()); const int32_t version = fuzzed_data_provider.ConsumeIntegralInRange<int32_t>(MIN_PEER_PROTO_VERSION, std::numeric_limits<int32_t>::max());
const bool filter_txs = fuzzed_data_provider.ConsumeBool(); const bool relay_txs{fuzzed_data_provider.ConsumeBool()};
node.nServices = remote_services; const CNetMsgMaker mm{0};
node.m_permissionFlags = permission_flags;
if (init_version) { CSerializedNetMsg msg_version{
node.nVersion = version; mm.Make(NetMsgType::VERSION,
node.SetCommonVersion(std::min(version, PROTOCOL_VERSION)); version, //
Using<CustomUintFormatter<8>>(remote_services), //
int64_t{}, // dummy time
int64_t{}, // ignored service bits
CService{}, // dummy
int64_t{}, // ignored service bits
CService{}, // ignored
uint64_t{1}, // dummy nonce
std::string{}, // dummy subver
int32_t{}, // dummy starting_height
relay_txs),
};
(void)connman.ReceiveMsgFrom(node, msg_version);
node.fPauseSend = false;
connman.ProcessMessagesOnce(node);
{
LOCK(node.cs_sendProcessing);
peerman.SendMessages(&node);
} }
if (node.m_tx_relay != nullptr) { if (node.fDisconnect) return;
LOCK(node.m_tx_relay->cs_filter); assert(node.nVersion == version);
node.m_tx_relay->fRelayTxes = filter_txs; assert(node.GetCommonVersion() == std::min(version, PROTOCOL_VERSION));
assert(node.nServices == remote_services);
CNodeStateStats statestats;
assert(peerman.GetNodeStateStats(node.GetId(), statestats));
assert(statestats.m_relay_txs == (relay_txs && !node.IsBlockOnlyConn()));
node.m_permissionFlags = permission_flags;
if (successfully_connected) {
CSerializedNetMsg msg_verack{mm.Make(NetMsgType::VERACK)};
(void)connman.ReceiveMsgFrom(node, msg_verack);
node.fPauseSend = false;
connman.ProcessMessagesOnce(node);
{
LOCK(node.cs_sendProcessing);
peerman.SendMessages(&node);
}
assert(node.fSuccessfullyConnected == true);
} }
} }


@ -38,6 +38,8 @@
#include <string> #include <string>
#include <vector> #include <vector>
class PeerManager;
template <typename... Callables> template <typename... Callables>
void CallOneOf(FuzzedDataProvider& fuzzed_data_provider, Callables... callables) void CallOneOf(FuzzedDataProvider& fuzzed_data_provider, Callables... callables)
{ {
@ -309,7 +311,7 @@ inline CAddress ConsumeAddress(FuzzedDataProvider& fuzzed_data_provider) noexcep
template <bool ReturnUniquePtr = false> template <bool ReturnUniquePtr = false>
auto ConsumeNode(FuzzedDataProvider& fuzzed_data_provider, const std::optional<NodeId>& node_id_in = std::nullopt) noexcept auto ConsumeNode(FuzzedDataProvider& fuzzed_data_provider, const std::optional<NodeId>& node_id_in = std::nullopt) noexcept
{ {
const NodeId node_id = node_id_in.value_or(fuzzed_data_provider.ConsumeIntegral<NodeId>()); const NodeId node_id = node_id_in.value_or(fuzzed_data_provider.ConsumeIntegralInRange<NodeId>(0, std::numeric_limits<NodeId>::max()));
const ServiceFlags local_services = ConsumeWeakEnum(fuzzed_data_provider, ALL_SERVICE_FLAGS); const ServiceFlags local_services = ConsumeWeakEnum(fuzzed_data_provider, ALL_SERVICE_FLAGS);
const SOCKET socket = INVALID_SOCKET; const SOCKET socket = INVALID_SOCKET;
const CAddress address = ConsumeAddress(fuzzed_data_provider); const CAddress address = ConsumeAddress(fuzzed_data_provider);
@ -328,7 +330,7 @@ auto ConsumeNode(FuzzedDataProvider& fuzzed_data_provider, const std::optional<N
} }
inline std::unique_ptr<CNode> ConsumeNodeAsUniquePtr(FuzzedDataProvider& fdp, const std::optional<NodeId>& node_id_in = std::nullopt) { return ConsumeNode<true>(fdp, node_id_in); } inline std::unique_ptr<CNode> ConsumeNodeAsUniquePtr(FuzzedDataProvider& fdp, const std::optional<NodeId>& node_id_in = std::nullopt) { return ConsumeNode<true>(fdp, node_id_in); }
void FillNode(FuzzedDataProvider& fuzzed_data_provider, CNode& node, bool init_version) noexcept; void FillNode(FuzzedDataProvider& fuzzed_data_provider, ConnmanTestMsg& connman, PeerManager& peerman, CNode& node) noexcept;
class FuzzedFileProvider class FuzzedFileProvider
{ {


@ -31,7 +31,7 @@ std::vector<NodeEvictionCandidate> GetRandomNodeEvictionCandidates(const int n_c
/* nLastBlockTime */ static_cast<int64_t>(random_context.randrange(100)), /* nLastBlockTime */ static_cast<int64_t>(random_context.randrange(100)),
/* nLastTXTime */ static_cast<int64_t>(random_context.randrange(100)), /* nLastTXTime */ static_cast<int64_t>(random_context.randrange(100)),
/* fRelevantServices */ random_context.randbool(), /* fRelevantServices */ random_context.randbool(),
/* fRelayTxes */ random_context.randbool(), /* m_relay_txs */ random_context.randbool(),
/* fBloomFilter */ random_context.randbool(), /* fBloomFilter */ random_context.randbool(),
/* nKeyedNetGroup */ random_context.randrange(100), /* nKeyedNetGroup */ random_context.randrange(100),
/* prefer_evict */ random_context.randbool(), /* prefer_evict */ random_context.randbool(),
@ -289,7 +289,7 @@ BOOST_AUTO_TEST_CASE(peer_eviction_test)
number_of_nodes, [number_of_nodes](NodeEvictionCandidate& candidate) { number_of_nodes, [number_of_nodes](NodeEvictionCandidate& candidate) {
candidate.nLastBlockTime = number_of_nodes - candidate.id; candidate.nLastBlockTime = number_of_nodes - candidate.id;
if (candidate.id <= 7) { if (candidate.id <= 7) {
candidate.fRelayTxes = false; candidate.m_relay_txs = false;
candidate.fRelevantServices = true; candidate.fRelevantServices = true;
} }
}, },
@ -308,7 +308,7 @@ BOOST_AUTO_TEST_CASE(peer_eviction_test)
number_of_nodes, [number_of_nodes](NodeEvictionCandidate& candidate) { number_of_nodes, [number_of_nodes](NodeEvictionCandidate& candidate) {
candidate.nLastBlockTime = number_of_nodes - candidate.id; candidate.nLastBlockTime = number_of_nodes - candidate.id;
if (candidate.id <= 7) { if (candidate.id <= 7) {
candidate.fRelayTxes = false; candidate.m_relay_txs = false;
candidate.fRelevantServices = true; candidate.fRelevantServices = true;
} }
}, },


@ -96,9 +96,10 @@ BOOST_AUTO_TEST_CASE(addtimedata)
// not to fix this because it prevents possible attacks. See the comment in AddTimeData() or issue #4521 // not to fix this because it prevents possible attacks. See the comment in AddTimeData() or issue #4521
// for a more detailed explanation. // for a more detailed explanation.
MultiAddTimeData(2, 100); // filter median is 100 now, but nTimeOffset will not change MultiAddTimeData(2, 100); // filter median is 100 now, but nTimeOffset will not change
// We want this test to end with nTimeOffset==0, otherwise subsequent tests of the suite will fail.
BOOST_CHECK_EQUAL(GetTimeOffset(), 0); BOOST_CHECK_EQUAL(GetTimeOffset(), 0);
// We want this test to end with nTimeOffset==0, otherwise subsequent tests of the suite will fail. TestOnlyResetTimeData();
} }
BOOST_AUTO_TEST_SUITE_END() BOOST_AUTO_TEST_SUITE_END()


@ -16,7 +16,7 @@
struct ConnmanTestMsg : public CConnman { struct ConnmanTestMsg : public CConnman {
using CConnman::CConnman; using CConnman::CConnman;
void SetPeerConnectTimeout(int64_t timeout) void SetPeerConnectTimeout(std::chrono::seconds timeout)
{ {
m_peer_connect_timeout = timeout; m_peer_connect_timeout = timeout;
} }


@ -39,29 +39,31 @@ int64_t GetAdjustedTime()
#define BITCOIN_TIMEDATA_MAX_SAMPLES 200 #define BITCOIN_TIMEDATA_MAX_SAMPLES 200
static std::set<CNetAddr> g_sources;
static CMedianFilter<int64_t> g_time_offsets{BITCOIN_TIMEDATA_MAX_SAMPLES, 0};
static bool g_warning_emitted;
void AddTimeData(const CNetAddr& ip, int64_t nOffsetSample) void AddTimeData(const CNetAddr& ip, int64_t nOffsetSample)
{ {
LOCK(g_timeoffset_mutex); LOCK(g_timeoffset_mutex);
// Ignore duplicates // Ignore duplicates
static std::set<CNetAddr> setKnown; if (g_sources.size() == BITCOIN_TIMEDATA_MAX_SAMPLES)
if (setKnown.size() == BITCOIN_TIMEDATA_MAX_SAMPLES)
return; return;
if (!setKnown.insert(ip).second) if (!g_sources.insert(ip).second)
return; return;
// Add data // Add data
static CMedianFilter<int64_t> vTimeOffsets(BITCOIN_TIMEDATA_MAX_SAMPLES, 0); g_time_offsets.input(nOffsetSample);
vTimeOffsets.input(nOffsetSample); LogPrint(BCLog::NET, "added time data, samples %d, offset %+d (%+d minutes)\n", g_time_offsets.size(), nOffsetSample, nOffsetSample / 60);
LogPrint(BCLog::NET, "added time data, samples %d, offset %+d (%+d minutes)\n", vTimeOffsets.size(), nOffsetSample, nOffsetSample / 60);
// There is a known issue here (see issue #4521): // There is a known issue here (see issue #4521):
// //
// - The structure vTimeOffsets contains up to 200 elements, after which // - The structure g_time_offsets contains up to 200 elements, after which
// any new element added to it will not increase its size, replacing the // any new element added to it will not increase its size, replacing the
// oldest element. // oldest element.
// //
// - The condition to update nTimeOffset includes checking whether the // - The condition to update nTimeOffset includes checking whether the
// number of elements in vTimeOffsets is odd, which will never happen after // number of elements in g_time_offsets is odd, which will never happen after
// there are 200 elements. // there are 200 elements.
// //
// But in this case the 'bug' is protective against some attacks, and may // But in this case the 'bug' is protective against some attacks, and may
@ -71,9 +73,9 @@ void AddTimeData(const CNetAddr& ip, int64_t nOffsetSample)
// So we should hold off on fixing this and clean it up as part of // So we should hold off on fixing this and clean it up as part of
// a timing cleanup that strengthens it in a number of other ways. // a timing cleanup that strengthens it in a number of other ways.
// //
if (vTimeOffsets.size() >= 5 && vTimeOffsets.size() % 2 == 1) { if (g_time_offsets.size() >= 5 && g_time_offsets.size() % 2 == 1) {
int64_t nMedian = vTimeOffsets.median(); int64_t nMedian = g_time_offsets.median();
std::vector<int64_t> vSorted = vTimeOffsets.sorted(); std::vector<int64_t> vSorted = g_time_offsets.sorted();
// Only let other nodes change our time by so much // Only let other nodes change our time by so much
int64_t max_adjustment = std::max<int64_t>(0, gArgs.GetArg("-maxtimeadjustment", DEFAULT_MAX_TIME_ADJUSTMENT)); int64_t max_adjustment = std::max<int64_t>(0, gArgs.GetArg("-maxtimeadjustment", DEFAULT_MAX_TIME_ADJUSTMENT));
if (nMedian >= -max_adjustment && nMedian <= max_adjustment) { if (nMedian >= -max_adjustment && nMedian <= max_adjustment) {
@ -81,8 +83,7 @@ void AddTimeData(const CNetAddr& ip, int64_t nOffsetSample)
} else { } else {
nTimeOffset = 0; nTimeOffset = 0;
static bool fDone; if (!g_warning_emitted) {
if (!fDone) {
// If nobody has a time different than ours but within 5 minutes of ours, give a warning // If nobody has a time different than ours but within 5 minutes of ours, give a warning
bool fMatch = false; bool fMatch = false;
for (const int64_t nOffset : vSorted) { for (const int64_t nOffset : vSorted) {
@ -90,7 +91,7 @@ void AddTimeData(const CNetAddr& ip, int64_t nOffsetSample)
} }
if (!fMatch) { if (!fMatch) {
fDone = true; g_warning_emitted = true;
bilingual_str strMessage = strprintf(_("Please check that your computer's date and time are correct! If your clock is wrong, %s will not work properly."), PACKAGE_NAME); bilingual_str strMessage = strprintf(_("Please check that your computer's date and time are correct! If your clock is wrong, %s will not work properly."), PACKAGE_NAME);
SetMiscWarning(strMessage); SetMiscWarning(strMessage);
uiInterface.ThreadSafeMessageBox(strMessage, "", CClientUIInterface::MSG_WARNING); uiInterface.ThreadSafeMessageBox(strMessage, "", CClientUIInterface::MSG_WARNING);
@ -108,3 +109,12 @@ void AddTimeData(const CNetAddr& ip, int64_t nOffsetSample)
} }
} }
} }
void TestOnlyResetTimeData()
{
LOCK(g_timeoffset_mutex);
nTimeOffset = 0;
g_sources.clear();
g_time_offsets = CMedianFilter<int64_t>{BITCOIN_TIMEDATA_MAX_SAMPLES, 0};
g_warning_emitted = false;
}


@ -75,4 +75,9 @@ int64_t GetTimeOffset();
int64_t GetAdjustedTime(); int64_t GetAdjustedTime();
void AddTimeData(const CNetAddr& ip, int64_t nTime); void AddTimeData(const CNetAddr& ip, int64_t nTime);
/**
* Reset the internal state of GetTimeOffset(), GetAdjustedTime() and AddTimeData().
*/
void TestOnlyResetTimeData();
#endif // BITCOIN_TIMEDATA_H #endif // BITCOIN_TIMEDATA_H
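TestOnlyResetTimeData() exists so that tests which feed samples into AddTimeData() can put the shared offset/filter state back to its initial values, as the timedata_tests hunk above now does. A self-contained sketch of that pattern (the test name and sample value are hypothetical):

    #include <netaddress.h>
    #include <test/util/setup_common.h>
    #include <timedata.h>

    #include <boost/test/unit_test.hpp>

    // Hypothetical test: a single sample cannot move the median-based offset, and the
    // reset clears the source set, the median filter and nTimeOffset for later tests.
    BOOST_FIXTURE_TEST_CASE(timedata_reset_sketch, BasicTestingSetup)
    {
        CNetAddr source;
        source.SetInternal("timedata-reset-sketch"); // dummy, distinct source address
        AddTimeData(source, 60);
        BOOST_CHECK_EQUAL(GetTimeOffset(), 0);
        TestOnlyResetTimeData();
        BOOST_CHECK_EQUAL(GetTimeOffset(), 0);
    }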


@ -87,7 +87,7 @@ class P2PBlocksOnly(BitcoinTestFramework):
self.nodes[0].sendrawtransaction(tx_hex) self.nodes[0].sendrawtransaction(tx_hex)
# Bump time forward to ensure nNextInvSend timer pops # Bump time forward to ensure m_next_inv_send_time timer pops
self.nodes[0].setmocktime(int(time.time()) + 60) self.nodes[0].setmocktime(int(time.time()) + 60)
conn.sync_send_with_ping() conn.sync_send_with_ping()


@ -20,7 +20,7 @@ LENGTH_SIZE = 4
MSGTYPE_SIZE = 12 MSGTYPE_SIZE = 12
def mini_parser(dat_file): def mini_parser(dat_file):
"""Parse a data file created by CaptureMessage. """Parse a data file created by CaptureMessageToFile.
From the data file we'll only check the structure. From the data file we'll only check the structure.


@ -4,13 +4,12 @@
# file COPYING or http://www.opensource.org/licenses/mit-license.php. # file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test various net timeouts. """Test various net timeouts.
- Create three dashd nodes: - Create three peers:
no_verack_node - we never send a verack in response to their version no_verack_node - we never send a verack in response to their version
no_version_node - we never send a version (only a ping) no_version_node - we never send a version (only a ping)
no_send_node - we never send any P2P message. no_send_node - we never send any P2P message.
- Start all three nodes
- Wait 1 second - Wait 1 second
- Assert that we're connected - Assert that we're connected
- Send a ping to no_verack_node and no_version_node - Send a ping to no_verack_node and no_version_node
@ -21,12 +20,12 @@
- Assert that we're no longer connected (timeout to receive version/verack is 3 seconds) - Assert that we're no longer connected (timeout to receive version/verack is 3 seconds)
""" """
from time import sleep
from test_framework.messages import msg_ping from test_framework.messages import msg_ping
from test_framework.p2p import P2PInterface from test_framework.p2p import P2PInterface
from test_framework.test_framework import BitcoinTestFramework from test_framework.test_framework import BitcoinTestFramework
import time
class TestP2PConn(P2PInterface): class TestP2PConn(P2PInterface):
def on_version(self, message): def on_version(self, message):
@ -41,7 +40,14 @@ class TimeoutsTest(BitcoinTestFramework):
# set timeout to receive version/verack to 3 seconds # set timeout to receive version/verack to 3 seconds
self.extra_args = [["-peertimeout=3"]] self.extra_args = [["-peertimeout=3"]]
def mock_forward(self, delta):
self.mock_time += delta
self.nodes[0].setmocktime(self.mock_time)
def run_test(self): def run_test(self):
self.mock_time = int(time.time())
self.mock_forward(0)
# Setup the p2p connections # Setup the p2p connections
no_verack_node = self.nodes[0].add_p2p_connection(TestP2PConn(), wait_for_verack=False) no_verack_node = self.nodes[0].add_p2p_connection(TestP2PConn(), wait_for_verack=False)
no_version_node = self.nodes[0].add_p2p_connection(TestP2PConn(), send_version=False, wait_for_verack=False) no_version_node = self.nodes[0].add_p2p_connection(TestP2PConn(), send_version=False, wait_for_verack=False)
@ -51,7 +57,7 @@ class TimeoutsTest(BitcoinTestFramework):
# verack, since we never sent one # verack, since we never sent one
no_verack_node.wait_for_verack() no_verack_node.wait_for_verack()
sleep(1) self.mock_forward(1)
assert no_verack_node.is_connected assert no_verack_node.is_connected
assert no_version_node.is_connected assert no_version_node.is_connected
@ -60,7 +66,7 @@ class TimeoutsTest(BitcoinTestFramework):
no_verack_node.send_message(msg_ping()) no_verack_node.send_message(msg_ping())
no_version_node.send_message(msg_ping()) no_version_node.send_message(msg_ping())
sleep(1) self.mock_forward(1)
assert "version" in no_verack_node.last_message assert "version" in no_verack_node.last_message
@ -78,10 +84,10 @@ class TimeoutsTest(BitcoinTestFramework):
] ]
with self.nodes[0].assert_debug_log(expected_msgs=expected_timeout_logs): with self.nodes[0].assert_debug_log(expected_msgs=expected_timeout_logs):
sleep(3 + 1) # Sleep one second more than peertimeout self.mock_forward(3)
assert not no_verack_node.is_connected no_verack_node.wait_for_disconnect(timeout=1)
assert not no_version_node.is_connected no_version_node.wait_for_disconnect(timeout=1)
assert not no_send_node.is_connected no_send_node.wait_for_disconnect(timeout=1)
if __name__ == '__main__': if __name__ == '__main__':


@ -375,10 +375,10 @@ def write_config(config_path, *, n, chain, extra_config=""):
f.write("fixedseeds=0\n") f.write("fixedseeds=0\n")
f.write("listenonion=0\n") f.write("listenonion=0\n")
# Increase peertimeout to avoid disconnects while using mocktime. # Increase peertimeout to avoid disconnects while using mocktime.
# peertimeout is measured in wall clock time, so setting it to the # peertimeout is measured in mock time, so setting it large enough to
# duration of the longest test is sufficient. It can be overriden in # cover any duration in mock time is sufficient. It can be overridden
# tests. # in tests.
f.write("peertimeout=999999\n") f.write("peertimeout=999999999\n")
f.write("printtoconsole=0\n") f.write("printtoconsole=0\n")
f.write("upnp=0\n") f.write("upnp=0\n")
f.write("natpmp=0\n") f.write("natpmp=0\n")


@ -40,7 +40,7 @@ class ResendWalletTransactionsTest(BitcoinTestFramework):
return peer_first.tx_invs_received[int(txid, 16)] >= 1 return peer_first.tx_invs_received[int(txid, 16)] >= 1
self.wait_until(wait_p2p) self.wait_until(wait_p2p)
# Add a second peer since txs aren't rebroadcast to the same peer (see filterInventoryKnown) # Add a second peer since txs aren't rebroadcast to the same peer (see m_tx_inventory_known_filter)
peer_second = node.add_p2p_connection(P2PTxInvStore()) peer_second = node.add_p2p_connection(P2PTxInvStore())
self.log.info("Create a block") self.log.info("Create a block")


@ -98,9 +98,10 @@ EXPECTED_CIRCULAR_DEPENDENCIES=(
"coinjoin/client -> coinjoin/util -> wallet/wallet -> psbt -> node/transaction -> node/context -> coinjoin/context -> coinjoin/client" "coinjoin/client -> coinjoin/util -> wallet/wallet -> psbt -> node/transaction -> node/context -> coinjoin/context -> coinjoin/client"
"llmq/blockprocessor -> net_processing -> llmq/blockprocessor" "llmq/blockprocessor -> net_processing -> llmq/blockprocessor"
"llmq/chainlocks -> net_processing -> llmq/chainlocks" "llmq/chainlocks -> net_processing -> llmq/chainlocks"
"llmq/dkgsession -> net_processing -> llmq/quorums -> llmq/dkgsession"
"net_processing -> spork -> net_processing" "net_processing -> spork -> net_processing"
"evo/simplifiedmns -> llmq/blockprocessor -> net_processing -> evo/simplifiedmns" "evo/simplifiedmns -> llmq/blockprocessor -> net_processing -> evo/simplifiedmns"
"governance/governance -> governance/object -> net_processing -> governance/governance" "governance/governance -> net_processing -> governance/governance"
"llmq/blockprocessor -> net_processing -> llmq/context -> llmq/blockprocessor" "llmq/blockprocessor -> net_processing -> llmq/context -> llmq/blockprocessor"
"llmq/blockprocessor -> net_processing -> llmq/quorums -> llmq/blockprocessor" "llmq/blockprocessor -> net_processing -> llmq/quorums -> llmq/blockprocessor"
"llmq/chainlocks -> net_processing -> llmq/context -> llmq/chainlocks" "llmq/chainlocks -> net_processing -> llmq/context -> llmq/chainlocks"