From cbff29a6306a34148d69731ed9e71a5dd3e69c90 Mon Sep 17 00:00:00 2001 From: Kittywhiskers Van Gogh <63189531+kwvg@users.noreply.github.com> Date: Tue, 27 Aug 2024 18:53:33 +0000 Subject: [PATCH 01/14] partial bitcoin#20524: Move MIN_VERSION_SUPPORTED to p2p.py excludes: - 9f21ed4037758f407b536c0dd129f8da83173c79 --- test/functional/p2p_filter.py | 15 ++++++++-- test/functional/p2p_leak.py | 12 ++++++-- test/functional/test_framework/messages.py | 33 ++++++++------------- test/functional/test_framework/p2p.py | 29 +++++++++++++----- test/functional/test_framework/test_node.py | 4 +-- 5 files changed, 58 insertions(+), 35 deletions(-) diff --git a/test/functional/p2p_filter.py b/test/functional/p2p_filter.py index fa1f9dcf17..359cfb9c34 100755 --- a/test/functional/p2p_filter.py +++ b/test/functional/p2p_filter.py @@ -19,7 +19,13 @@ from test_framework.messages import ( msg_mempool, msg_version, ) -from test_framework.p2p import P2PInterface, p2p_lock +from test_framework.p2p import ( + P2PInterface, + P2P_SERVICES, + P2P_SUBVERSION, + P2P_VERSION, + p2p_lock, +) from test_framework.script import MAX_SCRIPT_ELEMENT_SIZE from test_framework.test_framework import BitcoinTestFramework @@ -215,9 +221,12 @@ class FilterTest(BitcoinTestFramework): self.log.info('Test BIP 37 for a node with fRelay = False') # Add peer but do not send version yet filter_peer_without_nrelay = self.nodes[0].add_p2p_connection(P2PBloomFilter(), send_version=False, wait_for_verack=False) - # Send version with fRelay=False + # Send version with relay=False version_without_fRelay = msg_version() - version_without_fRelay.nRelay = 0 + version_without_fRelay.nVersion = P2P_VERSION + version_without_fRelay.strSubVer = P2P_SUBVERSION + version_without_fRelay.nServices = P2P_SERVICES + version_without_fRelay.relay = 0 filter_peer_without_nrelay.send_message(version_without_fRelay) filter_peer_without_nrelay.wait_for_verack() assert not self.nodes[0].getpeerinfo()[0]['relaytxes'] diff --git 
a/test/functional/p2p_leak.py b/test/functional/p2p_leak.py index 16b94c6918..da037b60aa 100755 --- a/test/functional/p2p_leak.py +++ b/test/functional/p2p_leak.py @@ -18,7 +18,12 @@ from test_framework.messages import ( msg_ping, msg_version, ) -from test_framework.p2p import P2PInterface +from test_framework.p2p import ( + P2PInterface, + P2P_SUBVERSION, + P2P_SERVICES, + P2P_VERSION_RELAY, +) from test_framework.test_framework import BitcoinTestFramework from test_framework.util import ( assert_equal, @@ -126,12 +131,15 @@ class P2PLeakTest(BitcoinTestFramework): assert_equal(ver.addrFrom.port, 0) assert_equal(ver.addrFrom.ip, '0.0.0.0') assert_equal(ver.nStartingHeight, 201) - assert_equal(ver.nRelay, 1) + assert_equal(ver.relay, 1) self.log.info('Check that old peers are disconnected') p2p_old_peer = self.nodes[0].add_p2p_connection(P2PInterface(), send_version=False, wait_for_verack=False) old_version_msg = msg_version() old_version_msg.nVersion = 31799 + old_version_msg.strSubVer = P2P_SUBVERSION + old_version_msg.nServices = P2P_SERVICES + old_version_msg.relay = P2P_VERSION_RELAY with self.nodes[0].assert_debug_log(['peer=3 using obsolete version 31799; disconnecting']): p2p_old_peer.send_message(old_version_msg) p2p_old_peer.wait_for_disconnect() diff --git a/test/functional/test_framework/messages.py b/test/functional/test_framework/messages.py index 192676412e..0804b87873 100755 --- a/test/functional/test_framework/messages.py +++ b/test/functional/test_framework/messages.py @@ -31,11 +31,6 @@ from test_framework.util import assert_equal import dash_hash -MIN_VERSION_SUPPORTED = 60001 -MY_VERSION = 70231 # NO_LEGACY_ISLOCK_PROTO_VERSION -MY_SUBVERSION = "/python-p2p-tester:0.0.3%s/" -MY_RELAY = 1 # from version 70001 onwards, fRelay should be appended to version messages (BIP37) - MAX_LOCATOR_SZ = 101 MAX_BLOCK_SIZE = 2000000 MAX_BLOOM_FILTER_SIZE = 36000 @@ -383,22 +378,20 @@ class CBlockLocator: __slots__ = ("nVersion", "vHave") def __init__(self): - 
self.nVersion = MY_VERSION self.vHave = [] def deserialize(self, f): - self.nVersion = struct.unpack("= MIN_VERSION_SUPPORTED, "Version {} received. Test framework only supports versions greater than {}".format(message.nVersion, MIN_VERSION_SUPPORTED) + assert message.nVersion >= MIN_P2P_VERSION_SUPPORTED, "Version {} received. Test framework only supports versions greater than {}".format(message.nVersion, MIN_P2P_VERSION_SUPPORTED) if self.support_addrv2: self.send_message(msg_sendaddrv2()) self.send_message(msg_verack()) diff --git a/test/functional/test_framework/test_node.py b/test/functional/test_framework/test_node.py index 1361ae4701..dbdeb255de 100755 --- a/test/functional/test_framework/test_node.py +++ b/test/functional/test_framework/test_node.py @@ -23,7 +23,7 @@ import collections from .authproxy import JSONRPCException from .descriptors import descsum_create -from .messages import MY_SUBVERSION +from .p2p import P2P_SUBVERSION from .util import ( MAX_NODES, append_config, @@ -596,7 +596,7 @@ class TestNode(): def num_test_p2p_connections(self): """Return number of test framework p2p connections to the node.""" - return len([peer for peer in self.getpeerinfo() if peer['subver'] == MY_SUBVERSION]) + return len([peer for peer in self.getpeerinfo() if P2P_SUBVERSION % "" in peer['subver']]) def disconnect_p2ps(self): """Close all p2p connections to the node.""" From 51ad8e4ddec40bb205584f8fdd033e3df2686503 Mon Sep 17 00:00:00 2001 From: Kittywhiskers Van Gogh <63189531+kwvg@users.noreply.github.com> Date: Sun, 4 Aug 2024 08:15:54 +0000 Subject: [PATCH 02/14] merge bitcoin#21148: Split orphan handling from net_processing into txorphanage --- src/Makefile.am | 2 + src/init.cpp | 1 + src/net_processing.cpp | 304 ++++------------------------- src/net_processing.h | 1 - src/test/denialofservice_tests.cpp | 69 ++++--- src/test/fuzz/process_message.cpp | 1 + src/test/fuzz/process_messages.cpp | 1 + src/txorphanage.cpp | 227 +++++++++++++++++++++ 
src/txorphanage.h | 88 +++++++++ test/functional/p2p_invalid_tx.py | 2 +- 10 files changed, 394 insertions(+), 302 deletions(-) create mode 100644 src/txorphanage.cpp create mode 100644 src/txorphanage.h diff --git a/src/Makefile.am b/src/Makefile.am index 99d3c99d86..f90e432b7b 100644 --- a/src/Makefile.am +++ b/src/Makefile.am @@ -328,6 +328,7 @@ BITCOIN_CORE_H = \ torcontrol.h \ txdb.h \ txmempool.h \ + txorphanage.h \ undo.h \ unordered_lru_cache.h \ util/bip32.h \ @@ -527,6 +528,7 @@ libbitcoin_server_a_SOURCES = \ torcontrol.cpp \ txdb.cpp \ txmempool.cpp \ + txorphanage.cpp \ validation.cpp \ validationinterface.cpp \ versionbits.cpp \ diff --git a/src/init.cpp b/src/init.cpp index e52ab7a913..2b9f2bb31f 100644 --- a/src/init.cpp +++ b/src/init.cpp @@ -56,6 +56,7 @@ #include #include #include +#include #include #include #include diff --git a/src/net_processing.cpp b/src/net_processing.cpp index db6d9c70ff..8d6489d5c6 100644 --- a/src/net_processing.cpp +++ b/src/net_processing.cpp @@ -29,6 +29,7 @@ #include #include #include +#include #include // For NDEBUG compile time check #include #include @@ -86,10 +87,6 @@ static_assert(INBOUND_PEER_TX_DELAY >= MAX_GETDATA_RANDOM_DELAY, /** Limit to avoid sending big packets. Not used in processing incoming GETDATA for compatibility */ static const unsigned int MAX_GETDATA_SZ = 1000; -/** Expiration time for orphan transactions in seconds */ -static constexpr int64_t ORPHAN_TX_EXPIRE_TIME = 20 * 60; -/** Minimum time between orphan transactions expire time checks in seconds */ -static constexpr int64_t ORPHAN_TX_EXPIRE_INTERVAL = 5 * 60; /** How long to cache transactions in mapRelay for normal relay */ static constexpr auto RELAY_TX_CACHE_TIME = 15min; /** How long a transaction has to be in the mempool before it can unconditionally be relayed (even when not in mapRelay). */ @@ -191,24 +188,6 @@ static constexpr size_t MAX_ADDR_PROCESSING_TOKEN_BUCKET{MAX_ADDR_TO_SEND}; /** The compactblocks version we support. 
See BIP 152. */ static constexpr uint64_t CMPCTBLOCKS_VERSION{1}; -struct COrphanTx { - // When modifying, adapt the copy of this definition in tests/DoS_tests. - CTransactionRef tx; - NodeId fromPeer; - int64_t nTimeExpire; - size_t list_pos; - size_t nTxSize; -}; - -/** Guards orphan transactions and extra txs for compact blocks */ -RecursiveMutex g_cs_orphans; -/** Map from txid to orphan transaction record. Limited by - * -maxorphantx/DEFAULT_MAX_ORPHAN_TRANSACTIONS */ -std::map mapOrphanTransactions GUARDED_BY(g_cs_orphans); - -size_t nMapOrphanTransactionsSize = 0; -void EraseOrphansFor(NodeId peer); - // Internal stuff namespace { /** Blocks that are in flight, and that are in the queue to be downloaded. */ @@ -773,35 +752,24 @@ private: /** Number of peers from which we're downloading blocks. */ int nPeersWithValidatedDownloads GUARDED_BY(cs_main) = 0; -}; -} // namespace -namespace { + /** Storage for orphan information */ + TxOrphanage m_orphanage; - /** Number of preferable block download peers. */ - int nPreferredDownload GUARDED_BY(cs_main) = 0; - - struct IteratorComparator - { - template - bool operator()(const I& a, const I& b) const - { - return &(*a) < &(*b); - } - }; - - /** Index from the parents' COutPoint into the mapOrphanTransactions. Used - * to remove orphan transactions from the mapOrphanTransactions */ - std::map::iterator, IteratorComparator>> mapOrphanTransactionsByPrev GUARDED_BY(g_cs_orphans); - /** Orphan transactions in vector for quick random eviction */ - std::vector::iterator> g_orphan_list GUARDED_BY(g_cs_orphans); + void AddToCompactExtraTransactions(const CTransactionRef& tx) EXCLUSIVE_LOCKS_REQUIRED(g_cs_orphans); /** Orphan/conflicted/etc transactions that are kept for compact block reconstruction. 
* The last -blockreconstructionextratxn/DEFAULT_BLOCK_RECONSTRUCTION_EXTRA_TXN of * these are kept in a ring buffer */ - static std::vector> vExtraTxnForCompact GUARDED_BY(g_cs_orphans); + std::vector> vExtraTxnForCompact GUARDED_BY(g_cs_orphans); /** Offset into vExtraTxnForCompact to insert the next tx */ - static size_t vExtraTxnForCompactIt GUARDED_BY(g_cs_orphans) = 0; + size_t vExtraTxnForCompactIt GUARDED_BY(g_cs_orphans) = 0; +}; +} // namespace + +namespace { + /** Number of preferable block download peers. */ + int nPreferredDownload GUARDED_BY(cs_main) = 0; } // namespace namespace { @@ -1569,7 +1537,7 @@ void PeerManagerImpl::FinalizeNode(const CNode& node) { for (const QueuedBlock& entry : state->vBlocksInFlight) { mapBlocksInFlight.erase(entry.hash); } - EraseOrphansFor(nodeid); + WITH_LOCK(g_cs_orphans, m_orphanage.EraseForPeer(nodeid)); nPreferredDownload -= state->fPreferredDownload; nPeersWithValidatedDownloads -= (state->nBlocksInFlightValidHeaders != 0); assert(nPeersWithValidatedDownloads >= 0); @@ -1661,12 +1629,7 @@ bool PeerManagerImpl::GetNodeStateStats(NodeId nodeid, CNodeStateStats& stats) c return true; } -////////////////////////////////////////////////////////////////////////////// -// -// mapOrphanTransactions -// - -static void AddToCompactExtraTransactions(const CTransactionRef& tx) EXCLUSIVE_LOCKS_REQUIRED(g_cs_orphans) +void PeerManagerImpl::AddToCompactExtraTransactions(const CTransactionRef& tx) EXCLUSIVE_LOCKS_REQUIRED(g_cs_orphans) { size_t max_extra_txn = gArgs.GetArg("-blockreconstructionextratxn", DEFAULT_BLOCK_RECONSTRUCTION_EXTRA_TXN); if (max_extra_txn <= 0) @@ -1677,131 +1640,6 @@ static void AddToCompactExtraTransactions(const CTransactionRef& tx) EXCLUSIVE_L vExtraTxnForCompactIt = (vExtraTxnForCompactIt + 1) % max_extra_txn; } -bool AddOrphanTx(const CTransactionRef& tx, NodeId peer) EXCLUSIVE_LOCKS_REQUIRED(g_cs_orphans) -{ - const uint256& hash = tx->GetHash(); - if (mapOrphanTransactions.count(hash)) - return 
false; - - // Ignore big transactions, to avoid a - // send-big-orphans memory exhaustion attack. If a peer has a legitimate - // large transaction with a missing parent then we assume - // it will rebroadcast it later, after the parent transaction(s) - // have been mined or received. - // 100 orphans, each of which is at most 99,999 bytes big is - // at most 10 megabytes of orphans and somewhat more byprev index (in the worst case): - unsigned int sz = GetSerializeSize(*tx, CTransaction::CURRENT_VERSION); - if (sz > MAX_STANDARD_TX_SIZE) - { - LogPrint(BCLog::MEMPOOL, "ignoring large orphan tx (size: %u, hash: %s)\n", sz, hash.ToString()); - return false; - } - - auto ret = mapOrphanTransactions.emplace(hash, COrphanTx{tx, peer, GetTime() + ORPHAN_TX_EXPIRE_TIME, g_orphan_list.size(), sz}); - assert(ret.second); - g_orphan_list.push_back(ret.first); - for (const CTxIn& txin : tx->vin) { - mapOrphanTransactionsByPrev[txin.prevout].insert(ret.first); - } - - AddToCompactExtraTransactions(tx); - - nMapOrphanTransactionsSize += sz; - - LogPrint(BCLog::MEMPOOL, "stored orphan tx %s (mapsz %u outsz %u)\n", hash.ToString(), - mapOrphanTransactions.size(), mapOrphanTransactionsByPrev.size()); - statsClient.inc("transactions.orphans.add", 1.0f); - statsClient.gauge("transactions.orphans", mapOrphanTransactions.size()); - return true; -} - -int static EraseOrphanTx(uint256 hash) EXCLUSIVE_LOCKS_REQUIRED(g_cs_orphans) -{ - std::map::iterator it = mapOrphanTransactions.find(hash); - if (it == mapOrphanTransactions.end()) - return 0; - for (const CTxIn& txin : it->second.tx->vin) - { - auto itPrev = mapOrphanTransactionsByPrev.find(txin.prevout); - if (itPrev == mapOrphanTransactionsByPrev.end()) - continue; - itPrev->second.erase(it); - if (itPrev->second.empty()) - mapOrphanTransactionsByPrev.erase(itPrev); - } - - size_t old_pos = it->second.list_pos; - assert(g_orphan_list[old_pos] == it); - if (old_pos + 1 != g_orphan_list.size()) { - // Unless we're deleting the last 
entry in g_orphan_list, move the last - // entry to the position we're deleting. - auto it_last = g_orphan_list.back(); - g_orphan_list[old_pos] = it_last; - it_last->second.list_pos = old_pos; - } - g_orphan_list.pop_back(); - - assert(nMapOrphanTransactionsSize >= it->second.nTxSize); - nMapOrphanTransactionsSize -= it->second.nTxSize; - mapOrphanTransactions.erase(it); - statsClient.inc("transactions.orphans.remove", 1.0f); - statsClient.gauge("transactions.orphans", mapOrphanTransactions.size()); - return 1; -} - -void EraseOrphansFor(NodeId peer) -{ - LOCK(g_cs_orphans); - int nErased = 0; - std::map::iterator iter = mapOrphanTransactions.begin(); - while (iter != mapOrphanTransactions.end()) - { - std::map::iterator maybeErase = iter++; // increment to avoid iterator becoming invalid - if (maybeErase->second.fromPeer == peer) - { - nErased += EraseOrphanTx(maybeErase->second.tx->GetHash()); - } - } - if (nErased > 0) LogPrint(BCLog::MEMPOOL, "Erased %d orphan tx from peer=%d\n", nErased, peer); -} - - -unsigned int LimitOrphanTxSize(unsigned int nMaxOrphansSize) -{ - LOCK(g_cs_orphans); - - unsigned int nEvicted = 0; - static int64_t nNextSweep; - int64_t nNow = GetTime(); - if (nNextSweep <= nNow) { - // Sweep out expired orphan pool entries: - int nErased = 0; - int64_t nMinExpTime = nNow + ORPHAN_TX_EXPIRE_TIME - ORPHAN_TX_EXPIRE_INTERVAL; - std::map::iterator iter = mapOrphanTransactions.begin(); - while (iter != mapOrphanTransactions.end()) - { - std::map::iterator maybeErase = iter++; - if (maybeErase->second.nTimeExpire <= nNow) { - nErased += EraseOrphanTx(maybeErase->second.tx->GetHash()); - } else { - nMinExpTime = std::min(maybeErase->second.nTimeExpire, nMinExpTime); - } - } - // Sweep again 5 minutes after the next entry that expires in order to batch the linear scan. 
- nNextSweep = nMinExpTime + ORPHAN_TX_EXPIRE_INTERVAL; - if (nErased > 0) LogPrint(BCLog::MEMPOOL, "Erased %d orphan tx due to expiration\n", nErased); - } - FastRandomContext rng; - while (!mapOrphanTransactions.empty() && nMapOrphanTransactionsSize > nMaxOrphansSize) - { - // Evict a random orphan: - size_t randompos = rng.randrange(g_orphan_list.size()); - EraseOrphanTx(g_orphan_list[randompos]->first); - ++nEvicted; - } - return nEvicted; -} - void PeerManagerImpl::Misbehaving(const NodeId pnode, const int howmuch, const std::string& message) { assert(howmuch > 0); @@ -2022,52 +1860,17 @@ void PeerManagerImpl::StartScheduledTasks(CScheduler& scheduler) */ void PeerManagerImpl::BlockConnected(const std::shared_ptr& pblock, const CBlockIndex* pindex) { - { - LOCK2(cs_main, g_cs_orphans); + LOCK2(::cs_main, g_cs_orphans); - std::vector vOrphanErase; - std::set orphanWorkSet; - - for (const CTransactionRef& ptx : pblock->vtx) { - const CTransaction& tx = *ptx; - - // Which orphan pool entries we should reprocess and potentially try to accept into mempool again? - for (size_t i = 0; i < tx.vin.size(); i++) { - auto itByPrev = mapOrphanTransactionsByPrev.find(COutPoint(tx.GetHash(), (uint32_t)i)); - if (itByPrev == mapOrphanTransactionsByPrev.end()) continue; - for (const auto& elem : itByPrev->second) { - orphanWorkSet.insert(elem->first); - } - } - - // Which orphan pool entries must we evict? 
- for (const auto& txin : tx.vin) { - auto itByPrev = mapOrphanTransactionsByPrev.find(txin.prevout); - if (itByPrev == mapOrphanTransactionsByPrev.end()) continue; - for (auto mi = itByPrev->second.begin(); mi != itByPrev->second.end(); ++mi) { - const CTransaction& orphanTx = *(*mi)->second.tx; - const uint256& orphanHash = orphanTx.GetHash(); - vOrphanErase.push_back(orphanHash); - } - } - } - - // Erase orphan transactions included or precluded by this block - if (vOrphanErase.size()) { - int nErased = 0; - for (const uint256& orphanHash : vOrphanErase) { - nErased += EraseOrphanTx(orphanHash); - } - LogPrint(BCLog::MEMPOOL, "Erased %d orphan tx included or conflicted by block\n", nErased); - } - - while (!orphanWorkSet.empty()) { - LogPrint(BCLog::MEMPOOL, "Trying to process %d orphans\n", orphanWorkSet.size()); - ProcessOrphanTx(orphanWorkSet); - } - - m_last_tip_update = GetTime(); + auto orphanWorkSet = m_orphanage.GetCandidatesForBlock(*pblock); + while (!orphanWorkSet.empty()) { + LogPrint(BCLog::MEMPOOL, "Trying to process %d orphans\n", orphanWorkSet.size()); + ProcessOrphanTx(orphanWorkSet); } + + m_orphanage.EraseForBlock(*pblock); + m_last_tip_update = GetTime(); + { LOCK(m_recent_confirmed_transactions_mutex); for (const auto& ptx : pblock->vtx) { @@ -2241,10 +2044,7 @@ bool PeerManagerImpl::AlreadyHave(const CInv& inv) m_recent_rejects.reset(); } - { - LOCK(g_cs_orphans); - if (mapOrphanTransactions.count(inv.hash)) return true; - } + if (m_orphanage.HaveTx(inv.hash)) return true; { LOCK(m_recent_confirmed_transactions_mutex); @@ -3144,40 +2944,32 @@ void PeerManagerImpl::ProcessOrphanTx(std::set& orphan_work_set) const uint256 orphanHash = *orphan_work_set.begin(); orphan_work_set.erase(orphan_work_set.begin()); - auto orphan_it = mapOrphanTransactions.find(orphanHash); - if (orphan_it == mapOrphanTransactions.end()) continue; + const auto [porphanTx, from_peer] = m_orphanage.GetTx(orphanHash); + if (porphanTx == nullptr) continue; - const 
CTransactionRef porphanTx = orphan_it->second.tx; const MempoolAcceptResult result = AcceptToMemoryPool(m_chainman.ActiveChainstate(), m_mempool, porphanTx, false /* bypass_limits */); const TxValidationState& state = result.m_state; if (result.m_result_type == MempoolAcceptResult::ResultType::VALID) { LogPrint(BCLog::MEMPOOL, " accepted orphan tx %s\n", orphanHash.ToString()); RelayTransaction(porphanTx->GetHash()); - for (unsigned int i = 0; i < porphanTx->vout.size(); i++) { - auto it_by_prev = mapOrphanTransactionsByPrev.find(COutPoint(orphanHash, i)); - if (it_by_prev != mapOrphanTransactionsByPrev.end()) { - for (const auto& elem : it_by_prev->second) { - orphan_work_set.insert(elem->first); - } - } - } - EraseOrphanTx(orphanHash); + m_orphanage.AddChildrenToWorkSet(*porphanTx, orphan_work_set); + m_orphanage.EraseTx(orphanHash); break; } else if (state.GetResult() != TxValidationResult::TX_MISSING_INPUTS) { if (state.IsInvalid()) { LogPrint(BCLog::MEMPOOL, " invalid orphan tx %s from peer=%d. 
%s\n", orphanHash.ToString(), - orphan_it->second.fromPeer, + from_peer, state.ToString()); // Maybe punish peer that gave us an invalid orphan tx - MaybePunishNodeForTx(orphan_it->second.fromPeer, state); + MaybePunishNodeForTx(from_peer, state); } // Has inputs but not accepted to mempool // Probably non-standard or insufficient fee LogPrint(BCLog::MEMPOOL, " removed orphan tx %s\n", orphanHash.ToString()); m_recent_rejects.insert(orphanHash); - EraseOrphanTx(orphanHash); + m_orphanage.EraseTx(orphanHash); break; } } @@ -4325,15 +4117,7 @@ void PeerManagerImpl::ProcessMessage( m_mempool.check(m_chainman.ActiveChainstate()); RelayTransaction(tx.GetHash()); - - for (unsigned int i = 0; i < tx.vout.size(); i++) { - auto it_by_prev = mapOrphanTransactionsByPrev.find(COutPoint(txid, i)); - if (it_by_prev != mapOrphanTransactionsByPrev.end()) { - for (const auto& elem : it_by_prev->second) { - peer->m_orphan_work_set.insert(elem->first); - } - } - } + m_orphanage.AddChildrenToWorkSet(tx, peer->m_orphan_work_set); pfrom.m_last_tx_time = GetTime(); @@ -4377,13 +4161,16 @@ void PeerManagerImpl::ProcessMessage( AddKnownInv(*peer, _inv2.hash); if (!AlreadyHave(_inv2)) RequestObject(State(pfrom.GetId()), _inv2, current_time, is_masternode); } - AddOrphanTx(ptx, pfrom.GetId()); - // DoS prevention: do not allow mapOrphanTransactions to grow unbounded (see CVE-2012-3789) + if (m_orphanage.AddTx(ptx, pfrom.GetId())) { + AddToCompactExtraTransactions(ptx); + } + + // DoS prevention: do not allow m_orphans to grow unbounded (see CVE-2012-3789) unsigned int nMaxOrphanTxSize = (unsigned int)std::max((int64_t)0, gArgs.GetArg("-maxorphantxsize", DEFAULT_MAX_ORPHAN_TRANSACTIONS_SIZE)) * 1000000; - unsigned int nEvicted = LimitOrphanTxSize(nMaxOrphanTxSize); + unsigned int nEvicted = m_orphanage.LimitOrphans(nMaxOrphanTxSize); if (nEvicted > 0) { - LogPrint(BCLog::MEMPOOL, "mapOrphan overflow, removed %u tx\n", nEvicted); + LogPrint(BCLog::MEMPOOL, "orphanage overflow, removed %u 
tx\n", nEvicted); } } else { LogPrint(BCLog::MEMPOOL, "not keeping orphan with rejected parents %s\n",tx.GetHash().ToString()); @@ -6153,16 +5940,3 @@ bool PeerManagerImpl::SendMessages(CNode* pto) } // release cs_main return true; } - -class CNetProcessingCleanup -{ -public: - CNetProcessingCleanup() {} - ~CNetProcessingCleanup() { - // orphan transactions - mapOrphanTransactions.clear(); - mapOrphanTransactionsByPrev.clear(); - nMapOrphanTransactionsSize = 0; - } -}; -static CNetProcessingCleanup instance_of_cnetprocessingcleanup; diff --git a/src/net_processing.h b/src/net_processing.h index b8fd7090b6..33b12b24ad 100644 --- a/src/net_processing.h +++ b/src/net_processing.h @@ -29,7 +29,6 @@ struct CJContext; struct LLMQContext; extern RecursiveMutex cs_main; -extern RecursiveMutex g_cs_orphans; /** Default for -maxorphantxsize, maximum size in megabytes the orphan map can grow before entries are removed */ static const unsigned int DEFAULT_MAX_ORPHAN_TRANSACTIONS_SIZE = 10; // this allows around 100 TXs of max size (and many more of normal size) diff --git a/src/test/denialofservice_tests.cpp b/src/test/denialofservice_tests.cpp index 1e55dda23a..dbf154531d 100644 --- a/src/test/denialofservice_tests.cpp +++ b/src/test/denialofservice_tests.cpp @@ -15,6 +15,7 @@ #include #include #include +#include #include #include #include @@ -26,18 +27,6 @@ #include -// Tests these internal-to-net_processing.cpp methods: -extern bool AddOrphanTx(const CTransactionRef& tx, NodeId peer); -extern void EraseOrphansFor(NodeId peer); -extern unsigned int LimitOrphanTxSize(unsigned int nMaxOrphans); - -struct COrphanTx { - CTransactionRef tx; - NodeId fromPeer; - int64_t nTimeExpire; -}; -extern std::map mapOrphanTransactions GUARDED_BY(g_cs_orphans); - static CService ip(uint32_t i) { struct in_addr s; @@ -448,15 +437,23 @@ BOOST_AUTO_TEST_CASE(DoS_bantime) peerLogic->FinalizeNode(dummyNode); } -static CTransactionRef RandomOrphan() +class TxOrphanageTest : public TxOrphanage { - 
std::map::iterator it; - LOCK2(cs_main, g_cs_orphans); - it = mapOrphanTransactions.lower_bound(InsecureRand256()); - if (it == mapOrphanTransactions.end()) - it = mapOrphanTransactions.begin(); - return it->second.tx; -} +public: + inline size_t CountOrphans() const EXCLUSIVE_LOCKS_REQUIRED(g_cs_orphans) + { + return m_orphans.size(); + } + + CTransactionRef RandomOrphan() EXCLUSIVE_LOCKS_REQUIRED(g_cs_orphans) + { + std::map::iterator it; + it = m_orphans.lower_bound(InsecureRand256()); + if (it == m_orphans.end()) + it = m_orphans.begin(); + return it->second.tx; + } +}; static void MakeNewKeyWithFastRandomContext(CKey& key) { @@ -476,11 +473,14 @@ BOOST_AUTO_TEST_CASE(DoS_mapOrphans) // signature's R and S values have leading zeros. g_insecure_rand_ctx = FastRandomContext{uint256{33}}; + TxOrphanageTest orphanage; CKey key; MakeNewKeyWithFastRandomContext(key); FillableSigningProvider keystore; BOOST_CHECK(keystore.AddKey(key)); + LOCK(g_cs_orphans); + // 50 orphan transactions: for (int i = 0; i < 50; i++) { @@ -493,13 +493,13 @@ BOOST_AUTO_TEST_CASE(DoS_mapOrphans) tx.vout[0].nValue = 1*CENT; tx.vout[0].scriptPubKey = GetScriptForDestination(PKHash(key.GetPubKey())); - AddOrphanTx(MakeTransactionRef(tx), i); + orphanage.AddTx(MakeTransactionRef(tx), i); } // ... 
and 50 that depend on other orphans: for (int i = 0; i < 50; i++) { - CTransactionRef txPrev = RandomOrphan(); + CTransactionRef txPrev = orphanage.RandomOrphan(); CMutableTransaction tx; tx.vin.resize(1); @@ -510,13 +510,13 @@ BOOST_AUTO_TEST_CASE(DoS_mapOrphans) tx.vout[0].scriptPubKey = GetScriptForDestination(PKHash(key.GetPubKey())); BOOST_CHECK(SignSignature(keystore, *txPrev, tx, 0, SIGHASH_ALL)); - AddOrphanTx(MakeTransactionRef(tx), i); + orphanage.AddTx(MakeTransactionRef(tx), i); } // This really-big orphan should be ignored: for (int i = 0; i < 10; i++) { - CTransactionRef txPrev = RandomOrphan(); + CTransactionRef txPrev = orphanage.RandomOrphan(); CMutableTransaction tx; tx.vout.resize(1); @@ -534,25 +534,24 @@ BOOST_AUTO_TEST_CASE(DoS_mapOrphans) for (unsigned int j = 1; j < tx.vin.size(); j++) tx.vin[j].scriptSig = tx.vin[0].scriptSig; - BOOST_CHECK(!AddOrphanTx(MakeTransactionRef(tx), i)); + BOOST_CHECK(!orphanage.AddTx(MakeTransactionRef(tx), i)); } - LOCK2(cs_main, g_cs_orphans); // Test EraseOrphansFor: for (NodeId i = 0; i < 3; i++) { - size_t sizeBefore = mapOrphanTransactions.size(); - EraseOrphansFor(i); - BOOST_CHECK(mapOrphanTransactions.size() < sizeBefore); + size_t sizeBefore = orphanage.CountOrphans(); + orphanage.EraseForPeer(i); + BOOST_CHECK(orphanage.CountOrphans() < sizeBefore); } // Test LimitOrphanTxSize() function: - LimitOrphanTxSize(40); - BOOST_CHECK(mapOrphanTransactions.size() <= 40); - LimitOrphanTxSize(10); - BOOST_CHECK(mapOrphanTransactions.size() <= 10); - LimitOrphanTxSize(0); - BOOST_CHECK(mapOrphanTransactions.empty()); + orphanage.LimitOrphans(40); + BOOST_CHECK(orphanage.CountOrphans() <= 40); + orphanage.LimitOrphans(10); + BOOST_CHECK(orphanage.CountOrphans() <= 10); + orphanage.LimitOrphans(0); + BOOST_CHECK(orphanage.CountOrphans() == 0); } BOOST_AUTO_TEST_SUITE_END() diff --git a/src/test/fuzz/process_message.cpp b/src/test/fuzz/process_message.cpp index 5b921696f9..51ba2ab04f 100644 --- 
a/src/test/fuzz/process_message.cpp +++ b/src/test/fuzz/process_message.cpp @@ -18,6 +18,7 @@ #include #include #include +#include #include #include diff --git a/src/test/fuzz/process_messages.cpp b/src/test/fuzz/process_messages.cpp index 744503bf55..6bac0336cf 100644 --- a/src/test/fuzz/process_messages.cpp +++ b/src/test/fuzz/process_messages.cpp @@ -13,6 +13,7 @@ #include #include #include +#include #include #include diff --git a/src/txorphanage.cpp b/src/txorphanage.cpp new file mode 100644 index 0000000000..6e224e93ad --- /dev/null +++ b/src/txorphanage.cpp @@ -0,0 +1,227 @@ +// Copyright (c) 2021 The Bitcoin Core developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php. + +#include + +#include +#include +#include +#include + +#include + +/** Expiration time for orphan transactions in seconds */ +static constexpr int64_t ORPHAN_TX_EXPIRE_TIME = 20 * 60; +/** Minimum time between orphan transactions expire time checks in seconds */ +static constexpr int64_t ORPHAN_TX_EXPIRE_INTERVAL = 5 * 60; + +RecursiveMutex g_cs_orphans; + +bool TxOrphanage::AddTx(const CTransactionRef& tx, NodeId peer) +{ + AssertLockHeld(g_cs_orphans); + + const uint256& hash = tx->GetHash(); + if (m_orphans.count(hash)) + return false; + + // Ignore big transactions, to avoid a + // send-big-orphans memory exhaustion attack. If a peer has a legitimate + // large transaction with a missing parent then we assume + // it will rebroadcast it later, after the parent transaction(s) + // have been mined or received. 
+ // 100 orphans, each of which is at most 99,999 bytes big is + // at most 10 megabytes of orphans and somewhat more byprev index (in the worst case): + unsigned int sz = GetSerializeSize(*tx, CTransaction::CURRENT_VERSION); + if (sz > MAX_STANDARD_TX_SIZE) + { + LogPrint(BCLog::MEMPOOL, "ignoring large orphan tx (size: %u, hash: %s)\n", sz, hash.ToString()); + return false; + } + + auto ret = m_orphans.emplace(hash, OrphanTx{tx, peer, GetTime() + ORPHAN_TX_EXPIRE_TIME, m_orphan_list.size(), sz}); + assert(ret.second); + m_orphan_list.push_back(ret.first); + for (const CTxIn& txin : tx->vin) { + m_outpoint_to_orphan_it[txin.prevout].insert(ret.first); + } + + m_orphan_tx_size += sz; + + LogPrint(BCLog::MEMPOOL, "stored orphan tx %s (mapsz %u outsz %u)\n", hash.ToString(), + m_orphans.size(), m_outpoint_to_orphan_it.size()); + statsClient.inc("transactions.orphans.add", 1.0f); + statsClient.gauge("transactions.orphans", m_orphans.size()); + + return true; +} + +int TxOrphanage::EraseTx(const uint256& txid) +{ + AssertLockHeld(g_cs_orphans); + std::map::iterator it = m_orphans.find(txid); + if (it == m_orphans.end()) + return 0; + for (const CTxIn& txin : it->second.tx->vin) + { + auto itPrev = m_outpoint_to_orphan_it.find(txin.prevout); + if (itPrev == m_outpoint_to_orphan_it.end()) + continue; + itPrev->second.erase(it); + if (itPrev->second.empty()) + m_outpoint_to_orphan_it.erase(itPrev); + } + + size_t old_pos = it->second.list_pos; + assert(m_orphan_list[old_pos] == it); + if (old_pos + 1 != m_orphan_list.size()) { + // Unless we're deleting the last entry in m_orphan_list, move the last + // entry to the position we're deleting. 
+ auto it_last = m_orphan_list.back(); + m_orphan_list[old_pos] = it_last; + it_last->second.list_pos = old_pos; + } + m_orphan_list.pop_back(); + + assert(m_orphan_tx_size >= it->second.nTxSize); + m_orphan_tx_size -= it->second.nTxSize; + m_orphans.erase(it); + statsClient.inc("transactions.orphans.remove", 1.0f); + statsClient.gauge("transactions.orphans", m_orphans.size()); + return 1; +} + +void TxOrphanage::EraseForPeer(NodeId peer) +{ + AssertLockHeld(g_cs_orphans); + + int nErased = 0; + std::map::iterator iter = m_orphans.begin(); + while (iter != m_orphans.end()) + { + std::map::iterator maybeErase = iter++; // increment to avoid iterator becoming invalid + if (maybeErase->second.fromPeer == peer) + { + nErased += EraseTx(maybeErase->second.tx->GetHash()); + } + } + if (nErased > 0) LogPrint(BCLog::MEMPOOL, "Erased %d orphan tx from peer=%d\n", nErased, peer); +} + +unsigned int TxOrphanage::LimitOrphans(unsigned int max_orphans_size) +{ + AssertLockHeld(g_cs_orphans); + + unsigned int nEvicted = 0; + static int64_t nNextSweep; + int64_t nNow = GetTime(); + if (nNextSweep <= nNow) { + // Sweep out expired orphan pool entries: + int nErased = 0; + int64_t nMinExpTime = nNow + ORPHAN_TX_EXPIRE_TIME - ORPHAN_TX_EXPIRE_INTERVAL; + std::map::iterator iter = m_orphans.begin(); + while (iter != m_orphans.end()) + { + std::map::iterator maybeErase = iter++; + if (maybeErase->second.nTimeExpire <= nNow) { + nErased += EraseTx(maybeErase->second.tx->GetHash()); + } else { + nMinExpTime = std::min(maybeErase->second.nTimeExpire, nMinExpTime); + } + } + // Sweep again 5 minutes after the next entry that expires in order to batch the linear scan. 
+ nNextSweep = nMinExpTime + ORPHAN_TX_EXPIRE_INTERVAL; + if (nErased > 0) LogPrint(BCLog::MEMPOOL, "Erased %d orphan tx due to expiration\n", nErased); + } + FastRandomContext rng; + while (!m_orphans.empty() && m_orphan_tx_size > max_orphans_size) + { + // Evict a random orphan: + size_t randompos = rng.randrange(m_orphan_list.size()); + EraseTx(m_orphan_list[randompos]->first); + ++nEvicted; + } + return nEvicted; +} + +void TxOrphanage::AddChildrenToWorkSet(const CTransaction& tx, std::set& orphan_work_set) const +{ + AssertLockHeld(g_cs_orphans); + for (unsigned int i = 0; i < tx.vout.size(); i++) { + const auto it_by_prev = m_outpoint_to_orphan_it.find(COutPoint(tx.GetHash(), i)); + if (it_by_prev != m_outpoint_to_orphan_it.end()) { + for (const auto& elem : it_by_prev->second) { + orphan_work_set.insert(elem->first); + } + } + } +} + +bool TxOrphanage::HaveTx(const uint256& txid) const +{ + LOCK(g_cs_orphans); + return m_orphans.count(txid); +} + +std::pair TxOrphanage::GetTx(const uint256& txid) const +{ + AssertLockHeld(g_cs_orphans); + + const auto it = m_orphans.find(txid); + if (it == m_orphans.end()) return {nullptr, -1}; + return {it->second.tx, it->second.fromPeer}; +} + +std::set TxOrphanage::GetCandidatesForBlock(const CBlock& block) +{ + AssertLockHeld(g_cs_orphans); + + std::set orphanWorkSet; + + for (const CTransactionRef& ptx : block.vtx) { + const CTransaction& tx = *ptx; + + // Which orphan pool entries we should reprocess and potentially try to accept into mempool again? 
+ for (size_t i = 0; i < tx.vin.size(); i++) { + auto itByPrev = m_outpoint_to_orphan_it.find(COutPoint(tx.GetHash(), (uint32_t)i)); + if (itByPrev == m_outpoint_to_orphan_it.end()) continue; + for (const auto& elem : itByPrev->second) { + orphanWorkSet.insert(elem->first); + } + } + } + + return orphanWorkSet; +} + +void TxOrphanage::EraseForBlock(const CBlock& block) +{ + AssertLockHeld(g_cs_orphans); + + std::vector vOrphanErase; + + for (const CTransactionRef& ptx : block.vtx) { + const CTransaction& tx = *ptx; + + // Which orphan pool entries must we evict? + for (const auto& txin : tx.vin) { + auto itByPrev = m_outpoint_to_orphan_it.find(txin.prevout); + if (itByPrev == m_outpoint_to_orphan_it.end()) continue; + for (auto mi = itByPrev->second.begin(); mi != itByPrev->second.end(); ++mi) { + const CTransaction& orphanTx = *(*mi)->second.tx; + const uint256& orphanHash = orphanTx.GetHash(); + vOrphanErase.push_back(orphanHash); + } + } + } + + // Erase orphan transactions included or precluded by this block + if (vOrphanErase.size()) { + int nErased = 0; + for (const uint256& orphanHash : vOrphanErase) { + nErased += EraseTx(orphanHash); + } + LogPrint(BCLog::MEMPOOL, "Erased %d orphan tx included or conflicted by block\n", nErased); + } +} diff --git a/src/txorphanage.h b/src/txorphanage.h new file mode 100644 index 0000000000..6fc043eda6 --- /dev/null +++ b/src/txorphanage.h @@ -0,0 +1,88 @@ +// Copyright (c) 2021 The Bitcoin Core developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php. 
+ +#ifndef BITCOIN_TXORPHANAGE_H +#define BITCOIN_TXORPHANAGE_H + +#include <net.h> +#include <primitives/block.h> +#include <primitives/transaction.h> +#include <sync.h> + +/** Guards orphan transactions and extra txs for compact blocks */ +extern RecursiveMutex g_cs_orphans; + +/** A class to track orphan transactions (failed on TX_MISSING_INPUTS) + * Since we cannot distinguish orphans from bad transactions with + * non-existent inputs, we heavily limit the number of orphans + * we keep and the duration we keep them for. + */ +class TxOrphanage { +public: + /** Add a new orphan transaction */ + bool AddTx(const CTransactionRef& tx, NodeId peer) EXCLUSIVE_LOCKS_REQUIRED(g_cs_orphans); + + /** Check if we already have an orphan transaction */ + bool HaveTx(const uint256& txid) const LOCKS_EXCLUDED(::g_cs_orphans); + + /** Get an orphan transaction and its originating peer + * (Transaction ref will be nullptr if not found) + */ + std::pair<CTransactionRef, NodeId> GetTx(const uint256& txid) const EXCLUSIVE_LOCKS_REQUIRED(g_cs_orphans); + + /** Get a set of orphan transactions that can be candidates for reconsideration into the mempool */ + std::set<uint256> GetCandidatesForBlock(const CBlock& block) EXCLUSIVE_LOCKS_REQUIRED(g_cs_orphans); + + /** Erase an orphan by txid */ + int EraseTx(const uint256& txid) EXCLUSIVE_LOCKS_REQUIRED(g_cs_orphans); + + /** Erase all orphans announced by a peer (eg, after that peer disconnects) */ + void EraseForPeer(NodeId peer) EXCLUSIVE_LOCKS_REQUIRED(g_cs_orphans); + + /** Erase all orphans included in or invalidated by a new block */ + void EraseForBlock(const CBlock& block) EXCLUSIVE_LOCKS_REQUIRED(g_cs_orphans); + + /** Limit the orphanage to the given maximum */ + unsigned int LimitOrphans(unsigned int max_orphans_size) EXCLUSIVE_LOCKS_REQUIRED(g_cs_orphans); + + /** Add any orphans that list a particular tx as a parent into a peer's work set + * (ie orphans that may have found their final missing parent, and so should be reconsidered for the mempool) */ + void AddChildrenToWorkSet(const CTransaction& tx, std::set<uint256>& 
orphan_work_set) const EXCLUSIVE_LOCKS_REQUIRED(g_cs_orphans); + +protected: + struct OrphanTx { + CTransactionRef tx; + NodeId fromPeer; + int64_t nTimeExpire; + size_t list_pos; + size_t nTxSize; + }; + + /** Map from txid to orphan transaction record. Limited by + * -maxorphantx/DEFAULT_MAX_ORPHAN_TRANSACTIONS */ + std::map m_orphans GUARDED_BY(g_cs_orphans); + + using OrphanMap = decltype(m_orphans); + + struct IteratorComparator + { + template + bool operator()(const I& a, const I& b) const + { + return &(*a) < &(*b); + } + }; + + /** Index from the parents' COutPoint into the m_orphans. Used + * to remove orphan transactions from the m_orphans */ + std::map> m_outpoint_to_orphan_it GUARDED_BY(g_cs_orphans); + + /** Orphan transactions in vector for quick random eviction */ + std::vector m_orphan_list GUARDED_BY(g_cs_orphans); + + /** Cumulative size of all transactions in the orphan map */ + size_t m_orphan_tx_size{0}; +}; + +#endif // BITCOIN_TXORPHANAGE_H diff --git a/test/functional/p2p_invalid_tx.py b/test/functional/p2p_invalid_tx.py index c0fd000630..3a4fd8919b 100755 --- a/test/functional/p2p_invalid_tx.py +++ b/test/functional/p2p_invalid_tx.py @@ -194,7 +194,7 @@ class InvalidTxRequestTest(BitcoinTestFramework): for j in range(110): orphan_tx_pool[i].vout.append(CTxOut(nValue=COIN // 10, scriptPubKey=SCRIPT_PUB_KEY_OP_TRUE)) - with node.assert_debug_log(['mapOrphan overflow, removed 1 tx']): + with node.assert_debug_log(['orphanage overflow, removed 1 tx']): node.p2ps[0].send_txs_and_test(orphan_tx_pool, node, success=False) rejected_parent = CTransaction() From 87205f26b59a5ffd3940a339780c6084e47affb0 Mon Sep 17 00:00:00 2001 From: Kittywhiskers Van Gogh <63189531+kwvg@users.noreply.github.com> Date: Wed, 4 Sep 2024 16:29:28 +0000 Subject: [PATCH 03/14] merge bitcoin#21327: ignore transactions while in IBD `p2p_ibd_txrelay.py` was introduced in bitcoin#19423 but not backported as Dash doesn't have feefilter capabilities but this backport has the 
test check for additional cases which are within Dash's capabilities, so the test has been committed in with the feefilter portions minimally stripped out --- src/net_processing.cpp | 5 ++ test/functional/p2p_ibd_txrelay.py | 78 +++++++++++++++++++++++++++ test/functional/test_framework/p2p.py | 2 + test/functional/test_runner.py | 1 + 4 files changed, 86 insertions(+) create mode 100755 test/functional/p2p_ibd_txrelay.py diff --git a/src/net_processing.cpp b/src/net_processing.cpp index 8d6489d5c6..99f793881c 100644 --- a/src/net_processing.cpp +++ b/src/net_processing.cpp @@ -4053,6 +4053,11 @@ void PeerManagerImpl::ProcessMessage( } if (msg_type == NetMsgType::TX || msg_type == NetMsgType::DSTX) { + // Stop processing the transaction early if we are still in IBD since we don't + // have enough information to validate it yet. Sending unsolicited transactions + // is not considered a protocol violation, so don't punish the peer. + if (m_chainman.ActiveChainstate().IsInitialBlockDownload()) return; + CTransactionRef ptx; CCoinJoinBroadcastTx dstx; int nInvType = MSG_TX; diff --git a/test/functional/p2p_ibd_txrelay.py b/test/functional/p2p_ibd_txrelay.py new file mode 100755 index 0000000000..1af89d6a2f --- /dev/null +++ b/test/functional/p2p_ibd_txrelay.py @@ -0,0 +1,78 @@ +#!/usr/bin/env python3 +# Copyright (c) 2020 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. 
+"""Test transaction relay behavior during IBD: +- Don't request transactions +- Ignore all transaction messages +""" + +from decimal import Decimal +import time + +from test_framework.messages import ( + CInv, + COIN, + CTransaction, + from_hex, + msg_inv, + msg_tx, + MSG_TX, +) +from test_framework.p2p import ( + NONPREF_PEER_TX_DELAY, + P2PDataStore, + P2PInterface, + p2p_lock +) +from test_framework.test_framework import BitcoinTestFramework + +NORMAL_FEE_FILTER = Decimal(100) / COIN + +class P2PIBDTxRelayTest(BitcoinTestFramework): + def set_test_params(self): + self.setup_clean_chain = True + self.disable_mocktime = True + self.num_nodes = 2 + self.extra_args = [ + ["-minrelaytxfee={}".format(NORMAL_FEE_FILTER)], + ["-minrelaytxfee={}".format(NORMAL_FEE_FILTER)], + ] + + def run_test(self): + self.log.info("Check that nodes don't send getdatas for transactions while still in IBD") + peer_inver = self.nodes[0].add_p2p_connection(P2PDataStore()) + txid = 0xdeadbeef + peer_inver.send_and_ping(msg_inv([CInv(t=MSG_TX, h=txid)])) + # The node should not send a getdata, but if it did, it would first delay 2 seconds + self.nodes[0].setmocktime(int(time.time() + NONPREF_PEER_TX_DELAY)) + peer_inver.sync_send_with_ping() + with p2p_lock: + assert txid not in peer_inver.getdata_requests + self.nodes[0].disconnect_p2ps() + + self.log.info("Check that nodes don't process unsolicited transactions while still in IBD") + # A transaction hex pulled from tx_valid.json. There are no valid transactions since no UTXOs + # exist yet, but it should be a well-formed transaction. 
+ rawhex = "0100000001b14bdcbc3e01bdaad36cc08e81e69c82e1060bc14e518db2b49aa43ad90ba260000000004a01ff473" + \ + "04402203f16c6f40162ab686621ef3000b04e75418a0c0cb2d8aebeac894ae360ac1e780220ddc15ecdfc3507ac48e168" + \ + "1a33eb60996631bf6bf5bc0a0682c4db743ce7ca2b01ffffffff0140420f00000000001976a914660d4ef3a743e3e696a" + \ + "d990364e555c271ad504b88ac00000000" + assert self.nodes[1].decoderawtransaction(rawhex) # returns a dict, should not throw + tx = from_hex(CTransaction(), rawhex) + peer_txer = self.nodes[0].add_p2p_connection(P2PInterface()) + with self.nodes[0].assert_debug_log(expected_msgs=["received: tx"], unexpected_msgs=["was not accepted"]): + peer_txer.send_and_ping(msg_tx(tx)) + self.nodes[0].disconnect_p2ps() + + # Come out of IBD by generating a block + self.nodes[0].generate(1) + self.sync_all() + + self.log.info("Check that nodes process the same transaction, even when unsolicited, when no longer in IBD") + peer_txer = self.nodes[0].add_p2p_connection(P2PInterface()) + with self.nodes[0].assert_debug_log(expected_msgs=["was not accepted"]): + peer_txer.send_and_ping(msg_tx(tx)) + +if __name__ == '__main__': + P2PIBDTxRelayTest().main() diff --git a/test/functional/test_framework/p2p.py b/test/functional/test_framework/p2p.py index 4e88e5268a..1c9e9a2958 100755 --- a/test/functional/test_framework/p2p.py +++ b/test/functional/test_framework/p2p.py @@ -99,6 +99,8 @@ P2P_SERVICES = NODE_NETWORK | NODE_HEADERS_COMPRESSED P2P_SUBVERSION = "/python-p2p-tester:0.0.3%s/" # Value for relay that this test framework sends in its `version` message P2P_VERSION_RELAY = 1 +# Delay after receiving a tx inv before requesting transactions from non-preferred peers, in seconds +NONPREF_PEER_TX_DELAY = 2 MESSAGEMAP = { b"addr": msg_addr, diff --git a/test/functional/test_runner.py b/test/functional/test_runner.py index 80bd7abfb7..4ecef85f37 100755 --- a/test/functional/test_runner.py +++ b/test/functional/test_runner.py @@ -306,6 +306,7 @@ BASE_SCRIPTS = [ 
'rpc_estimatefee.py', 'p2p_unrequested_blocks.py', # NOTE: needs dash_hash to pass 'feature_shutdown.py', + 'p2p_ibd_txrelay.py', 'rpc_coinjoin.py', 'rpc_masternode.py', 'rpc_mnauth.py', From 6d690ede82d71381c6ac3dcb738824ba372b8552 Mon Sep 17 00:00:00 2001 From: Kittywhiskers Van Gogh <63189531+kwvg@users.noreply.github.com> Date: Tue, 4 Jan 2022 11:10:09 +0100 Subject: [PATCH 04/14] merge bitcoin#23970: Remove pointless and confusing shift in RelayAddress --- src/net_processing.cpp | 4 ++-- test/sanitizer_suppressions/ubsan | 1 - 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/src/net_processing.cpp b/src/net_processing.cpp index 99f793881c..85a9775333 100644 --- a/src/net_processing.cpp +++ b/src/net_processing.cpp @@ -2228,8 +2228,8 @@ void PeerManagerImpl::RelayAddress(NodeId originator, // Relay to a limited number of other nodes // Use deterministic randomness to send to the same nodes for 24 hours // at a time so the m_addr_knowns of the chosen nodes prevent repeats - uint64_t hashAddr = addr.GetHash(); - const CSipHasher hasher = m_connman.GetDeterministicRandomizer(RANDOMIZER_ID_ADDRESS_RELAY).Write(hashAddr << 32).Write((GetTime() + hashAddr) / (24 * 60 * 60)); + const uint64_t hashAddr{addr.GetHash()}; + const CSipHasher hasher{m_connman.GetDeterministicRandomizer(RANDOMIZER_ID_ADDRESS_RELAY).Write(hashAddr).Write((GetTime() + hashAddr) / (24 * 60 * 60))}; FastRandomContext insecure_rand; // Relay reachable addresses to 2 peers. Unreachable addresses are relayed randomly to 1 or 2 peers. 
diff --git a/test/sanitizer_suppressions/ubsan b/test/sanitizer_suppressions/ubsan index 1fd1b38819..e1a49e9544 100644 --- a/test/sanitizer_suppressions/ubsan +++ b/test/sanitizer_suppressions/ubsan @@ -101,7 +101,6 @@ shift-base:arith_uint256.cpp shift-base:crypto/ shift-base:hash.cpp shift-base:leveldb/ -shift-base:net_processing.cpp shift-base:streams.h shift-base:test/fuzz/crypto_diff_fuzz_chacha20.cpp shift-base:util/bip32.cpp From 112c4e0a16c9781ac23f4ee86b105ce6daa11a9f Mon Sep 17 00:00:00 2001 From: Kittywhiskers Van Gogh <63189531+kwvg@users.noreply.github.com> Date: Sun, 4 Aug 2024 13:45:01 +0000 Subject: [PATCH 05/14] merge bitcoin#24021: Rename and move PoissonNextSend functions --- src/net.cpp | 27 +++++---------------------- src/net.h | 11 ----------- src/net_processing.cpp | 33 ++++++++++++++++++++++++++++----- src/random.cpp | 7 +++++++ src/random.h | 12 ++++++++++++ src/test/fuzz/connman.cpp | 6 ------ 6 files changed, 52 insertions(+), 44 deletions(-) diff --git a/src/net.cpp b/src/net.cpp index 60d3d551ad..fc97d4c42e 100644 --- a/src/net.cpp +++ b/src/net.cpp @@ -2497,8 +2497,8 @@ void CConnman::ThreadOpenConnections(const std::vector connect, CDe auto start = GetTime(); // Minimum time before next feeler connection (in microseconds). 
- auto next_feeler = PoissonNextSend(start, FEELER_INTERVAL); - auto next_extra_block_relay = PoissonNextSend(start, EXTRA_BLOCK_RELAY_ONLY_PEER_INTERVAL); + auto next_feeler = GetExponentialRand(start, FEELER_INTERVAL); + auto next_extra_block_relay = GetExponentialRand(start, EXTRA_BLOCK_RELAY_ONLY_PEER_INTERVAL); const bool dnsseed = gArgs.GetBoolArg("-dnsseed", DEFAULT_DNSSEED); bool add_fixed_seeds = gArgs.GetBoolArg("-fixedseeds", DEFAULT_FIXEDSEEDS); @@ -2632,7 +2632,7 @@ void CConnman::ThreadOpenConnections(const std::vector connect, CDe // // This is similar to the logic for trying extra outbound (full-relay) // peers, except: - // - we do this all the time on a poisson timer, rather than just when + // - we do this all the time on an exponential timer, rather than just when // our tip is stale // - we potentially disconnect our next-youngest block-relay-only peer, if our // newest block-relay-only peer delivers a block more recently. @@ -2641,10 +2641,10 @@ void CConnman::ThreadOpenConnections(const std::vector connect, CDe // Because we can promote these connections to block-relay-only // connections, they do not get their own ConnectionType enum // (similar to how we deal with extra outbound peers). 
- next_extra_block_relay = PoissonNextSend(now, EXTRA_BLOCK_RELAY_ONLY_PEER_INTERVAL); + next_extra_block_relay = GetExponentialRand(now, EXTRA_BLOCK_RELAY_ONLY_PEER_INTERVAL); conn_type = ConnectionType::BLOCK_RELAY; } else if (now > next_feeler) { - next_feeler = PoissonNextSend(now, FEELER_INTERVAL); + next_feeler = GetExponentialRand(now, FEELER_INTERVAL); conn_type = ConnectionType::FEELER; fFeeler = true; } else if (nOutboundOnionRelay < m_max_outbound_onion && IsReachable(Network::NET_ONION)) { @@ -4234,23 +4234,6 @@ bool CConnman::IsMasternodeOrDisconnectRequested(const CService& addr) { }); } -std::chrono::microseconds CConnman::PoissonNextSendInbound(std::chrono::microseconds now, std::chrono::seconds average_interval) -{ - if (m_next_send_inv_to_incoming.load() < now) { - // If this function were called from multiple threads simultaneously - // it would possible that both update the next send variable, and return a different result to their caller. - // This is not possible in practice as only the net processing thread invokes this function. - m_next_send_inv_to_incoming = PoissonNextSend(now, average_interval); - } - return m_next_send_inv_to_incoming; -} - -std::chrono::microseconds PoissonNextSend(std::chrono::microseconds now, std::chrono::seconds average_interval) -{ - double unscaled = -log1p(GetRand(1ULL << 48) * -0.0000000000000035527136788 /* -1/2^48 */); - return now + std::chrono::duration_cast(unscaled * average_interval + 0.5us); -} - CConnman::NodesSnapshot::NodesSnapshot(const CConnman& connman, std::function filter, bool shuffle) { diff --git a/src/net.h b/src/net.h index e51d1a1e30..77aa23fd40 100644 --- a/src/net.h +++ b/src/net.h @@ -1205,12 +1205,6 @@ public: void WakeMessageHandler() EXCLUSIVE_LOCKS_REQUIRED(!mutexMsgProc); - /** Attempts to obfuscate tx time through exponentially distributed emitting. - Works assuming that a single interval is used. - Variable intervals will result in privacy decrease. 
- */ - std::chrono::microseconds PoissonNextSendInbound(std::chrono::microseconds now, std::chrono::seconds average_interval); - /** Return true if we should disconnect the peer for failing an inactivity check. */ bool ShouldRunInactivityChecks(const CNode& node, std::chrono::seconds now) const; @@ -1584,8 +1578,6 @@ private: */ std::atomic_bool m_start_extra_block_relay_peers{false}; - std::atomic m_next_send_inv_to_incoming{0us}; - /** * A vector of -bind=
:=onion arguments each of which is * an address and port that are designated for incoming Tor connections. @@ -1616,9 +1608,6 @@ private: friend struct ConnmanTestMsg; }; -/** Return a timestamp in the future (in microseconds) for exponentially distributed events. */ -std::chrono::microseconds PoissonNextSend(std::chrono::microseconds now, std::chrono::seconds average_interval); - /** Dump binary message to file, with timestamp */ void CaptureMessageToFile(const CAddress& addr, const std::string& msg_type, diff --git a/src/net_processing.cpp b/src/net_processing.cpp index 85a9775333..3129c30b85 100644 --- a/src/net_processing.cpp +++ b/src/net_processing.cpp @@ -565,6 +565,8 @@ private: */ std::map m_peer_map GUARDED_BY(m_peer_mutex); + std::atomic m_next_inv_to_inbounds{0us}; + /** Check whether the last unknown block a peer advertised is not yet known. */ void ProcessBlockAvailability(NodeId nodeid) EXCLUSIVE_LOCKS_REQUIRED(cs_main); /** Update tracking information about which blocks a peer is assumed to have. */ @@ -703,6 +705,15 @@ private: Mutex m_recent_confirmed_transactions_mutex; CRollingBloomFilter m_recent_confirmed_transactions GUARDED_BY(m_recent_confirmed_transactions_mutex){48'000, 0.000'001}; + /** + * For sending `inv`s to inbound peers, we use a single (exponentially + * distributed) timer for all peers. If we used a separate timer for each + * peer, a spy node could make multiple inbound connections to us to + * accurately determine when we received the transaction (and potentially + * determine the transaction's origin). */ + std::chrono::microseconds NextInvToInbounds(std::chrono::microseconds now, + std::chrono::seconds average_interval); + /* Returns a bool indicating whether we requested this block. 
* Also used if a block was /not/ received and timed out or started with another peer */ @@ -1032,6 +1043,18 @@ static void UpdatePreferredDownload(const CNode& node, const Peer& peer, CNodeSt nPreferredDownload += state->fPreferredDownload; } +std::chrono::microseconds PeerManagerImpl::NextInvToInbounds(std::chrono::microseconds now, + std::chrono::seconds average_interval) +{ + if (m_next_inv_to_inbounds.load() < now) { + // If this function were called from multiple threads simultaneously + // it would be possible that both update the next send variable, and return a different result to their caller. + // This is not possible in practice as only the net processing thread invokes this function. + m_next_inv_to_inbounds = GetExponentialRand(now, average_interval); + } + return m_next_inv_to_inbounds; +} + bool PeerManagerImpl::MarkBlockAsReceived(const uint256& hash) { std::map::iterator> >::iterator itInFlight = mapBlocksInFlight.find(hash); @@ -5279,13 +5302,13 @@ void PeerManagerImpl::MaybeSendAddr(CNode& node, Peer& peer, std::chrono::micros FastRandomContext insecure_rand; PushAddress(peer, local_addr, insecure_rand); } - peer.m_next_local_addr_send = PoissonNextSend(current_time, AVG_LOCAL_ADDRESS_BROADCAST_INTERVAL); + peer.m_next_local_addr_send = GetExponentialRand(current_time, AVG_LOCAL_ADDRESS_BROADCAST_INTERVAL); } // We sent an `addr` message to this peer recently. Nothing more to do.
if (current_time <= peer.m_next_addr_send) return; - peer.m_next_addr_send = PoissonNextSend(current_time, AVG_ADDRESS_BROADCAST_INTERVAL); + peer.m_next_addr_send = GetExponentialRand(current_time, AVG_ADDRESS_BROADCAST_INTERVAL); if (!Assume(peer.m_addrs_to_send.size() <= MAX_ADDR_TO_SEND)) { // Should be impossible since we always check size before adding to @@ -5660,12 +5683,12 @@ bool PeerManagerImpl::SendMessages(CNode* pto) if (peer->m_tx_relay->m_next_inv_send_time < current_time) { fSendTrickle = true; if (pto->IsInboundConn()) { - peer->m_tx_relay->m_next_inv_send_time = m_connman.PoissonNextSendInbound(current_time, INBOUND_INVENTORY_BROADCAST_INTERVAL); + peer->m_tx_relay->m_next_inv_send_time = NextInvToInbounds(current_time, INBOUND_INVENTORY_BROADCAST_INTERVAL); } else { // Use half the delay for Masternode outbound peers, as there is less privacy concern for them. peer->m_tx_relay->m_next_inv_send_time = pto->GetVerifiedProRegTxHash().IsNull() ? - PoissonNextSend(current_time, OUTBOUND_INVENTORY_BROADCAST_INTERVAL) : - PoissonNextSend(current_time, OUTBOUND_INVENTORY_BROADCAST_INTERVAL / 2); + GetExponentialRand(current_time, OUTBOUND_INVENTORY_BROADCAST_INTERVAL) : + GetExponentialRand(current_time, OUTBOUND_INVENTORY_BROADCAST_INTERVAL / 2); } } diff --git a/src/random.cpp b/src/random.cpp index 614ddeb11c..3ccbe82c4a 100644 --- a/src/random.cpp +++ b/src/random.cpp @@ -22,6 +22,7 @@ #include // for GetTimeMicros() #include +#include #include #include @@ -724,3 +725,9 @@ void RandomInit() ReportHardwareRand(); } + +std::chrono::microseconds GetExponentialRand(std::chrono::microseconds now, std::chrono::seconds average_interval) +{ + double unscaled = -std::log1p(GetRand(uint64_t{1} << 48) * -0.0000000000000035527136788 /* -1/2^48 */); + return now + std::chrono::duration_cast(unscaled * average_interval + 0.5us); +} diff --git a/src/random.h b/src/random.h index d461318e6c..71cdbf4a11 100644 --- a/src/random.h +++ b/src/random.h @@ -85,6 +85,18 @@ 
D GetRandomDuration(typename std::common_type::type max) noexcept }; constexpr auto GetRandMicros = GetRandomDuration; constexpr auto GetRandMillis = GetRandomDuration; + +/** + * Return a timestamp in the future sampled from an exponential distribution + * (https://en.wikipedia.org/wiki/Exponential_distribution). This distribution + * is memoryless and should be used for repeated network events (e.g. sending a + * certain type of message) to minimize leaking information to observers. + * + * The probability of an event occurring before time x is 1 - e^-(x/a) where a + * is the average interval between events. + * */ +std::chrono::microseconds GetExponentialRand(std::chrono::microseconds now, std::chrono::seconds average_interval); + int GetRandInt(int nMax) noexcept; uint256 GetRandHash() noexcept; diff --git a/src/test/fuzz/connman.cpp b/src/test/fuzz/connman.cpp index aa2056c451..dcbf31809a 100644 --- a/src/test/fuzz/connman.cpp +++ b/src/test/fuzz/connman.cpp @@ -91,12 +91,6 @@ FUZZ_TARGET_INIT(connman, initialize_connman) [&] { (void)connman.OutboundTargetReached(fuzzed_data_provider.ConsumeBool()); }, - [&] { - // Limit now to int32_t to avoid signed integer overflow - (void)connman.PoissonNextSendInbound( - std::chrono::microseconds{fuzzed_data_provider.ConsumeIntegral()}, - std::chrono::seconds{fuzzed_data_provider.ConsumeIntegral()}); - }, [&] { CSerializedNetMsg serialized_net_msg; serialized_net_msg.m_type = fuzzed_data_provider.ConsumeRandomLengthString(CMessageHeader::COMMAND_SIZE); From dbe41ea1417ef832e4df2bebc2ece70fa4b91060 Mon Sep 17 00:00:00 2001 From: Kittywhiskers Van Gogh <63189531+kwvg@users.noreply.github.com> Date: Sun, 4 Aug 2024 14:31:03 +0000 Subject: [PATCH 06/14] refactor: move object request logic to `PeerManagerImpl` Preparation for backporting bitcoin#24543, which makes `State()` internal to `PeerManagerImpl`.
--- src/governance/governance.cpp | 19 ++++------- src/governance/governance.h | 4 +-- src/init.cpp | 2 +- src/llmq/blockprocessor.cpp | 2 +- src/llmq/chainlocks.cpp | 3 +- src/llmq/dkgsessionhandler.cpp | 3 +- src/llmq/instantsend.cpp | 6 ++-- src/llmq/instantsend.h | 2 +- src/llmq/signing.cpp | 5 +-- src/masternode/sync.cpp | 10 +++--- src/masternode/sync.h | 5 +-- src/net.h | 4 --- src/net_processing.cpp | 58 +++++++++++++++------------------- src/net_processing.h | 5 +++ src/spork.cpp | 8 ++--- 15 files changed, 58 insertions(+), 78 deletions(-) diff --git a/src/governance/governance.cpp b/src/governance/governance.cpp index 89336190ed..dc88ac524e 100644 --- a/src/governance/governance.cpp +++ b/src/governance/governance.cpp @@ -157,10 +157,7 @@ PeerMsgRet CGovernanceManager::ProcessMessage(CNode& peer, CConnman& connman, Pe uint256 nHash = govobj.GetHash(); - { - LOCK(cs_main); - EraseObjectRequest(peer.GetId(), CInv(MSG_GOVERNANCE_OBJECT, nHash)); - } + WITH_LOCK(::cs_main, peerman.EraseObjectRequest(peer.GetId(), CInv(MSG_GOVERNANCE_OBJECT, nHash))); if (!m_mn_sync->IsBlockchainSynced()) { LogPrint(BCLog::GOBJECT, "MNGOVERNANCEOBJECT -- masternode list not synced\n"); @@ -223,11 +220,7 @@ PeerMsgRet CGovernanceManager::ProcessMessage(CNode& peer, CConnman& connman, Pe vRecv >> vote; uint256 nHash = vote.GetHash(); - - { - LOCK(cs_main); - EraseObjectRequest(peer.GetId(), CInv(MSG_GOVERNANCE_OBJECT_VOTE, nHash)); - } + WITH_LOCK(::cs_main, peerman.EraseObjectRequest(peer.GetId(), CInv(MSG_GOVERNANCE_OBJECT_VOTE, nHash))); // Ignore such messages until masternode list is synced if (!m_mn_sync->IsBlockchainSynced()) { @@ -1222,13 +1215,13 @@ void CGovernanceManager::RequestGovernanceObject(CNode* pfrom, const uint256& nH connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::MNGOVERNANCESYNC, nHash, filter)); } -int CGovernanceManager::RequestGovernanceObjectVotes(CNode& peer, CConnman& connman) const +int CGovernanceManager::RequestGovernanceObjectVotes(CNode& 
peer, CConnman& connman, const PeerManager& peerman) const { const std::vector vNodeCopy{&peer}; - return RequestGovernanceObjectVotes(vNodeCopy, connman); + return RequestGovernanceObjectVotes(vNodeCopy, connman, peerman); } -int CGovernanceManager::RequestGovernanceObjectVotes(const std::vector& vNodesCopy, CConnman& connman) const +int CGovernanceManager::RequestGovernanceObjectVotes(const std::vector& vNodesCopy, CConnman& connman, const PeerManager& peerman) const { static std::map > mapAskedRecently; @@ -1304,7 +1297,7 @@ int CGovernanceManager::RequestGovernanceObjectVotes(const std::vector& // stop early to prevent setAskFor overflow { LOCK(cs_main); - size_t nProjectedSize = GetRequestedObjectCount(pnode->GetId()) + nProjectedVotes; + size_t nProjectedSize = peerman.GetRequestedObjectCount(pnode->GetId()) + nProjectedVotes; if (nProjectedSize > MAX_INV_SZ) continue; // to early to ask the same node if (mapAskedRecently[nHashGovobj].count(pnode->addr)) continue; diff --git a/src/governance/governance.h b/src/governance/governance.h index 9e38f411fa..e8b0a075dd 100644 --- a/src/governance/governance.h +++ b/src/governance/governance.h @@ -357,8 +357,8 @@ public: void InitOnLoad(); - int RequestGovernanceObjectVotes(CNode& peer, CConnman& connman) const; - int RequestGovernanceObjectVotes(const std::vector& vNodesCopy, CConnman& connman) const; + int RequestGovernanceObjectVotes(CNode& peer, CConnman& connman, const PeerManager& peerman) const; + int RequestGovernanceObjectVotes(const std::vector& vNodesCopy, CConnman& connman, const PeerManager& peerman) const; /* * Trigger Management (formerly CGovernanceTriggerManager) diff --git a/src/init.cpp b/src/init.cpp index 2b9f2bb31f..144d258b94 100644 --- a/src/init.cpp +++ b/src/init.cpp @@ -2220,7 +2220,7 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info) // ********************************************************* Step 10a: schedule Dash-specific tasks 
node.scheduler->scheduleEvery(std::bind(&CNetFulfilledRequestManager::DoMaintenance, std::ref(*node.netfulfilledman)), std::chrono::minutes{1}); - node.scheduler->scheduleEvery(std::bind(&CMasternodeSync::DoMaintenance, std::ref(*node.mn_sync)), std::chrono::seconds{1}); + node.scheduler->scheduleEvery(std::bind(&CMasternodeSync::DoMaintenance, std::ref(*node.mn_sync), std::cref(*node.peerman)), std::chrono::seconds{1}); node.scheduler->scheduleEvery(std::bind(&CMasternodeUtils::DoMaintenance, std::ref(*node.connman), std::ref(*node.dmnman), std::ref(*node.mn_sync), std::ref(*node.cj_ctx)), std::chrono::minutes{1}); node.scheduler->scheduleEvery(std::bind(&CDeterministicMNManager::DoMaintenance, std::ref(*node.dmnman)), std::chrono::seconds{10}); diff --git a/src/llmq/blockprocessor.cpp b/src/llmq/blockprocessor.cpp index 98c827ef6c..6030c9330e 100644 --- a/src/llmq/blockprocessor.cpp +++ b/src/llmq/blockprocessor.cpp @@ -60,7 +60,7 @@ PeerMsgRet CQuorumBlockProcessor::ProcessMessage(const CNode& peer, std::string_ CFinalCommitment qc; vRecv >> qc; - WITH_LOCK(cs_main, EraseObjectRequest(peer.GetId(), CInv(MSG_QUORUM_FINAL_COMMITMENT, ::SerializeHash(qc)))); + WITH_LOCK(::cs_main, Assert(m_peerman)->EraseObjectRequest(peer.GetId(), CInv(MSG_QUORUM_FINAL_COMMITMENT, ::SerializeHash(qc)))); if (qc.IsNull()) { LogPrint(BCLog::LLMQ, "CQuorumBlockProcessor::%s -- null commitment from peer=%d\n", __func__, peer.GetId()); diff --git a/src/llmq/chainlocks.cpp b/src/llmq/chainlocks.cpp index 39130b5a07..cfd2938935 100644 --- a/src/llmq/chainlocks.cpp +++ b/src/llmq/chainlocks.cpp @@ -115,8 +115,7 @@ PeerMsgRet CChainLocksHandler::ProcessNewChainLock(const NodeId from, const llmq CInv clsigInv(MSG_CLSIG, hash); if (from != -1) { - LOCK(cs_main); - EraseObjectRequest(from, clsigInv); + WITH_LOCK(::cs_main, Assert(m_peerman)->EraseObjectRequest(from, clsigInv)); } { diff --git a/src/llmq/dkgsessionhandler.cpp b/src/llmq/dkgsessionhandler.cpp index e6f301cfa0..7b4b8c4ab2 100644 
--- a/src/llmq/dkgsessionhandler.cpp +++ b/src/llmq/dkgsessionhandler.cpp @@ -72,8 +72,7 @@ void CDKGPendingMessages::PushPendingMessage(NodeId from, PeerManager* peerman, uint256 hash = hw.GetHash(); if (from != -1) { - LOCK(cs_main); - EraseObjectRequest(from, CInv(invType, hash)); + WITH_LOCK(::cs_main, Assert(m_peerman.load())->EraseObjectRequest(from, CInv(invType, hash))); } LOCK(cs_messages); diff --git a/src/llmq/instantsend.cpp b/src/llmq/instantsend.cpp index 037dd2f1f6..75f016a194 100644 --- a/src/llmq/instantsend.cpp +++ b/src/llmq/instantsend.cpp @@ -762,7 +762,7 @@ PeerMsgRet CInstantSendManager::ProcessMessageInstantSendLock(const CNode& pfrom { auto hash = ::SerializeHash(*islock); - WITH_LOCK(cs_main, EraseObjectRequest(pfrom.GetId(), CInv(MSG_ISDLOCK, hash))); + WITH_LOCK(::cs_main, Assert(m_peerman)->EraseObjectRequest(pfrom.GetId(), CInv(MSG_ISDLOCK, hash))); if (!islock->TriviallyValid()) { return tl::unexpected{100}; @@ -1446,7 +1446,7 @@ void CInstantSendManager::RemoveConflictingLock(const uint256& islockHash, const } } -void CInstantSendManager::AskNodesForLockedTx(const uint256& txid, const CConnman& connman, const PeerManager& peerman, bool is_masternode) +void CInstantSendManager::AskNodesForLockedTx(const uint256& txid, const CConnman& connman, PeerManager& peerman, bool is_masternode) { std::vector nodesToAskFor; nodesToAskFor.reserve(4); @@ -1476,7 +1476,7 @@ void CInstantSendManager::AskNodesForLockedTx(const uint256& txid, const CConnma txid.ToString(), pnode->GetId()); CInv inv(MSG_TX, txid); - RequestObject(pnode->GetId(), inv, GetTime(), is_masternode, /* fForce = */ true); + peerman.RequestObject(pnode->GetId(), inv, GetTime(), is_masternode, /* fForce = */ true); } } for (CNode* pnode : nodesToAskFor) { diff --git a/src/llmq/instantsend.h b/src/llmq/instantsend.h index 733d2afd7a..a823354f4e 100644 --- a/src/llmq/instantsend.h +++ b/src/llmq/instantsend.h @@ -315,7 +315,7 @@ private: EXCLUSIVE_LOCKS_REQUIRED(!cs_inputReqests, 
!cs_nonLocked, !cs_pendingRetry); void ResolveBlockConflicts(const uint256& islockHash, const CInstantSendLock& islock) EXCLUSIVE_LOCKS_REQUIRED(!cs_inputReqests, !cs_nonLocked, !cs_pendingLocks, !cs_pendingRetry); - static void AskNodesForLockedTx(const uint256& txid, const CConnman& connman, const PeerManager& peerman, + static void AskNodesForLockedTx(const uint256& txid, const CConnman& connman, PeerManager& peerman, bool is_masternode); void ProcessPendingRetryLockTxs() EXCLUSIVE_LOCKS_REQUIRED(!cs_creating, !cs_inputReqests, !cs_nonLocked, !cs_pendingRetry); diff --git a/src/llmq/signing.cpp b/src/llmq/signing.cpp index 8200146d32..b866cfc1e3 100644 --- a/src/llmq/signing.cpp +++ b/src/llmq/signing.cpp @@ -604,10 +604,7 @@ static bool PreVerifyRecoveredSig(const CQuorumManager& quorum_manager, const CR PeerMsgRet CSigningManager::ProcessMessageRecoveredSig(const CNode& pfrom, const std::shared_ptr& recoveredSig) { - { - LOCK(cs_main); - EraseObjectRequest(pfrom.GetId(), CInv(MSG_QUORUM_RECOVERED_SIG, recoveredSig->GetHash())); - } + WITH_LOCK(::cs_main, Assert(m_peerman)->EraseObjectRequest(pfrom.GetId(), CInv(MSG_QUORUM_RECOVERED_SIG, recoveredSig->GetHash()))); bool ban = false; if (!PreVerifyRecoveredSig(qman, *recoveredSig, ban)) { diff --git a/src/masternode/sync.cpp b/src/masternode/sync.cpp index 9e3f53a0c2..b4fc5440bf 100644 --- a/src/masternode/sync.cpp +++ b/src/masternode/sync.cpp @@ -115,7 +115,7 @@ void CMasternodeSync::ProcessMessage(const CNode& peer, std::string_view msg_typ LogPrint(BCLog::MNSYNC, "SYNCSTATUSCOUNT -- got inventory count: nItemID=%d nCount=%d peer=%d\n", nItemID, nCount, peer.GetId()); } -void CMasternodeSync::ProcessTick() +void CMasternodeSync::ProcessTick(const PeerManager& peerman) { assert(m_netfulfilledman.IsValid()); @@ -144,7 +144,7 @@ void CMasternodeSync::ProcessTick() // gradually request the rest of the votes after sync finished if(IsSynced()) { - m_govman.RequestGovernanceObjectVotes(snap.Nodes(), connman); + 
m_govman.RequestGovernanceObjectVotes(snap.Nodes(), connman, peerman); return; } @@ -264,7 +264,7 @@ void CMasternodeSync::ProcessTick() if(!m_netfulfilledman.HasFulfilledRequest(pnode->addr, "governance-sync")) { continue; // to early for this node } - int nObjsLeftToAsk = m_govman.RequestGovernanceObjectVotes(*pnode, connman); + int nObjsLeftToAsk = m_govman.RequestGovernanceObjectVotes(*pnode, connman, peerman); // check for data if(nObjsLeftToAsk == 0) { static int64_t nTimeNoObjectsLeft = 0; @@ -368,9 +368,9 @@ void CMasternodeSync::UpdatedBlockTip(const CBlockIndex *pindexTip, const CBlock pindexNew->nHeight, pindexTip->nHeight, fInitialDownload, fReachedBestHeader); } -void CMasternodeSync::DoMaintenance() +void CMasternodeSync::DoMaintenance(const PeerManager& peerman) { if (ShutdownRequested()) return; - ProcessTick(); + ProcessTick(peerman); } diff --git a/src/masternode/sync.h b/src/masternode/sync.h index 2692cd1736..d61ef070ba 100644 --- a/src/masternode/sync.h +++ b/src/masternode/sync.h @@ -15,6 +15,7 @@ class CGovernanceManager; class CMasternodeSync; class CNetFulfilledRequestManager; class CNode; +class PeerManager; static constexpr int MASTERNODE_SYNC_BLOCKCHAIN = 1; static constexpr int MASTERNODE_SYNC_GOVERNANCE = 4; @@ -71,13 +72,13 @@ public: void SwitchToNextAsset(); void ProcessMessage(const CNode& peer, std::string_view msg_type, CDataStream& vRecv) const; - void ProcessTick(); + void ProcessTick(const PeerManager& peerman); void AcceptedBlockHeader(const CBlockIndex *pindexNew); void NotifyHeaderTip(const CBlockIndex *pindexNew, bool fInitialDownload); void UpdatedBlockTip(const CBlockIndex *pindexTip, const CBlockIndex *pindexNew, bool fInitialDownload); - void DoMaintenance(); + void DoMaintenance(const PeerManager& peerman); }; #endif // BITCOIN_MASTERNODE_SYNC_H diff --git a/src/net.h b/src/net.h index 77aa23fd40..71ff10861c 100644 --- a/src/net.h +++ b/src/net.h @@ -1654,10 +1654,6 @@ public: extern RecursiveMutex cs_main; -void 
EraseObjectRequest(NodeId nodeId, const CInv& inv) EXCLUSIVE_LOCKS_REQUIRED(cs_main); -void RequestObject(NodeId nodeId, const CInv& inv, std::chrono::microseconds current_time, bool is_masternode, bool fForce=false) EXCLUSIVE_LOCKS_REQUIRED(cs_main); -size_t GetRequestedObjectCount(NodeId nodeId) EXCLUSIVE_LOCKS_REQUIRED(cs_main); - /** Protect desirable or disadvantaged inbound peers from eviction by ratio. * * This function protects half of the peers which have been connected the diff --git a/src/net_processing.cpp b/src/net_processing.cpp index 3129c30b85..78dbe58a01 100644 --- a/src/net_processing.cpp +++ b/src/net_processing.cpp @@ -421,6 +421,10 @@ public: const std::chrono::microseconds time_received, const std::atomic& interruptMsgProc) override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, !m_recent_confirmed_transactions_mutex); bool IsBanned(NodeId pnode) override EXCLUSIVE_LOCKS_REQUIRED(cs_main, !m_peer_mutex); + void EraseObjectRequest(NodeId nodeid, const CInv& inv) override EXCLUSIVE_LOCKS_REQUIRED(::cs_main); + void RequestObject(NodeId nodeid, const CInv& inv, std::chrono::microseconds current_time, + bool is_masternode, bool fForce = false) override EXCLUSIVE_LOCKS_REQUIRED(::cs_main); + size_t GetRequestedObjectCount(NodeId nodeid) const override EXCLUSIVE_LOCKS_REQUIRED(::cs_main); bool IsInvInFilter(NodeId nodeid, const uint256& hash) const override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex); private: @@ -1350,27 +1354,20 @@ void PeerManagerImpl::PushNodeVersion(CNode& pnode, const Peer& peer) } } -void EraseObjectRequest(CNodeState* nodestate, const CInv& inv) EXCLUSIVE_LOCKS_REQUIRED(cs_main) +void PeerManagerImpl::EraseObjectRequest(NodeId nodeid, const CInv& inv) { AssertLockHeld(cs_main); + + CNodeState* state = State(nodeid); + if (state == nullptr) + return; + LogPrint(BCLog::NET, "%s -- inv=(%s)\n", __func__, inv.ToString()); g_already_asked_for.erase(inv.hash); g_erased_object_requests.insert(std::make_pair(inv.hash, GetTime())); - if 
(nodestate) { - nodestate->m_object_download.m_object_announced.erase(inv); - nodestate->m_object_download.m_object_in_flight.erase(inv); - } -} - -void EraseObjectRequest(NodeId nodeId, const CInv& inv) EXCLUSIVE_LOCKS_REQUIRED(cs_main) -{ - AssertLockHeld(cs_main); - auto* state = State(nodeId); - if (!state) { - return; - } - EraseObjectRequest(state, inv); + state->m_object_download.m_object_announced.erase(inv); + state->m_object_download.m_object_in_flight.erase(inv); } std::chrono::microseconds GetObjectRequestTime(const CInv& inv) EXCLUSIVE_LOCKS_REQUIRED(cs_main) @@ -1443,9 +1440,15 @@ std::chrono::microseconds CalculateObjectGetDataTime(const CInv& inv, std::chron return process_time; } -void RequestObject(CNodeState* state, const CInv& inv, std::chrono::microseconds current_time, bool is_masternode, bool fForce = false) EXCLUSIVE_LOCKS_REQUIRED(cs_main) +void PeerManagerImpl::RequestObject(NodeId nodeid, const CInv& inv, std::chrono::microseconds current_time, + bool is_masternode, bool fForce) { AssertLockHeld(cs_main); + + CNodeState* state = State(nodeid); + if (state == nullptr) + return; + CNodeState::ObjectDownloadState& peer_download_state = state->m_object_download; if (peer_download_state.m_object_announced.size() >= MAX_PEER_OBJECT_ANNOUNCEMENTS || peer_download_state.m_object_process_time.size() >= MAX_PEER_OBJECT_ANNOUNCEMENTS || @@ -1471,23 +1474,14 @@ void RequestObject(CNodeState* state, const CInv& inv, std::chrono::microseconds LogPrint(BCLog::NET, "%s -- inv=(%s), current_time=%d, process_time=%d, delta=%d\n", __func__, inv.ToString(), current_time.count(), process_time.count(), (process_time - current_time).count()); } -void RequestObject(NodeId nodeId, const CInv& inv, std::chrono::microseconds current_time, bool is_masternode, bool fForce) EXCLUSIVE_LOCKS_REQUIRED(cs_main) +size_t PeerManagerImpl::GetRequestedObjectCount(NodeId nodeid) const { AssertLockHeld(cs_main); - auto* state = State(nodeId); - if (!state) { - return; - } - 
RequestObject(state, inv, current_time, is_masternode, fForce); -} -size_t GetRequestedObjectCount(NodeId nodeId) -{ - AssertLockHeld(cs_main); - auto* state = State(nodeId); - if (!state) { + CNodeState* state = State(nodeid); + if (state == nullptr) return 0; - } + return state->m_object_download.m_object_process_time.size(); } @@ -3795,7 +3789,7 @@ void PeerManagerImpl::ProcessMessage( } bool allowWhileInIBD = allowWhileInIBDObjs.count(inv.type); if (allowWhileInIBD || !m_chainman.ActiveChainstate().IsInitialBlockDownload()) { - RequestObject(State(pfrom.GetId()), inv, current_time, is_masternode); + RequestObject(pfrom.GetId(), inv, current_time, is_masternode); } } } @@ -4183,11 +4177,11 @@ void PeerManagerImpl::ProcessMessage( for (const uint256& parent_txid : unique_parents) { CInv _inv(MSG_TX, parent_txid); AddKnownInv(*peer, _inv.hash); - if (!AlreadyHave(_inv)) RequestObject(State(pfrom.GetId()), _inv, current_time, is_masternode); + if (!AlreadyHave(_inv)) RequestObject(pfrom.GetId(), _inv, current_time, is_masternode); // We don't know if the previous tx was a regular or a mixing one, try both CInv _inv2(MSG_DSTX, parent_txid); AddKnownInv(*peer, _inv2.hash); - if (!AlreadyHave(_inv2)) RequestObject(State(pfrom.GetId()), _inv2, current_time, is_masternode); + if (!AlreadyHave(_inv2)) RequestObject(pfrom.GetId(), _inv2, current_time, is_masternode); } if (m_orphanage.AddTx(ptx, pfrom.GetId())) { diff --git a/src/net_processing.h b/src/net_processing.h index 33b12b24ad..2b281df0c9 100644 --- a/src/net_processing.h +++ b/src/net_processing.h @@ -128,6 +128,11 @@ public: const std::chrono::microseconds time_received, const std::atomic& interruptMsgProc) = 0; virtual bool IsBanned(NodeId pnode) = 0; + + virtual void EraseObjectRequest(NodeId nodeid, const CInv& inv) = 0; + virtual void RequestObject(NodeId nodeid, const CInv& inv, std::chrono::microseconds current_time, + bool is_masternode, bool fForce = false) = 0; + virtual size_t 
GetRequestedObjectCount(NodeId nodeid) const = 0; }; #endif // BITCOIN_NET_PROCESSING_H diff --git a/src/spork.cpp b/src/spork.cpp index 6fd7a6388b..058d9b9b40 100644 --- a/src/spork.cpp +++ b/src/spork.cpp @@ -145,12 +145,8 @@ PeerMsgRet CSporkManager::ProcessSpork(const CNode& peer, PeerManager& peerman, uint256 hash = spork.GetHash(); - std::string strLogMsg; - { - LOCK(cs_main); - EraseObjectRequest(peer.GetId(), CInv(MSG_SPORK, hash)); - strLogMsg = strprintf("SPORK -- hash: %s id: %d value: %10d peer=%d", hash.ToString(), spork.nSporkID, spork.nValue, peer.GetId()); - } + WITH_LOCK(::cs_main, peerman.EraseObjectRequest(peer.GetId(), CInv(MSG_SPORK, hash))); + std::string strLogMsg{strprintf("SPORK -- hash: %s id: %d value: %10d peer=%d", hash.ToString(), spork.nSporkID, spork.nValue, peer.GetId())}; if (spork.nTimeSigned > GetAdjustedTime() + 2 * 60 * 60) { LogPrint(BCLog::SPORK, "CSporkManager::ProcessSpork -- ERROR: too far into the future\n"); From f6c943922fd77d81756cf3d846ce6f92b301763c Mon Sep 17 00:00:00 2001 From: Kittywhiskers Van Gogh <63189531+kwvg@users.noreply.github.com> Date: Sun, 4 Aug 2024 12:03:39 +0000 Subject: [PATCH 07/14] merge bitcoin#24543: Move remaining globals into PeerManagerImpl --- src/net_processing.cpp | 947 +++++++++++++++-------------- src/net_processing.h | 3 + src/test/denialofservice_tests.cpp | 4 +- 3 files changed, 478 insertions(+), 476 deletions(-) diff --git a/src/net_processing.cpp b/src/net_processing.cpp index 78dbe58a01..7e812e30b0 100644 --- a/src/net_processing.cpp +++ b/src/net_processing.cpp @@ -370,424 +370,6 @@ struct Peer { using PeerRef = std::shared_ptr; -class PeerManagerImpl final : public PeerManager -{ -public: - PeerManagerImpl(const CChainParams& chainparams, CConnman& connman, AddrMan& addrman, BanMan* banman, - ChainstateManager& chainman, CTxMemPool& pool, - CMasternodeMetaMan& mn_metaman, CMasternodeSync& mn_sync, - CGovernanceManager& govman, CSporkManager& sporkman, - const 
CActiveMasternodeManager* const mn_activeman, - const std::unique_ptr& dmnman, - const std::unique_ptr& cj_ctx, - const std::unique_ptr& llmq_ctx, - bool ignore_incoming_txs); - - /** Overridden from CValidationInterface. */ - void BlockConnected(const std::shared_ptr& pblock, const CBlockIndex* pindexConnected) override - EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, !m_recent_confirmed_transactions_mutex); - void BlockDisconnected(const std::shared_ptr &block, const CBlockIndex* pindex) override - EXCLUSIVE_LOCKS_REQUIRED(!m_recent_confirmed_transactions_mutex); - void UpdatedBlockTip(const CBlockIndex *pindexNew, const CBlockIndex *pindexFork, bool fInitialDownload) override - EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex); - void BlockChecked(const CBlock& block, const BlockValidationState& state) override - EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex); - void NewPoWValidBlock(const CBlockIndex *pindex, const std::shared_ptr& pblock) override; - - /** Implement NetEventsInterface */ - void InitializeNode(CNode& node, ServiceFlags our_services) override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex); - void FinalizeNode(const CNode& node) override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex); - bool ProcessMessages(CNode* pfrom, std::atomic& interrupt) override - EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, !m_recent_confirmed_transactions_mutex); - bool SendMessages(CNode* pto) override EXCLUSIVE_LOCKS_REQUIRED(pto->cs_sendProcessing) - EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, !m_recent_confirmed_transactions_mutex); - - /** Implement PeerManager */ - void StartScheduledTasks(CScheduler& scheduler) override; - void CheckForStaleTipAndEvictPeers() override; - std::optional FetchBlock(NodeId peer_id, const CBlockIndex& block_index) override - EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex); - bool GetNodeStateStats(NodeId nodeid, CNodeStateStats& stats) const override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex); - bool IgnoresIncomingTxs() override { return m_ignore_incoming_txs; } - void SendPings() override 
EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);; - void PushInventory(NodeId nodeid, const CInv& inv) override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex); - void RelayInv(CInv &inv, const int minProtoVersion) override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex); - void RelayInvFiltered(CInv &inv, const CTransaction &relatedTx, const int minProtoVersion) override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex); - void RelayInvFiltered(CInv &inv, const uint256 &relatedTxHash, const int minProtoVersion) override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex); - void RelayTransaction(const uint256& txid) override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex); - void SetBestHeight(int height) override { m_best_height = height; }; - void Misbehaving(const NodeId pnode, const int howmuch, const std::string& message = "") override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex); - void ProcessMessage(CNode& pfrom, const std::string& msg_type, CDataStream& vRecv, - const std::chrono::microseconds time_received, const std::atomic& interruptMsgProc) override - EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, !m_recent_confirmed_transactions_mutex); - bool IsBanned(NodeId pnode) override EXCLUSIVE_LOCKS_REQUIRED(cs_main, !m_peer_mutex); - void EraseObjectRequest(NodeId nodeid, const CInv& inv) override EXCLUSIVE_LOCKS_REQUIRED(::cs_main); - void RequestObject(NodeId nodeid, const CInv& inv, std::chrono::microseconds current_time, - bool is_masternode, bool fForce = false) override EXCLUSIVE_LOCKS_REQUIRED(::cs_main); - size_t GetRequestedObjectCount(NodeId nodeid) const override EXCLUSIVE_LOCKS_REQUIRED(::cs_main); - bool IsInvInFilter(NodeId nodeid, const uint256& hash) const override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex); - -private: - /** Helper to process result of external handlers of message */ - void ProcessPeerMsgRet(const PeerMsgRet& ret, CNode& pfrom) EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex); - - /** Consider evicting an outbound peer based on the amount of time they've been behind our tip */ - void 
ConsiderEviction(CNode& pto, Peer& peer, std::chrono::seconds time_in_seconds) EXCLUSIVE_LOCKS_REQUIRED(cs_main); - - /** If we have extra outbound peers, try to disconnect the one with the oldest block announcement */ - void EvictExtraOutboundPeers(std::chrono::seconds now) EXCLUSIVE_LOCKS_REQUIRED(cs_main); - - /** Retrieve unbroadcast transactions from the mempool and reattempt sending to peers */ - void ReattemptInitialBroadcast(CScheduler& scheduler) EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex); - - /** Get a shared pointer to the Peer object. - * May return an empty shared_ptr if the Peer object can't be found. */ - PeerRef GetPeerRef(NodeId id) const EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex); - - /** Get a shared pointer to the Peer object and remove it from m_peer_map. - * May return an empty shared_ptr if the Peer object can't be found. */ - PeerRef RemovePeer(NodeId id) EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex); - - /** - * Potentially mark a node discouraged based on the contents of a BlockValidationState object - * - * @param[in] via_compact_block this bool is passed in because net_processing should - * punish peers differently depending on whether the data was provided in a compact - * block message or not. If the compact block had a valid header, but contained invalid - * txs, the peer should not be punished. See BIP 152. - * - * @return Returns true if the peer was punished (probably disconnected) - */ - bool MaybePunishNodeForBlock(NodeId nodeid, const BlockValidationState& state, - bool via_compact_block, const std::string& message = "") - EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex); - - /** - * Potentially ban a node based on the contents of a TxValidationState object - * - * @return Returns true if the peer was punished (probably disconnected) - * - * Changes here may need to be reflected in TxRelayMayResultInDisconnect(). 
- */ - bool MaybePunishNodeForTx(NodeId nodeid, const TxValidationState& state, const std::string& message = "") - EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex); - - /** Maybe disconnect a peer and discourage future connections from its address. - * - * @param[in] pnode The node to check. - * @param[in] peer The peer object to check. - * @return True if the peer was marked for disconnection in this function - */ - bool MaybeDiscourageAndDisconnect(CNode& pnode, Peer& peer); - - void ProcessOrphanTx(std::set& orphan_work_set) EXCLUSIVE_LOCKS_REQUIRED(cs_main, g_cs_orphans) - EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex); - /** Process a single headers message from a peer. */ - void ProcessHeadersMessage(CNode& pfrom, Peer& peer, - const std::vector& headers, - bool via_compact_block) - EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex); - /** Various helpers for headers processing, invoked by ProcessHeadersMessage() */ - /** Deal with state tracking and headers sync for peers that send the - * occasional non-connecting header (this can happen due to BIP 130 headers - * announcements for blocks interacting with the 2hr (MAX_FUTURE_BLOCK_TIME) rule). */ - void HandleFewUnconnectingHeaders(CNode& pfrom, Peer& peer, const std::vector& headers) - EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex); - /** Return true if the headers connect to each other, false otherwise */ - bool CheckHeadersAreContinuous(const std::vector& headers) const; - /** Request further headers from this peer with a given locator. - * We don't issue a getheaders message if we have a recent one outstanding. - * This returns true if a getheaders is actually sent, and false otherwise. 
- */ - bool MaybeSendGetHeaders(CNode& pfrom, const std::string& msg_type, const CBlockLocator& locator, Peer& peer); - /** Potentially fetch blocks from this peer upon receipt of a new headers tip */ - void HeadersDirectFetchBlocks(CNode& pfrom, const Peer& peer, const CBlockIndex* pindexLast); - /** Update peer state based on received headers message */ - void UpdatePeerStateForReceivedHeaders(CNode& pfrom, const CBlockIndex *pindexLast, bool received_new_header, bool may_have_more_headers); - - void SendBlockTransactions(CNode& pfrom, const CBlock& block, const BlockTransactionsRequest& req) - EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex); - - /** Send a version message to a peer */ - void PushNodeVersion(CNode& pnode, const Peer& peer); - - /** Send a ping message every PING_INTERVAL or if requested via RPC. May - * mark the peer to be disconnected if a ping has timed out. - * We use mockable time for ping timeouts, so setmocktime may cause pings - * to time out. */ - void MaybeSendPing(CNode& node_to, Peer& peer, std::chrono::microseconds now); - - /** Send `addr` messages on a regular schedule. */ - void MaybeSendAddr(CNode& node, Peer& peer, std::chrono::microseconds current_time); - - /** Relay (gossip) an address to a few randomly chosen nodes. - * - * @param[in] originator The id of the peer that sent us the address. We don't want to relay it back. - * @param[in] addr Address to relay. - * @param[in] fReachable Whether the address' network is reachable. We relay unreachable - * addresses less. - */ - void RelayAddress(NodeId originator, const CAddress& addr, bool fReachable) EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex); - - const CChainParams& m_chainparams; - CConnman& m_connman; - AddrMan& m_addrman; - /** Pointer to this node's banman. May be nullptr - check existence before dereferencing. 
*/ - BanMan* const m_banman; - ChainstateManager& m_chainman; - CTxMemPool& m_mempool; - const std::unique_ptr& m_dmnman; - const std::unique_ptr& m_cj_ctx; - const std::unique_ptr& m_llmq_ctx; - CMasternodeMetaMan& m_mn_metaman; - CMasternodeSync& m_mn_sync; - CGovernanceManager& m_govman; - CSporkManager& m_sporkman; - const CActiveMasternodeManager* const m_mn_activeman; - - /** The height of the best chain */ - std::atomic m_best_height{-1}; - - /** Next time to check for stale tip */ - std::chrono::seconds m_stale_tip_check_time GUARDED_BY(cs_main){0s}; - - /** Whether this node is running in blocks only mode */ - const bool m_ignore_incoming_txs; - - /** Whether we've completed initial sync yet, for determining when to turn - * on extra block-relay-only peers. */ - bool m_initial_sync_finished GUARDED_BY(cs_main){false}; - - /** Protects m_peer_map. This mutex must not be locked while holding a lock - * on any of the mutexes inside a Peer object. */ - mutable Mutex m_peer_mutex; - /** - * Map of all Peer objects, keyed by peer id. This map is protected - * by the m_peer_mutex. Once a shared pointer reference is - * taken, the lock may be released. Individual fields are protected by - * their own locks. - */ - std::map m_peer_map GUARDED_BY(m_peer_mutex); - - std::atomic m_next_inv_to_inbounds{0us}; - - /** Check whether the last unknown block a peer advertised is not yet known. */ - void ProcessBlockAvailability(NodeId nodeid) EXCLUSIVE_LOCKS_REQUIRED(cs_main); - /** Update tracking information about which blocks a peer is assumed to have. 
*/ - void UpdateBlockAvailability(NodeId nodeid, const uint256& hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main); - bool CanDirectFetch() EXCLUSIVE_LOCKS_REQUIRED(cs_main); - - /** - * To prevent fingerprinting attacks, only send blocks/headers outside of the - * active chain if they are no more than a month older (both in time, and in - * best equivalent proof of work) than the best header chain we know about and - * we fully-validated them at some point. - */ - bool BlockRequestAllowed(const CBlockIndex* pindex) EXCLUSIVE_LOCKS_REQUIRED(cs_main); - bool AlreadyHaveBlock(const uint256& block_hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main); - void ProcessGetBlockData(CNode& pfrom, Peer& peer, const CInv& inv, llmq::CInstantSendManager& isman); - - /** - * Validation logic for compact filters request handling. - * - * May disconnect from the peer in the case of a bad request. - * - * @param[in] node The node that we received the request from - * @param[in] peer The peer that we received the request from - * @param[in] filter_type The filter type the request is for. Must be basic filters. - * @param[in] start_height The start height for the request - * @param[in] stop_hash The stop_hash for the request - * @param[in] max_height_diff The maximum number of items permitted to request, as specified in BIP 157 - * @param[out] stop_index The CBlockIndex for the stop_hash block, if the request can be serviced. - * @param[out] filter_index The filter index, if the request can be serviced. - * @return True if the request can be serviced. - */ - bool PrepareBlockFilterRequest(CNode& node, Peer& peer, - BlockFilterType filter_type, uint32_t start_height, - const uint256& stop_hash, uint32_t max_height_diff, - const CBlockIndex*& stop_index, - BlockFilterIndex*& filter_index); - - /** - * Handle a cfilters request. - * - * May disconnect from the peer in the case of a bad request. 
- * - * @param[in] node The node that we received the request from - * @param[in] peer The peer that we received the request from - * @param[in] vRecv The raw message received - */ - void ProcessGetCFilters(CNode& node, Peer& peer, CDataStream& vRecv); - - /** - * Handle a cfheaders request. - * - * May disconnect from the peer in the case of a bad request. - * - * @param[in] node The node that we received the request from - * @param[in] peer The peer that we received the request from - * @param[in] vRecv The raw message received - */ - void ProcessGetCFHeaders(CNode& node, Peer& peer, CDataStream& vRecv); - - /** - * Handle a getcfcheckpt request. - * - * May disconnect from the peer in the case of a bad request. - * - * @param[in] node The node that we received the request from - * @param[in] peer The peer that we received the request from - * @param[in] vRecv The raw message received - */ - void ProcessGetCFCheckPt(CNode& node, Peer& peer, CDataStream& vRecv); - - /** Checks if address relay is permitted with peer. If needed, initializes - * the m_addr_known bloom filter and sets m_addr_relay_enabled to true. - * - * @return True if address relay is enabled with peer - * False if address relay is disallowed - */ - bool SetupAddressRelay(const CNode& node, Peer& peer); - - /** Number of nodes with fSyncStarted. */ - int nSyncStarted GUARDED_BY(cs_main) = 0; - - /** Hash of the last block we received via INV */ - uint256 m_last_block_inv_triggering_headers_sync{}; - - /** - * Sources of received blocks, saved to be able punish them when processing - * happens afterwards. - * Set mapBlockSource[hash].second to false if the node should not be - * punished if the block is invalid. - */ - std::map> mapBlockSource GUARDED_BY(cs_main); - - /** Number of outbound peers with m_chain_sync.m_protect. 
*/ - int m_outbound_peers_with_protect_from_disconnect GUARDED_BY(cs_main) = 0; - - bool AlreadyHave(const CInv& inv) - EXCLUSIVE_LOCKS_REQUIRED(cs_main, !m_recent_confirmed_transactions_mutex); - - /** - * Filter for transactions that were recently rejected by - * AcceptToMemoryPool. These are not rerequested until the chain tip - * changes, at which point the entire filter is reset. - * - * Without this filter we'd be re-requesting txs from each of our peers, - * increasing bandwidth consumption considerably. For instance, with 100 - * peers, half of which relay a tx we don't accept, that might be a 50x - * bandwidth increase. A flooding attacker attempting to roll-over the - * filter using minimum-sized, 60byte, transactions might manage to send - * 1000/sec if we have fast peers, so we pick 120,000 to give our peers a - * two minute window to send invs to us. - * - * Decreasing the false positive rate is fairly cheap, so we pick one in a - * million to make it highly unlikely for users to have issues with this - * filter. - * - * Memory used: 1.3MB - */ - CRollingBloomFilter m_recent_rejects GUARDED_BY(::cs_main){120'000, 0.000'001}; - uint256 hashRecentRejectsChainTip GUARDED_BY(cs_main); - - /* - * Filter for transactions that have been recently confirmed. - * We use this to avoid requesting transactions that have already been - * confirnmed. - * - * Blocks don't typically have more than 4000 transactions, so this should - * be at least six blocks (~1 hr) worth of transactions that we can store, - * inserting both a txid and wtxid for every observed transaction. - * If the number of transactions appearing in a block goes up, or if we are - * seeing getdata requests more than an hour after initial announcement, we - * can increase this number. - * The false positive rate of 1/1M should come out to less than 1 - * transaction per day that would be inadvertently ignored (which is the - * same probability that we have in the reject filter). 
- */ - Mutex m_recent_confirmed_transactions_mutex; - CRollingBloomFilter m_recent_confirmed_transactions GUARDED_BY(m_recent_confirmed_transactions_mutex){48'000, 0.000'001}; - - /** - * For sending `inv`s to inbound peers, we use a single (exponentially - * distributed) timer for all peers. If we used a separate timer for each - * peer, a spy node could make multiple inbound connections to us to - * accurately determine when we received the transaction (and potentially - * determine the transaction's origin). */ - std::chrono::microseconds NextInvToInbounds(std::chrono::microseconds now, - std::chrono::seconds average_interval); - - /* Returns a bool indicating whether we requested this block. - * Also used if a block was /not/ received and timed out or started with another peer - */ - bool MarkBlockAsReceived(const uint256& hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main); - - /* Mark a block as in flight - * Returns false, still setting pit, if the block was already in flight from the same peer - * pit will only be valid as long as the same cs_main lock is being held - */ - bool MarkBlockAsInFlight(NodeId nodeid, const uint256& hash, const CBlockIndex* pindex = nullptr, std::list::iterator** pit = nullptr) EXCLUSIVE_LOCKS_REQUIRED(cs_main); - - bool TipMayBeStale() EXCLUSIVE_LOCKS_REQUIRED(cs_main); - - /** Update pindexLastCommonBlock and add not-in-flight missing successors to vBlocks, until it has - * at most count entries. - */ - void FindNextBlocksToDownload(const Peer& peer, unsigned int count, std::vector& vBlocks, NodeId& nodeStaller) EXCLUSIVE_LOCKS_REQUIRED(cs_main); - - std::map::iterator> > mapBlocksInFlight GUARDED_BY(cs_main); - - /** When our tip was last updated. */ - std::atomic m_last_tip_update{0s}; - - /** Determine whether or not a peer can request a transaction, and return it (or nullptr if not found or not allowed). 
*/ - CTransactionRef FindTxForGetData(const CNode* peer, const uint256& txid, const std::chrono::seconds mempool_req, const std::chrono::seconds now) LOCKS_EXCLUDED(cs_main); - - void ProcessGetData(CNode& pfrom, Peer& peer, const std::atomic& interruptMsgProc) LOCKS_EXCLUDED(cs_main) EXCLUSIVE_LOCKS_REQUIRED(peer.m_getdata_requests_mutex); - - void ProcessBlock(CNode& pfrom, const std::shared_ptr& pblock, bool fForceProcessing); - - /** Relay map (txid -> CTransactionRef) */ - typedef std::map MapRelay; - MapRelay mapRelay GUARDED_BY(cs_main); - /** Expiration-time ordered list of (expire time, relay map entry) pairs. */ - std::deque> g_relay_expiration GUARDED_BY(cs_main); - - /** - * When a peer sends us a valid block, instruct it to announce blocks to us - * using CMPCTBLOCK if possible by adding its nodeid to the end of - * lNodesAnnouncingHeaderAndIDs, and keeping that list under a certain size by - * removing the first element if necessary. - */ - void MaybeSetPeerAsAnnouncingHeaderAndIDs(NodeId nodeid) EXCLUSIVE_LOCKS_REQUIRED(cs_main); - - /** Stack of nodes which we have set to announce using compact blocks */ - std::list lNodesAnnouncingHeaderAndIDs GUARDED_BY(cs_main); - - /** Number of peers from which we're downloading blocks. */ - int nPeersWithValidatedDownloads GUARDED_BY(cs_main) = 0; - - /** Storage for orphan information */ - TxOrphanage m_orphanage; - - void AddToCompactExtraTransactions(const CTransactionRef& tx) EXCLUSIVE_LOCKS_REQUIRED(g_cs_orphans); - - /** Orphan/conflicted/etc transactions that are kept for compact block reconstruction. - * The last -blockreconstructionextratxn/DEFAULT_BLOCK_RECONSTRUCTION_EXTRA_TXN of - * these are kept in a ring buffer */ - std::vector> vExtraTxnForCompact GUARDED_BY(g_cs_orphans); - /** Offset into vExtraTxnForCompact to insert the next tx */ - size_t vExtraTxnForCompactIt GUARDED_BY(g_cs_orphans) = 0; -}; -} // namespace - -namespace { - /** Number of preferable block download peers. 
*/ - int nPreferredDownload GUARDED_BY(cs_main) = 0; -} // namespace - -namespace { /** * Maintain validation-specific state about nodes, protected by cs_main, instead * by CNode's own locks. This simplifies asynchronous operation, where @@ -939,20 +521,456 @@ struct CNodeState { CNodeState(bool is_inbound) : m_is_inbound(is_inbound) {} }; +class PeerManagerImpl final : public PeerManager +{ +public: + PeerManagerImpl(const CChainParams& chainparams, CConnman& connman, AddrMan& addrman, BanMan* banman, + ChainstateManager& chainman, CTxMemPool& pool, + CMasternodeMetaMan& mn_metaman, CMasternodeSync& mn_sync, + CGovernanceManager& govman, CSporkManager& sporkman, + const CActiveMasternodeManager* const mn_activeman, + const std::unique_ptr& dmnman, + const std::unique_ptr& cj_ctx, + const std::unique_ptr& llmq_ctx, + bool ignore_incoming_txs); + + /** Overridden from CValidationInterface. */ + void BlockConnected(const std::shared_ptr& pblock, const CBlockIndex* pindexConnected) override + EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, !m_recent_confirmed_transactions_mutex); + void BlockDisconnected(const std::shared_ptr &block, const CBlockIndex* pindex) override + EXCLUSIVE_LOCKS_REQUIRED(!m_recent_confirmed_transactions_mutex); + void UpdatedBlockTip(const CBlockIndex *pindexNew, const CBlockIndex *pindexFork, bool fInitialDownload) override + EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex); + void BlockChecked(const CBlock& block, const BlockValidationState& state) override + EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex); + void NewPoWValidBlock(const CBlockIndex *pindex, const std::shared_ptr& pblock) override; + + /** Implement NetEventsInterface */ + void InitializeNode(CNode& node, ServiceFlags our_services) override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex); + void FinalizeNode(const CNode& node) override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex); + bool ProcessMessages(CNode* pfrom, std::atomic& interrupt) override + EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, 
!m_recent_confirmed_transactions_mutex); + bool SendMessages(CNode* pto) override EXCLUSIVE_LOCKS_REQUIRED(pto->cs_sendProcessing) + EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, !m_recent_confirmed_transactions_mutex); + + /** Implement PeerManager */ + void StartScheduledTasks(CScheduler& scheduler) override; + void CheckForStaleTipAndEvictPeers() override; + std::optional FetchBlock(NodeId peer_id, const CBlockIndex& block_index) override + EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex); + bool GetNodeStateStats(NodeId nodeid, CNodeStateStats& stats) const override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex); + bool IgnoresIncomingTxs() override { return m_ignore_incoming_txs; } + void SendPings() override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);; + void PushInventory(NodeId nodeid, const CInv& inv) override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex); + void RelayInv(CInv &inv, const int minProtoVersion) override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex); + void RelayInvFiltered(CInv &inv, const CTransaction &relatedTx, const int minProtoVersion) override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex); + void RelayInvFiltered(CInv &inv, const uint256 &relatedTxHash, const int minProtoVersion) override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex); + void RelayTransaction(const uint256& txid) override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex); + void SetBestHeight(int height) override { m_best_height = height; }; + void Misbehaving(const NodeId pnode, const int howmuch, const std::string& message = "") override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex); + void ProcessMessage(CNode& pfrom, const std::string& msg_type, CDataStream& vRecv, + const std::chrono::microseconds time_received, const std::atomic& interruptMsgProc) override + EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, !m_recent_confirmed_transactions_mutex); + void UpdateLastBlockAnnounceTime(NodeId node, int64_t time_in_seconds) override; + bool IsBanned(NodeId pnode) override EXCLUSIVE_LOCKS_REQUIRED(cs_main, !m_peer_mutex); + void 
EraseObjectRequest(NodeId nodeid, const CInv& inv) override EXCLUSIVE_LOCKS_REQUIRED(::cs_main); + void RequestObject(NodeId nodeid, const CInv& inv, std::chrono::microseconds current_time, + bool is_masternode, bool fForce = false) override EXCLUSIVE_LOCKS_REQUIRED(::cs_main); + size_t GetRequestedObjectCount(NodeId nodeid) const override EXCLUSIVE_LOCKS_REQUIRED(::cs_main); + bool IsInvInFilter(NodeId nodeid, const uint256& hash) const override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex); + +private: + /** Helper to process result of external handlers of message */ + void ProcessPeerMsgRet(const PeerMsgRet& ret, CNode& pfrom) EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex); + + /** Consider evicting an outbound peer based on the amount of time they've been behind our tip */ + void ConsiderEviction(CNode& pto, Peer& peer, std::chrono::seconds time_in_seconds) EXCLUSIVE_LOCKS_REQUIRED(cs_main); + + /** If we have extra outbound peers, try to disconnect the one with the oldest block announcement */ + void EvictExtraOutboundPeers(std::chrono::seconds now) EXCLUSIVE_LOCKS_REQUIRED(cs_main); + + /** Retrieve unbroadcast transactions from the mempool and reattempt sending to peers */ + void ReattemptInitialBroadcast(CScheduler& scheduler) EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex); + + /** Get a shared pointer to the Peer object. + * May return an empty shared_ptr if the Peer object can't be found. */ + PeerRef GetPeerRef(NodeId id) const EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex); + + /** Get a shared pointer to the Peer object and remove it from m_peer_map. + * May return an empty shared_ptr if the Peer object can't be found. 
*/ + PeerRef RemovePeer(NodeId id) EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex); + + /** + * Potentially mark a node discouraged based on the contents of a BlockValidationState object + * + * @param[in] via_compact_block this bool is passed in because net_processing should + * punish peers differently depending on whether the data was provided in a compact + * block message or not. If the compact block had a valid header, but contained invalid + * txs, the peer should not be punished. See BIP 152. + * + * @return Returns true if the peer was punished (probably disconnected) + */ + bool MaybePunishNodeForBlock(NodeId nodeid, const BlockValidationState& state, + bool via_compact_block, const std::string& message = "") + EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex); + + /** + * Potentially ban a node based on the contents of a TxValidationState object + * + * @return Returns true if the peer was punished (probably disconnected) + * + * Changes here may need to be reflected in TxRelayMayResultInDisconnect(). + */ + bool MaybePunishNodeForTx(NodeId nodeid, const TxValidationState& state, const std::string& message = "") + EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex); + + /** Maybe disconnect a peer and discourage future connections from its address. + * + * @param[in] pnode The node to check. + * @param[in] peer The peer object to check. + * @return True if the peer was marked for disconnection in this function + */ + bool MaybeDiscourageAndDisconnect(CNode& pnode, Peer& peer); + + void ProcessOrphanTx(std::set& orphan_work_set) EXCLUSIVE_LOCKS_REQUIRED(cs_main, g_cs_orphans) + EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex); + /** Process a single headers message from a peer. 
*/ + void ProcessHeadersMessage(CNode& pfrom, Peer& peer, + const std::vector& headers, + bool via_compact_block) + EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex); + /** Various helpers for headers processing, invoked by ProcessHeadersMessage() */ + /** Deal with state tracking and headers sync for peers that send the + * occasional non-connecting header (this can happen due to BIP 130 headers + * announcements for blocks interacting with the 2hr (MAX_FUTURE_BLOCK_TIME) rule). */ + void HandleFewUnconnectingHeaders(CNode& pfrom, Peer& peer, const std::vector& headers) + EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex); + /** Return true if the headers connect to each other, false otherwise */ + bool CheckHeadersAreContinuous(const std::vector& headers) const; + /** Request further headers from this peer with a given locator. + * We don't issue a getheaders message if we have a recent one outstanding. + * This returns true if a getheaders is actually sent, and false otherwise. + */ + bool MaybeSendGetHeaders(CNode& pfrom, const std::string& msg_type, const CBlockLocator& locator, Peer& peer); + /** Potentially fetch blocks from this peer upon receipt of a new headers tip */ + void HeadersDirectFetchBlocks(CNode& pfrom, const Peer& peer, const CBlockIndex* pindexLast); + /** Update peer state based on received headers message */ + void UpdatePeerStateForReceivedHeaders(CNode& pfrom, const CBlockIndex *pindexLast, bool received_new_header, bool may_have_more_headers); + + void SendBlockTransactions(CNode& pfrom, const CBlock& block, const BlockTransactionsRequest& req) + EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex); + + /** Send a version message to a peer */ + void PushNodeVersion(CNode& pnode, const Peer& peer); + + /** Send a ping message every PING_INTERVAL or if requested via RPC. May + * mark the peer to be disconnected if a ping has timed out. + * We use mockable time for ping timeouts, so setmocktime may cause pings + * to time out. 
*/ + void MaybeSendPing(CNode& node_to, Peer& peer, std::chrono::microseconds now); + + /** Send `addr` messages on a regular schedule. */ + void MaybeSendAddr(CNode& node, Peer& peer, std::chrono::microseconds current_time); + + /** Relay (gossip) an address to a few randomly chosen nodes. + * + * @param[in] originator The id of the peer that sent us the address. We don't want to relay it back. + * @param[in] addr Address to relay. + * @param[in] fReachable Whether the address' network is reachable. We relay unreachable + * addresses less. + */ + void RelayAddress(NodeId originator, const CAddress& addr, bool fReachable) EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex); + + const CChainParams& m_chainparams; + CConnman& m_connman; + AddrMan& m_addrman; + /** Pointer to this node's banman. May be nullptr - check existence before dereferencing. */ + BanMan* const m_banman; + ChainstateManager& m_chainman; + CTxMemPool& m_mempool; + const std::unique_ptr& m_dmnman; + const std::unique_ptr& m_cj_ctx; + const std::unique_ptr& m_llmq_ctx; + CMasternodeMetaMan& m_mn_metaman; + CMasternodeSync& m_mn_sync; + CGovernanceManager& m_govman; + CSporkManager& m_sporkman; + const CActiveMasternodeManager* const m_mn_activeman; + + /** The height of the best chain */ + std::atomic m_best_height{-1}; + + /** Next time to check for stale tip */ + std::chrono::seconds m_stale_tip_check_time GUARDED_BY(cs_main){0s}; + + /** Whether this node is running in blocks only mode */ + const bool m_ignore_incoming_txs; + + /** Whether we've completed initial sync yet, for determining when to turn + * on extra block-relay-only peers. */ + bool m_initial_sync_finished GUARDED_BY(cs_main){false}; + + /** Protects m_peer_map. This mutex must not be locked while holding a lock + * on any of the mutexes inside a Peer object. */ + mutable Mutex m_peer_mutex; + /** + * Map of all Peer objects, keyed by peer id. This map is protected + * by the m_peer_mutex. 
Once a shared pointer reference is + * taken, the lock may be released. Individual fields are protected by + * their own locks. + */ + std::map m_peer_map GUARDED_BY(m_peer_mutex); + + /** Map maintaining per-node state. */ + std::map m_node_states GUARDED_BY(cs_main); + + /** Get a pointer to a const CNodeState, used when not mutating the CNodeState object. */ + const CNodeState* State(NodeId pnode) const EXCLUSIVE_LOCKS_REQUIRED(cs_main); + /** Get a pointer to a mutable CNodeState. */ + CNodeState* State(NodeId pnode) EXCLUSIVE_LOCKS_REQUIRED(cs_main); + + std::atomic m_next_inv_to_inbounds{0us}; + + /** Check whether the last unknown block a peer advertised is not yet known. */ + void ProcessBlockAvailability(NodeId nodeid) EXCLUSIVE_LOCKS_REQUIRED(cs_main); + /** Update tracking information about which blocks a peer is assumed to have. */ + void UpdateBlockAvailability(NodeId nodeid, const uint256& hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main); + bool CanDirectFetch() EXCLUSIVE_LOCKS_REQUIRED(cs_main); + + /** + * To prevent fingerprinting attacks, only send blocks/headers outside of the + * active chain if they are no more than a month older (both in time, and in + * best equivalent proof of work) than the best header chain we know about and + * we fully-validated them at some point. + */ + bool BlockRequestAllowed(const CBlockIndex* pindex) EXCLUSIVE_LOCKS_REQUIRED(cs_main); + bool AlreadyHaveBlock(const uint256& block_hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main); + void ProcessGetBlockData(CNode& pfrom, Peer& peer, const CInv& inv, llmq::CInstantSendManager& isman); + + /** + * Validation logic for compact filters request handling. + * + * May disconnect from the peer in the case of a bad request. + * + * @param[in] node The node that we received the request from + * @param[in] peer The peer that we received the request from + * @param[in] filter_type The filter type the request is for. Must be basic filters. 
+ * @param[in] start_height The start height for the request + * @param[in] stop_hash The stop_hash for the request + * @param[in] max_height_diff The maximum number of items permitted to request, as specified in BIP 157 + * @param[out] stop_index The CBlockIndex for the stop_hash block, if the request can be serviced. + * @param[out] filter_index The filter index, if the request can be serviced. + * @return True if the request can be serviced. + */ + bool PrepareBlockFilterRequest(CNode& node, Peer& peer, + BlockFilterType filter_type, uint32_t start_height, + const uint256& stop_hash, uint32_t max_height_diff, + const CBlockIndex*& stop_index, + BlockFilterIndex*& filter_index); + + /** + * Handle a cfilters request. + * + * May disconnect from the peer in the case of a bad request. + * + * @param[in] node The node that we received the request from + * @param[in] peer The peer that we received the request from + * @param[in] vRecv The raw message received + */ + void ProcessGetCFilters(CNode& node, Peer& peer, CDataStream& vRecv); + + /** + * Handle a cfheaders request. + * + * May disconnect from the peer in the case of a bad request. + * + * @param[in] node The node that we received the request from + * @param[in] peer The peer that we received the request from + * @param[in] vRecv The raw message received + */ + void ProcessGetCFHeaders(CNode& node, Peer& peer, CDataStream& vRecv); + + /** + * Handle a getcfcheckpt request. + * + * May disconnect from the peer in the case of a bad request. + * + * @param[in] node The node that we received the request from + * @param[in] peer The peer that we received the request from + * @param[in] vRecv The raw message received + */ + void ProcessGetCFCheckPt(CNode& node, Peer& peer, CDataStream& vRecv); + + /** Checks if address relay is permitted with peer. If needed, initializes + * the m_addr_known bloom filter and sets m_addr_relay_enabled to true. 
+ * + * @return True if address relay is enabled with peer + * False if address relay is disallowed + */ + bool SetupAddressRelay(const CNode& node, Peer& peer); + + /** Number of nodes with fSyncStarted. */ + int nSyncStarted GUARDED_BY(cs_main) = 0; + + /** Hash of the last block we received via INV */ + uint256 m_last_block_inv_triggering_headers_sync{}; + + /** + * Sources of received blocks, saved to be able punish them when processing + * happens afterwards. + * Set mapBlockSource[hash].second to false if the node should not be + * punished if the block is invalid. + */ + std::map> mapBlockSource GUARDED_BY(cs_main); + + /** Number of outbound peers with m_chain_sync.m_protect. */ + int m_outbound_peers_with_protect_from_disconnect GUARDED_BY(cs_main) = 0; + + /** Number of preferable block download peers. */ + int m_num_preferred_download_peers GUARDED_BY(cs_main){0}; + + bool AlreadyHave(const CInv& inv) + EXCLUSIVE_LOCKS_REQUIRED(cs_main, !m_recent_confirmed_transactions_mutex); + + /** + * Filter for transactions that were recently rejected by + * AcceptToMemoryPool. These are not rerequested until the chain tip + * changes, at which point the entire filter is reset. + * + * Without this filter we'd be re-requesting txs from each of our peers, + * increasing bandwidth consumption considerably. For instance, with 100 + * peers, half of which relay a tx we don't accept, that might be a 50x + * bandwidth increase. A flooding attacker attempting to roll-over the + * filter using minimum-sized, 60byte, transactions might manage to send + * 1000/sec if we have fast peers, so we pick 120,000 to give our peers a + * two minute window to send invs to us. + * + * Decreasing the false positive rate is fairly cheap, so we pick one in a + * million to make it highly unlikely for users to have issues with this + * filter. 
+ * + * Memory used: 1.3MB + */ + CRollingBloomFilter m_recent_rejects GUARDED_BY(::cs_main){120'000, 0.000'001}; + uint256 hashRecentRejectsChainTip GUARDED_BY(cs_main); + + /* + * Filter for transactions that have been recently confirmed. + * We use this to avoid requesting transactions that have already been + * confirnmed. + * + * Blocks don't typically have more than 4000 transactions, so this should + * be at least six blocks (~1 hr) worth of transactions that we can store, + * inserting both a txid and wtxid for every observed transaction. + * If the number of transactions appearing in a block goes up, or if we are + * seeing getdata requests more than an hour after initial announcement, we + * can increase this number. + * The false positive rate of 1/1M should come out to less than 1 + * transaction per day that would be inadvertently ignored (which is the + * same probability that we have in the reject filter). + */ + Mutex m_recent_confirmed_transactions_mutex; + CRollingBloomFilter m_recent_confirmed_transactions GUARDED_BY(m_recent_confirmed_transactions_mutex){48'000, 0.000'001}; + + /** + * For sending `inv`s to inbound peers, we use a single (exponentially + * distributed) timer for all peers. If we used a separate timer for each + * peer, a spy node could make multiple inbound connections to us to + * accurately determine when we received the transaction (and potentially + * determine the transaction's origin). 
*/ + std::chrono::microseconds NextInvToInbounds(std::chrono::microseconds now, + std::chrono::seconds average_interval); + + + // All of the following cache a recent block, and are protected by m_most_recent_block_mutex + RecursiveMutex m_most_recent_block_mutex; + std::shared_ptr m_most_recent_block GUARDED_BY(m_most_recent_block_mutex); + std::shared_ptr m_most_recent_compact_block GUARDED_BY(m_most_recent_block_mutex); + uint256 m_most_recent_block_hash GUARDED_BY(m_most_recent_block_mutex); + + /** Height of the highest block announced using BIP 152 high-bandwidth mode. */ + int m_highest_fast_announce{0}; + + /* Returns a bool indicating whether we requested this block. + * Also used if a block was /not/ received and timed out or started with another peer + */ + bool MarkBlockAsReceived(const uint256& hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main); + + /* Mark a block as in flight + * Returns false, still setting pit, if the block was already in flight from the same peer + * pit will only be valid as long as the same cs_main lock is being held + */ + bool MarkBlockAsInFlight(NodeId nodeid, const uint256& hash, const CBlockIndex* pindex = nullptr, std::list::iterator** pit = nullptr) EXCLUSIVE_LOCKS_REQUIRED(cs_main); + + bool TipMayBeStale() EXCLUSIVE_LOCKS_REQUIRED(cs_main); + + /** Update pindexLastCommonBlock and add not-in-flight missing successors to vBlocks, until it has + * at most count entries. + */ + void FindNextBlocksToDownload(const Peer& peer, unsigned int count, std::vector& vBlocks, NodeId& nodeStaller) EXCLUSIVE_LOCKS_REQUIRED(cs_main); + + std::map::iterator> > mapBlocksInFlight GUARDED_BY(cs_main); + + /** When our tip was last updated. */ + std::atomic m_last_tip_update{0s}; + + /** Determine whether or not a peer can request a transaction, and return it (or nullptr if not found or not allowed). 
*/ + CTransactionRef FindTxForGetData(const CNode* peer, const uint256& txid, const std::chrono::seconds mempool_req, const std::chrono::seconds now) LOCKS_EXCLUDED(cs_main); + + void ProcessGetData(CNode& pfrom, Peer& peer, const std::atomic& interruptMsgProc) LOCKS_EXCLUDED(cs_main) EXCLUSIVE_LOCKS_REQUIRED(peer.m_getdata_requests_mutex); + + void ProcessBlock(CNode& pfrom, const std::shared_ptr& pblock, bool fForceProcessing); + + /** Relay map (txid -> CTransactionRef) */ + typedef std::map MapRelay; + MapRelay mapRelay GUARDED_BY(cs_main); + /** Expiration-time ordered list of (expire time, relay map entry) pairs. */ + std::deque> g_relay_expiration GUARDED_BY(cs_main); + + /** + * When a peer sends us a valid block, instruct it to announce blocks to us + * using CMPCTBLOCK if possible by adding its nodeid to the end of + * lNodesAnnouncingHeaderAndIDs, and keeping that list under a certain size by + * removing the first element if necessary. + */ + void MaybeSetPeerAsAnnouncingHeaderAndIDs(NodeId nodeid) EXCLUSIVE_LOCKS_REQUIRED(cs_main); + + /** Stack of nodes which we have set to announce using compact blocks */ + std::list lNodesAnnouncingHeaderAndIDs GUARDED_BY(cs_main); + + /** Number of peers from which we're downloading blocks. */ + int nPeersWithValidatedDownloads GUARDED_BY(cs_main) = 0; + + /** Storage for orphan information */ + TxOrphanage m_orphanage; + + void AddToCompactExtraTransactions(const CTransactionRef& tx) EXCLUSIVE_LOCKS_REQUIRED(g_cs_orphans); + + /** Orphan/conflicted/etc transactions that are kept for compact block reconstruction. 
+ * The last -blockreconstructionextratxn/DEFAULT_BLOCK_RECONSTRUCTION_EXTRA_TXN of + * these are kept in a ring buffer */ + std::vector> vExtraTxnForCompact GUARDED_BY(g_cs_orphans); + /** Offset into vExtraTxnForCompact to insert the next tx */ + size_t vExtraTxnForCompactIt GUARDED_BY(g_cs_orphans) = 0; +}; + // Keeps track of the time (in microseconds) when transactions were requested last time unordered_limitedmap g_already_asked_for(MAX_INV_SZ, MAX_INV_SZ * 2); unordered_limitedmap g_erased_object_requests(MAX_INV_SZ, MAX_INV_SZ * 2); -/** Map maintaining per-node state. */ -static std::map mapNodeState GUARDED_BY(cs_main); - -static CNodeState *State(NodeId pnode) EXCLUSIVE_LOCKS_REQUIRED(cs_main) { - std::map::iterator it = mapNodeState.find(pnode); - if (it == mapNodeState.end()) +const CNodeState* PeerManagerImpl::State(NodeId pnode) const EXCLUSIVE_LOCKS_REQUIRED(cs_main) +{ + std::map::const_iterator it = m_node_states.find(pnode); + if (it == m_node_states.end()) return nullptr; return &it->second; } +CNodeState* PeerManagerImpl::State(NodeId pnode) EXCLUSIVE_LOCKS_REQUIRED(cs_main) +{ + return const_cast(std::as_const(*this).State(pnode)); +} + /** * Whether the peer supports the address. For example, a peer that does not * implement BIP155 cannot receive Tor v3 addresses because it requires @@ -1037,16 +1055,6 @@ static void PushInv(Peer& peer, const CInv& inv) peer.m_tx_relay->vInventoryOtherToSend.push_back(inv); } -static void UpdatePreferredDownload(const CNode& node, const Peer& peer, CNodeState* state) EXCLUSIVE_LOCKS_REQUIRED(cs_main) -{ - nPreferredDownload -= state->fPreferredDownload; - - // Whether this node should be marked as a preferred download node. 
- state->fPreferredDownload = (!node.IsInboundConn() || node.HasPermission(NetPermissionFlags::NoBan)) && !node.IsAddrFetchConn() && CanServeBlocks(peer); - - nPreferredDownload += state->fPreferredDownload; -} - std::chrono::microseconds PeerManagerImpl::NextInvToInbounds(std::chrono::microseconds now, std::chrono::seconds average_interval) { @@ -1478,16 +1486,14 @@ size_t PeerManagerImpl::GetRequestedObjectCount(NodeId nodeid) const { AssertLockHeld(cs_main); - CNodeState* state = State(nodeid); + const CNodeState* state = State(nodeid); if (state == nullptr) return 0; return state->m_object_download.m_object_process_time.size(); } -// This function is used for testing the stale tip eviction logic, see -// denialofservice_tests.cpp -void UpdateLastBlockAnnounceTime(NodeId node, int64_t time_in_seconds) +void PeerManagerImpl::UpdateLastBlockAnnounceTime(NodeId node, int64_t time_in_seconds) { LOCK(cs_main); CNodeState *state = State(node); @@ -1498,7 +1504,7 @@ void PeerManagerImpl::InitializeNode(CNode& node, ServiceFlags our_services) { NodeId nodeid = node.GetId(); { LOCK(cs_main); - mapNodeState.emplace_hint(mapNodeState.end(), std::piecewise_construct, std::forward_as_tuple(nodeid), std::forward_as_tuple(node.IsInboundConn())); + m_node_states.emplace_hint(m_node_states.end(), std::piecewise_construct, std::forward_as_tuple(nodeid), std::forward_as_tuple(node.IsInboundConn())); } PeerRef peer = std::make_shared(nodeid, our_services, /* block_relay_only = */ node.IsBlockOnlyConn()); { @@ -1555,18 +1561,18 @@ void PeerManagerImpl::FinalizeNode(const CNode& node) { mapBlocksInFlight.erase(entry.hash); } WITH_LOCK(g_cs_orphans, m_orphanage.EraseForPeer(nodeid)); - nPreferredDownload -= state->fPreferredDownload; + m_num_preferred_download_peers -= state->fPreferredDownload; nPeersWithValidatedDownloads -= (state->nBlocksInFlightValidHeaders != 0); assert(nPeersWithValidatedDownloads >= 0); m_outbound_peers_with_protect_from_disconnect -= 
state->m_chain_sync.m_protect; assert(m_outbound_peers_with_protect_from_disconnect >= 0); - mapNodeState.erase(nodeid); + m_node_states.erase(nodeid); - if (mapNodeState.empty()) { + if (m_node_states.empty()) { // Do a consistency check after the last peer is removed. assert(mapBlocksInFlight.empty()); - assert(nPreferredDownload == 0); + assert(m_num_preferred_download_peers == 0); assert(nPeersWithValidatedDownloads == 0); assert(m_outbound_peers_with_protect_from_disconnect == 0); } @@ -1605,7 +1611,7 @@ bool PeerManagerImpl::GetNodeStateStats(NodeId nodeid, CNodeStateStats& stats) c { { LOCK(cs_main); - CNodeState* state = State(nodeid); + const CNodeState* state = State(nodeid); if (state == nullptr) return false; stats.nSyncHeight = state->pindexBestKnownBlock ? state->pindexBestKnownBlock->nHeight : -1; @@ -1910,12 +1916,6 @@ void PeerManagerImpl::BlockDisconnected(const std::shared_ptr &blo m_recent_confirmed_transactions.reset(); } -// All of the following cache a recent block, and are protected by cs_most_recent_block -static RecursiveMutex cs_most_recent_block; -static std::shared_ptr most_recent_block GUARDED_BY(cs_most_recent_block); -static std::shared_ptr most_recent_compact_block GUARDED_BY(cs_most_recent_block); -static uint256 most_recent_block_hash GUARDED_BY(cs_most_recent_block); - /** * Maintain state about the best-seen block and fast-announce a compact block * to compatible peers. 
@@ -1926,20 +1926,19 @@ void PeerManagerImpl::NewPoWValidBlock(const CBlockIndex *pindex, const std::sha LOCK(cs_main); - static int nHighestFastAnnounce = 0; - if (pindex->nHeight <= nHighestFastAnnounce) + if (pindex->nHeight <= m_highest_fast_announce) return; - nHighestFastAnnounce = pindex->nHeight; + m_highest_fast_announce = pindex->nHeight; uint256 hashBlock(pblock->GetHash()); const std::shared_future lazy_ser{ std::async(std::launch::deferred, [&] { return msgMaker.Make(NetMsgType::CMPCTBLOCK, *pcmpctblock); })}; { - LOCK(cs_most_recent_block); - most_recent_block_hash = hashBlock; - most_recent_block = pblock; - most_recent_compact_block = pcmpctblock; + LOCK(m_most_recent_block_mutex); + m_most_recent_block_hash = hashBlock; + m_most_recent_block = pblock; + m_most_recent_compact_block = pcmpctblock; } m_connman.ForEachNode([this, pindex, &lazy_ser, &hashBlock](CNode* pnode) { @@ -2280,9 +2279,9 @@ void PeerManagerImpl::ProcessGetBlockData(CNode& pfrom, Peer& peer, const CInv& std::shared_ptr a_recent_block; std::shared_ptr a_recent_compact_block; { - LOCK(cs_most_recent_block); - a_recent_block = most_recent_block; - a_recent_compact_block = most_recent_compact_block; + LOCK(m_most_recent_block_mutex); + a_recent_block = m_most_recent_block; + a_recent_compact_block = m_most_recent_compact_block; } bool need_activate_chain = false; @@ -3402,7 +3401,9 @@ void PeerManagerImpl::ProcessMessage( // Potentially mark this peer as a preferred download peer. 
{ LOCK(cs_main); - UpdatePreferredDownload(pfrom, *peer, State(pfrom.GetId())); + CNodeState* state = State(pfrom.GetId()); + state->fPreferredDownload = (!pfrom.IsInboundConn() || pfrom.HasPermission(NetPermissionFlags::NoBan)) && !pfrom.IsAddrFetchConn() && CanServeBlocks(*peer); + m_num_preferred_download_peers += state->fPreferredDownload; } // Self advertisement & GETADDR logic @@ -3871,8 +3872,8 @@ void PeerManagerImpl::ProcessMessage( { std::shared_ptr a_recent_block; { - LOCK(cs_most_recent_block); - a_recent_block = most_recent_block; + LOCK(m_most_recent_block_mutex); + a_recent_block = m_most_recent_block; } BlockValidationState state; if (!m_chainman.ActiveChainstate().ActivateBestChain(state, a_recent_block)) { @@ -3925,10 +3926,10 @@ void PeerManagerImpl::ProcessMessage( std::shared_ptr recent_block; { - LOCK(cs_most_recent_block); - if (most_recent_block_hash == req.blockhash) - recent_block = most_recent_block; - // Unlock cs_most_recent_block to avoid cs_main lock inversion + LOCK(m_most_recent_block_mutex); + if (m_most_recent_block_hash == req.blockhash) + recent_block = m_most_recent_block; + // Unlock m_most_recent_block_mutex to avoid cs_main lock inversion } if (recent_block) { SendBlockTransactions(pfrom, *recent_block, req); @@ -5438,7 +5439,7 @@ bool PeerManagerImpl::SendMessages(CNode* pto) // the latest blocks is from an inbound peer, we have to be sure to // eventually download it (and not just wait indefinitely for an // outbound peer to have it). 
- if (nPreferredDownload == 0 || mapBlocksInFlight.empty()) { + if (m_num_preferred_download_peers == 0 || mapBlocksInFlight.empty()) { sync_blocks_and_headers_from_peer = true; } } @@ -5557,9 +5558,9 @@ bool PeerManagerImpl::SendMessages(CNode* pto) bool fGotBlockFromCache = false; { - LOCK(cs_most_recent_block); - if (most_recent_block_hash == pBestIndex->GetBlockHash()) { - m_connman.PushMessage(pto, msgMaker.Make(NetMsgType::CMPCTBLOCK, *most_recent_compact_block)); + LOCK(m_most_recent_block_mutex); + if (m_most_recent_block_hash == pBestIndex->GetBlockHash()) { + m_connman.PushMessage(pto, msgMaker.Make(NetMsgType::CMPCTBLOCK, *m_most_recent_compact_block)); fGotBlockFromCache = true; } } @@ -5832,7 +5833,7 @@ bool PeerManagerImpl::SendMessages(CNode* pto) if (state.fSyncStarted && state.m_headers_sync_timeout < std::chrono::microseconds::max()) { // Detect whether this is a stalling initial-headers-sync peer if (m_chainman.m_best_header->GetBlockTime() <= GetAdjustedTime() - nMaxTipAge) { - if (current_time > state.m_headers_sync_timeout && nSyncStarted == 1 && (nPreferredDownload - state.fPreferredDownload >= 1)) { + if (current_time > state.m_headers_sync_timeout && nSyncStarted == 1 && (m_num_preferred_download_peers - state.fPreferredDownload >= 1)) { // Disconnect a peer (without NetPermissionFlags::NoBan permission) if it is our only sync peer, // and we have others we could be using instead. 
// Note: If all our peers are inbound, then we won't @@ -5961,4 +5962,4 @@ bool PeerManagerImpl::SendMessages(CNode* pto) } } // release cs_main return true; -} +} \ No newline at end of file diff --git a/src/net_processing.h b/src/net_processing.h index 2b281df0c9..17f0e2e500 100644 --- a/src/net_processing.h +++ b/src/net_processing.h @@ -127,6 +127,9 @@ public: virtual void ProcessMessage(CNode& pfrom, const std::string& msg_type, CDataStream& vRecv, const std::chrono::microseconds time_received, const std::atomic& interruptMsgProc) = 0; + /** This function is used for testing the stale tip eviction logic, see denialofservice_tests.cpp */ + virtual void UpdateLastBlockAnnounceTime(NodeId node, int64_t time_in_seconds) = 0; + virtual bool IsBanned(NodeId pnode) = 0; virtual void EraseObjectRequest(NodeId nodeid, const CInv& inv) = 0; diff --git a/src/test/denialofservice_tests.cpp b/src/test/denialofservice_tests.cpp index dbf154531d..a1d992cc01 100644 --- a/src/test/denialofservice_tests.cpp +++ b/src/test/denialofservice_tests.cpp @@ -34,8 +34,6 @@ static CService ip(uint32_t i) return CService(CNetAddr(s), Params().GetDefaultPort()); } -void UpdateLastBlockAnnounceTime(NodeId node, int64_t time_in_seconds); - BOOST_FIXTURE_TEST_SUITE(denialofservice_tests, TestingSetup) // Test eviction of an outbound peer whose chain never advances @@ -202,7 +200,7 @@ BOOST_AUTO_TEST_CASE(stale_tip_peer_management) // Update the last announced block time for the last // peer, and check that the next newest node gets evicted. 
- UpdateLastBlockAnnounceTime(vNodes.back()->GetId(), GetTime()); + peerLogic->UpdateLastBlockAnnounceTime(vNodes.back()->GetId(), GetTime()); peerLogic->CheckForStaleTipAndEvictPeers(); for (int i = 0; i < max_outbound_full_relay - 1; ++i) { From 2ecba6ba5fdb1798ba2cd23d38c8331269b71afe Mon Sep 17 00:00:00 2001 From: Kittywhiskers Van Gogh <63189531+kwvg@users.noreply.github.com> Date: Wed, 7 Sep 2022 13:57:18 +1000 Subject: [PATCH 08/14] partial bitcoin#26036: add NetEventsInterface::g_msgproc_mutex This backport excludes annotations for members introduced in bitcoin#25717 as it hasn't been backported yet. --- src/net.cpp | 5 ++- src/net.h | 9 +++-- src/net_processing.cpp | 62 ++++++++++++++++++------------ src/net_processing.h | 2 +- src/test/denialofservice_tests.cpp | 47 ++++++++-------------- src/test/fuzz/process_message.cpp | 7 ++-- src/test/fuzz/process_messages.cpp | 7 ++-- src/test/fuzz/util.h | 2 +- src/test/net_tests.cpp | 7 ++-- src/test/util/net.cpp | 10 +---- src/test/util/net.h | 5 ++- 11 files changed, 77 insertions(+), 86 deletions(-) diff --git a/src/net.cpp b/src/net.cpp index fc97d4c42e..42f3ad6b49 100644 --- a/src/net.cpp +++ b/src/net.cpp @@ -3142,8 +3142,12 @@ void CConnman::OpenMasternodeConnection(const CAddress &addrConnect, MasternodeP OpenNetworkConnection(addrConnect, false, nullptr, nullptr, ConnectionType::OUTBOUND_FULL_RELAY, MasternodeConn::IsConnection, probe); } +Mutex NetEventsInterface::g_msgproc_mutex; + void CConnman::ThreadMessageHandler() { + LOCK(NetEventsInterface::g_msgproc_mutex); + int64_t nLastSendMessagesTimeMasternodes = 0; FastRandomContext rng; @@ -3173,7 +3177,6 @@ void CConnman::ThreadMessageHandler() return; // Send messages if (!fSkipSendMessagesForMasternodes || !pnode->m_masternode_connection) { - LOCK(pnode->cs_sendProcessing); m_msgproc->SendMessages(pnode); } diff --git a/src/net.h b/src/net.h index 71ff10861c..0277b27e10 100644 --- a/src/net.h +++ b/src/net.h @@ -481,8 +481,6 @@ public: std::list 
vProcessMsg GUARDED_BY(cs_vProcessMsg); size_t nProcessQueueSize GUARDED_BY(cs_vProcessMsg){0}; - RecursiveMutex cs_sendProcessing; - uint64_t nRecvBytes GUARDED_BY(cs_vRecv){0}; std::atomic m_last_send{0s}; @@ -816,6 +814,9 @@ private: class NetEventsInterface { public: + /** Mutex for anything that is only accessed via the msg processing thread */ + static Mutex g_msgproc_mutex; + /** Initialize a peer (setup state, queue any initial messages) */ virtual void InitializeNode(CNode& node, ServiceFlags our_services) = 0; @@ -829,7 +830,7 @@ public: * @param[in] interrupt Interrupt condition for processing threads * @return True if there is more work to be done */ - virtual bool ProcessMessages(CNode* pnode, std::atomic& interrupt) = 0; + virtual bool ProcessMessages(CNode* pnode, std::atomic& interrupt) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex) = 0; /** * Send queued protocol messages to a given node. @@ -837,7 +838,7 @@ public: * @param[in] pnode The node which we are sending messages to. * @return True if there is more work to be done */ - virtual bool SendMessages(CNode* pnode) EXCLUSIVE_LOCKS_REQUIRED(pnode->cs_sendProcessing) = 0; + virtual bool SendMessages(CNode* pnode) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex) = 0; protected: diff --git a/src/net_processing.cpp b/src/net_processing.cpp index 7e812e30b0..8747950df7 100644 --- a/src/net_processing.cpp +++ b/src/net_processing.cpp @@ -286,7 +286,7 @@ struct Peer { bool m_send_mempool GUARDED_BY(m_tx_inventory_mutex){false}; // Last time a "MEMPOOL" request was serviced. std::atomic m_last_mempool_req{0s}; - std::chrono::microseconds m_next_inv_send_time{0}; + std::chrono::microseconds m_next_inv_send_time GUARDED_BY(NetEventsInterface::g_msgproc_mutex){0}; }; // in bitcoin: m_tx_relay == nullptr if we're not relaying transactions with this peer @@ -295,7 +295,7 @@ struct Peer { std::unique_ptr m_tx_relay{std::make_unique()}; /** A vector of addresses to send to the peer, limited to MAX_ADDR_TO_SEND. 
*/ - std::vector m_addrs_to_send; + std::vector m_addrs_to_send GUARDED_BY(NetEventsInterface::g_msgproc_mutex); /** Probabilistic filter to track recent addr messages relayed with this * peer. Used to avoid relaying redundant addresses to this peer. * @@ -305,7 +305,7 @@ struct Peer { * * Presence of this filter must correlate with m_addr_relay_enabled. **/ - std::unique_ptr m_addr_known; + std::unique_ptr m_addr_known GUARDED_BY(NetEventsInterface::g_msgproc_mutex); /** Whether we are participating in address relay with this connection. * * We set this bool to true for outbound peers (other than @@ -324,7 +324,7 @@ struct Peer { /** Whether a Peer can only be relayed blocks */ const bool m_block_relay_only{false}; /** Whether a getaddr request to this peer is outstanding. */ - bool m_getaddr_sent{false}; + bool m_getaddr_sent GUARDED_BY(NetEventsInterface::g_msgproc_mutex){false}; /** Guards address sending timers. */ mutable Mutex m_addr_send_times_mutex; /** Time point to send the next ADDR message to this peer. */ @@ -335,12 +335,12 @@ struct Peer { * messages, indicating a preference to receive ADDRv2 instead of ADDR ones. */ std::atomic_bool m_wants_addrv2{false}; /** Whether this peer has already sent us a getaddr message. */ - bool m_getaddr_recvd{false}; + bool m_getaddr_recvd GUARDED_BY(NetEventsInterface::g_msgproc_mutex){false}; /** Number of addresses that can be processed from this peer. Start at 1 to * permit self-announcement. */ - double m_addr_token_bucket{1.0}; + double m_addr_token_bucket GUARDED_BY(NetEventsInterface::g_msgproc_mutex){1.0}; /** When m_addr_token_bucket was last updated */ - std::chrono::microseconds m_addr_token_timestamp{GetTime()}; + std::chrono::microseconds m_addr_token_timestamp GUARDED_BY(NetEventsInterface::g_msgproc_mutex){GetTime()}; /** Total number of addresses that were dropped due to rate limiting. 
*/ std::atomic m_addr_rate_limited{0}; /** Total number of addresses that were processed (excludes rate-limited ones). */ @@ -350,7 +350,7 @@ struct Peer { std::set m_orphan_work_set GUARDED_BY(g_cs_orphans); /** Whether we've sent this peer a getheaders in response to an inv prior to initial-headers-sync completing */ - bool m_inv_triggered_getheaders_before_sync{false}; + bool m_inv_triggered_getheaders_before_sync GUARDED_BY(NetEventsInterface::g_msgproc_mutex){false}; /** Protects m_getdata_requests **/ Mutex m_getdata_requests_mutex; @@ -358,7 +358,7 @@ struct Peer { std::deque m_getdata_requests GUARDED_BY(m_getdata_requests_mutex); /** Time of the last getheaders message to this peer */ - std::atomic m_last_getheaders_timestamp{0s}; + std::atomic m_last_getheaders_timestamp GUARDED_BY(NetEventsInterface::g_msgproc_mutex){0s}; explicit Peer(NodeId id, ServiceFlags our_services, bool block_relay_only) : m_id(id) @@ -549,9 +549,9 @@ public: void InitializeNode(CNode& node, ServiceFlags our_services) override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex); void FinalizeNode(const CNode& node) override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex); bool ProcessMessages(CNode* pfrom, std::atomic& interrupt) override - EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, !m_recent_confirmed_transactions_mutex); - bool SendMessages(CNode* pto) override EXCLUSIVE_LOCKS_REQUIRED(pto->cs_sendProcessing) - EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, !m_recent_confirmed_transactions_mutex); + EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, !m_recent_confirmed_transactions_mutex, g_msgproc_mutex); + bool SendMessages(CNode* pto) override + EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, !m_recent_confirmed_transactions_mutex, g_msgproc_mutex); /** Implement PeerManager */ void StartScheduledTasks(CScheduler& scheduler) override; @@ -570,7 +570,7 @@ public: void Misbehaving(const NodeId pnode, const int howmuch, const std::string& message = "") override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex); void 
ProcessMessage(CNode& pfrom, const std::string& msg_type, CDataStream& vRecv, const std::chrono::microseconds time_received, const std::atomic& interruptMsgProc) override - EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, !m_recent_confirmed_transactions_mutex); + EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, !m_recent_confirmed_transactions_mutex, g_msgproc_mutex); void UpdateLastBlockAnnounceTime(NodeId node, int64_t time_in_seconds) override; bool IsBanned(NodeId pnode) override EXCLUSIVE_LOCKS_REQUIRED(cs_main, !m_peer_mutex); void EraseObjectRequest(NodeId nodeid, const CInv& inv) override EXCLUSIVE_LOCKS_REQUIRED(::cs_main); @@ -584,7 +584,7 @@ private: void ProcessPeerMsgRet(const PeerMsgRet& ret, CNode& pfrom) EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex); /** Consider evicting an outbound peer based on the amount of time they've been behind our tip */ - void ConsiderEviction(CNode& pto, Peer& peer, std::chrono::seconds time_in_seconds) EXCLUSIVE_LOCKS_REQUIRED(cs_main); + void ConsiderEviction(CNode& pto, Peer& peer, std::chrono::seconds time_in_seconds) EXCLUSIVE_LOCKS_REQUIRED(cs_main, g_msgproc_mutex); /** If we have extra outbound peers, try to disconnect the one with the oldest block announcement */ void EvictExtraOutboundPeers(std::chrono::seconds now) EXCLUSIVE_LOCKS_REQUIRED(cs_main); @@ -638,20 +638,21 @@ private: void ProcessHeadersMessage(CNode& pfrom, Peer& peer, const std::vector& headers, bool via_compact_block) - EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex); + EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, g_msgproc_mutex); /** Various helpers for headers processing, invoked by ProcessHeadersMessage() */ /** Deal with state tracking and headers sync for peers that send the * occasional non-connecting header (this can happen due to BIP 130 headers * announcements for blocks interacting with the 2hr (MAX_FUTURE_BLOCK_TIME) rule). 
*/ void HandleFewUnconnectingHeaders(CNode& pfrom, Peer& peer, const std::vector& headers) - EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex); + EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, g_msgproc_mutex); /** Return true if the headers connect to each other, false otherwise */ bool CheckHeadersAreContinuous(const std::vector& headers) const; /** Request further headers from this peer with a given locator. * We don't issue a getheaders message if we have a recent one outstanding. * This returns true if a getheaders is actually sent, and false otherwise. */ - bool MaybeSendGetHeaders(CNode& pfrom, const std::string& msg_type, const CBlockLocator& locator, Peer& peer); + bool MaybeSendGetHeaders(CNode& pfrom, const std::string& msg_type, const CBlockLocator& locator, Peer& peer) + EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex); /** Potentially fetch blocks from this peer upon receipt of a new headers tip */ void HeadersDirectFetchBlocks(CNode& pfrom, const Peer& peer, const CBlockIndex* pindexLast); /** Update peer state based on received headers message */ @@ -670,7 +671,8 @@ private: void MaybeSendPing(CNode& node_to, Peer& peer, std::chrono::microseconds now); /** Send `addr` messages on a regular schedule. */ - void MaybeSendAddr(CNode& node, Peer& peer, std::chrono::microseconds current_time); + void MaybeSendAddr(CNode& node, Peer& peer, std::chrono::microseconds current_time) + EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex); /** Relay (gossip) an address to a few randomly chosen nodes. * @@ -679,7 +681,8 @@ private: * @param[in] fReachable Whether the address' network is reachable. We relay unreachable * addresses less. 
*/ - void RelayAddress(NodeId originator, const CAddress& addr, bool fReachable) EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex); + void RelayAddress(NodeId originator, const CAddress& addr, bool fReachable) + EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, g_msgproc_mutex); const CChainParams& m_chainparams; CConnman& m_connman; @@ -807,13 +810,16 @@ private: * @return True if address relay is enabled with peer * False if address relay is disallowed */ - bool SetupAddressRelay(const CNode& node, Peer& peer); + bool SetupAddressRelay(const CNode& node, Peer& peer) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex); + + void AddAddressKnown(Peer& peer, const CAddress& addr) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex); + void PushAddress(Peer& peer, const CAddress& addr, FastRandomContext& insecure_rand) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex); /** Number of nodes with fSyncStarted. */ int nSyncStarted GUARDED_BY(cs_main) = 0; /** Hash of the last block we received via INV */ - uint256 m_last_block_inv_triggering_headers_sync{}; + uint256 m_last_block_inv_triggering_headers_sync GUARDED_BY(g_msgproc_mutex){}; /** * Sources of received blocks, saved to be able punish them when processing @@ -889,7 +895,7 @@ private: uint256 m_most_recent_block_hash GUARDED_BY(m_most_recent_block_mutex); /** Height of the highest block announced using BIP 152 high-bandwidth mode. */ - int m_highest_fast_announce{0}; + int m_highest_fast_announce GUARDED_BY(::cs_main){0}; /* Returns a bool indicating whether we requested this block. 
* Also used if a block was /not/ received and timed out or started with another peer @@ -981,13 +987,13 @@ static bool IsAddrCompatible(const Peer& peer, const CAddress& addr) return peer.m_wants_addrv2 || addr.IsAddrV1Compatible(); } -static void AddAddressKnown(Peer& peer, const CAddress& addr) +void PeerManagerImpl::AddAddressKnown(Peer& peer, const CAddress& addr) { assert(peer.m_addr_known); peer.m_addr_known->insert(addr.GetKey()); } -static void PushAddress(Peer& peer, const CAddress& addr, FastRandomContext& insecure_rand) +void PeerManagerImpl::PushAddress(Peer& peer, const CAddress& addr, FastRandomContext& insecure_rand) { // Known checking here is only to save space from duplicates. // Before sending, we'll filter it again for known addresses that were @@ -3250,6 +3256,8 @@ void PeerManagerImpl::ProcessMessage( const std::chrono::microseconds time_received, const std::atomic& interruptMsgProc) { + AssertLockHeld(g_msgproc_mutex); + LogPrint(BCLog::NET, "received: %s (%u bytes) peer=%d\n", SanitizeString(msg_type), vRecv.size(), pfrom.GetId()); statsClient.inc("message.received." + SanitizeString(msg_type), 1.0f); @@ -4971,6 +4979,8 @@ bool PeerManagerImpl::MaybeDiscourageAndDisconnect(CNode& pnode, Peer& peer) bool PeerManagerImpl::ProcessMessages(CNode* pfrom, std::atomic& interruptMsgProc) { + AssertLockHeld(g_msgproc_mutex); + bool fMoreWork = false; PeerRef peer = GetPeerRef(pfrom->GetId()); @@ -5313,7 +5323,7 @@ void PeerManagerImpl::MaybeSendAddr(CNode& node, Peer& peer, std::chrono::micros // Remove addr records that the peer already knows about, and add new // addrs to the m_addr_known filter on the same pass. 
- auto addr_already_known = [&peer](const CAddress& addr) { + auto addr_already_known = [&peer](const CAddress& addr) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex) { bool ret = peer.m_addr_known->contains(addr.GetKey()); if (!ret) peer.m_addr_known->insert(addr.GetKey()); return ret; @@ -5379,6 +5389,8 @@ bool PeerManagerImpl::SetupAddressRelay(const CNode& node, Peer& peer) bool PeerManagerImpl::SendMessages(CNode* pto) { + AssertLockHeld(g_msgproc_mutex); + assert(m_llmq_ctx); const bool is_masternode = m_mn_activeman != nullptr; diff --git a/src/net_processing.h b/src/net_processing.h index 17f0e2e500..295fcc1d6e 100644 --- a/src/net_processing.h +++ b/src/net_processing.h @@ -125,7 +125,7 @@ public: /** Process a single message from a peer. Public for fuzz testing */ virtual void ProcessMessage(CNode& pfrom, const std::string& msg_type, CDataStream& vRecv, - const std::chrono::microseconds time_received, const std::atomic& interruptMsgProc) = 0; + const std::chrono::microseconds time_received, const std::atomic& interruptMsgProc) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex) = 0; /** This function is used for testing the stale tip eviction logic, see denialofservice_tests.cpp */ virtual void UpdateLastBlockAnnounceTime(NodeId node, int64_t time_in_seconds) = 0; diff --git a/src/test/denialofservice_tests.cpp b/src/test/denialofservice_tests.cpp index a1d992cc01..bc9daa6744 100644 --- a/src/test/denialofservice_tests.cpp +++ b/src/test/denialofservice_tests.cpp @@ -46,6 +46,8 @@ BOOST_FIXTURE_TEST_SUITE(denialofservice_tests, TestingSetup) // work. 
BOOST_AUTO_TEST_CASE(outbound_slow_chain_eviction) { + LOCK(NetEventsInterface::g_msgproc_mutex); + ConnmanTestMsg& connman = static_cast(*m_node.connman); // Disable inactivity checks for this test to avoid interference connman.SetPeerConnectTimeout(99999s); @@ -82,10 +84,8 @@ BOOST_AUTO_TEST_CASE(outbound_slow_chain_eviction) } // Test starts here - { - LOCK(dummyNode1.cs_sendProcessing); - BOOST_CHECK(peerman.SendMessages(&dummyNode1)); // should result in getheaders - } + BOOST_CHECK(peerman.SendMessages(&dummyNode1)); // should result in getheaders + { LOCK(dummyNode1.cs_vSend); BOOST_CHECK(dummyNode1.vSendMsg.size() > 0); @@ -96,20 +96,14 @@ BOOST_AUTO_TEST_CASE(outbound_slow_chain_eviction) int64_t nStartTime = GetTime(); // Wait 21 minutes SetMockTime(nStartTime+21*60); - { - LOCK(dummyNode1.cs_sendProcessing); - BOOST_CHECK(peerman.SendMessages(&dummyNode1)); // should result in getheaders - } + BOOST_CHECK(peerman.SendMessages(&dummyNode1)); // should result in getheaders { LOCK(dummyNode1.cs_vSend); BOOST_CHECK(dummyNode1.vSendMsg.size() > 0); } // Wait 3 more minutes SetMockTime(nStartTime+24*60); - { - LOCK(dummyNode1.cs_sendProcessing); - BOOST_CHECK(peerman.SendMessages(&dummyNode1)); // should result in disconnect - } + BOOST_CHECK(peerman.SendMessages(&dummyNode1)); // should result in disconnect BOOST_CHECK(dummyNode1.fDisconnect == true); peerman.FinalizeNode(dummyNode1); @@ -283,6 +277,8 @@ BOOST_AUTO_TEST_CASE(block_relay_only_eviction) BOOST_AUTO_TEST_CASE(peer_discouragement) { + LOCK(NetEventsInterface::g_msgproc_mutex); + const CChainParams& chainparams = Params(); auto banman = std::make_unique(m_args.GetDataDirBase() / "banlist", nullptr, DEFAULT_MISBEHAVING_BANTIME); auto connman = std::make_unique(0x1337, 0x1337, *m_node.addrman); @@ -320,10 +316,7 @@ BOOST_AUTO_TEST_CASE(peer_discouragement) nodes[0]->fSuccessfullyConnected = true; connman->AddTestNode(*nodes[0]); peerLogic->Misbehaving(nodes[0]->GetId(), DISCOURAGEMENT_THRESHOLD); // 
Should be discouraged - { - LOCK(nodes[0]->cs_sendProcessing); - BOOST_CHECK(peerLogic->SendMessages(nodes[0])); - } + BOOST_CHECK(peerLogic->SendMessages(nodes[0])); BOOST_CHECK(banman->IsDiscouraged(addr[0])); BOOST_CHECK(nodes[0]->fDisconnect); BOOST_CHECK(!banman->IsDiscouraged(other_addr)); // Different address, not discouraged @@ -342,10 +335,7 @@ BOOST_AUTO_TEST_CASE(peer_discouragement) nodes[1]->fSuccessfullyConnected = true; connman->AddTestNode(*nodes[1]); peerLogic->Misbehaving(nodes[1]->GetId(), DISCOURAGEMENT_THRESHOLD - 1); - { - LOCK(nodes[1]->cs_sendProcessing); - BOOST_CHECK(peerLogic->SendMessages(nodes[1])); - } + BOOST_CHECK(peerLogic->SendMessages(nodes[1])); // [0] is still discouraged/disconnected. BOOST_CHECK(banman->IsDiscouraged(addr[0])); BOOST_CHECK(nodes[0]->fDisconnect); @@ -353,10 +343,7 @@ BOOST_AUTO_TEST_CASE(peer_discouragement) BOOST_CHECK(!banman->IsDiscouraged(addr[1])); BOOST_CHECK(!nodes[1]->fDisconnect); peerLogic->Misbehaving(nodes[1]->GetId(), 1); // [1] reaches discouragement threshold - { - LOCK(nodes[1]->cs_sendProcessing); - BOOST_CHECK(peerLogic->SendMessages(nodes[1])); - } + BOOST_CHECK(peerLogic->SendMessages(nodes[1])); // Expect both [0] and [1] to be discouraged/disconnected now. 
BOOST_CHECK(banman->IsDiscouraged(addr[0])); BOOST_CHECK(nodes[0]->fDisconnect); @@ -379,10 +366,7 @@ BOOST_AUTO_TEST_CASE(peer_discouragement) nodes[2]->fSuccessfullyConnected = true; connman->AddTestNode(*nodes[2]); peerLogic->Misbehaving(nodes[2]->GetId(), DISCOURAGEMENT_THRESHOLD, /* message */ ""); - { - LOCK(nodes[2]->cs_sendProcessing); - BOOST_CHECK(peerLogic->SendMessages(nodes[2])); - } + BOOST_CHECK(peerLogic->SendMessages(nodes[2])); BOOST_CHECK(banman->IsDiscouraged(addr[0])); BOOST_CHECK(banman->IsDiscouraged(addr[1])); BOOST_CHECK(banman->IsDiscouraged(addr[2])); @@ -398,6 +382,8 @@ BOOST_AUTO_TEST_CASE(peer_discouragement) BOOST_AUTO_TEST_CASE(DoS_bantime) { + LOCK(NetEventsInterface::g_msgproc_mutex); + const CChainParams& chainparams = Params(); auto banman = std::make_unique(m_args.GetDataDirBase() / "banlist", nullptr, DEFAULT_MISBEHAVING_BANTIME); auto connman = std::make_unique(0x1337, 0x1337, *m_node.addrman); @@ -426,10 +412,7 @@ BOOST_AUTO_TEST_CASE(DoS_bantime) dummyNode.fSuccessfullyConnected = true; peerLogic->Misbehaving(dummyNode.GetId(), DISCOURAGEMENT_THRESHOLD); - { - LOCK(dummyNode.cs_sendProcessing); - BOOST_CHECK(peerLogic->SendMessages(&dummyNode)); - } + BOOST_CHECK(peerLogic->SendMessages(&dummyNode)); BOOST_CHECK(banman->IsDiscouraged(addr)); peerLogic->FinalizeNode(dummyNode); diff --git a/src/test/fuzz/process_message.cpp b/src/test/fuzz/process_message.cpp index 51ba2ab04f..7c042b5cb0 100644 --- a/src/test/fuzz/process_message.cpp +++ b/src/test/fuzz/process_message.cpp @@ -81,6 +81,8 @@ void fuzz_target(FuzzBufferType buffer, const std::string& LIMIT_TO_MESSAGE_TYPE SetMockTime(1610000000); // any time to successfully reset ibd chainstate.ResetIbd(); + LOCK(NetEventsInterface::g_msgproc_mutex); + const std::string random_message_type{fuzzed_data_provider.ConsumeBytesAsString(CMessageHeader::COMMAND_SIZE).c_str()}; if (!LIMIT_TO_MESSAGE_TYPE.empty() && random_message_type != LIMIT_TO_MESSAGE_TYPE) { return; @@ -99,10 
+101,7 @@ void fuzz_target(FuzzBufferType buffer, const std::string& LIMIT_TO_MESSAGE_TYPE g_setup->m_node.peerman->ProcessMessage(p2p_node, random_message_type, random_bytes_data_stream, GetTime(), std::atomic{false}); } catch (const std::ios_base::failure& e) { } - { - LOCK(p2p_node.cs_sendProcessing); - g_setup->m_node.peerman->SendMessages(&p2p_node); - } + g_setup->m_node.peerman->SendMessages(&p2p_node); SyncWithValidationInterfaceQueue(); LOCK2(::cs_main, g_cs_orphans); // See init.cpp for rationale for implicit locking order requirement g_setup->m_node.connman->StopNodes(); diff --git a/src/test/fuzz/process_messages.cpp b/src/test/fuzz/process_messages.cpp index 6bac0336cf..6e270f8d60 100644 --- a/src/test/fuzz/process_messages.cpp +++ b/src/test/fuzz/process_messages.cpp @@ -40,6 +40,8 @@ FUZZ_TARGET_INIT(process_messages, initialize_process_messages) SetMockTime(1610000000); // any time to successfully reset ibd chainstate.ResetIbd(); + LOCK(NetEventsInterface::g_msgproc_mutex); + std::vector peers; const auto num_peers_to_add = fuzzed_data_provider.ConsumeIntegralInRange(1, 3); for (int i = 0; i < num_peers_to_add; ++i) { @@ -70,10 +72,7 @@ FUZZ_TARGET_INIT(process_messages, initialize_process_messages) connman.ProcessMessagesOnce(random_node); } catch (const std::ios_base::failure&) { } - { - LOCK(random_node.cs_sendProcessing); - g_setup->m_node.peerman->SendMessages(&random_node); - } + g_setup->m_node.peerman->SendMessages(&random_node); } SyncWithValidationInterfaceQueue(); LOCK2(::cs_main, g_cs_orphans); // See init.cpp for rationale for implicit locking order requirement diff --git a/src/test/fuzz/util.h b/src/test/fuzz/util.h index 259323dd73..38704f6d2e 100644 --- a/src/test/fuzz/util.h +++ b/src/test/fuzz/util.h @@ -390,7 +390,7 @@ auto ConsumeNode(FuzzedDataProvider& fuzzed_data_provider, const std::optional ConsumeNodeAsUniquePtr(FuzzedDataProvider& fdp, const std::optional& node_id_in = std::nullopt) { return ConsumeNode(fdp, node_id_in); } 
-void FillNode(FuzzedDataProvider& fuzzed_data_provider, ConnmanTestMsg& connman, CNode& node) noexcept; +void FillNode(FuzzedDataProvider& fuzzed_data_provider, ConnmanTestMsg& connman, CNode& node) noexcept EXCLUSIVE_LOCKS_REQUIRED(NetEventsInterface::g_msgproc_mutex); class FuzzedFileProvider { diff --git a/src/test/net_tests.cpp b/src/test/net_tests.cpp index 2c7f5fa09a..d89027291b 100644 --- a/src/test/net_tests.cpp +++ b/src/test/net_tests.cpp @@ -809,6 +809,8 @@ BOOST_AUTO_TEST_CASE(LocalAddress_BasicLifecycle) BOOST_AUTO_TEST_CASE(initial_advertise_from_version_message) { + LOCK(NetEventsInterface::g_msgproc_mutex); + // Tests the following scenario: // * -bind=3.4.5.6:20001 is specified // * we make an outbound connection to a peer @@ -893,10 +895,7 @@ BOOST_AUTO_TEST_CASE(initial_advertise_from_version_message) } }; - { - LOCK(peer.cs_sendProcessing); - m_node.peerman->SendMessages(&peer); - } + m_node.peerman->SendMessages(&peer); BOOST_CHECK(sent); diff --git a/src/test/util/net.cpp b/src/test/util/net.cpp index 85b2e2ea9a..b636559c4a 100644 --- a/src/test/util/net.cpp +++ b/src/test/util/net.cpp @@ -44,10 +44,7 @@ void ConnmanTestMsg::Handshake(CNode& node, (void)connman.ReceiveMsgFrom(node, msg_version); node.fPauseSend = false; connman.ProcessMessagesOnce(node); - { - LOCK(node.cs_sendProcessing); - peerman.SendMessages(&node); - } + peerman.SendMessages(&node); if (node.fDisconnect) return; assert(node.nVersion == version); assert(node.GetCommonVersion() == std::min(version, PROTOCOL_VERSION)); @@ -61,10 +58,7 @@ void ConnmanTestMsg::Handshake(CNode& node, (void)connman.ReceiveMsgFrom(node, msg_verack); node.fPauseSend = false; connman.ProcessMessagesOnce(node); - { - LOCK(node.cs_sendProcessing); - peerman.SendMessages(&node); - } + peerman.SendMessages(&node); assert(node.fSuccessfullyConnected == true); } } diff --git a/src/test/util/net.h b/src/test/util/net.h index fddaa52bc5..1f2a31290e 100644 --- a/src/test/util/net.h +++ 
b/src/test/util/net.h @@ -44,9 +44,10 @@ struct ConnmanTestMsg : public CConnman { ServiceFlags local_services, NetPermissionFlags permission_flags, int32_t version, - bool relay_txs); + bool relay_txs) + EXCLUSIVE_LOCKS_REQUIRED(NetEventsInterface::g_msgproc_mutex); - void ProcessMessagesOnce(CNode& node) { m_msgproc->ProcessMessages(&node, flagInterruptMsgProc); } + void ProcessMessagesOnce(CNode& node) EXCLUSIVE_LOCKS_REQUIRED(NetEventsInterface::g_msgproc_mutex) { m_msgproc->ProcessMessages(&node, flagInterruptMsgProc); } void NodeReceiveMsgBytes(CNode& node, Span msg_bytes, bool& complete) const; From caaa0fda01c224790c60d2d81fa1b18a7d262e6e Mon Sep 17 00:00:00 2001 From: Kittywhiskers Van Gogh <63189531+kwvg@users.noreply.github.com> Date: Sun, 16 Jun 2024 11:55:00 +0000 Subject: [PATCH 09/14] net: use `std::deque` for `vSendMsg` instead of `std::list` The change was introduced as an optimization in 027a852a (dash#3398) but prevents the backport of bitcoin#26844 due to the inability to engage in binary expressions with iterators of `std::list`. 
--- src/net.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/net.h b/src/net.h index 0277b27e10..4e1ea87b7f 100644 --- a/src/net.h +++ b/src/net.h @@ -471,7 +471,7 @@ public: /** Offset inside the first vSendMsg already sent */ size_t nSendOffset GUARDED_BY(cs_vSend){0}; uint64_t nSendBytes GUARDED_BY(cs_vSend){0}; - std::list> vSendMsg GUARDED_BY(cs_vSend); + std::deque> vSendMsg GUARDED_BY(cs_vSend); std::atomic nSendMsgSize{0}; Mutex cs_vSend; Mutex m_sock_mutex; From 13f6dc1b27e914134d29accace3cd323e8c2a717 Mon Sep 17 00:00:00 2001 From: Kittywhiskers Van Gogh <63189531+kwvg@users.noreply.github.com> Date: Tue, 6 Aug 2024 07:26:00 +0000 Subject: [PATCH 10/14] merge bitcoin#26844: Pass MSG_MORE flag when sending non-final network messages --- src/net.cpp | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/src/net.cpp b/src/net.cpp index 42f3ad6b49..be1650eb6f 100644 --- a/src/net.cpp +++ b/src/net.cpp @@ -950,7 +950,13 @@ size_t CConnman::SocketSendData(CNode& node) if (!node.m_sock) { break; } - nBytes = node.m_sock->Send(reinterpret_cast(data.data()) + node.nSendOffset, data.size() - node.nSendOffset, MSG_NOSIGNAL | MSG_DONTWAIT); + int flags = MSG_NOSIGNAL | MSG_DONTWAIT; +#ifdef MSG_MORE + if (it + 1 != node.vSendMsg.end()) { + flags |= MSG_MORE; + } +#endif + nBytes = node.m_sock->Send(reinterpret_cast(data.data()) + node.nSendOffset, data.size() - node.nSendOffset, flags); } if (nBytes > 0) { node.m_last_send = GetTime(); From 8c986d6b08fd6218660a83e036f9002b388bf24f Mon Sep 17 00:00:00 2001 From: Kittywhiskers Van Gogh <63189531+kwvg@users.noreply.github.com> Date: Tue, 27 Aug 2024 18:36:31 +0000 Subject: [PATCH 11/14] partial bitcoin#27981: Fix potential network stalling bug To allow for the removal of a node from `vReceivableNodes`, the collection of node pointers have been made into an `std::set`. Marking as partial as it should be revisited when bitcoin#24356 is backported. 
--- src/net.cpp | 58 +++++++++++++++++++++++++++++++---------------------- src/net.h | 5 ++++- 2 files changed, 38 insertions(+), 25 deletions(-) diff --git a/src/net.cpp b/src/net.cpp index be1650eb6f..6105970d42 100644 --- a/src/net.cpp +++ b/src/net.cpp @@ -936,7 +936,7 @@ void V1TransportSerializer::prepareForTransport(CSerializedNetMsg& msg, std::vec CVectorWriter{SER_NETWORK, INIT_PROTO_VERSION, header, 0, hdr}; } -size_t CConnman::SocketSendData(CNode& node) +std::pair CConnman::SocketSendData(CNode& node) const { auto it = node.vSendMsg.begin(); size_t nSentSize = 0; @@ -994,7 +994,7 @@ size_t CConnman::SocketSendData(CNode& node) } node.vSendMsg.erase(node.vSendMsg.begin(), it); node.nSendMsgSize = node.vSendMsg.size(); - return nSentSize; + return {nSentSize, !node.vSendMsg.empty()}; } static bool ReverseCompareNodeMinPingTime(const NodeEvictionCandidate& a, const NodeEvictionCandidate& b) @@ -1711,8 +1711,7 @@ bool CConnman::GenerateSelectSet(const std::vector& nodes, recv_set.insert(hListenSocket.sock->Get()); } - for (CNode* pnode : nodes) - { + for (CNode* pnode : nodes) { bool select_recv = !pnode->fHasRecvData; bool select_send = !pnode->fCanSendData; @@ -2027,9 +2026,9 @@ void CConnman::SocketHandlerConnected(const std::set& recv_set, if (interruptNet) return; - std::vector vErrorNodes; - std::vector vReceivableNodes; - std::vector vSendableNodes; + std::set vErrorNodes; + std::set vReceivableNodes; + std::set vSendableNodes; { LOCK(cs_mapSocketToNode); for (auto hSocket : error_set) { @@ -2038,7 +2037,7 @@ void CConnman::SocketHandlerConnected(const std::set& recv_set, continue; } it->second->AddRef(); - vErrorNodes.emplace_back(it->second); + vErrorNodes.emplace(it->second); } for (auto hSocket : recv_set) { if (error_set.count(hSocket)) { @@ -2073,7 +2072,6 @@ void CConnman::SocketHandlerConnected(const std::set& recv_set, { LOCK(cs_sendable_receivable_nodes); - vReceivableNodes.reserve(mapReceivableNodes.size()); for (auto it = 
mapReceivableNodes.begin(); it != mapReceivableNodes.end(); ) { if (!it->second->fHasRecvData) { it = mapReceivableNodes.erase(it); @@ -2088,7 +2086,7 @@ void CConnman::SocketHandlerConnected(const std::set& recv_set, // receiving data (which should succeed as the socket signalled as receivable). if (!it->second->fPauseRecv && it->second->nSendMsgSize == 0 && !it->second->fDisconnect) { it->second->AddRef(); - vReceivableNodes.emplace_back(it->second); + vReceivableNodes.emplace(it->second); } ++it; } @@ -2099,7 +2097,6 @@ void CConnman::SocketHandlerConnected(const std::set& recv_set, // also clean up mapNodesWithDataToSend from nodes that had messages to send in the last iteration // but don't have any in this iteration LOCK(cs_mapNodesWithDataToSend); - vSendableNodes.reserve(mapNodesWithDataToSend.size()); for (auto it = mapNodesWithDataToSend.begin(); it != mapNodesWithDataToSend.end(); ) { if (it->second->nSendMsgSize == 0) { // See comment in PushMessage @@ -2108,13 +2105,36 @@ void CConnman::SocketHandlerConnected(const std::set& recv_set, } else { if (it->second->fCanSendData) { it->second->AddRef(); - vSendableNodes.emplace_back(it->second); + vSendableNodes.emplace(it->second); } ++it; } } } + for (CNode* pnode : vSendableNodes) { + if (interruptNet) { + break; + } + + // Send data + auto [bytes_sent, data_left] = WITH_LOCK(pnode->cs_vSend, return SocketSendData(*pnode)); + if (bytes_sent) { + RecordBytesSent(bytes_sent); + + // If both receiving and (non-optimistic) sending were possible, we first attempt + // sending. If that succeeds, but does not fully drain the send queue, do not + // attempt to receive. This avoids needlessly queueing data if the remote peer + // is slow at receiving data, by means of TCP flow control. We only do this when + // sending actually succeeded to make sure progress is always made; otherwise a + // deadlock would be possible when both sides have data to send, but neither is + // receiving. 
+ if (data_left && vReceivableNodes.erase(pnode)) { + pnode->Release(); + } + } + } + for (CNode* pnode : vErrorNodes) { if (interruptNet) { @@ -2136,16 +2156,6 @@ void CConnman::SocketHandlerConnected(const std::set& recv_set, SocketRecvData(pnode); } - for (CNode* pnode : vSendableNodes) { - if (interruptNet) { - break; - } - - // Send data - size_t bytes_sent = WITH_LOCK(pnode->cs_vSend, return SocketSendData(*pnode)); - if (bytes_sent) RecordBytesSent(bytes_sent); - } - for (auto& node : vErrorNodes) { node->Release(); } @@ -4183,7 +4193,7 @@ void CConnman::PushMessage(CNode* pnode, CSerializedNetMsg&& msg) { LOCK(pnode->cs_vSend); - bool hasPendingData = !pnode->vSendMsg.empty(); + bool optimisticSend(pnode->vSendMsg.empty()); //log total amount of bytes per message type pnode->mapSendBytesPerMsgType[msg.m_type] += nTotalSize; @@ -4206,7 +4216,7 @@ void CConnman::PushMessage(CNode* pnode, CSerializedNetMsg&& msg) } // wake up select() call in case there was no pending data before (so it was not selecting this socket for sending) - if (!hasPendingData && (m_wakeup_pipe && m_wakeup_pipe->m_need_wakeup.load())) + if (optimisticSend && (m_wakeup_pipe && m_wakeup_pipe->m_need_wakeup.load())) m_wakeup_pipe->Write(); } } diff --git a/src/net.h b/src/net.h index 4e1ea87b7f..70dd0d5fab 100644 --- a/src/net.h +++ b/src/net.h @@ -1387,8 +1387,11 @@ private: NodeId GetNewNodeId(); - size_t SocketSendData(CNode& node) EXCLUSIVE_LOCKS_REQUIRED(node.cs_vSend); + /** (Try to) send data from node's vSendMsg. Returns (bytes_sent, data_left). 
*/ + std::pair SocketSendData(CNode& node) const EXCLUSIVE_LOCKS_REQUIRED(node.cs_vSend); + size_t SocketRecvData(CNode* pnode) EXCLUSIVE_LOCKS_REQUIRED(!mutexMsgProc); + void DumpAddresses(); // Network stats From c6b9186e69299be34f520caf6f708fdee070acb6 Mon Sep 17 00:00:00 2001 From: Kittywhiskers Van Gogh <63189531+kwvg@users.noreply.github.com> Date: Sat, 11 Jun 2022 09:23:51 +0200 Subject: [PATCH 12/14] merge bitcoin#25325: Add pool based memory resource --- src/Makefile.am | 1 + src/Makefile.bench.include | 1 + src/Makefile.test.include | 2 + src/Makefile.test_util.include | 1 + src/bench/pool.cpp | 50 ++++ src/coins.cpp | 15 +- src/coins.h | 20 +- src/memusage.h | 20 ++ src/support/allocators/pool.h | 349 ++++++++++++++++++++++++++++ src/test/coins_tests.cpp | 39 +++- src/test/fuzz/coins_view.cpp | 3 +- src/test/fuzz/poolresource.cpp | 174 ++++++++++++++ src/test/pool_tests.cpp | 189 +++++++++++++++ src/test/util/poolresourcetester.h | 129 ++++++++++ src/test/validation_flush_tests.cpp | 33 +-- src/validation.cpp | 1 - 16 files changed, 1003 insertions(+), 24 deletions(-) create mode 100644 src/bench/pool.cpp create mode 100644 src/support/allocators/pool.h create mode 100644 src/test/fuzz/poolresource.cpp create mode 100644 src/test/pool_tests.cpp create mode 100644 src/test/util/poolresourcetester.h diff --git a/src/Makefile.am b/src/Makefile.am index f90e432b7b..0c2e16a6db 100644 --- a/src/Makefile.am +++ b/src/Makefile.am @@ -314,6 +314,7 @@ BITCOIN_CORE_H = \ streams.h \ statsd_client.h \ support/allocators/mt_pooled_secure.h \ + support/allocators/pool.h \ support/allocators/pooled_secure.h \ support/allocators/secure.h \ support/allocators/zeroafterfree.h \ diff --git a/src/Makefile.bench.include b/src/Makefile.bench.include index 5da7d544a9..133bc282ef 100644 --- a/src/Makefile.bench.include +++ b/src/Makefile.bench.include @@ -41,6 +41,7 @@ bench_bench_dash_SOURCES = \ bench/nanobench.h \ bench/nanobench.cpp \ bench/peer_eviction.cpp \ + 
bench/pool.cpp \ bench/rpc_blockchain.cpp \ bench/rpc_mempool.cpp \ bench/util_time.cpp \ diff --git a/src/Makefile.test.include b/src/Makefile.test.include index d7ccc53c5d..471c38be44 100644 --- a/src/Makefile.test.include +++ b/src/Makefile.test.include @@ -136,6 +136,7 @@ BITCOIN_TESTS =\ test/netbase_tests.cpp \ test/pmt_tests.cpp \ test/policyestimator_tests.cpp \ + test/pool_tests.cpp \ test/pow_tests.cpp \ test/prevector_tests.cpp \ test/raii_event_tests.cpp \ @@ -298,6 +299,7 @@ test_fuzz_fuzz_SOURCES = \ test/fuzz/parse_univalue.cpp \ test/fuzz/policy_estimator.cpp \ test/fuzz/policy_estimator_io.cpp \ + test/fuzz/poolresource.cpp \ test/fuzz/pow.cpp \ test/fuzz/prevector.cpp \ test/fuzz/primitives_transaction.cpp \ diff --git a/src/Makefile.test_util.include b/src/Makefile.test_util.include index ee349bbc6f..5e17e398b1 100644 --- a/src/Makefile.test_util.include +++ b/src/Makefile.test_util.include @@ -14,6 +14,7 @@ TEST_UTIL_H = \ test/util/logging.h \ test/util/mining.h \ test/util/net.h \ + test/util/poolresourcetester.h \ test/util/script.h \ test/util/setup_common.h \ test/util/str.h \ diff --git a/src/bench/pool.cpp b/src/bench/pool.cpp new file mode 100644 index 0000000000..0bf2b18514 --- /dev/null +++ b/src/bench/pool.cpp @@ -0,0 +1,50 @@ +// Copyright (c) 2022 The Bitcoin Core developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php. + +#include +#include + +#include + +template +void BenchFillClearMap(benchmark::Bench& bench, Map& map) +{ + size_t batch_size = 5000; + + // make sure each iteration of the benchmark contains exactly 5000 inserts and one clear. 
+ // do this at least 10 times so we get reasonable accurate results + + bench.batch(batch_size).minEpochIterations(10).run([&] { + auto rng = ankerl::nanobench::Rng(1234); + for (size_t i = 0; i < batch_size; ++i) { + map[rng()]; + } + map.clear(); + }); +} + +static void PoolAllocator_StdUnorderedMap(benchmark::Bench& bench) +{ + auto map = std::unordered_map(); + BenchFillClearMap(bench, map); +} + +static void PoolAllocator_StdUnorderedMapWithPoolResource(benchmark::Bench& bench) +{ + using Map = std::unordered_map, + std::equal_to, + PoolAllocator, + sizeof(std::pair) + 4 * sizeof(void*), + alignof(void*)>>; + + // make sure the resource supports large enough pools to hold the node. We do this by adding the size of a few pointers to it. + auto pool_resource = Map::allocator_type::ResourceType(); + auto map = Map{0, std::hash{}, std::equal_to{}, &pool_resource}; + BenchFillClearMap(bench, map); +} + +BENCHMARK(PoolAllocator_StdUnorderedMap); +BENCHMARK(PoolAllocator_StdUnorderedMapWithPoolResource); diff --git a/src/coins.cpp b/src/coins.cpp index 431d7223d8..4a1cccd0ab 100644 --- a/src/coins.cpp +++ b/src/coins.cpp @@ -33,7 +33,7 @@ size_t CCoinsViewBacked::EstimateSize() const { return base->EstimateSize(); } CCoinsViewCache::CCoinsViewCache(CCoinsView* baseIn, bool deterministic) : CCoinsViewBacked(baseIn), m_deterministic(deterministic), - cacheCoins(0, SaltedOutpointHasher(/*deterministic=*/deterministic)) + cacheCoins(0, SaltedOutpointHasher(/*deterministic=*/deterministic), CCoinsMap::key_equal{}, &m_cache_coins_memory_resource) {} size_t CCoinsViewCache::DynamicMemoryUsage() const { @@ -240,9 +240,12 @@ bool CCoinsViewCache::BatchWrite(CCoinsMap &mapCoins, const uint256 &hashBlockIn bool CCoinsViewCache::Flush() { bool fOk = base->BatchWrite(cacheCoins, hashBlock, /*erase=*/true); - if (fOk && !cacheCoins.empty()) { - /* BatchWrite must erase all cacheCoins elements when erase=true. 
*/ - throw std::logic_error("Not all cached coins were erased"); + if (fOk) { + if (!cacheCoins.empty()) { + /* BatchWrite must erase all cacheCoins elements when erase=true. */ + throw std::logic_error("Not all cached coins were erased"); + } + ReallocateCache(); } cachedCoinsUsage = 0; return fOk; @@ -295,7 +298,9 @@ void CCoinsViewCache::ReallocateCache() // Cache should be empty when we're calling this. assert(cacheCoins.size() == 0); cacheCoins.~CCoinsMap(); - ::new (&cacheCoins) CCoinsMap(0, SaltedOutpointHasher(/*deterministic=*/m_deterministic)); + m_cache_coins_memory_resource.~CCoinsMapMemoryResource(); + ::new (&m_cache_coins_memory_resource) CCoinsMapMemoryResource{}; + ::new (&cacheCoins) CCoinsMap{0, SaltedOutpointHasher{/*deterministic=*/m_deterministic}, CCoinsMap::key_equal{}, &m_cache_coins_memory_resource}; } void CCoinsViewCache::SanityCheck() const diff --git a/src/coins.h b/src/coins.h index 3dbe23aea5..fde5f688cc 100644 --- a/src/coins.h +++ b/src/coins.h @@ -11,6 +11,7 @@ #include #include #include +#include #include #include @@ -131,7 +132,23 @@ struct CCoinsCacheEntry CCoinsCacheEntry(Coin&& coin_, unsigned char flag) : coin(std::move(coin_)), flags(flag) {} }; -typedef std::unordered_map CCoinsMap; +/** + * PoolAllocator's MAX_BLOCK_SIZE_BYTES parameter here uses sizeof the data, and adds the size + * of 4 pointers. We do not know the exact node size used in the std::unordered_node implementation + * because it is implementation defined. Most implementations have an overhead of 1 or 2 pointers, + * so nodes can be connected in a linked list, and in some cases the hash value is stored as well. + * Using an additional sizeof(void*)*4 for MAX_BLOCK_SIZE_BYTES should thus be sufficient so that + * all implementations can allocate the nodes from the PoolAllocator. 
+ */ +using CCoinsMap = std::unordered_map, + PoolAllocator, + sizeof(std::pair) + sizeof(void*) * 4, + alignof(void*)>>; + +using CCoinsMapMemoryResource = CCoinsMap::allocator_type::ResourceType; /** Cursor for iterating over CoinsView state */ class CCoinsViewCursor @@ -221,6 +238,7 @@ protected: * declared as "const". */ mutable uint256 hashBlock; + mutable CCoinsMapMemoryResource m_cache_coins_memory_resource{}; mutable CCoinsMap cacheCoins; /* Cached dynamic memory usage for the inner Coin objects. */ diff --git a/src/memusage.h b/src/memusage.h index a6e894129a..5fffe4ec07 100644 --- a/src/memusage.h +++ b/src/memusage.h @@ -7,6 +7,7 @@ #include #include +#include #include @@ -167,6 +168,25 @@ static inline size_t DynamicUsage(const std::unordered_map& m) return MallocUsage(sizeof(unordered_node >)) * m.size() + MallocUsage(sizeof(void*) * m.bucket_count()); } +template +static inline size_t DynamicUsage(const std::unordered_map, + MAX_BLOCK_SIZE_BYTES, + ALIGN_BYTES>>& m) +{ + auto* pool_resource = m.get_allocator().resource(); + + // The allocated chunks are stored in a std::list. Size per node should + // therefore be 3 pointers: next, previous, and a pointer to the chunk. + size_t estimated_list_node_size = MallocUsage(sizeof(void*) * 3); + size_t usage_resource = estimated_list_node_size * pool_resource->NumAllocatedChunks(); + size_t usage_chunks = MallocUsage(pool_resource->ChunkSizeBytes()) * pool_resource->NumAllocatedChunks(); + return usage_resource + usage_chunks + MallocUsage(sizeof(void*) * m.bucket_count()); } +} // namespace memusage + #endif // BITCOIN_MEMUSAGE_H diff --git a/src/support/allocators/pool.h b/src/support/allocators/pool.h new file mode 100644 index 0000000000..c8e70ebacf --- /dev/null +++ b/src/support/allocators/pool.h @@ -0,0 +1,349 @@ +// Copyright (c) 2022 The Bitcoin Core developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php. 
+ +#ifndef BITCOIN_SUPPORT_ALLOCATORS_POOL_H +#define BITCOIN_SUPPORT_ALLOCATORS_POOL_H + +#include +#include +#include +#include +#include +#include +#include +#include + +/** + * A memory resource similar to std::pmr::unsynchronized_pool_resource, but + * optimized for node-based containers. It has the following properties: + * + * * Owns the allocated memory and frees it on destruction, even when deallocate + * has not been called on the allocated blocks. + * + * * Consists of a number of pools, each one for a different block size. + * Each pool holds blocks of uniform size in a freelist. + * + * * Exhausting memory in a freelist causes a new allocation of a fixed size chunk. + * This chunk is used to carve out blocks. + * + * * Block sizes or alignments that can not be served by the pools are allocated + * and deallocated by operator new(). + * + * PoolResource is not thread-safe. It is intended to be used by PoolAllocator. + * + * @tparam MAX_BLOCK_SIZE_BYTES Maximum size to allocate with the pool. If larger + * sizes are requested, allocation falls back to new(). + * + * @tparam ALIGN_BYTES Required alignment for the allocations. + * + * An example: If you create a PoolResource<128, 8>(262144) and perform a bunch of + * allocations and deallocate 2 blocks with size 8 bytes, and 3 blocks with size 16, + * the members will look like this: + * + * m_free_lists m_allocated_chunks + * ┌───┐ ┌───┐ ┌────────────-------──────┐ + * │ │ blocks │ ├─►│ 262144 B │ + * │ │ ┌─────┐ ┌─────┐ └─┬─┘ └────────────-------──────┘ + * │ 1 ├─►│ 8 B ├─►│ 8 B │ │ + * │ │ └─────┘ └─────┘ : + * │ │ │ + * │ │ ┌─────┐ ┌─────┐ ┌─────┐ ▼ + * │ 2 ├─►│16 B ├─►│16 B ├─►│16 B │ ┌───┐ ┌─────────────────────────┐ + * │ │ └─────┘ └─────┘ └─────┘ │ ├─►│ ▲ │ ▲ + * │ │ └───┘ └──────────┬──────────────┘ │ + * │ . │ │ m_available_memory_end + * │ . │ m_available_memory_it + * │ . 
│ + * │ │ + * │ │ + * │16 │ + * └───┘ + * + * Here m_free_lists[1] holds the 2 blocks of size 8 bytes, and m_free_lists[2] + * holds the 3 blocks of size 16. The blocks came from the data stored in the + * m_allocated_chunks list. Each chunk has bytes 262144. The last chunk has still + * some memory available for the blocks, and when m_available_memory_it is at the + * end, a new chunk will be allocated and added to the list. + */ +template +class PoolResource final +{ + static_assert(ALIGN_BYTES > 0, "ALIGN_BYTES must be nonzero"); + static_assert((ALIGN_BYTES & (ALIGN_BYTES - 1)) == 0, "ALIGN_BYTES must be a power of two"); + + /** + * In-place linked list of the allocations, used for the freelist. + */ + struct ListNode { + ListNode* m_next; + + explicit ListNode(ListNode* next) : m_next(next) {} + }; + static_assert(std::is_trivially_destructible_v, "Make sure we don't need to manually call a destructor"); + + /** + * Internal alignment value. The larger of the requested ALIGN_BYTES and alignof(FreeList). + */ + static constexpr std::size_t ELEM_ALIGN_BYTES = std::max(alignof(ListNode), ALIGN_BYTES); + static_assert((ELEM_ALIGN_BYTES & (ELEM_ALIGN_BYTES - 1)) == 0, "ELEM_ALIGN_BYTES must be a power of two"); + static_assert(sizeof(ListNode) <= ELEM_ALIGN_BYTES, "Units of size ELEM_SIZE_ALIGN need to be able to store a ListNode"); + static_assert((MAX_BLOCK_SIZE_BYTES & (ELEM_ALIGN_BYTES - 1)) == 0, "MAX_BLOCK_SIZE_BYTES needs to be a multiple of the alignment."); + + /** + * Size in bytes to allocate per chunk + */ + const size_t m_chunk_size_bytes; + + /** + * Contains all allocated pools of memory, used to free the data in the destructor. + */ + std::list m_allocated_chunks{}; + + /** + * Single linked lists of all data that came from deallocating. + * m_free_lists[n] will serve blocks of size n*ELEM_ALIGN_BYTES. + */ + std::array m_free_lists{}; + + /** + * Points to the beginning of available memory for carving out allocations. 
+ */ + std::byte* m_available_memory_it = nullptr; + + /** + * Points to the end of available memory for carving out allocations. + * + * That member variable is redundant, and is always equal to `m_allocated_chunks.back() + m_chunk_size_bytes` + * whenever it is accessed, but `m_available_memory_end` caches this for clarity and efficiency. + */ + std::byte* m_available_memory_end = nullptr; + + /** + * How many multiple of ELEM_ALIGN_BYTES are necessary to fit bytes. We use that result directly as an index + * into m_free_lists. Round up for the special case when bytes==0. + */ + [[nodiscard]] static constexpr std::size_t NumElemAlignBytes(std::size_t bytes) + { + return (bytes + ELEM_ALIGN_BYTES - 1) / ELEM_ALIGN_BYTES + (bytes == 0); + } + + /** + * True when it is possible to make use of the freelist + */ + [[nodiscard]] static constexpr bool IsFreeListUsable(std::size_t bytes, std::size_t alignment) + { + return alignment <= ELEM_ALIGN_BYTES && bytes <= MAX_BLOCK_SIZE_BYTES; + } + + /** + * Replaces node with placement constructed ListNode that points to the previous node + */ + void PlacementAddToList(void* p, ListNode*& node) + { + node = new (p) ListNode{node}; + } + + /** + * Allocate one full memory chunk which will be used to carve out allocations. + * Also puts any leftover bytes into the freelist. + * + * Precondition: leftover bytes are either 0 or few enough to fit into a place in the freelist + */ + void AllocateChunk() + { + // if there is still any available memory left, put it into the freelist. 
+ size_t remaining_available_bytes = std::distance(m_available_memory_it, m_available_memory_end); + if (0 != remaining_available_bytes) { + PlacementAddToList(m_available_memory_it, m_free_lists[remaining_available_bytes / ELEM_ALIGN_BYTES]); + } + + void* storage = ::operator new (m_chunk_size_bytes, std::align_val_t{ELEM_ALIGN_BYTES}); + m_available_memory_it = new (storage) std::byte[m_chunk_size_bytes]; + m_available_memory_end = m_available_memory_it + m_chunk_size_bytes; + m_allocated_chunks.emplace_back(m_available_memory_it); + } + + /** + * Access to internals for testing purpose only + */ + friend class PoolResourceTester; + +public: + /** + * Construct a new PoolResource object which allocates the first chunk. + * chunk_size_bytes will be rounded up to next multiple of ELEM_ALIGN_BYTES. + */ + explicit PoolResource(std::size_t chunk_size_bytes) + : m_chunk_size_bytes(NumElemAlignBytes(chunk_size_bytes) * ELEM_ALIGN_BYTES) + { + assert(m_chunk_size_bytes >= MAX_BLOCK_SIZE_BYTES); + AllocateChunk(); + } + + /** + * Construct a new Pool Resource object, defaults to 2^18=262144 chunk size. + */ + PoolResource() : PoolResource(262144) {} + + /** + * Disable copy & move semantics, these are not supported for the resource. + */ + PoolResource(const PoolResource&) = delete; + PoolResource& operator=(const PoolResource&) = delete; + PoolResource(PoolResource&&) = delete; + PoolResource& operator=(PoolResource&&) = delete; + + /** + * Deallocates all memory allocated associated with the memory resource. + */ + ~PoolResource() + { + for (std::byte* chunk : m_allocated_chunks) { + std::destroy(chunk, chunk + m_chunk_size_bytes); + ::operator delete ((void*)chunk, std::align_val_t{ELEM_ALIGN_BYTES}); + } + } + + /** + * Allocates a block of bytes. If possible the freelist is used, otherwise allocation + * is forwarded to ::operator new(). 
+ */ + void* Allocate(std::size_t bytes, std::size_t alignment) + { + if (IsFreeListUsable(bytes, alignment)) { + const std::size_t num_alignments = NumElemAlignBytes(bytes); + if (nullptr != m_free_lists[num_alignments]) { + // we've already got data in the pool's freelist, unlink one element and return the pointer + // to the unlinked memory. Since FreeList is trivially destructible we can just treat it as + // uninitialized memory. + return std::exchange(m_free_lists[num_alignments], m_free_lists[num_alignments]->m_next); + } + + // freelist is empty: get one allocation from allocated chunk memory. + const std::ptrdiff_t round_bytes = static_cast(num_alignments * ELEM_ALIGN_BYTES); + if (round_bytes > m_available_memory_end - m_available_memory_it) { + // slow path, only happens when a new chunk needs to be allocated + AllocateChunk(); + } + + // Make sure we use the right amount of bytes for that freelist (might be rounded up), + return std::exchange(m_available_memory_it, m_available_memory_it + round_bytes); + } + + // Can't use the pool => use operator new() + return ::operator new (bytes, std::align_val_t{alignment}); + } + + /** + * Returns a block to the freelists, or deletes the block when it did not come from the chunks. + */ + void Deallocate(void* p, std::size_t bytes, std::size_t alignment) noexcept + { + if (IsFreeListUsable(bytes, alignment)) { + const std::size_t num_alignments = NumElemAlignBytes(bytes); + // put the memory block into the linked list. We can placement construct the FreeList + // into the memory since we can be sure the alignment is correct. + PlacementAddToList(p, m_free_lists[num_alignments]); + } else { + // Can't use the pool => forward deallocation to ::operator delete(). 
+ ::operator delete (p, std::align_val_t{alignment}); + } + } + + /** + * Number of allocated chunks + */ + [[nodiscard]] std::size_t NumAllocatedChunks() const + { + return m_allocated_chunks.size(); + } + + /** + * Size in bytes to allocate per chunk, currently hardcoded to a fixed size. + */ + [[nodiscard]] size_t ChunkSizeBytes() const + { + return m_chunk_size_bytes; + } +}; + + +/** + * Forwards all allocations/deallocations to the PoolResource. + */ +template +class PoolAllocator +{ + PoolResource* m_resource; + + template + friend class PoolAllocator; + +public: + using value_type = T; + using ResourceType = PoolResource; + + /** + * Not explicit so we can easily construct it with the correct resource + */ + PoolAllocator(ResourceType* resource) noexcept + : m_resource(resource) + { + } + + PoolAllocator(const PoolAllocator& other) noexcept = default; + PoolAllocator& operator=(const PoolAllocator& other) noexcept = default; + + template + PoolAllocator(const PoolAllocator& other) noexcept + : m_resource(other.resource()) + { + } + + /** + * The rebind struct here is mandatory because we use non type template arguments for + * PoolAllocator. See https://en.cppreference.com/w/cpp/named_req/Allocator#cite_note-2 + */ + template + struct rebind { + using other = PoolAllocator; + }; + + /** + * Forwards each call to the resource. + */ + T* allocate(size_t n) + { + return static_cast(m_resource->Allocate(n * sizeof(T), alignof(T))); + } + + /** + * Forwards each call to the resource. 
+ */ + void deallocate(T* p, size_t n) noexcept + { + m_resource->Deallocate(p, n * sizeof(T), alignof(T)); + } + + ResourceType* resource() const noexcept + { + return m_resource; + } +}; + +template +bool operator==(const PoolAllocator& a, + const PoolAllocator& b) noexcept +{ + return a.resource() == b.resource(); +} + +template +bool operator!=(const PoolAllocator& a, + const PoolAllocator& b) noexcept +{ + return !(a == b); +} + +#endif // BITCOIN_SUPPORT_ALLOCATORS_POOL_H diff --git a/src/test/coins_tests.cpp b/src/test/coins_tests.cpp index 2c8e0b1013..75b25143e2 100644 --- a/src/test/coins_tests.cpp +++ b/src/test/coins_tests.cpp @@ -6,6 +6,7 @@ #include #include