mirror of https://github.com/dashpay/dash.git
synced 2024-12-24 19:42:46 +01:00

merge bitcoin#21148: Split orphan handling from net_processing into txorphanage

This commit is contained in:
parent cbff29a630
commit 51ad8e4dde
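In short, the orphan-handling globals and free functions that used to live in net_processing.cpp (mapOrphanTransactions, AddOrphanTx, EraseOrphansFor, LimitOrphanTxSize) move behind a TxOrphanage class, held as the m_orphanage member of PeerManagerImpl, with all access still guarded by g_cs_orphans. As an orientation aid (a sketch drawn from the FinalizeNode hunk further down, not extra code in the commit), a typical call site changes like this:

// before: free function walking the global orphan maps
EraseOrphansFor(nodeid);
// after: the same work done through the TxOrphanage member, still under g_cs_orphans
WITH_LOCK(g_cs_orphans, m_orphanage.EraseForPeer(nodeid));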
@@ -328,6 +328,7 @@ BITCOIN_CORE_H = \
torcontrol.h \
txdb.h \
txmempool.h \
txorphanage.h \
undo.h \
unordered_lru_cache.h \
util/bip32.h \
@@ -527,6 +528,7 @@ libbitcoin_server_a_SOURCES = \
torcontrol.cpp \
txdb.cpp \
txmempool.cpp \
txorphanage.cpp \
validation.cpp \
validationinterface.cpp \
versionbits.cpp \
@@ -56,6 +56,7 @@
#include <torcontrol.h>
#include <txdb.h>
#include <txmempool.h>
#include <txorphanage.h>
#include <util/asmap.h>
#include <util/error.h>
#include <util/moneystr.h>
@@ -29,6 +29,7 @@
#include <tinyformat.h>
#include <index/txindex.h>
#include <txmempool.h>
#include <txorphanage.h>
#include <util/check.h> // For NDEBUG compile time check
#include <util/system.h>
#include <util/strencodings.h>
@@ -86,10 +87,6 @@ static_assert(INBOUND_PEER_TX_DELAY >= MAX_GETDATA_RANDOM_DELAY,
/** Limit to avoid sending big packets. Not used in processing incoming GETDATA for compatibility */
static const unsigned int MAX_GETDATA_SZ = 1000;

/** Expiration time for orphan transactions in seconds */
static constexpr int64_t ORPHAN_TX_EXPIRE_TIME = 20 * 60;
/** Minimum time between orphan transactions expire time checks in seconds */
static constexpr int64_t ORPHAN_TX_EXPIRE_INTERVAL = 5 * 60;
/** How long to cache transactions in mapRelay for normal relay */
static constexpr auto RELAY_TX_CACHE_TIME = 15min;
/** How long a transaction has to be in the mempool before it can unconditionally be relayed (even when not in mapRelay). */
@@ -191,24 +188,6 @@ static constexpr size_t MAX_ADDR_PROCESSING_TOKEN_BUCKET{MAX_ADDR_TO_SEND};
/** The compactblocks version we support. See BIP 152. */
static constexpr uint64_t CMPCTBLOCKS_VERSION{1};

struct COrphanTx {
// When modifying, adapt the copy of this definition in tests/DoS_tests.
CTransactionRef tx;
NodeId fromPeer;
int64_t nTimeExpire;
size_t list_pos;
size_t nTxSize;
};

/** Guards orphan transactions and extra txs for compact blocks */
RecursiveMutex g_cs_orphans;
/** Map from txid to orphan transaction record. Limited by
 * -maxorphantx/DEFAULT_MAX_ORPHAN_TRANSACTIONS */
std::map<uint256, COrphanTx> mapOrphanTransactions GUARDED_BY(g_cs_orphans);

size_t nMapOrphanTransactionsSize = 0;
void EraseOrphansFor(NodeId peer);

// Internal stuff
namespace {
/** Blocks that are in flight, and that are in the queue to be downloaded. */
@@ -773,35 +752,24 @@ private:

/** Number of peers from which we're downloading blocks. */
int nPeersWithValidatedDownloads GUARDED_BY(cs_main) = 0;
};
} // namespace

namespace {
/** Storage for orphan information */
TxOrphanage m_orphanage;

/** Number of preferable block download peers. */
int nPreferredDownload GUARDED_BY(cs_main) = 0;

struct IteratorComparator
{
template<typename I>
bool operator()(const I& a, const I& b) const
{
return &(*a) < &(*b);
}
};

/** Index from the parents' COutPoint into the mapOrphanTransactions. Used
 * to remove orphan transactions from the mapOrphanTransactions */
std::map<COutPoint, std::set<std::map<uint256, COrphanTx>::iterator, IteratorComparator>> mapOrphanTransactionsByPrev GUARDED_BY(g_cs_orphans);
/** Orphan transactions in vector for quick random eviction */
std::vector<std::map<uint256, COrphanTx>::iterator> g_orphan_list GUARDED_BY(g_cs_orphans);
void AddToCompactExtraTransactions(const CTransactionRef& tx) EXCLUSIVE_LOCKS_REQUIRED(g_cs_orphans);

/** Orphan/conflicted/etc transactions that are kept for compact block reconstruction.
 * The last -blockreconstructionextratxn/DEFAULT_BLOCK_RECONSTRUCTION_EXTRA_TXN of
 * these are kept in a ring buffer */
static std::vector<std::pair<uint256, CTransactionRef>> vExtraTxnForCompact GUARDED_BY(g_cs_orphans);
std::vector<std::pair<uint256, CTransactionRef>> vExtraTxnForCompact GUARDED_BY(g_cs_orphans);
/** Offset into vExtraTxnForCompact to insert the next tx */
static size_t vExtraTxnForCompactIt GUARDED_BY(g_cs_orphans) = 0;
size_t vExtraTxnForCompactIt GUARDED_BY(g_cs_orphans) = 0;
};
} // namespace

namespace {
/** Number of preferable block download peers. */
int nPreferredDownload GUARDED_BY(cs_main) = 0;
} // namespace

namespace {
@@ -1569,7 +1537,7 @@ void PeerManagerImpl::FinalizeNode(const CNode& node) {
for (const QueuedBlock& entry : state->vBlocksInFlight) {
mapBlocksInFlight.erase(entry.hash);
}
EraseOrphansFor(nodeid);
WITH_LOCK(g_cs_orphans, m_orphanage.EraseForPeer(nodeid));
nPreferredDownload -= state->fPreferredDownload;
nPeersWithValidatedDownloads -= (state->nBlocksInFlightValidHeaders != 0);
assert(nPeersWithValidatedDownloads >= 0);
@@ -1661,12 +1629,7 @@ bool PeerManagerImpl::GetNodeStateStats(NodeId nodeid, CNodeStateStats& stats) c
return true;
}

//////////////////////////////////////////////////////////////////////////////
//
// mapOrphanTransactions
//

static void AddToCompactExtraTransactions(const CTransactionRef& tx) EXCLUSIVE_LOCKS_REQUIRED(g_cs_orphans)
void PeerManagerImpl::AddToCompactExtraTransactions(const CTransactionRef& tx) EXCLUSIVE_LOCKS_REQUIRED(g_cs_orphans)
{
size_t max_extra_txn = gArgs.GetArg("-blockreconstructionextratxn", DEFAULT_BLOCK_RECONSTRUCTION_EXTRA_TXN);
if (max_extra_txn <= 0)
@@ -1677,131 +1640,6 @@ static void AddToCompactExtraTransactions(const CTransactionRef& tx) EXCLUSIVE_L
vExtraTxnForCompactIt = (vExtraTxnForCompactIt + 1) % max_extra_txn;
}

bool AddOrphanTx(const CTransactionRef& tx, NodeId peer) EXCLUSIVE_LOCKS_REQUIRED(g_cs_orphans)
{
const uint256& hash = tx->GetHash();
if (mapOrphanTransactions.count(hash))
return false;

// Ignore big transactions, to avoid a
// send-big-orphans memory exhaustion attack. If a peer has a legitimate
// large transaction with a missing parent then we assume
// it will rebroadcast it later, after the parent transaction(s)
// have been mined or received.
// 100 orphans, each of which is at most 99,999 bytes big is
// at most 10 megabytes of orphans and somewhat more byprev index (in the worst case):
unsigned int sz = GetSerializeSize(*tx, CTransaction::CURRENT_VERSION);
if (sz > MAX_STANDARD_TX_SIZE)
{
LogPrint(BCLog::MEMPOOL, "ignoring large orphan tx (size: %u, hash: %s)\n", sz, hash.ToString());
return false;
}

auto ret = mapOrphanTransactions.emplace(hash, COrphanTx{tx, peer, GetTime() + ORPHAN_TX_EXPIRE_TIME, g_orphan_list.size(), sz});
assert(ret.second);
g_orphan_list.push_back(ret.first);
for (const CTxIn& txin : tx->vin) {
mapOrphanTransactionsByPrev[txin.prevout].insert(ret.first);
}

AddToCompactExtraTransactions(tx);

nMapOrphanTransactionsSize += sz;

LogPrint(BCLog::MEMPOOL, "stored orphan tx %s (mapsz %u outsz %u)\n", hash.ToString(),
mapOrphanTransactions.size(), mapOrphanTransactionsByPrev.size());
statsClient.inc("transactions.orphans.add", 1.0f);
statsClient.gauge("transactions.orphans", mapOrphanTransactions.size());
return true;
}

int static EraseOrphanTx(uint256 hash) EXCLUSIVE_LOCKS_REQUIRED(g_cs_orphans)
{
std::map<uint256, COrphanTx>::iterator it = mapOrphanTransactions.find(hash);
if (it == mapOrphanTransactions.end())
return 0;
for (const CTxIn& txin : it->second.tx->vin)
{
auto itPrev = mapOrphanTransactionsByPrev.find(txin.prevout);
if (itPrev == mapOrphanTransactionsByPrev.end())
continue;
itPrev->second.erase(it);
if (itPrev->second.empty())
mapOrphanTransactionsByPrev.erase(itPrev);
}

size_t old_pos = it->second.list_pos;
assert(g_orphan_list[old_pos] == it);
if (old_pos + 1 != g_orphan_list.size()) {
// Unless we're deleting the last entry in g_orphan_list, move the last
// entry to the position we're deleting.
auto it_last = g_orphan_list.back();
g_orphan_list[old_pos] = it_last;
it_last->second.list_pos = old_pos;
}
g_orphan_list.pop_back();

assert(nMapOrphanTransactionsSize >= it->second.nTxSize);
nMapOrphanTransactionsSize -= it->second.nTxSize;
mapOrphanTransactions.erase(it);
statsClient.inc("transactions.orphans.remove", 1.0f);
statsClient.gauge("transactions.orphans", mapOrphanTransactions.size());
return 1;
}

void EraseOrphansFor(NodeId peer)
{
LOCK(g_cs_orphans);
int nErased = 0;
std::map<uint256, COrphanTx>::iterator iter = mapOrphanTransactions.begin();
while (iter != mapOrphanTransactions.end())
{
std::map<uint256, COrphanTx>::iterator maybeErase = iter++; // increment to avoid iterator becoming invalid
if (maybeErase->second.fromPeer == peer)
{
nErased += EraseOrphanTx(maybeErase->second.tx->GetHash());
}
}
if (nErased > 0) LogPrint(BCLog::MEMPOOL, "Erased %d orphan tx from peer=%d\n", nErased, peer);
}


unsigned int LimitOrphanTxSize(unsigned int nMaxOrphansSize)
{
LOCK(g_cs_orphans);

unsigned int nEvicted = 0;
static int64_t nNextSweep;
int64_t nNow = GetTime();
if (nNextSweep <= nNow) {
// Sweep out expired orphan pool entries:
int nErased = 0;
int64_t nMinExpTime = nNow + ORPHAN_TX_EXPIRE_TIME - ORPHAN_TX_EXPIRE_INTERVAL;
std::map<uint256, COrphanTx>::iterator iter = mapOrphanTransactions.begin();
while (iter != mapOrphanTransactions.end())
{
std::map<uint256, COrphanTx>::iterator maybeErase = iter++;
if (maybeErase->second.nTimeExpire <= nNow) {
nErased += EraseOrphanTx(maybeErase->second.tx->GetHash());
} else {
nMinExpTime = std::min(maybeErase->second.nTimeExpire, nMinExpTime);
}
}
// Sweep again 5 minutes after the next entry that expires in order to batch the linear scan.
nNextSweep = nMinExpTime + ORPHAN_TX_EXPIRE_INTERVAL;
if (nErased > 0) LogPrint(BCLog::MEMPOOL, "Erased %d orphan tx due to expiration\n", nErased);
}
FastRandomContext rng;
while (!mapOrphanTransactions.empty() && nMapOrphanTransactionsSize > nMaxOrphansSize)
{
// Evict a random orphan:
size_t randompos = rng.randrange(g_orphan_list.size());
EraseOrphanTx(g_orphan_list[randompos]->first);
++nEvicted;
}
return nEvicted;
}

void PeerManagerImpl::Misbehaving(const NodeId pnode, const int howmuch, const std::string& message)
{
assert(howmuch > 0);
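Aside: the EraseOrphanTx body above (and its TxOrphanage::EraseTx successor further down) keeps random eviction O(1) with a swap-and-pop on the orphan list. A generic, self-contained illustration of that trick, not taken from the commit (SwapAndPop is a hypothetical name):

#include <cstddef>
#include <vector>

// Remove list[doomed_pos] in O(1) by overwriting it with the last element and
// shrinking the vector; a real caller (like EraseOrphanTx) must also update the
// moved element's stored position, as the diff above does with list_pos.
template <typename T>
void SwapAndPop(std::vector<T>& list, std::size_t doomed_pos)
{
    if (doomed_pos + 1 != list.size()) {
        list[doomed_pos] = list.back();
    }
    list.pop_back();
}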
@@ -2022,52 +1860,17 @@ void PeerManagerImpl::StartScheduledTasks(CScheduler& scheduler)
*/
void PeerManagerImpl::BlockConnected(const std::shared_ptr<const CBlock>& pblock, const CBlockIndex* pindex)
{
{
LOCK2(cs_main, g_cs_orphans);
LOCK2(::cs_main, g_cs_orphans);

std::vector<uint256> vOrphanErase;
std::set<uint256> orphanWorkSet;

for (const CTransactionRef& ptx : pblock->vtx) {
const CTransaction& tx = *ptx;

// Which orphan pool entries we should reprocess and potentially try to accept into mempool again?
for (size_t i = 0; i < tx.vin.size(); i++) {
auto itByPrev = mapOrphanTransactionsByPrev.find(COutPoint(tx.GetHash(), (uint32_t)i));
if (itByPrev == mapOrphanTransactionsByPrev.end()) continue;
for (const auto& elem : itByPrev->second) {
orphanWorkSet.insert(elem->first);
}
}

// Which orphan pool entries must we evict?
for (const auto& txin : tx.vin) {
auto itByPrev = mapOrphanTransactionsByPrev.find(txin.prevout);
if (itByPrev == mapOrphanTransactionsByPrev.end()) continue;
for (auto mi = itByPrev->second.begin(); mi != itByPrev->second.end(); ++mi) {
const CTransaction& orphanTx = *(*mi)->second.tx;
const uint256& orphanHash = orphanTx.GetHash();
vOrphanErase.push_back(orphanHash);
}
}
}

// Erase orphan transactions included or precluded by this block
if (vOrphanErase.size()) {
int nErased = 0;
for (const uint256& orphanHash : vOrphanErase) {
nErased += EraseOrphanTx(orphanHash);
}
LogPrint(BCLog::MEMPOOL, "Erased %d orphan tx included or conflicted by block\n", nErased);
}

while (!orphanWorkSet.empty()) {
LogPrint(BCLog::MEMPOOL, "Trying to process %d orphans\n", orphanWorkSet.size());
ProcessOrphanTx(orphanWorkSet);
}

m_last_tip_update = GetTime<std::chrono::seconds>();
auto orphanWorkSet = m_orphanage.GetCandidatesForBlock(*pblock);
while (!orphanWorkSet.empty()) {
LogPrint(BCLog::MEMPOOL, "Trying to process %d orphans\n", orphanWorkSet.size());
ProcessOrphanTx(orphanWorkSet);
}

m_orphanage.EraseForBlock(*pblock);
m_last_tip_update = GetTime<std::chrono::seconds>();

{
LOCK(m_recent_confirmed_transactions_mutex);
for (const auto& ptx : pblock->vtx) {
@@ -2241,10 +2044,7 @@ bool PeerManagerImpl::AlreadyHave(const CInv& inv)
m_recent_rejects.reset();
}

{
LOCK(g_cs_orphans);
if (mapOrphanTransactions.count(inv.hash)) return true;
}
if (m_orphanage.HaveTx(inv.hash)) return true;

{
LOCK(m_recent_confirmed_transactions_mutex);
@@ -3144,40 +2944,32 @@ void PeerManagerImpl::ProcessOrphanTx(std::set<uint256>& orphan_work_set)
const uint256 orphanHash = *orphan_work_set.begin();
orphan_work_set.erase(orphan_work_set.begin());

auto orphan_it = mapOrphanTransactions.find(orphanHash);
if (orphan_it == mapOrphanTransactions.end()) continue;
const auto [porphanTx, from_peer] = m_orphanage.GetTx(orphanHash);
if (porphanTx == nullptr) continue;

const CTransactionRef porphanTx = orphan_it->second.tx;
const MempoolAcceptResult result = AcceptToMemoryPool(m_chainman.ActiveChainstate(), m_mempool, porphanTx, false /* bypass_limits */);
const TxValidationState& state = result.m_state;

if (result.m_result_type == MempoolAcceptResult::ResultType::VALID) {
LogPrint(BCLog::MEMPOOL, " accepted orphan tx %s\n", orphanHash.ToString());
RelayTransaction(porphanTx->GetHash());
for (unsigned int i = 0; i < porphanTx->vout.size(); i++) {
auto it_by_prev = mapOrphanTransactionsByPrev.find(COutPoint(orphanHash, i));
if (it_by_prev != mapOrphanTransactionsByPrev.end()) {
for (const auto& elem : it_by_prev->second) {
orphan_work_set.insert(elem->first);
}
}
}
EraseOrphanTx(orphanHash);
m_orphanage.AddChildrenToWorkSet(*porphanTx, orphan_work_set);
m_orphanage.EraseTx(orphanHash);
break;
} else if (state.GetResult() != TxValidationResult::TX_MISSING_INPUTS) {
if (state.IsInvalid()) {
LogPrint(BCLog::MEMPOOL, " invalid orphan tx %s from peer=%d. %s\n",
orphanHash.ToString(),
orphan_it->second.fromPeer,
from_peer,
state.ToString());
// Maybe punish peer that gave us an invalid orphan tx
MaybePunishNodeForTx(orphan_it->second.fromPeer, state);
MaybePunishNodeForTx(from_peer, state);
}
// Has inputs but not accepted to mempool
// Probably non-standard or insufficient fee
LogPrint(BCLog::MEMPOOL, " removed orphan tx %s\n", orphanHash.ToString());
m_recent_rejects.insert(orphanHash);
EraseOrphanTx(orphanHash);
m_orphanage.EraseTx(orphanHash);
break;
}
}
@@ -4325,15 +4117,7 @@ void PeerManagerImpl::ProcessMessage(

m_mempool.check(m_chainman.ActiveChainstate());
RelayTransaction(tx.GetHash());

for (unsigned int i = 0; i < tx.vout.size(); i++) {
auto it_by_prev = mapOrphanTransactionsByPrev.find(COutPoint(txid, i));
if (it_by_prev != mapOrphanTransactionsByPrev.end()) {
for (const auto& elem : it_by_prev->second) {
peer->m_orphan_work_set.insert(elem->first);
}
}
}
m_orphanage.AddChildrenToWorkSet(tx, peer->m_orphan_work_set);

pfrom.m_last_tx_time = GetTime<std::chrono::seconds>();

@@ -4377,13 +4161,16 @@ void PeerManagerImpl::ProcessMessage(
AddKnownInv(*peer, _inv2.hash);
if (!AlreadyHave(_inv2)) RequestObject(State(pfrom.GetId()), _inv2, current_time, is_masternode);
}
AddOrphanTx(ptx, pfrom.GetId());

// DoS prevention: do not allow mapOrphanTransactions to grow unbounded (see CVE-2012-3789)
if (m_orphanage.AddTx(ptx, pfrom.GetId())) {
AddToCompactExtraTransactions(ptx);
}

// DoS prevention: do not allow m_orphans to grow unbounded (see CVE-2012-3789)
unsigned int nMaxOrphanTxSize = (unsigned int)std::max((int64_t)0, gArgs.GetArg("-maxorphantxsize", DEFAULT_MAX_ORPHAN_TRANSACTIONS_SIZE)) * 1000000;
unsigned int nEvicted = LimitOrphanTxSize(nMaxOrphanTxSize);
unsigned int nEvicted = m_orphanage.LimitOrphans(nMaxOrphanTxSize);
if (nEvicted > 0) {
LogPrint(BCLog::MEMPOOL, "mapOrphan overflow, removed %u tx\n", nEvicted);
LogPrint(BCLog::MEMPOOL, "orphanage overflow, removed %u tx\n", nEvicted);
}
} else {
LogPrint(BCLog::MEMPOOL, "not keeping orphan with rejected parents %s\n",tx.GetHash().ToString());
@@ -6153,16 +5940,3 @@ bool PeerManagerImpl::SendMessages(CNode* pto)
} // release cs_main
return true;
}

class CNetProcessingCleanup
{
public:
CNetProcessingCleanup() {}
~CNetProcessingCleanup() {
// orphan transactions
mapOrphanTransactions.clear();
mapOrphanTransactionsByPrev.clear();
nMapOrphanTransactionsSize = 0;
}
};
static CNetProcessingCleanup instance_of_cnetprocessingcleanup;
@@ -29,7 +29,6 @@ struct CJContext;
struct LLMQContext;

extern RecursiveMutex cs_main;
extern RecursiveMutex g_cs_orphans;

/** Default for -maxorphantxsize, maximum size in megabytes the orphan map can grow before entries are removed */
static const unsigned int DEFAULT_MAX_ORPHAN_TRANSACTIONS_SIZE = 10; // this allows around 100 TXs of max size (and many more of normal size)
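A quick sanity check of that comment (illustrative only; it assumes the 100,000-byte MAX_STANDARD_TX_SIZE implied by the "99,999 bytes" remark in the orphanage size check, and the * 1000000 scaling applied to -maxorphantxsize in net_processing.cpp above):

// 10 MB default cap / 100,000-byte maximum standard tx size = about 100 max-size orphans
static_assert(10 * 1000000 / 100000 == 100, "default cap holds roughly 100 max-size orphans");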
@@ -15,6 +15,7 @@
#include <test/util/net.h>
#include <test/util/setup_common.h>
#include <timedata.h>
#include <txorphanage.h>
#include <util/string.h>
#include <util/system.h>
#include <util/time.h>
@@ -26,18 +27,6 @@
#include <boost/test/unit_test.hpp>


// Tests these internal-to-net_processing.cpp methods:
extern bool AddOrphanTx(const CTransactionRef& tx, NodeId peer);
extern void EraseOrphansFor(NodeId peer);
extern unsigned int LimitOrphanTxSize(unsigned int nMaxOrphans);

struct COrphanTx {
CTransactionRef tx;
NodeId fromPeer;
int64_t nTimeExpire;
};
extern std::map<uint256, COrphanTx> mapOrphanTransactions GUARDED_BY(g_cs_orphans);

static CService ip(uint32_t i)
{
struct in_addr s;
@@ -448,15 +437,23 @@ BOOST_AUTO_TEST_CASE(DoS_bantime)
peerLogic->FinalizeNode(dummyNode);
}

static CTransactionRef RandomOrphan()
class TxOrphanageTest : public TxOrphanage
{
std::map<uint256, COrphanTx>::iterator it;
LOCK2(cs_main, g_cs_orphans);
it = mapOrphanTransactions.lower_bound(InsecureRand256());
if (it == mapOrphanTransactions.end())
it = mapOrphanTransactions.begin();
return it->second.tx;
}
public:
inline size_t CountOrphans() const EXCLUSIVE_LOCKS_REQUIRED(g_cs_orphans)
{
return m_orphans.size();
}

CTransactionRef RandomOrphan() EXCLUSIVE_LOCKS_REQUIRED(g_cs_orphans)
{
std::map<uint256, OrphanTx>::iterator it;
it = m_orphans.lower_bound(InsecureRand256());
if (it == m_orphans.end())
it = m_orphans.begin();
return it->second.tx;
}
};

static void MakeNewKeyWithFastRandomContext(CKey& key)
{
@@ -476,11 +473,14 @@ BOOST_AUTO_TEST_CASE(DoS_mapOrphans)
// signature's R and S values have leading zeros.
g_insecure_rand_ctx = FastRandomContext{uint256{33}};

TxOrphanageTest orphanage;
CKey key;
MakeNewKeyWithFastRandomContext(key);
FillableSigningProvider keystore;
BOOST_CHECK(keystore.AddKey(key));

LOCK(g_cs_orphans);

// 50 orphan transactions:
for (int i = 0; i < 50; i++)
{
@@ -493,13 +493,13 @@ BOOST_AUTO_TEST_CASE(DoS_mapOrphans)
tx.vout[0].nValue = 1*CENT;
tx.vout[0].scriptPubKey = GetScriptForDestination(PKHash(key.GetPubKey()));

AddOrphanTx(MakeTransactionRef(tx), i);
orphanage.AddTx(MakeTransactionRef(tx), i);
}

// ... and 50 that depend on other orphans:
for (int i = 0; i < 50; i++)
{
CTransactionRef txPrev = RandomOrphan();
CTransactionRef txPrev = orphanage.RandomOrphan();

CMutableTransaction tx;
tx.vin.resize(1);
@@ -510,13 +510,13 @@ BOOST_AUTO_TEST_CASE(DoS_mapOrphans)
tx.vout[0].scriptPubKey = GetScriptForDestination(PKHash(key.GetPubKey()));
BOOST_CHECK(SignSignature(keystore, *txPrev, tx, 0, SIGHASH_ALL));

AddOrphanTx(MakeTransactionRef(tx), i);
orphanage.AddTx(MakeTransactionRef(tx), i);
}

// This really-big orphan should be ignored:
for (int i = 0; i < 10; i++)
{
CTransactionRef txPrev = RandomOrphan();
CTransactionRef txPrev = orphanage.RandomOrphan();

CMutableTransaction tx;
tx.vout.resize(1);
@@ -534,25 +534,24 @@ BOOST_AUTO_TEST_CASE(DoS_mapOrphans)
for (unsigned int j = 1; j < tx.vin.size(); j++)
tx.vin[j].scriptSig = tx.vin[0].scriptSig;

BOOST_CHECK(!AddOrphanTx(MakeTransactionRef(tx), i));
BOOST_CHECK(!orphanage.AddTx(MakeTransactionRef(tx), i));
}

LOCK2(cs_main, g_cs_orphans);
// Test EraseOrphansFor:
for (NodeId i = 0; i < 3; i++)
{
size_t sizeBefore = mapOrphanTransactions.size();
EraseOrphansFor(i);
BOOST_CHECK(mapOrphanTransactions.size() < sizeBefore);
size_t sizeBefore = orphanage.CountOrphans();
orphanage.EraseForPeer(i);
BOOST_CHECK(orphanage.CountOrphans() < sizeBefore);
}

// Test LimitOrphanTxSize() function:
LimitOrphanTxSize(40);
BOOST_CHECK(mapOrphanTransactions.size() <= 40);
LimitOrphanTxSize(10);
BOOST_CHECK(mapOrphanTransactions.size() <= 10);
LimitOrphanTxSize(0);
BOOST_CHECK(mapOrphanTransactions.empty());
orphanage.LimitOrphans(40);
BOOST_CHECK(orphanage.CountOrphans() <= 40);
orphanage.LimitOrphans(10);
BOOST_CHECK(orphanage.CountOrphans() <= 10);
orphanage.LimitOrphans(0);
BOOST_CHECK(orphanage.CountOrphans() == 0);
}

BOOST_AUTO_TEST_SUITE_END()
@@ -18,6 +18,7 @@
#include <test/util/net.h>
#include <test/util/setup_common.h>
#include <test/util/validation.h>
#include <txorphanage.h>
#include <validationinterface.h>
#include <version.h>
@@ -13,6 +13,7 @@
#include <test/util/net.h>
#include <test/util/setup_common.h>
#include <test/util/validation.h>
#include <txorphanage.h>
#include <validation.h>
#include <validationinterface.h>
src/txorphanage.cpp (new file, 227 lines)
@@ -0,0 +1,227 @@
// Copyright (c) 2021 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.

#include <txorphanage.h>

#include <consensus/validation.h>
#include <logging.h>
#include <policy/policy.h>
#include <statsd_client.h>

#include <cassert>

/** Expiration time for orphan transactions in seconds */
static constexpr int64_t ORPHAN_TX_EXPIRE_TIME = 20 * 60;
/** Minimum time between orphan transactions expire time checks in seconds */
static constexpr int64_t ORPHAN_TX_EXPIRE_INTERVAL = 5 * 60;

RecursiveMutex g_cs_orphans;

bool TxOrphanage::AddTx(const CTransactionRef& tx, NodeId peer)
{
    AssertLockHeld(g_cs_orphans);

    const uint256& hash = tx->GetHash();
    if (m_orphans.count(hash))
        return false;

    // Ignore big transactions, to avoid a
    // send-big-orphans memory exhaustion attack. If a peer has a legitimate
    // large transaction with a missing parent then we assume
    // it will rebroadcast it later, after the parent transaction(s)
    // have been mined or received.
    // 100 orphans, each of which is at most 99,999 bytes big is
    // at most 10 megabytes of orphans and somewhat more byprev index (in the worst case):
    unsigned int sz = GetSerializeSize(*tx, CTransaction::CURRENT_VERSION);
    if (sz > MAX_STANDARD_TX_SIZE)
    {
        LogPrint(BCLog::MEMPOOL, "ignoring large orphan tx (size: %u, hash: %s)\n", sz, hash.ToString());
        return false;
    }

    auto ret = m_orphans.emplace(hash, OrphanTx{tx, peer, GetTime() + ORPHAN_TX_EXPIRE_TIME, m_orphan_list.size(), sz});
    assert(ret.second);
    m_orphan_list.push_back(ret.first);
    for (const CTxIn& txin : tx->vin) {
        m_outpoint_to_orphan_it[txin.prevout].insert(ret.first);
    }

    m_orphan_tx_size += sz;

    LogPrint(BCLog::MEMPOOL, "stored orphan tx %s (mapsz %u outsz %u)\n", hash.ToString(),
             m_orphans.size(), m_outpoint_to_orphan_it.size());
    statsClient.inc("transactions.orphans.add", 1.0f);
    statsClient.gauge("transactions.orphans", m_orphans.size());

    return true;
}

int TxOrphanage::EraseTx(const uint256& txid)
{
    AssertLockHeld(g_cs_orphans);
    std::map<uint256, OrphanTx>::iterator it = m_orphans.find(txid);
    if (it == m_orphans.end())
        return 0;
    for (const CTxIn& txin : it->second.tx->vin)
    {
        auto itPrev = m_outpoint_to_orphan_it.find(txin.prevout);
        if (itPrev == m_outpoint_to_orphan_it.end())
            continue;
        itPrev->second.erase(it);
        if (itPrev->second.empty())
            m_outpoint_to_orphan_it.erase(itPrev);
    }

    size_t old_pos = it->second.list_pos;
    assert(m_orphan_list[old_pos] == it);
    if (old_pos + 1 != m_orphan_list.size()) {
        // Unless we're deleting the last entry in m_orphan_list, move the last
        // entry to the position we're deleting.
        auto it_last = m_orphan_list.back();
        m_orphan_list[old_pos] = it_last;
        it_last->second.list_pos = old_pos;
    }
    m_orphan_list.pop_back();

    assert(m_orphan_tx_size >= it->second.nTxSize);
    m_orphan_tx_size -= it->second.nTxSize;
    m_orphans.erase(it);
    statsClient.inc("transactions.orphans.remove", 1.0f);
    statsClient.gauge("transactions.orphans", m_orphans.size());
    return 1;
}

void TxOrphanage::EraseForPeer(NodeId peer)
{
    AssertLockHeld(g_cs_orphans);

    int nErased = 0;
    std::map<uint256, OrphanTx>::iterator iter = m_orphans.begin();
    while (iter != m_orphans.end())
    {
        std::map<uint256, OrphanTx>::iterator maybeErase = iter++; // increment to avoid iterator becoming invalid
        if (maybeErase->second.fromPeer == peer)
        {
            nErased += EraseTx(maybeErase->second.tx->GetHash());
        }
    }
    if (nErased > 0) LogPrint(BCLog::MEMPOOL, "Erased %d orphan tx from peer=%d\n", nErased, peer);
}

unsigned int TxOrphanage::LimitOrphans(unsigned int max_orphans_size)
{
    AssertLockHeld(g_cs_orphans);

    unsigned int nEvicted = 0;
    static int64_t nNextSweep;
    int64_t nNow = GetTime();
    if (nNextSweep <= nNow) {
        // Sweep out expired orphan pool entries:
        int nErased = 0;
        int64_t nMinExpTime = nNow + ORPHAN_TX_EXPIRE_TIME - ORPHAN_TX_EXPIRE_INTERVAL;
        std::map<uint256, OrphanTx>::iterator iter = m_orphans.begin();
        while (iter != m_orphans.end())
        {
            std::map<uint256, OrphanTx>::iterator maybeErase = iter++;
            if (maybeErase->second.nTimeExpire <= nNow) {
                nErased += EraseTx(maybeErase->second.tx->GetHash());
            } else {
                nMinExpTime = std::min(maybeErase->second.nTimeExpire, nMinExpTime);
            }
        }
        // Sweep again 5 minutes after the next entry that expires in order to batch the linear scan.
        nNextSweep = nMinExpTime + ORPHAN_TX_EXPIRE_INTERVAL;
        if (nErased > 0) LogPrint(BCLog::MEMPOOL, "Erased %d orphan tx due to expiration\n", nErased);
    }
    FastRandomContext rng;
    while (!m_orphans.empty() && m_orphan_tx_size > max_orphans_size)
    {
        // Evict a random orphan:
        size_t randompos = rng.randrange(m_orphan_list.size());
        EraseTx(m_orphan_list[randompos]->first);
        ++nEvicted;
    }
    return nEvicted;
}

void TxOrphanage::AddChildrenToWorkSet(const CTransaction& tx, std::set<uint256>& orphan_work_set) const
{
    AssertLockHeld(g_cs_orphans);
    for (unsigned int i = 0; i < tx.vout.size(); i++) {
        const auto it_by_prev = m_outpoint_to_orphan_it.find(COutPoint(tx.GetHash(), i));
        if (it_by_prev != m_outpoint_to_orphan_it.end()) {
            for (const auto& elem : it_by_prev->second) {
                orphan_work_set.insert(elem->first);
            }
        }
    }
}

bool TxOrphanage::HaveTx(const uint256& txid) const
{
    LOCK(g_cs_orphans);
    return m_orphans.count(txid);
}

std::pair<CTransactionRef, NodeId> TxOrphanage::GetTx(const uint256& txid) const
{
    AssertLockHeld(g_cs_orphans);

    const auto it = m_orphans.find(txid);
    if (it == m_orphans.end()) return {nullptr, -1};
    return {it->second.tx, it->second.fromPeer};
}

std::set<uint256> TxOrphanage::GetCandidatesForBlock(const CBlock& block)
{
    AssertLockHeld(g_cs_orphans);

    std::set<uint256> orphanWorkSet;

    for (const CTransactionRef& ptx : block.vtx) {
        const CTransaction& tx = *ptx;

        // Which orphan pool entries we should reprocess and potentially try to accept into mempool again?
        for (size_t i = 0; i < tx.vin.size(); i++) {
            auto itByPrev = m_outpoint_to_orphan_it.find(COutPoint(tx.GetHash(), (uint32_t)i));
            if (itByPrev == m_outpoint_to_orphan_it.end()) continue;
            for (const auto& elem : itByPrev->second) {
                orphanWorkSet.insert(elem->first);
            }
        }
    }

    return orphanWorkSet;
}

void TxOrphanage::EraseForBlock(const CBlock& block)
{
    AssertLockHeld(g_cs_orphans);

    std::vector<uint256> vOrphanErase;

    for (const CTransactionRef& ptx : block.vtx) {
        const CTransaction& tx = *ptx;

        // Which orphan pool entries must we evict?
        for (const auto& txin : tx.vin) {
            auto itByPrev = m_outpoint_to_orphan_it.find(txin.prevout);
            if (itByPrev == m_outpoint_to_orphan_it.end()) continue;
            for (auto mi = itByPrev->second.begin(); mi != itByPrev->second.end(); ++mi) {
                const CTransaction& orphanTx = *(*mi)->second.tx;
                const uint256& orphanHash = orphanTx.GetHash();
                vOrphanErase.push_back(orphanHash);
            }
        }
    }

    // Erase orphan transactions included or precluded by this block
    if (vOrphanErase.size()) {
        int nErased = 0;
        for (const uint256& orphanHash : vOrphanErase) {
            nErased += EraseTx(orphanHash);
        }
        LogPrint(BCLog::MEMPOOL, "Erased %d orphan tx included or conflicted by block\n", nErased);
    }
}
src/txorphanage.h (new file, 88 lines)
@@ -0,0 +1,88 @@
// Copyright (c) 2021 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.

#ifndef BITCOIN_TXORPHANAGE_H
#define BITCOIN_TXORPHANAGE_H

#include <net.h>
#include <primitives/block.h>
#include <primitives/transaction.h>
#include <sync.h>

/** Guards orphan transactions and extra txs for compact blocks */
extern RecursiveMutex g_cs_orphans;

/** A class to track orphan transactions (failed on TX_MISSING_INPUTS)
 * Since we cannot distinguish orphans from bad transactions with
 * non-existent inputs, we heavily limit the number of orphans
 * we keep and the duration we keep them for.
 */
class TxOrphanage {
public:
    /** Add a new orphan transaction */
    bool AddTx(const CTransactionRef& tx, NodeId peer) EXCLUSIVE_LOCKS_REQUIRED(g_cs_orphans);

    /** Check if we already have an orphan transaction */
    bool HaveTx(const uint256& txid) const LOCKS_EXCLUDED(::g_cs_orphans);

    /** Get an orphan transaction and its originating peer
     * (Transaction ref will be nullptr if not found)
     */
    std::pair<CTransactionRef, NodeId> GetTx(const uint256& txid) const EXCLUSIVE_LOCKS_REQUIRED(g_cs_orphans);

    /** Get a set of orphan transactions that can be candidates for reconsideration into the mempool */
    std::set<uint256> GetCandidatesForBlock(const CBlock& block) EXCLUSIVE_LOCKS_REQUIRED(g_cs_orphans);

    /** Erase an orphan by txid */
    int EraseTx(const uint256& txid) EXCLUSIVE_LOCKS_REQUIRED(g_cs_orphans);

    /** Erase all orphans announced by a peer (eg, after that peer disconnects) */
    void EraseForPeer(NodeId peer) EXCLUSIVE_LOCKS_REQUIRED(g_cs_orphans);

    /** Erase all orphans included in or invalidated by a new block */
    void EraseForBlock(const CBlock& block) EXCLUSIVE_LOCKS_REQUIRED(g_cs_orphans);

    /** Limit the orphanage to the given maximum */
    unsigned int LimitOrphans(unsigned int max_orphans_size) EXCLUSIVE_LOCKS_REQUIRED(g_cs_orphans);

    /** Add any orphans that list a particular tx as a parent into a peer's work set
     * (ie orphans that may have found their final missing parent, and so should be reconsidered for the mempool) */
    void AddChildrenToWorkSet(const CTransaction& tx, std::set<uint256>& orphan_work_set) const EXCLUSIVE_LOCKS_REQUIRED(g_cs_orphans);

protected:
    struct OrphanTx {
        CTransactionRef tx;
        NodeId fromPeer;
        int64_t nTimeExpire;
        size_t list_pos;
        size_t nTxSize;
    };

    /** Map from txid to orphan transaction record. Limited by
     * -maxorphantx/DEFAULT_MAX_ORPHAN_TRANSACTIONS */
    std::map<uint256, OrphanTx> m_orphans GUARDED_BY(g_cs_orphans);

    using OrphanMap = decltype(m_orphans);

    struct IteratorComparator
    {
        template<typename I>
        bool operator()(const I& a, const I& b) const
        {
            return &(*a) < &(*b);
        }
    };

    /** Index from the parents' COutPoint into the m_orphans. Used
     * to remove orphan transactions from the m_orphans */
    std::map<COutPoint, std::set<OrphanMap::iterator, IteratorComparator>> m_outpoint_to_orphan_it GUARDED_BY(g_cs_orphans);

    /** Orphan transactions in vector for quick random eviction */
    std::vector<OrphanMap::iterator> m_orphan_list GUARDED_BY(g_cs_orphans);

    /** Cumulative size of all transactions in the orphan map */
    size_t m_orphan_tx_size{0};
};

#endif // BITCOIN_TXORPHANAGE_H
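For orientation, a minimal sketch of how a caller is expected to drive this class (not part of the commit; ExampleOrphanFlow and its arguments are hypothetical, and the real plumbing in net_processing.cpp is omitted):

#include <sync.h>
#include <txorphanage.h>

void ExampleOrphanFlow(TxOrphanage& orphanage, const CTransactionRef& tx, NodeId peer)
{
    // Mutating calls must hold g_cs_orphans, per the EXCLUSIVE_LOCKS_REQUIRED annotations above.
    LOCK(g_cs_orphans);
    if (orphanage.AddTx(tx, peer)) {
        // Accepted into the orphan pool; enforce the byte cap afterwards
        // (10 * 1000000 mirrors the default -maxorphantxsize scaling in net_processing.cpp).
        orphanage.LimitOrphans(10 * 1000000);
    }
    // When the peer disconnects, drop everything it gave us.
    orphanage.EraseForPeer(peer);
}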
@@ -194,7 +194,7 @@ class InvalidTxRequestTest(BitcoinTestFramework):
for j in range(110):
orphan_tx_pool[i].vout.append(CTxOut(nValue=COIN // 10, scriptPubKey=SCRIPT_PUB_KEY_OP_TRUE))

with node.assert_debug_log(['mapOrphan overflow, removed 1 tx']):
with node.assert_debug_log(['orphanage overflow, removed 1 tx']):
node.p2ps[0].send_txs_and_test(orphan_tx_pool, node, success=False)

rejected_parent = CTransaction()