Mirror of https://github.com/dashpay/dash.git
Eliminate TX trickle bypass, sort TX invs for privacy and priority.
Previously Bitcoin would send 1/4 of transactions out to all peers instantly. This causes high overhead because it makes >80% of INVs size 1. Doing so harms privacy, because it limits the amount of source obscurity a transaction can receive. These randomized broadcasts also disobeyed transaction dependencies and required use of the orphan pool; because the orphan pool is so small, this led to poor propagation for dependent transactions. When the bypass wasn't in effect, transactions were sent in the order they were received; this avoided creating orphans but undermines privacy fairly significantly.

This commit:

- Eliminates the bypass. The bypass is replaced by halving the average delay for outbound peers.

- Sorts candidate transactions for INV by their topological depth, then by their feerate (then hash), removing the information leakage and providing priority service to higher-fee transactions.

- Limits the number of transactions sent in a single INV to 7 tx/sec (and twice that for outbound); this limits the harm of low-fee transaction floods and gives faster relay service to higher-fee transactions. The 7 sounds lower than it really is because received advertisements need not be sent, and because the aggregate rate is multiplied by the number of peers.
This commit is contained in:
parent 04a2937357
commit f2d3ba7386
src/main.cpp
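The 7 tx/sec figure in the description is a per-peer trickle budget, not a network-wide throughput cap: each peer gets its own budget, and outbound peers get roughly twice the rate because their average delay is halved. A rough back-of-the-envelope sketch of that arithmetic follows (the constants mirror the ones added in this commit; the peer counts are made-up example values, not anything from the code):

```cpp
#include <cstdio>

// Back-of-the-envelope sketch of the per-peer INV budget described in the
// commit message. Peer counts below are hypothetical illustration values.
int main()
{
    const unsigned int INVENTORY_BROADCAST_INTERVAL = 5;                           // seconds, average trickle delay
    const unsigned int INVENTORY_BROADCAST_MAX = 7 * INVENTORY_BROADCAST_INTERVAL; // 35 inv items per message

    const double inbound_rate  = INVENTORY_BROADCAST_MAX / (double)INVENTORY_BROADCAST_INTERVAL; // ~7 tx/s per inbound peer
    const double outbound_rate = 2.0 * inbound_rate;                                              // delay halved, ~14 tx/s

    // Hypothetical connection mix: 8 outbound peers, 20 inbound peers.
    const int outbound_peers = 8, inbound_peers = 20;
    const double aggregate = outbound_peers * outbound_rate + inbound_peers * inbound_rate;

    std::printf("per-peer cap: %.0f tx/s inbound, %.0f tx/s outbound; aggregate budget: %.0f tx/s\n",
                inbound_rate, outbound_rate, aggregate);
}
```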
@@ -5560,6 +5560,29 @@ bool ProcessMessages(CNode* pfrom)
     return fOk;
 }
 
+class CompareInvMempoolOrder
+{
+    CTxMemPool *mp;
+public:
+    CompareInvMempoolOrder(CTxMemPool *mempool)
+    {
+        mp = mempool;
+    }
+
+    bool operator()(const CInv &a, const CInv &b)
+    {
+        if (a.type != MSG_TX && b.type != MSG_TX) {
+            return false;
+        } else {
+            if (a.type != MSG_TX) {
+                return true;
+            } else if (b.type != MSG_TX) {
+                return false;
+            }
+            return mp->CompareDepthAndScore(a.hash, b.hash);
+        }
+    }
+};
+
 bool SendMessages(CNode* pto)
 {
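For readers skimming the comparator: with std::stable_sort (used in the next hunk), operator() returning true means a sorts before b. Any non-MSG_TX item (a block INV, for instance) is therefore placed ahead of transaction INVs, two non-transaction items keep their relative order, and two transactions defer to the mempool's depth-and-score ordering. A minimal stand-alone sketch of just the type-handling branch, with simplified stand-ins for CInv and with the mempool call stubbed out (this is not code from the commit):

```cpp
#include <algorithm>
#include <cstdio>
#include <vector>

// Simplified stand-ins for illustration only; the real CInv/CTxMemPool differ.
enum { MSG_TX = 1, MSG_BLOCK = 2 };
struct FakeInv { int type; const char* label; };

// Mirrors the branch structure of CompareInvMempoolOrder::operator():
// non-transaction items sort first; two transactions would fall through to
// CompareDepthAndScore (replaced here by "keep existing order").
static bool CompareLikeInvMempoolOrder(const FakeInv& a, const FakeInv& b)
{
    if (a.type != MSG_TX && b.type != MSG_TX) return false; // both non-tx: equivalent, keep order
    if (a.type != MSG_TX) return true;                      // a is non-tx: a first
    if (b.type != MSG_TX) return false;                     // b is non-tx: b first
    return false;                                           // two txs: the real code asks the mempool
}

int main()
{
    std::vector<FakeInv> inv = {
        {MSG_TX, "tx1"}, {MSG_BLOCK, "block"}, {MSG_TX, "tx2"},
    };
    std::stable_sort(inv.begin(), inv.end(), CompareLikeInvMempoolOrder);
    for (const FakeInv& i : inv) std::printf("%s\n", i.label); // prints: block, tx1, tx2
}
```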
@@ -5790,42 +5813,31 @@ bool SendMessages(CNode* pto)
             bool fSendTrickle = pto->fWhitelisted;
             if (pto->nNextInvSend < nNow) {
                 fSendTrickle = true;
-                pto->nNextInvSend = PoissonNextSend(nNow, AVG_INVENTORY_BROADCAST_INTERVAL);
+                // Use half the delay for outbound peers, as there is less privacy concern for them.
+                pto->nNextInvSend = PoissonNextSend(nNow, INVENTORY_BROADCAST_INTERVAL >> !pto->fInbound);
             }
             LOCK(pto->cs_inventory);
-            vInv.reserve(std::min<size_t>(1000, pto->vInventoryToSend.size()));
+            if (fSendTrickle && pto->vInventoryToSend.size() > 1) {
+                // Topologically and fee-rate sort the inventory we send for privacy and priority reasons.
+                CompareInvMempoolOrder compareInvMempoolOrder(&mempool);
+                std::stable_sort(pto->vInventoryToSend.begin(), pto->vInventoryToSend.end(), compareInvMempoolOrder);
+            }
+            vInv.reserve(std::min<size_t>(INVENTORY_BROADCAST_MAX, pto->vInventoryToSend.size()));
             vInvWait.reserve(pto->vInventoryToSend.size());
             BOOST_FOREACH(const CInv& inv, pto->vInventoryToSend)
             {
                 if (inv.type == MSG_TX && pto->filterInventoryKnown.contains(inv.hash))
                     continue;
 
-                // trickle out tx inv to protect privacy
-                if (inv.type == MSG_TX && !fSendTrickle)
-                {
-                    // 1/4 of tx invs blast to all immediately
-                    static uint256 hashSalt;
-                    if (hashSalt.IsNull())
-                        hashSalt = GetRandHash();
-                    uint256 hashRand = ArithToUint256(UintToArith256(inv.hash) ^ UintToArith256(hashSalt));
-                    hashRand = Hash(BEGIN(hashRand), END(hashRand));
-                    bool fTrickleWait = ((UintToArith256(hashRand) & 3) != 0);
-
-                    if (fTrickleWait)
-                    {
-                        vInvWait.push_back(inv);
-                        continue;
-                    }
+                // No reason to drain out at many times the network's capacity,
+                // especially since we have many peers and some will draw much shorter delays.
+                if (vInv.size() >= INVENTORY_BROADCAST_MAX || (inv.type == MSG_TX && !fSendTrickle)) {
+                    vInvWait.push_back(inv);
+                    continue;
                 }
 
                 pto->filterInventoryKnown.insert(inv.hash);
 
                 vInv.push_back(inv);
                 if (vInv.size() >= 1000)
                 {
                     pto->PushMessage(NetMsgType::INV, vInv);
                     vInv.clear();
                 }
             }
             pto->vInventoryToSend = vInvWait;
         }
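One non-obvious detail in this hunk is how the outbound halving is written: INVENTORY_BROADCAST_INTERVAL >> !pto->fInbound shifts the 5-second average interval right by one bit exactly when the peer is outbound (fInbound is false), and that average is then fed to PoissonNextSend, which (as I understand it) draws randomized, roughly exponentially distributed waits around the requested mean. A small sketch of the expression's evaluation, with a stand-in sampler in place of the real PoissonNextSend (whose exact signature and implementation are not shown in this diff):

```cpp
#include <cstdio>
#include <random>

// Stand-in for PoissonNextSend: assumed behavior is a randomized wait whose
// mean equals the requested average interval. Not the real function.
static double NextSendDelay(double averageIntervalSeconds, std::mt19937_64& rng)
{
    std::exponential_distribution<double> d(1.0 / averageIntervalSeconds);
    return d(rng);
}

int main()
{
    const unsigned int INVENTORY_BROADCAST_INTERVAL = 5;

    // Same expression as in the diff: shift by 0 for inbound, by 1 for outbound.
    // Note the integer shift: 5 >> 1 == 2, slightly less than "half of 5".
    unsigned int avgInbound  = INVENTORY_BROADCAST_INTERVAL >> !true;   // inbound peer: 5 s
    unsigned int avgOutbound = INVENTORY_BROADCAST_INTERVAL >> !false;  // outbound peer: 2 s

    std::mt19937_64 rng(42);
    std::printf("inbound avg %u s, outbound avg %u s, one sampled outbound wait: %.2f s\n",
                avgInbound, avgOutbound, NextSendDelay(avgOutbound, rng));
}
```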
@@ -99,9 +99,12 @@ static const unsigned int MAX_REJECT_MESSAGE_LENGTH = 111;
 static const unsigned int AVG_LOCAL_ADDRESS_BROADCAST_INTERVAL = 24 * 24 * 60;
 /** Average delay between peer address broadcasts in seconds. */
 static const unsigned int AVG_ADDRESS_BROADCAST_INTERVAL = 30;
-/** Average delay between trickled inventory broadcasts in seconds.
- *  Blocks, whitelisted receivers, and a random 25% of transactions bypass this. */
-static const unsigned int AVG_INVENTORY_BROADCAST_INTERVAL = 5;
+/** Average delay between trickled inventory transmissions in seconds.
+ *  Blocks and whitelisted receivers bypass this, outbound peers get half this delay. */
+static const unsigned int INVENTORY_BROADCAST_INTERVAL = 5;
+/** Maximum number of inventory items to send per transmission.
+ *  Limits the impact of low-fee transaction floods. */
+static const unsigned int INVENTORY_BROADCAST_MAX = 7 * INVENTORY_BROADCAST_INTERVAL;
 /** Average delay between feefilter broadcasts in seconds. */
 static const unsigned int AVG_FEEFILTER_BROADCAST_INTERVAL = 10 * 60;
 /** Maximum feefilter broadcast delay after significant change. */
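For reference, the two new constants multiply out as follows; this is a compile-time check written for this write-up, not code from the commit, and it simply restates the values above:

```cpp
// Mirrors the constants added above so the arithmetic can be checked in isolation.
static const unsigned int INVENTORY_BROADCAST_INTERVAL = 5;
static const unsigned int INVENTORY_BROADCAST_MAX = 7 * INVENTORY_BROADCAST_INTERVAL;

// 35 inventory items per INV message; at one trickle roughly every 5 seconds
// this averages out to the 7 tx/s per-peer budget described in the commit message.
static_assert(INVENTORY_BROADCAST_MAX == 35, "7 * 5 inventory items per transmission");

int main() {}
```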
@@ -752,6 +752,21 @@ void CTxMemPool::check(const CCoinsViewCache *pcoins) const
     assert(innerUsage == cachedInnerUsage);
 }
 
+bool CTxMemPool::CompareDepthAndScore(const uint256& hasha, const uint256& hashb)
+{
+    LOCK(cs);
+    indexed_transaction_set::const_iterator i = mapTx.find(hasha);
+    if (i == mapTx.end()) return false;
+    indexed_transaction_set::const_iterator j = mapTx.find(hashb);
+    if (j == mapTx.end()) return true;
+    uint64_t counta = i->GetCountWithAncestors();
+    uint64_t countb = j->GetCountWithAncestors();
+    if (counta == countb) {
+        return CompareTxMemPoolEntryByScore()(*i, *j);
+    }
+    return counta < countb;
+}
+
 void CTxMemPool::queryHashes(vector<uint256>& vtxid)
 {
     vtxid.clear();
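Read as a sort predicate, CompareDepthAndScore returns true when hasha should come before hashb: transactions no longer in the mempool sort last, transactions with fewer in-mempool ancestors (parents before children) come first, and ties fall back to the score comparator. A toy stand-in illustrating just the two-key rule, with simplified structs instead of real mempool entries and assuming the score comparator prefers higher feerate:

```cpp
#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <vector>

// Simplified stand-in for a mempool entry; not the real CTxMemPoolEntry.
struct Entry {
    const char* name;
    uint64_t ancestors;   // analogue of GetCountWithAncestors()
    double score;         // feerate-like score, higher assumed better
};

// Same two-key rule as CompareDepthAndScore: fewer ancestors first,
// then (on ties) the better-scoring transaction first.
static bool DepthThenScore(const Entry& a, const Entry& b)
{
    if (a.ancestors == b.ancestors)
        return a.score > b.score;
    return a.ancestors < b.ancestors;
}

int main()
{
    std::vector<Entry> txs = {
        {"child-high-fee", 2, 90.0},
        {"parent-low-fee", 1, 10.0},
        {"parent-high-fee", 1, 50.0},
    };
    std::stable_sort(txs.begin(), txs.end(), DepthThenScore);
    for (const Entry& e : txs)
        std::printf("%s\n", e.name); // parent-high-fee, parent-low-fee, child-high-fee
}
```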
@@ -511,6 +511,7 @@ public:
                std::list<CTransaction>& conflicts, bool fCurrentEstimate = true);
     void clear();
     void _clear(); //lock free
+    bool CompareDepthAndScore(const uint256& hasha, const uint256& hashb);
     void queryHashes(std::vector<uint256>& vtxid);
     void pruneSpent(const uint256& hash, CCoins &coins);
     unsigned int GetTransactionsUpdated() const;