merge bitcoin#21236: Extract addr send functionality into MaybeSendAddr()

Author: Kittywhiskers Van Gogh, 2021-03-03 08:46:55 +00:00
Parent: ba1df91d8d
Commit: d34d2c4efb
GPG key ID 30CD0C065E5C4AAD (no known key found for this signature in database)
2 changed files with 87 additions and 76 deletions


@@ -600,8 +600,9 @@ public:
std::vector<CAddress> vAddrToSend;
std::unique_ptr<CRollingBloomFilter> m_addr_known{nullptr};
bool fGetAddr{false};
-    std::chrono::microseconds m_next_addr_send GUARDED_BY(cs_sendProcessing){0};
-    std::chrono::microseconds m_next_local_addr_send GUARDED_BY(cs_sendProcessing){0};
+    Mutex m_addr_send_times_mutex;
+    std::chrono::microseconds m_next_addr_send GUARDED_BY(m_addr_send_times_mutex){0};
+    std::chrono::microseconds m_next_local_addr_send GUARDED_BY(m_addr_send_times_mutex){0};
bool IsBlockRelayOnly() const;
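For illustration: after this hunk the two send-time members live under their own narrow mutex instead of being guarded by cs_sendProcessing, so the addr path only has to take m_addr_send_times_mutex. A minimal standalone sketch of that access pattern (the struct and function here are hypothetical stand-ins, not Dash Core code; only the member names mirror the diff):

    #include <chrono>
    #include <mutex>

    // Hypothetical stand-in for the relevant CNode members.
    struct AddrSendTimes {
        std::mutex m_addr_send_times_mutex;
        std::chrono::microseconds m_next_addr_send{0};       // guarded by the mutex above
        std::chrono::microseconds m_next_local_addr_send{0}; // guarded by the mutex above
    };

    // Consulting or updating the schedule only needs the narrow mutex.
    bool TimeToSendAddr(AddrSendTimes& times, std::chrono::microseconds now)
    {
        std::lock_guard<std::mutex> lock(times.m_addr_send_times_mutex);
        return now > times.m_next_addr_send;
    }

    int main()
    {
        AddrSendTimes times;
        return TimeToSendAddr(times, std::chrono::microseconds{1}) ? 0 : 1;
    }

In the real code the annotation stays machine-checkable: GUARDED_BY(m_addr_send_times_mutex) lets Clang's thread-safety analysis flag any access that forgets the lock.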


@@ -33,6 +33,7 @@
#include <util/system.h>
#include <util/strencodings.h>
#include <algorithm>
#include <list>
#include <memory>
#include <optional>
@@ -365,8 +366,13 @@ private:
void PushNodeVersion(CNode& pnode, int64_t nTime);
/** Send a ping message every PING_INTERVAL or if requested via RPC. May
-     * mark the peer to be disconnected if a ping has timed out. */
-    void MaybeSendPing(CNode& node_to, Peer& peer);
+     * mark the peer to be disconnected if a ping has timed out.
+     * We use mockable time for ping timeouts, so setmocktime may cause pings
+     * to time out. */
+    void MaybeSendPing(CNode& node_to, Peer& peer, std::chrono::microseconds now);
+    /** Send `addr` messages on a regular schedule. */
+    void MaybeSendAddr(CNode& node, std::chrono::microseconds current_time);
const CChainParams& m_chainparams;
CConnman& m_connman;
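Both reworked declarations take the current time as an argument instead of sampling the clock themselves; as the SendMessages() hunk further down shows, the caller reads the clock once and hands the same snapshot to each helper, which also keeps ping timeouts on mockable time for setmocktime. A standalone sketch of why that shape is easy to exercise in a test (names and the 20-minute timeout are illustrative, not taken from this diff):

    #include <cassert>
    #include <chrono>

    using std::chrono::microseconds;

    // A pure function of the injected "now": a test can pass any time it
    // likes, mocked or real, without touching a global clock.
    bool PingTimedOut(microseconds ping_start, microseconds now, microseconds timeout)
    {
        return now > ping_start + timeout;
    }

    int main()
    {
        const microseconds start{0};
        const microseconds timeout{std::chrono::minutes{20}}; // stand-in for TIMEOUT_INTERVAL
        assert(!PingTimedOut(start, start + timeout, timeout));
        assert(PingTimedOut(start, start + timeout + microseconds{1}, timeout));
        return 0;
    }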
@@ -4860,12 +4866,8 @@ void PeerManagerImpl::CheckForStaleTipAndEvictPeers()
}
}
-void PeerManagerImpl::MaybeSendPing(CNode& node_to, Peer& peer)
+void PeerManagerImpl::MaybeSendPing(CNode& node_to, Peer& peer, std::chrono::microseconds now)
{
-    // Use mockable time for ping timeouts.
-    // This means that setmocktime may cause pings to time out.
-    auto now = GetTime<std::chrono::microseconds>();
if (m_connman.RunInactivityChecks(node_to) && peer.m_ping_nonce_sent &&
now > peer.m_ping_start.load() + std::chrono::seconds{TIMEOUT_INTERVAL}) {
LogPrint(BCLog::NET, "ping timeout: %fs peer=%d\n", 0.000001 * count_microseconds(now - peer.m_ping_start.load()), peer.m_id);
@@ -4898,6 +4900,75 @@ void PeerManagerImpl::MaybeSendPing(CNode& node_to, Peer& peer)
}
}
+void PeerManagerImpl::MaybeSendAddr(CNode& node, std::chrono::microseconds current_time)
+{
+    // Nothing to do for non-address-relay peers
+    if (!node.RelayAddrsWithConn()) return;
+    assert(node.m_addr_known);
+    LOCK(node.m_addr_send_times_mutex);
+    // Periodically advertise our local address to the peer.
+    if (fListen && !m_chainman.ActiveChainstate().IsInitialBlockDownload() &&
+        node.m_next_local_addr_send < current_time) {
+        // If we've sent before, clear the bloom filter for the peer, so that our
+        // self-announcement will actually go out.
+        // This might be unnecessary if the bloom filter has already rolled
+        // over since our last self-announcement, but there is only a small
+        // bandwidth cost that we can incur by doing this (which happens
+        // once a day on average).
+        if (node.m_next_local_addr_send != 0us) {
+            node.m_addr_known->reset();
+        }
+        if (std::optional<CAddress> local_addr = GetLocalAddrForPeer(&node)) {
+            FastRandomContext insecure_rand;
+            node.PushAddress(*local_addr, insecure_rand);
+        }
+        node.m_next_local_addr_send = PoissonNextSend(current_time, AVG_LOCAL_ADDRESS_BROADCAST_INTERVAL);
+    }
+    // We sent an `addr` message to this peer recently. Nothing more to do.
+    if (current_time <= node.m_next_addr_send) return;
+    node.m_next_addr_send = PoissonNextSend(current_time, AVG_ADDRESS_BROADCAST_INTERVAL);
+    if (!Assume(node.vAddrToSend.size() <= MAX_ADDR_TO_SEND)) {
+        // Should be impossible since we always check size before adding to
+        // vAddrToSend. Recover by trimming the vector.
+        node.vAddrToSend.resize(MAX_ADDR_TO_SEND);
+    }
+    // Remove addr records that the peer already knows about, and add new
+    // addrs to the m_addr_known filter on the same pass.
+    auto addr_already_known = [&node](const CAddress& addr) {
+        bool ret = node.m_addr_known->contains(addr.GetKey());
+        if (!ret) node.m_addr_known->insert(addr.GetKey());
+        return ret;
+    };
+    node.vAddrToSend.erase(std::remove_if(node.vAddrToSend.begin(), node.vAddrToSend.end(), addr_already_known),
+                           node.vAddrToSend.end());
+    // No addr messages to send
+    if (node.vAddrToSend.empty()) return;
+    const char* msg_type;
+    int make_flags;
+    if (node.m_wants_addrv2) {
+        msg_type = NetMsgType::ADDRV2;
+        make_flags = ADDRV2_FORMAT;
+    } else {
+        msg_type = NetMsgType::ADDR;
+        make_flags = 0;
+    }
+    m_connman.PushMessage(&node, CNetMsgMaker(node.GetCommonVersion()).Make(make_flags, msg_type, node.vAddrToSend));
+    node.vAddrToSend.clear();
+    // we only send the big addr message once
+    if (node.vAddrToSend.capacity() > 40) {
+        node.vAddrToSend.shrink_to_fit();
+    }
+}
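The remove_if/erase pair above replaces the old per-address copy loop that this commit deletes from SendMessages() further down: entries the peer already knows are dropped, and everything that survives is added to the known-address filter in the same pass. A standalone sketch of that pattern, with a std::unordered_set standing in for CRollingBloomFilter (illustrative only, not Dash Core code):

    #include <algorithm>
    #include <cassert>
    #include <string>
    #include <unordered_set>
    #include <vector>

    int main()
    {
        std::unordered_set<std::string> addr_known{"a", "b"};
        std::vector<std::string> to_send{"a", "c", "b", "d", "c"};

        // Drop what the peer already knows; remember what we are about to send.
        auto already_known = [&addr_known](const std::string& addr) {
            const bool known = addr_known.count(addr) > 0;
            if (!known) addr_known.insert(addr);
            return known;
        };
        to_send.erase(std::remove_if(to_send.begin(), to_send.end(), already_known),
                      to_send.end());

        // Only previously unknown addresses remain; the duplicate "c" is gone
        // too, because its first occurrence marked it as known.
        assert((to_send == std::vector<std::string>{"c", "d"}));
        return 0;
    }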
namespace {
class CompareInvMempoolOrder
{
@@ -4936,79 +5007,20 @@ bool PeerManagerImpl::SendMessages(CNode* pto)
// If we get here, the outgoing message serialization version is set and can't change.
const CNetMsgMaker msgMaker(pto->GetCommonVersion());
-    MaybeSendPing(*pto, *peer);
+    const auto current_time = GetTime<std::chrono::microseconds>();
+    MaybeSendPing(*pto, *peer, current_time);
// MaybeSendPing may have marked peer for disconnection
if (pto->fDisconnect) return true;
+    MaybeSendAddr(*pto, current_time);
{
LOCK(cs_main);
CNodeState &state = *State(pto->GetId());
-        // Address refresh broadcast
-        auto current_time = GetTime<std::chrono::microseconds>();
-        if (fListen && pto->RelayAddrsWithConn() &&
-            !m_chainman.ActiveChainstate().IsInitialBlockDownload() &&
-            pto->m_next_local_addr_send < current_time) {
-            // If we've sent before, clear the bloom filter for the peer, so that our
-            // self-announcement will actually go out.
-            // This might be unnecessary if the bloom filter has already rolled
-            // over since our last self-announcement, but there is only a small
-            // bandwidth cost that we can incur by doing this (which happens
-            // once a day on average).
-            if (pto->m_next_local_addr_send != std::chrono::microseconds::zero()) {
-                pto->m_addr_known->reset();
-            }
-            if (std::optional<CAddress> local_addr = GetLocalAddrForPeer(pto)) {
-                FastRandomContext insecure_rand;
-                pto->PushAddress(*local_addr, insecure_rand);
-            }
-            pto->m_next_local_addr_send = PoissonNextSend(current_time, AVG_LOCAL_ADDRESS_BROADCAST_INTERVAL);
-        }
-        //
-        // Message: addr
-        //
-        if (pto->RelayAddrsWithConn() && pto->m_next_addr_send < current_time) {
-            pto->m_next_addr_send = PoissonNextSend(current_time, AVG_ADDRESS_BROADCAST_INTERVAL);
-            std::vector<CAddress> vAddr;
-            vAddr.reserve(pto->vAddrToSend.size());
-            const char* msg_type;
-            int make_flags;
-            if (pto->m_wants_addrv2) {
-                msg_type = NetMsgType::ADDRV2;
-                make_flags = ADDRV2_FORMAT;
-            } else {
-                msg_type = NetMsgType::ADDR;
-                make_flags = 0;
-            }
-            assert(pto->m_addr_known);
-            for (const CAddress& addr : pto->vAddrToSend)
-            {
-                if (!pto->m_addr_known->contains(addr.GetKey()))
-                {
-                    pto->m_addr_known->insert(addr.GetKey());
-                    vAddr.push_back(addr);
-                    // receiver rejects addr messages larger than MAX_ADDR_TO_SEND
-                    if (vAddr.size() >= MAX_ADDR_TO_SEND)
-                    {
-                        m_connman.PushMessage(pto, msgMaker.Make(make_flags, msg_type, vAddr));
-                        vAddr.clear();
-                    }
-                }
-            }
-            pto->vAddrToSend.clear();
-            if (!vAddr.empty())
-                m_connman.PushMessage(pto, msgMaker.Make(make_flags, msg_type, vAddr));
-            // we only send the big addr message once
-            if (pto->vAddrToSend.capacity() > 40)
-                pto->vAddrToSend.shrink_to_fit();
-        }
// Start block sync
if (pindexBestHeader == nullptr)
pindexBestHeader = m_chainman.ActiveChain().Tip();
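Both the inline code removed in this hunk and the new MaybeSendAddr() schedule the next broadcast with PoissonNextSend(): the delay until the next addr send is drawn from an exponential distribution around the average interval, so an observer cannot predict when the next batch goes out. A standalone model of that behaviour (not the Dash Core implementation; the 30-second mean stands in for AVG_ADDRESS_BROADCAST_INTERVAL and may differ in the source):

    #include <chrono>
    #include <cstdio>
    #include <random>

    // Model of a Poisson-style sender schedule: "now" plus an exponentially
    // distributed delay whose mean is the average broadcast interval.
    std::chrono::microseconds NextSend(std::chrono::microseconds now,
                                       std::chrono::seconds average_interval,
                                       std::mt19937_64& rng)
    {
        std::exponential_distribution<double> delay_s(
            1.0 / std::chrono::duration<double>(average_interval).count());
        return now + std::chrono::duration_cast<std::chrono::microseconds>(
                         std::chrono::duration<double>{delay_s(rng)});
    }

    int main()
    {
        std::mt19937_64 rng{42};
        auto t = std::chrono::microseconds{0};
        for (int i = 0; i < 5; ++i) {
            t = NextSend(t, std::chrono::seconds{30}, rng);
            std::printf("addr send #%d at t=%.1fs\n", i + 1,
                        std::chrono::duration<double>(t).count());
        }
        return 0;
    }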
@@ -5287,8 +5299,7 @@ bool PeerManagerImpl::SendMessages(CNode* pto)
uint256 chainlockHash = ::SerializeHash(clsig);
queueAndMaybePushInv(CInv(MSG_CLSIG, chainlockHash));
}
-            pto->m_tx_relay->m_last_mempool_req = GetTime<std::chrono::seconds>();
+            pto->m_tx_relay->m_last_mempool_req = std::chrono::duration_cast<std::chrono::seconds>(current_time);
}
// Determine transactions to relay
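The change above reuses the single current_time snapshot taken at the top of SendMessages() instead of calling GetTime() a second time; duration_cast simply truncates the cached microsecond value to whole seconds. A small standalone check of that truncation (values are illustrative):

    #include <cassert>
    #include <chrono>

    int main()
    {
        using namespace std::chrono;
        // e.g. the microsecond snapshot taken once at the top of SendMessages()
        const microseconds current_time{1'614'760'015'123'456};
        // Truncating cast: drops the sub-second part, i.e. the same value a
        // fresh seconds-resolution read at that instant would have produced.
        const seconds last_mempool_req = duration_cast<seconds>(current_time);
        assert(last_mempool_req == seconds{1'614'760'015});
        return 0;
    }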
@@ -5373,7 +5384,6 @@ bool PeerManagerImpl::SendMessages(CNode* pto)
m_connman.PushMessage(pto, msgMaker.Make(NetMsgType::INV, vInv));
// Detect whether we're stalling
-        current_time = GetTime<std::chrono::microseconds>();
if (state.m_stalling_since.count() && state.m_stalling_since < current_time - BLOCK_STALLING_TIMEOUT) {
// Stalling only triggers when the block download window cannot move. During normal steady state,
// the download window should be much larger than the to-be-downloaded set of blocks, so disconnection