partial bitcoin#21186: Move addr data into net_processing

excludes:
- 0829516d1f3868c1c2ba507feee718325d81e329
Author: Kittywhiskers Van Gogh
Date: 2024-03-24 20:35:04 +00:00
Parent: 26c39f5b92
Commit: 5478001a81
GPG Key ID: 30CD0C065E5C4AAD
4 changed files with 146 additions and 175 deletions

src/net.cpp (View File)

@ -4047,10 +4047,6 @@ CNode::CNode(NodeId idIn, ServiceFlags nLocalServicesIn, SOCKET hSocketIn, const
hSocket = hSocketIn;
addrName = addrNameIn == "" ? addr.ToStringIPPort() : addrNameIn;
if (conn_type_in != ConnectionType::BLOCK_RELAY) {
m_addr_known = std::make_unique<CRollingBloomFilter>(5000, 0.001);
}
for (const std::string &msg : getAllNetMessageTypes())
mapRecvBytesPerMsgCmd[msg] = 0;
mapRecvBytesPerMsgCmd[NET_MESSAGE_COMMAND_OTHER] = 0;

src/net.h (View File)

@ -70,8 +70,6 @@ static constexpr auto FEELER_INTERVAL = 2min;
static const unsigned int MAX_INV_SZ = 50000;
/** Run the extra block-relay-only connection loop once every 5 minutes. **/
static constexpr auto EXTRA_BLOCK_RELAY_ONLY_PEER_INTERVAL = 5min;
/** The maximum number of addresses from our addrman to return in response to a getaddr message. */
static constexpr size_t MAX_ADDR_TO_SEND = 1000;
/** Maximum length of incoming protocol messages (no message over 3 MiB is currently acceptable). */
static const unsigned int MAX_PROTOCOL_MESSAGE_LENGTH = 3 * 1024 * 1024;
/** Maximum length of the user agent string in `version` message */
@ -482,12 +480,6 @@ public:
bool m_legacyWhitelisted{false};
bool fClient{false}; // set by version message
bool m_limited_node{false}; //after BIP159, set by version message
/**
* Whether the peer has signaled support for receiving ADDRv2 (BIP155)
* messages, implying a preference to receive ADDRv2 instead of ADDR ones.
*/
std::atomic_bool m_wants_addrv2{false};
/** fSuccessfullyConnected is set to true on receiving VERACK from the peer. */
std::atomic_bool fSuccessfullyConnected{false};
// Setting fDisconnect to true will cause the node to be disconnected the
@ -496,7 +488,6 @@ public:
std::atomic<int64_t> nDisconnectLingerTime{0};
std::atomic_bool fSocketShutdown{false};
std::atomic_bool fOtherSideDisconnected { false };
bool fSentAddr{false};
// If 'true' this node will be disconnected on CMasternodeMan::ProcessMasternodeConnections()
std::atomic<bool> m_masternode_connection{false};
/**
@ -567,15 +558,6 @@ public:
return m_conn_type == ConnectionType::INBOUND;
}
/* Whether we send addr messages over this connection */
bool RelayAddrsWithConn() const
{
// Don't relay addr messages to peers that we connect to as block-relay-only
// peers (to prevent adversaries from inferring these links from addr
// traffic).
return m_conn_type != ConnectionType::BLOCK_RELAY;
}
bool ExpectServicesFromConn() const {
switch (m_conn_type) {
case ConnectionType::INBOUND:
@ -596,16 +578,6 @@ protected:
mapMsgCmdSize mapRecvBytesPerMsgCmd GUARDED_BY(cs_vRecv);
public:
// flood relay
std::vector<CAddress> vAddrToSend;
std::unique_ptr<CRollingBloomFilter> m_addr_known{nullptr};
bool fGetAddr{false};
Mutex m_addr_send_times_mutex;
std::chrono::microseconds m_next_addr_send GUARDED_BY(m_addr_send_times_mutex){0};
std::chrono::microseconds m_next_local_addr_send GUARDED_BY(m_addr_send_times_mutex){0};
bool IsBlockRelayOnly() const;
struct TxRelay {
mutable RecursiveMutex cs_filter;
// We use fRelayTxes for two purposes -
@ -662,6 +634,8 @@ public:
// If true, we will send him all quorum related messages, even if he is not a member of our quorums
std::atomic<bool> qwatch{false};
bool IsBlockRelayOnly() const;
CNode(NodeId id, ServiceFlags nLocalServicesIn, SOCKET hSocketIn, const CAddress &addrIn, uint64_t nKeyedNetGroupIn, uint64_t nLocalHostNonceIn, const CAddress &addrBindIn, const std::string &addrNameIn, ConnectionType conn_type_in, bool inbound_onion = false);
~CNode();
CNode(const CNode&) = delete;
@ -761,40 +735,6 @@ public:
nRefCount--;
}
void AddAddressKnown(const CAddress& _addr)
{
assert(m_addr_known);
m_addr_known->insert(_addr.GetKey());
}
/**
* Whether the peer supports the address. For example, a peer that does not
* implement BIP155 cannot receive Tor v3 addresses because it requires
* ADDRv2 (BIP155) encoding.
*/
bool IsAddrCompatible(const CAddress& addr) const
{
return m_wants_addrv2 || addr.IsAddrV1Compatible();
}
void PushAddress(const CAddress& _addr, FastRandomContext &insecure_rand)
{
// Known checking here is only to save space from duplicates.
// SendMessages will filter it again for knowns that were added
// after addresses were pushed.
assert(m_addr_known);
if (_addr.IsValid() && !m_addr_known->contains(_addr.GetKey()) && IsAddrCompatible(_addr)) {
if (vAddrToSend.size() >= MAX_ADDR_TO_SEND) {
vAddrToSend[insecure_rand.randrange(vAddrToSend.size())] = _addr;
} else {
vAddrToSend.push_back(_addr);
}
}
}
void AddKnownInventory(const uint256& hash)
{
LOCK(m_tx_relay->cs_tx_inventory);

src/net_processing.cpp (View File)

@ -174,6 +174,8 @@ static constexpr uint32_t MAX_GETCFILTERS_SIZE = 1000;
static constexpr uint32_t MAX_GETCFHEADERS_SIZE = 2000;
/** the maximum percentage of addresses from our addrman to return in response to a getaddr message. */
static constexpr size_t MAX_PCT_ADDR_TO_SEND = 23;
/** The maximum number of address records permitted in an ADDR message. */
static constexpr size_t MAX_ADDR_TO_SEND{1000};
struct COrphanTx {
// When modifying, adapt the copy of this definition in tests/DoS_tests.
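
The two constants above bound a getaddr reply: at most MAX_PCT_ADDR_TO_SEND percent of the addrman entries, and never more than MAX_ADDR_TO_SEND records. A minimal sketch of how the caps combine, assuming the reply size is the percentage cap clamped to the absolute cap (GetAddrReplyCap is a hypothetical helper for illustration, not part of the patch):

```cpp
#include <algorithm>
#include <cstddef>
#include <iostream>

static constexpr std::size_t MAX_ADDR_TO_SEND{1000};
static constexpr std::size_t MAX_PCT_ADDR_TO_SEND{23};

// Hypothetical helper illustrating how the two caps combine for a getaddr reply.
std::size_t GetAddrReplyCap(std::size_t addrman_size)
{
    return std::min(MAX_ADDR_TO_SEND, addrman_size * MAX_PCT_ADDR_TO_SEND / 100);
}

int main()
{
    std::cout << GetAddrReplyCap(2000) << '\n';  // 460: the 23% cap binds
    std::cout << GetAddrReplyCap(20000) << '\n'; // 1000: the absolute cap binds
}
```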
@ -253,6 +255,25 @@ struct Peer {
/** Whether a ping has been requested by the user */
std::atomic<bool> m_ping_queued{false};
/** A vector of addresses to send to the peer, limited to MAX_ADDR_TO_SEND. */
std::vector<CAddress> m_addrs_to_send;
/** Probabilistic filter of addresses that this peer already knows.
* Used to avoid relaying addresses to this peer more than once. */
const std::unique_ptr<CRollingBloomFilter> m_addr_known;
/** Whether a getaddr request to this peer is outstanding. */
bool m_getaddr_sent{false};
/** Guards address sending timers. */
mutable Mutex m_addr_send_times_mutex;
/** Time point to send the next ADDR message to this peer. */
std::chrono::microseconds m_next_addr_send GUARDED_BY(m_addr_send_times_mutex){0};
/** Time point to possibly re-announce our local address to this peer. */
std::chrono::microseconds m_next_local_addr_send GUARDED_BY(m_addr_send_times_mutex){0};
/** Whether the peer has signaled support for receiving ADDRv2 (BIP155)
* messages, indicating a preference to receive ADDRv2 instead of ADDR ones. */
std::atomic_bool m_wants_addrv2{false};
/** Whether this peer has already sent us a getaddr message. */
bool m_getaddr_recvd{false};
/** Set of txids to reconsider once their parent transactions have been accepted **/
std::set<uint256> m_orphan_work_set GUARDED_BY(g_cs_orphans);
@ -261,7 +282,10 @@ struct Peer {
/** Work queue of items requested by this peer **/
std::deque<CInv> m_getdata_requests GUARDED_BY(m_getdata_requests_mutex);
explicit Peer(NodeId id) : m_id(id) {}
explicit Peer(NodeId id, bool addr_relay)
: m_id(id)
, m_addr_known{addr_relay ? std::make_unique<CRollingBloomFilter>(5000, 0.001) : nullptr}
{}
};
using PeerRef = std::shared_ptr<Peer>;
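
The new Peer constructor encodes addr-relay capability structurally: peers created with addr_relay == false never get an m_addr_known filter, and every later check keys off that null pointer. A minimal standalone sketch of the idea, using an exact std::unordered_set as a stand-in for CRollingBloomFilter and simplified types throughout (none of these stand-ins are the real net_processing types):

```cpp
#include <cstdint>
#include <iostream>
#include <memory>
#include <string>
#include <unordered_set>
#include <vector>

using NodeId = int64_t;

// Stand-in for CRollingBloomFilter: an exact set, without the rolling window
// or false-positive rate of the real filter.
struct AddrKnownFilter {
    std::unordered_set<std::string> seen;
    void insert(const std::string& key) { seen.insert(key); }
    bool contains(const std::string& key) const { return seen.count(key) > 0; }
    void reset() { seen.clear(); }
};

struct Peer {
    const NodeId m_id;
    std::vector<std::string> m_addrs_to_send;             // queued addr records
    const std::unique_ptr<AddrKnownFilter> m_addr_known;  // null => addr relay disabled

    Peer(NodeId id, bool addr_relay)
        : m_id(id),
          m_addr_known(addr_relay ? std::make_unique<AddrKnownFilter>() : nullptr) {}
};

// Mirrors the helper added in net_processing: a peer relays addresses iff it
// was given a known-address filter at construction time.
static bool RelayAddrsWithPeer(const Peer& peer) { return peer.m_addr_known != nullptr; }

int main()
{
    Peer full_relay{0, /*addr_relay=*/true};        // e.g. inbound or full outbound peer
    Peer block_relay_only{1, /*addr_relay=*/false}; // outbound block-relay-only peer
    std::cout << RelayAddrsWithPeer(full_relay) << ' '
              << RelayAddrsWithPeer(block_relay_only) << '\n'; // prints "1 0"
}
```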
@ -372,7 +396,16 @@ private:
void MaybeSendPing(CNode& node_to, Peer& peer, std::chrono::microseconds now);
/** Send `addr` messages on a regular schedule. */
void MaybeSendAddr(CNode& node, std::chrono::microseconds current_time);
void MaybeSendAddr(CNode& node, Peer& peer, std::chrono::microseconds current_time);
/** Relay (gossip) an address to a few randomly chosen nodes.
*
* @param[in] originator The id of the peer that sent us the address. We don't want to relay it back.
* @param[in] addr Address to relay.
* @param[in] fReachable Whether the address' network is reachable. We relay unreachable
* addresses less.
*/
void RelayAddress(NodeId originator, const CAddress& addr, bool fReachable);
const CChainParams& m_chainparams;
CConnman& m_connman;
@ -802,6 +835,42 @@ static CNodeState *State(NodeId pnode) EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
return &it->second;
}
static bool RelayAddrsWithPeer(const Peer& peer)
{
return peer.m_addr_known != nullptr;
}
/**
* Whether the peer supports the address. For example, a peer that does not
* implement BIP155 cannot receive Tor v3 addresses because it requires
* ADDRv2 (BIP155) encoding.
*/
static bool IsAddrCompatible(const Peer& peer, const CAddress& addr)
{
return peer.m_wants_addrv2 || addr.IsAddrV1Compatible();
}
static void AddAddressKnown(Peer& peer, const CAddress& addr)
{
assert(peer.m_addr_known);
peer.m_addr_known->insert(addr.GetKey());
}
static void PushAddress(Peer& peer, const CAddress& addr, FastRandomContext& insecure_rand)
{
// Known checking here is only to save space from duplicates.
// Before sending, we'll filter it again for known addresses that were
// added after addresses were pushed.
assert(peer.m_addr_known);
if (addr.IsValid() && !peer.m_addr_known->contains(addr.GetKey()) && IsAddrCompatible(peer, addr)) {
if (peer.m_addrs_to_send.size() >= MAX_ADDR_TO_SEND) {
peer.m_addrs_to_send[insecure_rand.randrange(peer.m_addrs_to_send.size())] = addr;
} else {
peer.m_addrs_to_send.push_back(addr);
}
}
}
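
PushAddress keeps the per-peer queue bounded: once m_addrs_to_send holds MAX_ADDR_TO_SEND entries, a new address overwrites a uniformly random existing slot rather than growing the vector. A minimal sketch of just that replacement policy, with std::mt19937_64 standing in for FastRandomContext and plain strings standing in for CAddress:

```cpp
#include <cstddef>
#include <iostream>
#include <random>
#include <string>
#include <vector>

static constexpr std::size_t MAX_ADDR_TO_SEND{1000};

// Add `addr` to a bounded outgoing queue: grow until the cap, then overwrite a
// uniformly random existing slot so late arrivals can still displace older ones.
void PushBounded(std::vector<std::string>& addrs_to_send, const std::string& addr,
                 std::mt19937_64& rng)
{
    if (addrs_to_send.size() >= MAX_ADDR_TO_SEND) {
        std::uniform_int_distribution<std::size_t> pick(0, addrs_to_send.size() - 1);
        addrs_to_send[pick(rng)] = addr;
    } else {
        addrs_to_send.push_back(addr);
    }
}

int main()
{
    std::mt19937_64 rng{42};
    std::vector<std::string> queue;
    for (int i = 0; i < 1500; ++i) {
        PushBounded(queue, "198.51.100." + std::to_string(i % 256) + ":9999", rng);
    }
    std::cout << queue.size() << '\n'; // stays capped at 1000
}
```

This keeps memory per peer bounded while still giving addresses received after the queue fills a chance to be announced.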
static void UpdatePreferredDownload(const CNode& node, CNodeState* state) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
{
nPreferredDownload -= state->fPreferredDownload;
@ -1244,7 +1313,9 @@ void PeerManagerImpl::InitializeNode(CNode *pnode) {
mapNodeState.emplace_hint(mapNodeState.end(), std::piecewise_construct, std::forward_as_tuple(nodeid), std::forward_as_tuple(addr, pnode->IsInboundConn()));
}
{
PeerRef peer = std::make_shared<Peer>(nodeid);
// Addr relay is disabled for outbound block-relay-only peers to
// prevent adversaries from inferring these links from addr traffic.
PeerRef peer = std::make_shared<Peer>(nodeid, /* addr_relay = */ !pnode->IsBlockOnlyConn());
LOCK(m_peer_mutex);
m_peer_map.emplace_hint(m_peer_map.end(), nodeid, std::move(peer));
}
@ -2002,59 +2073,49 @@ void PeerManagerImpl::RelayTransaction(const uint256& txid)
});
}
/**
* Relay (gossip) an address to a few randomly chosen nodes.
* We choose the same nodes within a given 24h window (if the list of connected
* nodes does not change) and we don't relay to nodes that already know an
* address. So within 24h we will likely relay a given address once. This is to
* prevent a peer from unjustly giving their address better propagation by sending
* it to us repeatedly.
* @param[in] originator The peer that sent us the address. We don't want to relay it back.
* @param[in] addr Address to relay.
* @param[in] fReachable Whether the address' network is reachable. We relay unreachable
* addresses less.
* @param[in] connman Connection manager to choose nodes to relay to.
*/
static void RelayAddress(const CNode& originator,
const CAddress& addr,
bool fReachable,
const CConnman& connman)
void PeerManagerImpl::RelayAddress(NodeId originator,
const CAddress& addr,
bool fReachable)
{
// We choose the same nodes within a given 24h window (if the list of connected
// nodes does not change) and we don't relay to nodes that already know an
// address. So within 24h we will likely relay a given address once. This is to
// prevent a peer from unjustly giving their address better propagation by sending
// it to us repeatedly.
if (!fReachable && !addr.IsRelayable()) return;
// Relay to a limited number of other nodes
// Use deterministic randomness to send to the same nodes for 24 hours
// at a time so the m_addr_knowns of the chosen nodes prevent repeats
uint64_t hashAddr = addr.GetHash();
const CSipHasher hasher = connman.GetDeterministicRandomizer(RANDOMIZER_ID_ADDRESS_RELAY).Write(hashAddr << 32).Write((GetTime() + hashAddr) / (24 * 60 * 60));
const CSipHasher hasher = m_connman.GetDeterministicRandomizer(RANDOMIZER_ID_ADDRESS_RELAY).Write(hashAddr << 32).Write((GetTime() + hashAddr) / (24 * 60 * 60));
FastRandomContext insecure_rand;
// Relay reachable addresses to 2 peers. Unreachable addresses are relayed randomly to 1 or 2 peers.
unsigned int nRelayNodes = (fReachable || (hasher.Finalize() & 1)) ? 2 : 1;
std::array<std::pair<uint64_t, CNode*>,2> best{{{0, nullptr}, {0, nullptr}}};
std::array<std::pair<uint64_t, Peer*>, 2> best{{{0, nullptr}, {0, nullptr}}};
assert(nRelayNodes <= best.size());
auto sortfunc = [&best, &hasher, nRelayNodes, &originator, &addr](CNode* pnode) {
if (pnode->RelayAddrsWithConn() && pnode != &originator && pnode->IsAddrCompatible(addr)) {
uint64_t hashKey = CSipHasher(hasher).Write(pnode->GetId()).Finalize();
LOCK(m_peer_mutex);
for (auto& [id, peer] : m_peer_map) {
if (RelayAddrsWithPeer(*peer) && id != originator && IsAddrCompatible(*peer, addr)) {
uint64_t hashKey = CSipHasher(hasher).Write(id).Finalize();
for (unsigned int i = 0; i < nRelayNodes; i++) {
if (hashKey > best[i].first) {
std::copy(best.begin() + i, best.begin() + nRelayNodes - 1, best.begin() + i + 1);
best[i] = std::make_pair(hashKey, pnode);
best[i] = std::make_pair(hashKey, peer.get());
break;
}
}
}
};
auto pushfunc = [&addr, &best, nRelayNodes, &insecure_rand] {
for (unsigned int i = 0; i < nRelayNodes && best[i].first != 0; i++) {
best[i].second->PushAddress(addr, insecure_rand);
}
};
connman.ForEachNodeThen(std::move(sortfunc), std::move(pushfunc));
for (unsigned int i = 0; i < nRelayNodes && best[i].first != 0; i++) {
PushAddress(*best[i].second, addr, insecure_rand);
}
}
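
RelayAddress picks its relay targets deterministically: the hash seed mixes the address with the current 24-hour bucket, so the same one or two peers win for a given address all day, and their m_addr_known filters then absorb repeat announcements. A simplified standalone sketch of that selection, with std::hash standing in for the keyed SipHash and the peer map reduced to a list of ids (an illustration of the technique, not the patch's code):

```cpp
#include <array>
#include <cstdint>
#include <ctime>
#include <functional>
#include <iostream>
#include <string>
#include <utility>
#include <vector>

using NodeId = int64_t;

// Pick the peers that should receive `addr_hash` during the current 24h bucket.
std::vector<NodeId> SelectRelayPeers(const std::vector<NodeId>& candidates,
                                     uint64_t addr_hash, bool reachable)
{
    const uint64_t day_bucket =
        (static_cast<uint64_t>(std::time(nullptr)) + addr_hash) / (24 * 60 * 60);
    // Reachable addresses go to 2 peers; unreachable ones to 1 or 2, decided
    // deterministically from the address/day seed.
    const unsigned relay_nodes =
        (reachable || (std::hash<uint64_t>{}(addr_hash ^ day_bucket) & 1)) ? 2 : 1;

    std::array<std::pair<uint64_t, NodeId>, 2> best{{{0, -1}, {0, -1}}};
    for (NodeId id : candidates) {
        // Per-peer score; the real code uses a keyed SipHash here.
        const uint64_t key = std::hash<std::string>{}(std::to_string(addr_hash) + ':' +
                                                      std::to_string(day_bucket) + ':' +
                                                      std::to_string(id));
        for (unsigned i = 0; i < relay_nodes; ++i) {
            if (key > best[i].first) {
                for (unsigned j = relay_nodes - 1; j > i; --j) best[j] = best[j - 1]; // shift down
                best[i] = {key, id};
                break;
            }
        }
    }
    std::vector<NodeId> winners;
    for (unsigned i = 0; i < relay_nodes && best[i].first != 0; ++i) winners.push_back(best[i].second);
    return winners;
}

int main()
{
    for (NodeId id : SelectRelayPeers({3, 7, 11, 19}, /*addr_hash=*/0xabcdefULL, /*reachable=*/true)) {
        std::cout << "relay to peer " << id << '\n';
    }
}
```

In the real code the SipHash is keyed with the node's private randomizer, so outside observers cannot predict which peers are chosen.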
void PeerManagerImpl::ProcessGetBlockData(CNode& pfrom, Peer& peer, const CInv& inv, llmq::CInstantSendManager& isman)
@ -2142,7 +2203,7 @@ void PeerManagerImpl::ProcessGetBlockData(CNode& pfrom, Peer& peer, const CInv&
} else if (inv.IsMsgFilteredBlk()) {
bool sendMerkleBlock = false;
CMerkleBlock merkleBlock;
if (pfrom.RelayAddrsWithConn()) {
if (RelayAddrsWithPeer(peer)) {
LOCK(pfrom.m_tx_relay->cs_filter);
if (pfrom.m_tx_relay->pfilter) {
sendMerkleBlock = true;
@ -2245,8 +2306,8 @@ void PeerManagerImpl::ProcessGetData(CNode& pfrom, Peer& peer, const std::atomic
const std::chrono::seconds now = GetTime<std::chrono::seconds>();
// Get last mempool request time
const std::chrono::seconds mempool_req = pfrom.RelayAddrsWithConn() ? pfrom.m_tx_relay->m_last_mempool_req.load()
: std::chrono::seconds::min();
const std::chrono::seconds mempool_req = RelayAddrsWithPeer(peer) ? pfrom.m_tx_relay->m_last_mempool_req.load()
: std::chrono::seconds::min();
// Process as many TX items from the front of the getdata queue as
// possible, since they're common and it's efficient to batch process
@ -2266,7 +2327,7 @@ void PeerManagerImpl::ProcessGetData(CNode& pfrom, Peer& peer, const std::atomic
}
++it;
if (!pfrom.RelayAddrsWithConn() && NetMessageViolatesBlocksOnly(inv.GetCommand())) {
if (!RelayAddrsWithPeer(peer) && NetMessageViolatesBlocksOnly(inv.GetCommand())) {
// Note that if we receive a getdata for non-block messages
// from a block-relay-only outbound peer that violate the policy,
// we skip such getdata messages from this peer
@ -3109,7 +3170,7 @@ void PeerManagerImpl::ProcessMessage(
// set nodes not capable of serving the complete blockchain history as "limited nodes"
pfrom.m_limited_node = (!(nServices & NODE_NETWORK) && (nServices & NODE_NETWORK_LIMITED));
if (pfrom.RelayAddrsWithConn()) {
if (RelayAddrsWithPeer(*peer)) {
LOCK(pfrom.m_tx_relay->cs_filter);
pfrom.m_tx_relay->fRelayTxes = fRelay; // set to true after we get the first filter* message
}
@ -3138,17 +3199,17 @@ void PeerManagerImpl::ProcessMessage(
if (addr.IsRoutable())
{
LogPrint(BCLog::NET, "ProcessMessages: advertising address %s\n", addr.ToString());
pfrom.PushAddress(addr, insecure_rand);
PushAddress(*peer, addr, insecure_rand);
} else if (IsPeerAddrLocalGood(&pfrom)) {
addr.SetIP(addrMe);
LogPrint(BCLog::NET, "ProcessMessages: advertising address %s\n", addr.ToString());
pfrom.PushAddress(addr, insecure_rand);
PushAddress(*peer, addr, insecure_rand);
}
}
// Get recent addresses
m_connman.PushMessage(&pfrom, CNetMsgMaker(greatest_common_version).Make(NetMsgType::GETADDR));
pfrom.fGetAddr = true;
peer->m_getaddr_sent = true;
}
if (!pfrom.IsInboundConn()) {
@ -3261,7 +3322,7 @@ void PeerManagerImpl::ProcessMessage(
pfrom.fDisconnect = true;
return;
}
pfrom.m_wants_addrv2 = true;
peer->m_wants_addrv2 = true;
return;
}
@ -3303,7 +3364,7 @@ void PeerManagerImpl::ProcessMessage(
s >> vAddr;
if (!pfrom.RelayAddrsWithConn()) {
if (!RelayAddrsWithPeer(*peer)) {
LogPrint(BCLog::NET, "ignoring %s message from %s peer=%d\n", msg_type, pfrom.ConnectionTypeAsString(), pfrom.GetId());
return;
}
@ -3330,24 +3391,22 @@ void PeerManagerImpl::ProcessMessage(
if (addr.nTime <= 100000000 || addr.nTime > nNow + 10 * 60)
addr.nTime = nNow - 5 * 24 * 60 * 60;
pfrom.AddAddressKnown(addr);
AddAddressKnown(*peer, addr);
if (m_banman && (m_banman->IsDiscouraged(addr) || m_banman->IsBanned(addr))) {
// Do not process banned/discouraged addresses beyond remembering we received them
continue;
}
bool fReachable = IsReachable(addr);
if (addr.nTime > nSince && !pfrom.fGetAddr && vAddr.size() <= 10 && addr.IsRoutable())
{
if (addr.nTime > nSince && !peer->m_getaddr_sent && vAddr.size() <= 10 && addr.IsRoutable()) {
// Relay to a limited number of other nodes
RelayAddress(pfrom, addr, fReachable, m_connman);
RelayAddress(pfrom.GetId(), addr, fReachable);
}
// Do not store addresses outside our network
if (fReachable)
vAddrOk.push_back(addr);
}
m_addrman.Add(vAddrOk, pfrom.addr, 2 * 60 * 60);
if (vAddr.size() < 1000)
pfrom.fGetAddr = false;
if (vAddr.size() < 1000) peer->m_getaddr_sent = false;
if (pfrom.IsAddrFetchConn()) {
LogPrint(BCLog::NET_NETCONN, "addrfetch connection completed peer=%d; disconnecting\n", pfrom.GetId());
pfrom.fDisconnect = true;
@ -4250,14 +4309,14 @@ void PeerManagerImpl::ProcessMessage(
}
// Only send one GetAddr response per connection to reduce resource waste
// and discourage addr stamping of INV announcements.
if (pfrom.fSentAddr) {
// and discourage addr stamping of INV announcements.
if (peer->m_getaddr_recvd) {
LogPrint(BCLog::NET, "Ignoring repeated \"getaddr\". peer=%d\n", pfrom.GetId());
return;
}
pfrom.fSentAddr = true;
peer->m_getaddr_recvd = true;
pfrom.vAddrToSend.clear();
peer->m_addrs_to_send.clear();
std::vector<CAddress> vAddr;
if (pfrom.HasPermission(NetPermissionFlags::Addr)) {
vAddr = m_connman.GetAddresses(MAX_ADDR_TO_SEND, MAX_PCT_ADDR_TO_SEND, /* network */ std::nullopt);
@ -4266,7 +4325,7 @@ void PeerManagerImpl::ProcessMessage(
}
FastRandomContext insecure_rand;
for (const CAddress &addr : vAddr) {
pfrom.PushAddress(addr, insecure_rand);
PushAddress(*peer, addr, insecure_rand);
}
return;
}
@ -4292,7 +4351,7 @@ void PeerManagerImpl::ProcessMessage(
return;
}
if (pfrom.RelayAddrsWithConn()) {
if (RelayAddrsWithPeer(*peer)) {
LOCK(pfrom.m_tx_relay->cs_tx_inventory);
pfrom.m_tx_relay->fSendMempool = true;
}
@ -4386,7 +4445,7 @@ void PeerManagerImpl::ProcessMessage(
// There is no excuse for sending a too-large filter
Misbehaving(pfrom.GetId(), 100, "too-large bloom filter");
}
else if (pfrom.RelayAddrsWithConn())
else if (RelayAddrsWithPeer(*peer))
{
LOCK(pfrom.m_tx_relay->cs_filter);
pfrom.m_tx_relay->pfilter.reset(new CBloomFilter(filter));
@ -4409,7 +4468,7 @@ void PeerManagerImpl::ProcessMessage(
bool bad = false;
if (vData.size() > MAX_SCRIPT_ELEMENT_SIZE) {
bad = true;
} else if (pfrom.RelayAddrsWithConn()) {
} else if (RelayAddrsWithPeer(*peer)) {
LOCK(pfrom.m_tx_relay->cs_filter);
if (pfrom.m_tx_relay->pfilter) {
pfrom.m_tx_relay->pfilter->insert(vData);
@ -4429,7 +4488,7 @@ void PeerManagerImpl::ProcessMessage(
pfrom.fDisconnect = true;
return;
}
if (!pfrom.RelayAddrsWithConn()) {
if (!RelayAddrsWithPeer(*peer)) {
return;
}
LOCK(pfrom.m_tx_relay->cs_filter);
@ -4900,72 +4959,70 @@ void PeerManagerImpl::MaybeSendPing(CNode& node_to, Peer& peer, std::chrono::mic
}
}
void PeerManagerImpl::MaybeSendAddr(CNode& node, std::chrono::microseconds current_time)
void PeerManagerImpl::MaybeSendAddr(CNode& node, Peer& peer, std::chrono::microseconds current_time)
{
// Nothing to do for non-address-relay peers
if (!node.RelayAddrsWithConn()) return;
if (!RelayAddrsWithPeer(peer)) return;
assert(node.m_addr_known);
LOCK(node.m_addr_send_times_mutex);
LOCK(peer.m_addr_send_times_mutex);
// Periodically advertise our local address to the peer.
if (fListen && !m_chainman.ActiveChainstate().IsInitialBlockDownload() &&
node.m_next_local_addr_send < current_time) {
peer.m_next_local_addr_send < current_time) {
// If we've sent before, clear the bloom filter for the peer, so that our
// self-announcement will actually go out.
// This might be unnecessary if the bloom filter has already rolled
// over since our last self-announcement, but there is only a small
// bandwidth cost that we can incur by doing this (which happens
// once a day on average).
if (node.m_next_local_addr_send != 0us) {
node.m_addr_known->reset();
if (peer.m_next_local_addr_send != 0us) {
peer.m_addr_known->reset();
}
if (std::optional<CAddress> local_addr = GetLocalAddrForPeer(&node)) {
FastRandomContext insecure_rand;
node.PushAddress(*local_addr, insecure_rand);
PushAddress(peer, *local_addr, insecure_rand);
}
node.m_next_local_addr_send = PoissonNextSend(current_time, AVG_LOCAL_ADDRESS_BROADCAST_INTERVAL);
peer.m_next_local_addr_send = PoissonNextSend(current_time, AVG_LOCAL_ADDRESS_BROADCAST_INTERVAL);
}
// We sent an `addr` message to this peer recently. Nothing more to do.
if (current_time <= node.m_next_addr_send) return;
if (current_time <= peer.m_next_addr_send) return;
node.m_next_addr_send = PoissonNextSend(current_time, AVG_ADDRESS_BROADCAST_INTERVAL);
peer.m_next_addr_send = PoissonNextSend(current_time, AVG_ADDRESS_BROADCAST_INTERVAL);
if (!Assume(node.vAddrToSend.size() <= MAX_ADDR_TO_SEND)) {
if (!Assume(peer.m_addrs_to_send.size() <= MAX_ADDR_TO_SEND)) {
// Should be impossible since we always check size before adding to
// vAddrToSend. Recover by trimming the vector.
node.vAddrToSend.resize(MAX_ADDR_TO_SEND);
// m_addrs_to_send. Recover by trimming the vector.
peer.m_addrs_to_send.resize(MAX_ADDR_TO_SEND);
}
// Remove addr records that the peer already knows about, and add new
// addrs to the m_addr_known filter on the same pass.
auto addr_already_known = [&node](const CAddress& addr) {
bool ret = node.m_addr_known->contains(addr.GetKey());
if (!ret) node.m_addr_known->insert(addr.GetKey());
auto addr_already_known = [&peer](const CAddress& addr) {
bool ret = peer.m_addr_known->contains(addr.GetKey());
if (!ret) peer.m_addr_known->insert(addr.GetKey());
return ret;
};
node.vAddrToSend.erase(std::remove_if(node.vAddrToSend.begin(), node.vAddrToSend.end(), addr_already_known),
node.vAddrToSend.end());
peer.m_addrs_to_send.erase(std::remove_if(peer.m_addrs_to_send.begin(), peer.m_addrs_to_send.end(), addr_already_known),
peer.m_addrs_to_send.end());
// No addr messages to send
if (node.vAddrToSend.empty()) return;
if (peer.m_addrs_to_send.empty()) return;
const char* msg_type;
int make_flags;
if (node.m_wants_addrv2) {
if (peer.m_wants_addrv2) {
msg_type = NetMsgType::ADDRV2;
make_flags = ADDRV2_FORMAT;
} else {
msg_type = NetMsgType::ADDR;
make_flags = 0;
}
m_connman.PushMessage(&node, CNetMsgMaker(node.GetCommonVersion()).Make(make_flags, msg_type, node.vAddrToSend));
node.vAddrToSend.clear();
m_connman.PushMessage(&node, CNetMsgMaker(node.GetCommonVersion()).Make(make_flags, msg_type, peer.m_addrs_to_send));
peer.m_addrs_to_send.clear();
// we only send the big addr message once
if (node.vAddrToSend.capacity() > 40) {
node.vAddrToSend.shrink_to_fit();
if (peer.m_addrs_to_send.capacity() > 40) {
peer.m_addrs_to_send.shrink_to_fit();
}
}
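
The de-duplication pass in MaybeSendAddr does two things in one sweep: it drops queued addresses the peer already knows and records the survivors in the peer's known-address filter so they are not announced twice. A minimal sketch with std::unordered_set standing in for the rolling bloom filter:

```cpp
#include <algorithm>
#include <iostream>
#include <string>
#include <unordered_set>
#include <vector>

int main()
{
    // Stand-in for the peer's m_addr_known rolling bloom filter.
    std::unordered_set<std::string> addr_known{"203.0.113.1:9999"};
    std::vector<std::string> addrs_to_send{"203.0.113.1:9999", "198.51.100.2:9999",
                                           "198.51.100.2:9999"};

    // Drop addresses the peer already knows; mark everything kept as known so
    // the same record is not announced again on a later pass.
    auto addr_already_known = [&addr_known](const std::string& addr) {
        const bool known = addr_known.count(addr) > 0;
        if (!known) addr_known.insert(addr);
        return known;
    };
    addrs_to_send.erase(std::remove_if(addrs_to_send.begin(), addrs_to_send.end(),
                                       addr_already_known),
                        addrs_to_send.end());

    for (const auto& a : addrs_to_send) std::cout << a << '\n'; // "198.51.100.2:9999" once
}
```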
@ -5014,7 +5071,7 @@ bool PeerManagerImpl::SendMessages(CNode* pto)
// MaybeSendPing may have marked peer for disconnection
if (pto->fDisconnect) return true;
MaybeSendAddr(*pto, current_time);
MaybeSendAddr(*pto, *peer, current_time);
{
LOCK(cs_main);
@ -5216,7 +5273,7 @@ bool PeerManagerImpl::SendMessages(CNode* pto)
LOCK2(m_mempool.cs, peer->m_block_inv_mutex);
size_t reserve = INVENTORY_BROADCAST_MAX_PER_1MB_BLOCK * MaxBlockSize() / 1000000;
if (pto->RelayAddrsWithConn()) {
if (RelayAddrsWithPeer(*peer)) {
LOCK(pto->m_tx_relay->cs_tx_inventory);
reserve = std::min<size_t>(pto->m_tx_relay->setInventoryTxToSend.size(), reserve);
}
@ -5247,7 +5304,7 @@ bool PeerManagerImpl::SendMessages(CNode* pto)
}
};
if (pto->RelayAddrsWithConn()) {
if (RelayAddrsWithPeer(*peer)) {
LOCK(pto->m_tx_relay->cs_tx_inventory);
// Check whether periodic sends should happen
// Note: If this node is running in a Masternode mode, it makes no sense to delay outgoing txes

src/test/fuzz/net.cpp (View File)

@ -60,27 +60,6 @@ FUZZ_TARGET_INIT(net, initialize_net)
node.Release();
}
},
[&] {
if (node.m_addr_known == nullptr) {
return;
}
const std::optional<CAddress> addr_opt = ConsumeDeserializable<CAddress>(fuzzed_data_provider);
if (!addr_opt) {
return;
}
node.AddAddressKnown(*addr_opt);
},
[&] {
if (node.m_addr_known == nullptr) {
return;
}
const std::optional<CAddress> addr_opt = ConsumeDeserializable<CAddress>(fuzzed_data_provider);
if (!addr_opt) {
return;
}
FastRandomContext fast_random_context{ConsumeUInt256(fuzzed_data_provider)};
node.PushAddress(*addr_opt, fast_random_context);
},
[&] {
const std::optional<CInv> inv_opt = ConsumeDeserializable<CInv>(fuzzed_data_provider);
if (!inv_opt) {
@ -117,7 +96,6 @@ FUZZ_TARGET_INIT(net, initialize_net)
const int ref_count = node.GetRefCount();
assert(ref_count >= 0);
(void)node.GetCommonVersion();
(void)node.RelayAddrsWithConn();
const NetPermissionFlags net_permission_flags = ConsumeWeakEnum(fuzzed_data_provider, ALL_NET_PERMISSION_FLAGS);
(void)node.HasPermission(net_permission_flags);