dash/src/net.h

// Copyright (c) 2009-2010 Satoshi Nakamoto
// Copyright (c) 2009-2016 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#ifndef BITCOIN_NET_H
#define BITCOIN_NET_H
#include "addrdb.h"
#include "addrman.h"
#include "amount.h"
#include "bloom.h"
#include "compat.h"
#include "fs.h"
#include "hash.h"
#include "limitedmap.h"
#include "netaddress.h"
#include "protocol.h"
#include "random.h"
#include "streams.h"
#include "sync.h"
#include "uint256.h"
#include "threadinterrupt.h"
#include <atomic>
#include <deque>
#include <stdint.h>
#include <thread>
#include <memory>
#include <condition_variable>
#ifndef WIN32
#include <arpa/inet.h>
#endif
#include <boost/foreach.hpp>
#include <boost/signals2/signal.hpp>
class CAddrMan;
class CScheduler;
class CNode;
namespace boost {
class thread_group;
} // namespace boost
/** Time between pings automatically sent out for latency probing and keepalive (in seconds). */
static const int PING_INTERVAL = 2 * 60;
/** Time after which to disconnect, after waiting for a ping response (or inactivity). */
static const int TIMEOUT_INTERVAL = 20 * 60;
/** Run the feeler connection loop once every 2 minutes (120 seconds). **/
static const int FEELER_INTERVAL = 120;
/** The maximum number of entries in an 'inv' protocol message */
static const unsigned int MAX_INV_SZ = 50000;
/** The maximum number of new addresses to accumulate before announcing. */
static const unsigned int MAX_ADDR_TO_SEND = 1000;
/** Maximum length of incoming protocol messages (no message over 4 MB is currently acceptable). */
static const unsigned int MAX_PROTOCOL_MESSAGE_LENGTH = 4 * 1000 * 1000;
/** Maximum length of strSubVer in `version` message */
static const unsigned int MAX_SUBVERSION_LENGTH = 256;
/** Maximum number of automatic outgoing nodes */
static const int MAX_OUTBOUND_CONNECTIONS = 8;
/** Maximum number of addnode outgoing nodes */
static const int MAX_ADDNODE_CONNECTIONS = 8;
/** -listen default */
static const bool DEFAULT_LISTEN = true;
/** -upnp default */
#ifdef USE_UPNP
static const bool DEFAULT_UPNP = USE_UPNP;
#else
static const bool DEFAULT_UPNP = false;
#endif
/** The maximum number of entries in mapAskFor */
static const size_t MAPASKFOR_MAX_SZ = MAX_INV_SZ;
/** The maximum number of entries in setAskFor (larger due to getdata latency)*/
static const size_t SETASKFOR_MAX_SZ = 2 * MAX_INV_SZ;
/** The maximum number of peer connections to maintain. */
static const unsigned int DEFAULT_MAX_PEER_CONNECTIONS = 125;
/** The default for -maxuploadtarget. 0 = Unlimited */
static const uint64_t DEFAULT_MAX_UPLOAD_TARGET = 0;
/** The default timeframe for -maxuploadtarget. 1 day. */
static const uint64_t MAX_UPLOAD_TIMEFRAME = 60 * 60 * 24;
/** Default for blocks only*/
static const bool DEFAULT_BLOCKSONLY = false;
static const bool DEFAULT_FORCEDNSSEED = false;
static const size_t DEFAULT_MAXRECEIVEBUFFER = 5 * 1000;
static const size_t DEFAULT_MAXSENDBUFFER = 1 * 1000;
static const ServiceFlags REQUIRED_SERVICES = NODE_NETWORK;
// NOTE: When adjusting this, update rpcnet:setban's help ("24h")
static const unsigned int DEFAULT_MISBEHAVING_BANTIME = 60 * 60 * 24; // Default 24-hour ban
typedef int64_t NodeId;
struct AddedNodeInfo
{
std::string strAddedNode;
CService resolvedAddress;
bool fConnected;
bool fInbound;
};
class CTransaction;
class CNodeStats;
class CClientUIInterface;
struct CSerializedNetMsg
{
CSerializedNetMsg() = default;
CSerializedNetMsg(CSerializedNetMsg&&) = default;
CSerializedNetMsg& operator=(CSerializedNetMsg&&) = default;
// No copying, only moves.
CSerializedNetMsg(const CSerializedNetMsg& msg) = delete;
CSerializedNetMsg& operator=(const CSerializedNetMsg&) = delete;
std::vector<unsigned char> data;
std::string command;
};
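// Usage sketch (added comment, not part of the original API): CSerializedNetMsg
// is move-only, so callers fill the command and payload in place and then hand
// ownership to CConnman::PushMessage. The connman/pnode/nonce names below are
// assumed to exist in the caller's scope.
//
//     CSerializedNetMsg msg;
//     msg.command = NetMsgType::PING;
//     CDataStream ss(SER_NETWORK, PROTOCOL_VERSION);
//     ss << nonce;
//     msg.data.assign(ss.begin(), ss.end());
//     connman.PushMessage(pnode, std::move(msg)); // msg is left empty after the move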
class CConnman
{
public:
enum NumConnections {
CONNECTIONS_NONE = 0,
CONNECTIONS_IN = (1U << 0),
CONNECTIONS_OUT = (1U << 1),
CONNECTIONS_ALL = (CONNECTIONS_IN | CONNECTIONS_OUT),
};
struct Options
{
ServiceFlags nLocalServices = NODE_NONE;
ServiceFlags nRelevantServices = NODE_NONE;
int nMaxConnections = 0;
int nMaxOutbound = 0;
int nMaxAddnode = 0;
int nMaxFeeler = 0;
int nBestHeight = 0;
CClientUIInterface* uiInterface = nullptr;
unsigned int nSendBufferMaxSize = 0;
unsigned int nReceiveFloodSize = 0;
uint64_t nMaxOutboundTimeframe = 0;
uint64_t nMaxOutboundLimit = 0;
};
CConnman(uint64_t seed0, uint64_t seed1);
~CConnman();
bool Start(CScheduler& scheduler, std::string& strNodeError, Options options);
void Stop();
void Interrupt();
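// Lifecycle sketch (added comment, illustrative only): init code typically fills
// an Options struct, constructs the CConnman with two random SipHash seeds and
// then calls Start(); shutdown goes through Interrupt() followed by Stop(). The
// scheduler and InitError names are assumptions supplied by the caller.
//
//     CConnman::Options connOptions;
//     connOptions.nLocalServices = NODE_NETWORK;
//     connOptions.nRelevantServices = REQUIRED_SERVICES;
//     connOptions.nMaxConnections = DEFAULT_MAX_PEER_CONNECTIONS;
//     connOptions.nMaxOutbound = std::min(MAX_OUTBOUND_CONNECTIONS, connOptions.nMaxConnections);
//     connOptions.nMaxAddnode = MAX_ADDNODE_CONNECTIONS;
//
//     CConnman connman(GetRand(std::numeric_limits<uint64_t>::max()),
//                      GetRand(std::numeric_limits<uint64_t>::max()));
//     std::string strNodeError;
//     if (!connman.Start(scheduler, strNodeError, connOptions))
//         return InitError(strNodeError);
//     ...
//     connman.Interrupt();
//     connman.Stop();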
bool BindListenPort(const CService &bindAddr, std::string& strError, bool fWhitelisted = false);
bool GetNetworkActive() const { return fNetworkActive; };
void SetNetworkActive(bool active);
bool OpenNetworkConnection(const CAddress& addrConnect, bool fCountFailure, CSemaphoreGrant *grantOutbound = NULL, const char *strDest = NULL, bool fOneShot = false, bool fFeeler = false, bool fAddnode = false);
bool CheckIncomingNonce(uint64_t nonce);
bool ForNode(NodeId id, std::function<bool(CNode* pnode)> func);
void PushMessage(CNode* pnode, CSerializedNetMsg&& msg);
template<typename Callable>
void ForEachNode(Callable&& func)
{
LOCK(cs_vNodes);
for (auto&& node : vNodes) {
if (NodeFullyConnected(node))
func(node);
}
};
template<typename Callable>
void ForEachNode(Callable&& func) const
{
LOCK(cs_vNodes);
for (auto&& node : vNodes) {
if (NodeFullyConnected(node))
func(node);
}
};
template<typename Callable, typename CallableAfter>
void ForEachNodeThen(Callable&& pre, CallableAfter&& post)
{
LOCK(cs_vNodes);
for (auto&& node : vNodes) {
if (NodeFullyConnected(node))
pre(node);
}
post();
};
template<typename Callable, typename CallableAfter>
void ForEachNodeThen(Callable&& pre, CallableAfter&& post) const
{
LOCK(cs_vNodes);
for (auto&& node : vNodes) {
if (NodeFullyConnected(node))
pre(node);
}
post();
};
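// Usage sketch (added comment, assuming a CConnman reference named connman): the
// ForEachNode helpers accept any callable and invoke it under cs_vNodes for every
// peer that has completed the version handshake.
//
//     connman.ForEachNode([](CNode* pnode) {
//         LogPrintf("peer id=%d\n", pnode->GetId());
//     });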
// Addrman functions
size_t GetAddressCount() const;
void SetServices(const CService &addr, ServiceFlags nServices);
void MarkAddressGood(const CAddress& addr);
void AddNewAddresses(const std::vector<CAddress>& vAddr, const CAddress& addrFrom, int64_t nTimePenalty = 0);
std::vector<CAddress> GetAddresses();
// Denial-of-service detection/prevention
// The idea is to detect peers that are behaving
// badly and disconnect/ban them, but do it in a
// one-coding-mistake-won't-shatter-the-entire-network
// way.
// IMPORTANT: There should be nothing I can give a
// node that it will forward on that will make that
// node's peers drop it. If there is, an attacker
// can isolate a node and/or try to split the network.
// Dropping a node for sending stuff that is invalid
// now but might be valid in a later version is also
// dangerous, because it can cause a network split
// between nodes running old code and nodes running
// new code.
void Ban(const CNetAddr& netAddr, const BanReason& reason, int64_t bantimeoffset = 0, bool sinceUnixEpoch = false);
void Ban(const CSubNet& subNet, const BanReason& reason, int64_t bantimeoffset = 0, bool sinceUnixEpoch = false);
void ClearBanned(); // needed for unit testing
bool IsBanned(CNetAddr ip);
bool IsBanned(CSubNet subnet);
bool Unban(const CNetAddr &ip);
bool Unban(const CSubNet &ip);
void GetBanned(banmap_t &banmap);
void SetBanned(const banmap_t &banmap);
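// Usage sketch (added comment, illustrative only): manual bans, e.g. from the
// setban RPC, go through Ban() with BanReasonManuallyAdded (see addrdb.h) and a
// ban-time offset in seconds. LookupSubNet from netbase.h and the connman
// instance are assumptions of this example.
//
//     CSubNet subNet;
//     if (LookupSubNet("203.0.113.0/24", subNet)) {
//         connman.Ban(subNet, BanReasonManuallyAdded, 60 * 60 * 24 /* 24h */);
//         ...
//         connman.Unban(subNet);
//     }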
void AddOneShot(const std::string& strDest);
bool AddNode(const std::string& node);
bool RemoveAddedNode(const std::string& node);
std::vector<AddedNodeInfo> GetAddedNodeInfo();
size_t GetNodeCount(NumConnections num);
void GetNodeStats(std::vector<CNodeStats>& vstats);
bool DisconnectNode(const std::string& node);
bool DisconnectNode(NodeId id);
unsigned int GetSendBufferSize() const;
void AddWhitelistedRange(const CSubNet &subnet);
ServiceFlags GetLocalServices() const;
//!set the max outbound target in bytes
void SetMaxOutboundTarget(uint64_t limit);
uint64_t GetMaxOutboundTarget();
//!set the timeframe for the max outbound target
void SetMaxOutboundTimeframe(uint64_t timeframe);
uint64_t GetMaxOutboundTimeframe();
//!check if the outbound target is reached
// if param historicalBlockServingLimit is set true, the function will
// return true if the limit for serving historical blocks has been reached
bool OutboundTargetReached(bool historicalBlockServingLimit);
//!returns the bytes left in the current max outbound cycle
// in case of no limit, it will always return 0
uint64_t GetOutboundTargetBytesLeft();
//!returns the time in seconds left in the current max outbound cycle
// in case of no limit, it will always return 0
uint64_t GetMaxOutboundTimeLeftInCycle();
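// Usage sketch (added comment, illustrative only): with -maxuploadtarget active,
// block-serving code can consult this accounting before answering getdata
// requests for historical blocks; connman is an assumed instance.
//
//     connman.SetMaxOutboundTarget(500 * 1024 * 1024); // 500 MiB per timeframe
//     if (connman.OutboundTargetReached(true /* historical block serving limit */)) {
//         // stop serving old blocks to this peer until the cycle resets
//     }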
uint64_t GetTotalBytesRecv();
uint64_t GetTotalBytesSent();
void SetBestHeight(int height);
int GetBestHeight() const;
/** Get a unique deterministic randomizer. */
CSipHasher GetDeterministicRandomizer(uint64_t id) const;
unsigned int GetReceiveFloodSize() const;
void WakeMessageHandler();
private:
struct ListenSocket {
SOCKET socket;
bool whitelisted;
ListenSocket(SOCKET socket_, bool whitelisted_) : socket(socket_), whitelisted(whitelisted_) {}
};
void ThreadOpenAddedConnections();
void ProcessOneShot();
void ThreadOpenConnections();
void ThreadMessageHandler();
void AcceptConnection(const ListenSocket& hListenSocket);
void ThreadSocketHandler();
void ThreadDNSAddressSeed();
uint64_t CalculateKeyedNetGroup(const CAddress& ad) const;
CNode* FindNode(const CNetAddr& ip);
CNode* FindNode(const CSubNet& subNet);
CNode* FindNode(const std::string& addrName);
CNode* FindNode(const CService& addr);
bool AttemptToEvictConnection();
CNode* ConnectNode(CAddress addrConnect, const char *pszDest, bool fCountFailure);
bool IsWhitelistedRange(const CNetAddr &addr);
void DeleteNode(CNode* pnode);
NodeId GetNewNodeId();
size_t SocketSendData(CNode *pnode) const;
//!check if the banlist has unwritten changes
bool BannedSetIsDirty();
//!set the "dirty" flag for the banlist
void SetBannedSetDirty(bool dirty=true);
//!clean unused entries (if bantime has expired)
void SweepBanned();
void DumpAddresses();
void DumpData();
void DumpBanlist();
// Network stats
void RecordBytesRecv(uint64_t bytes);
void RecordBytesSent(uint64_t bytes);
// Whether the node should be passed out in ForEach* callbacks
static bool NodeFullyConnected(const CNode* pnode);
// Network usage totals
CCriticalSection cs_totalBytesRecv;
CCriticalSection cs_totalBytesSent;
uint64_t nTotalBytesRecv;
uint64_t nTotalBytesSent;
// outbound limit & stats
uint64_t nMaxOutboundTotalBytesSentInCycle;
uint64_t nMaxOutboundCycleStartTime;
uint64_t nMaxOutboundLimit;
uint64_t nMaxOutboundTimeframe;
// Whitelisted ranges. Any node connecting from these is automatically
// whitelisted (as well as those connecting to whitelisted binds).
std::vector<CSubNet> vWhitelistedRange;
CCriticalSection cs_vWhitelistedRange;
unsigned int nSendBufferMaxSize;
unsigned int nReceiveFloodSize;
std::vector<ListenSocket> vhListenSocket;
std::atomic<bool> fNetworkActive;
banmap_t setBanned;
CCriticalSection cs_setBanned;
bool setBannedIsDirty;
bool fAddressesInitialized;
CAddrMan addrman;
std::deque<std::string> vOneShots;
CCriticalSection cs_vOneShots;
std::vector<std::string> vAddedNodes;
CCriticalSection cs_vAddedNodes;
std::vector<CNode*> vNodes;
std::list<CNode*> vNodesDisconnected;
mutable CCriticalSection cs_vNodes;
std::atomic<NodeId> nLastNodeId;
/** Services this instance offers */
ServiceFlags nLocalServices;
/** Services this instance cares about */
ServiceFlags nRelevantServices;
CSemaphore *semOutbound;
CSemaphore *semAddnode;
int nMaxConnections;
int nMaxOutbound;
int nMaxAddnode;
int nMaxFeeler;
std::atomic<int> nBestHeight;
CClientUIInterface* clientInterface;
/** SipHasher seeds for deterministic randomness */
const uint64_t nSeed0, nSeed1;
/** flag for waking the message processor. */
bool fMsgProcWake;
std::condition_variable condMsgProc;
std::mutex mutexMsgProc;
std::atomic<bool> flagInterruptMsgProc;
CThreadInterrupt interruptNet;
std::thread threadDNSAddressSeed;
std::thread threadSocketHandler;
std::thread threadOpenAddedConnections;
std::thread threadOpenConnections;
std::thread threadMessageHandler;
};
extern std::unique_ptr<CConnman> g_connman;
void Discover(boost::thread_group& threadGroup);
void MapPort(bool fUseUPnP);
unsigned short GetListenPort();
bool BindListenPort(const CService &bindAddr, std::string& strError, bool fWhitelisted = false);
struct CombinerAll
{
typedef bool result_type;
template<typename I>
bool operator()(I first, I last) const
{
while (first != last) {
if (!(*first)) return false;
++first;
}
return true;
}
};
// Signals for message handling
struct CNodeSignals
{
boost::signals2::signal<bool (CNode*, CConnman&, std::atomic<bool>&), CombinerAll> ProcessMessages;
boost::signals2::signal<bool (CNode*, CConnman&, std::atomic<bool>&), CombinerAll> SendMessages;
boost::signals2::signal<void (CNode*, CConnman&)> InitializeNode;
boost::signals2::signal<void (NodeId, bool&)> FinalizeNode;
};
CNodeSignals& GetNodeSignals();
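// Registration sketch (added comment, illustrative only): the message-processing
// layer connects its handlers to these signals at startup; CombinerAll makes the
// boolean signals return true only if every connected slot returned true. The
// handler names below stand in for the real net_processing functions and are
// assumptions of this example.
//
//     CNodeSignals& nodeSignals = GetNodeSignals();
//     nodeSignals.ProcessMessages.connect(&ProcessMessages);
//     nodeSignals.SendMessages.connect(&SendMessages);
//     nodeSignals.InitializeNode.connect(&InitializeNode);
//     nodeSignals.FinalizeNode.connect(&FinalizeNode);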
enum
{
LOCAL_NONE, // unknown
LOCAL_IF, // address a local interface listens on
LOCAL_BIND, // address explicitly bound to
LOCAL_UPNP, // address reported by UPnP
LOCAL_MANUAL, // address explicitly specified (-externalip=)
LOCAL_MAX
};
bool IsPeerAddrLocalGood(CNode *pnode);
void AdvertiseLocal(CNode *pnode);
void SetLimited(enum Network net, bool fLimited = true);
bool IsLimited(enum Network net);
bool IsLimited(const CNetAddr& addr);
bool AddLocal(const CService& addr, int nScore = LOCAL_NONE);
bool AddLocal(const CNetAddr& addr, int nScore = LOCAL_NONE);
bool RemoveLocal(const CService& addr);
bool SeenLocal(const CService& addr);
bool IsLocal(const CService& addr);
bool GetLocal(CService &addr, const CNetAddr *paddrPeer = NULL);
bool IsReachable(enum Network net);
bool IsReachable(const CNetAddr &addr);
CAddress GetLocalAddress(const CNetAddr *paddrPeer, ServiceFlags nLocalServices);
extern bool fDiscover;
extern bool fListen;
extern bool fRelayTxes;
extern limitedmap<uint256, int64_t> mapAlreadyAskedFor;
/** Subversion as sent to the P2P network in `version` messages */
extern std::string strSubVersion;
struct LocalServiceInfo {
int nScore;
int nPort;
};
extern CCriticalSection cs_mapLocalHost;
extern std::map<CNetAddr, LocalServiceInfo> mapLocalHost;
typedef std::map<std::string, uint64_t> mapMsgCmdSize; //command, total bytes
class CNodeStats
{
public:
NodeId nodeid;
ServiceFlags nServices;
bool fRelayTxes;
int64_t nLastSend;
int64_t nLastRecv;
int64_t nTimeConnected;
int64_t nTimeOffset;
std::string addrName;
int nVersion;
std::string cleanSubVer;
bool fInbound;
bool fAddnode;
int nStartingHeight;
uint64_t nSendBytes;
mapMsgCmdSize mapSendBytesPerMsgCmd;
uint64_t nRecvBytes;
mapMsgCmdSize mapRecvBytesPerMsgCmd;
bool fWhitelisted;
double dPingTime;
double dPingWait;
double dMinPing;
std::string addrLocal;
CAddress addr;
};
class CNetMessage {
private:
mutable CHash256 hasher;
mutable uint256 data_hash;
public:
bool in_data; // parsing header (false) or data (true)
CDataStream hdrbuf; // partially received header
CMessageHeader hdr; // complete header
unsigned int nHdrPos;
CDataStream vRecv; // received message data
unsigned int nDataPos;
int64_t nTime; // time (in microseconds) of message receipt.
CNetMessage(const CMessageHeader::MessageStartChars& pchMessageStartIn, int nTypeIn, int nVersionIn) : hdrbuf(nTypeIn, nVersionIn), hdr(pchMessageStartIn), vRecv(nTypeIn, nVersionIn) {
hdrbuf.resize(24);
in_data = false;
nHdrPos = 0;
nDataPos = 0;
nTime = 0;
}
bool complete() const
{
if (!in_data)
return false;
return (hdr.nMessageSize == nDataPos);
}
const uint256& GetMessageHash() const;
void SetVersion(int nVersionIn)
{
hdrbuf.SetVersion(nVersionIn);
vRecv.SetVersion(nVersionIn);
}
int readHeader(const char *pch, unsigned int nBytes);
int readData(const char *pch, unsigned int nBytes);
};
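// Receive-loop sketch (added comment, illustrative only): the socket thread feeds
// raw bytes into readHeader() until the 24-byte header is parsed, then into
// readData() until hdr.nMessageSize bytes have arrived and complete() is true.
//
//     while (nBytes > 0) {
//         int handled = msg.in_data ? msg.readData(pch, nBytes)
//                                   : msg.readHeader(pch, nBytes);
//         if (handled < 0) return false;  // parse error, disconnect the peer
//         pch += handled;
//         nBytes -= handled;
//         if (msg.complete()) break;      // hand the finished message to the processor
//     }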
/** Information about a peer */
class CNode
{
friend class CConnman;
public:
// socket
std::atomic<ServiceFlags> nServices;
ServiceFlags nServicesExpected;
SOCKET hSocket;
size_t nSendSize; // total size of all vSendMsg entries
size_t nSendOffset; // offset inside the first vSendMsg already sent
uint64_t nSendBytes;
std::deque<std::vector<unsigned char>> vSendMsg;
CCriticalSection cs_vSend;
CCriticalSection cs_hSocket;
CCriticalSection cs_vRecv;
CCriticalSection cs_vProcessMsg;
std::list<CNetMessage> vProcessMsg;
size_t nProcessQueueSize;
CCriticalSection cs_sendProcessing;
std::deque<CInv> vRecvGetData;
uint64_t nRecvBytes;
std::atomic<int> nRecvVersion;
std::atomic<int64_t> nLastSend;
std::atomic<int64_t> nLastRecv;
const int64_t nTimeConnected;
std::atomic<int64_t> nTimeOffset;
const CAddress addr;
std::atomic<int> nVersion;
// strSubVer is whatever byte array we read from the wire. However, this field is intended
// to be printed out, displayed to humans in various forms and so on. So we sanitize it and
// store the sanitized version in cleanSubVer. The original should be used when dealing with
// the network or wire types and the cleaned string used when displayed or logged.
std::string strSubVer, cleanSubVer;
CCriticalSection cs_SubVer; // used for both cleanSubVer and strSubVer
bool fWhitelisted; // This peer can bypass DoS banning.
bool fFeeler; // If true this node is being used as a short lived feeler.
bool fOneShot;
bool fAddnode;
bool fClient;
const bool fInbound;
std::atomic_bool fSuccessfullyConnected;
std::atomic_bool fDisconnect;
// We use fRelayTxes for two purposes -
// a) it allows us to not relay tx invs before receiving the peer's version message
// b) the peer may tell us in its version message that we should not relay tx invs
// unless it loads a bloom filter.
bool fRelayTxes; //protected by cs_filter
bool fSentAddr;
CSemaphoreGrant grantOutbound;
CCriticalSection cs_filter;
CBloomFilter* pfilter;
std::atomic<int> nRefCount;
const NodeId id;
const uint64_t nKeyedNetGroup;
std::atomic_bool fPauseRecv;
std::atomic_bool fPauseSend;
protected:
mapMsgCmdSize mapSendBytesPerMsgCmd;
mapMsgCmdSize mapRecvBytesPerMsgCmd;
public:
uint256 hashContinue;
std::atomic<int> nStartingHeight;
// flood relay
std::vector<CAddress> vAddrToSend;
CRollingBloomFilter addrKnown;
bool fGetAddr;
std::set<uint256> setKnown;
int64_t nNextAddrSend;
int64_t nNextLocalAddrSend;
// inventory based relay
CRollingBloomFilter filterInventoryKnown;
// Set of transaction ids we still have to announce.
// They are sorted by the mempool before relay, so the order is not important.
std::set<uint256> setInventoryTxToSend;
// List of block ids we still have to announce.
// There is no final sorting before sending, as they are always sent immediately
// and in the order requested.
std::vector<uint256> vInventoryBlockToSend;
CCriticalSection cs_inventory;
std::set<uint256> setAskFor;
std::multimap<int64_t, CInv> mapAskFor;
int64_t nNextInvSend;
// Used for headers announcements - unfiltered blocks to relay
// Also protected by cs_inventory
std::vector<uint256> vBlockHashesToAnnounce;
// Used for BIP35 mempool sending, also protected by cs_inventory
bool fSendMempool;
// Last time a "MEMPOOL" request was serviced.
std::atomic<int64_t> timeLastMempoolReq;
// Block and TXN accept times
std::atomic<int64_t> nLastBlockTime;
std::atomic<int64_t> nLastTXTime;
// Ping time measurement:
// The pong reply we're expecting, or 0 if no pong expected.
std::atomic<uint64_t> nPingNonceSent;
// Time (in usec) the last ping was sent, or 0 if no ping was ever sent.
std::atomic<int64_t> nPingUsecStart;
// Last measured round-trip time.
std::atomic<int64_t> nPingUsecTime;
// Best measured round-trip time.
std::atomic<int64_t> nMinPingUsecTime;
// Whether a ping is requested.
std::atomic<bool> fPingQueued;
// Minimum fee rate with which to filter inv's to this node
CAmount minFeeFilter;
CCriticalSection cs_feeFilter;
CAmount lastSentFeeFilter;
int64_t nextSendTimeFeeFilter;
CNode(NodeId id, ServiceFlags nLocalServicesIn, int nMyStartingHeightIn, SOCKET hSocketIn, const CAddress &addrIn, uint64_t nKeyedNetGroupIn, uint64_t nLocalHostNonceIn, const std::string &addrNameIn = "", bool fInboundIn = false);
~CNode();
private:
CNode(const CNode&);
void operator=(const CNode&);
const uint64_t nLocalHostNonce;
// Services offered to this peer
const ServiceFlags nLocalServices;
const int nMyStartingHeight;
int nSendVersion;
std::list<CNetMessage> vRecvMsg; // Used only by SocketHandler thread
mutable CCriticalSection cs_addrName;
std::string addrName;
CService addrLocal;
mutable CCriticalSection cs_addrLocal;
public:
NodeId GetId() const {
return id;
}
uint64_t GetLocalNonce() const {
return nLocalHostNonce;
}
int GetMyStartingHeight() const {
return nMyStartingHeight;
}
int GetRefCount()
{
assert(nRefCount >= 0);
return nRefCount;
}
bool ReceiveMsgBytes(const char *pch, unsigned int nBytes, bool& complete);
void SetRecvVersion(int nVersionIn)
{
nRecvVersion = nVersionIn;
}
int GetRecvVersion()
{
return nRecvVersion;
}
void SetSendVersion(int nVersionIn);
int GetSendVersion() const;
CService GetAddrLocal() const;
//! May not be called more than once
void SetAddrLocal(const CService& addrLocalIn);
CNode* AddRef()
{
nRefCount++;
return this;
}
void Release()
{
nRefCount--;
}
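// Note (added comment): code that holds a CNode pointer outside cs_vNodes takes a
// reference with AddRef() and must pair it with Release(); a disconnected node is
// only deleted once its reference count has dropped back to zero.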
void AddAddressKnown(const CAddress& _addr)
{
addrKnown.insert(_addr.GetKey());
}
void PushAddress(const CAddress& _addr, FastRandomContext &insecure_rand)
{
// Known checking here is only to save space from duplicates.
// SendMessages will filter it again for knowns that were added
// after addresses were pushed.
if (_addr.IsValid() && !addrKnown.contains(_addr.GetKey())) {
if (vAddrToSend.size() >= MAX_ADDR_TO_SEND) {
vAddrToSend[insecure_rand.rand32() % vAddrToSend.size()] = _addr;
} else {
vAddrToSend.push_back(_addr);
}
}
}
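// Note (added comment): once vAddrToSend holds MAX_ADDR_TO_SEND entries, a new
// address overwrites a uniformly random existing entry rather than being dropped,
// so a burst of addresses cannot completely crowd out earlier ones.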
void AddInventoryKnown(const CInv& inv)
{
{
LOCK(cs_inventory);
filterInventoryKnown.insert(inv.hash);
}
}
void PushInventory(const CInv& inv)
{
LOCK(cs_inventory);
if (inv.type == MSG_TX) {
if (!filterInventoryKnown.contains(inv.hash)) {
setInventoryTxToSend.insert(inv.hash);
}
} else if (inv.type == MSG_BLOCK) {
vInventoryBlockToSend.push_back(inv.hash);
}
}
void PushBlockHash(const uint256 &hash)
{
LOCK(cs_inventory);
vBlockHashesToAnnounce.push_back(hash);
}
void AskFor(const CInv& inv);
void CloseSocketDisconnect();
void copyStats(CNodeStats &stats);
ServiceFlags GetLocalServices() const
{
return nLocalServices;
}
std::string GetAddrName() const;
//! Sets the addrName only if it was not previously set
void MaybeSetAddrName(const std::string& addrNameIn);
};
/** Return a timestamp in the future (in microseconds) for exponentially distributed events. */
int64_t PoissonNextSend(int64_t nNow, int average_interval_seconds);
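// Sketch of the underlying idea (added comment, illustrative only): for a Poisson
// process with the given average interval, the gap to the next event is
// exponentially distributed and can be generated by inversion from a uniform
// draw u in (0, 1]:
//
//     double u = (GetRand(1ULL << 48) + 1) / (double)(1ULL << 48); // uniform in (0, 1]
//     int64_t next = nNow + (int64_t)(-std::log(u) * average_interval_seconds * 1000000.0 + 0.5);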
#endif // BITCOIN_NET_H