dash/src/net_processing.cpp


// Copyright (c) 2009-2010 Satoshi Nakamoto
// Copyright (c) 2009-2016 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include "net_processing.h"
#include "alert.h"
#include "addrman.h"
#include "arith_uint256.h"
#include "chainparams.h"
#include "consensus/validation.h"
#include "hash.h"
#include "init.h"
#include "validation.h"
#include "merkleblock.h"
#include "net.h"
#include "netbase.h"
#include "policy/fees.h"
#include "policy/policy.h"
#include "primitives/block.h"
#include "primitives/transaction.h"
#include "random.h"
#include "tinyformat.h"
#include "txmempool.h"
#include "ui_interface.h"
#include "util.h"
#include "utilmoneystr.h"
#include "utilstrencodings.h"
#include "validationinterface.h"
#include "spork.h"
#include "governance.h"
#include "instantx.h"
#include "masternode-payments.h"
#include "masternode-sync.h"
#include "masternodeman.h"
#ifdef ENABLE_WALLET
#include "privatesend-client.h"
#endif // ENABLE_WALLET
#include "privatesend-server.h"
#include <boost/thread.hpp>
using namespace std;
#if defined(NDEBUG)
# error "Dash Core cannot be compiled without assertions."
#endif
int64_t nTimeBestReceived = 0; // Used only to inform the wallet of when we last received a block
struct COrphanTx {
CTransaction tx;
NodeId fromPeer;
};
map<uint256, COrphanTx> mapOrphanTransactions GUARDED_BY(cs_main);
map<uint256, set<uint256> > mapOrphanTransactionsByPrev GUARDED_BY(cs_main);
void EraseOrphansFor(NodeId peer) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
// Internal stuff
namespace {
/** Number of nodes with fSyncStarted. */
int nSyncStarted = 0;
/**
* Sources of received blocks, saved to be able to send them reject
* messages or ban them when processing happens afterwards. Protected by
* cs_main.
*/
map<uint256, NodeId> mapBlockSource;
/**
* Filter for transactions that were recently rejected by
* AcceptToMemoryPool. These are not rerequested until the chain tip
* changes, at which point the entire filter is reset. Protected by
* cs_main.
*
* Without this filter we'd be re-requesting txs from each of our peers,
* increasing bandwidth consumption considerably. For instance, with 100
* peers, half of which relay a tx we don't accept, that might be a 50x
* bandwidth increase. A flooding attacker attempting to roll-over the
* filter using minimum-sized, 60-byte transactions might manage to send
* 1000/sec if we have fast peers, so we pick 120,000 to give our peers a
* two minute window to send invs to us.
*
* Decreasing the false positive rate is fairly cheap, so we pick one in a
* million to make it highly unlikely for users to have issues with this
* filter.
*
* Memory used: 1.3MB
*/
boost::scoped_ptr<CRollingBloomFilter> recentRejects;
uint256 hashRecentRejectsChainTip;
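/**
 * A minimal usage sketch of the rolling reject filter declared above,
 * assuming only the CRollingBloomFilter interface this file already relies on
 * (insert/contains/reset); illustrative, not wired into message processing.
 */
static void SketchRecentRejectsUsage(const uint256& txHash)
{
    CRollingBloomFilter rejects(120000, 0.000001); // ~1.3MB, one-in-a-million false positive rate
    rejects.insert(txHash);                        // remember a txid rejected by AcceptToMemoryPool
    bool fKnown = rejects.contains(txHash);        // stays true until ~120,000 newer entries roll it out
    if (fKnown) rejects.reset();                   // the real filter is reset whenever the chain tip changes
}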
/** Blocks that are in flight, and that are in the queue to be downloaded. Protected by cs_main. */
struct QueuedBlock {
uint256 hash;
CBlockIndex* pindex; //!< Optional.
bool fValidatedHeaders; //!< Whether this block has validated headers at the time of request.
};
map<uint256, pair<NodeId, list<QueuedBlock>::iterator> > mapBlocksInFlight;
/** Number of preferable block download peers. */
int nPreferredDownload = 0;
/** Number of peers from which we're downloading blocks. */
int nPeersWithValidatedDownloads = 0;
} // anon namespace
//////////////////////////////////////////////////////////////////////////////
//
// Registration of network node signals.
//
namespace {
struct CBlockReject {
unsigned char chRejectCode;
string strRejectReason;
uint256 hashBlock;
};
/**
* Maintain validation-specific state about nodes, protected by cs_main, instead of
* by CNode's own locks. This simplifies asynchronous operation, where
* processing of incoming data is done after the ProcessMessage call returns,
* and we're no longer holding the node's locks.
*/
struct CNodeState {
//! The peer's address
const CService address;
//! Whether we have a fully established connection.
bool fCurrentlyConnected;
//! Accumulated misbehaviour score for this peer.
int nMisbehavior;
//! Whether this peer should be disconnected and banned (unless whitelisted).
bool fShouldBan;
//! String name of this peer (debugging/logging purposes).
const std::string name;
//! List of asynchronously-determined block rejections to notify this peer about.
std::vector<CBlockReject> rejects;
//! The best known block we know this peer has announced.
CBlockIndex *pindexBestKnownBlock;
//! The hash of the last unknown block this peer has announced.
uint256 hashLastUnknownBlock;
//! The last full block we both have.
CBlockIndex *pindexLastCommonBlock;
//! The best header we have sent our peer.
CBlockIndex *pindexBestHeaderSent;
//! Whether we've started headers synchronization with this peer.
bool fSyncStarted;
Backport "assumed valid blocks" feature from Bitcoin 0.13 (#1582) * IBD check uses minimumchain work instead of checkpoints. This introduces a 'minimum chain work' chainparam which is intended to be the known amount of work in the chain for the network at the time of software release. If you don't have this much work, you're not yet caught up. This is used instead of the count of blocks test from checkpoints. This criteria is trivial to keep updated as there is no element of subjectivity, trust, or position dependence to it. It is also a more reliable metric of sync status than a block count. * Remove GetTotalBlocksEstimate and checkpoint tests that test nothing. GetTotalBlocksEstimate is no longer used and it was the only thing the checkpoint tests were testing. Since checkpoints are on their way out it makes more sense to remove the test file than to cook up a new pointless test. # Conflicts: # src/Makefile.test.include # src/test/Checkpoints_tests.cpp * IsInitialBlockDownload no longer uses header-only timestamps. This avoids a corner case (mostly visible on testnet) where bogus headers can keep nodes in IsInitialBlockDownload. * Delay parallel block download until chain has sufficient work nMinimumChainWork is an anti-DoS threshold; wait until we have a proposed tip with more work than that before downloading blocks towards that tip. * Add timeout for headers sync At startup, we choose one peer to serve us the headers chain, until our best header is close to caught up. Disconnect this peer if more than 15 minutes + 1ms/expected_header passes and our best header is still more than 1 day away from current time. * Introduce assumevalid setting to skip presumed valid scripts. This disentangles the script validation skipping from checkpoints. A new option is introduced "assumevalid" which specifies a block whos ancestors we assume all have valid scriptsigs and so we do not check them when they are also burried under the best header by two weeks worth of work. Unlike checkpoints this has no influence on consensus unless you set it to a block with an invalid history. Because of this it can be easily be updated without risk of influencing the network consensus. This results in a massive IBD speedup. This approach was independently recommended by Peter Todd and Luke-Jr since POW based signature skipping (see PR#9180) does not have the verifiable properties of a specific hash and may create bad incentives. The downside is that, like checkpoints, the defaults bitrot and older releases will sync slower. On the plus side users can provide their own value here, and if they set it to something crazy all that will happen is more time will be spend validating signatures. Checkblocks and checklevel are also moved to the hidden debug options: Especially now that checkblocks has a low default there is little need to change these settings, and users frequently misunderstand them as influencing security or IBD speed. By hiding them we offset the space added by this new option. * Add consensusParams to FindNextBlocksToDownload * Adjust check in headers timeout logic to align with 144 blocks in Dash
2017-08-23 16:21:08 +02:00
//! When to potentially disconnect peer for stalling headers download
int64_t nHeadersSyncTimeout;
//! Since when we're stalling block download progress (in microseconds), or 0.
int64_t nStallingSince;
list<QueuedBlock> vBlocksInFlight;
//! When the first entry in vBlocksInFlight started downloading. Don't care when vBlocksInFlight is empty.
int64_t nDownloadingSince;
int nBlocksInFlight;
int nBlocksInFlightValidHeaders;
//! Whether we consider this a preferred download peer.
bool fPreferredDownload;
//! Whether this peer wants invs or headers (when possible) for block announcements.
bool fPreferHeaders;
CNodeState(CAddress addrIn, std::string addrNameIn) : address(addrIn), name(addrNameIn) {
fCurrentlyConnected = false;
nMisbehavior = 0;
fShouldBan = false;
pindexBestKnownBlock = NULL;
hashLastUnknownBlock.SetNull();
pindexLastCommonBlock = NULL;
pindexBestHeaderSent = NULL;
fSyncStarted = false;
Backport "assumed valid blocks" feature from Bitcoin 0.13 (#1582) * IBD check uses minimumchain work instead of checkpoints. This introduces a 'minimum chain work' chainparam which is intended to be the known amount of work in the chain for the network at the time of software release. If you don't have this much work, you're not yet caught up. This is used instead of the count of blocks test from checkpoints. This criteria is trivial to keep updated as there is no element of subjectivity, trust, or position dependence to it. It is also a more reliable metric of sync status than a block count. * Remove GetTotalBlocksEstimate and checkpoint tests that test nothing. GetTotalBlocksEstimate is no longer used and it was the only thing the checkpoint tests were testing. Since checkpoints are on their way out it makes more sense to remove the test file than to cook up a new pointless test. # Conflicts: # src/Makefile.test.include # src/test/Checkpoints_tests.cpp * IsInitialBlockDownload no longer uses header-only timestamps. This avoids a corner case (mostly visible on testnet) where bogus headers can keep nodes in IsInitialBlockDownload. * Delay parallel block download until chain has sufficient work nMinimumChainWork is an anti-DoS threshold; wait until we have a proposed tip with more work than that before downloading blocks towards that tip. * Add timeout for headers sync At startup, we choose one peer to serve us the headers chain, until our best header is close to caught up. Disconnect this peer if more than 15 minutes + 1ms/expected_header passes and our best header is still more than 1 day away from current time. * Introduce assumevalid setting to skip presumed valid scripts. This disentangles the script validation skipping from checkpoints. A new option is introduced "assumevalid" which specifies a block whos ancestors we assume all have valid scriptsigs and so we do not check them when they are also burried under the best header by two weeks worth of work. Unlike checkpoints this has no influence on consensus unless you set it to a block with an invalid history. Because of this it can be easily be updated without risk of influencing the network consensus. This results in a massive IBD speedup. This approach was independently recommended by Peter Todd and Luke-Jr since POW based signature skipping (see PR#9180) does not have the verifiable properties of a specific hash and may create bad incentives. The downside is that, like checkpoints, the defaults bitrot and older releases will sync slower. On the plus side users can provide their own value here, and if they set it to something crazy all that will happen is more time will be spend validating signatures. Checkblocks and checklevel are also moved to the hidden debug options: Especially now that checkblocks has a low default there is little need to change these settings, and users frequently misunderstand them as influencing security or IBD speed. By hiding them we offset the space added by this new option. * Add consensusParams to FindNextBlocksToDownload * Adjust check in headers timeout logic to align with 144 blocks in Dash
2017-08-23 16:21:08 +02:00
nHeadersSyncTimeout = 0;
nStallingSince = 0;
nDownloadingSince = 0;
nBlocksInFlight = 0;
nBlocksInFlightValidHeaders = 0;
fPreferredDownload = false;
fPreferHeaders = false;
}
};
/** Map maintaining per-node state. Requires cs_main. */
map<NodeId, CNodeState> mapNodeState;
// Requires cs_main.
CNodeState *State(NodeId pnode) {
map<NodeId, CNodeState>::iterator it = mapNodeState.find(pnode);
if (it == mapNodeState.end())
return NULL;
return &it->second;
}
void UpdatePreferredDownload(CNode* node, CNodeState* state)
{
nPreferredDownload -= state->fPreferredDownload;
// Whether this node should be marked as a preferred download node.
state->fPreferredDownload = (!node->fInbound || node->fWhitelisted) && !node->fOneShot && !node->fClient;
nPreferredDownload += state->fPreferredDownload;
}
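// In plain terms (a reading of the predicate above): outbound peers, plus
// whitelisted inbound peers, count as preferred download sources, provided
// they are neither one-shot address connections (fOneShot) nor peers unable
// to serve us full blocks (fClient).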
void PushNodeVersion(CNode *pnode, CConnman& connman, int64_t nTime)
{
ServiceFlags nLocalNodeServices = pnode->GetLocalServices();
uint64_t nonce = pnode->GetLocalNonce();
int nNodeStartingHeight = pnode->GetMyStartingHeight();
NodeId nodeid = pnode->GetId();
CAddress addr = pnode->addr;
CAddress addrYou = (addr.IsRoutable() && !IsProxy(addr) ? addr : CAddress(CService(), addr.nServices));
CAddress addrMe = CAddress(CService(), nLocalNodeServices);
connman.PushMessageWithVersion(pnode, INIT_PROTO_VERSION, NetMsgType::VERSION, PROTOCOL_VERSION, (uint64_t)nLocalNodeServices, nTime, addrYou, addrMe,
nonce, strSubVersion, nNodeStartingHeight, ::fRelayTxes);
if (fLogIPs)
LogPrint("net", "send version message: version %d, blocks=%d, us=%s, them=%s, peer=%d\n", PROTOCOL_VERSION, nNodeStartingHeight, addrMe.ToString(), addrYou.ToString(), nodeid);
else
LogPrint("net", "send version message: version %d, blocks=%d, us=%s, peer=%d\n", PROTOCOL_VERSION, nNodeStartingHeight, addrMe.ToString(), nodeid);
}
void InitializeNode(CNode *pnode, CConnman& connman) {
CAddress addr = pnode->addr;
std::string addrName = pnode->addrName;
NodeId nodeid = pnode->GetId();
{
LOCK(cs_main);
mapNodeState.emplace_hint(mapNodeState.end(), std::piecewise_construct, std::forward_as_tuple(nodeid), std::forward_as_tuple(addr, std::move(addrName)));
}
if(!pnode->fInbound)
PushNodeVersion(pnode, connman, GetTime());
}
void FinalizeNode(NodeId nodeid, bool& fUpdateConnectionTime) {
fUpdateConnectionTime = false;
LOCK(cs_main);
CNodeState *state = State(nodeid);
if (state->fSyncStarted)
nSyncStarted--;
if (state->nMisbehavior == 0 && state->fCurrentlyConnected) {
fUpdateConnectionTime = true;
}
BOOST_FOREACH(const QueuedBlock& entry, state->vBlocksInFlight) {
mapBlocksInFlight.erase(entry.hash);
}
EraseOrphansFor(nodeid);
nPreferredDownload -= state->fPreferredDownload;
nPeersWithValidatedDownloads -= (state->nBlocksInFlightValidHeaders != 0);
assert(nPeersWithValidatedDownloads >= 0);
mapNodeState.erase(nodeid);
if (mapNodeState.empty()) {
// Do a consistency check after the last peer is removed.
assert(mapBlocksInFlight.empty());
assert(nPreferredDownload == 0);
assert(nPeersWithValidatedDownloads == 0);
}
}
// Requires cs_main.
// Returns a bool indicating whether we requested this block.
bool MarkBlockAsReceived(const uint256& hash) {
map<uint256, pair<NodeId, list<QueuedBlock>::iterator> >::iterator itInFlight = mapBlocksInFlight.find(hash);
if (itInFlight != mapBlocksInFlight.end()) {
CNodeState *state = State(itInFlight->second.first);
state->nBlocksInFlightValidHeaders -= itInFlight->second.second->fValidatedHeaders;
if (state->nBlocksInFlightValidHeaders == 0 && itInFlight->second.second->fValidatedHeaders) {
// Last validated block on the queue was received.
nPeersWithValidatedDownloads--;
}
if (state->vBlocksInFlight.begin() == itInFlight->second.second) {
// First block on the queue was received, update the start download time for the next one
state->nDownloadingSince = std::max(state->nDownloadingSince, GetTimeMicros());
}
state->vBlocksInFlight.erase(itInFlight->second.second);
state->nBlocksInFlight--;
state->nStallingSince = 0;
mapBlocksInFlight.erase(itInFlight);
return true;
}
return false;
}
// Requires cs_main.
void MarkBlockAsInFlight(NodeId nodeid, const uint256& hash, const Consensus::Params& consensusParams, CBlockIndex *pindex = NULL) {
CNodeState *state = State(nodeid);
assert(state != NULL);
// Make sure it's not listed somewhere already.
MarkBlockAsReceived(hash);
QueuedBlock newentry = {hash, pindex, pindex != NULL};
list<QueuedBlock>::iterator it = state->vBlocksInFlight.insert(state->vBlocksInFlight.end(), newentry);
state->nBlocksInFlight++;
state->nBlocksInFlightValidHeaders += newentry.fValidatedHeaders;
if (state->nBlocksInFlight == 1) {
// We're starting a block download (batch) from this peer.
state->nDownloadingSince = GetTimeMicros();
}
if (state->nBlocksInFlightValidHeaders == 1 && pindex != NULL) {
nPeersWithValidatedDownloads++;
}
mapBlocksInFlight[hash] = std::make_pair(nodeid, it);
}
/** Check whether the last unknown block a peer advertised is not yet known. */
void ProcessBlockAvailability(NodeId nodeid) {
CNodeState *state = State(nodeid);
assert(state != NULL);
if (!state->hashLastUnknownBlock.IsNull()) {
BlockMap::iterator itOld = mapBlockIndex.find(state->hashLastUnknownBlock);
if (itOld != mapBlockIndex.end() && itOld->second->nChainWork > 0) {
if (state->pindexBestKnownBlock == NULL || itOld->second->nChainWork >= state->pindexBestKnownBlock->nChainWork)
state->pindexBestKnownBlock = itOld->second;
state->hashLastUnknownBlock.SetNull();
}
}
}
/** Update tracking information about which blocks a peer is assumed to have. */
void UpdateBlockAvailability(NodeId nodeid, const uint256 &hash) {
CNodeState *state = State(nodeid);
assert(state != NULL);
ProcessBlockAvailability(nodeid);
BlockMap::iterator it = mapBlockIndex.find(hash);
if (it != mapBlockIndex.end() && it->second->nChainWork > 0) {
// An actually better block was announced.
if (state->pindexBestKnownBlock == NULL || it->second->nChainWork >= state->pindexBestKnownBlock->nChainWork)
state->pindexBestKnownBlock = it->second;
} else {
// An unknown block was announced; just assume that the latest one is the best one.
state->hashLastUnknownBlock = hash;
}
}
// Requires cs_main
bool CanDirectFetch(const Consensus::Params &consensusParams)
{
return chainActive.Tip()->GetBlockTime() > GetAdjustedTime() - consensusParams.nPowTargetSpacing * 20;
}
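// Worked example (a sketch, assuming Dash's 2.5-minute nPowTargetSpacing):
// 20 * 150s = 3000s, so announced blocks are fetched directly only while our
// tip is within roughly 50 minutes of adjusted network time.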
// Requires cs_main
bool PeerHasHeader(CNodeState *state, CBlockIndex *pindex)
{
if (state->pindexBestKnownBlock && pindex == state->pindexBestKnownBlock->GetAncestor(pindex->nHeight))
return true;
if (state->pindexBestHeaderSent && pindex == state->pindexBestHeaderSent->GetAncestor(pindex->nHeight))
return true;
return false;
}
/** Find the last common ancestor two blocks have.
* Both pa and pb must be non-NULL. */
CBlockIndex* LastCommonAncestor(CBlockIndex* pa, CBlockIndex* pb) {
if (pa->nHeight > pb->nHeight) {
pa = pa->GetAncestor(pb->nHeight);
} else if (pb->nHeight > pa->nHeight) {
pb = pb->GetAncestor(pa->nHeight);
}
while (pa != pb && pa && pb) {
pa = pa->pprev;
pb = pb->pprev;
}
// Eventually all chain branches meet at the genesis block.
assert(pa == pb);
return pa;
}
/** Update pindexLastCommonBlock and add not-in-flight missing successors to vBlocks, until it has
* at most count entries. */
Backport "assumed valid blocks" feature from Bitcoin 0.13 (#1582) * IBD check uses minimumchain work instead of checkpoints. This introduces a 'minimum chain work' chainparam which is intended to be the known amount of work in the chain for the network at the time of software release. If you don't have this much work, you're not yet caught up. This is used instead of the count of blocks test from checkpoints. This criteria is trivial to keep updated as there is no element of subjectivity, trust, or position dependence to it. It is also a more reliable metric of sync status than a block count. * Remove GetTotalBlocksEstimate and checkpoint tests that test nothing. GetTotalBlocksEstimate is no longer used and it was the only thing the checkpoint tests were testing. Since checkpoints are on their way out it makes more sense to remove the test file than to cook up a new pointless test. # Conflicts: # src/Makefile.test.include # src/test/Checkpoints_tests.cpp * IsInitialBlockDownload no longer uses header-only timestamps. This avoids a corner case (mostly visible on testnet) where bogus headers can keep nodes in IsInitialBlockDownload. * Delay parallel block download until chain has sufficient work nMinimumChainWork is an anti-DoS threshold; wait until we have a proposed tip with more work than that before downloading blocks towards that tip. * Add timeout for headers sync At startup, we choose one peer to serve us the headers chain, until our best header is close to caught up. Disconnect this peer if more than 15 minutes + 1ms/expected_header passes and our best header is still more than 1 day away from current time. * Introduce assumevalid setting to skip presumed valid scripts. This disentangles the script validation skipping from checkpoints. A new option is introduced "assumevalid" which specifies a block whos ancestors we assume all have valid scriptsigs and so we do not check them when they are also burried under the best header by two weeks worth of work. Unlike checkpoints this has no influence on consensus unless you set it to a block with an invalid history. Because of this it can be easily be updated without risk of influencing the network consensus. This results in a massive IBD speedup. This approach was independently recommended by Peter Todd and Luke-Jr since POW based signature skipping (see PR#9180) does not have the verifiable properties of a specific hash and may create bad incentives. The downside is that, like checkpoints, the defaults bitrot and older releases will sync slower. On the plus side users can provide their own value here, and if they set it to something crazy all that will happen is more time will be spend validating signatures. Checkblocks and checklevel are also moved to the hidden debug options: Especially now that checkblocks has a low default there is little need to change these settings, and users frequently misunderstand them as influencing security or IBD speed. By hiding them we offset the space added by this new option. * Add consensusParams to FindNextBlocksToDownload * Adjust check in headers timeout logic to align with 144 blocks in Dash
2017-08-23 16:21:08 +02:00
void FindNextBlocksToDownload(NodeId nodeid, unsigned int count, std::vector<CBlockIndex*>& vBlocks, NodeId& nodeStaller, const Consensus::Params& consensusParams) {
if (count == 0)
return;
vBlocks.reserve(vBlocks.size() + count);
CNodeState *state = State(nodeid);
assert(state != NULL);
// Make sure pindexBestKnownBlock is up to date, we'll need it.
ProcessBlockAvailability(nodeid);
Backport "assumed valid blocks" feature from Bitcoin 0.13 (#1582) * IBD check uses minimumchain work instead of checkpoints. This introduces a 'minimum chain work' chainparam which is intended to be the known amount of work in the chain for the network at the time of software release. If you don't have this much work, you're not yet caught up. This is used instead of the count of blocks test from checkpoints. This criteria is trivial to keep updated as there is no element of subjectivity, trust, or position dependence to it. It is also a more reliable metric of sync status than a block count. * Remove GetTotalBlocksEstimate and checkpoint tests that test nothing. GetTotalBlocksEstimate is no longer used and it was the only thing the checkpoint tests were testing. Since checkpoints are on their way out it makes more sense to remove the test file than to cook up a new pointless test. # Conflicts: # src/Makefile.test.include # src/test/Checkpoints_tests.cpp * IsInitialBlockDownload no longer uses header-only timestamps. This avoids a corner case (mostly visible on testnet) where bogus headers can keep nodes in IsInitialBlockDownload. * Delay parallel block download until chain has sufficient work nMinimumChainWork is an anti-DoS threshold; wait until we have a proposed tip with more work than that before downloading blocks towards that tip. * Add timeout for headers sync At startup, we choose one peer to serve us the headers chain, until our best header is close to caught up. Disconnect this peer if more than 15 minutes + 1ms/expected_header passes and our best header is still more than 1 day away from current time. * Introduce assumevalid setting to skip presumed valid scripts. This disentangles the script validation skipping from checkpoints. A new option is introduced "assumevalid" which specifies a block whos ancestors we assume all have valid scriptsigs and so we do not check them when they are also burried under the best header by two weeks worth of work. Unlike checkpoints this has no influence on consensus unless you set it to a block with an invalid history. Because of this it can be easily be updated without risk of influencing the network consensus. This results in a massive IBD speedup. This approach was independently recommended by Peter Todd and Luke-Jr since POW based signature skipping (see PR#9180) does not have the verifiable properties of a specific hash and may create bad incentives. The downside is that, like checkpoints, the defaults bitrot and older releases will sync slower. On the plus side users can provide their own value here, and if they set it to something crazy all that will happen is more time will be spend validating signatures. Checkblocks and checklevel are also moved to the hidden debug options: Especially now that checkblocks has a low default there is little need to change these settings, and users frequently misunderstand them as influencing security or IBD speed. By hiding them we offset the space added by this new option. * Add consensusParams to FindNextBlocksToDownload * Adjust check in headers timeout logic to align with 144 blocks in Dash
2017-08-23 16:21:08 +02:00
if (state->pindexBestKnownBlock == NULL || state->pindexBestKnownBlock->nChainWork < chainActive.Tip()->nChainWork || state->pindexBestKnownBlock->nChainWork < UintToArith256(consensusParams.nMinimumChainWork)) {
// This peer has nothing interesting.
return;
}
if (state->pindexLastCommonBlock == NULL) {
// Bootstrap quickly by guessing a parent of our best tip is the forking point.
// Guessing wrong in either direction is not a problem.
state->pindexLastCommonBlock = chainActive[std::min(state->pindexBestKnownBlock->nHeight, chainActive.Height())];
}
// If the peer reorganized, our previous pindexLastCommonBlock may not be an ancestor
// of its current tip anymore. Go back enough to fix that.
state->pindexLastCommonBlock = LastCommonAncestor(state->pindexLastCommonBlock, state->pindexBestKnownBlock);
if (state->pindexLastCommonBlock == state->pindexBestKnownBlock)
return;
std::vector<CBlockIndex*> vToFetch;
CBlockIndex *pindexWalk = state->pindexLastCommonBlock;
// Never fetch further than the best block we know the peer has, or more than BLOCK_DOWNLOAD_WINDOW + 1 beyond the last
// linked block we have in common with this peer. The +1 is so we can detect stalling, namely if we would be able to
// download that next block if the window were 1 larger.
int nWindowEnd = state->pindexLastCommonBlock->nHeight + BLOCK_DOWNLOAD_WINDOW;
int nMaxHeight = std::min<int>(state->pindexBestKnownBlock->nHeight, nWindowEnd + 1);
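// Worked example (a sketch, assuming BLOCK_DOWNLOAD_WINDOW == 1024): with the
// last common block at height 1000, nWindowEnd is 2024 and nMaxHeight is at
// most 2025; heights 1001..2024 may be queued for download, while height 2025
// is only used to detect that a stalling peer is blocking the window.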
NodeId waitingfor = -1;
while (pindexWalk->nHeight < nMaxHeight) {
// Read up to 128 (or more, if more blocks than that are needed) successors of pindexWalk (towards
// pindexBestKnownBlock) into vToFetch. We fetch 128, because CBlockIndex::GetAncestor may be as expensive
// as iterating over ~100 CBlockIndex* entries anyway.
int nToFetch = std::min(nMaxHeight - pindexWalk->nHeight, std::max<int>(count - vBlocks.size(), 128));
vToFetch.resize(nToFetch);
pindexWalk = state->pindexBestKnownBlock->GetAncestor(pindexWalk->nHeight + nToFetch);
vToFetch[nToFetch - 1] = pindexWalk;
for (unsigned int i = nToFetch - 1; i > 0; i--) {
vToFetch[i - 1] = vToFetch[i]->pprev;
}
// Iterate over those blocks in vToFetch (in forward direction), adding the ones that
// are not yet downloaded and not in flight to vBlocks. In the mean time, update
// pindexLastCommonBlock as long as all ancestors are already downloaded, or if it's
// already part of our chain (and therefore don't need it even if pruned).
BOOST_FOREACH(CBlockIndex* pindex, vToFetch) {
if (!pindex->IsValid(BLOCK_VALID_TREE)) {
// We consider the chain that this peer is on invalid.
return;
}
if (pindex->nStatus & BLOCK_HAVE_DATA || chainActive.Contains(pindex)) {
if (pindex->nChainTx)
state->pindexLastCommonBlock = pindex;
} else if (mapBlocksInFlight.count(pindex->GetBlockHash()) == 0) {
// The block is not already downloaded, and not yet in flight.
if (pindex->nHeight > nWindowEnd) {
// We reached the end of the window.
if (vBlocks.size() == 0 && waitingfor != nodeid) {
// We aren't able to fetch anything, but we would be if the download window was one larger.
nodeStaller = waitingfor;
}
return;
}
vBlocks.push_back(pindex);
if (vBlocks.size() == count) {
return;
}
} else if (waitingfor == -1) {
// This is the first already-in-flight block.
waitingfor = mapBlocksInFlight[pindex->GetBlockHash()].first;
}
}
}
}
} // anon namespace
bool GetNodeStateStats(NodeId nodeid, CNodeStateStats &stats) {
LOCK(cs_main);
CNodeState *state = State(nodeid);
if (state == NULL)
return false;
stats.nMisbehavior = state->nMisbehavior;
stats.nSyncHeight = state->pindexBestKnownBlock ? state->pindexBestKnownBlock->nHeight : -1;
stats.nCommonHeight = state->pindexLastCommonBlock ? state->pindexLastCommonBlock->nHeight : -1;
BOOST_FOREACH(const QueuedBlock& queue, state->vBlocksInFlight) {
if (queue.pindex)
stats.vHeightInFlight.push_back(queue.pindex->nHeight);
}
return true;
}
void RegisterNodeSignals(CNodeSignals& nodeSignals)
{
nodeSignals.ProcessMessages.connect(&ProcessMessages);
nodeSignals.SendMessages.connect(&SendMessages);
nodeSignals.InitializeNode.connect(&InitializeNode);
nodeSignals.FinalizeNode.connect(&FinalizeNode);
}
void UnregisterNodeSignals(CNodeSignals& nodeSignals)
{
nodeSignals.ProcessMessages.disconnect(&ProcessMessages);
nodeSignals.SendMessages.disconnect(&SendMessages);
nodeSignals.InitializeNode.disconnect(&InitializeNode);
nodeSignals.FinalizeNode.disconnect(&FinalizeNode);
}
//////////////////////////////////////////////////////////////////////////////
//
// mapOrphanTransactions
//
bool AddOrphanTx(const CTransaction& tx, NodeId peer) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
{
uint256 hash = tx.GetHash();
if (mapOrphanTransactions.count(hash))
return false;
// Ignore big transactions, to avoid a
// send-big-orphans memory exhaustion attack. If a peer has a legitimate
// large transaction with a missing parent then we assume
// it will rebroadcast it later, after the parent transaction(s)
// have been mined or received.
// 10,000 orphans, each of which is at most 5,000 bytes big is
// at most 50 megabytes of orphans:
unsigned int sz = tx.GetSerializeSize(SER_NETWORK, CTransaction::CURRENT_VERSION);
if (sz > 5000)
{
LogPrint("mempool", "ignoring large orphan tx (size: %u, hash: %s)\n", sz, hash.ToString());
return false;
}
mapOrphanTransactions[hash].tx = tx;
mapOrphanTransactions[hash].fromPeer = peer;
BOOST_FOREACH(const CTxIn& txin, tx.vin)
mapOrphanTransactionsByPrev[txin.prevout.hash].insert(hash);
LogPrint("mempool", "stored orphan tx %s (mapsz %u prevsz %u)\n", hash.ToString(),
mapOrphanTransactions.size(), mapOrphanTransactionsByPrev.size());
return true;
}
void static EraseOrphanTx(uint256 hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
{
map<uint256, COrphanTx>::iterator it = mapOrphanTransactions.find(hash);
if (it == mapOrphanTransactions.end())
return;
BOOST_FOREACH(const CTxIn& txin, it->second.tx.vin)
{
map<uint256, set<uint256> >::iterator itPrev = mapOrphanTransactionsByPrev.find(txin.prevout.hash);
if (itPrev == mapOrphanTransactionsByPrev.end())
continue;
itPrev->second.erase(hash);
if (itPrev->second.empty())
mapOrphanTransactionsByPrev.erase(itPrev);
}
mapOrphanTransactions.erase(it);
}
void EraseOrphansFor(NodeId peer)
{
int nErased = 0;
map<uint256, COrphanTx>::iterator iter = mapOrphanTransactions.begin();
while (iter != mapOrphanTransactions.end())
{
map<uint256, COrphanTx>::iterator maybeErase = iter++; // increment to avoid iterator becoming invalid
if (maybeErase->second.fromPeer == peer)
{
EraseOrphanTx(maybeErase->second.tx.GetHash());
++nErased;
}
}
if (nErased > 0) LogPrint("mempool", "Erased %d orphan tx from peer %d\n", nErased, peer);
}
unsigned int LimitOrphanTxSize(unsigned int nMaxOrphans) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
{
unsigned int nEvicted = 0;
while (mapOrphanTransactions.size() > nMaxOrphans)
{
// Evict a random orphan:
uint256 randomhash = GetRandHash();
map<uint256, COrphanTx>::iterator it = mapOrphanTransactions.lower_bound(randomhash);
if (it == mapOrphanTransactions.end())
it = mapOrphanTransactions.begin();
EraseOrphanTx(it->first);
++nEvicted;
}
return nEvicted;
}
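// Sketch of the eviction trick above: drawing a uniform random 256-bit hash
// and taking lower_bound() in the ordered map picks an approximately uniform
// random entry with no extra bookkeeping; wrapping around to begin() covers
// the case where the random hash sorts after every stored txid.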
// Requires cs_main.
void Misbehaving(NodeId pnode, int howmuch)
{
if (howmuch == 0)
return;
CNodeState *state = State(pnode);
if (state == NULL)
return;
state->nMisbehavior += howmuch;
int banscore = GetArg("-banscore", DEFAULT_BANSCORE_THRESHOLD);
if (state->nMisbehavior >= banscore && state->nMisbehavior - howmuch < banscore)
{
LogPrintf("%s: %s (%d -> %d) BAN THRESHOLD EXCEEDED\n", __func__, state->name, state->nMisbehavior-howmuch, state->nMisbehavior);
state->fShouldBan = true;
} else
LogPrintf("%s: %s (%d -> %d)\n", __func__, state->name, state->nMisbehavior-howmuch, state->nMisbehavior);
}
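// A minimal sketch of the threshold check above, assuming the default
// -banscore of 100; only the call that crosses the line marks the peer:
//
//     Misbehaving(nodeid, 20); // 80 -> 100: logs "BAN THRESHOLD EXCEEDED", sets fShouldBan
//     Misbehaving(nodeid, 20); // 100 -> 120: already past the threshold, logs only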
//////////////////////////////////////////////////////////////////////////////
//
// blockchain -> download logic notification
//
PeerLogicValidation::PeerLogicValidation(CConnman* connmanIn) : connman(connmanIn) {
// Initialize global variables that cannot be constructed at startup.
recentRejects.reset(new CRollingBloomFilter(120000, 0.000001));
}
void PeerLogicValidation::UpdatedBlockTip(const CBlockIndex *pindexNew, const CBlockIndex *pindexFork, bool fInitialDownload) {
const int nNewHeight = pindexNew->nHeight;
connman->SetBestHeight(nNewHeight);
if (!fInitialDownload) {
// Find the hashes of all blocks that weren't previously in the best chain.
std::vector<uint256> vHashes;
const CBlockIndex *pindexToAnnounce = pindexNew;
while (pindexToAnnounce != pindexFork) {
vHashes.push_back(pindexToAnnounce->GetBlockHash());
pindexToAnnounce = pindexToAnnounce->pprev;
if (vHashes.size() == MAX_BLOCKS_TO_ANNOUNCE) {
// Limit announcements in case of a huge reorganization.
// Rely on the peer's synchronization mechanism in that case.
break;
}
}
// Relay inventory, but don't relay old inventory during initial block download.
connman->ForEachNode([nNewHeight, &vHashes](CNode* pnode) {
if (nNewHeight > (pnode->nStartingHeight != -1 ? pnode->nStartingHeight - 2000 : 0)) {
BOOST_REVERSE_FOREACH(const uint256& hash, vHashes) {
pnode->PushBlockHash(hash);
}
}
});
}
nTimeBestReceived = GetTime();
}
void PeerLogicValidation::BlockChecked(const CBlock& block, const CValidationState& state) {
LOCK(cs_main);
const uint256 hash(block.GetHash());
std::map<uint256, NodeId>::iterator it = mapBlockSource.find(hash);
int nDoS = 0;
if (state.IsInvalid(nDoS)) {
if (it != mapBlockSource.end() && State(it->second)) {
assert (state.GetRejectCode() < REJECT_INTERNAL); // Blocks are never rejected with internal reject codes
CBlockReject reject = {(unsigned char)state.GetRejectCode(), state.GetRejectReason().substr(0, MAX_REJECT_MESSAGE_LENGTH), hash};
State(it->second)->rejects.push_back(reject);
if (nDoS > 0)
Misbehaving(it->second, nDoS);
}
}
if (it != mapBlockSource.end())
mapBlockSource.erase(it);
}
//////////////////////////////////////////////////////////////////////////////
//
// Messages
//
bool static AlreadyHave(const CInv& inv) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
{
switch (inv.type)
{
case MSG_TX:
{
assert(recentRejects);
if (chainActive.Tip()->GetBlockHash() != hashRecentRejectsChainTip)
{
// If the chain tip has changed previously rejected transactions
// might be now valid, e.g. due to a nLockTime'd tx becoming valid,
// or a double-spend. Reset the rejects filter and give those
// txs a second chance.
hashRecentRejectsChainTip = chainActive.Tip()->GetBlockHash();
recentRejects->reset();
}
return recentRejects->contains(inv.hash) ||
mempool.exists(inv.hash) ||
mapOrphanTransactions.count(inv.hash) ||
pcoinsTip->HaveCoinInCache(COutPoint(inv.hash, 0)) || // Best effort: only try output 0 and 1
pcoinsTip->HaveCoinInCache(COutPoint(inv.hash, 1));
}
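// Note on the MSG_TX check above (a sketch of the reasoning): with the
// per-txout chainstate there is no txid-level lookup, so the cache is probed
// for the two lowest-index outpoints as a cheap best-effort test; a miss here
// at worst causes a harmless re-request.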
case MSG_BLOCK:
return mapBlockIndex.count(inv.hash);
/*
Dash Related Inventory Messages
--
We shouldn't update the sync times for each of the messages when we already have it.
We're going to be asking many nodes upfront for the full inventory list, so we'll get duplicates of these.
We want to only update the time on new hits, so that we can time out appropriately if needed.
*/
case MSG_TXLOCK_REQUEST:
return instantsend.AlreadyHave(inv.hash);
case MSG_TXLOCK_VOTE:
return instantsend.AlreadyHave(inv.hash);
case MSG_SPORK:
return mapSporks.count(inv.hash);
case MSG_MASTERNODE_PAYMENT_VOTE:
return mnpayments.mapMasternodePaymentVotes.count(inv.hash);
case MSG_MASTERNODE_PAYMENT_BLOCK:
{
BlockMap::iterator mi = mapBlockIndex.find(inv.hash);
return mi != mapBlockIndex.end() && mnpayments.mapMasternodeBlocks.find(mi->second->nHeight) != mnpayments.mapMasternodeBlocks.end();
}
case MSG_MASTERNODE_ANNOUNCE:
return mnodeman.mapSeenMasternodeBroadcast.count(inv.hash) && !mnodeman.IsMnbRecoveryRequested(inv.hash);
case MSG_MASTERNODE_PING:
return mnodeman.mapSeenMasternodePing.count(inv.hash);
case MSG_DSTX: {
return static_cast<bool>(CPrivateSend::GetDSTX(inv.hash));
}
case MSG_GOVERNANCE_OBJECT:
case MSG_GOVERNANCE_OBJECT_VOTE:
return ! governance.ConfirmInventoryRequest(inv);
case MSG_MASTERNODE_VERIFY:
return mnodeman.mapSeenMasternodeVerification.count(inv.hash);
}
// Don't know what it is, just say we already got one
return true;
}
static void RelayAddress(const CAddress& addr, bool fReachable, CConnman& connman)
{
int nRelayNodes = fReachable ? 2 : 1; // limited relaying of addresses outside our network(s)
// Relay to a limited number of other nodes
// Use deterministic randomness to send to the same nodes for 24 hours
// at a time so the addrKnowns of the chosen nodes prevent repeats
static uint256 hashSalt;
if (hashSalt.IsNull())
hashSalt = GetRandHash();
uint64_t hashAddr = addr.GetHash();
uint256 hashRand = ArithToUint256(UintToArith256(hashSalt) ^ (hashAddr<<32) ^ ((GetTime()+hashAddr)/(24*60*60)));
hashRand = Hash(BEGIN(hashRand), END(hashRand));
std::multimap<uint256, CNode*> mapMix;
auto sortfunc = [&mapMix, &hashRand](CNode* pnode) {
if (pnode->nVersion >= CADDR_TIME_VERSION) {
unsigned int nPointer;
memcpy(&nPointer, &pnode, sizeof(nPointer));
uint256 hashKey = ArithToUint256(UintToArith256(hashRand) ^ nPointer);
hashKey = Hash(BEGIN(hashKey), END(hashKey));
mapMix.emplace(hashKey, pnode);
}
};
auto pushfunc = [&addr, &mapMix, &nRelayNodes] {
for (auto mi = mapMix.begin(); mi != mapMix.end() && nRelayNodes-- > 0; ++mi)
mi->second->PushAddress(addr);
};
connman.ForEachNodeThen(std::move(sortfunc), std::move(pushfunc));
}
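// Sketch of the selection above: hashRand commits to a secret salt, the
// address hash, and the current 24-hour bucket ((GetTime()+hashAddr)/86400),
// and each candidate peer is ranked by Hash(hashRand ^ per-peer entropy).
// The lowest-ranked 1-2 peers therefore stay fixed for a given address for a
// full day, letting their addrKnown filters suppress repeated relays.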
void static ProcessGetData(CNode* pfrom, const Consensus::Params& consensusParams, CConnman& connman, std::atomic<bool>& interruptMsgProc)
{
std::deque<CInv>::iterator it = pfrom->vRecvGetData.begin();
vector<CInv> vNotFound;
LOCK(cs_main);
while (it != pfrom->vRecvGetData.end()) {
// Don't bother if send buffer is too full to respond anyway
if (pfrom->fPauseSend)
break;
const CInv &inv = *it;
LogPrint("net", "ProcessGetData -- inv = %s\n", inv.ToString());
{
if (interruptMsgProc)
return;
it++;
if (inv.type == MSG_BLOCK || inv.type == MSG_FILTERED_BLOCK)
{
bool send = false;
BlockMap::iterator mi = mapBlockIndex.find(inv.hash);
if (mi != mapBlockIndex.end())
{
if (chainActive.Contains(mi->second)) {
send = true;
} else {
static const int nOneMonth = 30 * 24 * 60 * 60;
// To prevent fingerprinting attacks, only send blocks outside of the active
// chain if they are valid, and no more than a month older (both in time, and in
// best equivalent proof of work) than the best header chain we know about.
send = mi->second->IsValid(BLOCK_VALID_SCRIPTS) && (pindexBestHeader != NULL) &&
(pindexBestHeader->GetBlockTime() - mi->second->GetBlockTime() < nOneMonth) &&
(GetBlockProofEquivalentTime(*pindexBestHeader, *mi->second, *pindexBestHeader, consensusParams) < nOneMonth);
if (!send) {
LogPrintf("%s: ignoring request from peer=%i for old block that isn't in the main chain\n", __func__, pfrom->GetId());
}
}
}
// disconnect node in case we have reached the outbound limit for serving historical blocks
// never disconnect whitelisted nodes
static const int nOneWeek = 7 * 24 * 60 * 60; // assume > 1 week = historical
if (send && connman.OutboundTargetReached(true) && ( ((pindexBestHeader != NULL) && (pindexBestHeader->GetBlockTime() - mi->second->GetBlockTime() > nOneWeek)) || inv.type == MSG_FILTERED_BLOCK) && !pfrom->fWhitelisted)
{
LogPrint("net", "historical block serving limit reached, disconnect peer=%d\n", pfrom->GetId());
//disconnect node
pfrom->fDisconnect = true;
send = false;
}
// Pruned nodes may have deleted the block, so check whether
// it's available before trying to send.
if (send && (mi->second->nStatus & BLOCK_HAVE_DATA)) {
// Send block from disk
CBlock block;
if (!ReadBlockFromDisk(block, (*mi).second, consensusParams))
assert(!"cannot load block from disk");
if (inv.type == MSG_BLOCK)
connman.PushMessage(pfrom, NetMsgType::BLOCK, block);
else // MSG_FILTERED_BLOCK
{
LOCK(pfrom->cs_filter);
if (pfrom->pfilter)
{
CMerkleBlock merkleBlock(block, *pfrom->pfilter);
connman.PushMessage(pfrom, NetMsgType::MERKLEBLOCK, merkleBlock);
// CMerkleBlock just contains hashes, so also push any transactions in the block the client did not see
// This avoids hurting performance by pointlessly requiring a round-trip
// Note that there is currently no way for a node to request any single transactions we didn't send here -
// they must either disconnect and retry or request the full block.
// Thus, the protocol spec allows us to provide duplicate txn here;
// however, we MUST always provide at least what the remote peer needs.
typedef std::pair<unsigned int, uint256> PairType;
BOOST_FOREACH(PairType& pair, merkleBlock.vMatchedTxn)
connman.PushMessage(pfrom, NetMsgType::TX, block.vtx[pair.first]);
}
// else
// no response
}
// Trigger the peer node to send a getblocks request for the next batch of inventory
if (inv.hash == pfrom->hashContinue)
{
// Bypass PushInventory, this must send even if redundant,
// and we want it right after the last block so they don't
// wait for other stuff first.
vector<CInv> vInv;
vInv.push_back(CInv(MSG_BLOCK, chainActive.Tip()->GetBlockHash()));
connman.PushMessage(pfrom, NetMsgType::INV, vInv);
pfrom->hashContinue.SetNull();
}
}
}
else if (inv.IsKnownType())
{
// Send stream from relay memory
bool pushed = false;
{
CDataStream ss(SER_NETWORK, PROTOCOL_VERSION);
{
LOCK(cs_mapRelay);
map<CInv, CDataStream>::iterator mi = mapRelay.find(inv);
if (mi != mapRelay.end()) {
ss += (*mi).second;
pushed = true;
}
}
if(pushed)
connman.PushMessage(pfrom, inv.GetCommand(), ss);
}
if (!pushed && inv.type == MSG_TX) {
CTransaction tx;
if (mempool.lookup(inv.hash, tx)) {
CDataStream ss(SER_NETWORK, PROTOCOL_VERSION);
ss.reserve(1000);
ss << tx;
connman.PushMessage(pfrom, NetMsgType::TX, ss);
pushed = true;
}
}
if (!pushed && inv.type == MSG_TXLOCK_REQUEST) {
CTxLockRequest txLockRequest;
if(instantsend.GetTxLockRequest(inv.hash, txLockRequest)) {
CDataStream ss(SER_NETWORK, PROTOCOL_VERSION);
ss.reserve(1000);
ss << txLockRequest;
connman.PushMessage(pfrom, NetMsgType::TXLOCKREQUEST, ss);
pushed = true;
}
}
if (!pushed && inv.type == MSG_TXLOCK_VOTE) {
CTxLockVote vote;
if(instantsend.GetTxLockVote(inv.hash, vote)) {
CDataStream ss(SER_NETWORK, PROTOCOL_VERSION);
ss.reserve(1000);
ss << vote;
connman.PushMessage(pfrom, NetMsgType::TXLOCKVOTE, ss);
pushed = true;
}
}
if (!pushed && inv.type == MSG_SPORK) {
if(mapSporks.count(inv.hash)) {
CDataStream ss(SER_NETWORK, PROTOCOL_VERSION);
ss.reserve(1000);
ss << mapSporks[inv.hash];
connman.PushMessage(pfrom, NetMsgType::SPORK, ss);
pushed = true;
}
}
if (!pushed && inv.type == MSG_MASTERNODE_PAYMENT_VOTE) {
if(mnpayments.HasVerifiedPaymentVote(inv.hash)) {
CDataStream ss(SER_NETWORK, PROTOCOL_VERSION);
ss.reserve(1000);
ss << mnpayments.mapMasternodePaymentVotes[inv.hash];
connman.PushMessage(pfrom, NetMsgType::MASTERNODEPAYMENTVOTE, ss);
pushed = true;
}
}
if (!pushed && inv.type == MSG_MASTERNODE_PAYMENT_BLOCK) {
BlockMap::iterator mi = mapBlockIndex.find(inv.hash);
LOCK(cs_mapMasternodeBlocks);
if (mi != mapBlockIndex.end() && mnpayments.mapMasternodeBlocks.count(mi->second->nHeight)) {
BOOST_FOREACH(CMasternodePayee& payee, mnpayments.mapMasternodeBlocks[mi->second->nHeight].vecPayees) {
std::vector<uint256> vecVoteHashes = payee.GetVoteHashes();
BOOST_FOREACH(uint256& hash, vecVoteHashes) {
if(mnpayments.HasVerifiedPaymentVote(hash)) {
CDataStream ss(SER_NETWORK, PROTOCOL_VERSION);
ss.reserve(1000);
ss << mnpayments.mapMasternodePaymentVotes[hash];
connman.PushMessage(pfrom, NetMsgType::MASTERNODEPAYMENTVOTE, ss);
}
}
}
pushed = true;
}
}
if (!pushed && inv.type == MSG_MASTERNODE_ANNOUNCE) {
if(mnodeman.mapSeenMasternodeBroadcast.count(inv.hash)){
CDataStream ss(SER_NETWORK, PROTOCOL_VERSION);
ss.reserve(1000);
ss << mnodeman.mapSeenMasternodeBroadcast[inv.hash].second;
connman.PushMessage(pfrom, NetMsgType::MNANNOUNCE, ss);
pushed = true;
}
}
if (!pushed && inv.type == MSG_MASTERNODE_PING) {
if(mnodeman.mapSeenMasternodePing.count(inv.hash)) {
CDataStream ss(SER_NETWORK, PROTOCOL_VERSION);
ss.reserve(1000);
ss << mnodeman.mapSeenMasternodePing[inv.hash];
connman.PushMessage(pfrom, NetMsgType::MNPING, ss);
pushed = true;
}
}
if (!pushed && inv.type == MSG_DSTX) {
CDarksendBroadcastTx dstx = CPrivateSend::GetDSTX(inv.hash);
if(dstx) {
CDataStream ss(SER_NETWORK, PROTOCOL_VERSION);
ss.reserve(1000);
ss << dstx;
connman.PushMessage(pfrom, NetMsgType::DSTX, ss);
pushed = true;
}
}
if (!pushed && inv.type == MSG_GOVERNANCE_OBJECT) {
LogPrint("net", "ProcessGetData -- MSG_GOVERNANCE_OBJECT: inv = %s\n", inv.ToString());
CDataStream ss(SER_NETWORK, PROTOCOL_VERSION);
bool topush = false;
{
if(governance.HaveObjectForHash(inv.hash)) {
ss.reserve(1000);
if(governance.SerializeObjectForHash(inv.hash, ss)) {
topush = true;
}
}
}
LogPrint("net", "ProcessGetData -- MSG_GOVERNANCE_OBJECT: topush = %d, inv = %s\n", topush, inv.ToString());
if(topush) {
connman.PushMessage(pfrom, NetMsgType::MNGOVERNANCEOBJECT, ss);
pushed = true;
}
}
if (!pushed && inv.type == MSG_GOVERNANCE_OBJECT_VOTE) {
CDataStream ss(SER_NETWORK, PROTOCOL_VERSION);
bool topush = false;
{
if(governance.HaveVoteForHash(inv.hash)) {
ss.reserve(1000);
if(governance.SerializeVoteForHash(inv.hash, ss)) {
topush = true;
}
}
}
if(topush) {
LogPrint("net", "ProcessGetData -- pushing: inv = %s\n", inv.ToString());
connman.PushMessage(pfrom, NetMsgType::MNGOVERNANCEOBJECTVOTE, ss);
pushed = true;
}
}
if (!pushed && inv.type == MSG_MASTERNODE_VERIFY) {
if(mnodeman.mapSeenMasternodeVerification.count(inv.hash)) {
CDataStream ss(SER_NETWORK, PROTOCOL_VERSION);
ss.reserve(1000);
ss << mnodeman.mapSeenMasternodeVerification[inv.hash];
connman.PushMessage(pfrom, NetMsgType::MNVERIFY, ss);
pushed = true;
}
}
if (!pushed)
vNotFound.push_back(inv);
}
// Track requests for our stuff.
GetMainSignals().Inventory(inv.hash);
if (inv.type == MSG_BLOCK || inv.type == MSG_FILTERED_BLOCK)
break;
}
}
pfrom->vRecvGetData.erase(pfrom->vRecvGetData.begin(), it);
if (!vNotFound.empty()) {
// Let the peer know that we didn't find what it asked for, so it doesn't
// have to wait around forever. Currently only SPV clients actually care
// about this message: it's needed when they are recursively walking the
// dependencies of relevant unconfirmed transactions. SPV clients want to
// do that because they want to know about (and store and rebroadcast and
// risk analyze) the dependencies of transactions relevant to them, without
// having to download the entire memory pool.
connman.PushMessage(pfrom, NetMsgType::NOTFOUND, vNotFound);
}
}
bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv, int64_t nTimeReceived, CConnman& connman, std::atomic<bool>& interruptMsgProc)
{
const CChainParams& chainparams = Params();
RandAddSeedPerfmon();
LogPrint("net", "received: %s (%u bytes) peer=%d\n", SanitizeString(strCommand), vRecv.size(), pfrom->id);
if (mapArgs.count("-dropmessagestest") && GetRand(atoi(mapArgs["-dropmessagestest"])) == 0)
{
LogPrintf("dropmessagestest DROPPING RECV MESSAGE\n");
return true;
}
if (!(pfrom->GetLocalServices() & NODE_BLOOM) &&
(strCommand == NetMsgType::FILTERLOAD ||
strCommand == NetMsgType::FILTERADD ||
strCommand == NetMsgType::FILTERCLEAR))
{
if (pfrom->nVersion >= NO_BLOOM_VERSION) {
LOCK(cs_main);
Misbehaving(pfrom->GetId(), 100);
return false;
} else if (GetBoolArg("-enforcenodebloom", false)) {
pfrom->fDisconnect = true;
return false;
}
}
if (strCommand == NetMsgType::VERSION)
{
// Feeler connections exist only to verify if address is online.
if (pfrom->fFeeler) {
assert(pfrom->fInbound == false);
pfrom->fDisconnect = true;
}
// Each connection can only send one version message
if (pfrom->nVersion != 0)
{
connman.PushMessageWithVersion(pfrom, INIT_PROTO_VERSION, NetMsgType::REJECT, strCommand, REJECT_DUPLICATE, string("Duplicate version message"));
LOCK(cs_main);
Misbehaving(pfrom->GetId(), 1);
return false;
}
int64_t nTime;
CAddress addrMe;
CAddress addrFrom;
uint64_t nNonce = 1;
uint64_t nServiceInt;
ServiceFlags nServices;
int nVersion;
int nSendVersion;
std::string strSubVer;
int nStartingHeight = -1;
bool fRelay = true;
vRecv >> nVersion >> nServiceInt >> nTime >> addrMe;
nSendVersion = std::min(nVersion, PROTOCOL_VERSION);
nServices = ServiceFlags(nServiceInt);
if (!pfrom->fInbound)
{
connman.SetServices(pfrom->addr, nServices);
}
if (pfrom->nServicesExpected & ~nServices)
{
LogPrint("net", "peer=%d does not offer the expected services (%08x offered, %08x expected); disconnecting\n", pfrom->id, nServices, pfrom->nServicesExpected);
connman.PushMessageWithVersion(pfrom, INIT_PROTO_VERSION, NetMsgType::REJECT, strCommand, REJECT_NONSTANDARD,
strprintf("Expected to offer services %08x", pfrom->nServicesExpected));
pfrom->fDisconnect = true;
return false;
}
if (nVersion < MIN_PEER_PROTO_VERSION)
{
// disconnect from peers older than this proto version
LogPrintf("peer=%d using obsolete version %i; disconnecting\n", pfrom->id, nVersion);
connman.PushMessageWithVersion(pfrom, INIT_PROTO_VERSION, NetMsgType::REJECT, strCommand, REJECT_OBSOLETE,
strprintf("Version must be %d or greater", MIN_PEER_PROTO_VERSION));
pfrom->fDisconnect = true;
return false;
}
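// Historical quirk: some very old clients reported protocol version 10300; remap it to 300
// so that subsequent version comparisons behave sensibly.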
if (nVersion == 10300)
nVersion = 300;
if (!vRecv.empty())
vRecv >> addrFrom >> nNonce;
if (!vRecv.empty()) {
vRecv >> LIMITED_STRING(strSubVer, MAX_SUBVERSION_LENGTH);
}
if (!vRecv.empty()) {
vRecv >> nStartingHeight;
}
if (!vRecv.empty())
vRecv >> fRelay;
// Disconnect if we connected to ourself
if (pfrom->fInbound && !connman.CheckIncomingNonce(nNonce))
{
LogPrintf("connected to self at %s, disconnecting\n", pfrom->addr.ToString());
pfrom->fDisconnect = true;
return true;
}
if (pfrom->fInbound && addrMe.IsRoutable())
{
SeenLocal(addrMe);
}
// Be shy and don't send version until we hear
if (pfrom->fInbound)
PushNodeVersion(pfrom, connman, GetAdjustedTime());
connman.PushMessageWithVersion(pfrom, INIT_PROTO_VERSION, NetMsgType::VERACK);
pfrom->nServices = nServices;
pfrom->addrLocal = addrMe;
pfrom->strSubVer = strSubVer;
pfrom->cleanSubVer = SanitizeString(strSubVer);
pfrom->nStartingHeight = nStartingHeight;
pfrom->fClient = !(nServices & NODE_NETWORK);
{
LOCK(pfrom->cs_filter);
pfrom->fRelayTxes = fRelay; // set to true after we get the first filter* message
}
// Change version
pfrom->SetSendVersion(nSendVersion);
pfrom->nVersion = nVersion;
// Potentially mark this peer as a preferred download peer.
{
LOCK(cs_main);
UpdatePreferredDownload(pfrom, State(pfrom->GetId()));
}
if (!pfrom->fInbound)
{
// Advertise our address
if (fListen && !IsInitialBlockDownload())
{
CAddress addr = GetLocalAddress(&pfrom->addr, pfrom->GetLocalServices());
if (addr.IsRoutable())
{
LogPrintf("ProcessMessages: advertising address %s\n", addr.ToString());
pfrom->PushAddress(addr);
} else if (IsPeerAddrLocalGood(pfrom)) {
addr.SetIP(pfrom->addrLocal);
LogPrintf("ProcessMessages: advertising address %s\n", addr.ToString());
pfrom->PushAddress(addr);
}
}
// Get recent addresses
if (pfrom->fOneShot || pfrom->nVersion >= CADDR_TIME_VERSION || connman.GetAddressCount() < 1000)
{
connman.PushMessage(pfrom, NetMsgType::GETADDR);
pfrom->fGetAddr = true;
}
connman.MarkAddressGood(pfrom->addr);
}
// Relay alerts
{
LOCK(cs_mapAlerts);
BOOST_FOREACH(PAIRTYPE(const uint256, CAlert)& item, mapAlerts)
item.second.RelayTo(pfrom, connman);
}
string remoteAddr;
if (fLogIPs)
remoteAddr = ", peeraddr=" + pfrom->addr.ToString();
LogPrintf("receive version message: %s: version %d, blocks=%d, us=%s, peer=%d%s\n",
pfrom->cleanSubVer, pfrom->nVersion,
pfrom->nStartingHeight, addrMe.ToString(), pfrom->id,
remoteAddr);
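// Record the peer's reported clock offset; AddTimeData feeds the network-adjusted time.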
int64_t nTimeOffset = nTime - GetTime();
pfrom->nTimeOffset = nTimeOffset;
AddTimeData(pfrom->addr, nTimeOffset);
}
else if (pfrom->nVersion == 0)
{
// Must have a version message before anything else
LOCK(cs_main);
Misbehaving(pfrom->GetId(), 1);
return false;
}
else if (strCommand == NetMsgType::VERACK)
{
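// From now on deserialize this peer's messages using the negotiated protocol version.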
pfrom->SetRecvVersion(std::min(pfrom->nVersion.load(), PROTOCOL_VERSION));
// Mark this node as currently connected, so we update its timestamp later.
if (pfrom->fNetworkNode) {
LOCK(cs_main);
State(pfrom->GetId())->fCurrentlyConnected = true;
}
if (pfrom->nVersion >= SENDHEADERS_VERSION) {
// Tell our peer we prefer to receive headers rather than inv's
// We send this to non-NODE NETWORK peers as well, because even
// non-NODE NETWORK peers can announce blocks (such as pruning
// nodes)
connman.PushMessage(pfrom, NetMsgType::SENDHEADERS);
}
pfrom->fSuccessfullyConnected = true;
}
else if (strCommand == NetMsgType::ADDR)
{
vector<CAddress> vAddr;
vRecv >> vAddr;
// Don't want addr from older versions unless seeding
if (pfrom->nVersion < CADDR_TIME_VERSION && connman.GetAddressCount() > 1000)
return true;
if (vAddr.size() > 1000)
{
LOCK(cs_main);
Misbehaving(pfrom->GetId(), 20);
return error("message addr size() = %u", vAddr.size());
}
// Store the new addresses
vector<CAddress> vAddrOk;
int64_t nNow = GetAdjustedTime();
int64_t nSince = nNow - 10 * 60;
BOOST_FOREACH(CAddress& addr, vAddr)
{
if (interruptMsgProc)
return true;
if ((addr.nServices & REQUIRED_SERVICES) != REQUIRED_SERVICES)
continue;
if (addr.nTime <= 100000000 || addr.nTime > nNow + 10 * 60)
addr.nTime = nNow - 5 * 24 * 60 * 60;
pfrom->AddAddressKnown(addr);
bool fReachable = IsReachable(addr);
if (addr.nTime > nSince && !pfrom->fGetAddr && vAddr.size() <= 10 && addr.IsRoutable())
{
RelayAddress(addr, fReachable, connman);
}
// Do not store addresses outside our network
if (fReachable)
vAddrOk.push_back(addr);
}
connman.AddNewAddresses(vAddrOk, pfrom->addr, 2 * 60 * 60);
if (vAddr.size() < 1000)
pfrom->fGetAddr = false;
if (pfrom->fOneShot)
pfrom->fDisconnect = true;
}
else if (strCommand == NetMsgType::SENDHEADERS)
{
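// The peer prefers to be announced new blocks via a "headers" message (BIP 130) rather than via "inv".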
LOCK(cs_main);
State(pfrom->GetId())->fPreferHeaders = true;
}
else if (strCommand == NetMsgType::INV)
{
vector<CInv> vInv;
vRecv >> vInv;
if (vInv.size() > MAX_INV_SZ)
{
LOCK(cs_main);
Misbehaving(pfrom->GetId(), 20);
return error("message inv size() = %u", vInv.size());
}
bool fBlocksOnly = !fRelayTxes;
// Allow whitelisted peers to send data other than blocks in blocks only mode if whitelistrelay is true
if (pfrom->fWhitelisted && GetBoolArg("-whitelistrelay", DEFAULT_WHITELISTRELAY))
fBlocksOnly = false;
LOCK(cs_main);
std::vector<CInv> vToFetch;
for (unsigned int nInv = 0; nInv < vInv.size(); nInv++)
{
const CInv &inv = vInv[nInv];
if(!inv.IsKnownType()) {
LogPrint("net", "got inv of unknown type %d: %s peer=%d\n", inv.type, inv.hash.ToString(), pfrom->id);
continue;
}
if (interruptMsgProc)
return true;
pfrom->AddInventoryKnown(inv);
bool fAlreadyHave = AlreadyHave(inv);
LogPrint("net", "got inv: %s %s peer=%d\n", inv.ToString(), fAlreadyHave ? "have" : "new", pfrom->id);
if (inv.type == MSG_BLOCK) {
UpdateBlockAvailability(pfrom->GetId(), inv.hash);
if (!fAlreadyHave && !fImporting && !fReindex && !mapBlocksInFlight.count(inv.hash)) {
if (chainparams.DelayGetHeadersTime() != 0 && pindexBestHeader->GetBlockTime() < GetAdjustedTime() - chainparams.DelayGetHeadersTime()) {
// We are pretty far from being completely synced at the moment. If we were to initiate a new
// chain of GETHEADERS/HEADERS now, we might end up downloading the full chain from multiple
// peers at the same time, slowing down the initial sync. At the same time, the peer we got this
// INV from may have a chain we don't know about yet, so we HAVE TO send a GETHEADERS message
// at some point. This is deferred to SendMessages, once the headers chain has caught up enough.
LogPrint("net", "delaying getheaders (%d) %s to peer=%d\n", pindexBestHeader->nHeight, inv.hash.ToString(), pfrom->id);
pfrom->PushBlockHashFromINV(inv.hash);
} else {
// First request the headers preceding the announced block. In the normal fully-synced
// case where a new block is announced that succeeds the current tip (no reorganization),
// there are no such headers.
// Secondly, and only when we are close to being synced, we request the announced block directly,
// to avoid an extra round-trip. Note that we must *first* ask for the headers, so by the
// time the block arrives, the header chain leading up to it is already validated. Not
// doing this will result in the received block being rejected as an orphan in case it is
// not a direct successor.
connman.PushMessage(pfrom, NetMsgType::GETHEADERS, chainActive.GetLocator(pindexBestHeader), inv.hash);
CNodeState *nodestate = State(pfrom->GetId());
if (CanDirectFetch(chainparams.GetConsensus()) &&
nodestate->nBlocksInFlight < MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
vToFetch.push_back(inv);
// Mark block as in flight already, even though the actual "getdata" message only goes out
// later (within the same cs_main lock, though).
MarkBlockAsInFlight(pfrom->GetId(), inv.hash, chainparams.GetConsensus());
}
LogPrint("net", "getheaders (%d) %s to peer=%d\n", pindexBestHeader->nHeight, inv.hash.ToString(), pfrom->id);
}
}
}
else
{
if (fBlocksOnly)
LogPrint("net", "transaction (%s) inv sent in violation of protocol peer=%d\n", inv.hash.ToString(), pfrom->id);
else if (!fAlreadyHave && !fImporting && !fReindex && !IsInitialBlockDownload())
pfrom->AskFor(inv);
}
// Track requests for our stuff
GetMainSignals().Inventory(inv.hash);
}
if (!vToFetch.empty())
connman.PushMessage(pfrom, NetMsgType::GETDATA, vToFetch);
}
else if (strCommand == NetMsgType::GETDATA)
{
vector<CInv> vInv;
vRecv >> vInv;
if (vInv.size() > MAX_INV_SZ)
{
LOCK(cs_main);
Misbehaving(pfrom->GetId(), 20);
return error("message getdata size() = %u", vInv.size());
}
if (fDebug || (vInv.size() != 1))
LogPrint("net", "received getdata (%u invsz) peer=%d\n", vInv.size(), pfrom->id);
if ((fDebug && vInv.size() > 0) || (vInv.size() == 1))
LogPrint("net", "received getdata for: %s peer=%d\n", vInv[0].ToString(), pfrom->id);
pfrom->vRecvGetData.insert(pfrom->vRecvGetData.end(), vInv.begin(), vInv.end());
ProcessGetData(pfrom, chainparams.GetConsensus(), connman, interruptMsgProc);
}
else if (strCommand == NetMsgType::GETBLOCKS)
{
CBlockLocator locator;
uint256 hashStop;
vRecv >> locator >> hashStop;
LOCK(cs_main);
// Find the last block the caller has in the main chain
CBlockIndex* pindex = FindForkInGlobalIndex(chainActive, locator);
// Send the rest of the chain
if (pindex)
pindex = chainActive.Next(pindex);
int nLimit = 500;
LogPrint("net", "getblocks %d to %s limit %d from peer=%d\n", (pindex ? pindex->nHeight : -1), hashStop.IsNull() ? "end" : hashStop.ToString(), nLimit, pfrom->id);
for (; pindex; pindex = chainActive.Next(pindex))
{
if (pindex->GetBlockHash() == hashStop)
{
LogPrint("net", " getblocks stopping at %d %s\n", pindex->nHeight, pindex->GetBlockHash().ToString());
break;
}
// If pruning, don't inv blocks unless we have them on disk and are likely to still have them
// for the reasonable time window (1 hour) that block relay might require.
const int nPrunedBlocksLikelyToHave = MIN_BLOCKS_TO_KEEP - 3600 / chainparams.GetConsensus().nPowTargetSpacing;
if (fPruneMode && (!(pindex->nStatus & BLOCK_HAVE_DATA) || pindex->nHeight <= chainActive.Tip()->nHeight - nPrunedBlocksLikelyToHave))
{
LogPrint("net", " getblocks stopping, pruned or too old block at %d %s\n", pindex->nHeight, pindex->GetBlockHash().ToString());
break;
}
pfrom->PushInventory(CInv(MSG_BLOCK, pindex->GetBlockHash()));
if (--nLimit <= 0)
{
// When this block is requested, we'll send an inv that'll
// trigger the peer to getblocks the next batch of inventory.
LogPrint("net", " getblocks stopping at limit %d %s\n", pindex->nHeight, pindex->GetBlockHash().ToString());
pfrom->hashContinue = pindex->GetBlockHash();
break;
}
}
}
else if (strCommand == NetMsgType::GETHEADERS)
{
CBlockLocator locator;
uint256 hashStop;
vRecv >> locator >> hashStop;
LOCK(cs_main);
if (IsInitialBlockDownload() && !pfrom->fWhitelisted) {
LogPrint("net", "Ignoring getheaders from peer=%d because node is in initial block download\n", pfrom->id);
return true;
}
CNodeState *nodestate = State(pfrom->GetId());
CBlockIndex* pindex = NULL;
if (locator.IsNull())
{
// If locator is null, return the hashStop block
BlockMap::iterator mi = mapBlockIndex.find(hashStop);
if (mi == mapBlockIndex.end())
return true;
pindex = (*mi).second;
}
else
{
// Find the last block the caller has in the main chain
pindex = FindForkInGlobalIndex(chainActive, locator);
if (pindex)
pindex = chainActive.Next(pindex);
}
// we must use CBlocks, as CBlockHeaders won't include the 0x00 nTx count at the end
vector<CBlock> vHeaders;
int nLimit = MAX_HEADERS_RESULTS;
LogPrint("net", "getheaders %d to %s from peer=%d\n", (pindex ? pindex->nHeight : -1), hashStop.ToString(), pfrom->id);
for (; pindex; pindex = chainActive.Next(pindex))
{
vHeaders.push_back(pindex->GetBlockHeader());
if (--nLimit <= 0 || pindex->GetBlockHash() == hashStop)
break;
}
// pindex can be NULL either if we sent chainActive.Tip() OR
// if our peer has chainActive.Tip() (and thus we are sending an empty
// headers message). In both cases it's safe to update
// pindexBestHeaderSent to be our tip.
nodestate->pindexBestHeaderSent = pindex ? pindex : chainActive.Tip();
connman.PushMessage(pfrom, NetMsgType::HEADERS, vHeaders);
}
else if (strCommand == NetMsgType::TX || strCommand == NetMsgType::DSTX || strCommand == NetMsgType::TXLOCKREQUEST)
{
// Stop processing the transaction early if we are in blocks-only mode and the peer
// is either not whitelisted or whitelistrelay is off
if (!fRelayTxes && (!pfrom->fWhitelisted || !GetBoolArg("-whitelistrelay", DEFAULT_WHITELISTRELAY)))
{
LogPrint("net", "transaction sent in violation of protocol peer=%d\n", pfrom->id);
return true;
}
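// vWorkQueue collects accepted txids whose orphan descendants should be reconsidered;
// vEraseQueue collects orphans to erase once processing is done.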
vector<uint256> vWorkQueue;
vector<uint256> vEraseQueue;
CTransaction tx;
CTxLockRequest txLockRequest;
CDarksendBroadcastTx dstx;
int nInvType = MSG_TX;
// Read data and assign inv type
if(strCommand == NetMsgType::TX) {
vRecv >> tx;
} else if(strCommand == NetMsgType::TXLOCKREQUEST) {
vRecv >> txLockRequest;
tx = txLockRequest;
nInvType = MSG_TXLOCK_REQUEST;
} else if (strCommand == NetMsgType::DSTX) {
vRecv >> dstx;
tx = dstx.tx;
nInvType = MSG_DSTX;
}
CInv inv(nInvType, tx.GetHash());
pfrom->AddInventoryKnown(inv);
pfrom->setAskFor.erase(inv.hash);
// Process custom logic regardless of whether the tx will be accepted to the mempool later
if (strCommand == NetMsgType::TXLOCKREQUEST) {
if(!instantsend.ProcessTxLockRequest(txLockRequest, connman)) {
LogPrint("instantsend", "TXLOCKREQUEST -- failed %s\n", txLockRequest.GetHash().ToString());
return false;
}
} else if (strCommand == NetMsgType::DSTX) {
uint256 hashTx = tx.GetHash();
if(CPrivateSend::GetDSTX(hashTx)) {
LogPrint("privatesend", "DSTX -- Already have %s, skipping...\n", hashTx.ToString());
return true; // not an error
}
CMasternode mn;
if(!mnodeman.Get(dstx.vin.prevout, mn)) {
LogPrint("privatesend", "DSTX -- Can't find masternode %s to verify %s\n", dstx.vin.prevout.ToStringShort(), hashTx.ToString());
return false;
}
if(!mn.fAllowMixingTx) {
LogPrint("privatesend", "DSTX -- Masternode %s is sending too many transactions %s\n", dstx.vin.prevout.ToStringShort(), hashTx.ToString());
return true;
// TODO: Not an error? Could it be that someone is relaying old DSTXes
// we have no idea about (e.g we were offline)? How to handle them?
}
if(!dstx.CheckSignature(mn.pubKeyMasternode)) {
LogPrint("privatesend", "DSTX -- CheckSignature() failed for %s\n", hashTx.ToString());
return false;
}
LogPrintf("DSTX -- Got Masternode transaction %s\n", hashTx.ToString());
mempool.PrioritiseTransaction(hashTx, hashTx.ToString(), 1000, 0.1*COIN);
mnodeman.DisallowMixing(dstx.vin.prevout);
}
LOCK(cs_main);
bool fMissingInputs = false;
CValidationState state;
mapAlreadyAskedFor.erase(inv.hash);
if (!AlreadyHave(inv) && AcceptToMemoryPool(mempool, state, tx, true, &fMissingInputs))
{
// Process custom txes, this changes AlreadyHave to "true"
if (strCommand == NetMsgType::DSTX) {
LogPrintf("DSTX -- Masternode transaction accepted, txid=%s, peer=%d\n",
tx.GetHash().ToString(), pfrom->id);
CPrivateSend::AddDSTX(dstx);
} else if (strCommand == NetMsgType::TXLOCKREQUEST) {
LogPrintf("TXLOCKREQUEST -- Transaction Lock Request accepted, txid=%s, peer=%d\n",
tx.GetHash().ToString(), pfrom->id);
instantsend.AcceptLockRequest(txLockRequest);
}
mempool.check(pcoinsTip);
connman.RelayTransaction(tx);
vWorkQueue.push_back(inv.hash);
pfrom->nLastTXTime = GetTime();
LogPrint("mempool", "AcceptToMemoryPool: peer=%d: accepted %s (poolsz %u txn, %u kB)\n",
pfrom->id,
tx.GetHash().ToString(),
mempool.size(), mempool.DynamicMemoryUsage() / 1000);
// Recursively process any orphan transactions that depended on this one
set<NodeId> setMisbehaving;
for (unsigned int i = 0; i < vWorkQueue.size(); i++)
{
map<uint256, set<uint256> >::iterator itByPrev = mapOrphanTransactionsByPrev.find(vWorkQueue[i]);
if (itByPrev == mapOrphanTransactionsByPrev.end())
continue;
for (set<uint256>::iterator mi = itByPrev->second.begin();
mi != itByPrev->second.end();
++mi)
{
const uint256& orphanHash = *mi;
const CTransaction& orphanTx = mapOrphanTransactions[orphanHash].tx;
NodeId fromPeer = mapOrphanTransactions[orphanHash].fromPeer;
bool fMissingInputs2 = false;
// Use a dummy CValidationState so someone can't set up nodes to counter-DoS based on orphan
// resolution (that is, feeding people an invalid transaction based on LegitTxX in order to get
// anyone relaying LegitTxX banned)
CValidationState stateDummy;
if (setMisbehaving.count(fromPeer))
continue;
if (AcceptToMemoryPool(mempool, stateDummy, orphanTx, true, &fMissingInputs2))
{
LogPrint("mempool", " accepted orphan tx %s\n", orphanHash.ToString());
connman.RelayTransaction(orphanTx);
vWorkQueue.push_back(orphanHash);
vEraseQueue.push_back(orphanHash);
}
else if (!fMissingInputs2)
{
int nDos = 0;
if (stateDummy.IsInvalid(nDos) && nDos > 0)
{
// Punish peer that gave us an invalid orphan tx
Misbehaving(fromPeer, nDos);
setMisbehaving.insert(fromPeer);
LogPrint("mempool", " invalid orphan tx %s\n", orphanHash.ToString());
}
// Has inputs but not accepted to mempool
// Probably non-standard or insufficient fee/priority
LogPrint("mempool", " removed orphan tx %s\n", orphanHash.ToString());
vEraseQueue.push_back(orphanHash);
assert(recentRejects);
recentRejects->insert(orphanHash);
}
mempool.check(pcoinsTip);
}
}
BOOST_FOREACH(uint256 hash, vEraseQueue)
EraseOrphanTx(hash);
}
else if (fMissingInputs)
{
AddOrphanTx(tx, pfrom->GetId());
// DoS prevention: do not allow mapOrphanTransactions to grow unbounded
unsigned int nMaxOrphanTx = (unsigned int)std::max((int64_t)0, GetArg("-maxorphantx", DEFAULT_MAX_ORPHAN_TRANSACTIONS));
unsigned int nEvicted = LimitOrphanTxSize(nMaxOrphanTx);
if (nEvicted > 0)
LogPrint("mempool", "mapOrphan overflow, removed %u tx\n", nEvicted);
} else {
assert(recentRejects);
recentRejects->insert(tx.GetHash());
if (strCommand == NetMsgType::TXLOCKREQUEST && !AlreadyHave(inv)) {
// i.e. AcceptToMemoryPool failed, probably because the lock request conflicts
// with an existing normal tx or with a tx lock for another tx. For the same tx lock
// AlreadyHave would have returned "true" already.
// This is the first time we failed for this tx lock request;
// rejecting it here should switch AlreadyHave to "true".
instantsend.RejectLockRequest(txLockRequest);
// This lets other nodes create a lock request candidate, i.e.
// it allows multiple conflicting lock requests to compete for votes
connman.RelayTransaction(tx);
}
if (pfrom->fWhitelisted && GetBoolArg("-whitelistforcerelay", DEFAULT_WHITELISTFORCERELAY)) {
// Always relay transactions received from whitelisted peers, even
// if they were already in the mempool or rejected from it due
// to policy, allowing the node to function as a gateway for
// nodes hidden behind it.
//
// Never relay transactions that we would assign a non-zero DoS
// score for, as we expect peers to do the same with us in that
// case.
int nDoS = 0;
if (!state.IsInvalid(nDoS) || nDoS == 0) {
LogPrintf("Force relaying tx %s from whitelisted peer=%d\n", tx.GetHash().ToString(), pfrom->id);
connman.RelayTransaction(tx);
} else {
LogPrintf("Not relaying invalid transaction %s from whitelisted peer=%d (%s)\n", tx.GetHash().ToString(), pfrom->id, FormatStateMessage(state));
}
}
}
int nDoS = 0;
if (state.IsInvalid(nDoS))
{
LogPrint("mempoolrej", "%s from peer=%d was not accepted: %s\n", tx.GetHash().ToString(),
pfrom->id,
FormatStateMessage(state));
if (state.GetRejectCode() < REJECT_INTERNAL) // Never send AcceptToMemoryPool's internal codes over P2P
connman.PushMessage(pfrom, NetMsgType::REJECT, strCommand, (unsigned char)state.GetRejectCode(),
state.GetRejectReason().substr(0, MAX_REJECT_MESSAGE_LENGTH), inv.hash);
if (nDoS > 0)
Misbehaving(pfrom->GetId(), nDoS);
}
}
else if (strCommand == NetMsgType::HEADERS && !fImporting && !fReindex) // Ignore headers received while importing
{
std::vector<CBlockHeader> headers;
// Bypass the normal CBlock deserialization, as we don't want to risk deserializing 2000 full blocks.
unsigned int nCount = ReadCompactSize(vRecv);
if (nCount > MAX_HEADERS_RESULTS) {
LOCK(cs_main);
Misbehaving(pfrom->GetId(), 20);
return error("headers message size = %u", nCount);
}
headers.resize(nCount);
for (unsigned int n = 0; n < nCount; n++) {
vRecv >> headers[n];
ReadCompactSize(vRecv); // ignore tx count; assume it is 0.
}
CBlockIndex *pindexLast = NULL;
{
LOCK(cs_main);
uint256 hashLastBlock;
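// Ensure the headers connect to each other before doing any more expensive work:
// each header must build on the hash of the previous one in the message.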
for (const CBlockHeader& header : headers) {
if (!hashLastBlock.IsNull() && header.hashPrevBlock != hashLastBlock) {
Misbehaving(pfrom->GetId(), 20);
return error("non-continuous headers sequence");
}
hashLastBlock = header.GetHash();
}
}
CValidationState state;
if (!ProcessNewBlockHeaders(headers, state, chainparams, &pindexLast)) {
int nDoS;
if (state.IsInvalid(nDoS)) {
if (nDoS > 0) {
LOCK(cs_main);
Misbehaving(pfrom->GetId(), nDoS);
}
return error("invalid header received");
}
}
{
LOCK(cs_main);
if (pindexLast)
UpdateBlockAvailability(pfrom->GetId(), pindexLast->GetBlockHash());
if (nCount == MAX_HEADERS_RESULTS && pindexLast) {
// Headers message had its maximum size; the peer may have more headers.
// TODO: optimize: if pindexLast is an ancestor of chainActive.Tip or pindexBestHeader, continue
// from there instead.
LogPrint("net", "more getheaders (%d) to end to peer=%d (startheight:%d)\n", pindexLast->nHeight, pfrom->id, pfrom->nStartingHeight);
connman.PushMessage(pfrom, NetMsgType::GETHEADERS, chainActive.GetLocator(pindexLast), uint256());
} else {
if (chainparams.DelayGetHeadersTime() != 0 && pindexBestHeader->GetBlockTime() < GetAdjustedTime() - chainparams.DelayGetHeadersTime()) {
// The peer sent us a HEADERS message below the maximum size while we are still quite far from
// being fully synced. This means we probably got a bad peer for the initial sync and need to
// continue with another one. By disconnecting we force a new iteration of the initial headers
// sync in SendMessages.
// TODO should we handle whitelisted peers here as we do in headers sync timeout handling?
pfrom->fDisconnect = true;
return error("detected bad peer for initial headers sync, disconnecting %d", pfrom->id);
}
if (nCount == 0) {
// Nothing interesting. Stop asking this peer for more headers.
return true;
}
}
bool fCanDirectFetch = CanDirectFetch(chainparams.GetConsensus());
CNodeState *nodestate = State(pfrom->GetId());
// If this set of headers is valid and ends in a block with at least as
// much work as our tip, download as much as possible.
if (fCanDirectFetch && pindexLast->IsValid(BLOCK_VALID_TREE) && chainActive.Tip()->nChainWork <= pindexLast->nChainWork) {
vector<CBlockIndex *> vToFetch;
CBlockIndex *pindexWalk = pindexLast;
// Calculate all the blocks we'd need to switch to pindexLast, up to a limit.
while (pindexWalk && !chainActive.Contains(pindexWalk) && vToFetch.size() <= MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
if (!(pindexWalk->nStatus & BLOCK_HAVE_DATA) &&
!mapBlocksInFlight.count(pindexWalk->GetBlockHash())) {
// We don't have this block, and it's not yet in flight.
vToFetch.push_back(pindexWalk);
}
pindexWalk = pindexWalk->pprev;
}
// If pindexWalk still isn't on our main chain, we're looking at a
// very large reorg at a time we think we're close to caught up to
// the main chain -- this shouldn't really happen. Bail out on the
// direct fetch and rely on parallel download instead.
if (!chainActive.Contains(pindexWalk)) {
LogPrint("net", "Large reorg, won't direct fetch to %s (%d)\n",
pindexLast->GetBlockHash().ToString(),
pindexLast->nHeight);
} else {
vector<CInv> vGetData;
// Download as much as possible, from earliest to latest.
BOOST_REVERSE_FOREACH(CBlockIndex *pindex, vToFetch) {
if (nodestate->nBlocksInFlight >= MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
// Can't download any more from this peer
break;
}
vGetData.push_back(CInv(MSG_BLOCK, pindex->GetBlockHash()));
MarkBlockAsInFlight(pfrom->GetId(), pindex->GetBlockHash(), chainparams.GetConsensus(), pindex);
LogPrint("net", "Requesting block %s from peer=%d\n",
pindex->GetBlockHash().ToString(), pfrom->id);
}
if (vGetData.size() > 1) {
LogPrint("net", "Downloading blocks toward %s (%d) via headers direct fetch\n",
pindexLast->GetBlockHash().ToString(), pindexLast->nHeight);
}
if (vGetData.size() > 0) {
connman.PushMessage(pfrom, NetMsgType::GETDATA, vGetData);
}
}
}
}
}
else if (strCommand == NetMsgType::BLOCK && !fImporting && !fReindex) // Ignore blocks received while importing
{
CBlock block;
vRecv >> block;
CInv inv(MSG_BLOCK, block.GetHash());
LogPrint("net", "received block %s peer=%d\n", inv.hash.ToString(), pfrom->id);
pfrom->AddInventoryKnown(inv);
// Process all blocks from whitelisted peers, even if not requested,
// unless we're still syncing with the network.
// Such an unrequested block may still be processed, subject to the
// conditions in AcceptBlock().
bool forceProcessing = pfrom->fWhitelisted && !IsInitialBlockDownload();
const uint256 hash(block.GetHash());
{
LOCK(cs_main);
// Also always process if we requested the block explicitly, as we may
// need it even though it is not a candidate for a new best tip.
forceProcessing |= MarkBlockAsReceived(hash);
// mapBlockSource is only used for sending reject messages and DoS scores,
// so the race between here and cs_main in ProcessNewBlock is fine.
mapBlockSource.emplace(hash, pfrom->GetId());
}
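// ProcessNewBlock reports via fNewBlock whether this block was newly accepted,
// which we use to update the peer's last-block time.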
bool fNewBlock = false;
ProcessNewBlock(chainparams, &block, forceProcessing, NULL, &fNewBlock);
if (fNewBlock)
pfrom->nLastBlockTime = GetTime();
}
else if (strCommand == NetMsgType::GETADDR)
{
// This asymmetric behavior for inbound and outbound connections was introduced
// to prevent a fingerprinting attack: an attacker can send specific fake addresses
// to users' AddrMan and later request them by sending getaddr messages.
// Making nodes which are behind NAT and can only make outgoing connections ignore
// the getaddr message mitigates the attack.
if (!pfrom->fInbound) {
LogPrint("net", "Ignoring \"getaddr\" from outbound connection. peer=%d\n", pfrom->id);
return true;
}
pfrom->vAddrToSend.clear();
vector<CAddress> vAddr = connman.GetAddresses();
BOOST_FOREACH(const CAddress &addr, vAddr)
pfrom->PushAddress(addr);
}
else if (strCommand == NetMsgType::MEMPOOL)
{
if (connman.OutboundTargetReached(false) && !pfrom->fWhitelisted)
{
LogPrint("net", "mempool request with bandwidth limit reached, disconnect peer=%d\n", pfrom->GetId());
pfrom->fDisconnect = true;
return true;
}
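// Reply with an INV for every transaction currently in the mempool,
// filtered through the peer's bloom filter if one is loaded, and chunked
// into at most MAX_INV_SZ entries per inv message.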
LOCK2(cs_main, pfrom->cs_filter);
std::vector<uint256> vtxid;
mempool.queryHashes(vtxid);
vector<CInv> vInv;
BOOST_FOREACH(uint256& hash, vtxid) {
CInv inv(MSG_TX, hash);
if (pfrom->pfilter) {
CTransaction tx;
bool fInMemPool = mempool.lookup(hash, tx);
if (!fInMemPool) continue; // another thread removed since queryHashes, maybe...
if (!pfrom->pfilter->IsRelevantAndUpdate(tx)) continue;
}
vInv.push_back(inv);
if (vInv.size() == MAX_INV_SZ) {
connman.PushMessage(pfrom, NetMsgType::INV, vInv);
vInv.clear();
}
}
if (vInv.size() > 0)
connman.PushMessage(pfrom, NetMsgType::INV, vInv);
}
else if (strCommand == NetMsgType::PING)
{
if (pfrom->nVersion > BIP0031_VERSION)
{
uint64_t nonce = 0;
vRecv >> nonce;
// Echo the message back with the nonce. This allows for two useful features:
//
// 1) A remote node can quickly check if the connection is operational
// 2) Remote nodes can measure the latency of the network thread. If this node
// is overloaded it won't respond to pings quickly and the remote node can
// avoid sending us more work, like chain download requests.
//
// The nonce stops the remote getting confused between different pings: without
// it, if the remote node sends a ping once per second and this node takes 5
// seconds to respond to each, the 5th ping the remote sends would appear to
// return very quickly.
connman.PushMessage(pfrom, NetMsgType::PONG, nonce);
}
}
else if (strCommand == NetMsgType::PONG)
{
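// A pong should echo the nonce of our most recent outstanding ping;
// anything else (missing nonce, mismatched nonce, unsolicited pong) is
// recorded in sProblem and logged below rather than penalized.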
int64_t pingUsecEnd = nTimeReceived;
uint64_t nonce = 0;
size_t nAvail = vRecv.in_avail();
bool bPingFinished = false;
std::string sProblem;
if (nAvail >= sizeof(nonce)) {
vRecv >> nonce;
// Only process pong message if there is an outstanding ping (old ping without nonce should never pong)
if (pfrom->nPingNonceSent != 0) {
if (nonce == pfrom->nPingNonceSent) {
// Matching pong received, this ping is no longer outstanding
bPingFinished = true;
int64_t pingUsecTime = pingUsecEnd - pfrom->nPingUsecStart;
if (pingUsecTime > 0) {
// Successful ping time measurement, replace previous
pfrom->nPingUsecTime = pingUsecTime;
pfrom->nMinPingUsecTime = std::min(pfrom->nMinPingUsecTime, pingUsecTime);
} else {
// This should never happen
sProblem = "Timing mishap";
}
} else {
// Nonce mismatches are normal when pings are overlapping
sProblem = "Nonce mismatch";
if (nonce == 0) {
// This is most likely a bug in another implementation somewhere; cancel this ping
bPingFinished = true;
sProblem = "Nonce zero";
}
}
} else {
sProblem = "Unsolicited pong without ping";
}
} else {
// This is most likely a bug in another implementation somewhere; cancel this ping
bPingFinished = true;
sProblem = "Short payload";
}
if (!(sProblem.empty())) {
LogPrint("net", "pong peer=%d: %s, %x expected, %x received, %u bytes\n",
pfrom->id,
sProblem,
pfrom->nPingNonceSent,
nonce,
nAvail);
}
if (bPingFinished) {
pfrom->nPingNonceSent = 0;
}
}
else if (fAlerts && strCommand == NetMsgType::ALERT)
{
CAlert alert;
vRecv >> alert;
uint256 alertHash = alert.GetHash();
if (pfrom->setKnown.count(alertHash) == 0)
{
if (alert.ProcessAlert(chainparams.AlertKey()))
{
// Relay
pfrom->setKnown.insert(alertHash);
{
connman.ForEachNode([&alert, &connman](CNode* pnode) {
alert.RelayTo(pnode, connman);
});
}
}
else {
// Small DoS penalty so peers that send us lots of
// duplicate/expired/invalid-signature/whatever alerts
// eventually get banned.
// This isn't a Misbehaving(100) (immediate ban) because the
// peer might be an older or different implementation with
// a different signature key, etc.
Misbehaving(pfrom->GetId(), 10);
}
}
}
else if (strCommand == NetMsgType::FILTERLOAD)
{
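// Install the peer-supplied bloom filter used to limit which transactions
// we relay to this peer.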
CBloomFilter filter;
vRecv >> filter;
if (!filter.IsWithinSizeConstraints())
{
// There is no excuse for sending a too-large filter
LOCK(cs_main);
Misbehaving(pfrom->GetId(), 100);
}
else
{
LOCK(pfrom->cs_filter);
delete pfrom->pfilter;
pfrom->pfilter = new CBloomFilter(filter);
pfrom->pfilter->UpdateEmptyFull();
}
pfrom->fRelayTxes = true;
}
else if (strCommand == NetMsgType::FILTERADD)
{
vector<unsigned char> vData;
vRecv >> vData;
// Nodes must NEVER send a data item > 520 bytes (the max size for a script data object,
// and thus, the maximum size any matched object can have) in a filteradd message
if (vData.size() > MAX_SCRIPT_ELEMENT_SIZE)
{
LOCK(cs_main);
Misbehaving(pfrom->GetId(), 100);
} else {
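// Add the element to the peer's existing filter; a filteradd without a
// prior filterload is a protocol violation and is penalized below.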
LOCK(pfrom->cs_filter);
if (pfrom->pfilter)
pfrom->pfilter->insert(vData);
else
{
LOCK(cs_main);
Misbehaving(pfrom->GetId(), 100);
}
}
}
else if (strCommand == NetMsgType::FILTERCLEAR)
{
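// Reset to a default (match-all) filter and resume relaying all
// transactions to this peer.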
LOCK(pfrom->cs_filter);
delete pfrom->pfilter;
pfrom->pfilter = new CBloomFilter();
pfrom->fRelayTxes = true;
}
else if (strCommand == NetMsgType::REJECT)
{
if (fDebug) {
try {
string strMsg; unsigned char ccode; string strReason;
vRecv >> LIMITED_STRING(strMsg, CMessageHeader::COMMAND_SIZE) >> ccode >> LIMITED_STRING(strReason, MAX_REJECT_MESSAGE_LENGTH);
ostringstream ss;
ss << strMsg << " code " << itostr(ccode) << ": " << strReason;
if (strMsg == NetMsgType::BLOCK || strMsg == NetMsgType::TX)
{
uint256 hash;
vRecv >> hash;
ss << ": hash " << hash.ToString();
}
LogPrint("net", "Reject %s\n", SanitizeString(ss.str()));
} catch (const std::ios_base::failure&) {
// Avoid feedback loops by preventing reject messages from triggering a new reject message.
LogPrint("net", "Unparseable reject message received\n");
}
}
}
else
{
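// Not a message type handled above: check whether it is one of the
// registered Dash-specific message types before dispatching it to the
// subsystem handlers.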
bool found = false;
const std::vector<std::string> &allMessages = getAllNetMessageTypes();
BOOST_FOREACH(const std::string& msg, allMessages) {
if(msg == strCommand) {
found = true;
break;
}
}
if (found)
{
// Probably one of the Dash-specific message extensions
#ifdef ENABLE_WALLET
privateSendClient.ProcessMessage(pfrom, strCommand, vRecv, connman);
#endif // ENABLE_WALLET
privateSendServer.ProcessMessage(pfrom, strCommand, vRecv, connman);
mnodeman.ProcessMessage(pfrom, strCommand, vRecv, connman);
mnpayments.ProcessMessage(pfrom, strCommand, vRecv, connman);
instantsend.ProcessMessage(pfrom, strCommand, vRecv, connman);
sporkManager.ProcessSpork(pfrom, strCommand, vRecv, connman);
masternodeSync.ProcessMessage(pfrom, strCommand, vRecv);
governance.ProcessMessage(pfrom, strCommand, vRecv, connman);
}
else
{
// Ignore unknown commands for extensibility
LogPrint("net", "Unknown command \"%s\" from peer=%d\n", SanitizeString(strCommand), pfrom->id);
}
}
return true;
}
bool ProcessMessages(CNode* pfrom, CConnman& connman, std::atomic<bool>& interruptMsgProc)
{
const CChainParams& chainparams = Params();
//
// Message format
// (4) message start
// (12) command
// (4) size
// (4) checksum
// (x) data
//
bool fMoreWork = false;
if (!pfrom->vRecvGetData.empty())
ProcessGetData(pfrom, chainparams.GetConsensus(), connman, interruptMsgProc);
if (pfrom->fDisconnect)
return false;
// this maintains the order of responses
if (!pfrom->vRecvGetData.empty()) return true;
// Don't bother if send buffer is too full to respond anyway
if (pfrom->fPauseSend)
return false;
std::list<CNetMessage> msgs;
{
LOCK(pfrom->cs_vProcessMsg);
if (pfrom->vProcessMsg.empty())
return false;
// Just take one message
msgs.splice(msgs.begin(), pfrom->vProcessMsg, pfrom->vProcessMsg.begin());
pfrom->nProcessQueueSize -= msgs.front().vRecv.size() + CMessageHeader::HEADER_SIZE;
pfrom->fPauseRecv = pfrom->nProcessQueueSize > connman.GetReceiveFloodSize();
fMoreWork = !pfrom->vProcessMsg.empty();
}
CNetMessage& msg(msgs.front());
msg.SetVersion(pfrom->GetRecvVersion());
// Scan for message start
if (memcmp(msg.hdr.pchMessageStart, chainparams.MessageStart(), MESSAGE_START_SIZE) != 0) {
LogPrintf("PROCESSMESSAGE: INVALID MESSAGESTART %s peer=%d\n", SanitizeString(msg.hdr.GetCommand()), pfrom->id);
pfrom->fDisconnect = true;
return false;
}
// Read header
CMessageHeader& hdr = msg.hdr;
if (!hdr.IsValid(chainparams.MessageStart()))
{
LogPrintf("PROCESSMESSAGE: ERRORS IN HEADER %s peer=%d\n", SanitizeString(hdr.GetCommand()), pfrom->id);
return fMoreWork;
}
string strCommand = hdr.GetCommand();
// Message size
unsigned int nMessageSize = hdr.nMessageSize;
// Checksum
CDataStream& vRecv = msg.vRecv;
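// Recompute the payload hash and compare it with the checksum carried in
// the header; on a mismatch the message is logged and skipped.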
uint256 hash = Hash(vRecv.begin(), vRecv.begin() + nMessageSize);
if (memcmp(hash.begin(), hdr.pchChecksum, CMessageHeader::CHECKSUM_SIZE) != 0)
{
LogPrintf("%s(%s, %u bytes): CHECKSUM ERROR expected %s was %s\n", __func__,
SanitizeString(strCommand), nMessageSize,
HexStr(hash.begin(), hash.begin()+CMessageHeader::CHECKSUM_SIZE),
HexStr(hdr.pchChecksum, hdr.pchChecksum+CMessageHeader::CHECKSUM_SIZE));
return fMoreWork;
}
// Process message
bool fRet = false;
try
{
fRet = ProcessMessage(pfrom, strCommand, vRecv, msg.nTime, connman, interruptMsgProc);
if (interruptMsgProc)
return false;
if (!pfrom->vRecvGetData.empty())
fMoreWork = true;
}
catch (const std::ios_base::failure& e)
{
connman.PushMessageWithVersion(pfrom, INIT_PROTO_VERSION, NetMsgType::REJECT, strCommand, REJECT_MALFORMED, string("error parsing message"));
if (strstr(e.what(), "end of data"))
{
// Allow exceptions from under-length message on vRecv
LogPrintf("%s(%s, %u bytes): Exception '%s' caught, normally caused by a message being shorter than its stated length\n", __func__, SanitizeString(strCommand), nMessageSize, e.what());
}
else if (strstr(e.what(), "size too large"))
{
// Allow exceptions from over-long size
LogPrintf("%s(%s, %u bytes): Exception '%s' caught\n", __func__, SanitizeString(strCommand), nMessageSize, e.what());
}
else
{
PrintExceptionContinue(&e, "ProcessMessages()");
}
}
catch (const std::exception& e) {
PrintExceptionContinue(&e, "ProcessMessages()");
} catch (...) {
PrintExceptionContinue(NULL, "ProcessMessages()");
}
if (!fRet)
LogPrintf("%s(%s, %u bytes) FAILED peer=%d\n", __func__, SanitizeString(strCommand), nMessageSize, pfrom->id);
return fMoreWork;
}
bool SendMessages(CNode* pto, CConnman& connman, std::atomic<bool>& interruptMsgProc)
{
const CChainParams& chainParams = Params();
const Consensus::Params& consensusParams = chainParams.GetConsensus();
{
// Don't send anything until the version handshake is complete
if (!pto->fSuccessfullyConnected || pto->fDisconnect)
return true;
//
// Message: ping
//
bool pingSend = false;
if (pto->fPingQueued) {
// RPC ping request by user
pingSend = true;
}
if (pto->nPingNonceSent == 0 && pto->nPingUsecStart + PING_INTERVAL * 1000000 < GetTimeMicros()) {
// Ping automatically sent as a latency probe & keepalive.
pingSend = true;
}
if (pingSend) {
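// Draw a random non-zero nonce for this ping; zero is reserved to mean
// "no ping in flight" (see the nPingNonceSent checks above and below), and
// the peer's pong is expected to echo this nonce back.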
uint64_t nonce = 0;
while (nonce == 0) {
GetRandBytes((unsigned char*)&nonce, sizeof(nonce));
}
pto->fPingQueued = false;
pto->nPingUsecStart = GetTimeMicros();
if (pto->nVersion > BIP0031_VERSION) {
pto->nPingNonceSent = nonce;
connman.PushMessage(pto, NetMsgType::PING, nonce);
} else {
// Peer is too old to support the ping command with a nonce; a pong will never arrive.
pto->nPingNonceSent = 0;
connman.PushMessage(pto, NetMsgType::PING);
}
}
TRY_LOCK(cs_main, lockMain); // Acquire cs_main for IsInitialBlockDownload() and CNodeState()
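// If cs_main is contended, skip the rest of this pass instead of blocking
// the message handler thread; everything below is retried on the next call.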
if (!lockMain)
return true;
// Address refresh broadcast
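// Periodically re-advertise our own address (skipped during initial block
// download). The next send time is drawn via PoissonNextSend rather than a
// fixed interval, which makes the rebroadcast schedule harder to predict.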
int64_t nNow = GetTimeMicros();
if (!IsInitialBlockDownload() && pto->nNextLocalAddrSend < nNow) {
AdvertiseLocal(pto);
pto->nNextLocalAddrSend = PoissonNextSend(nNow, AVG_LOCAL_ADDRESS_BROADCAST_INTERVAL);
}
//
// Message: addr
//
if (pto->nNextAddrSend < nNow) {
pto->nNextAddrSend = PoissonNextSend(nNow, AVG_ADDRESS_BROADCAST_INTERVAL);
vector<CAddress> vAddr;
vAddr.reserve(pto->vAddrToSend.size());
BOOST_FOREACH(const CAddress& addr, pto->vAddrToSend)
{
if (!pto->addrKnown.contains(addr.GetKey()))
{
pto->addrKnown.insert(addr.GetKey());
vAddr.push_back(addr);
// receiver rejects addr messages larger than 1000 entries
if (vAddr.size() >= 1000)
{
connman.PushMessage(pto, NetMsgType::ADDR, vAddr);
vAddr.clear();
}
}
}
pto->vAddrToSend.clear();
if (!vAddr.empty())
connman.PushMessage(pto, NetMsgType::ADDR, vAddr);
}
CNodeState &state = *State(pto->GetId());
if (state.fShouldBan) {
if (pto->fWhitelisted)
LogPrintf("Warning: not punishing whitelisted peer %s!\n", pto->addr.ToString());
else {
pto->fDisconnect = true;
if (pto->addr.IsLocal())
LogPrintf("Warning: not banning local peer %s!\n", pto->addr.ToString());
else
{
connman.Ban(pto->addr, BanReasonNodeMisbehaving);
}
}
state.fShouldBan = false;
}
BOOST_FOREACH(const CBlockReject& reject, state.rejects)
connman.PushMessage(pto, NetMsgType::REJECT, (string)NetMsgType::BLOCK, reject.chRejectCode, reject.strRejectReason, reject.hashBlock);
state.rejects.clear();
// Start block sync
if (pindexBestHeader == NULL)
pindexBestHeader = chainActive.Tip();
bool fFetch = state.fPreferredDownload || (nPreferredDownload == 0 && !pto->fClient && !pto->fOneShot); // Download if this is a nice peer, or we have no nice peers and this one might do.
if (!state.fSyncStarted && !pto->fClient && !fImporting && !fReindex) {
// Only actively request headers from a single peer, unless we're close to end of initial download.
if ((nSyncStarted == 0 && fFetch) || pindexBestHeader->GetBlockTime() > GetAdjustedTime() - 6 * 60 * 60) { // NOTE: was "close to today" and 24h in Bitcoin
state.fSyncStarted = true;
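// Illustrative arithmetic (assuming a 15-minute HEADERS_DOWNLOAD_TIMEOUT_BASE,
// 1ms HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER and Dash's 2.5-minute target
// spacing): a best header that is one day behind implies roughly
// 86400 / 150 = 576 expected headers, i.e. only ~0.6s added to the base
// timeout computed below.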
state.nHeadersSyncTimeout = GetTimeMicros() + HEADERS_DOWNLOAD_TIMEOUT_BASE + HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER * (GetAdjustedTime() - pindexBestHeader->GetBlockTime())/(consensusParams.nPowTargetSpacing);
nSyncStarted++;
const CBlockIndex *pindexStart = pindexBestHeader;
/* If possible, start at the block preceding the currently
best known header. This ensures that we always get a
non-empty list of headers back as long as the peer
is up-to-date. With a non-empty response, we can initialise
the peer's known best block. This wouldn't be possible
if we requested starting at pindexBestHeader and
got back an empty response. */
if (pindexStart->pprev)
pindexStart = pindexStart->pprev;
LogPrint("net", "initial getheaders (%d) to peer=%d (startheight:%d)\n", pindexStart->nHeight, pto->id, pto->nStartingHeight);
connman.PushMessage(pto, NetMsgType::GETHEADERS, chainActive.GetLocator(pindexStart), uint256());
}
}
if (chainParams.DelayGetHeadersTime() != 0 && pindexBestHeader->GetBlockTime() >= GetAdjustedTime() - chainParams.DelayGetHeadersTime()) {
// Headers chain has caught up enough, so we can send out the GETHEADERS messages which were initially meant to
// be sent directly after an INV was received
LOCK(pto->cs_inventory);
BOOST_FOREACH(const uint256 &hash, pto->vBlockHashesFromINV) {
LogPrint("net", "process delayed getheaders (%d) to peer=%d\n", pindexBestHeader->nHeight, pto->id);
connman.PushMessage(pto, NetMsgType::GETHEADERS, chainActive.GetLocator(pindexBestHeader), hash);
}
pto->vBlockHashesFromINV.clear();
}
// Resend wallet transactions that haven't gotten in a block yet
// Except during reindex, importing and IBD, when old wallet
// transactions become unconfirmed and spam other nodes.
if (!fReindex && !fImporting && !IsInitialBlockDownload())
{
GetMainSignals().Broadcast(nTimeBestReceived, &connman);
}
//
// Try sending block announcements via headers
//
{
// If we have less than MAX_BLOCKS_TO_ANNOUNCE in our
// list of block hashes we're relaying, and our peer wants
// headers announcements, then find the first header
// not yet known to our peer but would connect, and send.
// If no header would connect, or if we have too many
// blocks, or if the peer doesn't want headers, just
// add all to the inv queue.
LOCK(pto->cs_inventory);
vector<CBlock> vHeaders;
bool fRevertToInv = (!state.fPreferHeaders || pto->vBlockHashesToAnnounce.size() > MAX_BLOCKS_TO_ANNOUNCE);
CBlockIndex *pBestIndex = NULL; // last header queued for delivery
ProcessBlockAvailability(pto->id); // ensure pindexBestKnownBlock is up-to-date
if (!fRevertToInv) {
bool fFoundStartingHeader = false;
// Try to find first header that our peer doesn't have, and
// then send all headers past that one. If we come across any
// headers that aren't on chainActive, give up.
BOOST_FOREACH(const uint256 &hash, pto->vBlockHashesToAnnounce) {
BlockMap::iterator mi = mapBlockIndex.find(hash);
assert(mi != mapBlockIndex.end());
CBlockIndex *pindex = mi->second;
if (chainActive[pindex->nHeight] != pindex) {
// Bail out if we reorged away from this block
fRevertToInv = true;
break;
}
if (pBestIndex != NULL && pindex->pprev != pBestIndex) {
// This means that the list of blocks to announce doesn't
// connect to each other.
// This shouldn't really be possible to hit during
// regular operation (because reorgs should take us to
// a chain that has some block not on the prior chain,
// which should be caught by the prior check), but one
// way this could happen is by using invalidateblock /
// reconsiderblock repeatedly on the tip, causing it to
// be added multiple times to vBlockHashesToAnnounce.
// Robustly deal with this rare situation by reverting
// to an inv.
fRevertToInv = true;
break;
}
pBestIndex = pindex;
if (fFoundStartingHeader) {
// add this to the headers message
vHeaders.push_back(pindex->GetBlockHeader());
} else if (PeerHasHeader(&state, pindex)) {
continue; // keep looking for the first new block
} else if (pindex->pprev == NULL || PeerHasHeader(&state, pindex->pprev)) {
// Peer doesn't have this header but they do have the prior one.
// Start sending headers.
fFoundStartingHeader = true;
vHeaders.push_back(pindex->GetBlockHeader());
} else {
// Peer doesn't have this header or the prior one -- nothing will
// connect, so bail out.
fRevertToInv = true;
break;
}
}
}
if (fRevertToInv) {
// If falling back to using an inv, just try to inv the tip.
// The last entry in vBlockHashesToAnnounce was our tip at some point
// in the past.
if (!pto->vBlockHashesToAnnounce.empty()) {
const uint256 &hashToAnnounce = pto->vBlockHashesToAnnounce.back();
BlockMap::iterator mi = mapBlockIndex.find(hashToAnnounce);
assert(mi != mapBlockIndex.end());
CBlockIndex *pindex = mi->second;
// Warn if we're announcing a block that is not on the main chain.
// This should be very rare and could be optimized out.
// Just log for now.
if (chainActive[pindex->nHeight] != pindex) {
LogPrint("net", "Announcing block %s not on main chain (tip=%s)\n",
hashToAnnounce.ToString(), chainActive.Tip()->GetBlockHash().ToString());
}
// If the peer announced this block to us, don't inv it back.
// (Since block announcements may not be via inv's, we can't solely rely on
// setInventoryKnown to track this.)
if (!PeerHasHeader(&state, pindex)) {
pto->PushInventory(CInv(MSG_BLOCK, hashToAnnounce));
LogPrint("net", "%s: sending inv peer=%d hash=%s\n", __func__,
pto->id, hashToAnnounce.ToString());
}
}
} else if (!vHeaders.empty()) {
if (vHeaders.size() > 1) {
LogPrint("net", "%s: %u headers, range (%s, %s), to peer=%d\n", __func__,
vHeaders.size(),
vHeaders.front().GetHash().ToString(),
vHeaders.back().GetHash().ToString(), pto->id);
} else {
LogPrint("net", "%s: sending header %s to peer=%d\n", __func__,
vHeaders.front().GetHash().ToString(), pto->id);
}
connman.PushMessage(pto, NetMsgType::HEADERS, vHeaders);
state.pindexBestHeaderSent = pBestIndex;
}
pto->vBlockHashesToAnnounce.clear();
}
//
// Message: inventory
//
vector<CInv> vInv;
vector<CInv> vInvWait;
{
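// Whitelisted peers bypass the tx-inv trickle below; for other peers a full
// flush of the inv queue only happens once their Poisson-scheduled
// nNextInvSend timer has expired.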
bool fSendTrickle = pto->fWhitelisted;
if (pto->nNextInvSend < nNow) {
fSendTrickle = true;
pto->nNextInvSend = PoissonNextSend(nNow, AVG_INVENTORY_BROADCAST_INTERVAL);
}
LOCK(pto->cs_inventory);
vInv.reserve(std::min<size_t>(1000, pto->vInventoryToSend.size()));
vInvWait.reserve(pto->vInventoryToSend.size());
BOOST_FOREACH(const CInv& inv, pto->vInventoryToSend)
{
if (inv.type == MSG_TX && pto->filterInventoryKnown.contains(inv.hash))
continue;
// trickle out tx inv to protect privacy
if (inv.type == MSG_TX && !fSendTrickle)
{
// 1/4 of tx invs blast to all immediately
static uint256 hashSalt;
if (hashSalt.IsNull())
hashSalt = GetRandHash();
uint256 hashRand = ArithToUint256(UintToArith256(inv.hash) ^ UintToArith256(hashSalt));
hashRand = Hash(BEGIN(hashRand), END(hashRand));
bool fTrickleWait = ((UintToArith256(hashRand) & 3) != 0);
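// (hashRand & 3) != 0 holds for three of the four possible low-bit values,
// so roughly 3/4 of tx invs wait for the next trickle while ~1/4 go out
// immediately. The salt is fixed for the lifetime of the process, so the
// choice for a given txid is consistent across peers but unpredictable
// from outside.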
if (fTrickleWait)
{
LogPrint("net", "SendMessages -- queued inv(vInvWait): %s index=%d peer=%d\n", inv.ToString(), vInvWait.size(), pto->id);
vInvWait.push_back(inv);
continue;
}
}
pto->filterInventoryKnown.insert(inv.hash);
LogPrint("net", "SendMessages -- queued inv: %s index=%d peer=%d\n", inv.ToString(), vInv.size(), pto->id);
vInv.push_back(inv);
if (vInv.size() >= 1000)
{
LogPrint("net", "SendMessages -- pushing inv's: count=%d peer=%d\n", vInv.size(), pto->id);
connman.PushMessage(pto, NetMsgType::INV, vInv);
vInv.clear();
}
}
pto->vInventoryToSend = vInvWait;
}
if (!vInv.empty()) {
LogPrint("net", "SendMessages -- pushing tailing inv's: count=%d peer=%d\n", vInv.size(), pto->id);
connman.PushMessage(pto, NetMsgType::INV, vInv);
}
// Detect whether we're stalling
nNow = GetTimeMicros();
if (!pto->fDisconnect && state.nStallingSince && state.nStallingSince < nNow - 1000000 * BLOCK_STALLING_TIMEOUT) {
// Stalling only triggers when the block download window cannot move. During normal steady state,
// the download window should be much larger than the to-be-downloaded set of blocks, so disconnection
// should only happen during initial block download.
LogPrintf("Peer=%d is stalling block download, disconnecting\n", pto->id);
pto->fDisconnect = true;
}
// In case there is a block that has been in flight from this peer for 2 + 0.5 * N times the block interval
// (with N the number of peers from which we're downloading validated blocks), disconnect due to timeout.
// We compensate for other peers to prevent killing off peers due to our own downstream link
// being saturated. We only count validated in-flight blocks so peers can't advertise non-existing block hashes
// to unreasonably increase our timeout.
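// Example (assuming Dash's 2.5-minute block interval): with two other peers
// serving us validated blocks, the timeout works out to
// (2 + 0.5 * 2) * 2.5 minutes = 7.5 minutes.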
if (!pto->fDisconnect && state.vBlocksInFlight.size() > 0) {
QueuedBlock &queuedBlock = state.vBlocksInFlight.front();
int nOtherPeersWithValidatedDownloads = nPeersWithValidatedDownloads - (state.nBlocksInFlightValidHeaders > 0);
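// Exclude this peer itself from the count when it has validated blocks in
// flight, so that only *other* downloading peers extend the allowance.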
if (nNow > state.nDownloadingSince + consensusParams.nPowTargetSpacing * (BLOCK_DOWNLOAD_TIMEOUT_BASE + BLOCK_DOWNLOAD_TIMEOUT_PER_PEER * nOtherPeersWithValidatedDownloads)) {
LogPrintf("Timeout downloading block %s from peer=%d, disconnecting\n", queuedBlock.hash.ToString(), pto->id);
pto->fDisconnect = true;
}
}
// Check for headers sync timeouts
if (state.fSyncStarted && state.nHeadersSyncTimeout < std::numeric_limits<int64_t>::max()) {
// Detect whether this is a stalling initial-headers-sync peer
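// Assuming Dash's 2.5-minute target spacing, the 6-hour window below covers
// roughly 6 * 3600 / 150 = 144 blocks, the same depth as Bitcoin's original
// 24-hour check at 10-minute spacing.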
if (pindexBestHeader->GetBlockTime() <= GetAdjustedTime() - 6*60*60) { // was 24*60*60 in bitcoin
if (nNow > state.nHeadersSyncTimeout && nSyncStarted == 1 && (nPreferredDownload - state.fPreferredDownload >= 1)) {
// Disconnect a (non-whitelisted) peer if it is our only sync peer,
// and we have others we could be using instead.
// Note: If all our peers are inbound, then we won't
// disconnect our sync peer for stalling; we have bigger
// problems if we can't get any outbound peers.
if (!pto->fWhitelisted) {
LogPrintf("Timeout downloading headers from peer=%d, disconnecting\n", pto->GetId());
pto->fDisconnect = true;
return true;
} else {
LogPrintf("Timeout downloading headers from whitelisted peer=%d, not disconnecting\n", pto->GetId());
// Reset the headers sync state so that we have a
// chance to try downloading from a different peer.
// Note: this will also result in at least one more
// getheaders message to be sent to
// this peer (eventually).
state.fSyncStarted = false;
nSyncStarted--;
state.nHeadersSyncTimeout = 0;
}
}
} else {
// After we've caught up once, reset the timeout so we can't trigger
// disconnect later.
state.nHeadersSyncTimeout = std::numeric_limits<int64_t>::max();
}
}
//
// Message: getdata (blocks)
//
vector<CInv> vGetData;
if (!pto->fDisconnect && !pto->fClient && (fFetch || !IsInitialBlockDownload()) && state.nBlocksInFlight < MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
vector<CBlockIndex*> vToDownload;
NodeId staller = -1;
FindNextBlocksToDownload(pto->GetId(), MAX_BLOCKS_IN_TRANSIT_PER_PEER - state.nBlocksInFlight, vToDownload, staller, consensusParams);
BOOST_FOREACH(CBlockIndex *pindex, vToDownload) {
vGetData.push_back(CInv(MSG_BLOCK, pindex->GetBlockHash()));
MarkBlockAsInFlight(pto->GetId(), pindex->GetBlockHash(), consensusParams, pindex);
LogPrint("net", "Requesting block %s (%d) peer=%d\n", pindex->GetBlockHash().ToString(),
pindex->nHeight, pto->id);
}
if (state.nBlocksInFlight == 0 && staller != -1) {
if (State(staller)->nStallingSince == 0) {
State(staller)->nStallingSince = nNow;
LogPrint("net", "Stall started peer=%d\n", staller);
}
}
}
//
// Message: getdata (non-blocks)
//
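// mapAskFor is ordered by the earliest time we are willing to request each
// item, so only entries that are already due (time <= nNow) are drained here.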
while (!pto->fDisconnect && !pto->mapAskFor.empty() && (*pto->mapAskFor.begin()).first <= nNow)
{
const CInv& inv = (*pto->mapAskFor.begin()).second;
if (!AlreadyHave(inv))
{
LogPrint("net", "SendMessages -- GETDATA -- requesting inv = %s peer=%d\n", inv.ToString(), pto->id);
vGetData.push_back(inv);
if (vGetData.size() >= 1000)
{
connman.PushMessage(pto, NetMsgType::GETDATA, vGetData);
LogPrint("net", "SendMessages -- GETDATA -- pushed size = %lu peer=%d\n", vGetData.size(), pto->id);
vGetData.clear();
}
} else {
// If we're not going to ask, don't expect a response.
LogPrint("net", "SendMessages -- GETDATA -- already have inv = %s peer=%d\n", inv.ToString(), pto->id);
pto->setAskFor.erase(inv.hash);
}
pto->mapAskFor.erase(pto->mapAskFor.begin());
}
if (!vGetData.empty()) {
connman.PushMessage(pto, NetMsgType::GETDATA, vGetData);
LogPrint("net", "SendMessages -- GETDATA -- pushed size = %lu peer=%d\n", vGetData.size(), pto->id);
}
}
return true;
}
class CNetProcessingCleanup
{
public:
CNetProcessingCleanup() {}
~CNetProcessingCleanup() {
// orphan transactions
mapOrphanTransactions.clear();
mapOrphanTransactionsByPrev.clear();
}
} instance_of_cnetprocessingcleanup;