// 2017-08-09 02:19:06 +02:00
// Copyright (c) 2009-2010 Satoshi Nakamoto
// Copyright (c) 2009-2016 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
# include "net_processing.h"
# include "alert.h"
# include "addrman.h"
# include "arith_uint256.h"
# include "chainparams.h"
# include "consensus/validation.h"
# include "hash.h"
# include "init.h"
# include "validation.h"
# include "merkleblock.h"
# include "net.h"
# include "netbase.h"
# include "policy/fees.h"
# include "policy/policy.h"
# include "primitives/block.h"
# include "primitives/transaction.h"
# include "random.h"
# include "tinyformat.h"
# include "txmempool.h"
# include "ui_interface.h"
# include "util.h"
# include "utilmoneystr.h"
# include "utilstrencodings.h"
# include "validationinterface.h"
# include "spork.h"
# include "governance.h"
# include "instantx.h"
# include "masternode-payments.h"
# include "masternode-sync.h"
# include "masternodeman.h"
# include "privatesend-client.h"
# include "privatesend-server.h"
# include <boost/thread.hpp>
using namespace std ;
# if defined(NDEBUG)
# error "Dash Core cannot be compiled without assertions."
# endif
int64_t nTimeBestReceived = 0; // Used only to inform the wallet of when we last received a block

/** An orphan transaction (missing one or more parent inputs) together with the peer it came from. */
struct COrphanTx {
    CTransaction tx;
    NodeId fromPeer; //!< Peer that sent us this orphan; used for per-peer eviction on disconnect.
};
map<uint256, COrphanTx> mapOrphanTransactions GUARDED_BY(cs_main);
// Index from parent txid to the set of orphan txids that spend it.
map<uint256, set<uint256> > mapOrphanTransactionsByPrev GUARDED_BY(cs_main);
void EraseOrphansFor(NodeId peer) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
// Internal stuff
namespace {
    /** Number of nodes with fSyncStarted. */
    int nSyncStarted = 0;

    /**
     * Sources of received blocks, saved to be able to send them reject
     * messages or ban them when processing happens afterwards. Protected by
     * cs_main.
     */
    map<uint256, NodeId> mapBlockSource;

    /**
     * Filter for transactions that were recently rejected by
     * AcceptToMemoryPool. These are not rerequested until the chain tip
     * changes, at which point the entire filter is reset. Protected by
     * cs_main.
     *
     * Without this filter we'd be re-requesting txs from each of our peers,
     * increasing bandwidth consumption considerably. For instance, with 100
     * peers, half of which relay a tx we don't accept, that might be a 50x
     * bandwidth increase. A flooding attacker attempting to roll-over the
     * filter using minimum-sized, 60 byte, transactions might manage to send
     * 1000/sec if we have fast peers, so we pick 120,000 to give our peers a
     * two minute window to send invs to us.
     *
     * Decreasing the false positive rate is fairly cheap, so we pick one in a
     * million to make it highly unlikely for users to have issues with this
     * filter.
     *
     * Memory used: 1.7MB
     */
    boost::scoped_ptr<CRollingBloomFilter> recentRejects;
    uint256 hashRecentRejectsChainTip;

    /** Blocks that are in flight, and that are in the queue to be downloaded. Protected by cs_main. */
    struct QueuedBlock {
        uint256 hash;
        CBlockIndex* pindex;    //!< Optional.
        bool fValidatedHeaders; //!< Whether this block has validated headers at the time of request.
    };
    map<uint256, pair<NodeId, list<QueuedBlock>::iterator> > mapBlocksInFlight;

    /** Number of preferable block download peers. */
    int nPreferredDownload = 0;

    /** Number of peers from which we're downloading blocks. */
    int nPeersWithValidatedDownloads = 0;
} // anon namespace
//////////////////////////////////////////////////////////////////////////////
//
// Registration of network node signals.
//
namespace {
/** A deferred block rejection, queued so the reject message can be sent to the source peer later. */
struct CBlockReject {
    unsigned char chRejectCode; //!< One of the REJECT_* codes.
    string strRejectReason;     //!< Human-readable reason, truncated to MAX_REJECT_MESSAGE_LENGTH by callers.
    uint256 hashBlock;          //!< Hash of the rejected block.
};
/**
* Maintain validation - specific state about nodes , protected by cs_main , instead
* by CNode ' s own locks . This simplifies asynchronous operation , where
* processing of incoming data is done after the ProcessMessage call returns ,
* and we ' re no longer holding the node ' s locks .
*/
struct CNodeState {
//! The peer's address
const CService address ;
//! Whether we have a fully established connection.
bool fCurrentlyConnected ;
//! Accumulated misbehaviour score for this peer.
int nMisbehavior ;
//! Whether this peer should be disconnected and banned (unless whitelisted).
bool fShouldBan ;
//! String name of this peer (debugging/logging purposes).
const std : : string name ;
//! List of asynchronously-determined block rejections to notify this peer about.
std : : vector < CBlockReject > rejects ;
//! The best known block we know this peer has announced.
CBlockIndex * pindexBestKnownBlock ;
//! The hash of the last unknown block this peer has announced.
uint256 hashLastUnknownBlock ;
//! The last full block we both have.
CBlockIndex * pindexLastCommonBlock ;
//! The best header we have sent our peer.
CBlockIndex * pindexBestHeaderSent ;
//! Whether we've started headers synchronization with this peer.
bool fSyncStarted ;
//! Since when we're stalling block download progress (in microseconds), or 0.
int64_t nStallingSince ;
list < QueuedBlock > vBlocksInFlight ;
//! When the first entry in vBlocksInFlight started downloading. Don't care when vBlocksInFlight is empty.
int64_t nDownloadingSince ;
int nBlocksInFlight ;
int nBlocksInFlightValidHeaders ;
//! Whether we consider this a preferred download peer.
bool fPreferredDownload ;
//! Whether this peer wants invs or headers (when possible) for block announcements.
bool fPreferHeaders ;
CNodeState ( CAddress addrIn , std : : string addrNameIn ) : address ( addrIn ) , name ( addrNameIn ) {
fCurrentlyConnected = false ;
nMisbehavior = 0 ;
fShouldBan = false ;
pindexBestKnownBlock = NULL ;
hashLastUnknownBlock . SetNull ( ) ;
pindexLastCommonBlock = NULL ;
pindexBestHeaderSent = NULL ;
fSyncStarted = false ;
nStallingSince = 0 ;
nDownloadingSince = 0 ;
nBlocksInFlight = 0 ;
nBlocksInFlightValidHeaders = 0 ;
fPreferredDownload = false ;
fPreferHeaders = false ;
}
} ;
/** Map maintaining per-node state. Requires cs_main. */
map<NodeId, CNodeState> mapNodeState;

// Requires cs_main.
// Returns the CNodeState for the given peer, or NULL if the peer is unknown.
CNodeState* State(NodeId pnode) {
    map<NodeId, CNodeState>::iterator it = mapNodeState.find(pnode);
    if (it == mapNodeState.end())
        return NULL;
    return &it->second;
}
// Requires cs_main.
// Recomputes the peer's preferred-download flag and keeps the global counter in sync.
void UpdatePreferredDownload(CNode* node, CNodeState* state)
{
    nPreferredDownload -= state->fPreferredDownload;

    // Whether this node should be marked as a preferred download node.
    // Outbound or whitelisted peers that are neither one-shot nor SPV clients qualify.
    state->fPreferredDownload = (!node->fInbound || node->fWhitelisted) && !node->fOneShot && !node->fClient;

    nPreferredDownload += state->fPreferredDownload;
}
// Sends our VERSION message to the given peer to start the handshake.
void PushNodeVersion(CNode* pnode, CConnman& connman, int64_t nTime)
{
    ServiceFlags nLocalNodeServices = pnode->GetLocalServices();
    uint64_t nonce = pnode->GetLocalNonce();
    int nNodeStartingHeight = pnode->GetMyStartingHeight();
    NodeId nodeid = pnode->GetId();
    CAddress addr = pnode->addr;

    // Don't leak proxy/unroutable addresses back to the peer.
    CAddress addrYou = (addr.IsRoutable() && !IsProxy(addr) ? addr : CAddress(CService("0.0.0.0", 0), addr.nServices));
    CAddress addrMe = GetLocalAddress(&addr, nLocalNodeServices);

    connman.PushMessageWithVersion(pnode, INIT_PROTO_VERSION, NetMsgType::VERSION, PROTOCOL_VERSION, (uint64_t)nLocalNodeServices, nTime, addrYou, addrMe,
            nonce, strSubVersion, nNodeStartingHeight, ::fRelayTxes);

    if (fLogIPs)
        LogPrint("net", "send version message: version %d, blocks=%d, us=%s, them=%s, peer=%d\n", PROTOCOL_VERSION, nNodeStartingHeight, addrMe.ToString(), addrYou.ToString(), nodeid);
    else
        LogPrint("net", "send version message: version %d, blocks=%d, us=%s, peer=%d\n", PROTOCOL_VERSION, nNodeStartingHeight, addrMe.ToString(), nodeid);
}
// Creates per-peer validation state and, for outbound peers, initiates the version handshake.
void InitializeNode(CNode* pnode, CConnman& connman) {
    CAddress addr = pnode->addr;
    std::string addrName = pnode->addrName;
    NodeId nodeid = pnode->GetId();
    {
        LOCK(cs_main);
        // Node ids are assigned in increasing order, so hinting at end() keeps insertion O(1).
        mapNodeState.emplace_hint(mapNodeState.end(), std::piecewise_construct, std::forward_as_tuple(nodeid), std::forward_as_tuple(addr, std::move(addrName)));
    }
    // Inbound peers speak first; we only push our version to peers we connected to.
    if (!pnode->fInbound)
        PushNodeVersion(pnode, connman, GetTime());
}
// Tears down per-peer state when a peer disconnects, updating all global bookkeeping.
// Sets fUpdateConnectionTime to true iff the peer completed the handshake without misbehaving.
void FinalizeNode(NodeId nodeid, bool& fUpdateConnectionTime) {
    fUpdateConnectionTime = false;
    LOCK(cs_main);
    CNodeState* state = State(nodeid);

    if (state->fSyncStarted)
        nSyncStarted--;

    if (state->nMisbehavior == 0 && state->fCurrentlyConnected) {
        fUpdateConnectionTime = true;
    }

    // Forget any blocks we had requested from this peer.
    BOOST_FOREACH(const QueuedBlock& entry, state->vBlocksInFlight) {
        mapBlocksInFlight.erase(entry.hash);
    }
    EraseOrphansFor(nodeid);
    nPreferredDownload -= state->fPreferredDownload;
    nPeersWithValidatedDownloads -= (state->nBlocksInFlightValidHeaders != 0);
    assert(nPeersWithValidatedDownloads >= 0);

    mapNodeState.erase(nodeid);

    if (mapNodeState.empty()) {
        // Do a consistency check after the last peer is removed.
        assert(mapBlocksInFlight.empty());
        assert(nPreferredDownload == 0);
        assert(nPeersWithValidatedDownloads == 0);
    }
}
// Requires cs_main.
// Returns a bool indicating whether we requested this block.
bool MarkBlockAsReceived ( const uint256 & hash ) {
map < uint256 , pair < NodeId , list < QueuedBlock > : : iterator > > : : iterator itInFlight = mapBlocksInFlight . find ( hash ) ;
if ( itInFlight ! = mapBlocksInFlight . end ( ) ) {
CNodeState * state = State ( itInFlight - > second . first ) ;
state - > nBlocksInFlightValidHeaders - = itInFlight - > second . second - > fValidatedHeaders ;
if ( state - > nBlocksInFlightValidHeaders = = 0 & & itInFlight - > second . second - > fValidatedHeaders ) {
// Last validated block on the queue was received.
nPeersWithValidatedDownloads - - ;
}
if ( state - > vBlocksInFlight . begin ( ) = = itInFlight - > second . second ) {
// First block on the queue was received, update the start download time for the next one
state - > nDownloadingSince = std : : max ( state - > nDownloadingSince , GetTimeMicros ( ) ) ;
}
state - > vBlocksInFlight . erase ( itInFlight - > second . second ) ;
state - > nBlocksInFlight - - ;
state - > nStallingSince = 0 ;
mapBlocksInFlight . erase ( itInFlight ) ;
return true ;
}
return false ;
}
// Requires cs_main.
// Records that we have requested the given block from the given peer, updating stall/progress counters.
void MarkBlockAsInFlight(NodeId nodeid, const uint256& hash, const Consensus::Params& consensusParams, CBlockIndex* pindex = NULL) {
    CNodeState* state = State(nodeid);
    assert(state != NULL);

    // Make sure it's not listed somewhere already.
    MarkBlockAsReceived(hash);

    QueuedBlock newentry = {hash, pindex, pindex != NULL};
    list<QueuedBlock>::iterator it = state->vBlocksInFlight.insert(state->vBlocksInFlight.end(), newentry);
    state->nBlocksInFlight++;
    state->nBlocksInFlightValidHeaders += newentry.fValidatedHeaders;
    if (state->nBlocksInFlight == 1) {
        // We're starting a block download (batch) from this peer.
        state->nDownloadingSince = GetTimeMicros();
    }
    if (state->nBlocksInFlightValidHeaders == 1 && pindex != NULL) {
        nPeersWithValidatedDownloads++;
    }
    mapBlocksInFlight[hash] = std::make_pair(nodeid, it);
}
/** Check whether the last unknown block a peer advertised is not yet known. */
void ProcessBlockAvailability(NodeId nodeid) {
    CNodeState* state = State(nodeid);
    assert(state != NULL);

    if (!state->hashLastUnknownBlock.IsNull()) {
        BlockMap::iterator itOld = mapBlockIndex.find(state->hashLastUnknownBlock);
        if (itOld != mapBlockIndex.end() && itOld->second->nChainWork > 0) {
            // The previously-unknown block is now in our index; promote it to
            // pindexBestKnownBlock if it has at least as much work.
            if (state->pindexBestKnownBlock == NULL || itOld->second->nChainWork >= state->pindexBestKnownBlock->nChainWork)
                state->pindexBestKnownBlock = itOld->second;
            state->hashLastUnknownBlock.SetNull();
        }
    }
}
/** Update tracking information about which blocks a peer is assumed to have. */
void UpdateBlockAvailability(NodeId nodeid, const uint256& hash) {
    CNodeState* state = State(nodeid);
    assert(state != NULL);

    ProcessBlockAvailability(nodeid);

    BlockMap::iterator it = mapBlockIndex.find(hash);
    if (it != mapBlockIndex.end() && it->second->nChainWork > 0) {
        // An actually better block was announced.
        if (state->pindexBestKnownBlock == NULL || it->second->nChainWork >= state->pindexBestKnownBlock->nChainWork)
            state->pindexBestKnownBlock = it->second;
    } else {
        // An unknown block was announced; just assume that the latest one is the best one.
        state->hashLastUnknownBlock = hash;
    }
}
// Requires cs_main
bool CanDirectFetch ( const Consensus : : Params & consensusParams )
{
return chainActive . Tip ( ) - > GetBlockTime ( ) > GetAdjustedTime ( ) - consensusParams . nPowTargetSpacing * 20 ;
}
// Requires cs_main
// True if pindex lies on the chain of the peer's best known block, or on the
// chain of the best header we have already sent them.
bool PeerHasHeader(CNodeState* state, CBlockIndex* pindex)
{
    if (state->pindexBestKnownBlock && pindex == state->pindexBestKnownBlock->GetAncestor(pindex->nHeight))
        return true;
    if (state->pindexBestHeaderSent && pindex == state->pindexBestHeaderSent->GetAncestor(pindex->nHeight))
        return true;
    return false;
}
/** Find the last common ancestor two blocks have.
 *  Both pa and pb must be non-NULL. */
CBlockIndex* LastCommonAncestor(CBlockIndex* pa, CBlockIndex* pb) {
    // First bring both pointers to the same height.
    if (pa->nHeight > pb->nHeight) {
        pa = pa->GetAncestor(pb->nHeight);
    } else if (pb->nHeight > pa->nHeight) {
        pb = pb->GetAncestor(pa->nHeight);
    }

    // Then walk back in lock-step until the chains join.
    while (pa != pb && pa && pb) {
        pa = pa->pprev;
        pb = pb->pprev;
    }

    // Eventually all chain branches meet at the genesis block.
    assert(pa == pb);
    return pa;
}
/** Update pindexLastCommonBlock and add not-in-flight missing successors to vBlocks, until it has
* at most count entries . */
void FindNextBlocksToDownload ( NodeId nodeid , unsigned int count , std : : vector < CBlockIndex * > & vBlocks , NodeId & nodeStaller ) {
if ( count = = 0 )
return ;
vBlocks . reserve ( vBlocks . size ( ) + count ) ;
CNodeState * state = State ( nodeid ) ;
assert ( state ! = NULL ) ;
// Make sure pindexBestKnownBlock is up to date, we'll need it.
ProcessBlockAvailability ( nodeid ) ;
if ( state - > pindexBestKnownBlock = = NULL | | state - > pindexBestKnownBlock - > nChainWork < chainActive . Tip ( ) - > nChainWork ) {
// This peer has nothing interesting.
return ;
}
if ( state - > pindexLastCommonBlock = = NULL ) {
// Bootstrap quickly by guessing a parent of our best tip is the forking point.
// Guessing wrong in either direction is not a problem.
state - > pindexLastCommonBlock = chainActive [ std : : min ( state - > pindexBestKnownBlock - > nHeight , chainActive . Height ( ) ) ] ;
}
// If the peer reorganized, our previous pindexLastCommonBlock may not be an ancestor
// of its current tip anymore. Go back enough to fix that.
state - > pindexLastCommonBlock = LastCommonAncestor ( state - > pindexLastCommonBlock , state - > pindexBestKnownBlock ) ;
if ( state - > pindexLastCommonBlock = = state - > pindexBestKnownBlock )
return ;
std : : vector < CBlockIndex * > vToFetch ;
CBlockIndex * pindexWalk = state - > pindexLastCommonBlock ;
// Never fetch further than the best block we know the peer has, or more than BLOCK_DOWNLOAD_WINDOW + 1 beyond the last
// linked block we have in common with this peer. The +1 is so we can detect stalling, namely if we would be able to
// download that next block if the window were 1 larger.
int nWindowEnd = state - > pindexLastCommonBlock - > nHeight + BLOCK_DOWNLOAD_WINDOW ;
int nMaxHeight = std : : min < int > ( state - > pindexBestKnownBlock - > nHeight , nWindowEnd + 1 ) ;
NodeId waitingfor = - 1 ;
while ( pindexWalk - > nHeight < nMaxHeight ) {
// Read up to 128 (or more, if more blocks than that are needed) successors of pindexWalk (towards
// pindexBestKnownBlock) into vToFetch. We fetch 128, because CBlockIndex::GetAncestor may be as expensive
// as iterating over ~100 CBlockIndex* entries anyway.
int nToFetch = std : : min ( nMaxHeight - pindexWalk - > nHeight , std : : max < int > ( count - vBlocks . size ( ) , 128 ) ) ;
vToFetch . resize ( nToFetch ) ;
pindexWalk = state - > pindexBestKnownBlock - > GetAncestor ( pindexWalk - > nHeight + nToFetch ) ;
vToFetch [ nToFetch - 1 ] = pindexWalk ;
for ( unsigned int i = nToFetch - 1 ; i > 0 ; i - - ) {
vToFetch [ i - 1 ] = vToFetch [ i ] - > pprev ;
}
// Iterate over those blocks in vToFetch (in forward direction), adding the ones that
// are not yet downloaded and not in flight to vBlocks. In the mean time, update
// pindexLastCommonBlock as long as all ancestors are already downloaded, or if it's
// already part of our chain (and therefore don't need it even if pruned).
BOOST_FOREACH ( CBlockIndex * pindex , vToFetch ) {
if ( ! pindex - > IsValid ( BLOCK_VALID_TREE ) ) {
// We consider the chain that this peer is on invalid.
return ;
}
if ( pindex - > nStatus & BLOCK_HAVE_DATA | | chainActive . Contains ( pindex ) ) {
if ( pindex - > nChainTx )
state - > pindexLastCommonBlock = pindex ;
} else if ( mapBlocksInFlight . count ( pindex - > GetBlockHash ( ) ) = = 0 ) {
// The block is not already downloaded, and not yet in flight.
if ( pindex - > nHeight > nWindowEnd ) {
// We reached the end of the window.
if ( vBlocks . size ( ) = = 0 & & waitingfor ! = nodeid ) {
// We aren't able to fetch anything, but we would be if the download window was one larger.
nodeStaller = waitingfor ;
}
return ;
}
vBlocks . push_back ( pindex ) ;
if ( vBlocks . size ( ) = = count ) {
return ;
}
} else if ( waitingfor = = - 1 ) {
// This is the first already-in-flight block.
waitingfor = mapBlocksInFlight [ pindex - > GetBlockHash ( ) ] . first ;
}
}
}
}
} // anon namespace
// Copies the validation-specific statistics for a peer into stats.
// Returns false if the peer id is unknown.
bool GetNodeStateStats(NodeId nodeid, CNodeStateStats& stats) {
    LOCK(cs_main);
    CNodeState* state = State(nodeid);
    if (state == NULL)
        return false;
    stats.nMisbehavior = state->nMisbehavior;
    stats.nSyncHeight = state->pindexBestKnownBlock ? state->pindexBestKnownBlock->nHeight : -1;
    stats.nCommonHeight = state->pindexLastCommonBlock ? state->pindexLastCommonBlock->nHeight : -1;
    BOOST_FOREACH(const QueuedBlock& queue, state->vBlocksInFlight) {
        if (queue.pindex)
            stats.vHeightInFlight.push_back(queue.pindex->nHeight);
    }
    return true;
}
// Hooks this module's handlers into the network layer's signal dispatch.
void RegisterNodeSignals(CNodeSignals& nodeSignals)
{
    nodeSignals.ProcessMessages.connect(&ProcessMessages);
    nodeSignals.SendMessages.connect(&SendMessages);
    nodeSignals.InitializeNode.connect(&InitializeNode);
    nodeSignals.FinalizeNode.connect(&FinalizeNode);
}
// Disconnects the handlers registered by RegisterNodeSignals.
void UnregisterNodeSignals(CNodeSignals& nodeSignals)
{
    nodeSignals.ProcessMessages.disconnect(&ProcessMessages);
    nodeSignals.SendMessages.disconnect(&SendMessages);
    nodeSignals.InitializeNode.disconnect(&InitializeNode);
    nodeSignals.FinalizeNode.disconnect(&FinalizeNode);
}
//////////////////////////////////////////////////////////////////////////////
//
// mapOrphanTransactions
//
// Stores a transaction whose inputs are not yet known, indexed by its hash and
// by each parent txid. Returns false if it was already stored or is too large.
bool AddOrphanTx(const CTransaction& tx, NodeId peer) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
{
    uint256 hash = tx.GetHash();
    if (mapOrphanTransactions.count(hash))
        return false;

    // Ignore big transactions, to avoid a
    // send-big-orphans memory exhaustion attack. If a peer has a legitimate
    // large transaction with a missing parent then we assume
    // it will rebroadcast it later, after the parent transaction(s)
    // have been mined or received.
    // 10,000 orphans, each of which is at most 5,000 bytes big is
    // at most 50 megabytes of orphans:
    unsigned int sz = tx.GetSerializeSize(SER_NETWORK, CTransaction::CURRENT_VERSION);
    if (sz > 5000)
    {
        LogPrint("mempool", "ignoring large orphan tx (size: %u, hash: %s)\n", sz, hash.ToString());
        return false;
    }

    mapOrphanTransactions[hash].tx = tx;
    mapOrphanTransactions[hash].fromPeer = peer;
    BOOST_FOREACH(const CTxIn& txin, tx.vin)
        mapOrphanTransactionsByPrev[txin.prevout.hash].insert(hash);

    LogPrint("mempool", "stored orphan tx %s (mapsz %u prevsz %u)\n", hash.ToString(),
             mapOrphanTransactions.size(), mapOrphanTransactionsByPrev.size());
    return true;
}
// Removes a single orphan and cleans up its entries in the by-parent index.
void static EraseOrphanTx(uint256 hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
{
    map<uint256, COrphanTx>::iterator it = mapOrphanTransactions.find(hash);
    if (it == mapOrphanTransactions.end())
        return;
    BOOST_FOREACH(const CTxIn& txin, it->second.tx.vin)
    {
        map<uint256, set<uint256> >::iterator itPrev = mapOrphanTransactionsByPrev.find(txin.prevout.hash);
        if (itPrev == mapOrphanTransactionsByPrev.end())
            continue;
        itPrev->second.erase(hash);
        // Drop the parent entry entirely once no orphan references it.
        if (itPrev->second.empty())
            mapOrphanTransactionsByPrev.erase(itPrev);
    }
    mapOrphanTransactions.erase(it);
}
// Erases every stored orphan that was received from the given peer.
void EraseOrphansFor(NodeId peer)
{
    int nErased = 0;
    map<uint256, COrphanTx>::iterator iter = mapOrphanTransactions.begin();
    while (iter != mapOrphanTransactions.end())
    {
        map<uint256, COrphanTx>::iterator maybeErase = iter++; // increment to avoid iterator becoming invalid
        if (maybeErase->second.fromPeer == peer)
        {
            EraseOrphanTx(maybeErase->second.tx.GetHash());
            ++nErased;
        }
    }
    if (nErased > 0) LogPrint("mempool", "Erased %d orphan tx from peer %d\n", nErased, peer);
}
// Evicts random orphans until at most nMaxOrphans remain; returns the number evicted.
unsigned int LimitOrphanTxSize(unsigned int nMaxOrphans) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
{
    unsigned int nEvicted = 0;
    while (mapOrphanTransactions.size() > nMaxOrphans)
    {
        // Evict a random orphan: pick a random point in hash space and take the
        // first entry at or after it (wrapping to begin() past the end).
        uint256 randomhash = GetRandHash();
        map<uint256, COrphanTx>::iterator it = mapOrphanTransactions.lower_bound(randomhash);
        if (it == mapOrphanTransactions.end())
            it = mapOrphanTransactions.begin();
        EraseOrphanTx(it->first);
        ++nEvicted;
    }
    return nEvicted;
}
// Requires cs_main.
// Increases a peer's misbehaviour score; flags the peer for banning when the
// score first crosses the -banscore threshold.
void Misbehaving(NodeId pnode, int howmuch)
{
    if (howmuch == 0)
        return;

    CNodeState* state = State(pnode);
    if (state == NULL)
        return;

    state->nMisbehavior += howmuch;
    int banscore = GetArg("-banscore", DEFAULT_BANSCORE_THRESHOLD);
    if (state->nMisbehavior >= banscore && state->nMisbehavior - howmuch < banscore)
    {
        LogPrintf("%s: %s (%d -> %d) BAN THRESHOLD EXCEEDED\n", __func__, state->name, state->nMisbehavior - howmuch, state->nMisbehavior);
        state->fShouldBan = true;
    } else
        LogPrintf("%s: %s (%d -> %d)\n", __func__, state->name, state->nMisbehavior - howmuch, state->nMisbehavior);
}
//////////////////////////////////////////////////////////////////////////////
//
// blockchain -> download logic notification
//
PeerLogicValidation::PeerLogicValidation(CConnman* connmanIn) : connman(connmanIn) {
    // Initialize global variables that cannot be constructed at startup.
    // 120,000 entries / 0.000001 fpp matches the sizing rationale documented at recentRejects' declaration.
    recentRejects.reset(new CRollingBloomFilter(120000, 0.000001));
}
void PeerLogicValidation : : UpdatedBlockTip ( const CBlockIndex * pindexNew , const CBlockIndex * pindexFork , bool fInitialDownload ) {
const int nNewHeight = pindexNew - > nHeight ;
connman - > SetBestHeight ( nNewHeight ) ;
if ( ! fInitialDownload ) {
// Find the hashes of all blocks that weren't previously in the best chain.
std : : vector < uint256 > vHashes ;
const CBlockIndex * pindexToAnnounce = pindexNew ;
while ( pindexToAnnounce ! = pindexFork ) {
vHashes . push_back ( pindexToAnnounce - > GetBlockHash ( ) ) ;
pindexToAnnounce = pindexToAnnounce - > pprev ;
if ( vHashes . size ( ) = = MAX_BLOCKS_TO_ANNOUNCE ) {
// Limit announcements in case of a huge reorganization.
// Rely on the peer's synchronization mechanism in that case.
break ;
}
}
// Relay inventory, but don't relay old inventory during initial block download.
connman - > ForEachNode ( [ nNewHeight , & vHashes ] ( CNode * pnode ) {
if ( nNewHeight > ( pnode - > nStartingHeight ! = - 1 ? pnode - > nStartingHeight - 2000 : 0 ) ) {
BOOST_REVERSE_FOREACH ( const uint256 & hash , vHashes ) {
pnode - > PushBlockHash ( hash ) ;
}
}
} ) ;
}
nTimeBestReceived = GetTime ( ) ;
}
void PeerLogicValidation : : BlockChecked ( const CBlock & block , const CValidationState & state ) {
LOCK ( cs_main ) ;
const uint256 hash ( block . GetHash ( ) ) ;
std : : map < uint256 , NodeId > : : iterator it = mapBlockSource . find ( hash ) ;
int nDoS = 0 ;
if ( state . IsInvalid ( nDoS ) ) {
if ( it ! = mapBlockSource . end ( ) & & State ( it - > second ) ) {
assert ( state . GetRejectCode ( ) < REJECT_INTERNAL ) ; // Blocks are never rejected with internal reject codes
CBlockReject reject = { ( unsigned char ) state . GetRejectCode ( ) , state . GetRejectReason ( ) . substr ( 0 , MAX_REJECT_MESSAGE_LENGTH ) , hash } ;
State ( it - > second ) - > rejects . push_back ( reject ) ;
if ( nDoS > 0 )
Misbehaving ( it - > second , nDoS ) ;
}
}
if ( it ! = mapBlockSource . end ( ) )
mapBlockSource . erase ( it ) ;
}
//////////////////////////////////////////////////////////////////////////////
//
// Messages
//
// Requires cs_main (via EXCLUSIVE_LOCKS_REQUIRED).
// Returns true when we already have (or have rejected) the inventory item, so
// it should not be re-requested from the announcing peer.
bool static AlreadyHave(const CInv& inv) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
{
    switch (inv.type)
    {
    case MSG_TX:
        {
            assert(recentRejects);
            if (chainActive.Tip()->GetBlockHash() != hashRecentRejectsChainTip)
            {
                // If the chain tip has changed previously rejected transactions
                // might be now valid, e.g. due to a nLockTime'd tx becoming valid,
                // or a double-spend. Reset the rejects filter and give those
                // txs a second chance.
                hashRecentRejectsChainTip = chainActive.Tip()->GetBlockHash();
                recentRejects->reset();
            }

            return recentRejects->contains(inv.hash) ||
                   mempool.exists(inv.hash) ||
                   mapOrphanTransactions.count(inv.hash) ||
                   pcoinsTip->HaveCoins(inv.hash);
        }

    case MSG_BLOCK:
        return mapBlockIndex.count(inv.hash);

    /*
        Dash Related Inventory Messages
        --
        We shouldn't update the sync times for each of the messages when we already have it.
        We're going to be asking many nodes upfront for the full inventory list, so we'll get duplicates of these.
        We want to only update the time on new hits, so that we can time out appropriately if needed.
    */
    case MSG_TXLOCK_REQUEST:
        return instantsend.AlreadyHave(inv.hash);

    case MSG_TXLOCK_VOTE:
        return instantsend.AlreadyHave(inv.hash);

    case MSG_SPORK:
        return mapSporks.count(inv.hash);

    case MSG_MASTERNODE_PAYMENT_VOTE:
        return mnpayments.mapMasternodePaymentVotes.count(inv.hash);

    case MSG_MASTERNODE_PAYMENT_BLOCK:
        {
            BlockMap::iterator mi = mapBlockIndex.find(inv.hash);
            return mi != mapBlockIndex.end() && mnpayments.mapMasternodeBlocks.find(mi->second->nHeight) != mnpayments.mapMasternodeBlocks.end();
        }

    case MSG_MASTERNODE_ANNOUNCE:
        return mnodeman.mapSeenMasternodeBroadcast.count(inv.hash) && !mnodeman.IsMnbRecoveryRequested(inv.hash);

    case MSG_MASTERNODE_PING:
        return mnodeman.mapSeenMasternodePing.count(inv.hash);

    case MSG_DSTX:
        return static_cast<bool>(CPrivateSend::GetDSTX(inv.hash));

    case MSG_GOVERNANCE_OBJECT:
    case MSG_GOVERNANCE_OBJECT_VOTE:
        return !governance.ConfirmInventoryRequest(inv);

    case MSG_MASTERNODE_VERIFY:
        return mnodeman.mapSeenMasternodeVerification.count(inv.hash);
    }

    // Don't know what it is, just say we already got one
    return true;
}
static void RelayAddress ( const CAddress & addr , bool fReachable , CConnman & connman )
{
int nRelayNodes = fReachable ? 2 : 1 ; // limited relaying of addresses outside our network(s)
// Relay to a limited number of other nodes
// Use deterministic randomness to send to the same nodes for 24 hours
// at a time so the addrKnowns of the chosen nodes prevent repeats
static uint256 hashSalt ;
if ( hashSalt . IsNull ( ) )
hashSalt = GetRandHash ( ) ;
uint64_t hashAddr = addr . GetHash ( ) ;
uint256 hashRand = ArithToUint256 ( UintToArith256 ( hashSalt ) ^ ( hashAddr < < 32 ) ^ ( ( GetTime ( ) + hashAddr ) / ( 24 * 60 * 60 ) ) ) ;
hashRand = Hash ( BEGIN ( hashRand ) , END ( hashRand ) ) ;
std : : multimap < uint256 , CNode * > mapMix ;
auto sortfunc = [ & mapMix , & hashRand ] ( CNode * pnode ) {
if ( pnode - > nVersion > = CADDR_TIME_VERSION ) {
unsigned int nPointer ;
memcpy ( & nPointer , & pnode , sizeof ( nPointer ) ) ;
uint256 hashKey = ArithToUint256 ( UintToArith256 ( hashRand ) ^ nPointer ) ;
hashKey = Hash ( BEGIN ( hashKey ) , END ( hashKey ) ) ;
mapMix . emplace ( hashKey , pnode ) ;
}
} ;
auto pushfunc = [ & addr , & mapMix , & nRelayNodes ] {
for ( auto mi = mapMix . begin ( ) ; mi ! = mapMix . end ( ) & & nRelayNodes - - > 0 ; + + mi )
mi - > second - > PushAddress ( addr ) ;
} ;
connman . ForEachNodeThen ( std : : move ( sortfunc ) , std : : move ( pushfunc ) ) ;
}
// 2017-08-09 18:06:31 +02:00
void static ProcessGetData ( CNode * pfrom , const Consensus : : Params & consensusParams , CConnman & connman , std : : atomic < bool > & interruptMsgProc )
// 2017-08-09 02:19:06 +02:00
{
std : : deque < CInv > : : iterator it = pfrom - > vRecvGetData . begin ( ) ;
unsigned int nMaxSendBufferSize = connman . GetSendBufferSize ( ) ;
vector < CInv > vNotFound ;
LOCK ( cs_main ) ;
while ( it ! = pfrom - > vRecvGetData . end ( ) ) {
// Don't bother if send buffer is too full to respond anyway
if ( pfrom - > nSendSize > = nMaxSendBufferSize )
break ;
const CInv & inv = * it ;
LogPrint ( " net " , " ProcessGetData -- inv = %s \n " , inv . ToString ( ) ) ;
{
// 2017-08-09 18:06:31 +02:00
if ( interruptMsgProc )
return ;
// 2017-08-09 02:19:06 +02:00
it + + ;
if ( inv . type = = MSG_BLOCK | | inv . type = = MSG_FILTERED_BLOCK )
{
bool send = false ;
BlockMap : : iterator mi = mapBlockIndex . find ( inv . hash ) ;
if ( mi ! = mapBlockIndex . end ( ) )
{
if ( chainActive . Contains ( mi - > second ) ) {
send = true ;
} else {
static const int nOneMonth = 30 * 24 * 60 * 60 ;
// To prevent fingerprinting attacks, only send blocks outside of the active
// chain if they are valid, and no more than a month older (both in time, and in
// best equivalent proof of work) than the best header chain we know about.
send = mi - > second - > IsValid ( BLOCK_VALID_SCRIPTS ) & & ( pindexBestHeader ! = NULL ) & &
( pindexBestHeader - > GetBlockTime ( ) - mi - > second - > GetBlockTime ( ) < nOneMonth ) & &
( GetBlockProofEquivalentTime ( * pindexBestHeader , * mi - > second , * pindexBestHeader , consensusParams ) < nOneMonth ) ;
if ( ! send ) {
LogPrintf ( " %s: ignoring request from peer=%i for old block that isn't in the main chain \n " , __func__ , pfrom - > GetId ( ) ) ;
}
}
}
// disconnect node in case we have reached the outbound limit for serving historical blocks
// never disconnect whitelisted nodes
static const int nOneWeek = 7 * 24 * 60 * 60 ; // assume > 1 week = historical
if ( send & & connman . OutboundTargetReached ( true ) & & ( ( ( pindexBestHeader ! = NULL ) & & ( pindexBestHeader - > GetBlockTime ( ) - mi - > second - > GetBlockTime ( ) > nOneWeek ) ) | | inv . type = = MSG_FILTERED_BLOCK ) & & ! pfrom - > fWhitelisted )
{
LogPrint ( " net " , " historical block serving limit reached, disconnect peer=%d \n " , pfrom - > GetId ( ) ) ;
//disconnect node
pfrom - > fDisconnect = true ;
send = false ;
}
// Pruned nodes may have deleted the block, so check whether
// it's available before trying to send.
if ( send & & ( mi - > second - > nStatus & BLOCK_HAVE_DATA ) ) {
// Send block from disk
CBlock block ;
if ( ! ReadBlockFromDisk ( block , ( * mi ) . second , consensusParams ) )
assert ( ! " cannot load block from disk " ) ;
if ( inv . type = = MSG_BLOCK )
connman . PushMessage ( pfrom , NetMsgType : : BLOCK , block ) ;
else // MSG_FILTERED_BLOCK)
{
LOCK ( pfrom - > cs_filter ) ;
if ( pfrom - > pfilter )
{
CMerkleBlock merkleBlock ( block , * pfrom - > pfilter ) ;
connman . PushMessage ( pfrom , NetMsgType : : MERKLEBLOCK , merkleBlock ) ;
// CMerkleBlock just contains hashes, so also push any transactions in the block the client did not see
// This avoids hurting performance by pointlessly requiring a round-trip
// Note that there is currently no way for a node to request any single transactions we didn't send here -
// they must either disconnect and retry or request the full block.
// Thus, the protocol spec specified allows for us to provide duplicate txn here,
// however we MUST always provide at least what the remote peer needs
typedef std : : pair < unsigned int , uint256 > PairType ;
BOOST_FOREACH ( PairType & pair , merkleBlock . vMatchedTxn )
connman . PushMessage ( pfrom , NetMsgType : : TX , block . vtx [ pair . first ] ) ;
}
// else
// no response
}
// Trigger the peer node to send a getblocks request for the next batch of inventory
if ( inv . hash = = pfrom - > hashContinue )
{
// Bypass PushInventory, this must send even if redundant,
// and we want it right after the last block so they don't
// wait for other stuff first.
vector < CInv > vInv ;
vInv . push_back ( CInv ( MSG_BLOCK , chainActive . Tip ( ) - > GetBlockHash ( ) ) ) ;
connman . PushMessage ( pfrom , NetMsgType : : INV , vInv ) ;
pfrom - > hashContinue . SetNull ( ) ;
}
}
}
else if ( inv . IsKnownType ( ) )
{
// Send stream from relay memory
bool pushed = false ;
{
CDataStream ss ( SER_NETWORK , PROTOCOL_VERSION ) ;
{
LOCK ( cs_mapRelay ) ;
map < CInv , CDataStream > : : iterator mi = mapRelay . find ( inv ) ;
if ( mi ! = mapRelay . end ( ) ) {
ss + = ( * mi ) . second ;
pushed = true ;
}
}
if ( pushed )
connman . PushMessage ( pfrom , inv . GetCommand ( ) , ss ) ;
}
if ( ! pushed & & inv . type = = MSG_TX ) {
CTransaction tx ;
if ( mempool . lookup ( inv . hash , tx ) ) {
CDataStream ss ( SER_NETWORK , PROTOCOL_VERSION ) ;
ss . reserve ( 1000 ) ;
ss < < tx ;
connman . PushMessage ( pfrom , NetMsgType : : TX , ss ) ;
pushed = true ;
}
}
if ( ! pushed & & inv . type = = MSG_TXLOCK_REQUEST ) {
CTxLockRequest txLockRequest ;
if ( instantsend . GetTxLockRequest ( inv . hash , txLockRequest ) ) {
CDataStream ss ( SER_NETWORK , PROTOCOL_VERSION ) ;
ss . reserve ( 1000 ) ;
ss < < txLockRequest ;
connman . PushMessage ( pfrom , NetMsgType : : TXLOCKREQUEST , ss ) ;
pushed = true ;
}
}
if ( ! pushed & & inv . type = = MSG_TXLOCK_VOTE ) {
CTxLockVote vote ;
if ( instantsend . GetTxLockVote ( inv . hash , vote ) ) {
CDataStream ss ( SER_NETWORK , PROTOCOL_VERSION ) ;
ss . reserve ( 1000 ) ;
ss < < vote ;
connman . PushMessage ( pfrom , NetMsgType : : TXLOCKVOTE , ss ) ;
pushed = true ;
}
}
if ( ! pushed & & inv . type = = MSG_SPORK ) {
if ( mapSporks . count ( inv . hash ) ) {
CDataStream ss ( SER_NETWORK , PROTOCOL_VERSION ) ;
ss . reserve ( 1000 ) ;
ss < < mapSporks [ inv . hash ] ;
connman . PushMessage ( pfrom , NetMsgType : : SPORK , ss ) ;
pushed = true ;
}
}
if ( ! pushed & & inv . type = = MSG_MASTERNODE_PAYMENT_VOTE ) {
if ( mnpayments . HasVerifiedPaymentVote ( inv . hash ) ) {
CDataStream ss ( SER_NETWORK , PROTOCOL_VERSION ) ;
ss . reserve ( 1000 ) ;
ss < < mnpayments . mapMasternodePaymentVotes [ inv . hash ] ;
connman . PushMessage ( pfrom , NetMsgType : : MASTERNODEPAYMENTVOTE , ss ) ;
pushed = true ;
}
}
if ( ! pushed & & inv . type = = MSG_MASTERNODE_PAYMENT_BLOCK ) {
BlockMap : : iterator mi = mapBlockIndex . find ( inv . hash ) ;
LOCK ( cs_mapMasternodeBlocks ) ;
if ( mi ! = mapBlockIndex . end ( ) & & mnpayments . mapMasternodeBlocks . count ( mi - > second - > nHeight ) ) {
BOOST_FOREACH ( CMasternodePayee & payee , mnpayments . mapMasternodeBlocks [ mi - > second - > nHeight ] . vecPayees ) {
std : : vector < uint256 > vecVoteHashes = payee . GetVoteHashes ( ) ;
BOOST_FOREACH ( uint256 & hash , vecVoteHashes ) {
if ( mnpayments . HasVerifiedPaymentVote ( hash ) ) {
CDataStream ss ( SER_NETWORK , PROTOCOL_VERSION ) ;
ss . reserve ( 1000 ) ;
ss < < mnpayments . mapMasternodePaymentVotes [ hash ] ;
connman . PushMessage ( pfrom , NetMsgType : : MASTERNODEPAYMENTVOTE , ss ) ;
}
}
}
pushed = true ;
}
}
if ( ! pushed & & inv . type = = MSG_MASTERNODE_ANNOUNCE ) {
if ( mnodeman . mapSeenMasternodeBroadcast . count ( inv . hash ) ) {
CDataStream ss ( SER_NETWORK , PROTOCOL_VERSION ) ;
ss . reserve ( 1000 ) ;
ss < < mnodeman . mapSeenMasternodeBroadcast [ inv . hash ] . second ;
connman . PushMessage ( pfrom , NetMsgType : : MNANNOUNCE , ss ) ;
pushed = true ;
}
}
if ( ! pushed & & inv . type = = MSG_MASTERNODE_PING ) {
if ( mnodeman . mapSeenMasternodePing . count ( inv . hash ) ) {
CDataStream ss ( SER_NETWORK , PROTOCOL_VERSION ) ;
ss . reserve ( 1000 ) ;
ss < < mnodeman . mapSeenMasternodePing [ inv . hash ] ;
connman . PushMessage ( pfrom , NetMsgType : : MNPING , ss ) ;
pushed = true ;
}
}
if ( ! pushed & & inv . type = = MSG_DSTX ) {
CDarksendBroadcastTx dstx = CPrivateSend : : GetDSTX ( inv . hash ) ;
if ( dstx ) {
CDataStream ss ( SER_NETWORK , PROTOCOL_VERSION ) ;
ss . reserve ( 1000 ) ;
ss < < dstx ;
connman . PushMessage ( pfrom , NetMsgType : : DSTX , ss ) ;
pushed = true ;
}
}
if ( ! pushed & & inv . type = = MSG_GOVERNANCE_OBJECT ) {
LogPrint ( " net " , " ProcessGetData -- MSG_GOVERNANCE_OBJECT: inv = %s \n " , inv . ToString ( ) ) ;
CDataStream ss ( SER_NETWORK , PROTOCOL_VERSION ) ;
bool topush = false ;
{
if ( governance . HaveObjectForHash ( inv . hash ) ) {
ss . reserve ( 1000 ) ;
if ( governance . SerializeObjectForHash ( inv . hash , ss ) ) {
topush = true ;
}
}
}
LogPrint ( " net " , " ProcessGetData -- MSG_GOVERNANCE_OBJECT: topush = %d, inv = %s \n " , topush , inv . ToString ( ) ) ;
if ( topush ) {
connman . PushMessage ( pfrom , NetMsgType : : MNGOVERNANCEOBJECT , ss ) ;
pushed = true ;
}
}
if ( ! pushed & & inv . type = = MSG_GOVERNANCE_OBJECT_VOTE ) {
CDataStream ss ( SER_NETWORK , PROTOCOL_VERSION ) ;
bool topush = false ;
{
if ( governance . HaveVoteForHash ( inv . hash ) ) {
ss . reserve ( 1000 ) ;
if ( governance . SerializeVoteForHash ( inv . hash , ss ) ) {
topush = true ;
}
}
}
if ( topush ) {
LogPrint ( " net " , " ProcessGetData -- pushing: inv = %s \n " , inv . ToString ( ) ) ;
connman . PushMessage ( pfrom , NetMsgType : : MNGOVERNANCEOBJECTVOTE , ss ) ;
pushed = true ;
}
}
if ( ! pushed & & inv . type = = MSG_MASTERNODE_VERIFY ) {
if ( mnodeman . mapSeenMasternodeVerification . count ( inv . hash ) ) {
CDataStream ss ( SER_NETWORK , PROTOCOL_VERSION ) ;
ss . reserve ( 1000 ) ;
ss < < mnodeman . mapSeenMasternodeVerification [ inv . hash ] ;
connman . PushMessage ( pfrom , NetMsgType : : MNVERIFY , ss ) ;
pushed = true ;
}
}
if ( ! pushed )
vNotFound . push_back ( inv ) ;
}
// Track requests for our stuff.
GetMainSignals ( ) . Inventory ( inv . hash ) ;
if ( inv . type = = MSG_BLOCK | | inv . type = = MSG_FILTERED_BLOCK )
break ;
}
}
pfrom - > vRecvGetData . erase ( pfrom - > vRecvGetData . begin ( ) , it ) ;
if ( ! vNotFound . empty ( ) ) {
// Let the peer know that we didn't find what it asked for, so it doesn't
// have to wait around forever. Currently only SPV clients actually care
// about this message: it's needed when they are recursively walking the
// dependencies of relevant unconfirmed transactions. SPV clients want to
// do that because they want to know about (and store and rebroadcast and
// risk analyze) the dependencies of transactions relevant to them, without
// having to download the entire memory pool.
connman . PushMessage ( pfrom , NetMsgType : : NOTFOUND , vNotFound ) ;
}
}
// 2017-08-09 18:06:31 +02:00
bool static ProcessMessage ( CNode * pfrom , string strCommand , CDataStream & vRecv , int64_t nTimeReceived , CConnman & connman , std : : atomic < bool > & interruptMsgProc )
// 2017-08-09 02:19:06 +02:00
{
const CChainParams & chainparams = Params ( ) ;
RandAddSeedPerfmon ( ) ;
unsigned int nMaxSendBufferSize = connman . GetSendBufferSize ( ) ;
LogPrint ( " net " , " received: %s (%u bytes) peer=%d \n " , SanitizeString ( strCommand ) , vRecv . size ( ) , pfrom - > id ) ;
if ( mapArgs . count ( " -dropmessagestest " ) & & GetRand ( atoi ( mapArgs [ " -dropmessagestest " ] ) ) = = 0 )
{
LogPrintf ( " dropmessagestest DROPPING RECV MESSAGE \n " ) ;
return true ;
}
if ( ! ( pfrom - > GetLocalServices ( ) & NODE_BLOOM ) & &
( strCommand = = NetMsgType : : FILTERLOAD | |
strCommand = = NetMsgType : : FILTERADD | |
strCommand = = NetMsgType : : FILTERCLEAR ) )
{
if ( pfrom - > nVersion > = NO_BLOOM_VERSION ) {
LOCK ( cs_main ) ;
Misbehaving ( pfrom - > GetId ( ) , 100 ) ;
return false ;
} else if ( GetBoolArg ( " -enforcenodebloom " , false ) ) {
pfrom - > fDisconnect = true ;
return false ;
}
}
if ( strCommand = = NetMsgType : : VERSION )
{
// Feeler connections exist only to verify if address is online.
if ( pfrom - > fFeeler ) {
assert ( pfrom - > fInbound = = false ) ;
pfrom - > fDisconnect = true ;
}
// Each connection can only send one version message
if ( pfrom - > nVersion ! = 0 )
{
connman . PushMessageWithVersion ( pfrom , INIT_PROTO_VERSION , NetMsgType : : REJECT , strCommand , REJECT_DUPLICATE , string ( " Duplicate version message " ) ) ;
LOCK ( cs_main ) ;
Misbehaving ( pfrom - > GetId ( ) , 1 ) ;
return false ;
}
int64_t nTime ;
CAddress addrMe ;
CAddress addrFrom ;
uint64_t nNonce = 1 ;
uint64_t nServiceInt ;
vRecv > > pfrom - > nVersion > > nServiceInt > > nTime > > addrMe ;
pfrom - > nServices = ServiceFlags ( nServiceInt ) ;
if ( ! pfrom - > fInbound )
{
connman . SetServices ( pfrom - > addr , pfrom - > nServices ) ;
}
if ( pfrom - > nServicesExpected & ~ pfrom - > nServices )
{
LogPrint ( " net " , " peer=%d does not offer the expected services (%08x offered, %08x expected); disconnecting \n " , pfrom - > id , pfrom - > nServices , pfrom - > nServicesExpected ) ;
connman . PushMessageWithVersion ( pfrom , INIT_PROTO_VERSION , NetMsgType : : REJECT , strCommand , REJECT_NONSTANDARD ,
strprintf ( " Expected to offer services %08x " , pfrom - > nServicesExpected ) ) ;
pfrom - > fDisconnect = true ;
return false ;
}
if ( pfrom - > nVersion < MIN_PEER_PROTO_VERSION )
{
// disconnect from peers older than this proto version
LogPrintf ( " peer=%d using obsolete version %i; disconnecting \n " , pfrom - > id , pfrom - > nVersion ) ;
connman . PushMessageWithVersion ( pfrom , INIT_PROTO_VERSION , NetMsgType : : REJECT , strCommand , REJECT_OBSOLETE ,
strprintf ( " Version must be %d or greater " , MIN_PEER_PROTO_VERSION ) ) ;
pfrom - > fDisconnect = true ;
return false ;
}
if ( pfrom - > nVersion = = 10300 )
pfrom - > nVersion = 300 ;
if ( ! vRecv . empty ( ) )
vRecv > > addrFrom > > nNonce ;
if ( ! vRecv . empty ( ) ) {
vRecv > > LIMITED_STRING ( pfrom - > strSubVer , MAX_SUBVERSION_LENGTH ) ;
pfrom - > cleanSubVer = SanitizeString ( pfrom - > strSubVer ) ;
}
if ( ! vRecv . empty ( ) )
vRecv > > pfrom - > nStartingHeight ;
if ( ! vRecv . empty ( ) )
vRecv > > pfrom - > fRelayTxes ; // set to true after we get the first filter* message
else
pfrom - > fRelayTxes = true ;
// Disconnect if we connected to ourself
if ( pfrom - > fInbound & & ! connman . CheckIncomingNonce ( nNonce ) )
{
LogPrintf ( " connected to self at %s, disconnecting \n " , pfrom - > addr . ToString ( ) ) ;
pfrom - > fDisconnect = true ;
return true ;
}
pfrom - > addrLocal = addrMe ;
if ( pfrom - > fInbound & & addrMe . IsRoutable ( ) )
{
SeenLocal ( addrMe ) ;
}
// Be shy and don't send version until we hear
if ( pfrom - > fInbound )
PushNodeVersion ( pfrom , connman , GetAdjustedTime ( ) ) ;
pfrom - > fClient = ! ( pfrom - > nServices & NODE_NETWORK ) ;
// Potentially mark this peer as a preferred download peer.
{
LOCK ( cs_main ) ;
UpdatePreferredDownload ( pfrom , State ( pfrom - > GetId ( ) ) ) ;
}
// Change version
connman . PushMessageWithVersion ( pfrom , INIT_PROTO_VERSION , NetMsgType : : VERACK ) ;
pfrom - > SetSendVersion ( min ( pfrom - > nVersion , PROTOCOL_VERSION ) ) ;
if ( ! pfrom - > fInbound )
{
// Advertise our address
if ( fListen & & ! IsInitialBlockDownload ( ) )
{
CAddress addr = GetLocalAddress ( & pfrom - > addr , pfrom - > GetLocalServices ( ) ) ;
if ( addr . IsRoutable ( ) )
{
LogPrintf ( " ProcessMessages: advertising address %s \n " , addr . ToString ( ) ) ;
pfrom - > PushAddress ( addr ) ;
} else if ( IsPeerAddrLocalGood ( pfrom ) ) {
addr . SetIP ( pfrom - > addrLocal ) ;
LogPrintf ( " ProcessMessages: advertising address %s \n " , addr . ToString ( ) ) ;
pfrom - > PushAddress ( addr ) ;
}
}
// Get recent addresses
if ( pfrom - > fOneShot | | pfrom - > nVersion > = CADDR_TIME_VERSION | | connman . GetAddressCount ( ) < 1000 )
{
connman . PushMessage ( pfrom , NetMsgType : : GETADDR ) ;
pfrom - > fGetAddr = true ;
}
connman . MarkAddressGood ( pfrom - > addr ) ;
} else {
if ( ( ( CNetAddr ) pfrom - > addr ) = = ( CNetAddr ) addrFrom )
{
connman . AddNewAddress ( addrFrom , addrFrom ) ;
connman . MarkAddressGood ( addrFrom ) ;
}
}
// Relay alerts
{
LOCK ( cs_mapAlerts ) ;
BOOST_FOREACH ( PAIRTYPE ( const uint256 , CAlert ) & item , mapAlerts )
item . second . RelayTo ( pfrom , connman ) ;
}
pfrom - > fSuccessfullyConnected = true ;
string remoteAddr ;
if ( fLogIPs )
remoteAddr = " , peeraddr= " + pfrom - > addr . ToString ( ) ;
LogPrintf ( " receive version message: %s: version %d, blocks=%d, us=%s, peer=%d%s \n " ,
pfrom - > cleanSubVer , pfrom - > nVersion ,
pfrom - > nStartingHeight , addrMe . ToString ( ) , pfrom - > id ,
remoteAddr ) ;
int64_t nTimeOffset = nTime - GetTime ( ) ;
pfrom - > nTimeOffset = nTimeOffset ;
AddTimeData ( pfrom - > addr , nTimeOffset ) ;
}
else if ( pfrom - > nVersion = = 0 )
{
// Must have a version message before anything else
LOCK ( cs_main ) ;
Misbehaving ( pfrom - > GetId ( ) , 1 ) ;
return false ;
}
else if ( strCommand = = NetMsgType : : VERACK )
{
pfrom - > SetRecvVersion ( min ( pfrom - > nVersion , PROTOCOL_VERSION ) ) ;
// Mark this node as currently connected, so we update its timestamp later.
if ( pfrom - > fNetworkNode ) {
LOCK ( cs_main ) ;
State ( pfrom - > GetId ( ) ) - > fCurrentlyConnected = true ;
}
if ( pfrom - > nVersion > = SENDHEADERS_VERSION ) {
// Tell our peer we prefer to receive headers rather than inv's
// We send this to non-NODE NETWORK peers as well, because even
// non-NODE NETWORK peers can announce blocks (such as pruning
// nodes)
connman . PushMessage ( pfrom , NetMsgType : : SENDHEADERS ) ;
}
}
else if ( strCommand = = NetMsgType : : ADDR )
{
vector < CAddress > vAddr ;
vRecv > > vAddr ;
// Don't want addr from older versions unless seeding
if ( pfrom - > nVersion < CADDR_TIME_VERSION & & connman . GetAddressCount ( ) > 1000 )
return true ;
if ( vAddr . size ( ) > 1000 )
{
LOCK ( cs_main ) ;
Misbehaving ( pfrom - > GetId ( ) , 20 ) ;
return error ( " message addr size() = % u " , vAddr.size()) ;
}
// Store the new addresses
vector < CAddress > vAddrOk ;
int64_t nNow = GetAdjustedTime ( ) ;
int64_t nSince = nNow - 10 * 60 ;
BOOST_FOREACH ( CAddress & addr , vAddr )
{
// 2017-08-09 18:06:31 +02:00
if ( interruptMsgProc )
return true ;
// 2017-08-09 02:19:06 +02:00
if ( ( addr . nServices & REQUIRED_SERVICES ) ! = REQUIRED_SERVICES )
continue ;
if ( addr . nTime < = 100000000 | | addr . nTime > nNow + 10 * 60 )
addr . nTime = nNow - 5 * 24 * 60 * 60 ;
pfrom - > AddAddressKnown ( addr ) ;
bool fReachable = IsReachable ( addr ) ;
if ( addr . nTime > nSince & & ! pfrom - > fGetAddr & & vAddr . size ( ) < = 10 & & addr . IsRoutable ( ) )
{
RelayAddress ( addr , fReachable , connman ) ;
}
// Do not store addresses outside our network
if ( fReachable )
vAddrOk . push_back ( addr ) ;
}
connman . AddNewAddresses ( vAddrOk , pfrom - > addr , 2 * 60 * 60 ) ;
if ( vAddr . size ( ) < 1000 )
pfrom - > fGetAddr = false ;
if ( pfrom - > fOneShot )
pfrom - > fDisconnect = true ;
}
else if ( strCommand = = NetMsgType : : SENDHEADERS )
{
LOCK ( cs_main ) ;
State ( pfrom - > GetId ( ) ) - > fPreferHeaders = true ;
}
else if ( strCommand = = NetMsgType : : INV )
{
vector < CInv > vInv ;
vRecv > > vInv ;
if ( vInv . size ( ) > MAX_INV_SZ )
{
LOCK ( cs_main ) ;
Misbehaving ( pfrom - > GetId ( ) , 20 ) ;
return error ( " message inv size() = % u " , vInv.size()) ;
}
bool fBlocksOnly = ! fRelayTxes ;
// Allow whitelisted peers to send data other than blocks in blocks only mode if whitelistrelay is true
if ( pfrom - > fWhitelisted & & GetBoolArg ( " -whitelistrelay " , DEFAULT_WHITELISTRELAY ) )
fBlocksOnly = false ;
LOCK ( cs_main ) ;
std : : vector < CInv > vToFetch ;
for ( unsigned int nInv = 0 ; nInv < vInv . size ( ) ; nInv + + )
{
const CInv & inv = vInv [ nInv ] ;
if ( ! inv . IsKnownType ( ) ) {
LogPrint ( " net " , " got inv of unknown type %d: %s peer=%d \n " , inv . type , inv . hash . ToString ( ) , pfrom - > id ) ;
continue ;
}
// 2017-08-09 18:06:31 +02:00
if ( interruptMsgProc )
return true ;
// 2017-08-09 02:19:06 +02:00
pfrom - > AddInventoryKnown ( inv ) ;
bool fAlreadyHave = AlreadyHave ( inv ) ;
LogPrint ( " net " , " got inv: %s %s peer=%d \n " , inv . ToString ( ) , fAlreadyHave ? " have " : " new " , pfrom - > id ) ;
if ( inv . type = = MSG_BLOCK ) {
UpdateBlockAvailability ( pfrom - > GetId ( ) , inv . hash ) ;
if ( ! fAlreadyHave & & ! fImporting & & ! fReindex & & ! mapBlocksInFlight . count ( inv . hash ) ) {
// First request the headers preceding the announced block. In the normal fully-synced
// case where a new block is announced that succeeds the current tip (no reorganization),
// there are no such headers.
// Secondly, and only when we are close to being synced, we request the announced block directly,
// to avoid an extra round-trip. Note that we must *first* ask for the headers, so by the
// time the block arrives, the header chain leading up to it is already validated. Not
// doing this will result in the received block being rejected as an orphan in case it is
// not a direct successor.
connman . PushMessage ( pfrom , NetMsgType : : GETHEADERS , chainActive . GetLocator ( pindexBestHeader ) , inv . hash ) ;
CNodeState * nodestate = State ( pfrom - > GetId ( ) ) ;
if ( CanDirectFetch ( chainparams . GetConsensus ( ) ) & &
nodestate - > nBlocksInFlight < MAX_BLOCKS_IN_TRANSIT_PER_PEER ) {
vToFetch . push_back ( inv ) ;
// Mark block as in flight already, even though the actual "getdata" message only goes out
// later (within the same cs_main lock, though).
MarkBlockAsInFlight ( pfrom - > GetId ( ) , inv . hash , chainparams . GetConsensus ( ) ) ;
}
LogPrint ( " net " , " getheaders (%d) %s to peer=%d \n " , pindexBestHeader - > nHeight , inv . hash . ToString ( ) , pfrom - > id ) ;
}
}
else
{
if ( fBlocksOnly )
LogPrint ( " net " , " transaction (%s) inv sent in violation of protocol peer=%d \n " , inv . hash . ToString ( ) , pfrom - > id ) ;
else if ( ! fAlreadyHave & & ! fImporting & & ! fReindex & & ! IsInitialBlockDownload ( ) )
pfrom - > AskFor ( inv ) ;
}
// Track requests for our stuff
GetMainSignals ( ) . Inventory ( inv . hash ) ;
if ( pfrom - > nSendSize > ( nMaxSendBufferSize * 2 ) ) {
Misbehaving ( pfrom - > GetId ( ) , 50 ) ;
return error ( " send buffer size() = % u " , pfrom->nSendSize) ;
}
}
if ( ! vToFetch . empty ( ) )
connman . PushMessage ( pfrom , NetMsgType : : GETDATA , vToFetch ) ;
}
else if ( strCommand = = NetMsgType : : GETDATA )
{
vector < CInv > vInv ;
vRecv > > vInv ;
if ( vInv . size ( ) > MAX_INV_SZ )
{
LOCK ( cs_main ) ;
Misbehaving ( pfrom - > GetId ( ) , 20 ) ;
return error ( " message getdata size() = % u " , vInv.size()) ;
}
if ( fDebug | | ( vInv . size ( ) ! = 1 ) )
LogPrint ( " net " , " received getdata (%u invsz) peer=%d \n " , vInv . size ( ) , pfrom - > id ) ;
if ( ( fDebug & & vInv . size ( ) > 0 ) | | ( vInv . size ( ) = = 1 ) )
LogPrint ( " net " , " received getdata for: %s peer=%d \n " , vInv [ 0 ] . ToString ( ) , pfrom - > id ) ;
pfrom - > vRecvGetData . insert ( pfrom - > vRecvGetData . end ( ) , vInv . begin ( ) , vInv . end ( ) ) ;
// 2017-08-09 18:06:31 +02:00
ProcessGetData ( pfrom , chainparams . GetConsensus ( ) , connman , interruptMsgProc ) ;
// 2017-08-09 02:19:06 +02:00
}
else if ( strCommand = = NetMsgType : : GETBLOCKS )
{
CBlockLocator locator ;
uint256 hashStop ;
vRecv > > locator > > hashStop ;
LOCK ( cs_main ) ;
// Find the last block the caller has in the main chain
CBlockIndex * pindex = FindForkInGlobalIndex ( chainActive , locator ) ;
// Send the rest of the chain
if ( pindex )
pindex = chainActive . Next ( pindex ) ;
int nLimit = 500 ;
LogPrint ( " net " , " getblocks %d to %s limit %d from peer=%d \n " , ( pindex ? pindex - > nHeight : - 1 ) , hashStop . IsNull ( ) ? " end " : hashStop . ToString ( ) , nLimit , pfrom - > id ) ;
for ( ; pindex ; pindex = chainActive . Next ( pindex ) )
{
if ( pindex - > GetBlockHash ( ) = = hashStop )
{
LogPrint ( " net " , " getblocks stopping at %d %s \n " , pindex - > nHeight , pindex - > GetBlockHash ( ) . ToString ( ) ) ;
break ;
}
// If pruning, don't inv blocks unless we have on disk and are likely to still have
// for some reasonable time window (1 hour) that block relay might require.
const int nPrunedBlocksLikelyToHave = MIN_BLOCKS_TO_KEEP - 3600 / chainparams . GetConsensus ( ) . nPowTargetSpacing ;
if ( fPruneMode & & ( ! ( pindex - > nStatus & BLOCK_HAVE_DATA ) | | pindex - > nHeight < = chainActive . Tip ( ) - > nHeight - nPrunedBlocksLikelyToHave ) )
{
LogPrint ( " net " , " getblocks stopping, pruned or too old block at %d %s \n " , pindex - > nHeight , pindex - > GetBlockHash ( ) . ToString ( ) ) ;
break ;
}
pfrom - > PushInventory ( CInv ( MSG_BLOCK , pindex - > GetBlockHash ( ) ) ) ;
if ( - - nLimit < = 0 )
{
// When this block is requested, we'll send an inv that'll
// trigger the peer to getblocks the next batch of inventory.
LogPrint ( " net " , " getblocks stopping at limit %d %s \n " , pindex - > nHeight , pindex - > GetBlockHash ( ) . ToString ( ) ) ;
pfrom - > hashContinue = pindex - > GetBlockHash ( ) ;
break ;
}
}
}
else if ( strCommand = = NetMsgType : : GETHEADERS )
{
CBlockLocator locator ;
uint256 hashStop ;
vRecv > > locator > > hashStop ;
LOCK ( cs_main ) ;
if ( IsInitialBlockDownload ( ) & & ! pfrom - > fWhitelisted ) {
LogPrint ( " net " , " Ignoring getheaders from peer=%d because node is in initial block download \n " , pfrom - > id ) ;
return true ;
}
CNodeState * nodestate = State ( pfrom - > GetId ( ) ) ;
CBlockIndex * pindex = NULL ;
if ( locator . IsNull ( ) )
{
// If locator is null, return the hashStop block
BlockMap : : iterator mi = mapBlockIndex . find ( hashStop ) ;
if ( mi = = mapBlockIndex . end ( ) )
return true ;
pindex = ( * mi ) . second ;
}
else
{
// Find the last block the caller has in the main chain
pindex = FindForkInGlobalIndex ( chainActive , locator ) ;
if ( pindex )
pindex = chainActive . Next ( pindex ) ;
}
// we must use CBlocks, as CBlockHeaders won't include the 0x00 nTx count at the end
vector < CBlock > vHeaders ;
int nLimit = MAX_HEADERS_RESULTS ;
LogPrint ( " net " , " getheaders %d to %s from peer=%d \n " , ( pindex ? pindex - > nHeight : - 1 ) , hashStop . ToString ( ) , pfrom - > id ) ;
for ( ; pindex ; pindex = chainActive . Next ( pindex ) )
{
vHeaders . push_back ( pindex - > GetBlockHeader ( ) ) ;
if ( - - nLimit < = 0 | | pindex - > GetBlockHash ( ) = = hashStop )
break ;
}
// pindex can be NULL either if we sent chainActive.Tip() OR
// if our peer has chainActive.Tip() (and thus we are sending an empty
// headers message). In both cases it's safe to update
// pindexBestHeaderSent to be our tip.
nodestate - > pindexBestHeaderSent = pindex ? pindex : chainActive . Tip ( ) ;
connman . PushMessage ( pfrom , NetMsgType : : HEADERS , vHeaders ) ;
}
else if ( strCommand = = NetMsgType : : TX | | strCommand = = NetMsgType : : DSTX | | strCommand = = NetMsgType : : TXLOCKREQUEST )
{
// Stop processing the transaction early if
// We are in blocks only mode and peer is either not whitelisted or whitelistrelay is off
if ( ! fRelayTxes & & ( ! pfrom - > fWhitelisted | | ! GetBoolArg ( " -whitelistrelay " , DEFAULT_WHITELISTRELAY ) ) )
{
LogPrint ( " net " , " transaction sent in violation of protocol peer=%d \n " , pfrom - > id ) ;
return true ;
}
vector < uint256 > vWorkQueue ;
vector < uint256 > vEraseQueue ;
CTransaction tx ;
CTxLockRequest txLockRequest ;
CDarksendBroadcastTx dstx ;
int nInvType = MSG_TX ;
// Read data and assign inv type
if ( strCommand = = NetMsgType : : TX ) {
vRecv > > tx ;
} else if ( strCommand = = NetMsgType : : TXLOCKREQUEST ) {
vRecv > > txLockRequest ;
tx = txLockRequest ;
nInvType = MSG_TXLOCK_REQUEST ;
} else if ( strCommand = = NetMsgType : : DSTX ) {
vRecv > > dstx ;
tx = dstx . tx ;
nInvType = MSG_DSTX ;
}
CInv inv ( nInvType , tx . GetHash ( ) ) ;
pfrom - > AddInventoryKnown ( inv ) ;
pfrom - > setAskFor . erase ( inv . hash ) ;
// Process custom logic, no matter if tx will be accepted to mempool later or not
if ( strCommand = = NetMsgType : : TXLOCKREQUEST ) {
if ( ! instantsend . ProcessTxLockRequest ( txLockRequest ) ) {
LogPrint ( " instantsend " , " TXLOCKREQUEST -- failed %s \n " , txLockRequest . GetHash ( ) . ToString ( ) ) ;
return false ;
}
} else if ( strCommand = = NetMsgType : : DSTX ) {
uint256 hashTx = tx . GetHash ( ) ;
if ( CPrivateSend : : GetDSTX ( hashTx ) ) {
LogPrint ( " privatesend " , " DSTX -- Already have %s, skipping... \n " , hashTx . ToString ( ) ) ;
return true ; // not an error
}
CMasternode * pmn = mnodeman . Find ( dstx . vin ) ;
if ( pmn = = NULL ) {
LogPrint ( " privatesend " , " DSTX -- Can't find masternode %s to verify %s \n " , dstx . vin . prevout . ToStringShort ( ) , hashTx . ToString ( ) ) ;
return false ;
}
if ( ! pmn - > fAllowMixingTx ) {
LogPrint ( " privatesend " , " DSTX -- Masternode %s is sending too many transactions %s \n " , dstx . vin . prevout . ToStringShort ( ) , hashTx . ToString ( ) ) ;
return true ;
// TODO: Not an error? Could it be that someone is relaying old DSTXes
// we have no idea about (e.g we were offline)? How to handle them?
}
if ( ! dstx . CheckSignature ( pmn - > pubKeyMasternode ) ) {
LogPrint ( " privatesend " , " DSTX -- CheckSignature() failed for %s \n " , hashTx . ToString ( ) ) ;
return false ;
}
LogPrintf ( " DSTX -- Got Masternode transaction %s \n " , hashTx . ToString ( ) ) ;
mempool . PrioritiseTransaction ( hashTx , hashTx . ToString ( ) , 1000 , 0.1 * COIN ) ;
pmn - > fAllowMixingTx = false ;
}
LOCK ( cs_main ) ;
bool fMissingInputs = false ;
CValidationState state ;
mapAlreadyAskedFor . erase ( inv . hash ) ;
if ( ! AlreadyHave ( inv ) & & AcceptToMemoryPool ( mempool , state , tx , true , & fMissingInputs ) )
{
// Process custom txes, this changes AlreadyHave to "true"
if ( strCommand = = NetMsgType : : DSTX ) {
LogPrintf ( " DSTX -- Masternode transaction accepted, txid=%s, peer=%d \n " ,
tx . GetHash ( ) . ToString ( ) , pfrom - > id ) ;
CPrivateSend : : AddDSTX ( dstx ) ;
} else if ( strCommand = = NetMsgType : : TXLOCKREQUEST ) {
LogPrintf ( " TXLOCKREQUEST -- Transaction Lock Request accepted, txid=%s, peer=%d \n " ,
tx . GetHash ( ) . ToString ( ) , pfrom - > id ) ;
instantsend . AcceptLockRequest ( txLockRequest ) ;
}
mempool . check ( pcoinsTip ) ;
connman . RelayTransaction ( tx ) ;
vWorkQueue . push_back ( inv . hash ) ;
pfrom - > nLastTXTime = GetTime ( ) ;
LogPrint ( " mempool " , " AcceptToMemoryPool: peer=%d: accepted %s (poolsz %u txn, %u kB) \n " ,
pfrom - > id ,
tx . GetHash ( ) . ToString ( ) ,
mempool . size ( ) , mempool . DynamicMemoryUsage ( ) / 1000 ) ;
// Recursively process any orphan transactions that depended on this one
set < NodeId > setMisbehaving ;
for ( unsigned int i = 0 ; i < vWorkQueue . size ( ) ; i + + )
{
map < uint256 , set < uint256 > > : : iterator itByPrev = mapOrphanTransactionsByPrev . find ( vWorkQueue [ i ] ) ;
if ( itByPrev = = mapOrphanTransactionsByPrev . end ( ) )
continue ;
for ( set < uint256 > : : iterator mi = itByPrev - > second . begin ( ) ;
mi ! = itByPrev - > second . end ( ) ;
+ + mi )
{
const uint256 & orphanHash = * mi ;
const CTransaction & orphanTx = mapOrphanTransactions [ orphanHash ] . tx ;
NodeId fromPeer = mapOrphanTransactions [ orphanHash ] . fromPeer ;
bool fMissingInputs2 = false ;
// Use a dummy CValidationState so someone can't setup nodes to counter-DoS based on orphan
// resolution (that is, feeding people an invalid transaction based on LegitTxX in order to get
// anyone relaying LegitTxX banned)
CValidationState stateDummy ;
if ( setMisbehaving . count ( fromPeer ) )
continue ;
if ( AcceptToMemoryPool ( mempool , stateDummy , orphanTx , true , & fMissingInputs2 ) )
{
LogPrint ( " mempool " , " accepted orphan tx %s \n " , orphanHash . ToString ( ) ) ;
connman . RelayTransaction ( orphanTx ) ;
vWorkQueue . push_back ( orphanHash ) ;
vEraseQueue . push_back ( orphanHash ) ;
}
else if ( ! fMissingInputs2 )
{
int nDos = 0 ;
if ( stateDummy . IsInvalid ( nDos ) & & nDos > 0 )
{
// Punish peer that gave us an invalid orphan tx
Misbehaving ( fromPeer , nDos ) ;
setMisbehaving . insert ( fromPeer ) ;
LogPrint ( " mempool " , " invalid orphan tx %s \n " , orphanHash . ToString ( ) ) ;
}
// Has inputs but not accepted to mempool
// Probably non-standard or insufficient fee/priority
LogPrint ( " mempool " , " removed orphan tx %s \n " , orphanHash . ToString ( ) ) ;
vEraseQueue . push_back ( orphanHash ) ;
assert ( recentRejects ) ;
recentRejects - > insert ( orphanHash ) ;
}
mempool . check ( pcoinsTip ) ;
}
}
BOOST_FOREACH ( uint256 hash , vEraseQueue )
EraseOrphanTx ( hash ) ;
}
else if ( fMissingInputs )
{
AddOrphanTx ( tx , pfrom - > GetId ( ) ) ;
// DoS prevention: do not allow mapOrphanTransactions to grow unbounded
unsigned int nMaxOrphanTx = ( unsigned int ) std : : max ( ( int64_t ) 0 , GetArg ( " -maxorphantx " , DEFAULT_MAX_ORPHAN_TRANSACTIONS ) ) ;
unsigned int nEvicted = LimitOrphanTxSize ( nMaxOrphanTx ) ;
if ( nEvicted > 0 )
LogPrint ( " mempool " , " mapOrphan overflow, removed %u tx \n " , nEvicted ) ;
} else {
assert ( recentRejects ) ;
recentRejects - > insert ( tx . GetHash ( ) ) ;
if ( strCommand = = NetMsgType : : TXLOCKREQUEST & & ! AlreadyHave ( inv ) ) {
// i.e. AcceptToMemoryPool failed, probably because it's conflicting
// with existing normal tx or tx lock for another tx. For the same tx lock
// AlreadyHave would have return "true" already.
// It's the first time we failed for this tx lock request,
// this should switch AlreadyHave to "true".
instantsend . RejectLockRequest ( txLockRequest ) ;
// this lets other nodes to create lock request candidate i.e.
// this allows multiple conflicting lock requests to compete for votes
connman . RelayTransaction ( tx ) ;
}
if ( pfrom - > fWhitelisted & & GetBoolArg ( " -whitelistforcerelay " , DEFAULT_WHITELISTFORCERELAY ) ) {
// Always relay transactions received from whitelisted peers, even
// if they were already in the mempool or rejected from it due
// to policy, allowing the node to function as a gateway for
// nodes hidden behind it.
//
// Never relay transactions that we would assign a non-zero DoS
// score for, as we expect peers to do the same with us in that
// case.
int nDoS = 0 ;
if ( ! state . IsInvalid ( nDoS ) | | nDoS = = 0 ) {
LogPrintf ( " Force relaying tx %s from whitelisted peer=%d \n " , tx . GetHash ( ) . ToString ( ) , pfrom - > id ) ;
connman . RelayTransaction ( tx ) ;
} else {
LogPrintf ( " Not relaying invalid transaction %s from whitelisted peer=%d (%s) \n " , tx . GetHash ( ) . ToString ( ) , pfrom - > id , FormatStateMessage ( state ) ) ;
}
}
}
int nDoS = 0 ;
if ( state . IsInvalid ( nDoS ) )
{
LogPrint ( " mempoolrej " , " %s from peer=%d was not accepted: %s \n " , tx . GetHash ( ) . ToString ( ) ,
pfrom - > id ,
FormatStateMessage ( state ) ) ;
if ( state . GetRejectCode ( ) < REJECT_INTERNAL ) // Never send AcceptToMemoryPool's internal codes over P2P
connman . PushMessage ( pfrom , NetMsgType : : REJECT , strCommand , ( unsigned char ) state . GetRejectCode ( ) ,
state . GetRejectReason ( ) . substr ( 0 , MAX_REJECT_MESSAGE_LENGTH ) , inv . hash ) ;
if ( nDoS > 0 )
Misbehaving ( pfrom - > GetId ( ) , nDoS ) ;
}
}
else if ( strCommand = = NetMsgType : : HEADERS & & ! fImporting & & ! fReindex ) // Ignore headers received while importing
{
std : : vector < CBlockHeader > headers ;
// Bypass the normal CBlock deserialization, as we don't want to risk deserializing 2000 full blocks.
unsigned int nCount = ReadCompactSize ( vRecv ) ;
if ( nCount > MAX_HEADERS_RESULTS ) {
LOCK ( cs_main ) ;
Misbehaving ( pfrom - > GetId ( ) , 20 ) ;
return error ( " headers message size = %u " , nCount ) ;
}
headers . resize ( nCount ) ;
for ( unsigned int n = 0 ; n < nCount ; n + + ) {
vRecv > > headers [ n ] ;
ReadCompactSize ( vRecv ) ; // ignore tx count; assume it is 0.
}
if ( nCount = = 0 ) {
// Nothing interesting. Stop asking this peers for more headers.
return true ;
}
CBlockIndex * pindexLast = NULL ;
{
LOCK ( cs_main ) ;
uint256 hashLastBlock ;
for ( const CBlockHeader & header : headers ) {
if ( ! hashLastBlock . IsNull ( ) & & header . hashPrevBlock ! = hashLastBlock ) {
Misbehaving ( pfrom - > GetId ( ) , 20 ) ;
return error ( " non-continuous headers sequence " ) ;
}
hashLastBlock = header . GetHash ( ) ;
}
}
CValidationState state ;
if ( ! ProcessNewBlockHeaders ( headers , state , chainparams , & pindexLast ) ) {
int nDoS ;
if ( state . IsInvalid ( nDoS ) ) {
if ( nDoS > 0 ) {
LOCK ( cs_main ) ;
Misbehaving ( pfrom - > GetId ( ) , nDoS ) ;
}
return error ( " invalid header received " ) ;
}
}
{
LOCK ( cs_main ) ;
if ( pindexLast )
UpdateBlockAvailability ( pfrom - > GetId ( ) , pindexLast - > GetBlockHash ( ) ) ;
if ( nCount = = MAX_HEADERS_RESULTS & & pindexLast ) {
// Headers message had its maximum size; the peer may have more headers.
// TODO: optimize: if pindexLast is an ancestor of chainActive.Tip or pindexBestHeader, continue
// from there instead.
LogPrint ( " net " , " more getheaders (%d) to end to peer=%d (startheight:%d) \n " , pindexLast - > nHeight , pfrom - > id , pfrom - > nStartingHeight ) ;
connman . PushMessage ( pfrom , NetMsgType : : GETHEADERS , chainActive . GetLocator ( pindexLast ) , uint256 ( ) ) ;
}
bool fCanDirectFetch = CanDirectFetch ( chainparams . GetConsensus ( ) ) ;
CNodeState * nodestate = State ( pfrom - > GetId ( ) ) ;
// If this set of headers is valid and ends in a block with at least as
// much work as our tip, download as much as possible.
if ( fCanDirectFetch & & pindexLast - > IsValid ( BLOCK_VALID_TREE ) & & chainActive . Tip ( ) - > nChainWork < = pindexLast - > nChainWork ) {
vector < CBlockIndex * > vToFetch ;
CBlockIndex * pindexWalk = pindexLast ;
// Calculate all the blocks we'd need to switch to pindexLast, up to a limit.
while ( pindexWalk & & ! chainActive . Contains ( pindexWalk ) & & vToFetch . size ( ) < = MAX_BLOCKS_IN_TRANSIT_PER_PEER ) {
if ( ! ( pindexWalk - > nStatus & BLOCK_HAVE_DATA ) & &
! mapBlocksInFlight . count ( pindexWalk - > GetBlockHash ( ) ) ) {
// We don't have this block, and it's not yet in flight.
vToFetch . push_back ( pindexWalk ) ;
}
pindexWalk = pindexWalk - > pprev ;
}
// If pindexWalk still isn't on our main chain, we're looking at a
// very large reorg at a time we think we're close to caught up to
// the main chain -- this shouldn't really happen. Bail out on the
// direct fetch and rely on parallel download instead.
if ( ! chainActive . Contains ( pindexWalk ) ) {
LogPrint ( " net " , " Large reorg, won't direct fetch to %s (%d) \n " ,
pindexLast - > GetBlockHash ( ) . ToString ( ) ,
pindexLast - > nHeight ) ;
} else {
vector < CInv > vGetData ;
// Download as much as possible, from earliest to latest.
BOOST_REVERSE_FOREACH ( CBlockIndex * pindex , vToFetch ) {
if ( nodestate - > nBlocksInFlight > = MAX_BLOCKS_IN_TRANSIT_PER_PEER ) {
// Can't download any more from this peer
break ;
}
vGetData . push_back ( CInv ( MSG_BLOCK , pindex - > GetBlockHash ( ) ) ) ;
MarkBlockAsInFlight ( pfrom - > GetId ( ) , pindex - > GetBlockHash ( ) , chainparams . GetConsensus ( ) , pindex ) ;
LogPrint ( " net " , " Requesting block %s from peer=%d \n " ,
pindex - > GetBlockHash ( ) . ToString ( ) , pfrom - > id ) ;
}
if ( vGetData . size ( ) > 1 ) {
LogPrint ( " net " , " Downloading blocks toward %s (%d) via headers direct fetch \n " ,
pindexLast - > GetBlockHash ( ) . ToString ( ) , pindexLast - > nHeight ) ;
}
if ( vGetData . size ( ) > 0 ) {
connman . PushMessage ( pfrom , NetMsgType : : GETDATA , vGetData ) ;
}
}
}
}
}
else if ( strCommand = = NetMsgType : : BLOCK & & ! fImporting & & ! fReindex ) // Ignore blocks received while importing
{
CBlock block ;
vRecv > > block ;
CInv inv ( MSG_BLOCK , block . GetHash ( ) ) ;
LogPrint ( " net " , " received block %s peer=%d \n " , inv . hash . ToString ( ) , pfrom - > id ) ;
pfrom - > AddInventoryKnown ( inv ) ;
// Process all blocks from whitelisted peers, even if not requested,
// unless we're still syncing with the network.
// Such an unrequested block may still be processed, subject to the
// conditions in AcceptBlock().
bool forceProcessing = pfrom - > fWhitelisted & & ! IsInitialBlockDownload ( ) ;
const uint256 hash ( block . GetHash ( ) ) ;
{
LOCK ( cs_main ) ;
// Also always process if we requested the block explicitly, as we may
// need it even though it is not a candidate for a new best tip.
forceProcessing | = MarkBlockAsReceived ( hash ) ;
// mapBlockSource is only used for sending reject messages and DoS scores,
// so the race between here and cs_main in ProcessNewBlock is fine.
mapBlockSource . emplace ( hash , pfrom - > GetId ( ) ) ;
}
bool fNewBlock = false ;
ProcessNewBlock ( chainparams , & block , forceProcessing , NULL , & fNewBlock ) ;
if ( fNewBlock )
pfrom - > nLastBlockTime = GetTime ( ) ;
}
else if ( strCommand = = NetMsgType : : GETADDR )
{
// This asymmetric behavior for inbound and outbound connections was introduced
// to prevent a fingerprinting attack: an attacker can send specific fake addresses
// to users' AddrMan and later request them by sending getaddr messages.
// Making nodes which are behind NAT and can only make outgoing connections ignore
// the getaddr message mitigates the attack.
if ( ! pfrom - > fInbound ) {
LogPrint ( " net " , " Ignoring \" getaddr \" from outbound connection. peer=%d \n " , pfrom - > id ) ;
return true ;
}
pfrom - > vAddrToSend . clear ( ) ;
vector < CAddress > vAddr = connman . GetAddresses ( ) ;
BOOST_FOREACH ( const CAddress & addr , vAddr )
pfrom - > PushAddress ( addr ) ;
}
else if ( strCommand = = NetMsgType : : MEMPOOL )
{
if ( connman . OutboundTargetReached ( false ) & & ! pfrom - > fWhitelisted )
{
LogPrint ( " net " , " mempool request with bandwidth limit reached, disconnect peer=%d \n " , pfrom - > GetId ( ) ) ;
pfrom - > fDisconnect = true ;
return true ;
}
LOCK2 ( cs_main , pfrom - > cs_filter ) ;
std : : vector < uint256 > vtxid ;
mempool . queryHashes ( vtxid ) ;
vector < CInv > vInv ;
BOOST_FOREACH ( uint256 & hash , vtxid ) {
CInv inv ( MSG_TX , hash ) ;
if ( pfrom - > pfilter ) {
CTransaction tx ;
bool fInMemPool = mempool . lookup ( hash , tx ) ;
if ( ! fInMemPool ) continue ; // another thread removed since queryHashes, maybe...
if ( ! pfrom - > pfilter - > IsRelevantAndUpdate ( tx ) ) continue ;
}
vInv . push_back ( inv ) ;
if ( vInv . size ( ) = = MAX_INV_SZ ) {
connman . PushMessage ( pfrom , NetMsgType : : INV , vInv ) ;
vInv . clear ( ) ;
}
}
if ( vInv . size ( ) > 0 )
connman . PushMessage ( pfrom , NetMsgType : : INV , vInv ) ;
}
else if ( strCommand = = NetMsgType : : PING )
{
if ( pfrom - > nVersion > BIP0031_VERSION )
{
uint64_t nonce = 0 ;
vRecv > > nonce ;
// Echo the message back with the nonce. This allows for two useful features:
//
// 1) A remote node can quickly check if the connection is operational
// 2) Remote nodes can measure the latency of the network thread. If this node
// is overloaded it won't respond to pings quickly and the remote node can
// avoid sending us more work, like chain download requests.
//
// The nonce stops the remote getting confused between different pings: without
// it, if the remote node sends a ping once per second and this node takes 5
// seconds to respond to each, the 5th ping the remote sends would appear to
// return very quickly.
connman . PushMessage ( pfrom , NetMsgType : : PONG , nonce ) ;
}
}
else if ( strCommand = = NetMsgType : : PONG )
{
int64_t pingUsecEnd = nTimeReceived ;
uint64_t nonce = 0 ;
size_t nAvail = vRecv . in_avail ( ) ;
bool bPingFinished = false ;
std : : string sProblem ;
if ( nAvail > = sizeof ( nonce ) ) {
vRecv > > nonce ;
// Only process pong message if there is an outstanding ping (old ping without nonce should never pong)
if ( pfrom - > nPingNonceSent ! = 0 ) {
if ( nonce = = pfrom - > nPingNonceSent ) {
// Matching pong received, this ping is no longer outstanding
bPingFinished = true ;
int64_t pingUsecTime = pingUsecEnd - pfrom - > nPingUsecStart ;
if ( pingUsecTime > 0 ) {
// Successful ping time measurement, replace previous
pfrom - > nPingUsecTime = pingUsecTime ;
pfrom - > nMinPingUsecTime = std : : min ( pfrom - > nMinPingUsecTime , pingUsecTime ) ;
} else {
// This should never happen
sProblem = " Timing mishap " ;
}
} else {
// Nonce mismatches are normal when pings are overlapping
sProblem = " Nonce mismatch " ;
if ( nonce = = 0 ) {
// This is most likely a bug in another implementation somewhere; cancel this ping
bPingFinished = true ;
sProblem = " Nonce zero " ;
}
}
} else {
sProblem = " Unsolicited pong without ping " ;
}
} else {
// This is most likely a bug in another implementation somewhere; cancel this ping
bPingFinished = true ;
sProblem = " Short payload " ;
}
if ( ! ( sProblem . empty ( ) ) ) {
LogPrint ( " net " , " pong peer=%d: %s, %x expected, %x received, %u bytes \n " ,
pfrom - > id ,
sProblem ,
pfrom - > nPingNonceSent ,
nonce ,
nAvail ) ;
}
if ( bPingFinished ) {
pfrom - > nPingNonceSent = 0 ;
}
}
else if ( fAlerts & & strCommand = = NetMsgType : : ALERT )
{
CAlert alert ;
vRecv > > alert ;
uint256 alertHash = alert . GetHash ( ) ;
if ( pfrom - > setKnown . count ( alertHash ) = = 0 )
{
if ( alert . ProcessAlert ( chainparams . AlertKey ( ) ) )
{
// Relay
pfrom - > setKnown . insert ( alertHash ) ;
{
connman . ForEachNode ( [ & alert , & connman ] ( CNode * pnode ) {
alert . RelayTo ( pnode , connman ) ;
} ) ;
}
}
else {
// Small DoS penalty so peers that send us lots of
// duplicate/expired/invalid-signature/whatever alerts
// eventually get banned.
// This isn't a Misbehaving(100) (immediate ban) because the
// peer might be an older or different implementation with
// a different signature key, etc.
Misbehaving ( pfrom - > GetId ( ) , 10 ) ;
}
}
}
else if ( strCommand = = NetMsgType : : FILTERLOAD )
{
CBloomFilter filter ;
vRecv > > filter ;
if ( ! filter . IsWithinSizeConstraints ( ) )
{
// There is no excuse for sending a too-large filter
LOCK ( cs_main ) ;
Misbehaving ( pfrom - > GetId ( ) , 100 ) ;
}
else
{
LOCK ( pfrom - > cs_filter ) ;
delete pfrom - > pfilter ;
pfrom - > pfilter = new CBloomFilter ( filter ) ;
pfrom - > pfilter - > UpdateEmptyFull ( ) ;
}
pfrom - > fRelayTxes = true ;
}
else if ( strCommand = = NetMsgType : : FILTERADD )
{
vector < unsigned char > vData ;
vRecv > > vData ;
// Nodes must NEVER send a data item > 520 bytes (the max size for a script data object,
// and thus, the maximum size any matched object can have) in a filteradd message
if ( vData . size ( ) > MAX_SCRIPT_ELEMENT_SIZE )
{
LOCK ( cs_main ) ;
Misbehaving ( pfrom - > GetId ( ) , 100 ) ;
} else {
LOCK ( pfrom - > cs_filter ) ;
if ( pfrom - > pfilter )
pfrom - > pfilter - > insert ( vData ) ;
else
{
LOCK ( cs_main ) ;
Misbehaving ( pfrom - > GetId ( ) , 100 ) ;
}
}
}
else if ( strCommand = = NetMsgType : : FILTERCLEAR )
{
LOCK ( pfrom - > cs_filter ) ;
delete pfrom - > pfilter ;
pfrom - > pfilter = new CBloomFilter ( ) ;
pfrom - > fRelayTxes = true ;
}
else if ( strCommand = = NetMsgType : : REJECT )
{
if ( fDebug ) {
try {
string strMsg ; unsigned char ccode ; string strReason ;
vRecv > > LIMITED_STRING ( strMsg , CMessageHeader : : COMMAND_SIZE ) > > ccode > > LIMITED_STRING ( strReason , MAX_REJECT_MESSAGE_LENGTH ) ;
ostringstream ss ;
ss < < strMsg < < " code " < < itostr ( ccode ) < < " : " < < strReason ;
if ( strMsg = = NetMsgType : : BLOCK | | strMsg = = NetMsgType : : TX )
{
uint256 hash ;
vRecv > > hash ;
ss < < " : hash " < < hash . ToString ( ) ;
}
LogPrint ( " net " , " Reject %s \n " , SanitizeString ( ss . str ( ) ) ) ;
} catch ( const std : : ios_base : : failure & ) {
// Avoid feedback loops by preventing reject messages from triggering a new reject message.
LogPrint ( " net " , " Unparseable reject message received \n " ) ;
}
}
}
else
{
bool found = false ;
const std : : vector < std : : string > & allMessages = getAllNetMessageTypes ( ) ;
BOOST_FOREACH ( const std : : string msg , allMessages ) {
if ( msg = = strCommand ) {
found = true ;
break ;
}
}
if ( found )
{
//probably one the extensions
privateSendClient . ProcessMessage ( pfrom , strCommand , vRecv ) ;
privateSendServer . ProcessMessage ( pfrom , strCommand , vRecv ) ;
mnodeman . ProcessMessage ( pfrom , strCommand , vRecv ) ;
mnpayments . ProcessMessage ( pfrom , strCommand , vRecv ) ;
instantsend . ProcessMessage ( pfrom , strCommand , vRecv ) ;
sporkManager . ProcessSpork ( pfrom , strCommand , vRecv ) ;
masternodeSync . ProcessMessage ( pfrom , strCommand , vRecv ) ;
governance . ProcessMessage ( pfrom , strCommand , vRecv ) ;
}
else
{
// Ignore unknown commands for extensibility
LogPrint ( " net " , " Unknown command \" %s \" from peer=%d \n " , SanitizeString ( strCommand ) , pfrom - > id ) ;
}
}
return true ;
}
// requires LOCK(cs_vRecvMsg)
2017-08-09 18:06:31 +02:00
bool ProcessMessages ( CNode * pfrom , CConnman & connman , std : : atomic < bool > & interruptMsgProc )
2017-08-09 02:19:06 +02:00
{
const CChainParams & chainparams = Params ( ) ;
unsigned int nMaxSendBufferSize = connman . GetSendBufferSize ( ) ;
//if (fDebug)
// LogPrintf("%s(%u messages)\n", __func__, pfrom->vRecvMsg.size());
//
// Message format
// (4) message start
// (12) command
// (4) size
// (4) checksum
// (x) data
//
bool fOk = true ;
if ( ! pfrom - > vRecvGetData . empty ( ) )
2017-08-09 18:06:31 +02:00
ProcessGetData ( pfrom , chainparams . GetConsensus ( ) , connman , interruptMsgProc ) ;
2017-08-09 02:19:06 +02:00
// this maintains the order of responses
if ( ! pfrom - > vRecvGetData . empty ( ) ) return fOk ;
std : : deque < CNetMessage > : : iterator it = pfrom - > vRecvMsg . begin ( ) ;
while ( ! pfrom - > fDisconnect & & it ! = pfrom - > vRecvMsg . end ( ) ) {
// Don't bother if send buffer is too full to respond anyway
if ( pfrom - > nSendSize > = nMaxSendBufferSize )
break ;
// get next message
CNetMessage & msg = * it ;
//if (fDebug)
// LogPrintf("%s(message %u msgsz, %u bytes, complete:%s)\n", __func__,
// msg.hdr.nMessageSize, msg.vRecv.size(),
// msg.complete() ? "Y" : "N");
// end, if an incomplete message is found
if ( ! msg . complete ( ) )
break ;
// at this point, any failure means we can delete the current message
it + + ;
// Scan for message start
if ( memcmp ( msg . hdr . pchMessageStart , chainparams . MessageStart ( ) , MESSAGE_START_SIZE ) ! = 0 ) {
LogPrintf ( " PROCESSMESSAGE: INVALID MESSAGESTART %s peer=%d \n " , SanitizeString ( msg . hdr . GetCommand ( ) ) , pfrom - > id ) ;
fOk = false ;
break ;
}
// Read header
CMessageHeader & hdr = msg . hdr ;
if ( ! hdr . IsValid ( chainparams . MessageStart ( ) ) )
{
LogPrintf ( " PROCESSMESSAGE: ERRORS IN HEADER %s peer=%d \n " , SanitizeString ( hdr . GetCommand ( ) ) , pfrom - > id ) ;
continue ;
}
string strCommand = hdr . GetCommand ( ) ;
// Message size
unsigned int nMessageSize = hdr . nMessageSize ;
// Checksum
CDataStream & vRecv = msg . vRecv ;
uint256 hash = Hash ( vRecv . begin ( ) , vRecv . begin ( ) + nMessageSize ) ;
if ( memcmp ( hash . begin ( ) , hdr . pchChecksum , CMessageHeader : : CHECKSUM_SIZE ) ! = 0 )
{
LogPrintf ( " %s(%s, %u bytes): CHECKSUM ERROR expected %s was %s \n " , __func__ ,
SanitizeString ( strCommand ) , nMessageSize ,
HexStr ( hash . begin ( ) , hash . begin ( ) + CMessageHeader : : CHECKSUM_SIZE ) ,
HexStr ( hdr . pchChecksum , hdr . pchChecksum + CMessageHeader : : CHECKSUM_SIZE ) ) ;
continue ;
}
// Process message
bool fRet = false ;
try
{
2017-08-09 18:06:31 +02:00
fRet = ProcessMessage ( pfrom , strCommand , vRecv , msg . nTime , connman , interruptMsgProc ) ;
if ( interruptMsgProc )
return true ;
2017-08-09 02:19:06 +02:00
}
catch ( const std : : ios_base : : failure & e )
{
connman . PushMessageWithVersion ( pfrom , INIT_PROTO_VERSION , NetMsgType : : REJECT , strCommand , REJECT_MALFORMED , string ( " error parsing message " ) ) ;
if ( strstr ( e . what ( ) , " end of data " ) )
{
// Allow exceptions from under-length message on vRecv
LogPrintf ( " %s(%s, %u bytes): Exception '%s' caught, normally caused by a message being shorter than its stated length \n " , __func__ , SanitizeString ( strCommand ) , nMessageSize , e . what ( ) ) ;
}
else if ( strstr ( e . what ( ) , " size too large " ) )
{
// Allow exceptions from over-long size
LogPrintf ( " %s(%s, %u bytes): Exception '%s' caught \n " , __func__ , SanitizeString ( strCommand ) , nMessageSize , e . what ( ) ) ;
}
else
{
PrintExceptionContinue ( & e , " ProcessMessages() " ) ;
}
}
catch ( const std : : exception & e ) {
PrintExceptionContinue ( & e , " ProcessMessages() " ) ;
} catch ( . . . ) {
PrintExceptionContinue ( NULL , " ProcessMessages() " ) ;
}
if ( ! fRet )
LogPrintf ( " %s(%s, %u bytes) FAILED peer=%d \n " , __func__ , SanitizeString ( strCommand ) , nMessageSize , pfrom - > id ) ;
break ;
}
// In case the connection got shut down, its receive buffer was wiped
if ( ! pfrom - > fDisconnect )
pfrom - > vRecvMsg . erase ( pfrom - > vRecvMsg . begin ( ) , it ) ;
return fOk ;
}
2017-08-09 18:06:31 +02:00
bool SendMessages ( CNode * pto , CConnman & connman , std : : atomic < bool > & interruptMsgProc )
2017-08-09 02:19:06 +02:00
{
const Consensus : : Params & consensusParams = Params ( ) . GetConsensus ( ) ;
{
// Don't send anything until we get its version message
if ( pto - > nVersion = = 0 )
return true ;
//
// Message: ping
//
bool pingSend = false ;
if ( pto - > fPingQueued ) {
// RPC ping request by user
pingSend = true ;
}
if ( pto - > nPingNonceSent = = 0 & & pto - > nPingUsecStart + PING_INTERVAL * 1000000 < GetTimeMicros ( ) ) {
// Ping automatically sent as a latency probe & keepalive.
pingSend = true ;
}
if ( pingSend ) {
uint64_t nonce = 0 ;
while ( nonce = = 0 ) {
GetRandBytes ( ( unsigned char * ) & nonce , sizeof ( nonce ) ) ;
}
pto - > fPingQueued = false ;
pto - > nPingUsecStart = GetTimeMicros ( ) ;
if ( pto - > nVersion > BIP0031_VERSION ) {
pto - > nPingNonceSent = nonce ;
connman . PushMessage ( pto , NetMsgType : : PING , nonce ) ;
} else {
// Peer is too old to support ping command with nonce, pong will never arrive.
pto - > nPingNonceSent = 0 ;
connman . PushMessage ( pto , NetMsgType : : PING ) ;
}
}
TRY_LOCK ( cs_main , lockMain ) ; // Acquire cs_main for IsInitialBlockDownload() and CNodeState()
if ( ! lockMain )
return true ;
// Address refresh broadcast
int64_t nNow = GetTimeMicros ( ) ;
if ( ! IsInitialBlockDownload ( ) & & pto - > nNextLocalAddrSend < nNow ) {
AdvertiseLocal ( pto ) ;
pto - > nNextLocalAddrSend = PoissonNextSend ( nNow , AVG_LOCAL_ADDRESS_BROADCAST_INTERVAL ) ;
}
//
// Message: addr
//
if ( pto - > nNextAddrSend < nNow ) {
pto - > nNextAddrSend = PoissonNextSend ( nNow , AVG_ADDRESS_BROADCAST_INTERVAL ) ;
vector < CAddress > vAddr ;
vAddr . reserve ( pto - > vAddrToSend . size ( ) ) ;
BOOST_FOREACH ( const CAddress & addr , pto - > vAddrToSend )
{
if ( ! pto - > addrKnown . contains ( addr . GetKey ( ) ) )
{
pto - > addrKnown . insert ( addr . GetKey ( ) ) ;
vAddr . push_back ( addr ) ;
// receiver rejects addr messages larger than 1000
if ( vAddr . size ( ) > = 1000 )
{
connman . PushMessage ( pto , NetMsgType : : ADDR , vAddr ) ;
vAddr . clear ( ) ;
}
}
}
pto - > vAddrToSend . clear ( ) ;
if ( ! vAddr . empty ( ) )
connman . PushMessage ( pto , NetMsgType : : ADDR , vAddr ) ;
}
CNodeState & state = * State ( pto - > GetId ( ) ) ;
if ( state . fShouldBan ) {
if ( pto - > fWhitelisted )
LogPrintf ( " Warning: not punishing whitelisted peer %s! \n " , pto - > addr . ToString ( ) ) ;
else {
pto - > fDisconnect = true ;
if ( pto - > addr . IsLocal ( ) )
LogPrintf ( " Warning: not banning local peer %s! \n " , pto - > addr . ToString ( ) ) ;
else
{
connman . Ban ( pto - > addr , BanReasonNodeMisbehaving ) ;
}
}
state . fShouldBan = false ;
}
BOOST_FOREACH ( const CBlockReject & reject , state . rejects )
connman . PushMessage ( pto , NetMsgType : : REJECT , ( string ) NetMsgType : : BLOCK , reject . chRejectCode , reject . strRejectReason , reject . hashBlock ) ;
state . rejects . clear ( ) ;
// Start block sync
if ( pindexBestHeader = = NULL )
pindexBestHeader = chainActive . Tip ( ) ;
bool fFetch = state . fPreferredDownload | | ( nPreferredDownload = = 0 & & ! pto - > fClient & & ! pto - > fOneShot ) ; // Download if this is a nice peer, or we have no nice peers and this one might do.
if ( ! state . fSyncStarted & & ! pto - > fClient & & ! fImporting & & ! fReindex ) {
// Only actively request headers from a single peer, unless we're close to end of initial download.
if ( ( nSyncStarted = = 0 & & fFetch ) | | pindexBestHeader - > GetBlockTime ( ) > GetAdjustedTime ( ) - 6 * 60 * 60 ) { // NOTE: was "close to today" and 24h in Bitcoin
state . fSyncStarted = true ;
nSyncStarted + + ;
const CBlockIndex * pindexStart = pindexBestHeader ;
/* If possible, start at the block preceding the currently
best known header . This ensures that we always get a
non - empty list of headers back as long as the peer
is up - to - date . With a non - empty response , we can initialise
the peer ' s known best block . This wouldn ' t be possible
if we requested starting at pindexBestHeader and
got back an empty response . */
if ( pindexStart - > pprev )
pindexStart = pindexStart - > pprev ;
LogPrint ( " net " , " initial getheaders (%d) to peer=%d (startheight:%d) \n " , pindexStart - > nHeight , pto - > id , pto - > nStartingHeight ) ;
connman . PushMessage ( pto , NetMsgType : : GETHEADERS , chainActive . GetLocator ( pindexStart ) , uint256 ( ) ) ;
}
}
// Resend wallet transactions that haven't gotten in a block yet
// Except during reindex, importing and IBD, when old wallet
// transactions become unconfirmed and spams other nodes.
if ( ! fReindex & & ! fImporting & & ! IsInitialBlockDownload ( ) )
{
GetMainSignals ( ) . Broadcast ( nTimeBestReceived , & connman ) ;
}
//
// Try sending block announcements via headers
//
{
// If we have less than MAX_BLOCKS_TO_ANNOUNCE in our
// list of block hashes we're relaying, and our peer wants
// headers announcements, then find the first header
// not yet known to our peer but would connect, and send.
// If no header would connect, or if we have too many
// blocks, or if the peer doesn't want headers, just
// add all to the inv queue.
LOCK ( pto - > cs_inventory ) ;
vector < CBlock > vHeaders ;
bool fRevertToInv = ( ! state . fPreferHeaders | | pto - > vBlockHashesToAnnounce . size ( ) > MAX_BLOCKS_TO_ANNOUNCE ) ;
CBlockIndex * pBestIndex = NULL ; // last header queued for delivery
ProcessBlockAvailability ( pto - > id ) ; // ensure pindexBestKnownBlock is up-to-date
if ( ! fRevertToInv ) {
bool fFoundStartingHeader = false ;
// Try to find first header that our peer doesn't have, and
// then send all headers past that one. If we come across any
// headers that aren't on chainActive, give up.
BOOST_FOREACH ( const uint256 & hash , pto - > vBlockHashesToAnnounce ) {
BlockMap : : iterator mi = mapBlockIndex . find ( hash ) ;
assert ( mi ! = mapBlockIndex . end ( ) ) ;
CBlockIndex * pindex = mi - > second ;
if ( chainActive [ pindex - > nHeight ] ! = pindex ) {
// Bail out if we reorged away from this block
fRevertToInv = true ;
break ;
}
if ( pBestIndex ! = NULL & & pindex - > pprev ! = pBestIndex ) {
// This means that the list of blocks to announce don't
// connect to each other.
// This shouldn't really be possible to hit during
// regular operation (because reorgs should take us to
// a chain that has some block not on the prior chain,
// which should be caught by the prior check), but one
// way this could happen is by using invalidateblock /
// reconsiderblock repeatedly on the tip, causing it to
// be added multiple times to vBlockHashesToAnnounce.
// Robustly deal with this rare situation by reverting
// to an inv.
fRevertToInv = true ;
break ;
}
pBestIndex = pindex ;
if ( fFoundStartingHeader ) {
// add this to the headers message
vHeaders . push_back ( pindex - > GetBlockHeader ( ) ) ;
} else if ( PeerHasHeader ( & state , pindex ) ) {
continue ; // keep looking for the first new block
} else if ( pindex - > pprev = = NULL | | PeerHasHeader ( & state , pindex - > pprev ) ) {
// Peer doesn't have this header but they do have the prior one.
// Start sending headers.
fFoundStartingHeader = true ;
vHeaders . push_back ( pindex - > GetBlockHeader ( ) ) ;
} else {
// Peer doesn't have this header or the prior one -- nothing will
// connect, so bail out.
fRevertToInv = true ;
break ;
}
}
}
if ( fRevertToInv ) {
// If falling back to using an inv, just try to inv the tip.
// The last entry in vBlockHashesToAnnounce was our tip at some point
// in the past.
if ( ! pto - > vBlockHashesToAnnounce . empty ( ) ) {
const uint256 & hashToAnnounce = pto - > vBlockHashesToAnnounce . back ( ) ;
BlockMap : : iterator mi = mapBlockIndex . find ( hashToAnnounce ) ;
assert ( mi ! = mapBlockIndex . end ( ) ) ;
CBlockIndex * pindex = mi - > second ;
// Warn if we're announcing a block that is not on the main chain.
// This should be very rare and could be optimized out.
// Just log for now.
if ( chainActive [ pindex - > nHeight ] ! = pindex ) {
LogPrint ( " net " , " Announcing block %s not on main chain (tip=%s) \n " ,
hashToAnnounce . ToString ( ) , chainActive . Tip ( ) - > GetBlockHash ( ) . ToString ( ) ) ;
}
// If the peer announced this block to us, don't inv it back.
// (Since block announcements may not be via inv's, we can't solely rely on
// setInventoryKnown to track this.)
if ( ! PeerHasHeader ( & state , pindex ) ) {
pto - > PushInventory ( CInv ( MSG_BLOCK , hashToAnnounce ) ) ;
LogPrint ( " net " , " %s: sending inv peer=%d hash=%s \n " , __func__ ,
pto - > id , hashToAnnounce . ToString ( ) ) ;
}
}
} else if ( ! vHeaders . empty ( ) ) {
if ( vHeaders . size ( ) > 1 ) {
LogPrint ( " net " , " %s: %u headers, range (%s, %s), to peer=%d \n " , __func__ ,
vHeaders . size ( ) ,
vHeaders . front ( ) . GetHash ( ) . ToString ( ) ,
vHeaders . back ( ) . GetHash ( ) . ToString ( ) , pto - > id ) ;
} else {
LogPrint ( " net " , " %s: sending header %s to peer=%d \n " , __func__ ,
vHeaders . front ( ) . GetHash ( ) . ToString ( ) , pto - > id ) ;
}
connman . PushMessage ( pto , NetMsgType : : HEADERS , vHeaders ) ;
state . pindexBestHeaderSent = pBestIndex ;
}
pto - > vBlockHashesToAnnounce . clear ( ) ;
}
//
// Message: inventory
//
vector < CInv > vInv ;
vector < CInv > vInvWait ;
{
bool fSendTrickle = pto - > fWhitelisted ;
if ( pto - > nNextInvSend < nNow ) {
fSendTrickle = true ;
pto - > nNextInvSend = PoissonNextSend ( nNow , AVG_INVENTORY_BROADCAST_INTERVAL ) ;
}
LOCK ( pto - > cs_inventory ) ;
vInv . reserve ( std : : min < size_t > ( 1000 , pto - > vInventoryToSend . size ( ) ) ) ;
vInvWait . reserve ( pto - > vInventoryToSend . size ( ) ) ;
BOOST_FOREACH ( const CInv & inv , pto - > vInventoryToSend )
{
if ( inv . type = = MSG_TX & & pto - > filterInventoryKnown . contains ( inv . hash ) )
continue ;
// trickle out tx inv to protect privacy
if ( inv . type = = MSG_TX & & ! fSendTrickle )
{
// 1/4 of tx invs blast to all immediately
static uint256 hashSalt ;
if ( hashSalt . IsNull ( ) )
hashSalt = GetRandHash ( ) ;
uint256 hashRand = ArithToUint256 ( UintToArith256 ( inv . hash ) ^ UintToArith256 ( hashSalt ) ) ;
hashRand = Hash ( BEGIN ( hashRand ) , END ( hashRand ) ) ;
bool fTrickleWait = ( ( UintToArith256 ( hashRand ) & 3 ) ! = 0 ) ;
if ( fTrickleWait )
{
LogPrint ( " net " , " SendMessages -- queued inv(vInvWait): %s index=%d peer=%d \n " , inv . ToString ( ) , vInvWait . size ( ) , pto - > id ) ;
vInvWait . push_back ( inv ) ;
continue ;
}
}
pto - > filterInventoryKnown . insert ( inv . hash ) ;
LogPrint ( " net " , " SendMessages -- queued inv: %s index=%d peer=%d \n " , inv . ToString ( ) , vInv . size ( ) , pto - > id ) ;
vInv . push_back ( inv ) ;
if ( vInv . size ( ) > = 1000 )
{
LogPrint ( " net " , " SendMessages -- pushing inv's: count=%d peer=%d \n " , vInv . size ( ) , pto - > id ) ;
connman . PushMessage ( pto , NetMsgType : : INV , vInv ) ;
vInv . clear ( ) ;
}
}
pto - > vInventoryToSend = vInvWait ;
}
if ( ! vInv . empty ( ) ) {
LogPrint ( " net " , " SendMessages -- pushing tailing inv's: count=%d peer=%d \n " , vInv . size ( ) , pto - > id ) ;
connman . PushMessage ( pto , NetMsgType : : INV , vInv ) ;
}
// Detect whether we're stalling
nNow = GetTimeMicros ( ) ;
if ( ! pto - > fDisconnect & & state . nStallingSince & & state . nStallingSince < nNow - 1000000 * BLOCK_STALLING_TIMEOUT ) {
// Stalling only triggers when the block download window cannot move. During normal steady state,
// the download window should be much larger than the to-be-downloaded set of blocks, so disconnection
// should only happen during initial block download.
LogPrintf ( " Peer=%d is stalling block download, disconnecting \n " , pto - > id ) ;
pto - > fDisconnect = true ;
}
// In case there is a block that has been in flight from this peer for 2 + 0.5 * N times the block interval
// (with N the number of peers from which we're downloading validated blocks), disconnect due to timeout.
// We compensate for other peers to prevent killing off peers due to our own downstream link
// being saturated. We only count validated in-flight blocks so peers can't advertise non-existing block hashes
// to unreasonably increase our timeout.
if ( ! pto - > fDisconnect & & state . vBlocksInFlight . size ( ) > 0 ) {
QueuedBlock & queuedBlock = state . vBlocksInFlight . front ( ) ;
int nOtherPeersWithValidatedDownloads = nPeersWithValidatedDownloads - ( state . nBlocksInFlightValidHeaders > 0 ) ;
if ( nNow > state . nDownloadingSince + consensusParams . nPowTargetSpacing * ( BLOCK_DOWNLOAD_TIMEOUT_BASE + BLOCK_DOWNLOAD_TIMEOUT_PER_PEER * nOtherPeersWithValidatedDownloads ) ) {
LogPrintf ( " Timeout downloading block %s from peer=%d, disconnecting \n " , queuedBlock . hash . ToString ( ) , pto - > id ) ;
pto - > fDisconnect = true ;
}
}
//
// Message: getdata (blocks)
//
vector < CInv > vGetData ;
if ( ! pto - > fDisconnect & & ! pto - > fClient & & ( fFetch | | ! IsInitialBlockDownload ( ) ) & & state . nBlocksInFlight < MAX_BLOCKS_IN_TRANSIT_PER_PEER ) {
vector < CBlockIndex * > vToDownload ;
NodeId staller = - 1 ;
FindNextBlocksToDownload ( pto - > GetId ( ) , MAX_BLOCKS_IN_TRANSIT_PER_PEER - state . nBlocksInFlight , vToDownload , staller ) ;
BOOST_FOREACH ( CBlockIndex * pindex , vToDownload ) {
vGetData . push_back ( CInv ( MSG_BLOCK , pindex - > GetBlockHash ( ) ) ) ;
MarkBlockAsInFlight ( pto - > GetId ( ) , pindex - > GetBlockHash ( ) , consensusParams , pindex ) ;
LogPrint ( " net " , " Requesting block %s (%d) peer=%d \n " , pindex - > GetBlockHash ( ) . ToString ( ) ,
pindex - > nHeight , pto - > id ) ;
}
if ( state . nBlocksInFlight = = 0 & & staller ! = - 1 ) {
if ( State ( staller ) - > nStallingSince = = 0 ) {
State ( staller ) - > nStallingSince = nNow ;
LogPrint ( " net " , " Stall started peer=%d \n " , staller ) ;
}
}
}
//
// Message: getdata (non-blocks)
//
while ( ! pto - > fDisconnect & & ! pto - > mapAskFor . empty ( ) & & ( * pto - > mapAskFor . begin ( ) ) . first < = nNow )
{
const CInv & inv = ( * pto - > mapAskFor . begin ( ) ) . second ;
if ( ! AlreadyHave ( inv ) )
{
LogPrint ( " net " , " SendMessages -- GETDATA -- requesting inv = %s peer=%d \n " , inv . ToString ( ) , pto - > id ) ;
vGetData . push_back ( inv ) ;
if ( vGetData . size ( ) > = 1000 )
{
connman . PushMessage ( pto , NetMsgType : : GETDATA , vGetData ) ;
LogPrint ( " net " , " SendMessages -- GETDATA -- pushed size = %lu peer=%d \n " , vGetData . size ( ) , pto - > id ) ;
vGetData . clear ( ) ;
}
} else {
//If we're not going to ask, don't expect a response.
LogPrint ( " net " , " SendMessages -- GETDATA -- already have inv = %s peer=%d \n " , inv . ToString ( ) , pto - > id ) ;
pto - > setAskFor . erase ( inv . hash ) ;
}
pto - > mapAskFor . erase ( pto - > mapAskFor . begin ( ) ) ;
}
if ( ! vGetData . empty ( ) ) {
connman . PushMessage ( pto , NetMsgType : : GETDATA , vGetData ) ;
LogPrint ( " net " , " SendMessages -- GETDATA -- pushed size = %lu peer=%d \n " , vGetData . size ( ) , pto - > id ) ;
}
}
return true ;
}
/**
 * RAII shutdown helper: the single file-scope instance below is destroyed at
 * static-destruction time and empties the orphan-transaction maps so their
 * entries are released before process exit.
 */
class CNetProcessingCleanup
{
public:
    CNetProcessingCleanup() = default;
    ~CNetProcessingCleanup() {
        // Drop all remaining orphan-transaction bookkeeping.
        mapOrphanTransactionsByPrev.clear();
        mapOrphanTransactions.clear();
    }
} instance_of_cnetprocessingcleanup;