Drop delayed headers logic and fix duplicate initial headers sync by handling block inv correctly (#2032)
* Drop custom logic for delaying GETHEADERS
Reverts "Fix duplicate headers download in initial sync (#1589)" and all following fixes
This reverts commit 169afafd50
.
* Fix duplicate initial headers sync
parent 99085c5b68
commit a648d6efff
@@ -98,8 +98,9 @@ class BIP68_112_113Test(ComparisonTestFramework):

     def setup_network(self):
         # Must set the blockversion for this test
+        # Must also set '-maxtipage=600100' to allow syncing from very old blocks
         self.nodes = start_nodes(self.num_nodes, self.options.tmpdir,
-                                 extra_args=[['-debug', '-whitelist=127.0.0.1', '-blockversion=4']],
+                                 extra_args=[['-debug', '-whitelist=127.0.0.1', '-blockversion=4', '-maxtipage=600100']],
                                  binary=[self.options.testbinary])

     def run_test(self):
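Reviewer note on '-maxtipage=600100': with the delayed-GETHEADERS path removed, how eagerly a node reacts to announcements is gated on how old its best header is relative to nMaxTipAge (see the nMaxTipAge checks in the hunks below), and this test runs on a chain with very old block times, so it widens that window to 600100 seconds (just under seven days). The sketch below is a toy model of that gate, assuming the conventional Bitcoin Core-style wiring of -maxtipage into nMaxTipAge and a six-hour Dash default; neither of those definitions is part of this diff.

    // Toy model of the tip-age gate that -maxtipage widens. Assumptions (not in this
    // diff): nMaxTipAge is filled from -maxtipage and defaults to 6 hours on Dash.
    #include <cstdint>
    #include <cstdio>

    static const int64_t DEFAULT_MAX_TIP_AGE = 6 * 60 * 60; // assumed default
    static int64_t nMaxTipAge = DEFAULT_MAX_TIP_AGE;

    // Mirrors the condition used in the net code below: the header chain counts as
    // "recent enough" when its tip is younger than nMaxTipAge.
    static bool TipRecentEnough(int64_t bestHeaderTime, int64_t now)
    {
        return bestHeaderTime > now - nMaxTipAge;
    }

    int main()
    {
        const int64_t now = 1000000000;                 // arbitrary "current" time
        const int64_t oldTip = now - 5 * 24 * 60 * 60;  // tip five days old, like a very old test chain

        std::printf("default window: %d\n", TipRecentEnough(oldTip, now)); // 0
        nMaxTipAge = 600100;                            // -maxtipage=600100 (~6.9 days)
        std::printf("widened window: %d\n", TipRecentEnough(oldTip, now)); // 1
    }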
@@ -91,7 +91,7 @@ class MaxUploadTest(BitcoinTestFramework):
     def setup_network(self):
         # Start a node with maxuploadtarget of 200 MB (/24h)
         self.nodes = []
-        self.nodes.append(start_node(0, self.options.tmpdir, ["-debug", "-maxuploadtarget=200", "-blockmaxsize=999000"]))
+        self.nodes.append(start_node(0, self.options.tmpdir, ["-debug", "-maxuploadtarget=200", "-blockmaxsize=999000", "-maxtipage="+str(2*60*60*24*7)]))

     def run_test(self):
         # Advance all nodes 2 weeks in the future
@@ -205,7 +205,7 @@ class MaxUploadTest(BitcoinTestFramework):
         #stop and start node 0 with 1MB maxuploadtarget, whitelist 127.0.0.1
         print("Restarting nodes with -whitelist=127.0.0.1")
         stop_node(self.nodes[0], 0)
-        self.nodes[0] = start_node(0, self.options.tmpdir, ["-debug", "-whitelist=127.0.0.1", "-maxuploadtarget=1", "-blockmaxsize=999000"])
+        self.nodes[0] = start_node(0, self.options.tmpdir, ["-debug", "-whitelist=127.0.0.1", "-maxuploadtarget=1", "-blockmaxsize=999000", "-maxtipage="+str(2*60*60*24*7)])

         #recreate/reconnect 3 test nodes
         test_nodes = []
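The value passed here is spelled as an expression; it works out to exactly two weeks, matching the test's "Advance all nodes 2 weeks in the future" mocktime jump, so the node still treats its tip as recent enough after the clock moves forward. A trivial compile-time check of that arithmetic (illustration only, not part of the test suite):

    // Illustration only: the -maxtipage argument used in maxuploadtarget.py equals
    // exactly two weeks, i.e. it covers the test's mocked two-week clock jump.
    #include <cstdint>

    constexpr int64_t kMaxTipAgeArg = 2 * 60 * 60 * 24 * 7;  // as written in the test
    constexpr int64_t kTwoWeeks     = 14 * 24 * 60 * 60;     // 1,209,600 seconds

    static_assert(kMaxTipAgeArg == kTwoWeeks, "-maxtipage argument covers the two-week mocktime jump");

    int main() { return 0; }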
@@ -191,7 +191,6 @@ public:
         pchMessageStart[3] = 0xbd;
         vAlertPubKey = ParseHex("048240a8748a80a286b270ba126705ced4f2ce5a7847b3610ea3c06513150dade2a8512ed5ea86320824683fc0818f0ac019214973e677acd1244f6d0571fc5103");
         nDefaultPort = 9999;
-        nDelayGetHeadersTime = 24 * 60 * 60;
         nPruneAfterHeight = 100000;

         genesis = CreateGenesisBlock(1390095618, 28917698, 0x1e0ffff0, 1, 50 * COIN);
@@ -339,7 +338,6 @@ public:
         pchMessageStart[3] = 0xff;
         vAlertPubKey = ParseHex("04517d8a699cb43d3938d7b24faaff7cda448ca4ea267723ba614784de661949bf632d6304316b244646dea079735b9a6fc4af804efb4752075b9fe2245e14e412");
         nDefaultPort = 19999;
-        nDelayGetHeadersTime = 24 * 60 * 60;
         nPruneAfterHeight = 1000;

         genesis = CreateGenesisBlock(1390666206UL, 3861367235UL, 0x1e0ffff0, 1, 50 * COIN);
@@ -470,7 +468,6 @@ public:
         pchMessageStart[3] = 0xce;
         vAlertPubKey = ParseHex("04517d8a699cb43d3938d7b24faaff7cda448ca4ea267723ba614784de661949bf632d6304316b244646dea079735b9a6fc4af804efb4752075b9fe2245e14e412");
         nDefaultPort = 19999;
-        nDelayGetHeadersTime = 24 * 60 * 60;
         nPruneAfterHeight = 1000;

         genesis = CreateGenesisBlock(1417713337, 1096447, 0x207fffff, 1, 50 * COIN);
@@ -586,7 +583,6 @@ public:
         pchMessageStart[1] = 0xc1;
         pchMessageStart[2] = 0xb7;
         pchMessageStart[3] = 0xdc;
-        nDelayGetHeadersTime = std::numeric_limits<int64_t>::max(); // never delay GETHEADERS in regtests
         nDefaultPort = 19994;
         nPruneAfterHeight = 1000;

@@ -69,7 +69,6 @@ public:
     bool DefaultConsistencyChecks() const { return fDefaultConsistencyChecks; }
     /** Policy: Filter transactions that do not match well-defined patterns */
     bool RequireStandard() const { return fRequireStandard; }
-    int64_t DelayGetHeadersTime() const { return nDelayGetHeadersTime; }
     uint64_t PruneAfterHeight() const { return nPruneAfterHeight; }
     /** Make miner stop after a block is found. In RPC, don't return until nGenProcLimit blocks are generated */
     bool MineBlocksOnDemand() const { return fMineBlocksOnDemand; }
@@ -96,7 +95,6 @@ protected:
     //! Raw pub key bytes for the broadcast alert signing key.
     std::vector<unsigned char> vAlertPubKey;
     int nDefaultPort;
-    int64_t nDelayGetHeadersTime;
     uint64_t nPruneAfterHeight;
     std::vector<CDNSSeedData> vSeeds;
     std::vector<unsigned char> base58Prefixes[MAX_BASE58_TYPES];
@@ -774,9 +774,6 @@ public:
     // Used for headers announcements - unfiltered blocks to relay
     // Also protected by cs_inventory
     std::vector<uint256> vBlockHashesToAnnounce;
-    // Blocks received by INV while headers chain was too far behind. These are used to delay GETHEADERS messages
-    // Also protected by cs_inventory
-    std::vector<uint256> vDelayedGetHeaders;
     // Used for BIP35 mempool sending, also protected by cs_inventory
     bool fSendMempool;

@@ -921,12 +918,6 @@ public:
         vBlockHashesToAnnounce.push_back(hash);
     }

-    void PushDelayedGetHeaders(const uint256 &hash)
-    {
-        LOCK(cs_inventory);
-        vDelayedGetHeaders.push_back(hash);
-    }
-
     void AskFor(const CInv& inv);

     void CloseSocketDisconnect();
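For context on what is being deleted here: PushDelayedGetHeaders was the producer half of a small hand-rolled queue. The message handler pushed block hashes under cs_inventory, and SendMessages drained the vector under the same lock once the header chain had caught up (that consumer side is removed in the @@ -3158 hunk further down). A stripped-down illustration of the pattern with a plain std::mutex, not the actual CNode code:

    // Stripped-down version of the delayed-GETHEADERS queue removed by this commit:
    // one thread buffers hashes under a lock, another drains them later.
    #include <cstdio>
    #include <mutex>
    #include <string>
    #include <vector>

    struct ToyNode {
        std::mutex cs_inventory;
        std::vector<std::string> vDelayedGetHeaders;

        void PushDelayedGetHeaders(const std::string& hash)
        {
            std::lock_guard<std::mutex> lock(cs_inventory);
            vDelayedGetHeaders.push_back(hash);
        }

        void DrainDelayedGetHeaders() // what SendMessages used to do once "caught up"
        {
            std::lock_guard<std::mutex> lock(cs_inventory);
            for (const std::string& hash : vDelayedGetHeaders)
                std::printf("getheaders up to %s\n", hash.c_str());
            vDelayedGetHeaders.clear();
        }
    };

    int main()
    {
        ToyNode node;
        node.PushDelayedGetHeaders("blockhash1");
        node.PushDelayedGetHeaders("blockhash2");
        node.DrainDelayedGetHeaders(); // after this commit, GETHEADERS is sent immediately instead
    }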
@@ -1302,24 +1302,6 @@ inline void static SendBlockTransactions(const CBlock& block, const BlockTransac
     connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::BLOCKTXN, resp));
 }

-bool static CheckGoodHeadersSyncPeer(CNode* pfrom, const CChainParams& chainparams, int nCount)
-{
-    AssertLockHeld(cs_main);
-
-    bool fGenesis = pindexBestHeader->GetBlockHash() == chainparams.GetConsensus().hashGenesisBlock;
-    bool fDevNetGenesis = !chainparams.GetConsensus().hashDevnetGenesisBlock.IsNull() && pindexBestHeader->GetBlockHash() == chainparams.GetConsensus().hashDevnetGenesisBlock;
-    if (!fGenesis && !fDevNetGenesis && nCount < MAX_HEADERS_RESULTS && GetAdjustedTime() - pindexBestHeader->GetBlockTime() > chainparams.DelayGetHeadersTime()) {
-        // peer has sent us a HEADERS message below maximum size and we are still quite far from being fully
-        // synced, this means we probably got a bad peer for initial sync and need to continue with another one.
-        // By disconnecting we force to start a new iteration of initial headers sync in SendMessages
-        // TODO should we handle whitelisted peers here as we do in headers sync timeout handling?
-        pfrom->fDisconnect = true;
-        return error("detected bad peer for initial headers sync, disconnecting peer=%d", pfrom->id);
-    }
-
-    return true;
-}
-
 bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStream& vRecv, int64_t nTimeReceived, const CChainParams& chainparams, CConnman& connman, const std::atomic<bool>& interruptMsgProc)
 {
     LogPrint("net", "received: %s (%u bytes) peer=%d\n", SanitizeString(strCommand), vRecv.size(), pfrom->id);
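With CheckGoodHeadersSyncPeer gone, a peer that serves a short or stale initial HEADERS response is no longer disconnected from the HEADERS handler; the per-peer headers-sync deadline, which both SendMessages and the new INV path below set when they pick a sync peer, is what eventually rotates away from a bad peer. A rough sketch of how that deadline scales with how far behind the header chain is, using the formula visible in this diff; the concrete constants and the 2.5-minute block spacing are assumptions taken from the upstream values, not part of this change:

    // Rough sketch of the headers-sync deadline set by the code in this diff.
    // The formula matches the diff; the constant values and the 2.5-minute spacing
    // are assumptions (upstream uses a 15-minute base plus ~1ms per expected header).
    #include <cstdint>
    #include <cstdio>

    static const int64_t HEADERS_DOWNLOAD_TIMEOUT_BASE = 15 * 60 * 1000000LL; // microseconds, assumed
    static const int64_t HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER = 1000;          // microseconds, assumed
    static const int64_t nPowTargetSpacing = 150;                             // 2.5 minutes, assumed

    static int64_t HeadersSyncDeadline(int64_t nowMicros, int64_t nowSecs, int64_t bestHeaderTime)
    {
        // The further behind the best header is, the more headers are expected,
        // and the larger the time budget before the sync peer is considered stalled.
        return nowMicros + HEADERS_DOWNLOAD_TIMEOUT_BASE
               + HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER * (nowSecs - bestHeaderTime) / nPowTargetSpacing;
    }

    int main()
    {
        // Example: best header one year behind -> roughly 210k expected headers,
        // adding ~3.5 minutes on top of the 15-minute base.
        const int64_t yearSecs = 365LL * 24 * 60 * 60;
        const int64_t deadline = HeadersSyncDeadline(/*nowMicros=*/0, /*nowSecs=*/yearSecs, /*bestHeaderTime=*/0);
        std::printf("headers sync budget: ~%lld seconds\n", (long long)(deadline / 1000000));
    }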
@@ -1700,31 +1682,31 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr

             if (inv.type == MSG_BLOCK) {
                 UpdateBlockAvailability(pfrom->GetId(), inv.hash);
-                if (!fAlreadyHave && !fImporting && !fReindex && !mapBlocksInFlight.count(inv.hash)) {
-                    // Always send GETHEADERS when we are still on the devnet genesis block. Otherwise we'll never sync.
-                    // This is because after startup of the node, we are in IBD mode, which will only be left when recent
-                    // blocks arrive. At the same time, we won't get any blocks from peers because we keep delaying
-                    // GETHEADERS
-                    bool fDevNetGenesis = !chainparams.GetConsensus().hashDevnetGenesisBlock.IsNull() && pindexBestHeader->GetBlockHash() == chainparams.GetConsensus().hashDevnetGenesisBlock;
-
-                    if (!fDevNetGenesis && (GetAdjustedTime() - pindexBestHeader->GetBlockTime() > chainparams.DelayGetHeadersTime())) {
-                        // We are pretty far from being completely synced at the moment. If we would initiate a new
-                        // chain of GETHEADERS/HEADERS now, we may end up downnloading the full chain from multiple
-                        // peers at the same time, slowing down the initial sync. At the same time, we don't know
-                        // if the peer we got this INV from may have a chain we don't know about yet, so we HAVE TO
-                        // send a GETHEADERS message at some point in time. This is delayed to later in SendMessages
-                        // when the headers chain has catched up enough.
-                        LogPrint("net", "delaying getheaders (%d) %s to peer=%d\n", pindexBestHeader->nHeight, inv.hash.ToString(), pfrom->id);
-                        pfrom->PushDelayedGetHeaders(inv.hash);
-                    } else {
-                        // We used to request the full block here, but since headers-announcements are now the
-                        // primary method of announcement on the network, and since, in the case that a node
-                        // fell back to inv we probably have a reorg which we should get the headers for first,
-                        // we now only provide a getheaders response here. When we receive the headers, we will
-                        // then ask for the blocks we need.
-                        connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::GETHEADERS, chainActive.GetLocator(pindexBestHeader), inv.hash));
-                        LogPrint("net", "getheaders (%d) %s to peer=%d\n", pindexBestHeader->nHeight, inv.hash.ToString(), pfrom->id);
-                    }
+                if (fAlreadyHave || fImporting || fReindex || mapBlocksInFlight.count(inv.hash)) {
+                    continue;
+                }
+
+                CNodeState *state = State(pfrom->id);
+                if (!state) {
+                    continue;
+                }
+
+                // Download if this is a nice peer, or we have no nice peers and this one might do.
+                bool fFetch = state->fPreferredDownload || (nPreferredDownload == 0 && !pfrom->fOneShot);
+                // Only actively request headers from a single peer, unless we're close to end of initial download.
+                if ((nSyncStarted == 0 && fFetch) || pindexBestHeader->GetBlockTime() > GetAdjustedTime() - nMaxTipAge) {
+                    // Make sure to mark this peer as the one we are currently syncing with etc.
+                    state->fSyncStarted = true;
+                    state->nHeadersSyncTimeout = GetTimeMicros() + HEADERS_DOWNLOAD_TIMEOUT_BASE + HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER * (GetAdjustedTime() - pindexBestHeader->GetBlockTime())/(chainparams.GetConsensus().nPowTargetSpacing);
+                    nSyncStarted++;
+
+                    // We used to request the full block here, but since headers-announcements are now the
+                    // primary method of announcement on the network, and since, in the case that a node
+                    // fell back to inv we probably have a reorg which we should get the headers for first,
+                    // we now only provide a getheaders response here. When we receive the headers, we will
+                    // then ask for the blocks we need.
+                    connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::GETHEADERS, chainActive.GetLocator(pindexBestHeader), inv.hash));
+                    LogPrint("net", "getheaders (%d) %s to peer=%d\n", pindexBestHeader->nHeight, inv.hash.ToString(), pfrom->id);
                 }
             }
             else
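Design note on the rewritten INV branch above: it now applies the same gate that SendMessages uses (and keeps, see the @@ -3139 hunk below) before starting a headers sync: only one peer is used for the initial headers download (nSyncStarted == 0) unless the best header is already within nMaxTipAge of the current time. That is what removes the duplicate initial headers sync: deep in IBD the first suitable peer becomes the sync peer and later block INVs are ignored until the chain is near the tip, while near the tip every INV still gets an immediate GETHEADERS so reorgs are handled as before. A toy, self-contained model of that gate with simplified names (the real code tracks this in CNodeState):

    // Toy model of the single-sync-peer gate shared by the INV path and SendMessages.
    // Simplified types and names; not the real CNodeState/nSyncStarted plumbing.
    #include <cstdint>
    #include <cstdio>
    #include <vector>

    static const int64_t nMaxTipAge = 6 * 60 * 60; // assumed default window
    static int nSyncStarted = 0;

    struct Peer { bool fPreferred; bool fSyncStarted; };

    // Returns true if a block INV from this peer should trigger GETHEADERS now.
    static bool StartHeadersSync(Peer& peer, int64_t bestHeaderTime, int64_t now)
    {
        const bool fFetch = peer.fPreferred; // ignoring the "no preferred peers" fallback
        if ((nSyncStarted == 0 && fFetch) || bestHeaderTime > now - nMaxTipAge) {
            peer.fSyncStarted = true;
            nSyncStarted++;
            return true;
        }
        return false;
    }

    int main()
    {
        const int64_t now = 1000000000;
        const int64_t oldTip = now - 30 * 24 * 60 * 60; // best header a month behind
        std::vector<Peer> peers(3, Peer{true, false});

        // Deep in IBD only the first peer becomes the sync peer; with a recent tip
        // every peer's INV would get a GETHEADERS.
        for (size_t i = 0; i < peers.size(); ++i)
            std::printf("peer %zu INV -> getheaders: %d\n", i, StartHeadersSync(peers[i], oldTip, now));
    }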
@@ -2464,9 +2446,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr

         if (nCount == 0) {
             // Nothing interesting. Stop asking this peers for more headers.
-            // See if it's a peer with "good" headers though.
-            LOCK(cs_main);
-            return CheckGoodHeadersSyncPeer(pfrom, chainparams, nCount);
+            return true;
         }

         const CBlockIndex *pindexLast = NULL;
@@ -2540,8 +2520,6 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
             // from there instead.
             LogPrint("net", "more getheaders (%d) to end to peer=%d (startheight:%d)\n", pindexLast->nHeight, pfrom->id, pfrom->nStartingHeight);
             connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::GETHEADERS, chainActive.GetLocator(pindexLast), uint256()));
-        } else if (!CheckGoodHeadersSyncPeer(pfrom, chainparams, nCount)) {
-            return false;
         }

         bool fCanDirectFetch = CanDirectFetch(chainparams.GetConsensus());
@@ -3050,8 +3028,7 @@ public:

 bool SendMessages(CNode* pto, CConnman& connman, const std::atomic<bool>& interruptMsgProc)
 {
-    const CChainParams chainParams = Params();
-    const Consensus::Params& consensusParams = chainParams.GetConsensus();
+    const Consensus::Params& consensusParams = Params().GetConsensus();
     {
         // Don't send anything until the version handshake is complete
         if (!pto->fSuccessfullyConnected || pto->fDisconnect)
@@ -3139,7 +3116,7 @@ bool SendMessages(CNode* pto, CConnman& connman, const std::atomic<bool>& interr
         bool fFetch = state.fPreferredDownload || (nPreferredDownload == 0 && !pto->fClient && !pto->fOneShot); // Download if this is a nice peer, or we have no nice peers and this one might do.
         if (!state.fSyncStarted && !pto->fClient && !fImporting && !fReindex) {
             // Only actively request headers from a single peer, unless we're close to end of initial download.
-            if ((nSyncStarted == 0 && fFetch) || pindexBestHeader->GetBlockTime() > GetAdjustedTime() - 6 * 60 * 60) { // NOTE: was "close to today" and 24h in Bitcoin
+            if ((nSyncStarted == 0 && fFetch) || pindexBestHeader->GetBlockTime() > GetAdjustedTime() - nMaxTipAge) {
                 state.fSyncStarted = true;
                 state.nHeadersSyncTimeout = GetTimeMicros() + HEADERS_DOWNLOAD_TIMEOUT_BASE + HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER * (GetAdjustedTime() - pindexBestHeader->GetBlockTime())/(consensusParams.nPowTargetSpacing);
                 nSyncStarted++;
@@ -3158,17 +3135,6 @@ bool SendMessages(CNode* pto, CConnman& connman, const std::atomic<bool>& interr
             }
         }

-        if (GetAdjustedTime() - pindexBestHeader->GetBlockTime() <= chainParams.DelayGetHeadersTime()) {
-            // Headers chain has catched up enough so we can send out GETHEADER messages which were initially meant to
-            // be sent directly after INV was received
-            LOCK(pto->cs_inventory);
-            BOOST_FOREACH(const uint256 &hash, pto->vDelayedGetHeaders) {
-                LogPrint("net", "process delayed getheaders (%d) to peer=%d\n", pindexBestHeader->nHeight, pto->id);
-                connman.PushMessage(pto, msgMaker.Make(NetMsgType::GETHEADERS, chainActive.GetLocator(pindexBestHeader), hash));
-            }
-            pto->vDelayedGetHeaders.clear();
-        }
-
         // Resend wallet transactions that haven't gotten in a block yet
         // Except during reindex, importing and IBD, when old wallet
         // transactions become unconfirmed and spams other nodes.
@@ -3469,7 +3435,7 @@ bool SendMessages(CNode* pto, CConnman& connman, const std::atomic<bool>& interr
         // Check for headers sync timeouts
         if (state.fSyncStarted && state.nHeadersSyncTimeout < std::numeric_limits<int64_t>::max()) {
             // Detect whether this is a stalling initial-headers-sync peer
-            if (pindexBestHeader->GetBlockTime() <= GetAdjustedTime() - 6*60*60) { // was 24*60*60 in bitcoin
+            if (pindexBestHeader->GetBlockTime() <= GetAdjustedTime() - nMaxTipAge) {
                 if (nNow > state.nHeadersSyncTimeout && nSyncStarted == 1 && (nPreferredDownload - state.fPreferredDownload >= 1)) {
                     // Disconnect a (non-whitelisted) peer if it is our only sync peer,
                     // and we have others we could be using instead.