2016-12-20 14:26:45 +01:00
|
|
|
// Copyright (c) 2014-2017 The Dash Core developers
|
2015-07-15 04:44:58 +02:00
|
|
|
// Distributed under the MIT/X11 software license, see the accompanying
|
|
|
|
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
|
|
|
|
|
2015-07-29 05:26:08 +02:00
|
|
|
#include "activemasternode.h"
|
2016-04-15 04:54:11 +02:00
|
|
|
#include "governance.h"
|
2017-08-09 02:19:06 +02:00
|
|
|
#include "validation.h"
|
2016-08-28 12:12:14 +02:00
|
|
|
#include "masternode-payments.h"
|
|
|
|
#include "masternode-sync.h"
|
2015-07-17 11:17:15 +02:00
|
|
|
#include "masternodeman.h"
|
2016-09-27 09:50:04 +02:00
|
|
|
#include "netfulfilledman.h"
|
2016-11-25 20:01:56 +01:00
|
|
|
#include "netmessagemaker.h"
|
2017-12-01 19:53:34 +01:00
|
|
|
#include "ui_interface.h"
|
2018-02-15 17:45:53 +01:00
|
|
|
#include "evo/deterministicmns.h"
|
2015-07-15 04:44:58 +02:00
|
|
|
|
|
|
|
// NOTE(review): forward-declaring CMasternodeSync immediately before defining an
// object of that type is only legal because the complete class definition is
// already visible via one of the headers above (presumably masternode-sync.h is
// pulled in transitively) — confirm; the forward declaration itself looks redundant.
class CMasternodeSync;
// Global sync-state tracker used throughout the node (see GetSyncStatus/ProcessTick).
CMasternodeSync masternodeSync;
|
|
|
|
|
2016-08-28 12:12:14 +02:00
|
|
|
void CMasternodeSync::Fail()
|
|
|
|
{
|
|
|
|
nTimeLastFailure = GetTime();
|
2018-09-30 13:02:52 +02:00
|
|
|
nCurrentAsset = MASTERNODE_SYNC_FAILED;
|
2016-08-28 12:12:14 +02:00
|
|
|
}
|
|
|
|
|
2015-08-04 20:21:27 +02:00
|
|
|
void CMasternodeSync::Reset()
|
2016-08-17 09:08:25 +02:00
|
|
|
{
|
2018-09-30 13:02:52 +02:00
|
|
|
nCurrentAsset = MASTERNODE_SYNC_INITIAL;
|
|
|
|
nTriedPeerCount = 0;
|
2016-08-28 12:12:14 +02:00
|
|
|
nTimeAssetSyncStarted = GetTime();
|
2017-09-03 15:30:08 +02:00
|
|
|
nTimeLastBumped = GetTime();
|
2016-08-28 12:12:14 +02:00
|
|
|
nTimeLastFailure = 0;
|
2015-07-15 04:44:58 +02:00
|
|
|
}
|
|
|
|
|
2018-02-12 13:49:00 +01:00
|
|
|
void CMasternodeSync::BumpAssetLastTime(const std::string& strFuncName)
|
2017-08-09 18:07:03 +02:00
|
|
|
{
|
|
|
|
if(IsSynced() || IsFailed()) return;
|
|
|
|
nTimeLastBumped = GetTime();
|
2017-09-15 20:05:28 +02:00
|
|
|
LogPrint("mnsync", "CMasternodeSync::BumpAssetLastTime -- %s\n", strFuncName);
|
2017-08-09 18:07:03 +02:00
|
|
|
}
|
|
|
|
|
2016-06-08 08:57:16 +02:00
|
|
|
std::string CMasternodeSync::GetAssetName()
|
|
|
|
{
|
2018-09-30 13:02:52 +02:00
|
|
|
switch(nCurrentAsset)
|
2016-06-08 08:57:16 +02:00
|
|
|
{
|
2016-08-28 12:12:14 +02:00
|
|
|
case(MASTERNODE_SYNC_INITIAL): return "MASTERNODE_SYNC_INITIAL";
|
2017-09-03 15:30:08 +02:00
|
|
|
case(MASTERNODE_SYNC_WAITING): return "MASTERNODE_SYNC_WAITING";
|
2016-08-28 12:12:14 +02:00
|
|
|
case(MASTERNODE_SYNC_LIST): return "MASTERNODE_SYNC_LIST";
|
|
|
|
case(MASTERNODE_SYNC_MNW): return "MASTERNODE_SYNC_MNW";
|
|
|
|
case(MASTERNODE_SYNC_GOVERNANCE): return "MASTERNODE_SYNC_GOVERNANCE";
|
|
|
|
case(MASTERNODE_SYNC_FAILED): return "MASTERNODE_SYNC_FAILED";
|
|
|
|
case MASTERNODE_SYNC_FINISHED: return "MASTERNODE_SYNC_FINISHED";
|
|
|
|
default: return "UNKNOWN";
|
2016-06-08 08:57:16 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-09-19 16:51:38 +02:00
|
|
|
// Advance the sync state machine to the next asset.
// Order of assets: INITIAL -> WAITING -> LIST -> MNW -> GOVERNANCE -> FINISHED.
// When deterministic masternodes are active, LIST and MNW are skipped
// (both WAITING and LIST jump straight to GOVERNANCE).
// Throws if called while in the FAILED state — callers must Reset() first.
// Always resets the per-asset peer counter and timers on exit.
void CMasternodeSync::SwitchToNextAsset(CConnman& connman)
{
    switch(nCurrentAsset)
    {
        case(MASTERNODE_SYNC_FAILED):
            // Cannot recover from FAILED here; Reset() is the only way out.
            throw std::runtime_error("Can't switch to next asset from failed, should use Reset() first!");
            break;
        case(MASTERNODE_SYNC_INITIAL):
            nCurrentAsset = MASTERNODE_SYNC_WAITING;
            LogPrintf("CMasternodeSync::SwitchToNextAsset -- Starting %s\n", GetAssetName());
            break;
        case(MASTERNODE_SYNC_WAITING):
            LogPrintf("CMasternodeSync::SwitchToNextAsset -- Completed %s in %llds\n", GetAssetName(), GetTime() - nTimeAssetSyncStarted);
            // With deterministic MNs the list/payments come from the chain,
            // so skip directly to governance sync.
            if (deterministicMNManager->IsDeterministicMNsSporkActive()) {
                nCurrentAsset = MASTERNODE_SYNC_GOVERNANCE;
            } else {
                nCurrentAsset = MASTERNODE_SYNC_LIST;
            }
            LogPrintf("CMasternodeSync::SwitchToNextAsset -- Starting %s\n", GetAssetName());
            break;
        case(MASTERNODE_SYNC_LIST):
            LogPrintf("CMasternodeSync::SwitchToNextAsset -- Completed %s in %llds\n", GetAssetName(), GetTime() - nTimeAssetSyncStarted);
            // Same skip as above: payments are derived deterministically.
            if (deterministicMNManager->IsDeterministicMNsSporkActive()) {
                nCurrentAsset = MASTERNODE_SYNC_GOVERNANCE;
            } else {
                nCurrentAsset = MASTERNODE_SYNC_MNW;
            }
            LogPrintf("CMasternodeSync::SwitchToNextAsset -- Starting %s\n", GetAssetName());
            break;
        case(MASTERNODE_SYNC_MNW):
            LogPrintf("CMasternodeSync::SwitchToNextAsset -- Completed %s in %llds\n", GetAssetName(), GetTime() - nTimeAssetSyncStarted);
            nCurrentAsset = MASTERNODE_SYNC_GOVERNANCE;
            LogPrintf("CMasternodeSync::SwitchToNextAsset -- Starting %s\n", GetAssetName());
            break;
        case(MASTERNODE_SYNC_GOVERNANCE):
            LogPrintf("CMasternodeSync::SwitchToNextAsset -- Completed %s in %llds\n", GetAssetName(), GetTime() - nTimeAssetSyncStarted);
            nCurrentAsset = MASTERNODE_SYNC_FINISHED;
            // Tell the GUI we are at 100%.
            uiInterface.NotifyAdditionalDataSyncProgressChanged(1);

            //try to activate our masternode if possible
            legacyActiveMasternodeManager.ManageState(connman);

            // Remember that we fully synced against every currently connected
            // peer; ProcessTick() uses this to disconnect recently-used peers.
            connman.ForEachNode(CConnman::AllNodes, [](CNode* pnode) {
                netfulfilledman.AddFulfilledRequest(pnode->addr, "full-sync");
            });
            LogPrintf("CMasternodeSync::SwitchToNextAsset -- Sync has finished\n");

            break;
    }
    // Fresh asset: no peers tried yet, timers restart now.
    nTriedPeerCount = 0;
    nTimeAssetSyncStarted = GetTime();
    BumpAssetLastTime("CMasternodeSync::SwitchToNextAsset");
}
|
|
|
|
|
2015-08-15 17:53:55 +02:00
|
|
|
std::string CMasternodeSync::GetSyncStatus()
|
|
|
|
{
|
2018-09-30 13:02:52 +02:00
|
|
|
switch (masternodeSync.nCurrentAsset) {
|
2017-09-03 15:30:08 +02:00
|
|
|
case MASTERNODE_SYNC_INITIAL: return _("Synchroning blockchain...");
|
|
|
|
case MASTERNODE_SYNC_WAITING: return _("Synchronization pending...");
|
2016-08-28 12:12:14 +02:00
|
|
|
case MASTERNODE_SYNC_LIST: return _("Synchronizing masternodes...");
|
2016-09-21 16:45:29 +02:00
|
|
|
case MASTERNODE_SYNC_MNW: return _("Synchronizing masternode payments...");
|
2016-08-28 12:12:14 +02:00
|
|
|
case MASTERNODE_SYNC_GOVERNANCE: return _("Synchronizing governance objects...");
|
|
|
|
case MASTERNODE_SYNC_FAILED: return _("Synchronization failed");
|
|
|
|
case MASTERNODE_SYNC_FINISHED: return _("Synchronization finished");
|
|
|
|
default: return "";
|
2015-08-15 17:53:55 +02:00
|
|
|
}
|
|
|
|
}
|
2015-07-29 06:16:11 +02:00
|
|
|
|
2017-02-06 14:31:37 +01:00
|
|
|
// Handle sync-related P2P messages. Currently only SYNCSTATUSCOUNT, which
// carries a peer's inventory count for a given sync item; it is logged and
// otherwise ignored once the sync process has finished or failed.
void CMasternodeSync::ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStream& vRecv)
{
    if (strCommand != NetMsgType::SYNCSTATUSCOUNT) return;

    //do not care about stats if sync process finished or failed
    if (IsSynced() || IsFailed()) return;

    int nItemID;
    int nCount;
    vRecv >> nItemID >> nCount;

    LogPrintf("SYNCSTATUSCOUNT -- got inventory count: nItemID=%d nCount=%d peer=%d\n", nItemID, nCount, pfrom->id);
}
|
|
|
|
|
2017-09-19 16:51:38 +02:00
|
|
|
// Main sync driver, called periodically (roughly every MASTERNODE_SYNC_TICK_SECONDS).
// Responsibilities, in order:
//   1. detect a long gap between calls (sleep/suspend) and restart the sync;
//   2. retry after a failed sync once the cooldown elapses;
//   3. once synced, only trickle-request remaining governance votes;
//   4. otherwise iterate connected peers and request the data for the
//      current asset (sporks always; then list / payments / governance),
//      asking at most a few peers and at most once per peer.
// NOTE: every early `return` inside the peer loop must be preceded by
// connman.ReleaseNodeVector(vNodesCopy) — the copied node vector holds refs.
void CMasternodeSync::ProcessTick(CConnman& connman)
{
    static int nTick = 0;
    nTick++;

    // reset the sync process if the last call to this function was more than 60 minutes ago (client was in sleep mode)
    static int64_t nTimeLastProcess = GetTime();
    if(GetTime() - nTimeLastProcess > 60*60) {
        LogPrintf("CMasternodeSync::ProcessTick -- WARNING: no actions for too long, restarting sync...\n");
        Reset();
        SwitchToNextAsset(connman);
        nTimeLastProcess = GetTime();
        return;
    }
    nTimeLastProcess = GetTime();

    // reset sync status in case of any other sync failure
    if(IsFailed()) {
        if(nTimeLastFailure + (1*60) < GetTime()) { // 1 minute cooldown after failed sync
            LogPrintf("CMasternodeSync::ProcessTick -- WARNING: failed to sync, trying again...\n");
            Reset();
            SwitchToNextAsset(connman);
        }
        return;
    }

    // gradually request the rest of the votes after sync finished
    if(IsSynced()) {
        std::vector<CNode*> vNodesCopy = connman.CopyNodeVector(CConnman::FullyConnectedOnly);
        governance.RequestGovernanceObjectVotes(vNodesCopy, connman);
        connman.ReleaseNodeVector(vNodesCopy);
        return;
    }

    // Calculate "progress" for LOG reporting / GUI notification
    // (assumes up to 8 peer attempts per asset across 4 sync-able assets)
    double nSyncProgress = double(nTriedPeerCount + (nCurrentAsset - 1) * 8) / (8*4);
    LogPrintf("CMasternodeSync::ProcessTick -- nTick %d nCurrentAsset %d nTriedPeerCount %d nSyncProgress %f\n", nTick, nCurrentAsset, nTriedPeerCount, nSyncProgress);
    uiInterface.NotifyAdditionalDataSyncProgressChanged(nSyncProgress);

    std::vector<CNode*> vNodesCopy = connman.CopyNodeVector(CConnman::FullyConnectedOnly);

    for (auto& pnode : vNodesCopy)
    {
        CNetMsgMaker msgMaker(pnode->GetSendVersion());

        // Don't try to sync any data from outbound "masternode" connections -
        // they are temporary and should be considered unreliable for a sync process.
        // Inbound connection this early is most likely a "masternode" connection
        // initiated from another node, so skip it too.
        if(pnode->fMasternode || (fMasternodeMode && pnode->fInbound)) continue;

        // QUICK MODE (REGTEST ONLY!)
        // On regtest each tick completes the current asset immediately
        // using just this one peer.
        if(Params().NetworkIDString() == CBaseChainParams::REGTEST)
        {
            if (nCurrentAsset == MASTERNODE_SYNC_WAITING) {
                connman.PushMessage(pnode, msgMaker.Make(NetMsgType::GETSPORKS)); //get current network sporks
                SwitchToNextAsset(connman);
            } else if (nCurrentAsset == MASTERNODE_SYNC_LIST) {
                if (!deterministicMNManager->IsDeterministicMNsSporkActive()) {
                    mnodeman.DsegUpdate(pnode, connman);
                }
                SwitchToNextAsset(connman);
            } else if (nCurrentAsset == MASTERNODE_SYNC_MNW) {
                if (!deterministicMNManager->IsDeterministicMNsSporkActive()) {
                    connman.PushMessage(pnode, msgMaker.Make(NetMsgType::MASTERNODEPAYMENTSYNC)); //sync payment votes
                }
                SwitchToNextAsset(connman);
            } else if (nCurrentAsset == MASTERNODE_SYNC_GOVERNANCE) {
                SendGovernanceSyncRequest(pnode, connman);
                SwitchToNextAsset(connman);
            }
            connman.ReleaseNodeVector(vNodesCopy);
            return;
        }

        // NORMAL NETWORK MODE - TESTNET/MAINNET
        {
            if(netfulfilledman.HasFulfilledRequest(pnode->addr, "full-sync")) {
                // We already fully synced from this node recently,
                // disconnect to free this connection slot for another peer.
                pnode->fDisconnect = true;
                LogPrintf("CMasternodeSync::ProcessTick -- disconnecting from recently synced peer=%d\n", pnode->id);
                continue;
            }

            // SPORK : ALWAYS ASK FOR SPORKS AS WE SYNC

            if(!netfulfilledman.HasFulfilledRequest(pnode->addr, "spork-sync")) {
                // always get sporks first, only request once from each peer
                netfulfilledman.AddFulfilledRequest(pnode->addr, "spork-sync");
                // get current network sporks
                connman.PushMessage(pnode, msgMaker.Make(NetMsgType::GETSPORKS));
                LogPrintf("CMasternodeSync::ProcessTick -- nTick %d nCurrentAsset %d -- requesting sporks from peer=%d\n", nTick, nCurrentAsset, pnode->id);
            }

            // INITIAL TIMEOUT

            if(nCurrentAsset == MASTERNODE_SYNC_WAITING) {
                if(GetTime() - nTimeLastBumped > MASTERNODE_SYNC_TIMEOUT_SECONDS) {
                    // At this point we know that:
                    // a) there are peers (because we are looping on at least one of them);
                    // b) we waited for at least MASTERNODE_SYNC_TIMEOUT_SECONDS since we reached
                    //    the headers tip the last time (i.e. since we switched from
                    //    MASTERNODE_SYNC_INITIAL to MASTERNODE_SYNC_WAITING and bumped time);
                    // c) there were no blocks (UpdatedBlockTip, NotifyHeaderTip) or headers (AcceptedBlockHeader)
                    //    for at least MASTERNODE_SYNC_TIMEOUT_SECONDS.
                    // We must be at the tip already, let's move to the next asset.
                    SwitchToNextAsset(connman);
                }
            }

            // MNLIST : SYNC MASTERNODE LIST FROM OTHER CONNECTED CLIENTS

            if(nCurrentAsset == MASTERNODE_SYNC_LIST) {
                // Deterministic MNs make explicit list sync unnecessary.
                if (deterministicMNManager->IsDeterministicMNsSporkActive()) {
                    SwitchToNextAsset(connman);
                    connman.ReleaseNodeVector(vNodesCopy);
                    return;
                }

                LogPrint("masternode", "CMasternodeSync::ProcessTick -- nTick %d nCurrentAsset %d nTimeLastBumped %lld GetTime() %lld diff %lld\n", nTick, nCurrentAsset, nTimeLastBumped, GetTime(), GetTime() - nTimeLastBumped);
                // check for timeout first
                if(GetTime() - nTimeLastBumped > MASTERNODE_SYNC_TIMEOUT_SECONDS) {
                    LogPrintf("CMasternodeSync::ProcessTick -- nTick %d nCurrentAsset %d -- timeout\n", nTick, nCurrentAsset);
                    if (nTriedPeerCount == 0) {
                        LogPrintf("CMasternodeSync::ProcessTick -- ERROR: failed to sync %s\n", GetAssetName());
                        // there is no way we can continue without masternode list, fail here and try later
                        Fail();
                        connman.ReleaseNodeVector(vNodesCopy);
                        return;
                    }
                    SwitchToNextAsset(connman);
                    connman.ReleaseNodeVector(vNodesCopy);
                    return;
                }

                // request from three peers max
                if (nTriedPeerCount > 2) {
                    connman.ReleaseNodeVector(vNodesCopy);
                    return;
                }

                // only request once from each peer
                if(netfulfilledman.HasFulfilledRequest(pnode->addr, "masternode-list-sync")) continue;
                netfulfilledman.AddFulfilledRequest(pnode->addr, "masternode-list-sync");

                if (pnode->nVersion < mnpayments.GetMinMasternodePaymentsProto()) continue;
                nTriedPeerCount++;

                mnodeman.DsegUpdate(pnode, connman);

                connman.ReleaseNodeVector(vNodesCopy);
                return; //this will cause each peer to get one request each six seconds for the various assets we need
            }

            // MNW : SYNC MASTERNODE PAYMENT VOTES FROM OTHER CONNECTED CLIENTS

            if(nCurrentAsset == MASTERNODE_SYNC_MNW) {
                // Deterministic MNs make explicit payment-vote sync unnecessary.
                if (deterministicMNManager->IsDeterministicMNsSporkActive()) {
                    SwitchToNextAsset(connman);
                    connman.ReleaseNodeVector(vNodesCopy);
                    return;
                }

                LogPrint("mnpayments", "CMasternodeSync::ProcessTick -- nTick %d nCurrentAsset %d nTimeLastBumped %lld GetTime() %lld diff %lld\n", nTick, nCurrentAsset, nTimeLastBumped, GetTime(), GetTime() - nTimeLastBumped);
                // check for timeout first
                // This might take a lot longer than MASTERNODE_SYNC_TIMEOUT_SECONDS due to new blocks,
                // but that should be OK and it should timeout eventually.
                if(GetTime() - nTimeLastBumped > MASTERNODE_SYNC_TIMEOUT_SECONDS) {
                    LogPrintf("CMasternodeSync::ProcessTick -- nTick %d nCurrentAsset %d -- timeout\n", nTick, nCurrentAsset);
                    if (nTriedPeerCount == 0) {
                        LogPrintf("CMasternodeSync::ProcessTick -- ERROR: failed to sync %s\n", GetAssetName());
                        // probably not a good idea to proceed without winner list
                        Fail();
                        connman.ReleaseNodeVector(vNodesCopy);
                        return;
                    }
                    SwitchToNextAsset(connman);
                    connman.ReleaseNodeVector(vNodesCopy);
                    return;
                }

                // check for data
                // if mnpayments already has enough blocks and votes, switch to the next asset
                // try to fetch data from at least two peers though
                if(nTriedPeerCount > 1 && mnpayments.IsEnoughData()) {
                    LogPrintf("CMasternodeSync::ProcessTick -- nTick %d nCurrentAsset %d -- found enough data\n", nTick, nCurrentAsset);
                    SwitchToNextAsset(connman);
                    connman.ReleaseNodeVector(vNodesCopy);
                    return;
                }

                // request from three peers max
                if (nTriedPeerCount > 2) {
                    connman.ReleaseNodeVector(vNodesCopy);
                    return;
                }

                // only request once from each peer
                if(netfulfilledman.HasFulfilledRequest(pnode->addr, "masternode-payment-sync")) continue;
                netfulfilledman.AddFulfilledRequest(pnode->addr, "masternode-payment-sync");

                if(pnode->nVersion < mnpayments.GetMinMasternodePaymentsProto()) continue;
                nTriedPeerCount++;

                // ask node for all payment votes it has (new nodes will only return votes for future payments)
                connman.PushMessage(pnode, msgMaker.Make(NetMsgType::MASTERNODEPAYMENTSYNC));
                // ask node for missing pieces only (old nodes will not be asked)
                mnpayments.RequestLowDataPaymentBlocks(pnode, connman);

                connman.ReleaseNodeVector(vNodesCopy);
                return; //this will cause each peer to get one request each six seconds for the various assets we need
            }

            // GOVOBJ : SYNC GOVERNANCE ITEMS FROM OUR PEERS

            if(nCurrentAsset == MASTERNODE_SYNC_GOVERNANCE) {
                LogPrint("gobject", "CMasternodeSync::ProcessTick -- nTick %d nCurrentAsset %d nTimeLastBumped %lld GetTime() %lld diff %lld\n", nTick, nCurrentAsset, nTimeLastBumped, GetTime(), GetTime() - nTimeLastBumped);

                // check for timeout first
                if(GetTime() - nTimeLastBumped > MASTERNODE_SYNC_TIMEOUT_SECONDS) {
                    LogPrintf("CMasternodeSync::ProcessTick -- nTick %d nCurrentAsset %d -- timeout\n", nTick, nCurrentAsset);
                    if(nTriedPeerCount == 0) {
                        LogPrintf("CMasternodeSync::ProcessTick -- WARNING: failed to sync %s\n", GetAssetName());
                        // it's kind of ok to skip this for now, hopefully we'll catch up later?
                    }
                    SwitchToNextAsset(connman);
                    connman.ReleaseNodeVector(vNodesCopy);
                    return;
                }

                // only request obj sync once from each peer, then request votes on per-obj basis
                if(netfulfilledman.HasFulfilledRequest(pnode->addr, "governance-sync")) {
                    int nObjsLeftToAsk = governance.RequestGovernanceObjectVotes(pnode, connman);
                    static int64_t nTimeNoObjectsLeft = 0;
                    // check for data
                    if(nObjsLeftToAsk == 0) {
                        static int nLastTick = 0;
                        static int nLastVotes = 0;
                        if(nTimeNoObjectsLeft == 0) {
                            // asked all objects for votes for the first time
                            nTimeNoObjectsLeft = GetTime();
                        }
                        // make sure the condition below is checked only once per tick
                        if(nLastTick == nTick) continue;
                        if(GetTime() - nTimeNoObjectsLeft > MASTERNODE_SYNC_TIMEOUT_SECONDS &&
                            governance.GetVoteCount() - nLastVotes < std::max(int(0.0001 * nLastVotes), MASTERNODE_SYNC_TICK_SECONDS)
                        ) {
                            // We already asked for all objects, waited for MASTERNODE_SYNC_TIMEOUT_SECONDS
                            // after that and less then 0.01% or MASTERNODE_SYNC_TICK_SECONDS
                            // (i.e. 1 per second) votes were recieved during the last tick.
                            // We can be pretty sure that we are done syncing.
                            LogPrintf("CMasternodeSync::ProcessTick -- nTick %d nCurrentAsset %d -- asked for all objects, nothing to do\n", nTick, nCurrentAsset);
                            // reset nTimeNoObjectsLeft to be able to use the same condition on resync
                            nTimeNoObjectsLeft = 0;
                            SwitchToNextAsset(connman);
                            connman.ReleaseNodeVector(vNodesCopy);
                            return;
                        }
                        nLastTick = nTick;
                        nLastVotes = governance.GetVoteCount();
                    }
                    continue;
                }
                netfulfilledman.AddFulfilledRequest(pnode->addr, "governance-sync");

                if (pnode->nVersion < MIN_GOVERNANCE_PEER_PROTO_VERSION) continue;
                nTriedPeerCount++;

                SendGovernanceSyncRequest(pnode, connman);

                connman.ReleaseNodeVector(vNodesCopy);
                return; //this will cause each peer to get one request each six seconds for the various assets we need
            }
        }
    }
    // looped through all nodes, release them
    connman.ReleaseNodeVector(vNodesCopy);
}
|
2016-03-02 22:20:04 +01:00
|
|
|
|
2017-09-19 16:51:38 +02:00
|
|
|
// Ask a peer for its full set of governance objects. Peers that understand
// the filtered protocol get the variant carrying an (empty) bloom filter;
// older peers get the plain request.
void CMasternodeSync::SendGovernanceSyncRequest(CNode* pnode, CConnman& connman)
{
    CNetMsgMaker msgMaker(pnode->GetSendVersion());

    if(pnode->nVersion < GOVERNANCE_FILTER_PROTO_VERSION) {
        // Legacy peer: no filter support.
        connman.PushMessage(pnode, msgMaker.Make(NetMsgType::MNGOVERNANCESYNC, uint256()));
        return;
    }

    // Filter-capable peer: send a cleared filter (i.e. request everything).
    CBloomFilter filter;
    filter.clear();

    connman.PushMessage(pnode, msgMaker.Make(NetMsgType::MNGOVERNANCESYNC, uint256(), filter));
}
|
|
|
|
|
2017-09-03 15:30:08 +02:00
|
|
|
void CMasternodeSync::AcceptedBlockHeader(const CBlockIndex *pindexNew)
|
|
|
|
{
|
2017-09-15 20:05:28 +02:00
|
|
|
LogPrint("mnsync", "CMasternodeSync::AcceptedBlockHeader -- pindexNew->nHeight: %d\n", pindexNew->nHeight);
|
2017-09-03 15:30:08 +02:00
|
|
|
|
|
|
|
if (!IsBlockchainSynced()) {
|
|
|
|
// Postpone timeout each time new block header arrives while we are still syncing blockchain
|
|
|
|
BumpAssetLastTime("CMasternodeSync::AcceptedBlockHeader");
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-09-19 16:51:38 +02:00
|
|
|
void CMasternodeSync::NotifyHeaderTip(const CBlockIndex *pindexNew, bool fInitialDownload, CConnman& connman)
|
2016-03-02 22:20:04 +01:00
|
|
|
{
|
2017-09-15 20:05:28 +02:00
|
|
|
LogPrint("mnsync", "CMasternodeSync::NotifyHeaderTip -- pindexNew->nHeight: %d fInitialDownload=%d\n", pindexNew->nHeight, fInitialDownload);
|
2017-09-03 15:30:08 +02:00
|
|
|
|
|
|
|
if (IsFailed() || IsSynced() || !pindexBestHeader)
|
|
|
|
return;
|
|
|
|
|
|
|
|
if (!IsBlockchainSynced()) {
|
|
|
|
// Postpone timeout each time new block arrives while we are still syncing blockchain
|
|
|
|
BumpAssetLastTime("CMasternodeSync::NotifyHeaderTip");
|
|
|
|
}
|
2017-09-20 22:30:56 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
// Validation-interface hook: the active chain tip changed.
// Drives the INITIAL/WAITING phase of the sync: keeps bumping the timeout
// while the chain is catching up, detects "reached best header" (and
// regressions from it, which force a Reset), and advances to the next sync
// asset once the tip matches the best known header. In lite mode the whole
// additional sync is skipped by jumping straight to FINISHED.
void CMasternodeSync::UpdatedBlockTip(const CBlockIndex *pindexNew, bool fInitialDownload, CConnman& connman)
{
    LogPrint("mnsync", "CMasternodeSync::UpdatedBlockTip -- pindexNew->nHeight: %d fInitialDownload=%d\n", pindexNew->nHeight, fInitialDownload);

    // Nothing to drive once failed/finished, or before we know the best header.
    if (IsFailed() || IsSynced() || !pindexBestHeader)
        return;

    if (!IsBlockchainSynced()) {
        // Postpone timeout each time new block arrives while we are still syncing blockchain
        BumpAssetLastTime("CMasternodeSync::UpdatedBlockTip");
    }

    if (fInitialDownload) {
        // switched too early
        if (IsBlockchainSynced()) {
            Reset();
        }

        // no need to check any further while still in IBD mode
        return;
    }

    // Note: since we sync headers first, it should be ok to use this
    static bool fReachedBestHeader = false;
    bool fReachedBestHeaderNew = pindexNew->GetBlockHash() == pindexBestHeader->GetBlockHash();

    if (fReachedBestHeader && !fReachedBestHeaderNew) {
        // Switching from true to false means that we previousely stuck syncing headers for some reason,
        // probably initial timeout was not enough,
        // because there is no way we can update tip not having best header
        Reset();
        fReachedBestHeader = false;
        return;
    }

    fReachedBestHeader = fReachedBestHeaderNew;

    LogPrint("mnsync", "CMasternodeSync::UpdatedBlockTip -- pindexNew->nHeight: %d pindexBestHeader->nHeight: %d fInitialDownload=%d fReachedBestHeader=%d\n",
                pindexNew->nHeight, pindexBestHeader->nHeight, fInitialDownload, fReachedBestHeader);

    if (!IsBlockchainSynced() && fReachedBestHeader) {
        if (fLiteMode) {
            // nothing to do in lite mode, just finish the process immediately
            nCurrentAsset = MASTERNODE_SYNC_FINISHED;
            return;
        }
        // Reached best header while being in initial mode.
        // We must be at the tip already, let's move to the next asset.
        SwitchToNextAsset(connman);
    }
}
|