// dash/src/llmq/quorums_chainlocks.cpp

// Copyright (c) 2019 The Dash Core developers
// Distributed under the MIT/X11 software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include "quorums.h"
#include "quorums_chainlocks.h"
#include "quorums_instantsend.h"
2019-01-22 14:20:32 +01:00
#include "quorums_signing.h"
#include "quorums_utils.h"
#include "chain.h"
#include "net_processing.h"
#include "scheduler.h"
2019-01-23 17:40:37 +01:00
#include "spork.h"
#include "txmempool.h"
2019-01-22 14:20:32 +01:00
#include "validation.h"
namespace llmq
{
static const std::string CLSIG_REQUESTID_PREFIX = "clsig";
CChainLocksHandler* chainLocksHandler;
std::string CChainLockSig::ToString() const
{
    return strprintf("CChainLockSig(nHeight=%d, blockHash=%s)", nHeight, blockHash.ToString());
}

CChainLocksHandler::CChainLocksHandler(CScheduler* _scheduler) :
    scheduler(_scheduler)
{
}

CChainLocksHandler::~CChainLocksHandler()
{
}

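// Registers this handler as a listener for recovered signatures and starts a
// recurring scheduler task (every 5 seconds) that re-evaluates the spork/DIP0008
// state, enforces the best known ChainLock and retries signing the chain tip.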
void CChainLocksHandler::Start()
{
    quorumSigningManager->RegisterRecoveredSigsListener(this);
    scheduler->scheduleEvery([&]() {
        CheckActiveState();
        EnforceBestChainLock();
        // regularly retry signing the current chaintip as it might have failed before due to missing ixlocks
        TrySignChainTip();
    }, 5000);
}

void CChainLocksHandler::Stop()
{
    quorumSigningManager->UnregisterRecoveredSigsListener(this);
}

bool CChainLocksHandler::AlreadyHave(const CInv& inv)
{
    LOCK(cs);
    return seenChainLocks.count(inv.hash) != 0;
}

bool CChainLocksHandler::GetChainLockByHash(const uint256& hash, llmq::CChainLockSig& ret)
{
    LOCK(cs);

    if (hash != bestChainLockHash) {
        // we only propagate the best one and ditch all the old ones
        return false;
    }

    ret = bestChainLock;
    return true;
}

void CChainLocksHandler::ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStream& vRecv, CConnman& connman)
{
    if (!sporkManager.IsSporkActive(SPORK_19_CHAINLOCKS_ENABLED)) {
        return;
    }

    if (strCommand == NetMsgType::CLSIG) {
        CChainLockSig clsig;
        vRecv >> clsig;

        auto hash = ::SerializeHash(clsig);

        ProcessNewChainLock(pfrom->id, clsig, hash);
    }
}

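// Validates and adopts a CLSIG that was either received from a peer or created
// locally (from == -1): deduplicate via seenChainLocks, drop CLSIGs that are not
// newer than the current best, verify the recovered threshold signature against
// the ChainLocks LLMQ, reject attempts to reorg below an older ChainLock, then
// store, relay and schedule enforcement of the new best ChainLock.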
void CChainLocksHandler::ProcessNewChainLock(NodeId from, const llmq::CChainLockSig& clsig, const uint256& hash)
{
    {
        LOCK(cs_main);
        g_connman->RemoveAskFor(hash);
    }

    {
        LOCK(cs);
        if (!seenChainLocks.emplace(hash, GetTimeMillis()).second) {
            return;
        }

        if (bestChainLock.nHeight != -1 && clsig.nHeight <= bestChainLock.nHeight) {
            // no need to process/relay older CLSIGs
            return;
        }
    }

    uint256 requestId = ::SerializeHash(std::make_pair(CLSIG_REQUESTID_PREFIX, clsig.nHeight));
    uint256 msgHash = clsig.blockHash;
    if (!quorumSigningManager->VerifyRecoveredSig(Params().GetConsensus().llmqChainLocks, clsig.nHeight, requestId, msgHash, clsig.sig)) {
        LogPrintf("CChainLocksHandler::%s -- invalid CLSIG (%s), peer=%d\n", __func__, clsig.ToString(), from);
        if (from != -1) {
            LOCK(cs_main);
            Misbehaving(from, 10);
        }
        return;
    }

    {
        LOCK2(cs_main, cs);

        if (InternalHasConflictingChainLock(clsig.nHeight, clsig.blockHash)) {
            // This should not happen. If it happens, it means that a malicious entity controls a large part of the MN
            // network. In this case, we don't allow it to reorg older chainlocks.
            LogPrintf("CChainLocksHandler::%s -- new CLSIG (%s) tries to reorg previous CLSIG (%s), peer=%d\n",
                    __func__, clsig.ToString(), bestChainLock.ToString(), from);
            return;
        }

        bestChainLockHash = hash;
        bestChainLock = clsig;

        CInv inv(MSG_CLSIG, hash);
        g_connman->RelayInv(inv, LLMQS_PROTO_VERSION);

        auto blockIt = mapBlockIndex.find(clsig.blockHash);
        if (blockIt == mapBlockIndex.end()) {
            // we don't know the block/header for this CLSIG yet, so bail out for now
            // when the block or the header later comes in, we will enforce the correct chain
            return;
        }

        if (blockIt->second->nHeight != clsig.nHeight) {
            // Should not happen, same as the conflict check from above.
            LogPrintf("CChainLocksHandler::%s -- height of CLSIG (%s) does not match the specified block's height (%d)\n",
                    __func__, clsig.ToString(), blockIt->second->nHeight);
            return;
        }

        const CBlockIndex* pindex = blockIt->second;
        bestChainLockWithKnownBlock = bestChainLock;
        bestChainLockBlockIndex = pindex;
    }

    scheduler->scheduleFromNow([&]() {
        CheckActiveState();
        EnforceBestChainLock();
    }, 0);

    LogPrint("chainlocks", "CChainLocksHandler::%s -- processed new CLSIG (%s), peer=%d\n",
            __func__, clsig.ToString(), from);
}

void CChainLocksHandler::AcceptedBlockHeader(const CBlockIndex* pindexNew)
{
    LOCK2(cs_main, cs);

    if (pindexNew->GetBlockHash() == bestChainLock.blockHash) {
        LogPrintf("CChainLocksHandler::%s -- block header %s came in late, updating and enforcing\n", __func__, pindexNew->GetBlockHash().ToString());

        if (bestChainLock.nHeight != pindexNew->nHeight) {
            // Should not happen, same as the conflict check from ProcessNewChainLock.
            LogPrintf("CChainLocksHandler::%s -- height of CLSIG (%s) does not match the specified block's height (%d)\n",
                    __func__, bestChainLock.ToString(), pindexNew->nHeight);
            return;
        }

        // when EnforceBestChainLock is called later, it might end up invalidating other chains but not activating the
        // CLSIG locked chain. This happens when only the header is known but the block itself is still missing. The
        // usual block processing logic will handle this when the block arrives.
        bestChainLockWithKnownBlock = bestChainLock;
        bestChainLockBlockIndex = pindexNew;
    }
}

void CChainLocksHandler::UpdatedBlockTip(const CBlockIndex* pindexNew)
{
    // don't call TrySignChainTip directly but instead let the scheduler call it. This way we ensure that cs_main is
    // never locked and TrySignChainTip is not called twice in parallel. Also avoids recursive calls due to
    // EnforceBestChainLock switching chains.
    LOCK(cs);
    if (tryLockChainTipScheduled) {
        return;
    }
    tryLockChainTipScheduled = true;
    scheduler->scheduleFromNow([&]() {
        CheckActiveState();
        EnforceBestChainLock();
        TrySignChainTip();
        LOCK(cs);
        tryLockChainTipScheduled = false;
    }, 0);
}

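// Re-evaluates whether ChainLocks are active (spork19) and enforced (DIP0008
// activated via its version-bits deployment, or spork19 value 1 on testnet).
// When enforcement flips from off to on, all previously accumulated ChainLock
// state is discarded so that stale locks cannot be enforced.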
void CChainLocksHandler::CheckActiveState()
{
    bool fDIP0008Active;
    {
        LOCK(cs_main);
        fDIP0008Active = VersionBitsState(chainActive.Tip()->pprev, Params().GetConsensus(), Consensus::DEPLOYMENT_DIP0008, versionbitscache) == THRESHOLD_ACTIVE;
    }

    LOCK(cs);

    bool oldIsEnforced = isEnforced;
    isSporkActive = sporkManager.IsSporkActive(SPORK_19_CHAINLOCKS_ENABLED);
    // TODO remove this after DIP8 is active
    bool fEnforcedBySpork = (Params().NetworkIDString() == CBaseChainParams::TESTNET) && (sporkManager.GetSporkValue(SPORK_19_CHAINLOCKS_ENABLED) == 1);
    isEnforced = (fDIP0008Active && isSporkActive) || fEnforcedBySpork;

    if (!oldIsEnforced && isEnforced) {
        // ChainLocks got activated just recently, but it's possible that it was already running before, leaving
        // us with some stale values which we should not try to enforce anymore (there probably was a good reason
        // to disable spork19)
        bestChainLockHash = uint256();
        bestChainLock = bestChainLockWithKnownBlock = CChainLockSig();
        bestChainLockBlockIndex = lastNotifyChainLockBlockIndex = nullptr;
    }
}

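// Attempts to sign the current chain tip as a masternode. Bails out when
// ChainLocks are inactive, when this height was already signed, when a better
// CLSIG is already known, or when a conflicting ChainLock exists. With the new
// InstantSend system active, the tip and the 5 blocks below it must only
// contain "safe" transactions before a signing attempt is made.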
void CChainLocksHandler::TrySignChainTip()
{
    Cleanup();

    const CBlockIndex* pindex;
    {
        LOCK(cs_main);
        pindex = chainActive.Tip();
    }

    if (!fMasternodeMode) {
        return;
    }
    if (!pindex->pprev) {
        return;
    }

    // DIP8 defines a process called "Signing attempts" which should run before the CLSIG is finalized
    // To simplify the initial implementation, we skip this process and directly try to create a CLSIG
    // This will fail when multiple blocks compete, but we accept this for the initial implementation.
    // Later, we'll add the multiple attempts process.

    {
        LOCK(cs);

        if (!isSporkActive) {
            return;
        }

        if (pindex->nHeight == lastSignedHeight) {
            // already signed this one
            return;
        }

        if (bestChainLock.nHeight >= pindex->nHeight) {
            // already got the same CLSIG or a better one
            return;
        }

        if (InternalHasConflictingChainLock(pindex->nHeight, pindex->GetBlockHash())) {
            // don't sign if another conflicting CLSIG is already present. EnforceBestChainLock will later enforce
            // the correct chain.
            return;
        }
    }

    LogPrint("chainlocks", "CChainLocksHandler::%s -- trying to sign %s, height=%d\n", __func__, pindex->GetBlockHash().ToString(), pindex->nHeight);

    // When the new IX system is activated, we only try to ChainLock blocks which include safe transactions. A TX is
    // considered safe when it is ixlocked or has been known for at least 10 minutes (from mempool or block). These
    // checks are performed for the tip (which we try to sign) and the previous 5 blocks. If a ChainLocked block is
    // found on the way down, we consider all TXs up to that point to be safe.
    if (IsNewInstantSendEnabled() && sporkManager.IsSporkActive(SPORK_3_INSTANTSEND_BLOCK_FILTERING)) {
        auto pindexWalk = pindex;
        while (pindexWalk) {
            if (pindex->nHeight - pindexWalk->nHeight > 5) {
                // no need to check further down, 6 confs is safe to assume that TXs below this height won't be
                // ixlocked anymore if they aren't already
                LogPrint("chainlocks", "CChainLocksHandler::%s -- tip and previous 5 blocks all safe\n", __func__);
                break;
            }
            if (HasChainLock(pindexWalk->nHeight, pindexWalk->GetBlockHash())) {
                // we don't care about ixlocks for TXs that are ChainLocked already
                LogPrint("chainlocks", "CChainLocksHandler::%s -- chainlock at height %d\n", __func__, pindexWalk->nHeight);
                break;
            }

            decltype(blockTxs.begin()->second) txids;
            {
                LOCK(cs);
                auto it = blockTxs.find(pindexWalk->GetBlockHash());
                if (it == blockTxs.end()) {
                    // this should actually not happen as NewPoWValidBlock should have been called before
                    LogPrint("chainlocks", "CChainLocksHandler::%s -- blockTxs for %s not found\n", __func__,
                             pindexWalk->GetBlockHash().ToString());
                    return;
                }
                txids = it->second;
            }

            for (auto& txid : *txids) {
                int64_t txAge = 0;
                {
                    LOCK(cs);
                    auto it = txFirstSeenTime.find(txid);
                    if (it != txFirstSeenTime.end()) {
                        txAge = GetAdjustedTime() - it->second;
                    }
                }

                if (txAge < WAIT_FOR_ISLOCK_TIMEOUT && !quorumInstantSendManager->IsLocked(txid)) {
                    LogPrint("chainlocks", "CChainLocksHandler::%s -- not signing block %s due to TX %s not being ixlocked and not old enough. age=%d\n", __func__,
                             pindexWalk->GetBlockHash().ToString(), txid.ToString(), txAge);
                    return;
                }
            }

            pindexWalk = pindexWalk->pprev;
        }
    }

    uint256 requestId = ::SerializeHash(std::make_pair(CLSIG_REQUESTID_PREFIX, pindex->nHeight));
    uint256 msgHash = pindex->GetBlockHash();

    {
        LOCK(cs);
        if (bestChainLock.nHeight >= pindex->nHeight) {
            // might have happened while we didn't hold cs
            return;
        }
        lastSignedHeight = pindex->nHeight;
        lastSignedRequestId = requestId;
        lastSignedMsgHash = msgHash;
    }

    quorumSigningManager->AsyncSignIfMember(Params().GetConsensus().llmqChainLocks, requestId, msgHash);
}

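// Called for every transaction that is accepted to the mempool or connected in
// a block. Records the first-seen time of each relevant TX and, for TXs inside
// blocks, collects their txids per block hash so TrySignChainTip can later
// decide whether all of a block's transactions are safe.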
void CChainLocksHandler::SyncTransaction(const CTransaction& tx, const CBlockIndex* pindex, int posInBlock)
{
    bool handleTx = true;
    if (tx.IsCoinBase() || tx.vin.empty()) {
        handleTx = false;
    }

    LOCK(cs);

    if (handleTx) {
        int64_t curTime = GetAdjustedTime();
        txFirstSeenTime.emplace(tx.GetHash(), curTime);
    }

    // We listen for SyncTransaction so that we can collect all TX ids of all included TXs of newly received blocks
    // We need this information later when we try to sign a new tip, so that we can determine if all included TXs are
    // safe.
    if (pindex && posInBlock != CMainSignals::SYNC_TRANSACTION_NOT_IN_BLOCK) {
        auto it = blockTxs.find(pindex->GetBlockHash());
        if (it == blockTxs.end()) {
            // we want this to be run even if handleTx == false, so that the coinbase TX triggers creation of an empty entry
            it = blockTxs.emplace(pindex->GetBlockHash(), std::make_shared<std::unordered_set<uint256, StaticSaltedHasher>>()).first;
        }
        if (handleTx) {
            auto& txs = *it->second;
            txs.emplace(tx.GetHash());
        }
    }
}

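// A TX is considered safe for inclusion in a mined block when InstantSend block
// filtering is disabled, when the new InstantSend system or ChainLocks are
// inactive, or when the TX is islocked or has been known for longer than
// WAIT_FOR_ISLOCK_TIMEOUT.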
bool CChainLocksHandler::IsTxSafeForMining(const uint256& txid)
{
    if (!sporkManager.IsSporkActive(SPORK_3_INSTANTSEND_BLOCK_FILTERING)) {
        return true;
    }
    if (!IsNewInstantSendEnabled()) {
        return true;
    }

    int64_t txAge = 0;
    {
        LOCK(cs);
        if (!isSporkActive) {
            return true;
        }

        auto it = txFirstSeenTime.find(txid);
        if (it != txFirstSeenTime.end()) {
            txAge = GetAdjustedTime() - it->second;
        }
    }

    if (txAge < WAIT_FOR_ISLOCK_TIMEOUT && !quorumInstantSendManager->IsLocked(txid)) {
        return false;
    }
    return true;
}

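// Makes the chain referenced by the best known ChainLock the active chain:
// invalidates competing children along the locked chain, re-activates the best
// chain if needed, and fires NotifyChainLock once the locked block is connected.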
// WARNING: cs_main and cs should not be held!
// This should also not be called from validation signals, as this might result in recursive calls
void CChainLocksHandler::EnforceBestChainLock()
{
    CChainLockSig clsig;
    const CBlockIndex* pindex;
    const CBlockIndex* currentBestChainLockBlockIndex;
    {
        LOCK(cs);

        if (!isEnforced) {
            return;
        }

        clsig = bestChainLockWithKnownBlock;
        pindex = currentBestChainLockBlockIndex = this->bestChainLockBlockIndex;

        if (!currentBestChainLockBlockIndex) {
            // we don't have the header/block, so we can't do anything right now
            return;
        }
    }

    bool activateNeeded;
    {
        LOCK(cs_main);

        // Go backwards through the chain referenced by clsig until we find a block that is part of the main chain.
        // For each of these blocks, check if there are children that are NOT part of the chain referenced by clsig
        // and invalidate each of them.
        while (pindex && !chainActive.Contains(pindex)) {
            // Invalidate all blocks that have the same prevBlockHash but are not equal to blockHash
            auto itp = mapPrevBlockIndex.equal_range(pindex->pprev->GetBlockHash());
            for (auto jt = itp.first; jt != itp.second; ++jt) {
                if (jt->second == pindex) {
                    continue;
                }
                LogPrintf("CChainLocksHandler::%s -- CLSIG (%s) invalidates block %s\n",
                          __func__, clsig.ToString(), jt->second->GetBlockHash().ToString());
                DoInvalidateBlock(jt->second, false);
            }

            pindex = pindex->pprev;
        }

        // In case blocks from the correct chain are invalid at the moment, reconsider them. The only case where this
        // can happen right now is when missing superblock triggers caused the main chain to be dismissed first. When
        // the trigger later appears, this should bring us to the correct chain eventually. Please note that this does
        // NOT enforce invalid blocks in any way, it just causes re-validation.
        if (!currentBestChainLockBlockIndex->IsValid()) {
            ResetBlockFailureFlags(mapBlockIndex.at(currentBestChainLockBlockIndex->GetBlockHash()));
        }

        activateNeeded = chainActive.Tip()->GetAncestor(currentBestChainLockBlockIndex->nHeight) != currentBestChainLockBlockIndex;
    }

    CValidationState state;
    if (activateNeeded && !ActivateBestChain(state, Params())) {
        LogPrintf("CChainLocksHandler::%s -- ActivateBestChain failed: %s\n", __func__, FormatStateMessage(state));
    }

    const CBlockIndex* pindexNotify = nullptr;
    {
        LOCK(cs_main);
        if (lastNotifyChainLockBlockIndex != currentBestChainLockBlockIndex &&
            chainActive.Tip()->GetAncestor(currentBestChainLockBlockIndex->nHeight) == currentBestChainLockBlockIndex) {
            lastNotifyChainLockBlockIndex = currentBestChainLockBlockIndex;
            pindexNotify = currentBestChainLockBlockIndex;
        }
    }

    if (pindexNotify) {
        GetMainSignals().NotifyChainLock(pindexNotify);
    }
}

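// CRecoveredSigsListener callback: when the recovered signature matches our own
// last signing attempt, assemble the corresponding CLSIG and hand it to
// ProcessNewChainLock (with from == -1) for local processing and relaying.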
void CChainLocksHandler::HandleNewRecoveredSig(const llmq::CRecoveredSig& recoveredSig)
{
    CChainLockSig clsig;
    {
        LOCK(cs);

        if (!isSporkActive) {
            return;
        }

        if (recoveredSig.id != lastSignedRequestId || recoveredSig.msgHash != lastSignedMsgHash) {
            // this is not what we signed, so let's not create a CLSIG for it
            return;
        }
        if (bestChainLock.nHeight >= lastSignedHeight) {
            // already got the same or a better CLSIG through the CLSIG message
            return;
        }

        clsig.nHeight = lastSignedHeight;
        clsig.blockHash = lastSignedMsgHash;
        clsig.sig = recoveredSig.sig.GetSig();
    }
    ProcessNewChainLock(-1, clsig, ::SerializeHash(clsig));
}

// WARNING, do not hold cs while calling this method as we'll otherwise run into a deadlock
void CChainLocksHandler::DoInvalidateBlock(const CBlockIndex* pindex, bool activateBestChain)
{
    auto& params = Params();

    {
        LOCK(cs_main);

        // get the non-const pointer
        CBlockIndex* pindex2 = mapBlockIndex[pindex->GetBlockHash()];

        CValidationState state;
        if (!InvalidateBlock(state, params, pindex2)) {
            LogPrintf("CChainLocksHandler::%s -- InvalidateBlock failed: %s\n", __func__, FormatStateMessage(state));
            // This should not have happened and we are in a state where it's not safe to continue anymore
            assert(false);
        }
    }

    CValidationState state;
    if (activateBestChain && !ActivateBestChain(state, params)) {
        LogPrintf("CChainLocksHandler::%s -- ActivateBestChain failed: %s\n", __func__, FormatStateMessage(state));
        // This should not have happened and we are in a state where it's not safe to continue anymore
        assert(false);
    }
}

bool CChainLocksHandler::HasChainLock(int nHeight, const uint256& blockHash)
{
    LOCK(cs);
    return InternalHasChainLock(nHeight, blockHash);
}

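// Returns true when the given block is the chainlocked block itself or an
// ancestor of it, i.e. the block is part of the chain fixed by the best
// ChainLock. Caller must hold cs.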
bool CChainLocksHandler::InternalHasChainLock(int nHeight, const uint256& blockHash)
{
    AssertLockHeld(cs);

    if (!isEnforced) {
        return false;
    }

    if (!bestChainLockBlockIndex) {
        return false;
    }

    if (nHeight > bestChainLockBlockIndex->nHeight) {
        return false;
    }

    if (nHeight == bestChainLockBlockIndex->nHeight) {
        return blockHash == bestChainLockBlockIndex->GetBlockHash();
    }

    auto pAncestor = bestChainLockBlockIndex->GetAncestor(nHeight);
    return pAncestor && pAncestor->GetBlockHash() == blockHash;
}

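// The inverse check: returns true when the given block sits at or below the
// best ChainLock's height but is NOT part of the chain fixed by it.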
bool CChainLocksHandler::HasConflictingChainLock(int nHeight, const uint256& blockHash)
{
    LOCK(cs);
    return InternalHasConflictingChainLock(nHeight, blockHash);
}

bool CChainLocksHandler::InternalHasConflictingChainLock(int nHeight, const uint256& blockHash)
{
    AssertLockHeld(cs);

    if (!isEnforced) {
        return false;
    }

    if (!bestChainLockBlockIndex) {
        return false;
    }

    if (nHeight > bestChainLockBlockIndex->nHeight) {
        return false;
    }

    if (nHeight == bestChainLockBlockIndex->nHeight) {
        return blockHash != bestChainLockBlockIndex->GetBlockHash();
    }

    auto pAncestor = bestChainLockBlockIndex->GetAncestor(nHeight);
    assert(pAncestor);
    return pAncestor->GetBlockHash() != blockHash;
}

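// Periodic housekeeping (at most every CLEANUP_INTERVAL ms): expire old entries
// from seenChainLocks, drop per-block TX sets once their block is chainlocked
// (or conflicts with a ChainLock), and forget first-seen times of TXs that
// vanished or have been confirmed at least 6 blocks deep.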
void CChainLocksHandler::Cleanup()
{
    {
        LOCK(cs);
        if (GetTimeMillis() - lastCleanupTime < CLEANUP_INTERVAL) {
            return;
        }
    }

    // need mempool.cs due to GetTransaction calls
    LOCK2(cs_main, mempool.cs);
    LOCK(cs);

    for (auto it = seenChainLocks.begin(); it != seenChainLocks.end(); ) {
        if (GetTimeMillis() - it->second >= CLEANUP_SEEN_TIMEOUT) {
            it = seenChainLocks.erase(it);
        } else {
            ++it;
        }
    }

    for (auto it = blockTxs.begin(); it != blockTxs.end(); ) {
        auto pindex = mapBlockIndex.at(it->first);
        if (InternalHasChainLock(pindex->nHeight, pindex->GetBlockHash())) {
            for (auto& txid : *it->second) {
                txFirstSeenTime.erase(txid);
            }
            it = blockTxs.erase(it);
        } else if (InternalHasConflictingChainLock(pindex->nHeight, pindex->GetBlockHash())) {
            it = blockTxs.erase(it);
        } else {
            ++it;
        }
    }
    for (auto it = txFirstSeenTime.begin(); it != txFirstSeenTime.end(); ) {
        CTransactionRef tx;
        uint256 hashBlock;
        if (!GetTransaction(it->first, tx, Params().GetConsensus(), hashBlock)) {
            // tx has vanished, probably due to conflicts
            it = txFirstSeenTime.erase(it);
        } else if (!hashBlock.IsNull()) {
            auto pindex = mapBlockIndex.at(hashBlock);
            if (chainActive.Tip()->GetAncestor(pindex->nHeight) == pindex && chainActive.Height() - pindex->nHeight >= 6) {
                // tx got confirmed >= 6 times, so we can stop keeping track of it
                it = txFirstSeenTime.erase(it);
            } else {
                ++it;
            }
        } else {
            ++it;
        }
    }

    lastCleanupTime = GetTimeMillis();
}

} // namespace llmq