dash/src/miner.cpp

// Copyright (c) 2009-2010 Satoshi Nakamoto
// Copyright (c) 2009-2015 The Bitcoin Core developers
// Copyright (c) 2014-2018 The Dash Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include "miner.h"
#include "amount.h"
#include "chain.h"
#include "chainparams.h"
#include "coins.h"
#include "consensus/consensus.h"
#include "consensus/merkle.h"
#include "consensus/validation.h"
#include "hash.h"
#include "validation.h"
#include "net.h"
#include "policy/policy.h"
#include "pow.h"
#include "primitives/transaction.h"
#include "script/standard.h"
#include "timedata.h"
#include "txmempool.h"
#include "util.h"
#include "utilmoneystr.h"
#include "masternode-payments.h"
#include "masternode-sync.h"
#include "validationinterface.h"
#include "evo/specialtx.h"
#include "evo/cbtx.h"
#include "evo/simplifiedmns.h"
#include "evo/deterministicmns.h"
#include "llmq/quorums_blockprocessor.h"
#include "llmq/quorums_chainlocks.h"
#include <algorithm>
#include <boost/thread.hpp>
#include <boost/tuple/tuple.hpp>
#include <queue>
#include <utility>

//////////////////////////////////////////////////////////////////////////////
//
// DashMiner
//

//
// Unconfirmed transactions in the memory pool often depend on other
// transactions in the memory pool. When we select transactions from the
// pool, we select by highest priority or fee rate, so we might consider
// transactions that depend on transactions that aren't yet in the block.

uint64_t nLastBlockTx = 0;
uint64_t nLastBlockSize = 0;

class ScoreCompare
{
public:
    ScoreCompare() {}

    bool operator()(const CTxMemPool::txiter a, const CTxMemPool::txiter b)
    {
        return CompareTxMemPoolEntryByScore()(*b,*a); // Convert to less than
    }
};
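
// Bump the block header timestamp to the larger of (median time past + 1) and the
// current network-adjusted time. On networks that allow min-difficulty blocks the
// required proof of work is recalculated as well, since it depends on the timestamp.
// Returns the number of seconds the timestamp moved forward.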
int64_t UpdateTime(CBlockHeader* pblock, const Consensus::Params& consensusParams, const CBlockIndex* pindexPrev)
{
    int64_t nOldTime = pblock->nTime;
    int64_t nNewTime = std::max(pindexPrev->GetMedianTimePast()+1, GetAdjustedTime());

    if (nOldTime < nNewTime)
        pblock->nTime = nNewTime;

    // Updating time can change work required on testnet:
    if (consensusParams.fPowAllowMinDifficultyBlocks)
        pblock->nBits = GetNextWorkRequired(pindexPrev, pblock, consensusParams);

    return nNewTime - nOldTime;
}

BlockAssembler::Options::Options() {
    blockMinFeeRate = CFeeRate(DEFAULT_BLOCK_MIN_TX_FEE);
    nBlockMaxSize = DEFAULT_BLOCK_MAX_SIZE;
}

BlockAssembler::BlockAssembler(const CChainParams& params, const Options& options) : chainparams(params)
{
    blockMinFeeRate = options.blockMinFeeRate;
    // Limit size to between 1K and MaxBlockSize()-1K for sanity:
    nBlockMaxSize = std::max((unsigned int)1000, std::min((unsigned int)(MaxBlockSize(fDIP0001ActiveAtTip) - 1000), (unsigned int)options.nBlockMaxSize));
}

static BlockAssembler::Options DefaultOptions(const CChainParams& params)
{
    // Block resource limits
    BlockAssembler::Options options;
    options.nBlockMaxSize = DEFAULT_BLOCK_MAX_SIZE;
    if (IsArgSet("-blockmaxsize")) {
        options.nBlockMaxSize = GetArg("-blockmaxsize", DEFAULT_BLOCK_MAX_SIZE);
    }
    if (IsArgSet("-blockmintxfee")) {
        CAmount n = 0;
        ParseMoney(GetArg("-blockmintxfee", ""), n);
        options.blockMinFeeRate = CFeeRate(n);
    } else {
        options.blockMinFeeRate = CFeeRate(DEFAULT_BLOCK_MIN_TX_FEE);
    }
    return options;
}

BlockAssembler::BlockAssembler(const CChainParams& params) : BlockAssembler(params, DefaultOptions(params)) {}

void BlockAssembler::resetBlock()
{
    inBlock.clear();

    // Reserve space for coinbase tx
    nBlockSize = 1000;
    nBlockSigOps = 100;

    // These counters do not include coinbase tx
    nBlockTx = 0;
    nFees = 0;

    lastFewTxs = 0;
    blockFinished = false;
}
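
// Assemble a new block template on top of the current chain tip: a dummy coinbase slot,
// any pending LLMQ quorum commitment transactions (once DIP0003 is active), priority and
// fee-ordered mempool packages, and finally the real coinbase carrying masternode and
// superblock payments. The assembled block is validity-tested before being returned.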
std::unique_ptr<CBlockTemplate> BlockAssembler::CreateNewBlock(const CScript& scriptPubKeyIn)
{
    int64_t nTimeStart = GetTimeMicros();

    resetBlock();

    pblocktemplate.reset(new CBlockTemplate());

    if(!pblocktemplate.get())
        return nullptr;
    pblock = &pblocktemplate->block; // pointer for convenience

    // Add dummy coinbase tx as first transaction
    pblock->vtx.emplace_back();
    pblocktemplate->vTxFees.push_back(-1); // updated at end
    pblocktemplate->vTxSigOps.push_back(-1); // updated at end

    LOCK2(cs_main, mempool.cs);

    bool fDIP0003Active_context = VersionBitsState(chainActive.Tip(), chainparams.GetConsensus(), Consensus::DEPLOYMENT_DIP0003, versionbitscache) == THRESHOLD_ACTIVE;

    CBlockIndex* pindexPrev = chainActive.Tip();
    nHeight = pindexPrev->nHeight + 1;

    pblock->nVersion = ComputeBlockVersion(pindexPrev, chainparams.GetConsensus(), chainparams.BIP9CheckMasternodesUpgraded());
    // -regtest only: allow overriding block.nVersion with
    // -blockversion=N to test forking scenarios
    if (chainparams.MineBlocksOnDemand())
        pblock->nVersion = GetArg("-blockversion", pblock->nVersion);

    pblock->nTime = GetAdjustedTime();
    const int64_t nMedianTimePast = pindexPrev->GetMedianTimePast();

    nLockTimeCutoff = (STANDARD_LOCKTIME_VERIFY_FLAGS & LOCKTIME_MEDIAN_TIME_PAST)
                       ? nMedianTimePast
                       : pblock->GetBlockTime();
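
    // DIP0006: once DIP0003 is active, include any minable LLMQ quorum commitment
    // transactions before selecting regular mempool transactions.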
    if (fDIP0003Active_context) {
        for (auto& p : Params().GetConsensus().llmqs) {
            CTransactionRef qcTx;
            if (llmq::quorumBlockProcessor->GetMinableCommitmentTx(p.first, nHeight, qcTx)) {
                pblock->vtx.emplace_back(qcTx);
                pblocktemplate->vTxFees.emplace_back(0);
                pblocktemplate->vTxSigOps.emplace_back(0);
                nBlockSize += qcTx->GetTotalSize();
                ++nBlockTx;
            }
        }
    }

    addPriorityTxs();
    int nPackagesSelected = 0;
    int nDescendantsUpdated = 0;
    addPackageTxs(nPackagesSelected, nDescendantsUpdated);

    int64_t nTime1 = GetTimeMicros();

    nLastBlockTx = nBlockTx;
    nLastBlockSize = nBlockSize;
    LogPrintf("CreateNewBlock(): total size %u txs: %u fees: %ld sigops %d\n", nBlockSize, nBlockTx, nFees, nBlockSigOps);

    // Create coinbase transaction.
    CMutableTransaction coinbaseTx;
    coinbaseTx.vin.resize(1);
    coinbaseTx.vin[0].prevout.SetNull();
    coinbaseTx.vout.resize(1);
    coinbaseTx.vout[0].scriptPubKey = scriptPubKeyIn;

    // NOTE: unlike in bitcoin, we need to pass PREVIOUS block height here
    CAmount blockReward = nFees + GetBlockSubsidy(pindexPrev->nBits, pindexPrev->nHeight, Params().GetConsensus());

    // Compute regular coinbase transaction.
    coinbaseTx.vout[0].nValue = blockReward;

    if (!fDIP0003Active_context) {
        coinbaseTx.vin[0].scriptSig = CScript() << nHeight << OP_0;
    } else {
        coinbaseTx.vin[0].scriptSig = CScript() << OP_RETURN;

        coinbaseTx.nVersion = 3;
        coinbaseTx.nType = TRANSACTION_COINBASE;

        CCbTx cbTx;
        cbTx.nHeight = nHeight;

        CValidationState state;
        if (!CalcCbTxMerkleRootMNList(*pblock, pindexPrev, cbTx.merkleRootMNList, state)) {
            throw std::runtime_error(strprintf("%s: CalcCbTxMerkleRootMNList failed: %s", __func__, FormatStateMessage(state)));
        }

        SetTxPayload(coinbaseTx, cbTx);
    }

    // Update coinbase transaction with additional info about masternode and governance payments,
    // get some info back to pass to getblocktemplate
    FillBlockPayments(coinbaseTx, nHeight, blockReward, pblocktemplate->voutMasternodePayments, pblocktemplate->voutSuperblockPayments);
    // LogPrintf("CreateNewBlock -- nBlockHeight %d blockReward %lld txoutMasternode %s coinbaseTx %s",
    //             nHeight, blockReward, pblocktemplate->txoutsMasternode.ToString(), coinbaseTx.ToString());

    pblock->vtx[0] = MakeTransactionRef(std::move(coinbaseTx));
    pblocktemplate->vTxFees[0] = -nFees;

    // Fill in header
    pblock->hashPrevBlock = pindexPrev->GetBlockHash();
    UpdateTime(pblock, chainparams.GetConsensus(), pindexPrev);
    pblock->nBits = GetNextWorkRequired(pindexPrev, pblock, chainparams.GetConsensus());
    pblock->nNonce = 0;
    pblocktemplate->nPrevBits = pindexPrev->nBits;
    pblocktemplate->vTxSigOps[0] = GetLegacySigOpCount(*pblock->vtx[0]);

    CValidationState state;
    if (!TestBlockValidity(state, chainparams, *pblock, pindexPrev, false, false)) {
        throw std::runtime_error(strprintf("%s: TestBlockValidity failed: %s", __func__, FormatStateMessage(state)));
    }
    int64_t nTime2 = GetTimeMicros();

    LogPrint("bench", "CreateNewBlock() packages: %.2fms (%d packages, %d updated descendants), validity: %.2fms (total %.2fms)\n", 0.001 * (nTime1 - nTimeStart), nPackagesSelected, nDescendantsUpdated, 0.001 * (nTime2 - nTime1), 0.001 * (nTime2 - nTimeStart));

    return std::move(pblocktemplate);
}
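
// Return true if any in-mempool parent of this entry has not yet been added to the block.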
bool BlockAssembler::isStillDependent(CTxMemPool::txiter iter)
{
    BOOST_FOREACH(CTxMemPool::txiter parent, mempool.GetMemPoolParents(iter))
    {
        if (!inBlock.count(parent)) {
            return true;
        }
    }
    return false;
}
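
// Remove from the given set any entries that have already been added to the block,
// leaving only candidates that are still unconfirmed from the block's point of view.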
void BlockAssembler::onlyUnconfirmed(CTxMemPool::setEntries& testSet)
{
    for (CTxMemPool::setEntries::iterator iit = testSet.begin(); iit != testSet.end(); ) {
        // Only test txs not already in the block
        if (inBlock.count(*iit)) {
            testSet.erase(iit++);
        }
        else {
            iit++;
        }
    }
}
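
// Check whether a candidate package still fits within the remaining block size
// and signature-operation budget.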
bool BlockAssembler::TestPackage(uint64_t packageSize, unsigned int packageSigOps)
{
    if (nBlockSize + packageSize >= nBlockMaxSize)
        return false;
    if (nBlockSigOps + packageSigOps >= MaxBlockSigOps(fDIP0001ActiveAtTip))
        return false;
    return true;
}

// Perform transaction-level checks before adding to block:
// - transaction finality (locktime)
bool BlockAssembler::TestPackageTransactions(const CTxMemPool::setEntries& package)
{
    BOOST_FOREACH (const CTxMemPool::txiter it, package) {
        if (!IsFinalTx(it->GetTx(), nHeight, nLockTimeCutoff))
            return false;
    }
    return true;
}
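
// Test whether a single transaction fits into the remaining block space and sigop
// budget and is final; sets blockFinished once the block is effectively full.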
bool BlockAssembler::TestForBlock(CTxMemPool::txiter iter)
{
    if (nBlockSize + iter->GetTxSize() >= nBlockMaxSize) {
        // If the block is so close to full that no more txs will fit
        // or if we've tried more than 50 times to fill remaining space
        // then flag that the block is finished
        if (nBlockSize > nBlockMaxSize - 100 || lastFewTxs > 50) {
            blockFinished = true;
            return false;
        }
        // Once we're within 1000 bytes of a full block, only look at 50 more txs
        // to try to fill the remaining space.
        if (nBlockSize > nBlockMaxSize - 1000) {
            lastFewTxs++;
        }
        return false;
    }

    unsigned int nMaxBlockSigOps = MaxBlockSigOps(fDIP0001ActiveAtTip);
    if (nBlockSigOps + iter->GetSigOpCount() >= nMaxBlockSigOps) {
        // If the block has room for no more sig ops then
        // flag that the block is finished
        if (nBlockSigOps > nMaxBlockSigOps - 2) {
            blockFinished = true;
            return false;
        }
        // Otherwise attempt to find another tx with fewer sigops
        // to put in the block.
        return false;
    }

    // Must check that lock times are still valid
    // This can be removed once MTP is always enforced
    // as long as reorgs keep the mempool consistent.
    if (!IsFinalTx(iter->GetTx(), nHeight, nLockTimeCutoff))
        return false;

    return true;
}
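
// Append the transaction to the block under construction and update the running
// size, sigop, fee and transaction-count totals.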
void BlockAssembler::AddToBlock(CTxMemPool::txiter iter)
{
    pblock->vtx.emplace_back(iter->GetSharedTx());
    pblocktemplate->vTxFees.push_back(iter->GetFee());
    pblocktemplate->vTxSigOps.push_back(iter->GetSigOpCount());
    nBlockSize += iter->GetTxSize();
    ++nBlockTx;
    nBlockSigOps += iter->GetSigOpCount();
    nFees += iter->GetFee();
    inBlock.insert(iter);

    bool fPrintPriority = GetBoolArg("-printpriority", DEFAULT_PRINTPRIORITY);
    if (fPrintPriority) {
        double dPriority = iter->GetPriority(nHeight);
        CAmount dummy;
        mempool.ApplyDeltas(iter->GetTx().GetHash(), dPriority, dummy);
        LogPrintf("priority %.1f fee %s txid %s\n",
                  dPriority,
                  CFeeRate(iter->GetModifiedFee(), iter->GetTxSize()).ToString(),
                  iter->GetTx().GetHash().ToString());
    }
}
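
// For each newly added transaction, walk its in-mempool descendants and record a
// modified entry whose ancestor size/fee/sigop totals exclude what is already in
// the block. Returns the number of descendant entries updated.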
int BlockAssembler::UpdatePackagesForAdded(const CTxMemPool::setEntries& alreadyAdded,
        indexed_modified_transaction_set &mapModifiedTx)
{
    int nDescendantsUpdated = 0;
    BOOST_FOREACH(const CTxMemPool::txiter it, alreadyAdded) {
        CTxMemPool::setEntries descendants;
        mempool.CalculateDescendants(it, descendants);
        // Insert all descendants (not yet in block) into the modified set
        BOOST_FOREACH(CTxMemPool::txiter desc, descendants) {
            if (alreadyAdded.count(desc))
                continue;
            ++nDescendantsUpdated;
            modtxiter mit = mapModifiedTx.find(desc);
            if (mit == mapModifiedTx.end()) {
                CTxMemPoolModifiedEntry modEntry(desc);
                modEntry.nSizeWithAncestors -= it->GetTxSize();
                modEntry.nModFeesWithAncestors -= it->GetModifiedFee();
                modEntry.nSigOpCountWithAncestors -= it->GetSigOpCount();
                mapModifiedTx.insert(modEntry);
            } else {
                mapModifiedTx.modify(mit, update_for_parent_inclusion(it));
            }
        }
    }
    return nDescendantsUpdated;
}

// Skip entries in mapTx that are already in a block or are present
// in mapModifiedTx (which implies that the mapTx ancestor state is
// stale due to ancestor inclusion in the block)
// Also skip transactions that we've already failed to add. This can happen if
// we consider a transaction in mapModifiedTx and it fails: we can then
// potentially consider it again while walking mapTx. It's currently
// guaranteed to fail again, but as a belt-and-suspenders check we put it in
// failedTx and avoid re-evaluation, since the re-evaluation would be using
// cached size/sigops/fee values that are not actually correct.
bool BlockAssembler::SkipMapTxEntry(CTxMemPool::txiter it, indexed_modified_transaction_set &mapModifiedTx, CTxMemPool::setEntries &failedTx)
{
    assert (it != mempool.mapTx.end());
    if (mapModifiedTx.count(it) || inBlock.count(it) || failedTx.count(it))
        return true;
    return false;
}

void BlockAssembler::SortForBlock(const CTxMemPool::setEntries& package, CTxMemPool::txiter entry, std::vector<CTxMemPool::txiter>& sortedEntries)
{
    // Sort package by ancestor count
    // If a transaction A depends on transaction B, then A's ancestor count
    // must be greater than B's. So this is sufficient to validly order the
    // transactions for block inclusion.
    sortedEntries.clear();
    sortedEntries.insert(sortedEntries.begin(), package.begin(), package.end());
    std::sort(sortedEntries.begin(), sortedEntries.end(), CompareTxIterByAncestorCount());
}

// This transaction selection algorithm orders the mempool based
// on feerate of a transaction including all unconfirmed ancestors.
// Since we don't remove transactions from the mempool as we select them
// for block inclusion, we need an alternate method of updating the feerate
// of a transaction with its not-yet-selected ancestors as we go.
// This is accomplished by walking the in-mempool descendants of selected
// transactions and storing a temporary modified state in mapModifiedTxs.
// Each time through the loop, we compare the best transaction in
// mapModifiedTxs with the next transaction in the mempool to decide what
// transaction package to work on next.
void BlockAssembler::addPackageTxs(int &nPackagesSelected, int &nDescendantsUpdated)
{
    // mapModifiedTx will store sorted packages after they are modified
    // because some of their txs are already in the block
    indexed_modified_transaction_set mapModifiedTx;
    // Keep track of entries that failed inclusion, to avoid duplicate work
    CTxMemPool::setEntries failedTx;

    // Start by adding all descendants of previously added txs to mapModifiedTx
    // and modifying them for their already included ancestors
    UpdatePackagesForAdded(inBlock, mapModifiedTx);

    CTxMemPool::indexed_transaction_set::index<ancestor_score>::type::iterator mi = mempool.mapTx.get<ancestor_score>().begin();
    CTxMemPool::txiter iter;

    // Limit the number of attempts to add transactions to the block when it is
    // close to full; this is just a simple heuristic to finish quickly if the
    // mempool has a lot of entries.
    const int64_t MAX_CONSECUTIVE_FAILURES = 1000;
    int64_t nConsecutiveFailed = 0;

    while (mi != mempool.mapTx.get<ancestor_score>().end() || !mapModifiedTx.empty())
    {
        // First try to find a new transaction in mapTx to evaluate.
        if (mi != mempool.mapTx.get<ancestor_score>().end() &&
                SkipMapTxEntry(mempool.mapTx.project<0>(mi), mapModifiedTx, failedTx)) {
            ++mi;
            continue;
        }

        // Skip transactions that the ChainLocks handler does not yet consider safe to mine.
        if (mi != mempool.mapTx.get<ancestor_score>().end() &&
                !llmq::chainLocksHandler->IsTxSafeForMining(mi->GetTx().GetHash())) {
            ++mi;
            continue;
        }

        // Now that mi is not stale, determine which transaction to evaluate:
        // the next entry from mapTx, or the best from mapModifiedTx?
        bool fUsingModified = false;

        modtxscoreiter modit = mapModifiedTx.get<ancestor_score>().begin();
        if (mi == mempool.mapTx.get<ancestor_score>().end()) {
            // We're out of entries in mapTx; use the entry from mapModifiedTx
            iter = modit->iter;
            fUsingModified = true;
        } else {
            // Try to compare the mapTx entry to the mapModifiedTx entry
            iter = mempool.mapTx.project<0>(mi);
            if (modit != mapModifiedTx.get<ancestor_score>().end() &&
                    CompareModifiedEntry()(*modit, CTxMemPoolModifiedEntry(iter))) {
                // The best entry in mapModifiedTx has higher score
                // than the one from mapTx.
                // Switch which transaction (package) to consider
                iter = modit->iter;
                fUsingModified = true;
            } else {
                // Either no entry in mapModifiedTx, or it's worse than mapTx.
                // Increment mi for the next loop iteration.
                ++mi;
            }
        }

        // We skip mapTx entries that are inBlock, and mapModifiedTx shouldn't
        // contain anything that is inBlock.
        assert(!inBlock.count(iter));

        uint64_t packageSize = iter->GetSizeWithAncestors();
        CAmount packageFees = iter->GetModFeesWithAncestors();
        unsigned int packageSigOps = iter->GetSigOpCountWithAncestors();
        if (fUsingModified) {
            packageSize = modit->nSizeWithAncestors;
            packageFees = modit->nModFeesWithAncestors;
            packageSigOps = modit->nSigOpCountWithAncestors;
        }

        if (packageFees < blockMinFeeRate.GetFee(packageSize)) {
            // Everything else we might consider has a lower fee rate
            return;
        }

        if (!TestPackage(packageSize, packageSigOps)) {
            if (fUsingModified) {
                // Since we always look at the best entry in mapModifiedTx,
                // we must erase failed entries so that we can consider the
                // next best entry on the next loop iteration
                mapModifiedTx.get<ancestor_score>().erase(modit);
                failedTx.insert(iter);
            }

            ++nConsecutiveFailed;

            if (nConsecutiveFailed > MAX_CONSECUTIVE_FAILURES && nBlockSize > nBlockMaxSize - 1000) {
                // Give up if we're close to full and haven't succeeded in a while
                break;
            }
            continue;
        }

        CTxMemPool::setEntries ancestors;
        uint64_t nNoLimit = std::numeric_limits<uint64_t>::max();
        std::string dummy;
        mempool.CalculateMemPoolAncestors(*iter, ancestors, nNoLimit, nNoLimit, nNoLimit, nNoLimit, dummy, false);

        onlyUnconfirmed(ancestors);
        ancestors.insert(iter);

        // Test if all tx's are Final
        if (!TestPackageTransactions(ancestors)) {
            if (fUsingModified) {
                mapModifiedTx.get<ancestor_score>().erase(modit);
                failedTx.insert(iter);
            }
            continue;
        }

        // This transaction will make it in; reset the failed counter.
        nConsecutiveFailed = 0;

        // Package can be added. Sort the entries in a valid order.
        std::vector<CTxMemPool::txiter> sortedEntries;
        SortForBlock(ancestors, iter, sortedEntries);

        for (size_t i=0; i<sortedEntries.size(); ++i) {
            AddToBlock(sortedEntries[i]);
            // Erase from the modified set, if present
            mapModifiedTx.erase(sortedEntries[i]);
        }

        ++nPackagesSelected;

        // Update transactions that depend on each of these
        nDescendantsUpdated += UpdatePackagesForAdded(ancestors, mapModifiedTx);
    }
}
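
// Fill the first portion of the block with high coin-age-priority transactions,
// independent of the fees they pay, roughly up to -blockprioritysize bytes.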
void BlockAssembler::addPriorityTxs()
{
    // How much of the block should be dedicated to high-priority transactions,
    // included regardless of the fees they pay
    unsigned int nBlockPrioritySize = GetArg("-blockprioritysize", DEFAULT_BLOCK_PRIORITY_SIZE);
    nBlockPrioritySize = std::min(nBlockMaxSize, nBlockPrioritySize);

    if (nBlockPrioritySize == 0) {
        return;
    }

    // This vector will be sorted into a priority queue:
    std::vector<TxCoinAgePriority> vecPriority;
    TxCoinAgePriorityCompare pricomparer;
    std::map<CTxMemPool::txiter, double, CTxMemPool::CompareIteratorByHash> waitPriMap;
    typedef std::map<CTxMemPool::txiter, double, CTxMemPool::CompareIteratorByHash>::iterator waitPriIter;
    double actualPriority = -1;

    vecPriority.reserve(mempool.mapTx.size());
    for (CTxMemPool::indexed_transaction_set::iterator mi = mempool.mapTx.begin();
         mi != mempool.mapTx.end(); ++mi)
    {
        double dPriority = mi->GetPriority(nHeight);
        CAmount dummy;
        mempool.ApplyDeltas(mi->GetTx().GetHash(), dPriority, dummy);
        vecPriority.push_back(TxCoinAgePriority(dPriority, mi));
    }
    std::make_heap(vecPriority.begin(), vecPriority.end(), pricomparer);

    CTxMemPool::txiter iter;
    while (!vecPriority.empty() && !blockFinished) { // add a tx from priority queue to fill the blockprioritysize
        iter = vecPriority.front().second;
        actualPriority = vecPriority.front().first;
        std::pop_heap(vecPriority.begin(), vecPriority.end(), pricomparer);
        vecPriority.pop_back();

        // If tx already in block, skip
        if (inBlock.count(iter)) {
            assert(false); // shouldn't happen for priority txs
            continue;
        }

        // If tx is dependent on other mempool txs which haven't yet been included
        // then put it in the waitSet
        if (isStillDependent(iter)) {
            waitPriMap.insert(std::make_pair(iter, actualPriority));
            continue;
        }

        // Skip transactions that the ChainLocks handler does not yet consider safe to mine.
        if (!llmq::chainLocksHandler->IsTxSafeForMining(iter->GetTx().GetHash())) {
            continue;
        }

        // If this tx fits in the block add it, otherwise keep looping
        if (TestForBlock(iter)) {
            AddToBlock(iter);

            // If now that this tx is added we've surpassed our desired priority size
            // or have dropped below the AllowFreeThreshold, then we're done adding priority txs
            if (nBlockSize >= nBlockPrioritySize || !AllowFree(actualPriority)) {
                break;
            }

            // This tx was successfully added, so
            // add transactions that depend on this one to the priority queue to try again
            BOOST_FOREACH(CTxMemPool::txiter child, mempool.GetMemPoolChildren(iter))
            {
                waitPriIter wpiter = waitPriMap.find(child);
                if (wpiter != waitPriMap.end()) {
                    vecPriority.push_back(TxCoinAgePriority(wpiter->second,child));
                    std::push_heap(vecPriority.begin(), vecPriority.end(), pricomparer);
                    waitPriMap.erase(wpiter);
                }
            }
        }
    }
}
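
// Increment the extra nonce embedded in the coinbase scriptSig (resetting it whenever
// the previous-block hash changes) and recompute the block's merkle root.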
void IncrementExtraNonce(CBlock* pblock, const CBlockIndex* pindexPrev, unsigned int& nExtraNonce)
{
    // Update nExtraNonce
    static uint256 hashPrevBlock;
    if (hashPrevBlock != pblock->hashPrevBlock)
    {
        nExtraNonce = 0;
        hashPrevBlock = pblock->hashPrevBlock;
    }
    ++nExtraNonce;
    unsigned int nHeight = pindexPrev->nHeight+1; // Height first in coinbase required for block.version=2
    CMutableTransaction txCoinbase(*pblock->vtx[0]);
    txCoinbase.vin[0].scriptSig = (CScript() << nHeight << CScriptNum(nExtraNonce)) + COINBASE_FLAGS;
    assert(txCoinbase.vin[0].scriptSig.size() <= 100);

    pblock->vtx[0] = MakeTransactionRef(std::move(txCoinbase));
    pblock->hashMerkleRoot = BlockMerkleRoot(*pblock);
}