dash/src/miner.cpp

// Copyright (c) 2009-2010 Satoshi Nakamoto
// Copyright (c) 2009-2015 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include "miner.h"
#include "amount.h"
#include "chain.h"
#include "chainparams.h"
#include "coins.h"
#include "consensus/consensus.h"
#include "consensus/merkle.h"
#include "consensus/validation.h"
#include "hash.h"
#include "main.h"
#include "net.h"
#include "policy/policy.h"
#include "pow.h"
#include "primitives/transaction.h"
#include "script/standard.h"
#include "timedata.h"
#include "txmempool.h"
#include "util.h"
#include "utilmoneystr.h"
#include "validationinterface.h"
#include <algorithm>
#include <boost/thread.hpp>
#include <boost/tuple/tuple.hpp>
#include <queue>
using namespace std;
//////////////////////////////////////////////////////////////////////////////
//
// BitcoinMiner
//
//
// Unconfirmed transactions in the memory pool often depend on other
// transactions in the memory pool. When we select transactions from the
// pool, we select by highest priority or fee rate, so we might consider
// transactions that depend on transactions that aren't yet in the block.
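//
// For example (illustrative only): a low-fee parent tx and a high-fee child tx
// may both sit in the mempool; the child cannot validly appear in a block before
// its parent, so the selection code below either defers the child until the
// parent is included (addScoreTxs/addPriorityTxs) or evaluates parent and child
// together as an ancestor package (addPackageTxs).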
uint64_t nLastBlockTx = 0;
uint64_t nLastBlockSize = 0;
uint64_t nLastBlockCost = 0;
class ScoreCompare
{
public:
ScoreCompare() {}
bool operator()(const CTxMemPool::txiter a, const CTxMemPool::txiter b)
{
return CompareTxMemPoolEntryByScore()(*b,*a); // Convert to less than
}
};
int64_t UpdateTime(CBlockHeader* pblock, const Consensus::Params& consensusParams, const CBlockIndex* pindexPrev)
{
int64_t nOldTime = pblock->nTime;
int64_t nNewTime = std::max(pindexPrev->GetMedianTimePast()+1, GetAdjustedTime());
if (nOldTime < nNewTime)
pblock->nTime = nNewTime;
// Updating time can change work required on testnet:
if (consensusParams.fPowAllowMinDifficultyBlocks)
pblock->nBits = GetNextWorkRequired(pindexPrev, pblock, consensusParams);
return nNewTime - nOldTime;
}
BlockAssembler::BlockAssembler(const CChainParams& _chainparams)
: chainparams(_chainparams)
{
// Block resource limits
// If neither -blockmaxsize nor -blockmaxcost is given, limit to DEFAULT_BLOCK_MAX_*
// If only one is given, only restrict the specified resource.
// If both are given, restrict both.
nBlockMaxCost = DEFAULT_BLOCK_MAX_COST;
nBlockMaxSize = DEFAULT_BLOCK_MAX_SIZE;
bool fCostSet = false;
if (mapArgs.count("-blockmaxcost")) {
nBlockMaxCost = GetArg("-blockmaxcost", DEFAULT_BLOCK_MAX_COST);
nBlockMaxSize = MAX_BLOCK_SERIALIZED_SIZE;
fCostSet = true;
}
if (mapArgs.count("-blockmaxsize")) {
nBlockMaxSize = GetArg("-blockmaxsize", DEFAULT_BLOCK_MAX_SIZE);
if (!fCostSet) {
nBlockMaxCost = nBlockMaxSize * WITNESS_SCALE_FACTOR;
}
}
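// Illustrative examples (assuming WITNESS_SCALE_FACTOR == 4, as in upstream
// Bitcoin Core): passing only -blockmaxsize=750000 gives nBlockMaxSize = 750000
// and nBlockMaxCost = 750000 * 4 = 3000000; passing only -blockmaxcost=3000000
// gives nBlockMaxCost = 3000000 and leaves the serialized size capped only by
// MAX_BLOCK_SERIALIZED_SIZE (before the sanity clamps below).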
// Limit cost to between 4K and MAX_BLOCK_COST-4K for sanity:
nBlockMaxCost = std::max((unsigned int)4000, std::min((unsigned int)(MAX_BLOCK_COST-4000), nBlockMaxCost));
// Limit size to between 1K and MAX_BLOCK_SERIALIZED_SIZE-1K for sanity:
nBlockMaxSize = std::max((unsigned int)1000, std::min((unsigned int)(MAX_BLOCK_SERIALIZED_SIZE-1000), nBlockMaxSize));
// Minimum block size you want to create; block will be filled with free transactions
// until there are no more or the block reaches this size:
nBlockMinSize = GetArg("-blockminsize", DEFAULT_BLOCK_MIN_SIZE);
nBlockMinSize = std::min(nBlockMaxSize, nBlockMinSize);
// Whether we need to account for byte usage (in addition to cost usage)
fNeedSizeAccounting = (nBlockMaxSize < MAX_BLOCK_SERIALIZED_SIZE-1000) || (nBlockMinSize > 0);
}
void BlockAssembler::resetBlock()
{
inBlock.clear();
// Reserve space for coinbase tx
nBlockSize = 1000;
nBlockCost = 4000;
nBlockSigOpsCost = 400;
fIncludeWitness = false;
// These counters do not include coinbase tx
nBlockTx = 0;
nFees = 0;
lastFewTxs = 0;
blockFinished = false;
}
CBlockTemplate* BlockAssembler::CreateNewBlock(const CScript& scriptPubKeyIn)
{
resetBlock();
pblocktemplate.reset(new CBlockTemplate());
if(!pblocktemplate.get())
return NULL;
pblock = &pblocktemplate->block; // pointer for convenience
// Add dummy coinbase tx as first transaction
pblock->vtx.push_back(CTransaction());
pblocktemplate->vTxFees.push_back(-1); // updated at end
pblocktemplate->vTxSigOpsCost.push_back(-1); // updated at end
LOCK2(cs_main, mempool.cs);
CBlockIndex* pindexPrev = chainActive.Tip();
nHeight = pindexPrev->nHeight + 1;
pblock->nVersion = ComputeBlockVersion(pindexPrev, chainparams.GetConsensus());
// -regtest only: allow overriding block.nVersion with
// -blockversion=N to test forking scenarios
if (chainparams.MineBlocksOnDemand())
pblock->nVersion = GetArg("-blockversion", pblock->nVersion);
pblock->nTime = GetAdjustedTime();
const int64_t nMedianTimePast = pindexPrev->GetMedianTimePast();
nLockTimeCutoff = (STANDARD_LOCKTIME_VERIFY_FLAGS & LOCKTIME_MEDIAN_TIME_PAST)
? nMedianTimePast
: pblock->GetBlockTime();
// Decide whether to include witness transactions
// This is only needed in case the witness softfork activation is reverted
// (which would require a very deep reorganization) or when
// -promiscuousmempoolflags is used.
// TODO: replace this with a call to main to assess validity of a mempool
// transaction (which in most cases can be a no-op).
fIncludeWitness = IsWitnessEnabled(pindexPrev, chainparams.GetConsensus());
addPriorityTxs();
if (fNeedSizeAccounting) {
// addPackageTxs (the CPFP-based algorithm) cannot deal with size based
// accounting, so fall back to the old algorithm.
addScoreTxs();
} else {
addPackageTxs();
}
nLastBlockTx = nBlockTx;
nLastBlockSize = nBlockSize;
nLastBlockCost = nBlockCost;
LogPrintf("CreateNewBlock(): total size %u txs: %u fees: %ld sigops %d\n", nBlockSize, nBlockTx, nFees, nBlockSigOpsCost);
// Create coinbase transaction.
CMutableTransaction coinbaseTx;
coinbaseTx.vin.resize(1);
coinbaseTx.vin[0].prevout.SetNull();
coinbaseTx.vout.resize(1);
coinbaseTx.vout[0].scriptPubKey = scriptPubKeyIn;
coinbaseTx.vout[0].nValue = nFees + GetBlockSubsidy(nHeight, chainparams.GetConsensus());
coinbaseTx.vin[0].scriptSig = CScript() << nHeight << OP_0;
pblock->vtx[0] = coinbaseTx;
pblocktemplate->vchCoinbaseCommitment = GenerateCoinbaseCommitment(*pblock, pindexPrev, chainparams.GetConsensus());
pblocktemplate->vTxFees[0] = -nFees;
// Fill in header
pblock->hashPrevBlock = pindexPrev->GetBlockHash();
UpdateTime(pblock, chainparams.GetConsensus(), pindexPrev);
pblock->nBits = GetNextWorkRequired(pindexPrev, pblock, chainparams.GetConsensus());
pblock->nNonce = 0;
pblocktemplate->vTxSigOpsCost[0] = GetLegacySigOpCount(pblock->vtx[0]);
CValidationState state;
if (!TestBlockValidity(state, chainparams, *pblock, pindexPrev, false, false)) {
throw std::runtime_error(strprintf("%s: TestBlockValidity failed: %s", __func__, FormatStateMessage(state)));
}
return pblocktemplate.release();
}
bool BlockAssembler::isStillDependent(CTxMemPool::txiter iter)
{
BOOST_FOREACH(CTxMemPool::txiter parent, mempool.GetMemPoolParents(iter))
{
if (!inBlock.count(parent)) {
return true;
}
}
return false;
}
void BlockAssembler::onlyUnconfirmed(CTxMemPool::setEntries& testSet)
{
for (CTxMemPool::setEntries::iterator iit = testSet.begin(); iit != testSet.end(); ) {
// Only test txs not already in the block
if (inBlock.count(*iit)) {
testSet.erase(iit++);
}
else {
iit++;
}
}
}
bool BlockAssembler::TestPackage(uint64_t packageSize, int64_t packageSigOpsCost)
{
// TODO: switch to cost-based accounting for packages instead of vsize-based accounting.
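// Illustrative example (assuming WITNESS_SCALE_FACTOR == 4, as in upstream
// Bitcoin Core): a package with a virtual size of 10000 bytes is charged
// 4 * 10000 = 40000 cost here. Since a transaction's cost is at most
// WITNESS_SCALE_FACTOR times its virtual size, this is a conservative
// over-estimate and will never let an accepted package push the block past
// nBlockMaxCost.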
if (nBlockCost + WITNESS_SCALE_FACTOR * packageSize >= nBlockMaxCost)
return false;
if (nBlockSigOpsCost + packageSigOpsCost >= MAX_BLOCK_SIGOPS_COST)
return false;
return true;
}
// Block size and sigops have already been tested. Check that all transactions
// are final.
bool BlockAssembler::TestPackageFinality(const CTxMemPool::setEntries& package)
{
BOOST_FOREACH (const CTxMemPool::txiter it, package) {
if (!IsFinalTx(it->GetTx(), nHeight, nLockTimeCutoff))
return false;
}
return true;
}
bool BlockAssembler::TestForBlock(CTxMemPool::txiter iter)
{
if (nBlockCost + iter->GetTxCost() >= nBlockMaxCost) {
// If the block is so close to full that no more txs will fit
// or if we've tried more than 50 times to fill remaining space
// then flag that the block is finished
if (nBlockCost > nBlockMaxCost - 400 || lastFewTxs > 50) {
blockFinished = true;
return false;
}
// Once we're within 4000 cost of a full block, only look at 50 more txs
// to try to fill the remaining space.
if (nBlockCost > nBlockMaxCost - 4000) {
lastFewTxs++;
}
return false;
}
if (fNeedSizeAccounting) {
if (nBlockSize + ::GetSerializeSize(iter->GetTx(), SER_NETWORK, PROTOCOL_VERSION) >= nBlockMaxSize) {
if (nBlockSize > nBlockMaxSize - 100 || lastFewTxs > 50) {
blockFinished = true;
return false;
}
if (nBlockSize > nBlockMaxSize - 1000) {
lastFewTxs++;
}
return false;
}
}
if (nBlockSigOpsCost + iter->GetSigOpCost() >= MAX_BLOCK_SIGOPS_COST) {
// If the block has room for no more sig ops then
// flag that the block is finished
if (nBlockSigOpsCost > MAX_BLOCK_SIGOPS_COST - 8) {
blockFinished = true;
return false;
}
// Otherwise attempt to find another tx with fewer sigops
// to put in the block.
return false;
}
// Must check that lock times are still valid
// This can be removed once MTP is always enforced
// as long as reorgs keep the mempool consistent.
if (!IsFinalTx(iter->GetTx(), nHeight, nLockTimeCutoff))
return false;
return true;
}
void BlockAssembler::AddToBlock(CTxMemPool::txiter iter)
{
pblock->vtx.push_back(iter->GetTx());
pblocktemplate->vTxFees.push_back(iter->GetFee());
pblocktemplate->vTxSigOpsCost.push_back(iter->GetSigOpCost());
if (fNeedSizeAccounting) {
nBlockSize += ::GetSerializeSize(iter->GetTx(), SER_NETWORK, PROTOCOL_VERSION);
}
nBlockCost += iter->GetTxCost();
++nBlockTx;
nBlockSigOpsCost += iter->GetSigOpCost();
nFees += iter->GetFee();
inBlock.insert(iter);
bool fPrintPriority = GetBoolArg("-printpriority", DEFAULT_PRINTPRIORITY);
if (fPrintPriority) {
double dPriority = iter->GetPriority(nHeight);
CAmount dummy;
mempool.ApplyDeltas(iter->GetTx().GetHash(), dPriority, dummy);
LogPrintf("priority %.1f fee %s txid %s\n",
dPriority,
CFeeRate(iter->GetModifiedFee(), iter->GetTxSize()).ToString(),
iter->GetTx().GetHash().ToString());
}
}
void BlockAssembler::addScoreTxs()
{
std::priority_queue<CTxMemPool::txiter, std::vector<CTxMemPool::txiter>, ScoreCompare> clearedTxs;
CTxMemPool::setEntries waitSet;
CTxMemPool::indexed_transaction_set::index<mining_score>::type::iterator mi = mempool.mapTx.get<mining_score>().begin();
CTxMemPool::txiter iter;
while (!blockFinished && (mi != mempool.mapTx.get<mining_score>().end() || !clearedTxs.empty()))
{
// If no txs that were previously postponed are available to try
// again, then try the next highest score tx
if (clearedTxs.empty()) {
iter = mempool.mapTx.project<0>(mi);
mi++;
}
// If a previously postponed tx is available to try again, then it
// has a higher score than any tx not yet tried
else {
iter = clearedTxs.top();
clearedTxs.pop();
}
// If tx already in block, skip (added by addPriorityTxs)
if (inBlock.count(iter)) {
continue;
}
// cannot accept witness transactions into a non-witness block
if (!fIncludeWitness && !iter->GetTx().wit.IsNull())
continue;
// If tx is dependent on other mempool txs which haven't yet been included
// then put it in the waitSet
if (isStillDependent(iter)) {
waitSet.insert(iter);
continue;
}
// If the fee rate is below the min fee rate for mining, then we're done
// adding txs based on score (fee rate)
if (iter->GetModifiedFee() < ::minRelayTxFee.GetFee(iter->GetTxSize()) && nBlockSize >= nBlockMinSize) {
return;
}
// If this tx fits in the block add it, otherwise keep looping
if (TestForBlock(iter)) {
AddToBlock(iter);
// This tx was successfully added, so
// add transactions that depend on this one to the priority queue to try again
BOOST_FOREACH(CTxMemPool::txiter child, mempool.GetMemPoolChildren(iter))
{
if (waitSet.count(child)) {
clearedTxs.push(child);
waitSet.erase(child);
}
}
}
}
}
void BlockAssembler::UpdatePackagesForAdded(const CTxMemPool::setEntries& alreadyAdded,
indexed_modified_transaction_set &mapModifiedTx)
{
BOOST_FOREACH(const CTxMemPool::txiter it, alreadyAdded) {
CTxMemPool::setEntries descendants;
mempool.CalculateDescendants(it, descendants);
// Insert all descendants (not yet in block) into the modified set
BOOST_FOREACH(CTxMemPool::txiter desc, descendants) {
if (alreadyAdded.count(desc))
continue;
modtxiter mit = mapModifiedTx.find(desc);
if (mit == mapModifiedTx.end()) {
CTxMemPoolModifiedEntry modEntry(desc);
modEntry.nSizeWithAncestors -= it->GetTxSize();
modEntry.nModFeesWithAncestors -= it->GetModifiedFee();
modEntry.nSigOpCostWithAncestors -= it->GetSigOpCost();
mapModifiedTx.insert(modEntry);
} else {
mapModifiedTx.modify(mit, update_for_parent_inclusion(it));
}
}
}
}
// Skip entries in mapTx that are already in a block or are present
// in mapModifiedTx (which implies that the mapTx ancestor state is
// stale due to ancestor inclusion in the block)
// Also skip transactions that we've already failed to add. This can happen if
// we consider a transaction in mapModifiedTx and it fails: we can then
// potentially consider it again while walking mapTx. It's currently
// guaranteed to fail again, but as a belt-and-suspenders check we put it in
// failedTx and avoid re-evaluation, since the re-evaluation would be using
// cached size/sigops/fee values that are not actually correct.
bool BlockAssembler::SkipMapTxEntry(CTxMemPool::txiter it, indexed_modified_transaction_set &mapModifiedTx, CTxMemPool::setEntries &failedTx)
{
assert (it != mempool.mapTx.end());
if (mapModifiedTx.count(it) || inBlock.count(it) || failedTx.count(it))
return true;
return false;
}
void BlockAssembler::SortForBlock(const CTxMemPool::setEntries& package, CTxMemPool::txiter entry, std::vector<CTxMemPool::txiter>& sortedEntries)
{
// Sort package by ancestor count
// If a transaction A depends on transaction B, then A's ancestor count
// must be greater than B's. So this is sufficient to validly order the
// transactions for block inclusion.
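// Illustrative example: if tx B spends an output of tx A, and tx C spends an
// output of B, their in-mempool ancestor counts (each entry counts itself) are
// A:1, B:2, C:3, so sorting by ascending ancestor count yields A, B, C, which
// is a valid inclusion order.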
sortedEntries.clear();
sortedEntries.insert(sortedEntries.begin(), package.begin(), package.end());
std::sort(sortedEntries.begin(), sortedEntries.end(), CompareTxIterByAncestorCount());
}
// This transaction selection algorithm orders the mempool based
// on feerate of a transaction including all unconfirmed ancestors.
// Since we don't remove transactions from the mempool as we select them
// for block inclusion, we need an alternate method of updating the feerate
// of a transaction with its not-yet-selected ancestors as we go.
// This is accomplished by walking the in-mempool descendants of selected
// transactions and storing a temporary modified state in mapModifiedTxs.
// Each time through the loop, we compare the best transaction in
// mapModifiedTxs with the next transaction in the mempool to decide what
// transaction package to work on next.
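// Illustrative example: suppose parent P (modified fee 10000, vsize 250) has an
// in-mempool child C (modified fee 2000, vsize 150). Before P is selected, C is
// evaluated as the package {P, C} with 12000 fee / 400 vsize. After P is added
// to the block, UpdatePackagesForAdded() records C in mapModifiedTx with P's
// contribution subtracted, so C then competes at 2000 fee / 150 vsize.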
void BlockAssembler::addPackageTxs()
{
// mapModifiedTx will store sorted packages after they are modified
// because some of their txs are already in the block
indexed_modified_transaction_set mapModifiedTx;
// Keep track of entries that failed inclusion, to avoid duplicate work
CTxMemPool::setEntries failedTx;
// Start by adding all descendants of previously added txs to mapModifiedTx
// and modifying them for their already included ancestors
UpdatePackagesForAdded(inBlock, mapModifiedTx);
CTxMemPool::indexed_transaction_set::index<ancestor_score>::type::iterator mi = mempool.mapTx.get<ancestor_score>().begin();
CTxMemPool::txiter iter;
while (mi != mempool.mapTx.get<ancestor_score>().end() || !mapModifiedTx.empty())
{
// First try to find a new transaction in mapTx to evaluate.
if (mi != mempool.mapTx.get<ancestor_score>().end() &&
SkipMapTxEntry(mempool.mapTx.project<0>(mi), mapModifiedTx, failedTx)) {
++mi;
continue;
}
// Now that mi is not stale, determine which transaction to evaluate:
// the next entry from mapTx, or the best from mapModifiedTx?
bool fUsingModified = false;
modtxscoreiter modit = mapModifiedTx.get<ancestor_score>().begin();
if (mi == mempool.mapTx.get<ancestor_score>().end()) {
// We're out of entries in mapTx; use the entry from mapModifiedTx
iter = modit->iter;
fUsingModified = true;
} else {
// Try to compare the mapTx entry to the mapModifiedTx entry
iter = mempool.mapTx.project<0>(mi);
if (modit != mapModifiedTx.get<ancestor_score>().end() &&
CompareModifiedEntry()(*modit, CTxMemPoolModifiedEntry(iter))) {
// The best entry in mapModifiedTx has higher score
// than the one from mapTx.
// Switch which transaction (package) to consider
iter = modit->iter;
fUsingModified = true;
} else {
// Either no entry in mapModifiedTx, or it's worse than mapTx.
// Increment mi for the next loop iteration.
++mi;
}
}
// We skip mapTx entries that are inBlock, and mapModifiedTx shouldn't
// contain anything that is inBlock.
assert(!inBlock.count(iter));
uint64_t packageSize = iter->GetSizeWithAncestors();
CAmount packageFees = iter->GetModFeesWithAncestors();
int64_t packageSigOpsCost = iter->GetSigOpCostWithAncestors();
if (fUsingModified) {
packageSize = modit->nSizeWithAncestors;
packageFees = modit->nModFeesWithAncestors;
packageSigOpsCost = modit->nSigOpCostWithAncestors;
}
if (packageFees < ::minRelayTxFee.GetFee(packageSize)) {
// Everything else we might consider has a lower fee rate
return;
}
if (!TestPackage(packageSize, packageSigOpsCost)) {
if (fUsingModified) {
// Since we always look at the best entry in mapModifiedTx,
// we must erase failed entries so that we can consider the
// next best entry on the next loop iteration
mapModifiedTx.get<ancestor_score>().erase(modit);
failedTx.insert(iter);
}
continue;
}
CTxMemPool::setEntries ancestors;
uint64_t nNoLimit = std::numeric_limits<uint64_t>::max();
std::string dummy;
mempool.CalculateMemPoolAncestors(*iter, ancestors, nNoLimit, nNoLimit, nNoLimit, nNoLimit, dummy, false);
onlyUnconfirmed(ancestors);
ancestors.insert(iter);
// Test that all txs in the package are final
if (!TestPackageFinality(ancestors)) {
if (fUsingModified) {
mapModifiedTx.get<ancestor_score>().erase(modit);
failedTx.insert(iter);
}
continue;
}
// Package can be added. Sort the entries in a valid order.
vector<CTxMemPool::txiter> sortedEntries;
SortForBlock(ancestors, iter, sortedEntries);
for (size_t i=0; i<sortedEntries.size(); ++i) {
AddToBlock(sortedEntries[i]);
// Erase from the modified set, if present
mapModifiedTx.erase(sortedEntries[i]);
}
// Update transactions that depend on each of these
UpdatePackagesForAdded(ancestors, mapModifiedTx);
}
}
void BlockAssembler::addPriorityTxs()
{
// How much of the block should be dedicated to high-priority transactions,
// included regardless of the fees they pay
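// Priority here is coin-age priority: roughly the sum over a tx's inputs of
// (input value * input depth in blocks) divided by the (modified) tx size, as
// returned by GetPriority(nHeight) below and optionally adjusted by
// prioritisetransaction deltas via mempool.ApplyDeltas().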
unsigned int nBlockPrioritySize = GetArg("-blockprioritysize", DEFAULT_BLOCK_PRIORITY_SIZE);
nBlockPrioritySize = std::min(nBlockMaxSize, nBlockPrioritySize);
if (nBlockPrioritySize == 0) {
return;
}
fNeedSizeAccounting = true;
// This vector will be sorted into a priority queue:
vector<TxCoinAgePriority> vecPriority;
TxCoinAgePriorityCompare pricomparer;
std::map<CTxMemPool::txiter, double, CTxMemPool::CompareIteratorByHash> waitPriMap;
typedef std::map<CTxMemPool::txiter, double, CTxMemPool::CompareIteratorByHash>::iterator waitPriIter;
double actualPriority = -1;
vecPriority.reserve(mempool.mapTx.size());
for (CTxMemPool::indexed_transaction_set::iterator mi = mempool.mapTx.begin();
mi != mempool.mapTx.end(); ++mi)
{
double dPriority = mi->GetPriority(nHeight);
CAmount dummy;
mempool.ApplyDeltas(mi->GetTx().GetHash(), dPriority, dummy);
vecPriority.push_back(TxCoinAgePriority(dPriority, mi));
}
std::make_heap(vecPriority.begin(), vecPriority.end(), pricomparer);
CTxMemPool::txiter iter;
while (!vecPriority.empty() && !blockFinished) { // add a tx from priority queue to fill the blockprioritysize
iter = vecPriority.front().second;
actualPriority = vecPriority.front().first;
std::pop_heap(vecPriority.begin(), vecPriority.end(), pricomparer);
vecPriority.pop_back();
// If tx already in block, skip
if (inBlock.count(iter)) {
assert(false); // shouldn't happen for priority txs
continue;
}
// cannot accept witness transactions into a non-witness block
if (!fIncludeWitness && !iter->GetTx().wit.IsNull())
continue;
// If tx is dependent on other mempool txs which haven't yet been included
// then put it in waitPriMap to retry once its parents are in the block
if (isStillDependent(iter)) {
waitPriMap.insert(std::make_pair(iter, actualPriority));
continue;
}
// If this tx fits in the block add it, otherwise keep looping
if (TestForBlock(iter)) {
AddToBlock(iter);
// Now that this tx has been added, stop if we've surpassed our desired priority size
// or dropped below the AllowFree threshold; we're done adding priority txs
if (nBlockSize >= nBlockPrioritySize || !AllowFree(actualPriority)) {
return;
}
// This tx was successfully added, so
// add transactions that depend on this one to the priority queue to try again
BOOST_FOREACH(CTxMemPool::txiter child, mempool.GetMemPoolChildren(iter))
{
waitPriIter wpiter = waitPriMap.find(child);
if (wpiter != waitPriMap.end()) {
vecPriority.push_back(TxCoinAgePriority(wpiter->second,child));
std::push_heap(vecPriority.begin(), vecPriority.end(), pricomparer);
waitPriMap.erase(wpiter);
}
}
}
}
}
void IncrementExtraNonce(CBlock* pblock, const CBlockIndex* pindexPrev, unsigned int& nExtraNonce)
{
// Update nExtraNonce
static uint256 hashPrevBlock;
if (hashPrevBlock != pblock->hashPrevBlock)
{
nExtraNonce = 0;
hashPrevBlock = pblock->hashPrevBlock;
}
++nExtraNonce;
unsigned int nHeight = pindexPrev->nHeight+1; // Height first in coinbase required for block.version=2
CMutableTransaction txCoinbase(pblock->vtx[0]);
txCoinbase.vin[0].scriptSig = (CScript() << nHeight << CScriptNum(nExtraNonce)) + COINBASE_FLAGS;
assert(txCoinbase.vin[0].scriptSig.size() <= 100);
pblock->vtx[0] = txCoinbase;
pblock->hashMerkleRoot = BlockMerkleRoot(*pblock);
}