mirror of
https://github.com/dashpay/dash.git
synced 2024-12-29 13:59:06 +01:00
0aa89c08ff
In cases of very large reorganisations (hundreds of blocks), an 'inv' may be sent in response to a 'getblocks' whose last mentioned block is already known to the receiving node. The supplying node, however, uses the request for that last block as the trigger to send the rest of the inv'ed blocks; if that request never comes, the block chain download stalls. This commit makes the receiving node always request the last inv'ed block, even if it is already known, to prevent this problem.
3586 lines
117 KiB
C++
// Copyright (c) 2009-2010 Satoshi Nakamoto
// Copyright (c) 2009-2012 The Bitcoin developers
// Distributed under the MIT/X11 software license, see the accompanying
// file license.txt or http://www.opensource.org/licenses/mit-license.php.
#include "headers.h"
#include "checkpoints.h"
#include "db.h"
#include "net.h"
#include "init.h"
#include <boost/algorithm/string/replace.hpp>
#include <boost/filesystem.hpp>
#include <boost/filesystem/fstream.hpp>

using namespace std;
using namespace boost;

//
// Global state
//

// Name of client reported in the 'version' message. Report the same name
// for both bitcoind and bitcoin-qt, to make it harder for attackers to
// target servers or GUI users specifically.
const std::string CLIENT_NAME("Satoshi");

CCriticalSection cs_setpwalletRegistered;
set<CWallet*> setpwalletRegistered;

CCriticalSection cs_main;

static map<uint256, CTransaction> mapTransactions;
CCriticalSection cs_mapTransactions;
unsigned int nTransactionsUpdated = 0;
map<COutPoint, CInPoint> mapNextTx;

map<uint256, CBlockIndex*> mapBlockIndex;
uint256 hashGenesisBlock("0x000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f");
static CBigNum bnProofOfWorkLimit(~uint256(0) >> 32);
CBlockIndex* pindexGenesisBlock = NULL;
int nBestHeight = -1;
CBigNum bnBestChainWork = 0;
CBigNum bnBestInvalidWork = 0;
uint256 hashBestChain = 0;
CBlockIndex* pindexBest = NULL;
int64 nTimeBestReceived = 0;

CMedianFilter<int> cPeerBlockCounts(5, 0); // Number of blocks that other nodes claim to have

map<uint256, CBlock*> mapOrphanBlocks;
multimap<uint256, CBlock*> mapOrphanBlocksByPrev;

map<uint256, CDataStream*> mapOrphanTransactions;
multimap<uint256, CDataStream*> mapOrphanTransactionsByPrev;

// Constant stuff for coinbase transactions we create:
CScript COINBASE_FLAGS;

const string strMessageMagic = "Bitcoin Signed Message:\n";

double dHashesPerSec;
int64 nHPSTimerStart;

// Settings
int64 nTransactionFee = 0;

//////////////////////////////////////////////////////////////////////////////
|
|
//
|
|
// dispatching functions
|
|
//
|
|
|
|
// These functions dispatch to one or all registered wallets
|
|
|
|
|
|
void RegisterWallet(CWallet* pwalletIn)
|
|
{
|
|
CRITICAL_BLOCK(cs_setpwalletRegistered)
|
|
{
|
|
setpwalletRegistered.insert(pwalletIn);
|
|
}
|
|
}
|
|
|
|
void UnregisterWallet(CWallet* pwalletIn)
|
|
{
|
|
CRITICAL_BLOCK(cs_setpwalletRegistered)
|
|
{
|
|
setpwalletRegistered.erase(pwalletIn);
|
|
}
|
|
}
|
|
|
|
// check whether the passed transaction is from us
|
|
bool static IsFromMe(CTransaction& tx)
|
|
{
|
|
BOOST_FOREACH(CWallet* pwallet, setpwalletRegistered)
|
|
if (pwallet->IsFromMe(tx))
|
|
return true;
|
|
return false;
|
|
}
|
|
|
|
// get the wallet transaction with the given hash (if it exists)
|
|
bool static GetTransaction(const uint256& hashTx, CWalletTx& wtx)
|
|
{
|
|
BOOST_FOREACH(CWallet* pwallet, setpwalletRegistered)
|
|
if (pwallet->GetTransaction(hashTx,wtx))
|
|
return true;
|
|
return false;
|
|
}
|
|
|
|
// erases transaction with the given hash from all wallets
|
|
void static EraseFromWallets(uint256 hash)
|
|
{
|
|
BOOST_FOREACH(CWallet* pwallet, setpwalletRegistered)
|
|
pwallet->EraseFromWallet(hash);
|
|
}
|
|
|
|
// make sure all wallets know about the given transaction, in the given block
|
|
void static SyncWithWallets(const CTransaction& tx, const CBlock* pblock = NULL, bool fUpdate = false)
|
|
{
|
|
BOOST_FOREACH(CWallet* pwallet, setpwalletRegistered)
|
|
pwallet->AddToWalletIfInvolvingMe(tx, pblock, fUpdate);
|
|
}
|
|
|
|
// notify wallets about a new best chain
|
|
void static SetBestChain(const CBlockLocator& loc)
|
|
{
|
|
BOOST_FOREACH(CWallet* pwallet, setpwalletRegistered)
|
|
pwallet->SetBestChain(loc);
|
|
}
|
|
|
|
// notify wallets about an updated transaction
|
|
void static UpdatedTransaction(const uint256& hashTx)
|
|
{
|
|
BOOST_FOREACH(CWallet* pwallet, setpwalletRegistered)
|
|
pwallet->UpdatedTransaction(hashTx);
|
|
}
|
|
|
|
// dump all wallets
|
|
void static PrintWallets(const CBlock& block)
|
|
{
|
|
BOOST_FOREACH(CWallet* pwallet, setpwalletRegistered)
|
|
pwallet->PrintWallet(block);
|
|
}
|
|
|
|
// notify wallets about an incoming inventory (for request counts)
|
|
void static Inventory(const uint256& hash)
|
|
{
|
|
BOOST_FOREACH(CWallet* pwallet, setpwalletRegistered)
|
|
pwallet->Inventory(hash);
|
|
}
|
|
|
|
// ask wallets to resend their transactions
|
|
void static ResendWalletTransactions()
|
|
{
|
|
BOOST_FOREACH(CWallet* pwallet, setpwalletRegistered)
|
|
pwallet->ResendWalletTransactions();
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
//////////////////////////////////////////////////////////////////////////////
|
|
//
|
|
// mapOrphanTransactions
|
|
//
|
|
|
|
void AddOrphanTx(const CDataStream& vMsg)
|
|
{
|
|
CTransaction tx;
|
|
CDataStream(vMsg) >> tx;
|
|
uint256 hash = tx.GetHash();
|
|
if (mapOrphanTransactions.count(hash))
|
|
return;
|
|
|
|
CDataStream* pvMsg = mapOrphanTransactions[hash] = new CDataStream(vMsg);
|
|
BOOST_FOREACH(const CTxIn& txin, tx.vin)
|
|
mapOrphanTransactionsByPrev.insert(make_pair(txin.prevout.hash, pvMsg));
|
|
}
|
|
|
|
void static EraseOrphanTx(uint256 hash)
|
|
{
|
|
if (!mapOrphanTransactions.count(hash))
|
|
return;
|
|
const CDataStream* pvMsg = mapOrphanTransactions[hash];
|
|
CTransaction tx;
|
|
CDataStream(*pvMsg) >> tx;
|
|
BOOST_FOREACH(const CTxIn& txin, tx.vin)
|
|
{
|
|
for (multimap<uint256, CDataStream*>::iterator mi = mapOrphanTransactionsByPrev.lower_bound(txin.prevout.hash);
|
|
mi != mapOrphanTransactionsByPrev.upper_bound(txin.prevout.hash);)
|
|
{
|
|
if ((*mi).second == pvMsg)
|
|
mapOrphanTransactionsByPrev.erase(mi++);
|
|
else
|
|
mi++;
|
|
}
|
|
}
|
|
delete pvMsg;
|
|
mapOrphanTransactions.erase(hash);
|
|
}
|
|
|
|
int LimitOrphanTxSize(int nMaxOrphans)
|
|
{
|
|
int nEvicted = 0;
|
|
while (mapOrphanTransactions.size() > nMaxOrphans)
|
|
{
|
|
// Evict a random orphan:
|
|
std::vector<unsigned char> randbytes(32);
|
|
RAND_bytes(&randbytes[0], 32);
|
|
uint256 randomhash(randbytes);
|
|
map<uint256, CDataStream*>::iterator it = mapOrphanTransactions.lower_bound(randomhash);
|
|
if (it == mapOrphanTransactions.end())
|
|
it = mapOrphanTransactions.begin();
|
|
EraseOrphanTx(it->first);
|
|
++nEvicted;
|
|
}
|
|
return nEvicted;
|
|
}
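// A note on the eviction strategy above: drawing 32 random bytes and taking
// lower_bound() on the map picks an existing orphan roughly uniformly at
// random over the key space, wrapping around to begin() when the random hash
// sorts after every stored key. Random eviction presumably keeps a flood of
// orphans from deterministically pushing out any particular entry.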
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
//////////////////////////////////////////////////////////////////////////////
|
|
//
|
|
// CTransaction and CTxIndex
|
|
//
|
|
|
|
bool CTransaction::ReadFromDisk(CTxDB& txdb, COutPoint prevout, CTxIndex& txindexRet)
|
|
{
|
|
SetNull();
|
|
if (!txdb.ReadTxIndex(prevout.hash, txindexRet))
|
|
return false;
|
|
if (!ReadFromDisk(txindexRet.pos))
|
|
return false;
|
|
if (prevout.n >= vout.size())
|
|
{
|
|
SetNull();
|
|
return false;
|
|
}
|
|
return true;
|
|
}
|
|
|
|
bool CTransaction::ReadFromDisk(CTxDB& txdb, COutPoint prevout)
|
|
{
|
|
CTxIndex txindex;
|
|
return ReadFromDisk(txdb, prevout, txindex);
|
|
}
|
|
|
|
bool CTransaction::ReadFromDisk(COutPoint prevout)
|
|
{
|
|
CTxDB txdb("r");
|
|
CTxIndex txindex;
|
|
return ReadFromDisk(txdb, prevout, txindex);
|
|
}
|
|
|
|
bool CTransaction::IsStandard() const
|
|
{
|
|
BOOST_FOREACH(const CTxIn& txin, vin)
|
|
{
|
|
// Biggest 'standard' txin is a 3-signature 3-of-3 CHECKMULTISIG
|
|
// pay-to-script-hash, which is 3 ~80-byte signatures, 3
|
|
// ~65-byte public keys, plus a few script ops.
|
|
if (txin.scriptSig.size() > 500)
|
|
return false;
|
|
if (!txin.scriptSig.IsPushOnly())
|
|
return false;
|
|
}
|
|
BOOST_FOREACH(const CTxOut& txout, vout)
|
|
if (!::IsStandard(txout.scriptPubKey))
|
|
return false;
|
|
return true;
|
|
}
|
|
|
|
//
|
|
// Check transaction inputs, and make sure any
|
|
// pay-to-script-hash transactions are evaluating IsStandard scripts
|
|
//
|
|
// Why bother? To avoid denial-of-service attacks; an attacker
|
|
// can submit a standard HASH... OP_EQUAL transaction,
|
|
// which will get accepted into blocks. The redemption
|
|
// script can be anything; an attacker could use a very
|
|
// expensive-to-check-upon-redemption script like:
|
|
// DUP CHECKSIG DROP ... repeated 100 times... OP_1
|
|
//
|
|
bool CTransaction::AreInputsStandard(const MapPrevTx& mapInputs) const
|
|
{
|
|
if (IsCoinBase())
|
|
return true; // Coinbases don't use vin normally
|
|
|
|
for (int i = 0; i < vin.size(); i++)
|
|
{
|
|
const CTxOut& prev = GetOutputFor(vin[i], mapInputs);
|
|
|
|
vector<vector<unsigned char> > vSolutions;
|
|
txnouttype whichType;
|
|
// get the scriptPubKey corresponding to this input:
|
|
const CScript& prevScript = prev.scriptPubKey;
|
|
if (!Solver(prevScript, whichType, vSolutions))
|
|
return false;
|
|
int nArgsExpected = ScriptSigArgsExpected(whichType, vSolutions);
|
|
|
|
// Transactions with extra stuff in their scriptSigs are
|
|
// non-standard. Note that this EvalScript() call will
|
|
// be quick, because if there are any operations
|
|
// beside "push data" in the scriptSig the
|
|
// IsStandard() call returns false
|
|
vector<vector<unsigned char> > stack;
|
|
if (!EvalScript(stack, vin[i].scriptSig, *this, i, 0))
|
|
return false;
|
|
|
|
if (whichType == TX_SCRIPTHASH)
|
|
{
|
|
if (stack.empty())
|
|
return false;
|
|
CScript subscript(stack.back().begin(), stack.back().end());
|
|
vector<vector<unsigned char> > vSolutions2;
|
|
txnouttype whichType2;
|
|
if (!Solver(subscript, whichType2, vSolutions2))
|
|
return false;
|
|
if (whichType2 == TX_SCRIPTHASH)
|
|
return false;
|
|
nArgsExpected += ScriptSigArgsExpected(whichType2, vSolutions2);
|
|
}
|
|
|
|
if (stack.size() != nArgsExpected)
|
|
return false;
|
|
}
|
|
|
|
return true;
|
|
}
|
|
|
|
int
|
|
CTransaction::GetLegacySigOpCount() const
|
|
{
|
|
int nSigOps = 0;
|
|
BOOST_FOREACH(const CTxIn& txin, vin)
|
|
{
|
|
nSigOps += txin.scriptSig.GetSigOpCount(false);
|
|
}
|
|
BOOST_FOREACH(const CTxOut& txout, vout)
|
|
{
|
|
nSigOps += txout.scriptPubKey.GetSigOpCount(false);
|
|
}
|
|
return nSigOps;
|
|
}
|
|
|
|
|
|
int CMerkleTx::SetMerkleBranch(const CBlock* pblock)
|
|
{
|
|
if (fClient)
|
|
{
|
|
if (hashBlock == 0)
|
|
return 0;
|
|
}
|
|
else
|
|
{
|
|
CBlock blockTmp;
|
|
if (pblock == NULL)
|
|
{
|
|
// Load the block this tx is in
|
|
CTxIndex txindex;
|
|
if (!CTxDB("r").ReadTxIndex(GetHash(), txindex))
|
|
return 0;
|
|
if (!blockTmp.ReadFromDisk(txindex.pos.nFile, txindex.pos.nBlockPos))
|
|
return 0;
|
|
pblock = &blockTmp;
|
|
}
|
|
|
|
// Update the tx's hashBlock
|
|
hashBlock = pblock->GetHash();
|
|
|
|
// Locate the transaction
|
|
for (nIndex = 0; nIndex < pblock->vtx.size(); nIndex++)
|
|
if (pblock->vtx[nIndex] == *(CTransaction*)this)
|
|
break;
|
|
if (nIndex == pblock->vtx.size())
|
|
{
|
|
vMerkleBranch.clear();
|
|
nIndex = -1;
|
|
printf("ERROR: SetMerkleBranch() : couldn't find tx in block\n");
|
|
return 0;
|
|
}
|
|
|
|
// Fill in merkle branch
|
|
vMerkleBranch = pblock->GetMerkleBranch(nIndex);
|
|
}
|
|
|
|
// Is the tx in a block that's in the main chain
|
|
map<uint256, CBlockIndex*>::iterator mi = mapBlockIndex.find(hashBlock);
|
|
if (mi == mapBlockIndex.end())
|
|
return 0;
|
|
CBlockIndex* pindex = (*mi).second;
|
|
if (!pindex || !pindex->IsInMainChain())
|
|
return 0;
|
|
|
|
return pindexBest->nHeight - pindex->nHeight + 1;
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
bool CTransaction::CheckTransaction() const
|
|
{
|
|
// Basic checks that don't depend on any context
|
|
if (vin.empty())
|
|
return DoS(10, error("CTransaction::CheckTransaction() : vin empty"));
|
|
if (vout.empty())
|
|
return DoS(10, error("CTransaction::CheckTransaction() : vout empty"));
|
|
// Size limits
|
|
if (::GetSerializeSize(*this, SER_NETWORK) > MAX_BLOCK_SIZE)
|
|
return DoS(100, error("CTransaction::CheckTransaction() : size limits failed"));
|
|
|
|
// Check for negative or overflow output values
|
|
int64 nValueOut = 0;
|
|
BOOST_FOREACH(const CTxOut& txout, vout)
|
|
{
|
|
if (txout.nValue < 0)
|
|
return DoS(100, error("CTransaction::CheckTransaction() : txout.nValue negative"));
|
|
if (txout.nValue > MAX_MONEY)
|
|
return DoS(100, error("CTransaction::CheckTransaction() : txout.nValue too high"));
|
|
nValueOut += txout.nValue;
|
|
if (!MoneyRange(nValueOut))
|
|
return DoS(100, error("CTransaction::CheckTransaction() : txout total out of range"));
|
|
}
|
|
|
|
// Check for duplicate inputs
|
|
set<COutPoint> vInOutPoints;
|
|
BOOST_FOREACH(const CTxIn& txin, vin)
|
|
{
|
|
if (vInOutPoints.count(txin.prevout))
|
|
return false;
|
|
vInOutPoints.insert(txin.prevout);
|
|
}
|
|
|
|
if (IsCoinBase())
|
|
{
|
|
if (vin[0].scriptSig.size() < 2 || vin[0].scriptSig.size() > 100)
|
|
return DoS(100, error("CTransaction::CheckTransaction() : coinbase script size"));
|
|
}
|
|
else
|
|
{
|
|
BOOST_FOREACH(const CTxIn& txin, vin)
|
|
if (txin.prevout.IsNull())
|
|
return DoS(10, error("CTransaction::CheckTransaction() : prevout is null"));
|
|
}
|
|
|
|
return true;
|
|
}
|
|
|
|
bool CTransaction::AcceptToMemoryPool(CTxDB& txdb, bool fCheckInputs, bool* pfMissingInputs)
|
|
{
|
|
if (pfMissingInputs)
|
|
*pfMissingInputs = false;
|
|
|
|
if (!CheckTransaction())
|
|
return error("AcceptToMemoryPool() : CheckTransaction failed");
|
|
|
|
// Coinbase is only valid in a block, not as a loose transaction
|
|
if (IsCoinBase())
|
|
return DoS(100, error("AcceptToMemoryPool() : coinbase as individual tx"));
|
|
|
|
// To help v0.1.5 clients who would see it as a negative number
|
|
if ((int64)nLockTime > std::numeric_limits<int>::max())
|
|
return error("AcceptToMemoryPool() : not accepting nLockTime beyond 2038 yet");
|
|
|
|
// Rather not work on nonstandard transactions (unless -testnet)
|
|
if (!fTestNet && !IsStandard())
|
|
return error("AcceptToMemoryPool() : nonstandard transaction type");
|
|
|
|
// Do we already have it?
|
|
uint256 hash = GetHash();
|
|
CRITICAL_BLOCK(cs_mapTransactions)
|
|
if (mapTransactions.count(hash))
|
|
return false;
|
|
if (fCheckInputs)
|
|
if (txdb.ContainsTx(hash))
|
|
return false;
|
|
|
|
// Check for conflicts with in-memory transactions
|
|
CTransaction* ptxOld = NULL;
|
|
for (int i = 0; i < vin.size(); i++)
|
|
{
|
|
COutPoint outpoint = vin[i].prevout;
|
|
if (mapNextTx.count(outpoint))
|
|
{
|
|
// Disable replacement feature for now
|
|
return false;
|
|
|
|
// Allow replacing with a newer version of the same transaction
|
|
if (i != 0)
|
|
return false;
|
|
ptxOld = mapNextTx[outpoint].ptx;
|
|
if (ptxOld->IsFinal())
|
|
return false;
|
|
if (!IsNewerThan(*ptxOld))
|
|
return false;
|
|
for (int i = 0; i < vin.size(); i++)
|
|
{
|
|
COutPoint outpoint = vin[i].prevout;
|
|
if (!mapNextTx.count(outpoint) || mapNextTx[outpoint].ptx != ptxOld)
|
|
return false;
|
|
}
|
|
break;
|
|
}
|
|
}
|
|
|
|
if (fCheckInputs)
|
|
{
|
|
MapPrevTx mapInputs;
|
|
map<uint256, CTxIndex> mapUnused;
|
|
bool fInvalid = false;
|
|
if (!FetchInputs(txdb, mapUnused, false, false, mapInputs, fInvalid))
|
|
{
|
|
if (fInvalid)
|
|
return error("AcceptToMemoryPool() : FetchInputs found invalid tx %s", hash.ToString().substr(0,10).c_str());
|
|
if (pfMissingInputs)
|
|
*pfMissingInputs = true;
|
|
return error("AcceptToMemoryPool() : FetchInputs failed %s", hash.ToString().substr(0,10).c_str());
|
|
}
|
|
|
|
// Check for non-standard pay-to-script-hash in inputs
|
|
if (!AreInputsStandard(mapInputs) && !fTestNet)
|
|
return error("AcceptToMemoryPool() : nonstandard transaction input");
|
|
|
|
// Note: if you modify this code to accept non-standard transactions, then
|
|
// you should add code here to check that the transaction does a
|
|
// reasonable number of ECDSA signature verifications.
|
|
|
|
int64 nFees = GetValueIn(mapInputs)-GetValueOut();
|
|
unsigned int nSize = ::GetSerializeSize(*this, SER_NETWORK);
|
|
|
|
// Don't accept it if it can't get into a block
|
|
if (nFees < GetMinFee(1000, true, GMF_RELAY))
|
|
return error("AcceptToMemoryPool() : not enough fees");
|
|
|
|
// Continuously rate-limit free transactions
|
|
// This mitigates 'penny-flooding' -- sending thousands of free transactions just to
|
|
// be annoying or make others' transactions take longer to confirm.
|
|
if (nFees < MIN_RELAY_TX_FEE)
|
|
{
|
|
static CCriticalSection cs;
|
|
static double dFreeCount;
|
|
static int64 nLastTime;
|
|
int64 nNow = GetTime();
|
|
|
|
CRITICAL_BLOCK(cs)
|
|
{
|
|
// Use an exponentially decaying ~10-minute window:
|
|
dFreeCount *= pow(1.0 - 1.0/600.0, (double)(nNow - nLastTime));
|
|
nLastTime = nNow;
|
|
// -limitfreerelay unit is thousand-bytes-per-minute
|
|
// At default rate it would take over a month to fill 1GB
|
|
if (dFreeCount > GetArg("-limitfreerelay", 15)*10*1000 && !IsFromMe(*this))
|
|
return error("AcceptToMemoryPool() : free transaction rejected by rate limiter");
|
|
if (fDebug)
|
|
printf("Rate limit dFreeCount: %g => %g\n", dFreeCount, dFreeCount+nSize);
|
|
dFreeCount += nSize;
|
|
}
|
|
}
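// To make the limiter above concrete: dFreeCount decays by a factor of
// (1 - 1/600) per elapsed second, i.e. an exponential window of roughly ten
// minutes. With the default -limitfreerelay of 15 the cutoff is
// 15 * 10 * 1000 = 150,000 bytes of free transactions in that window, and
// transactions from our own wallets are exempt from the check.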
|
|
|
|
// Check against previous transactions
|
|
// This is done last to help prevent CPU exhaustion denial-of-service attacks.
|
|
if (!ConnectInputs(mapInputs, mapUnused, CDiskTxPos(1,1,1), pindexBest, false, false))
|
|
{
|
|
return error("AcceptToMemoryPool() : ConnectInputs failed %s", hash.ToString().substr(0,10).c_str());
|
|
}
|
|
}
|
|
|
|
// Store transaction in memory
|
|
CRITICAL_BLOCK(cs_mapTransactions)
|
|
{
|
|
if (ptxOld)
|
|
{
|
|
printf("AcceptToMemoryPool() : replacing tx %s with new version\n", ptxOld->GetHash().ToString().c_str());
|
|
ptxOld->RemoveFromMemoryPool();
|
|
}
|
|
AddToMemoryPoolUnchecked();
|
|
}
|
|
|
|
///// are we sure this is ok when loading transactions or restoring block txes
|
|
// If updated, erase old tx from wallet
|
|
if (ptxOld)
|
|
EraseFromWallets(ptxOld->GetHash());
|
|
|
|
printf("AcceptToMemoryPool(): accepted %s\n", hash.ToString().substr(0,10).c_str());
|
|
return true;
|
|
}
|
|
|
|
bool CTransaction::AcceptToMemoryPool(bool fCheckInputs, bool* pfMissingInputs)
|
|
{
|
|
CTxDB txdb("r");
|
|
return AcceptToMemoryPool(txdb, fCheckInputs, pfMissingInputs);
|
|
}
|
|
|
|
uint64 nPooledTx = 0;
|
|
|
|
bool CTransaction::AddToMemoryPoolUnchecked()
|
|
{
|
|
printf("AddToMemoryPoolUnchecked(): size %lu\n", mapTransactions.size());
|
|
// Add to memory pool without checking anything. Don't call this directly,
|
|
// call AcceptToMemoryPool to properly check the transaction first.
|
|
CRITICAL_BLOCK(cs_mapTransactions)
|
|
{
|
|
uint256 hash = GetHash();
|
|
mapTransactions[hash] = *this;
|
|
for (int i = 0; i < vin.size(); i++)
|
|
mapNextTx[vin[i].prevout] = CInPoint(&mapTransactions[hash], i);
|
|
nTransactionsUpdated++;
|
|
++nPooledTx;
|
|
}
|
|
return true;
|
|
}
|
|
|
|
|
|
bool CTransaction::RemoveFromMemoryPool()
|
|
{
|
|
// Remove transaction from memory pool
|
|
CRITICAL_BLOCK(cs_mapTransactions)
|
|
{
|
|
uint256 hash = GetHash();
|
|
if (mapTransactions.count(hash))
|
|
{
|
|
BOOST_FOREACH(const CTxIn& txin, vin)
|
|
mapNextTx.erase(txin.prevout);
|
|
mapTransactions.erase(hash);
|
|
nTransactionsUpdated++;
|
|
--nPooledTx;
|
|
}
|
|
}
|
|
return true;
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
int CMerkleTx::GetDepthInMainChain(CBlockIndex* &pindexRet) const
|
|
{
|
|
if (hashBlock == 0 || nIndex == -1)
|
|
return 0;
|
|
|
|
// Find the block it claims to be in
|
|
map<uint256, CBlockIndex*>::iterator mi = mapBlockIndex.find(hashBlock);
|
|
if (mi == mapBlockIndex.end())
|
|
return 0;
|
|
CBlockIndex* pindex = (*mi).second;
|
|
if (!pindex || !pindex->IsInMainChain())
|
|
return 0;
|
|
|
|
// Make sure the merkle branch connects to this block
|
|
if (!fMerkleVerified)
|
|
{
|
|
if (CBlock::CheckMerkleBranch(GetHash(), vMerkleBranch, nIndex) != pindex->hashMerkleRoot)
|
|
return 0;
|
|
fMerkleVerified = true;
|
|
}
|
|
|
|
pindexRet = pindex;
|
|
return pindexBest->nHeight - pindex->nHeight + 1;
|
|
}
|
|
|
|
|
|
int CMerkleTx::GetBlocksToMaturity() const
|
|
{
|
|
if (!IsCoinBase())
|
|
return 0;
|
|
return max(0, (COINBASE_MATURITY+20) - GetDepthInMainChain());
|
|
}
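// Worked example, assuming COINBASE_MATURITY is 100 as defined elsewhere in
// this tree: a freshly mined coinbase at depth 1 reports
// (100 + 20) - 1 = 119 blocks to maturity; the extra 20 appears to be a
// conservative display margin on top of the consensus maturity rule.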
|
|
|
|
|
|
bool CMerkleTx::AcceptToMemoryPool(CTxDB& txdb, bool fCheckInputs)
|
|
{
|
|
if (fClient)
|
|
{
|
|
if (!IsInMainChain() && !ClientConnectInputs())
|
|
return false;
|
|
return CTransaction::AcceptToMemoryPool(txdb, false);
|
|
}
|
|
else
|
|
{
|
|
return CTransaction::AcceptToMemoryPool(txdb, fCheckInputs);
|
|
}
|
|
}
|
|
|
|
bool CMerkleTx::AcceptToMemoryPool()
|
|
{
|
|
CTxDB txdb("r");
|
|
return AcceptToMemoryPool(txdb);
|
|
}
|
|
|
|
|
|
|
|
bool CWalletTx::AcceptWalletTransaction(CTxDB& txdb, bool fCheckInputs)
|
|
{
|
|
CRITICAL_BLOCK(cs_mapTransactions)
|
|
{
|
|
// Add previous supporting transactions first
|
|
BOOST_FOREACH(CMerkleTx& tx, vtxPrev)
|
|
{
|
|
if (!tx.IsCoinBase())
|
|
{
|
|
uint256 hash = tx.GetHash();
|
|
if (!mapTransactions.count(hash) && !txdb.ContainsTx(hash))
|
|
tx.AcceptToMemoryPool(txdb, fCheckInputs);
|
|
}
|
|
}
|
|
return AcceptToMemoryPool(txdb, fCheckInputs);
|
|
}
|
|
return false;
|
|
}
|
|
|
|
bool CWalletTx::AcceptWalletTransaction()
|
|
{
|
|
CTxDB txdb("r");
|
|
return AcceptWalletTransaction(txdb);
|
|
}
|
|
|
|
int CTxIndex::GetDepthInMainChain() const
|
|
{
|
|
// Read block header
|
|
CBlock block;
|
|
if (!block.ReadFromDisk(pos.nFile, pos.nBlockPos, false))
|
|
return 0;
|
|
// Find the block in the index
|
|
map<uint256, CBlockIndex*>::iterator mi = mapBlockIndex.find(block.GetHash());
|
|
if (mi == mapBlockIndex.end())
|
|
return 0;
|
|
CBlockIndex* pindex = (*mi).second;
|
|
if (!pindex || !pindex->IsInMainChain())
|
|
return 0;
|
|
return 1 + nBestHeight - pindex->nHeight;
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
//////////////////////////////////////////////////////////////////////////////
|
|
//
|
|
// CBlock and CBlockIndex
|
|
//
|
|
|
|
bool CBlock::ReadFromDisk(const CBlockIndex* pindex, bool fReadTransactions)
|
|
{
|
|
if (!fReadTransactions)
|
|
{
|
|
*this = pindex->GetBlockHeader();
|
|
return true;
|
|
}
|
|
if (!ReadFromDisk(pindex->nFile, pindex->nBlockPos, fReadTransactions))
|
|
return false;
|
|
if (GetHash() != pindex->GetBlockHash())
|
|
return error("CBlock::ReadFromDisk() : GetHash() doesn't match index");
|
|
return true;
|
|
}
|
|
|
|
uint256 static GetOrphanRoot(const CBlock* pblock)
|
|
{
|
|
// Work back to the first block in the orphan chain
|
|
while (mapOrphanBlocks.count(pblock->hashPrevBlock))
|
|
pblock = mapOrphanBlocks[pblock->hashPrevBlock];
|
|
return pblock->GetHash();
|
|
}
|
|
|
|
int64 static GetBlockValue(int nHeight, int64 nFees)
|
|
{
|
|
int64 nSubsidy = 50 * COIN;
|
|
|
|
// Subsidy is cut in half every 4 years
|
|
nSubsidy >>= (nHeight / 210000);
|
|
|
|
return nSubsidy + nFees;
|
|
}
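// To make the schedule above concrete: the right-shift halves the 50-coin
// subsidy every 210,000 blocks (roughly four years at the 10-minute spacing):
//   heights       0 - 209,999  ->  50 * COIN
//   heights 210,000 - 419,999  ->  25 * COIN
//   heights 420,000 - 629,999  ->  12.5 * COIN, and so on,
// which bounds total issuance at just under 21 million coins.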
|
|
|
|
static const int64 nTargetTimespan = 14 * 24 * 60 * 60; // two weeks
|
|
static const int64 nTargetSpacing = 10 * 60;
|
|
static const int64 nInterval = nTargetTimespan / nTargetSpacing;
|
|
|
|
//
|
|
// minimum amount of work that could possibly be required nTime after
|
|
// minimum work required was nBase
|
|
//
|
|
unsigned int ComputeMinWork(unsigned int nBase, int64 nTime)
|
|
{
|
|
// Testnet has min-difficulty blocks
|
|
// after nTargetSpacing*2 time between blocks:
|
|
if (fTestNet && nTime > nTargetSpacing*2)
|
|
return bnProofOfWorkLimit.GetCompact();
|
|
|
|
CBigNum bnResult;
|
|
bnResult.SetCompact(nBase);
|
|
while (nTime > 0 && bnResult < bnProofOfWorkLimit)
|
|
{
|
|
// Maximum 400% adjustment...
|
|
bnResult *= 4;
|
|
// ... in best-case exactly 4-times-normal target time
|
|
nTime -= nTargetTimespan*4;
|
|
}
|
|
if (bnResult > bnProofOfWorkLimit)
|
|
bnResult = bnProofOfWorkLimit;
|
|
return bnResult.GetCompact();
|
|
}
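// Worked example: starting from nBase, each pass of the loop lets the target
// grow by a factor of 4 while consuming nTargetTimespan*4 (eight weeks) of
// elapsed time. For an nTime of ten weeks the loop runs twice, so the minimum
// acceptable work is 16x easier than nBase, clamped at bnProofOfWorkLimit.
// ProcessBlock() below uses this against the last checkpoint's nBits to
// reject blocks claiming implausibly little work for the time elapsed.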
|
|
|
|
unsigned int static GetNextWorkRequired(const CBlockIndex* pindexLast, const CBlock *pblock)
|
|
{
|
|
unsigned int nProofOfWorkLimit = bnProofOfWorkLimit.GetCompact();
|
|
|
|
// Genesis block
|
|
if (pindexLast == NULL)
|
|
return nProofOfWorkLimit;
|
|
|
|
// Only change once per interval
|
|
if ((pindexLast->nHeight+1) % nInterval != 0)
|
|
{
|
|
// Special rules for testnet after 15 Feb 2012:
|
|
if (fTestNet && pblock->nTime > 1329264000)
|
|
{
|
|
// If the new block's timestamp is more than 2 * 10 minutes
// after the previous block, then allow mining of a min-difficulty block.
|
|
if (pblock->nTime - pindexLast->nTime > nTargetSpacing*2)
|
|
return nProofOfWorkLimit;
|
|
else
|
|
{
|
|
// Return the last non-special-min-difficulty-rules-block
|
|
const CBlockIndex* pindex = pindexLast;
|
|
while (pindex->pprev && pindex->nHeight % nInterval != 0 && pindex->nBits == nProofOfWorkLimit)
|
|
pindex = pindex->pprev;
|
|
return pindex->nBits;
|
|
}
|
|
}
|
|
|
|
return pindexLast->nBits;
|
|
}
|
|
|
|
// Go back by what we want to be 14 days worth of blocks
|
|
const CBlockIndex* pindexFirst = pindexLast;
|
|
for (int i = 0; pindexFirst && i < nInterval-1; i++)
|
|
pindexFirst = pindexFirst->pprev;
|
|
assert(pindexFirst);
|
|
|
|
// Limit adjustment step
|
|
int64 nActualTimespan = pindexLast->GetBlockTime() - pindexFirst->GetBlockTime();
|
|
printf(" nActualTimespan = %"PRI64d" before bounds\n", nActualTimespan);
|
|
if (nActualTimespan < nTargetTimespan/4)
|
|
nActualTimespan = nTargetTimespan/4;
|
|
if (nActualTimespan > nTargetTimespan*4)
|
|
nActualTimespan = nTargetTimespan*4;
|
|
|
|
// Retarget
|
|
CBigNum bnNew;
|
|
bnNew.SetCompact(pindexLast->nBits);
|
|
bnNew *= nActualTimespan;
|
|
bnNew /= nTargetTimespan;
|
|
|
|
if (bnNew > bnProofOfWorkLimit)
|
|
bnNew = bnProofOfWorkLimit;
|
|
|
|
/// debug print
|
|
printf("GetNextWorkRequired RETARGET\n");
|
|
printf("nTargetTimespan = %"PRI64d" nActualTimespan = %"PRI64d"\n", nTargetTimespan, nActualTimespan);
|
|
printf("Before: %08x %s\n", pindexLast->nBits, CBigNum().SetCompact(pindexLast->nBits).getuint256().ToString().c_str());
|
|
printf("After: %08x %s\n", bnNew.GetCompact(), bnNew.getuint256().ToString().c_str());
|
|
|
|
return bnNew.GetCompact();
|
|
}
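// Worked example: nInterval is 14*24*60*60 / (10*60) = 2016 blocks. If the
// last interval took only one week (nActualTimespan = nTargetTimespan/2),
// bnNew becomes half the old target, doubling the difficulty; the clamps
// above keep any single retarget within a factor of 4 in either direction.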
|
|
|
|
bool CheckProofOfWork(uint256 hash, unsigned int nBits)
|
|
{
|
|
CBigNum bnTarget;
|
|
bnTarget.SetCompact(nBits);
|
|
|
|
// Check range
|
|
if (bnTarget <= 0 || bnTarget > bnProofOfWorkLimit)
|
|
return error("CheckProofOfWork() : nBits below minimum work");
|
|
|
|
// Check proof of work matches claimed amount
|
|
if (hash > bnTarget.getuint256())
|
|
return error("CheckProofOfWork() : hash doesn't match nBits");
|
|
|
|
return true;
|
|
}
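// For reference, the compact encoding works like this: nBits 0x1d00ffff has
// exponent 0x1d and mantissa 0x00ffff, expanding to the target
// 0x00000000ffff0000000000000000000000000000000000000000000000000000,
// i.e. the difficulty-1 target just below bnProofOfWorkLimit (~uint256(0) >> 32).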
|
|
|
|
// Return the maximum number of blocks that other nodes claim to have
|
|
int GetNumBlocksOfPeers()
|
|
{
|
|
return std::max(cPeerBlockCounts.median(), Checkpoints::GetTotalBlocksEstimate());
|
|
}
|
|
|
|
bool IsInitialBlockDownload()
|
|
{
|
|
if (pindexBest == NULL || nBestHeight < Checkpoints::GetTotalBlocksEstimate())
|
|
return true;
|
|
static int64 nLastUpdate;
|
|
static CBlockIndex* pindexLastBest;
|
|
if (pindexBest != pindexLastBest)
|
|
{
|
|
pindexLastBest = pindexBest;
|
|
nLastUpdate = GetTime();
|
|
}
|
|
return (GetTime() - nLastUpdate < 10 &&
|
|
pindexBest->GetBlockTime() < GetTime() - 24 * 60 * 60);
|
|
}
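// In other words: the node reports being in initial download while it is
// still below the checkpoint height estimate, or while the tip is advancing
// (it changed within the last 10 seconds) but the best block's timestamp is
// still more than 24 hours behind the current time.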
|
|
|
|
void static InvalidChainFound(CBlockIndex* pindexNew)
|
|
{
|
|
if (pindexNew->bnChainWork > bnBestInvalidWork)
|
|
{
|
|
bnBestInvalidWork = pindexNew->bnChainWork;
|
|
CTxDB().WriteBestInvalidWork(bnBestInvalidWork);
|
|
MainFrameRepaint();
|
|
}
|
|
printf("InvalidChainFound: invalid block=%s height=%d work=%s\n", pindexNew->GetBlockHash().ToString().substr(0,20).c_str(), pindexNew->nHeight, pindexNew->bnChainWork.ToString().c_str());
|
|
printf("InvalidChainFound: current best=%s height=%d work=%s\n", hashBestChain.ToString().substr(0,20).c_str(), nBestHeight, bnBestChainWork.ToString().c_str());
|
|
if (pindexBest && bnBestInvalidWork > bnBestChainWork + pindexBest->GetBlockWork() * 6)
|
|
printf("InvalidChainFound: WARNING: Displayed transactions may not be correct! You may need to upgrade, or other nodes may need to upgrade.\n");
|
|
}
|
|
|
|
void CBlock::UpdateTime(const CBlockIndex* pindexPrev)
|
|
{
|
|
nTime = max(pindexPrev->GetMedianTimePast()+1, GetAdjustedTime());
|
|
|
|
// Updating time can change work required on testnet:
|
|
if (fTestNet)
|
|
nBits = GetNextWorkRequired(pindexPrev, this);
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
bool CTransaction::DisconnectInputs(CTxDB& txdb)
|
|
{
|
|
// Relinquish previous transactions' spent pointers
|
|
if (!IsCoinBase())
|
|
{
|
|
BOOST_FOREACH(const CTxIn& txin, vin)
|
|
{
|
|
COutPoint prevout = txin.prevout;
|
|
|
|
// Get prev txindex from disk
|
|
CTxIndex txindex;
|
|
if (!txdb.ReadTxIndex(prevout.hash, txindex))
|
|
return error("DisconnectInputs() : ReadTxIndex failed");
|
|
|
|
if (prevout.n >= txindex.vSpent.size())
|
|
return error("DisconnectInputs() : prevout.n out of range");
|
|
|
|
// Mark outpoint as not spent
|
|
txindex.vSpent[prevout.n].SetNull();
|
|
|
|
// Write back
|
|
if (!txdb.UpdateTxIndex(prevout.hash, txindex))
|
|
return error("DisconnectInputs() : UpdateTxIndex failed");
|
|
}
|
|
}
|
|
|
|
// Remove transaction from index
|
|
// This can fail if a duplicate of this transaction was in a chain that got
|
|
// reorganized away. This is only possible if this transaction was completely
|
|
// spent, so erasing it would be a no-op anyway.
|
|
txdb.EraseTxIndex(*this);
|
|
|
|
return true;
|
|
}
|
|
|
|
|
|
bool CTransaction::FetchInputs(CTxDB& txdb, const map<uint256, CTxIndex>& mapTestPool,
|
|
bool fBlock, bool fMiner, MapPrevTx& inputsRet, bool& fInvalid)
|
|
{
|
|
// FetchInputs can return false either because we just haven't seen some inputs
|
|
// (in which case the transaction should be stored as an orphan)
|
|
// or because the transaction is malformed (in which case the transaction should
|
|
// be dropped). If tx is definitely invalid, fInvalid will be set to true.
|
|
fInvalid = false;
|
|
|
|
if (IsCoinBase())
|
|
return true; // Coinbase transactions have no inputs to fetch.
|
|
|
|
for (int i = 0; i < vin.size(); i++)
|
|
{
|
|
COutPoint prevout = vin[i].prevout;
|
|
if (inputsRet.count(prevout.hash))
|
|
continue; // Got it already
|
|
|
|
// Read txindex
|
|
CTxIndex& txindex = inputsRet[prevout.hash].first;
|
|
bool fFound = true;
|
|
if ((fBlock || fMiner) && mapTestPool.count(prevout.hash))
|
|
{
|
|
// Get txindex from current proposed changes
|
|
txindex = mapTestPool.find(prevout.hash)->second;
|
|
}
|
|
else
|
|
{
|
|
// Read txindex from txdb
|
|
fFound = txdb.ReadTxIndex(prevout.hash, txindex);
|
|
}
|
|
if (!fFound && (fBlock || fMiner))
|
|
return fMiner ? false : error("FetchInputs() : %s prev tx %s index entry not found", GetHash().ToString().substr(0,10).c_str(), prevout.hash.ToString().substr(0,10).c_str());
|
|
|
|
// Read txPrev
|
|
CTransaction& txPrev = inputsRet[prevout.hash].second;
|
|
if (!fFound || txindex.pos == CDiskTxPos(1,1,1))
|
|
{
|
|
// Get prev tx from single transactions in memory
|
|
CRITICAL_BLOCK(cs_mapTransactions)
|
|
{
|
|
if (!mapTransactions.count(prevout.hash))
|
|
return error("FetchInputs() : %s mapTransactions prev not found %s", GetHash().ToString().substr(0,10).c_str(), prevout.hash.ToString().substr(0,10).c_str());
|
|
txPrev = mapTransactions[prevout.hash];
|
|
}
|
|
if (!fFound)
|
|
txindex.vSpent.resize(txPrev.vout.size());
|
|
}
|
|
else
|
|
{
|
|
// Get prev tx from disk
|
|
if (!txPrev.ReadFromDisk(txindex.pos))
|
|
return error("FetchInputs() : %s ReadFromDisk prev tx %s failed", GetHash().ToString().substr(0,10).c_str(), prevout.hash.ToString().substr(0,10).c_str());
|
|
}
|
|
}
|
|
|
|
// Make sure all prevout.n's are valid:
|
|
for (int i = 0; i < vin.size(); i++)
|
|
{
|
|
const COutPoint prevout = vin[i].prevout;
|
|
assert(inputsRet.count(prevout.hash) != 0);
|
|
const CTxIndex& txindex = inputsRet[prevout.hash].first;
|
|
const CTransaction& txPrev = inputsRet[prevout.hash].second;
|
|
if (prevout.n >= txPrev.vout.size() || prevout.n >= txindex.vSpent.size())
|
|
{
|
|
// Revisit this if/when transaction replacement is implemented and allows
|
|
// adding inputs:
|
|
fInvalid = true;
|
|
return DoS(100, error("FetchInputs() : %s prevout.n out of range %d %d %d prev tx %s\n%s", GetHash().ToString().substr(0,10).c_str(), prevout.n, txPrev.vout.size(), txindex.vSpent.size(), prevout.hash.ToString().substr(0,10).c_str(), txPrev.ToString().c_str()));
|
|
}
|
|
}
|
|
|
|
return true;
|
|
}
|
|
|
|
const CTxOut& CTransaction::GetOutputFor(const CTxIn& input, const MapPrevTx& inputs) const
|
|
{
|
|
MapPrevTx::const_iterator mi = inputs.find(input.prevout.hash);
|
|
if (mi == inputs.end())
|
|
throw std::runtime_error("CTransaction::GetOutputFor() : prevout.hash not found");
|
|
|
|
const CTransaction& txPrev = (mi->second).second;
|
|
if (input.prevout.n >= txPrev.vout.size())
|
|
throw std::runtime_error("CTransaction::GetOutputFor() : prevout.n out of range");
|
|
|
|
return txPrev.vout[input.prevout.n];
|
|
}
|
|
|
|
int64 CTransaction::GetValueIn(const MapPrevTx& inputs) const
|
|
{
|
|
if (IsCoinBase())
|
|
return 0;
|
|
|
|
int64 nResult = 0;
|
|
for (int i = 0; i < vin.size(); i++)
|
|
{
|
|
nResult += GetOutputFor(vin[i], inputs).nValue;
|
|
}
|
|
return nResult;
|
|
|
|
}
|
|
|
|
int CTransaction::GetP2SHSigOpCount(const MapPrevTx& inputs) const
|
|
{
|
|
if (IsCoinBase())
|
|
return 0;
|
|
|
|
int nSigOps = 0;
|
|
for (int i = 0; i < vin.size(); i++)
|
|
{
|
|
const CTxOut& prevout = GetOutputFor(vin[i], inputs);
|
|
if (prevout.scriptPubKey.IsPayToScriptHash())
|
|
nSigOps += prevout.scriptPubKey.GetSigOpCount(vin[i].scriptSig);
|
|
}
|
|
return nSigOps;
|
|
}
|
|
|
|
bool CTransaction::ConnectInputs(MapPrevTx inputs,
|
|
map<uint256, CTxIndex>& mapTestPool, const CDiskTxPos& posThisTx,
|
|
const CBlockIndex* pindexBlock, bool fBlock, bool fMiner, bool fStrictPayToScriptHash)
|
|
{
|
|
// Take over previous transactions' spent pointers
|
|
// fBlock is true when this is called from AcceptBlock when a new best-block is added to the blockchain
|
|
// fMiner is true when called from the internal bitcoin miner
|
|
// ... both are false when called from CTransaction::AcceptToMemoryPool
|
|
if (!IsCoinBase())
|
|
{
|
|
int64 nValueIn = 0;
|
|
int64 nFees = 0;
|
|
for (int i = 0; i < vin.size(); i++)
|
|
{
|
|
COutPoint prevout = vin[i].prevout;
|
|
assert(inputs.count(prevout.hash) > 0);
|
|
CTxIndex& txindex = inputs[prevout.hash].first;
|
|
CTransaction& txPrev = inputs[prevout.hash].second;
|
|
|
|
if (prevout.n >= txPrev.vout.size() || prevout.n >= txindex.vSpent.size())
|
|
return DoS(100, error("ConnectInputs() : %s prevout.n out of range %d %d %d prev tx %s\n%s", GetHash().ToString().substr(0,10).c_str(), prevout.n, txPrev.vout.size(), txindex.vSpent.size(), prevout.hash.ToString().substr(0,10).c_str(), txPrev.ToString().c_str()));
|
|
|
|
// If prev is coinbase, check that it's matured
|
|
if (txPrev.IsCoinBase())
|
|
for (const CBlockIndex* pindex = pindexBlock; pindex && pindexBlock->nHeight - pindex->nHeight < COINBASE_MATURITY; pindex = pindex->pprev)
|
|
if (pindex->nBlockPos == txindex.pos.nBlockPos && pindex->nFile == txindex.pos.nFile)
|
|
return error("ConnectInputs() : tried to spend coinbase at depth %d", pindexBlock->nHeight - pindex->nHeight);
|
|
|
|
// Check for conflicts (double-spend)
|
|
// This doesn't trigger the DoS code on purpose; if it did, it would make it easier
|
|
// for an attacker to attempt to split the network.
|
|
if (!txindex.vSpent[prevout.n].IsNull())
|
|
return fMiner ? false : error("ConnectInputs() : %s prev tx already used at %s", GetHash().ToString().substr(0,10).c_str(), txindex.vSpent[prevout.n].ToString().c_str());
|
|
|
|
// Check for negative or overflow input values
|
|
nValueIn += txPrev.vout[prevout.n].nValue;
|
|
if (!MoneyRange(txPrev.vout[prevout.n].nValue) || !MoneyRange(nValueIn))
|
|
return DoS(100, error("ConnectInputs() : txin values out of range"));
|
|
|
|
// Skip ECDSA signature verification when connecting blocks (fBlock=true)
|
|
// before the last blockchain checkpoint. This is safe because block merkle hashes are
|
|
// still computed and checked, and any change will be caught at the next checkpoint.
|
|
if (!(fBlock && (nBestHeight < Checkpoints::GetTotalBlocksEstimate())))
|
|
{
|
|
// Verify signature
|
|
if (!VerifySignature(txPrev, *this, i, fStrictPayToScriptHash, 0))
|
|
{
|
|
// only during transition phase for P2SH: do not invoke anti-DoS code for
|
|
// potentially old clients relaying bad P2SH transactions
|
|
if (fStrictPayToScriptHash && VerifySignature(txPrev, *this, i, false, 0))
|
|
return error("ConnectInputs() : %s P2SH VerifySignature failed", GetHash().ToString().substr(0,10).c_str());
|
|
|
|
return DoS(100,error("ConnectInputs() : %s VerifySignature failed", GetHash().ToString().substr(0,10).c_str()));
|
|
}
|
|
}
|
|
|
|
// Mark outpoints as spent
|
|
txindex.vSpent[prevout.n] = posThisTx;
|
|
|
|
// Write back
|
|
if (fBlock || fMiner)
|
|
{
|
|
mapTestPool[prevout.hash] = txindex;
|
|
}
|
|
}
|
|
|
|
if (nValueIn < GetValueOut())
|
|
return DoS(100, error("ConnectInputs() : %s value in < value out", GetHash().ToString().substr(0,10).c_str()));
|
|
|
|
// Tally transaction fees
|
|
int64 nTxFee = nValueIn - GetValueOut();
|
|
if (nTxFee < 0)
|
|
return DoS(100, error("ConnectInputs() : %s nTxFee < 0", GetHash().ToString().substr(0,10).c_str()));
|
|
nFees += nTxFee;
|
|
if (!MoneyRange(nFees))
|
|
return DoS(100, error("ConnectInputs() : nFees out of range"));
|
|
}
|
|
|
|
return true;
|
|
}
|
|
|
|
|
|
bool CTransaction::ClientConnectInputs()
|
|
{
|
|
if (IsCoinBase())
|
|
return false;
|
|
|
|
// Take over previous transactions' spent pointers
|
|
CRITICAL_BLOCK(cs_mapTransactions)
|
|
{
|
|
int64 nValueIn = 0;
|
|
for (int i = 0; i < vin.size(); i++)
|
|
{
|
|
// Get prev tx from single transactions in memory
|
|
COutPoint prevout = vin[i].prevout;
|
|
if (!mapTransactions.count(prevout.hash))
|
|
return false;
|
|
CTransaction& txPrev = mapTransactions[prevout.hash];
|
|
|
|
if (prevout.n >= txPrev.vout.size())
|
|
return false;
|
|
|
|
// Verify signature
|
|
if (!VerifySignature(txPrev, *this, i, true, 0))
|
|
return error("ConnectInputs() : VerifySignature failed");
|
|
|
|
///// this is redundant with the mapNextTx stuff, not sure which I want to get rid of
|
|
///// this has to go away now that posNext is gone
|
|
// // Check for conflicts
|
|
// if (!txPrev.vout[prevout.n].posNext.IsNull())
|
|
// return error("ConnectInputs() : prev tx already used");
|
|
//
|
|
// // Flag outpoints as used
|
|
// txPrev.vout[prevout.n].posNext = posThisTx;
|
|
|
|
nValueIn += txPrev.vout[prevout.n].nValue;
|
|
|
|
if (!MoneyRange(txPrev.vout[prevout.n].nValue) || !MoneyRange(nValueIn))
|
|
return error("ClientConnectInputs() : txin values out of range");
|
|
}
|
|
if (GetValueOut() > nValueIn)
|
|
return false;
|
|
}
|
|
|
|
return true;
|
|
}
|
|
|
|
|
|
|
|
|
|
bool CBlock::DisconnectBlock(CTxDB& txdb, CBlockIndex* pindex)
|
|
{
|
|
// Disconnect in reverse order
|
|
for (int i = vtx.size()-1; i >= 0; i--)
|
|
if (!vtx[i].DisconnectInputs(txdb))
|
|
return false;
|
|
|
|
// Update block index on disk without changing it in memory.
|
|
// The memory index structure will be changed after the db commits.
|
|
if (pindex->pprev)
|
|
{
|
|
CDiskBlockIndex blockindexPrev(pindex->pprev);
|
|
blockindexPrev.hashNext = 0;
|
|
if (!txdb.WriteBlockIndex(blockindexPrev))
|
|
return error("DisconnectBlock() : WriteBlockIndex failed");
|
|
}
|
|
|
|
return true;
|
|
}
|
|
|
|
bool CBlock::ConnectBlock(CTxDB& txdb, CBlockIndex* pindex)
|
|
{
|
|
// Check it again in case a previous version let a bad block in
|
|
if (!CheckBlock())
|
|
return false;
|
|
|
|
// Do not allow blocks that contain transactions which 'overwrite' older transactions,
|
|
// unless those are already completely spent.
|
|
// If such overwrites are allowed, coinbases and transactions depending upon those
|
|
// can be duplicated to remove the ability to spend the first instance -- even after
|
|
// being sent to another address.
|
|
// See BIP30 and http://r6.ca/blog/20120206T005236Z.html for more information.
|
|
// This logic is not necessary for memory pool transactions, as AcceptToMemoryPool
|
|
// already refuses previously-known transaction id's entirely.
|
|
// This rule applies to all blocks whose timestamp is after March 15, 2012, 0:00 UTC.
|
|
// On testnet it is enabled as of February 20, 2012, 0:00 UTC.
|
|
if (pindex->nTime > 1331769600 || (fTestNet && pindex->nTime > 1329696000))
|
|
BOOST_FOREACH(CTransaction& tx, vtx)
|
|
{
|
|
CTxIndex txindexOld;
|
|
if (txdb.ReadTxIndex(tx.GetHash(), txindexOld))
|
|
BOOST_FOREACH(CDiskTxPos &pos, txindexOld.vSpent)
|
|
if (pos.IsNull())
|
|
return false;
|
|
}
|
|
|
|
// To avoid being on the short end of a block-chain split,
|
|
// don't do secondary validation of pay-to-script-hash transactions
|
|
// until blocks with timestamps after paytoscripthashtime (see init.cpp for default).
|
|
// This code can be removed once a super-majority of the network has upgraded.
|
|
int64 nEvalSwitchTime = GetArg("-paytoscripthashtime", std::numeric_limits<int64_t>::max());
|
|
bool fStrictPayToScriptHash = (pindex->nTime >= nEvalSwitchTime);
|
|
|
|
//// issue here: it doesn't know the version
|
|
unsigned int nTxPos = pindex->nBlockPos + ::GetSerializeSize(CBlock(), SER_DISK) - 1 + GetSizeOfCompactSize(vtx.size());
|
|
|
|
map<uint256, CTxIndex> mapQueuedChanges;
|
|
int64 nFees = 0;
|
|
int nSigOps = 0;
|
|
BOOST_FOREACH(CTransaction& tx, vtx)
|
|
{
|
|
nSigOps += tx.GetLegacySigOpCount();
|
|
if (nSigOps > MAX_BLOCK_SIGOPS)
|
|
return DoS(100, error("ConnectBlock() : too many sigops"));
|
|
|
|
CDiskTxPos posThisTx(pindex->nFile, pindex->nBlockPos, nTxPos);
|
|
nTxPos += ::GetSerializeSize(tx, SER_DISK);
|
|
|
|
MapPrevTx mapInputs;
|
|
if (!tx.IsCoinBase())
|
|
{
|
|
bool fInvalid;
|
|
if (!tx.FetchInputs(txdb, mapQueuedChanges, true, false, mapInputs, fInvalid))
|
|
return false;
|
|
|
|
if (fStrictPayToScriptHash)
|
|
{
|
|
// Add in sigops done by pay-to-script-hash inputs;
|
|
// this is to prevent a "rogue miner" from creating
|
|
// an incredibly-expensive-to-validate block.
|
|
nSigOps += tx.GetP2SHSigOpCount(mapInputs);
|
|
if (nSigOps > MAX_BLOCK_SIGOPS)
|
|
return DoS(100, error("ConnectBlock() : too many sigops"));
|
|
}
|
|
|
|
nFees += tx.GetValueIn(mapInputs)-tx.GetValueOut();
|
|
|
|
if (!tx.ConnectInputs(mapInputs, mapQueuedChanges, posThisTx, pindex, true, false, fStrictPayToScriptHash))
|
|
return false;
|
|
}
|
|
|
|
mapQueuedChanges[tx.GetHash()] = CTxIndex(posThisTx, tx.vout.size());
|
|
}
|
|
|
|
// Write queued txindex changes
|
|
for (map<uint256, CTxIndex>::iterator mi = mapQueuedChanges.begin(); mi != mapQueuedChanges.end(); ++mi)
|
|
{
|
|
if (!txdb.UpdateTxIndex((*mi).first, (*mi).second))
|
|
return error("ConnectBlock() : UpdateTxIndex failed");
|
|
}
|
|
|
|
if (vtx[0].GetValueOut() > GetBlockValue(pindex->nHeight, nFees))
|
|
return false;
|
|
|
|
// Update block index on disk without changing it in memory.
|
|
// The memory index structure will be changed after the db commits.
|
|
if (pindex->pprev)
|
|
{
|
|
CDiskBlockIndex blockindexPrev(pindex->pprev);
|
|
blockindexPrev.hashNext = pindex->GetBlockHash();
|
|
if (!txdb.WriteBlockIndex(blockindexPrev))
|
|
return error("ConnectBlock() : WriteBlockIndex failed");
|
|
}
|
|
|
|
// Watch for transactions paying to me
|
|
BOOST_FOREACH(CTransaction& tx, vtx)
|
|
SyncWithWallets(tx, this, true);
|
|
|
|
return true;
|
|
}
|
|
|
|
bool static Reorganize(CTxDB& txdb, CBlockIndex* pindexNew)
|
|
{
|
|
printf("REORGANIZE\n");
|
|
|
|
// Find the fork
|
|
CBlockIndex* pfork = pindexBest;
|
|
CBlockIndex* plonger = pindexNew;
|
|
while (pfork != plonger)
|
|
{
|
|
while (plonger->nHeight > pfork->nHeight)
|
|
if (!(plonger = plonger->pprev))
|
|
return error("Reorganize() : plonger->pprev is null");
|
|
if (pfork == plonger)
|
|
break;
|
|
if (!(pfork = pfork->pprev))
|
|
return error("Reorganize() : pfork->pprev is null");
|
|
}
|
|
|
|
// List of what to disconnect
|
|
vector<CBlockIndex*> vDisconnect;
|
|
for (CBlockIndex* pindex = pindexBest; pindex != pfork; pindex = pindex->pprev)
|
|
vDisconnect.push_back(pindex);
|
|
|
|
// List of what to connect
|
|
vector<CBlockIndex*> vConnect;
|
|
for (CBlockIndex* pindex = pindexNew; pindex != pfork; pindex = pindex->pprev)
|
|
vConnect.push_back(pindex);
|
|
reverse(vConnect.begin(), vConnect.end());
|
|
|
|
// Disconnect shorter branch
|
|
vector<CTransaction> vResurrect;
|
|
BOOST_FOREACH(CBlockIndex* pindex, vDisconnect)
|
|
{
|
|
CBlock block;
|
|
if (!block.ReadFromDisk(pindex))
|
|
return error("Reorganize() : ReadFromDisk for disconnect failed");
|
|
if (!block.DisconnectBlock(txdb, pindex))
|
|
return error("Reorganize() : DisconnectBlock failed");
|
|
|
|
// Queue memory transactions to resurrect
|
|
BOOST_FOREACH(const CTransaction& tx, block.vtx)
|
|
if (!tx.IsCoinBase())
|
|
vResurrect.push_back(tx);
|
|
}
|
|
|
|
// Connect longer branch
|
|
vector<CTransaction> vDelete;
|
|
for (int i = 0; i < vConnect.size(); i++)
|
|
{
|
|
CBlockIndex* pindex = vConnect[i];
|
|
CBlock block;
|
|
if (!block.ReadFromDisk(pindex))
|
|
return error("Reorganize() : ReadFromDisk for connect failed");
|
|
if (!block.ConnectBlock(txdb, pindex))
|
|
{
|
|
// Invalid block
|
|
txdb.TxnAbort();
|
|
return error("Reorganize() : ConnectBlock failed");
|
|
}
|
|
|
|
// Queue memory transactions to delete
|
|
BOOST_FOREACH(const CTransaction& tx, block.vtx)
|
|
vDelete.push_back(tx);
|
|
}
|
|
if (!txdb.WriteHashBestChain(pindexNew->GetBlockHash()))
|
|
return error("Reorganize() : WriteHashBestChain failed");
|
|
|
|
// Make sure it's successfully written to disk before changing memory structure
|
|
if (!txdb.TxnCommit())
|
|
return error("Reorganize() : TxnCommit failed");
|
|
|
|
// Disconnect shorter branch
|
|
BOOST_FOREACH(CBlockIndex* pindex, vDisconnect)
|
|
if (pindex->pprev)
|
|
pindex->pprev->pnext = NULL;
|
|
|
|
// Connect longer branch
|
|
BOOST_FOREACH(CBlockIndex* pindex, vConnect)
|
|
if (pindex->pprev)
|
|
pindex->pprev->pnext = pindex;
|
|
|
|
// Resurrect memory transactions that were in the disconnected branch
|
|
BOOST_FOREACH(CTransaction& tx, vResurrect)
|
|
tx.AcceptToMemoryPool(txdb, false);
|
|
|
|
// Delete redundant memory transactions that are in the connected branch
|
|
BOOST_FOREACH(CTransaction& tx, vDelete)
|
|
tx.RemoveFromMemoryPool();
|
|
|
|
printf("REORGANIZE: Disconnected %i blocks; %s..%s\n", vDisconnect.size(), pfork->GetBlockHash().ToString().substr(0,20).c_str(), pindexBest->GetBlockHash().ToString().substr(0,20).c_str());
|
|
printf("REORGANIZE: Connected %i blocks; %s..%s\n", vConnect.size(), pfork->GetBlockHash().ToString().substr(0,20).c_str(), pindexNew->GetBlockHash().ToString().substr(0,20).c_str());
|
|
|
|
return true;
|
|
}
|
|
|
|
|
|
static void
|
|
runCommand(std::string strCommand)
|
|
{
|
|
int nErr = ::system(strCommand.c_str());
|
|
if (nErr)
|
|
printf("runCommand error: system(%s) returned %d\n", strCommand.c_str(), nErr);
|
|
}
|
|
|
|
// Called from inside SetBestChain: attaches a block to the new best chain being built
|
|
bool CBlock::SetBestChainInner(CTxDB& txdb, CBlockIndex *pindexNew)
|
|
{
|
|
uint256 hash = GetHash();
|
|
|
|
// Adding to current best branch
|
|
if (!ConnectBlock(txdb, pindexNew) || !txdb.WriteHashBestChain(hash))
|
|
{
|
|
txdb.TxnAbort();
|
|
InvalidChainFound(pindexNew);
|
|
return false;
|
|
}
|
|
if (!txdb.TxnCommit())
|
|
return error("SetBestChain() : TxnCommit failed");
|
|
|
|
// Add to current best branch
|
|
pindexNew->pprev->pnext = pindexNew;
|
|
|
|
// Delete redundant memory transactions
|
|
BOOST_FOREACH(CTransaction& tx, vtx)
|
|
tx.RemoveFromMemoryPool();
|
|
|
|
return true;
|
|
}
|
|
|
|
bool CBlock::SetBestChain(CTxDB& txdb, CBlockIndex* pindexNew)
|
|
{
|
|
uint256 hash = GetHash();
|
|
|
|
txdb.TxnBegin();
|
|
if (pindexGenesisBlock == NULL && hash == hashGenesisBlock)
|
|
{
|
|
txdb.WriteHashBestChain(hash);
|
|
if (!txdb.TxnCommit())
|
|
return error("SetBestChain() : TxnCommit failed");
|
|
pindexGenesisBlock = pindexNew;
|
|
}
|
|
else if (hashPrevBlock == hashBestChain)
|
|
{
|
|
if (!SetBestChainInner(txdb, pindexNew))
|
|
return error("SetBestChain() : SetBestChainInner failed");
|
|
}
|
|
else
|
|
{
|
|
// the first block in the new chain that will cause it to become the new best chain
|
|
CBlockIndex *pindexIntermediate = pindexNew;
|
|
|
|
// list of blocks that need to be connected afterwards
|
|
std::vector<CBlockIndex*> vpindexSecondary;
|
|
|
|
// Reorganize is costly in terms of db load, as it works in a single db transaction.
|
|
// Try to limit how much needs to be done inside
|
|
while (pindexIntermediate->pprev && pindexIntermediate->pprev->bnChainWork > pindexBest->bnChainWork)
|
|
{
|
|
vpindexSecondary.push_back(pindexIntermediate);
|
|
pindexIntermediate = pindexIntermediate->pprev;
|
|
}
|
|
|
|
if (!vpindexSecondary.empty())
|
|
printf("Postponing %i reconnects\n", vpindexSecondary.size());
|
|
|
|
// Switch to new best branch
|
|
if (!Reorganize(txdb, pindexIntermediate))
|
|
{
|
|
txdb.TxnAbort();
|
|
InvalidChainFound(pindexNew);
|
|
return error("SetBestChain() : Reorganize failed");
|
|
}
|
|
|
|
// Connect further blocks
|
|
BOOST_REVERSE_FOREACH(CBlockIndex *pindex, vpindexSecondary)
|
|
{
|
|
CBlock block;
|
|
if (!block.ReadFromDisk(pindex))
|
|
{
|
|
printf("SetBestChain() : ReadFromDisk failed\n");
|
|
break;
|
|
}
|
|
txdb.TxnBegin();
|
|
// errors now are not fatal, we still did a reorganisation to a new chain in a valid way
|
|
if (!block.SetBestChainInner(txdb, pindex))
|
|
break;
|
|
}
|
|
}
|
|
|
|
// Update best block in wallet (so we can detect restored wallets)
|
|
bool fIsInitialDownload = IsInitialBlockDownload();
|
|
if (!fIsInitialDownload)
|
|
{
|
|
const CBlockLocator locator(pindexNew);
|
|
::SetBestChain(locator);
|
|
}
|
|
|
|
// New best block
|
|
hashBestChain = hash;
|
|
pindexBest = pindexNew;
|
|
nBestHeight = pindexBest->nHeight;
|
|
bnBestChainWork = pindexNew->bnChainWork;
|
|
nTimeBestReceived = GetTime();
|
|
nTransactionsUpdated++;
|
|
printf("SetBestChain: new best=%s height=%d work=%s\n", hashBestChain.ToString().substr(0,20).c_str(), nBestHeight, bnBestChainWork.ToString().c_str());
|
|
|
|
std::string strCmd = GetArg("-blocknotify", "");
|
|
|
|
if (!fIsInitialDownload && !strCmd.empty())
|
|
{
|
|
boost::replace_all(strCmd, "%s", hashBestChain.GetHex());
|
|
boost::thread t(runCommand, strCmd); // thread runs free
|
|
}
|
|
|
|
return true;
|
|
}
|
|
|
|
|
|
bool CBlock::AddToBlockIndex(unsigned int nFile, unsigned int nBlockPos)
|
|
{
|
|
// Check for duplicate
|
|
uint256 hash = GetHash();
|
|
if (mapBlockIndex.count(hash))
|
|
return error("AddToBlockIndex() : %s already exists", hash.ToString().substr(0,20).c_str());
|
|
|
|
// Construct new block index object
|
|
CBlockIndex* pindexNew = new CBlockIndex(nFile, nBlockPos, *this);
|
|
if (!pindexNew)
|
|
return error("AddToBlockIndex() : new CBlockIndex failed");
|
|
map<uint256, CBlockIndex*>::iterator mi = mapBlockIndex.insert(make_pair(hash, pindexNew)).first;
|
|
pindexNew->phashBlock = &((*mi).first);
|
|
map<uint256, CBlockIndex*>::iterator miPrev = mapBlockIndex.find(hashPrevBlock);
|
|
if (miPrev != mapBlockIndex.end())
|
|
{
|
|
pindexNew->pprev = (*miPrev).second;
|
|
pindexNew->nHeight = pindexNew->pprev->nHeight + 1;
|
|
}
|
|
pindexNew->bnChainWork = (pindexNew->pprev ? pindexNew->pprev->bnChainWork : 0) + pindexNew->GetBlockWork();
|
|
|
|
CTxDB txdb;
|
|
txdb.TxnBegin();
|
|
txdb.WriteBlockIndex(CDiskBlockIndex(pindexNew));
|
|
if (!txdb.TxnCommit())
|
|
return false;
|
|
|
|
// New best
|
|
if (pindexNew->bnChainWork > bnBestChainWork)
|
|
if (!SetBestChain(txdb, pindexNew))
|
|
return false;
|
|
|
|
txdb.Close();
|
|
|
|
if (pindexNew == pindexBest)
|
|
{
|
|
// Notify UI to display prev block's coinbase if it was ours
|
|
static uint256 hashPrevBestCoinBase;
|
|
UpdatedTransaction(hashPrevBestCoinBase);
|
|
hashPrevBestCoinBase = vtx[0].GetHash();
|
|
}
|
|
|
|
MainFrameRepaint();
|
|
return true;
|
|
}
|
|
|
|
|
|
|
|
|
|
bool CBlock::CheckBlock() const
|
|
{
|
|
// These are checks that are independent of context
|
|
// that can be verified before saving an orphan block.
|
|
|
|
// Size limits
|
|
if (vtx.empty() || vtx.size() > MAX_BLOCK_SIZE || ::GetSerializeSize(*this, SER_NETWORK) > MAX_BLOCK_SIZE)
|
|
return DoS(100, error("CheckBlock() : size limits failed"));
|
|
|
|
// Check proof of work matches claimed amount
|
|
if (!CheckProofOfWork(GetHash(), nBits))
|
|
return DoS(50, error("CheckBlock() : proof of work failed"));
|
|
|
|
// Check timestamp
|
|
if (GetBlockTime() > GetAdjustedTime() + 2 * 60 * 60)
|
|
return error("CheckBlock() : block timestamp too far in the future");
|
|
|
|
// First transaction must be coinbase, the rest must not be
|
|
if (vtx.empty() || !vtx[0].IsCoinBase())
|
|
return DoS(100, error("CheckBlock() : first tx is not coinbase"));
|
|
for (int i = 1; i < vtx.size(); i++)
|
|
if (vtx[i].IsCoinBase())
|
|
return DoS(100, error("CheckBlock() : more than one coinbase"));
|
|
|
|
// Check transactions
|
|
BOOST_FOREACH(const CTransaction& tx, vtx)
|
|
if (!tx.CheckTransaction())
|
|
return DoS(tx.nDoS, error("CheckBlock() : CheckTransaction failed"));
|
|
|
|
int nSigOps = 0;
|
|
BOOST_FOREACH(const CTransaction& tx, vtx)
|
|
{
|
|
nSigOps += tx.GetLegacySigOpCount();
|
|
}
|
|
if (nSigOps > MAX_BLOCK_SIGOPS)
|
|
return DoS(100, error("CheckBlock() : out-of-bounds SigOpCount"));
|
|
|
|
// Check merkleroot
|
|
if (hashMerkleRoot != BuildMerkleTree())
|
|
return DoS(100, error("CheckBlock() : hashMerkleRoot mismatch"));
|
|
|
|
return true;
|
|
}
|
|
|
|
bool CBlock::AcceptBlock()
|
|
{
|
|
// Check for duplicate
|
|
uint256 hash = GetHash();
|
|
if (mapBlockIndex.count(hash))
|
|
return error("AcceptBlock() : block already in mapBlockIndex");
|
|
|
|
// Get prev block index
|
|
map<uint256, CBlockIndex*>::iterator mi = mapBlockIndex.find(hashPrevBlock);
|
|
if (mi == mapBlockIndex.end())
|
|
return DoS(10, error("AcceptBlock() : prev block not found"));
|
|
CBlockIndex* pindexPrev = (*mi).second;
|
|
int nHeight = pindexPrev->nHeight+1;
|
|
|
|
// Check proof of work
|
|
if (nBits != GetNextWorkRequired(pindexPrev, this))
|
|
return DoS(100, error("AcceptBlock() : incorrect proof of work"));
|
|
|
|
// Check timestamp against prev
|
|
if (GetBlockTime() <= pindexPrev->GetMedianTimePast())
|
|
return error("AcceptBlock() : block's timestamp is too early");
|
|
|
|
// Check that all transactions are finalized
|
|
BOOST_FOREACH(const CTransaction& tx, vtx)
|
|
if (!tx.IsFinal(nHeight, GetBlockTime()))
|
|
return DoS(10, error("AcceptBlock() : contains a non-final transaction"));
|
|
|
|
// Check that the block chain matches the known block chain up to a checkpoint
|
|
if (!Checkpoints::CheckBlock(nHeight, hash))
|
|
return DoS(100, error("AcceptBlock() : rejected by checkpoint lockin at %d", nHeight));
|
|
|
|
// Write block to history file
|
|
if (!CheckDiskSpace(::GetSerializeSize(*this, SER_DISK)))
|
|
return error("AcceptBlock() : out of disk space");
|
|
unsigned int nFile = -1;
|
|
unsigned int nBlockPos = 0;
|
|
if (!WriteToDisk(nFile, nBlockPos))
|
|
return error("AcceptBlock() : WriteToDisk failed");
|
|
if (!AddToBlockIndex(nFile, nBlockPos))
|
|
return error("AcceptBlock() : AddToBlockIndex failed");
|
|
|
|
// Relay inventory, but don't relay old inventory during initial block download
|
|
int nBlockEstimate = Checkpoints::GetTotalBlocksEstimate();
|
|
if (hashBestChain == hash)
|
|
CRITICAL_BLOCK(cs_vNodes)
|
|
BOOST_FOREACH(CNode* pnode, vNodes)
|
|
if (nBestHeight > (pnode->nStartingHeight != -1 ? pnode->nStartingHeight - 2000 : nBlockEstimate))
|
|
pnode->PushInventory(CInv(MSG_BLOCK, hash));
|
|
|
|
return true;
|
|
}
|
|
|
|
bool ProcessBlock(CNode* pfrom, CBlock* pblock)
{
    // Check for duplicate
    uint256 hash = pblock->GetHash();
    if (mapBlockIndex.count(hash))
        return error("ProcessBlock() : already have block %d %s", mapBlockIndex[hash]->nHeight, hash.ToString().substr(0,20).c_str());
    if (mapOrphanBlocks.count(hash))
        return error("ProcessBlock() : already have block (orphan) %s", hash.ToString().substr(0,20).c_str());

    // Preliminary checks
    if (!pblock->CheckBlock())
        return error("ProcessBlock() : CheckBlock FAILED");

    CBlockIndex* pcheckpoint = Checkpoints::GetLastCheckpoint(mapBlockIndex);
    if (pcheckpoint && pblock->hashPrevBlock != hashBestChain)
    {
        // Extra checks to prevent "fill up memory by spamming with bogus blocks"
        int64 deltaTime = pblock->GetBlockTime() - pcheckpoint->nTime;
        if (deltaTime < 0)
        {
            if (pfrom)
                pfrom->Misbehaving(100);
            return error("ProcessBlock() : block with timestamp before last checkpoint");
        }
        // Require at least the minimum proof-of-work that could legitimately have
        // accumulated since the last checkpoint, given the elapsed time
        CBigNum bnNewBlock;
        bnNewBlock.SetCompact(pblock->nBits);
        CBigNum bnRequired;
        bnRequired.SetCompact(ComputeMinWork(pcheckpoint->nBits, deltaTime));
        if (bnNewBlock > bnRequired)
        {
            if (pfrom)
                pfrom->Misbehaving(100);
            return error("ProcessBlock() : block with too little proof-of-work");
        }
    }

    // If we don't already have its previous block, shunt it off to the holding area until we get it
    if (!mapBlockIndex.count(pblock->hashPrevBlock))
    {
        printf("ProcessBlock: ORPHAN BLOCK, prev=%s\n", pblock->hashPrevBlock.ToString().substr(0,20).c_str());
        CBlock* pblock2 = new CBlock(*pblock);
        mapOrphanBlocks.insert(make_pair(hash, pblock2));
        mapOrphanBlocksByPrev.insert(make_pair(pblock2->hashPrevBlock, pblock2));

        // Ask this guy to fill in what we're missing
        if (pfrom)
            pfrom->PushGetBlocks(pindexBest, GetOrphanRoot(pblock2));
        return true;
    }

    // Store to disk
    if (!pblock->AcceptBlock())
        return error("ProcessBlock() : AcceptBlock FAILED");

    // Recursively process any orphan blocks that depended on this one
    vector<uint256> vWorkQueue;
    vWorkQueue.push_back(hash);
    for (int i = 0; i < vWorkQueue.size(); i++)
    {
        uint256 hashPrev = vWorkQueue[i];
        for (multimap<uint256, CBlock*>::iterator mi = mapOrphanBlocksByPrev.lower_bound(hashPrev);
             mi != mapOrphanBlocksByPrev.upper_bound(hashPrev);
             ++mi)
        {
            CBlock* pblockOrphan = (*mi).second;
            if (pblockOrphan->AcceptBlock())
                vWorkQueue.push_back(pblockOrphan->GetHash());
            mapOrphanBlocks.erase(pblockOrphan->GetHash());
            delete pblockOrphan;
        }
        mapOrphanBlocksByPrev.erase(hashPrev);
    }

    printf("ProcessBlock: ACCEPTED\n");
    return true;
}

bool CheckDiskSpace(uint64 nAdditionalBytes)
{
    uint64 nFreeBytesAvailable = filesystem::space(GetDataDir()).available;

    // Check for 15MB because database could create another 10MB log file at any time
    if (nFreeBytesAvailable < (uint64)15000000 + nAdditionalBytes)
    {
        fShutdown = true;
        string strMessage = _("Warning: Disk space is low ");
        strMiscWarning = strMessage;
        printf("*** %s\n", strMessage.c_str());
        ThreadSafeMessageBox(strMessage, "Bitcoin", wxOK | wxICON_EXCLAMATION);
        CreateThread(Shutdown, NULL);
        return false;
    }
    return true;
}

FILE* OpenBlockFile(unsigned int nFile, unsigned int nBlockPos, const char* pszMode)
{
    if (nFile == -1)
        return NULL;
    FILE* file = fopen(strprintf("%s/blk%04d.dat", GetDataDir().c_str(), nFile).c_str(), pszMode);
    if (!file)
        return NULL;
    if (nBlockPos != 0 && !strchr(pszMode, 'a') && !strchr(pszMode, 'w'))
    {
        if (fseek(file, nBlockPos, SEEK_SET) != 0)
        {
            fclose(file);
            return NULL;
        }
    }
    return file;
}

static unsigned int nCurrentBlockFile = 1;

FILE* AppendBlockFile(unsigned int& nFileRet)
{
    nFileRet = 0;
    loop
    {
        FILE* file = OpenBlockFile(nCurrentBlockFile, 0, "ab");
        if (!file)
            return NULL;
        if (fseek(file, 0, SEEK_END) != 0)
            return NULL;
        // FAT32 filesize max 4GB, fseek and ftell max 2GB, so we must stay under 2GB
        if (ftell(file) < 0x7F000000 - MAX_SIZE)
        {
            nFileRet = nCurrentBlockFile;
            return file;
        }
        fclose(file);
        nCurrentBlockFile++;
    }
}

bool LoadBlockIndex(bool fAllowNew)
|
|
{
|
|
if (fTestNet)
|
|
{
|
|
hashGenesisBlock = uint256("0x00000007199508e34a9ff81e6ec0c477a4cccff2a4767a8eee39c11db367b008");
|
|
bnProofOfWorkLimit = CBigNum(~uint256(0) >> 28);
|
|
pchMessageStart[0] = 0xfa;
|
|
pchMessageStart[1] = 0xbf;
|
|
pchMessageStart[2] = 0xb5;
|
|
pchMessageStart[3] = 0xda;
|
|
}
|
|
|
|
//
|
|
// Load block index
|
|
//
|
|
CTxDB txdb("cr");
|
|
if (!txdb.LoadBlockIndex())
|
|
return false;
|
|
txdb.Close();
|
|
|
|
//
|
|
// Init with genesis block
|
|
//
|
|
if (mapBlockIndex.empty())
|
|
{
|
|
if (!fAllowNew)
|
|
return false;
|
|
|
|
// Genesis Block:
|
|
// CBlock(hash=000000000019d6, ver=1, hashPrevBlock=00000000000000, hashMerkleRoot=4a5e1e, nTime=1231006505, nBits=1d00ffff, nNonce=2083236893, vtx=1)
|
|
// CTransaction(hash=4a5e1e, ver=1, vin.size=1, vout.size=1, nLockTime=0)
|
|
// CTxIn(COutPoint(000000, -1), coinbase 04ffff001d0104455468652054696d65732030332f4a616e2f32303039204368616e63656c6c6f72206f6e206272696e6b206f66207365636f6e64206261696c6f757420666f722062616e6b73)
|
|
// CTxOut(nValue=50.00000000, scriptPubKey=0x5F1DF16B2B704C8A578D0B)
|
|
// vMerkleTree: 4a5e1e
|
|
|
|
// Genesis block
|
|
const char* pszTimestamp = "The Times 03/Jan/2009 Chancellor on brink of second bailout for banks";
|
|
CTransaction txNew;
|
|
txNew.vin.resize(1);
|
|
txNew.vout.resize(1);
|
|
txNew.vin[0].scriptSig = CScript() << 486604799 << CBigNum(4) << vector<unsigned char>((const unsigned char*)pszTimestamp, (const unsigned char*)pszTimestamp + strlen(pszTimestamp));
|
|
txNew.vout[0].nValue = 50 * COIN;
|
|
txNew.vout[0].scriptPubKey = CScript() << ParseHex("04678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1f61deb649f6bc3f4cef38c4f35504e51ec112de5c384df7ba0b8d578a4c702b6bf11d5f") << OP_CHECKSIG;
|
|
CBlock block;
|
|
block.vtx.push_back(txNew);
|
|
block.hashPrevBlock = 0;
|
|
block.hashMerkleRoot = block.BuildMerkleTree();
|
|
block.nVersion = 1;
|
|
block.nTime = 1231006505;
|
|
block.nBits = 0x1d00ffff;
|
|
block.nNonce = 2083236893;
|
|
|
|
if (fTestNet)
|
|
{
|
|
block.nTime = 1296688602;
|
|
block.nBits = 0x1d07fff8;
|
|
block.nNonce = 384568319;
|
|
}
|
|
|
|
//// debug print
|
|
printf("%s\n", block.GetHash().ToString().c_str());
|
|
printf("%s\n", hashGenesisBlock.ToString().c_str());
|
|
printf("%s\n", block.hashMerkleRoot.ToString().c_str());
|
|
assert(block.hashMerkleRoot == uint256("0x4a5e1e4baab89f3a32518a88c31bc87f618f76673e2cc77ab2127b7afdeda33b"));
|
|
block.print();
|
|
assert(block.GetHash() == hashGenesisBlock);
|
|
|
|
// Start new block file
|
|
unsigned int nFile;
|
|
unsigned int nBlockPos;
|
|
if (!block.WriteToDisk(nFile, nBlockPos))
|
|
return error("LoadBlockIndex() : writing genesis block to disk failed");
|
|
if (!block.AddToBlockIndex(nFile, nBlockPos))
|
|
return error("LoadBlockIndex() : genesis block not accepted");
|
|
}
|
|
|
|
return true;
|
|
}
|
|
|
|
|
|
|
|
void PrintBlockTree()
|
|
{
|
|
// precompute tree structure
|
|
map<CBlockIndex*, vector<CBlockIndex*> > mapNext;
|
|
for (map<uint256, CBlockIndex*>::iterator mi = mapBlockIndex.begin(); mi != mapBlockIndex.end(); ++mi)
|
|
{
|
|
CBlockIndex* pindex = (*mi).second;
|
|
mapNext[pindex->pprev].push_back(pindex);
|
|
// test
|
|
//while (rand() % 3 == 0)
|
|
// mapNext[pindex->pprev].push_back(pindex);
|
|
}
|
|
|
|
vector<pair<int, CBlockIndex*> > vStack;
|
|
vStack.push_back(make_pair(0, pindexGenesisBlock));
|
|
|
|
int nPrevCol = 0;
|
|
while (!vStack.empty())
|
|
{
|
|
int nCol = vStack.back().first;
|
|
CBlockIndex* pindex = vStack.back().second;
|
|
vStack.pop_back();
|
|
|
|
// print split or gap
|
|
if (nCol > nPrevCol)
|
|
{
|
|
for (int i = 0; i < nCol-1; i++)
|
|
printf("| ");
|
|
printf("|\\\n");
|
|
}
|
|
else if (nCol < nPrevCol)
|
|
{
|
|
for (int i = 0; i < nCol; i++)
|
|
printf("| ");
|
|
printf("|\n");
|
|
}
|
|
nPrevCol = nCol;
|
|
|
|
// print columns
|
|
for (int i = 0; i < nCol; i++)
|
|
printf("| ");
|
|
|
|
// print item
|
|
CBlock block;
|
|
block.ReadFromDisk(pindex);
|
|
printf("%d (%u,%u) %s %s tx %d",
|
|
pindex->nHeight,
|
|
pindex->nFile,
|
|
pindex->nBlockPos,
|
|
block.GetHash().ToString().substr(0,20).c_str(),
|
|
DateTimeStrFormat("%x %H:%M:%S", block.GetBlockTime()).c_str(),
|
|
block.vtx.size());
|
|
|
|
PrintWallets(block);
|
|
|
|
// put the main timechain first
|
|
vector<CBlockIndex*>& vNext = mapNext[pindex];
|
|
for (int i = 0; i < vNext.size(); i++)
|
|
{
|
|
if (vNext[i]->pnext)
|
|
{
|
|
swap(vNext[0], vNext[i]);
|
|
break;
|
|
}
|
|
}
|
|
|
|
// iterate children
|
|
for (int i = 0; i < vNext.size(); i++)
|
|
vStack.push_back(make_pair(nCol+i, vNext[i]));
|
|
}
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
//////////////////////////////////////////////////////////////////////////////
|
|
//
|
|
// CAlert
|
|
//
|
|
|
|
map<uint256, CAlert> mapAlerts;
|
|
CCriticalSection cs_mapAlerts;
|
|
|
|
string GetWarnings(string strFor)
|
|
{
|
|
int nPriority = 0;
|
|
string strStatusBar;
|
|
string strRPC;
|
|
if (GetBoolArg("-testsafemode"))
|
|
strRPC = "test";
|
|
|
|
// Misc warnings like out of disk space and clock is wrong
|
|
if (strMiscWarning != "")
|
|
{
|
|
nPriority = 1000;
|
|
strStatusBar = strMiscWarning;
|
|
}
|
|
|
|
// Longer invalid proof-of-work chain
|
|
if (pindexBest && bnBestInvalidWork > bnBestChainWork + pindexBest->GetBlockWork() * 6)
|
|
{
|
|
nPriority = 2000;
|
|
strStatusBar = strRPC = "WARNING: Displayed transactions may not be correct! You may need to upgrade, or other nodes may need to upgrade.";
|
|
}
|
|
|
|
// Alerts
|
|
CRITICAL_BLOCK(cs_mapAlerts)
|
|
{
|
|
BOOST_FOREACH(PAIRTYPE(const uint256, CAlert)& item, mapAlerts)
|
|
{
|
|
const CAlert& alert = item.second;
|
|
if (alert.AppliesToMe() && alert.nPriority > nPriority)
|
|
{
|
|
nPriority = alert.nPriority;
|
|
strStatusBar = alert.strStatusBar;
|
|
}
|
|
}
|
|
}
|
|
|
|
if (strFor == "statusbar")
|
|
return strStatusBar;
|
|
else if (strFor == "rpc")
|
|
return strRPC;
|
|
assert(!"GetWarnings() : invalid parameter");
|
|
return "error";
|
|
}
|
|
|
|
bool CAlert::ProcessAlert()
|
|
{
|
|
if (!CheckSignature())
|
|
return false;
|
|
if (!IsInEffect())
|
|
return false;
|
|
|
|
CRITICAL_BLOCK(cs_mapAlerts)
|
|
{
|
|
// Cancel previous alerts
|
|
for (map<uint256, CAlert>::iterator mi = mapAlerts.begin(); mi != mapAlerts.end();)
|
|
{
|
|
const CAlert& alert = (*mi).second;
|
|
if (Cancels(alert))
|
|
{
|
|
printf("cancelling alert %d\n", alert.nID);
|
|
mapAlerts.erase(mi++);
|
|
}
|
|
else if (!alert.IsInEffect())
|
|
{
|
|
printf("expiring alert %d\n", alert.nID);
|
|
mapAlerts.erase(mi++);
|
|
}
|
|
else
|
|
mi++;
|
|
}
|
|
|
|
// Check if this alert has been cancelled
|
|
BOOST_FOREACH(PAIRTYPE(const uint256, CAlert)& item, mapAlerts)
|
|
{
|
|
const CAlert& alert = item.second;
|
|
if (alert.Cancels(*this))
|
|
{
|
|
printf("alert already cancelled by %d\n", alert.nID);
|
|
return false;
|
|
}
|
|
}
|
|
|
|
// Add to mapAlerts
|
|
mapAlerts.insert(make_pair(GetHash(), *this));
|
|
}
|
|
|
|
printf("accepted alert %d, AppliesToMe()=%d\n", nID, AppliesToMe());
|
|
MainFrameRepaint();
|
|
return true;
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
//////////////////////////////////////////////////////////////////////////////
//
// Messages
//


bool static AlreadyHave(CTxDB& txdb, const CInv& inv)
{
    switch (inv.type)
    {
    case MSG_TX:    return mapTransactions.count(inv.hash) || mapOrphanTransactions.count(inv.hash) || txdb.ContainsTx(inv.hash);
    case MSG_BLOCK: return mapBlockIndex.count(inv.hash) || mapOrphanBlocks.count(inv.hash);
    }
    // Don't know what it is, just say we already got one
    return true;
}


// The message start string is designed to be unlikely to occur in normal data.
// The characters are rarely used upper ascii, not valid as UTF-8, and produce
// a large 4-byte int at any alignment.
unsigned char pchMessageStart[4] = { 0xf9, 0xbe, 0xb4, 0xd9 };
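// Note: on testnet these magic bytes are overridden to fa bf b5 da by
// LoadBlockIndex() (see the fTestNet branch above), so the two networks
// never parse each other's message streams.
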
bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv)
|
|
{
|
|
static map<CService, vector<unsigned char> > mapReuseKey;
|
|
RandAddSeedPerfmon();
|
|
if (fDebug) {
|
|
printf("%s ", DateTimeStrFormat("%x %H:%M:%S", GetTime()).c_str());
|
|
printf("received: %s (%d bytes)\n", strCommand.c_str(), vRecv.size());
|
|
}
|
|
if (mapArgs.count("-dropmessagestest") && GetRand(atoi(mapArgs["-dropmessagestest"])) == 0)
|
|
{
|
|
printf("dropmessagestest DROPPING RECV MESSAGE\n");
|
|
return true;
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
if (strCommand == "version")
|
|
{
|
|
// Each connection can only send one version message
|
|
if (pfrom->nVersion != 0)
|
|
{
|
|
pfrom->Misbehaving(1);
|
|
return false;
|
|
}
|
|
|
|
int64 nTime;
|
|
CAddress addrMe;
|
|
CAddress addrFrom;
|
|
uint64 nNonce = 1;
|
|
vRecv >> pfrom->nVersion >> pfrom->nServices >> nTime >> addrMe;
|
|
        if (pfrom->nVersion < 209)
        {
            // Since February 20, 2012, the protocol is initiated at version 209,
            // and earlier versions are no longer supported
            printf("partner %s using obsolete version %i; disconnecting\n", pfrom->addr.ToString().c_str(), pfrom->nVersion);
            pfrom->fDisconnect = true;
            return false;
        }
|
|
|
|
if (pfrom->nVersion == 10300)
|
|
pfrom->nVersion = 300;
|
|
if (!vRecv.empty())
|
|
vRecv >> addrFrom >> nNonce;
|
|
if (!vRecv.empty())
|
|
vRecv >> pfrom->strSubVer;
|
|
if (!vRecv.empty())
|
|
vRecv >> pfrom->nStartingHeight;
|
|
|
|
// Disconnect if we connected to ourself
|
|
if (nNonce == nLocalHostNonce && nNonce > 1)
|
|
{
|
|
printf("connected to self at %s, disconnecting\n", pfrom->addr.ToString().c_str());
|
|
pfrom->fDisconnect = true;
|
|
return true;
|
|
}
|
|
|
|
// Be shy and don't send version until we hear
|
|
if (pfrom->fInbound)
|
|
pfrom->PushVersion();
|
|
|
|
pfrom->fClient = !(pfrom->nServices & NODE_NETWORK);
|
|
|
|
AddTimeData(pfrom->addr, nTime);
|
|
|
|
// Change version
|
|
pfrom->PushMessage("verack");
|
|
pfrom->vSend.SetVersion(min(pfrom->nVersion, PROTOCOL_VERSION));
|
|
|
|
if (!pfrom->fInbound)
|
|
{
|
|
// Advertise our address
|
|
if (!fNoListen && !fUseProxy && addrLocalHost.IsRoutable() &&
|
|
!IsInitialBlockDownload())
|
|
{
|
|
CAddress addr(addrLocalHost);
|
|
addr.nTime = GetAdjustedTime();
|
|
pfrom->PushAddress(addr);
|
|
}
|
|
|
|
// Get recent addresses
|
|
if (pfrom->nVersion >= 31402 || mapAddresses.size() < 1000)
|
|
{
|
|
pfrom->PushMessage("getaddr");
|
|
pfrom->fGetAddr = true;
|
|
}
|
|
}
|
|
|
|
// Ask the first connected node for block updates
|
|
static int nAskedForBlocks = 0;
|
|
if (!pfrom->fClient &&
|
|
(pfrom->nVersion < 32000 || pfrom->nVersion >= 32400) &&
|
|
(nAskedForBlocks < 1 || vNodes.size() <= 1))
|
|
{
|
|
nAskedForBlocks++;
|
|
pfrom->PushGetBlocks(pindexBest, uint256(0));
|
|
}
|
|
|
|
// Relay alerts
|
|
CRITICAL_BLOCK(cs_mapAlerts)
|
|
BOOST_FOREACH(PAIRTYPE(const uint256, CAlert)& item, mapAlerts)
|
|
item.second.RelayTo(pfrom);
|
|
|
|
pfrom->fSuccessfullyConnected = true;
|
|
|
|
printf("version message: version %d, blocks=%d\n", pfrom->nVersion, pfrom->nStartingHeight);
|
|
|
|
cPeerBlockCounts.input(pfrom->nStartingHeight);
|
|
}
|
|
|
|
|
|
else if (pfrom->nVersion == 0)
|
|
{
|
|
// Must have a version message before anything else
|
|
pfrom->Misbehaving(1);
|
|
return false;
|
|
}
|
|
|
|
|
|
else if (strCommand == "verack")
|
|
{
|
|
pfrom->vRecv.SetVersion(min(pfrom->nVersion, PROTOCOL_VERSION));
|
|
}
|
|
|
|
|
|
else if (strCommand == "addr")
|
|
{
|
|
vector<CAddress> vAddr;
|
|
vRecv >> vAddr;
|
|
|
|
// Don't want addr from older versions unless seeding
|
|
if (pfrom->nVersion < 31402 && mapAddresses.size() > 1000)
|
|
return true;
|
|
if (vAddr.size() > 1000)
|
|
{
|
|
pfrom->Misbehaving(20);
|
|
return error("message addr size() = %d", vAddr.size());
|
|
}
|
|
|
|
// Store the new addresses
|
|
CAddrDB addrDB;
|
|
addrDB.TxnBegin();
|
|
int64 nNow = GetAdjustedTime();
|
|
int64 nSince = nNow - 10 * 60;
|
|
BOOST_FOREACH(CAddress& addr, vAddr)
|
|
{
|
|
if (fShutdown)
|
|
return true;
|
|
// ignore IPv6 for now, since it isn't implemented anyway
|
|
if (!addr.IsIPv4())
|
|
continue;
|
|
if (addr.nTime <= 100000000 || addr.nTime > nNow + 10 * 60)
|
|
addr.nTime = nNow - 5 * 24 * 60 * 60;
|
|
AddAddress(addr, 2 * 60 * 60, &addrDB);
|
|
pfrom->AddAddressKnown(addr);
|
|
if (addr.nTime > nSince && !pfrom->fGetAddr && vAddr.size() <= 10 && addr.IsRoutable())
|
|
{
|
|
// Relay to a limited number of other nodes
|
|
CRITICAL_BLOCK(cs_vNodes)
|
|
{
|
|
// Use deterministic randomness to send to the same nodes for 24 hours
|
|
// at a time so the setAddrKnowns of the chosen nodes prevent repeats
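// The relay key computed below mixes a per-run random salt, a hash of the
// address, and the current 24-hour period, so the choice of relay peers is
// stable for a day per address but unpredictable to outside observers.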
|
|
static uint256 hashSalt;
|
|
if (hashSalt == 0)
|
|
RAND_bytes((unsigned char*)&hashSalt, sizeof(hashSalt));
|
|
int64 hashAddr = addr.GetHash();
|
|
uint256 hashRand = hashSalt ^ (hashAddr<<32) ^ ((GetTime()+hashAddr)/(24*60*60));
|
|
hashRand = Hash(BEGIN(hashRand), END(hashRand));
|
|
multimap<uint256, CNode*> mapMix;
|
|
BOOST_FOREACH(CNode* pnode, vNodes)
|
|
{
|
|
if (pnode->nVersion < 31402)
|
|
continue;
|
|
unsigned int nPointer;
|
|
memcpy(&nPointer, &pnode, sizeof(nPointer));
|
|
uint256 hashKey = hashRand ^ nPointer;
|
|
hashKey = Hash(BEGIN(hashKey), END(hashKey));
|
|
mapMix.insert(make_pair(hashKey, pnode));
|
|
}
|
|
int nRelayNodes = 2;
|
|
for (multimap<uint256, CNode*>::iterator mi = mapMix.begin(); mi != mapMix.end() && nRelayNodes-- > 0; ++mi)
|
|
((*mi).second)->PushAddress(addr);
|
|
}
|
|
}
|
|
}
|
|
addrDB.TxnCommit(); // Save addresses (it's ok if this fails)
|
|
if (vAddr.size() < 1000)
|
|
pfrom->fGetAddr = false;
|
|
}
|
|
|
|
|
|
else if (strCommand == "inv")
|
|
{
|
|
vector<CInv> vInv;
|
|
vRecv >> vInv;
|
|
if (vInv.size() > 50000)
|
|
{
|
|
pfrom->Misbehaving(20);
|
|
return error("message inv size() = %d", vInv.size());
|
|
}
|
|
|
|
CTxDB txdb("r");
|
|
for (int nInv = 0; nInv < vInv.size(); nInv++)
|
|
{
|
|
const CInv &inv = vInv[nInv];
|
|
|
|
if (fShutdown)
|
|
return true;
|
|
            pfrom->AddInventoryKnown(inv);

            bool fAlreadyHave = AlreadyHave(txdb, inv);
            if (fDebug)
                printf("  got inventory: %s  %s\n", inv.ToString().c_str(), fAlreadyHave ? "have" : "new");

            // Always request the last block in an inv bundle (even if we already have it), as it is the
            // trigger for the other side to send further invs. If we are stuck on a (very long) side chain,
            // this is necessary to connect earlier received orphan blocks to the chain again.
            if (!fAlreadyHave || (inv.type == MSG_BLOCK && nInv==vInv.size()-1))
                pfrom->AskFor(inv);
            if (inv.type == MSG_BLOCK && mapOrphanBlocks.count(inv.hash))
                pfrom->PushGetBlocks(pindexBest, GetOrphanRoot(mapOrphanBlocks[inv.hash]));

            // Track requests for our stuff
            Inventory(inv.hash);
        }
|
|
}
|
|
|
|
|
|
else if (strCommand == "getdata")
|
|
{
|
|
vector<CInv> vInv;
|
|
vRecv >> vInv;
|
|
if (vInv.size() > 50000)
|
|
{
|
|
pfrom->Misbehaving(20);
|
|
return error("message getdata size() = %d", vInv.size());
|
|
}
|
|
|
|
BOOST_FOREACH(const CInv& inv, vInv)
|
|
{
|
|
if (fShutdown)
|
|
return true;
|
|
printf("received getdata for: %s\n", inv.ToString().c_str());
|
|
|
|
if (inv.type == MSG_BLOCK)
|
|
{
|
|
// Send block from disk
|
|
map<uint256, CBlockIndex*>::iterator mi = mapBlockIndex.find(inv.hash);
|
|
if (mi != mapBlockIndex.end())
|
|
{
|
|
CBlock block;
|
|
block.ReadFromDisk((*mi).second);
|
|
pfrom->PushMessage("block", block);
|
|
|
|
// Trigger them to send a getblocks request for the next batch of inventory
|
|
if (inv.hash == pfrom->hashContinue)
|
|
{
|
|
// Bypass PushInventory, this must send even if redundant,
|
|
// and we want it right after the last block so they don't
|
|
// wait for other stuff first.
|
|
vector<CInv> vInv;
|
|
vInv.push_back(CInv(MSG_BLOCK, hashBestChain));
|
|
pfrom->PushMessage("inv", vInv);
|
|
pfrom->hashContinue = 0;
|
|
}
|
|
}
|
|
}
|
|
else if (inv.IsKnownType())
|
|
{
|
|
// Send stream from relay memory
|
|
CRITICAL_BLOCK(cs_mapRelay)
|
|
{
|
|
map<CInv, CDataStream>::iterator mi = mapRelay.find(inv);
|
|
if (mi != mapRelay.end())
|
|
pfrom->PushMessage(inv.GetCommand(), (*mi).second);
|
|
}
|
|
}
|
|
|
|
// Track requests for our stuff
|
|
Inventory(inv.hash);
|
|
}
|
|
}
|
|
|
|
|
|
    else if (strCommand == "getblocks")
    {
        CBlockLocator locator;
        uint256 hashStop;
        vRecv >> locator >> hashStop;

        // Find the last block the caller has in the main chain
        CBlockIndex* pindex = locator.GetBlockIndex();

        // Send the rest of the chain
        if (pindex)
            pindex = pindex->pnext;
        int nLimit = 500 + locator.GetDistanceBack();
        unsigned int nBytes = 0;
        printf("getblocks %d to %s limit %d\n", (pindex ? pindex->nHeight : -1), hashStop.ToString().substr(0,20).c_str(), nLimit);
        for (; pindex; pindex = pindex->pnext)
        {
            if (pindex->GetBlockHash() == hashStop)
            {
                printf("  getblocks stopping at %d %s (%u bytes)\n", pindex->nHeight, pindex->GetBlockHash().ToString().substr(0,20).c_str(), nBytes);
                break;
            }
            pfrom->PushInventory(CInv(MSG_BLOCK, pindex->GetBlockHash()));
            CBlock block;
            block.ReadFromDisk(pindex, true);
            nBytes += block.GetSerializeSize(SER_NETWORK);
            if (--nLimit <= 0 || nBytes >= SendBufferSize()/2)
            {
                // When this block is requested, we'll send an inv that'll make them
                // getblocks the next batch of inventory.
                printf("  getblocks stopping at limit %d %s (%u bytes)\n", pindex->nHeight, pindex->GetBlockHash().ToString().substr(0,20).c_str(), nBytes);
                pfrom->hashContinue = pindex->GetBlockHash();
                break;
            }
        }
    }
|
|
|
|
|
|
else if (strCommand == "getheaders")
|
|
{
|
|
CBlockLocator locator;
|
|
uint256 hashStop;
|
|
vRecv >> locator >> hashStop;
|
|
|
|
CBlockIndex* pindex = NULL;
|
|
if (locator.IsNull())
|
|
{
|
|
// If locator is null, return the hashStop block
|
|
map<uint256, CBlockIndex*>::iterator mi = mapBlockIndex.find(hashStop);
|
|
if (mi == mapBlockIndex.end())
|
|
return true;
|
|
pindex = (*mi).second;
|
|
}
|
|
else
|
|
{
|
|
// Find the last block the caller has in the main chain
|
|
pindex = locator.GetBlockIndex();
|
|
if (pindex)
|
|
pindex = pindex->pnext;
|
|
}
|
|
|
|
vector<CBlock> vHeaders;
|
|
int nLimit = 2000 + locator.GetDistanceBack();
|
|
printf("getheaders %d to %s limit %d\n", (pindex ? pindex->nHeight : -1), hashStop.ToString().substr(0,20).c_str(), nLimit);
|
|
for (; pindex; pindex = pindex->pnext)
|
|
{
|
|
vHeaders.push_back(pindex->GetBlockHeader());
|
|
if (--nLimit <= 0 || pindex->GetBlockHash() == hashStop)
|
|
break;
|
|
}
|
|
pfrom->PushMessage("headers", vHeaders);
|
|
}
|
|
|
|
|
|
else if (strCommand == "tx")
|
|
{
|
|
vector<uint256> vWorkQueue;
|
|
CDataStream vMsg(vRecv);
|
|
CTransaction tx;
|
|
vRecv >> tx;
|
|
|
|
CInv inv(MSG_TX, tx.GetHash());
|
|
pfrom->AddInventoryKnown(inv);
|
|
|
|
bool fMissingInputs = false;
|
|
if (tx.AcceptToMemoryPool(true, &fMissingInputs))
|
|
{
|
|
SyncWithWallets(tx, NULL, true);
|
|
RelayMessage(inv, vMsg);
|
|
mapAlreadyAskedFor.erase(inv);
|
|
vWorkQueue.push_back(inv.hash);
|
|
|
|
// Recursively process any orphan transactions that depended on this one
|
|
for (int i = 0; i < vWorkQueue.size(); i++)
|
|
{
|
|
uint256 hashPrev = vWorkQueue[i];
|
|
for (multimap<uint256, CDataStream*>::iterator mi = mapOrphanTransactionsByPrev.lower_bound(hashPrev);
|
|
mi != mapOrphanTransactionsByPrev.upper_bound(hashPrev);
|
|
++mi)
|
|
{
|
|
const CDataStream& vMsg = *((*mi).second);
|
|
CTransaction tx;
|
|
CDataStream(vMsg) >> tx;
|
|
CInv inv(MSG_TX, tx.GetHash());
|
|
|
|
if (tx.AcceptToMemoryPool(true))
|
|
{
|
|
printf(" accepted orphan tx %s\n", inv.hash.ToString().substr(0,10).c_str());
|
|
SyncWithWallets(tx, NULL, true);
|
|
RelayMessage(inv, vMsg);
|
|
mapAlreadyAskedFor.erase(inv);
|
|
vWorkQueue.push_back(inv.hash);
|
|
}
|
|
}
|
|
}
|
|
|
|
BOOST_FOREACH(uint256 hash, vWorkQueue)
|
|
EraseOrphanTx(hash);
|
|
}
|
|
else if (fMissingInputs)
|
|
{
|
|
printf("storing orphan tx %s\n", inv.hash.ToString().substr(0,10).c_str());
|
|
AddOrphanTx(vMsg);
|
|
|
|
// DoS prevention: do not allow mapOrphanTransactions to grow unbounded
|
|
int nEvicted = LimitOrphanTxSize(MAX_ORPHAN_TRANSACTIONS);
|
|
if (nEvicted > 0)
|
|
printf("mapOrphan overflow, removed %d tx\n", nEvicted);
|
|
}
|
|
if (tx.nDoS) pfrom->Misbehaving(tx.nDoS);
|
|
}
|
|
|
|
|
|
else if (strCommand == "block")
|
|
{
|
|
CBlock block;
|
|
vRecv >> block;
|
|
|
|
printf("received block %s\n", block.GetHash().ToString().substr(0,20).c_str());
|
|
// block.print();
|
|
|
|
CInv inv(MSG_BLOCK, block.GetHash());
|
|
pfrom->AddInventoryKnown(inv);
|
|
|
|
if (ProcessBlock(pfrom, &block))
|
|
mapAlreadyAskedFor.erase(inv);
|
|
if (block.nDoS) pfrom->Misbehaving(block.nDoS);
|
|
}
|
|
|
|
|
|
else if (strCommand == "getaddr")
|
|
{
|
|
// Nodes rebroadcast an addr every 24 hours
|
|
pfrom->vAddrToSend.clear();
|
|
int64 nSince = GetAdjustedTime() - 3 * 60 * 60; // in the last 3 hours
|
|
CRITICAL_BLOCK(cs_mapAddresses)
|
|
{
|
|
unsigned int nCount = 0;
|
|
BOOST_FOREACH(const PAIRTYPE(vector<unsigned char>, CAddress)& item, mapAddresses)
|
|
{
|
|
const CAddress& addr = item.second;
|
|
if (addr.nTime > nSince)
|
|
nCount++;
|
|
}
|
|
BOOST_FOREACH(const PAIRTYPE(vector<unsigned char>, CAddress)& item, mapAddresses)
|
|
{
|
|
const CAddress& addr = item.second;
|
|
if (addr.nTime > nSince && GetRand(nCount) < 2500)
|
|
pfrom->PushAddress(addr);
|
|
}
|
|
}
|
|
}
|
|
|
|
|
|
else if (strCommand == "checkorder")
|
|
{
|
|
uint256 hashReply;
|
|
vRecv >> hashReply;
|
|
|
|
if (!GetBoolArg("-allowreceivebyip"))
|
|
{
|
|
pfrom->PushMessage("reply", hashReply, (int)2, string(""));
|
|
return true;
|
|
}
|
|
|
|
CWalletTx order;
|
|
vRecv >> order;
|
|
|
|
/// we have a chance to check the order here
|
|
|
|
// Keep giving the same key to the same ip until they use it
|
|
if (!mapReuseKey.count(pfrom->addr))
|
|
pwalletMain->GetKeyFromPool(mapReuseKey[pfrom->addr], true);
|
|
|
|
// Send back approval of order and pubkey to use
|
|
CScript scriptPubKey;
|
|
scriptPubKey << mapReuseKey[pfrom->addr] << OP_CHECKSIG;
|
|
pfrom->PushMessage("reply", hashReply, (int)0, scriptPubKey);
|
|
}
|
|
|
|
|
|
else if (strCommand == "reply")
|
|
{
|
|
uint256 hashReply;
|
|
vRecv >> hashReply;
|
|
|
|
CRequestTracker tracker;
|
|
CRITICAL_BLOCK(pfrom->cs_mapRequests)
|
|
{
|
|
map<uint256, CRequestTracker>::iterator mi = pfrom->mapRequests.find(hashReply);
|
|
if (mi != pfrom->mapRequests.end())
|
|
{
|
|
tracker = (*mi).second;
|
|
pfrom->mapRequests.erase(mi);
|
|
}
|
|
}
|
|
if (!tracker.IsNull())
|
|
tracker.fn(tracker.param1, vRecv);
|
|
}
|
|
|
|
|
|
else if (strCommand == "ping")
|
|
{
|
|
}
|
|
|
|
|
|
else if (strCommand == "alert")
|
|
{
|
|
CAlert alert;
|
|
vRecv >> alert;
|
|
|
|
if (alert.ProcessAlert())
|
|
{
|
|
// Relay
|
|
pfrom->setKnown.insert(alert.GetHash());
|
|
CRITICAL_BLOCK(cs_vNodes)
|
|
BOOST_FOREACH(CNode* pnode, vNodes)
|
|
alert.RelayTo(pnode);
|
|
}
|
|
}
|
|
|
|
|
|
else
|
|
{
|
|
// Ignore unknown commands for extensibility
|
|
}
|
|
|
|
|
|
// Update the last seen time for this node's address
|
|
if (pfrom->fNetworkNode)
|
|
if (strCommand == "version" || strCommand == "addr" || strCommand == "inv" || strCommand == "getdata" || strCommand == "ping")
|
|
AddressCurrentlyConnected(pfrom->addr);
|
|
|
|
|
|
return true;
|
|
}
|
|
|
|
bool ProcessMessages(CNode* pfrom)
|
|
{
|
|
CDataStream& vRecv = pfrom->vRecv;
|
|
if (vRecv.empty())
|
|
return true;
|
|
//if (fDebug)
|
|
// printf("ProcessMessages(%u bytes)\n", vRecv.size());
|
|
|
|
    //
    // Message format
    //  (4) message start
    //  (12) command
    //  (4) size
    //  (4) checksum
    //  (x) data
    //
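    // The fixed-size header is therefore 4 + 12 + 4 + 4 = 24 bytes
    // (nHeaderSize below is computed from CMessageHeader's serialization),
    // and the checksum is taken over the payload only, after the header has
    // been read off the stream.
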
loop
|
|
{
|
|
// Scan for message start
|
|
CDataStream::iterator pstart = search(vRecv.begin(), vRecv.end(), BEGIN(pchMessageStart), END(pchMessageStart));
|
|
int nHeaderSize = vRecv.GetSerializeSize(CMessageHeader());
|
|
if (vRecv.end() - pstart < nHeaderSize)
|
|
{
|
|
if (vRecv.size() > nHeaderSize)
|
|
{
|
|
printf("\n\nPROCESSMESSAGE MESSAGESTART NOT FOUND\n\n");
|
|
vRecv.erase(vRecv.begin(), vRecv.end() - nHeaderSize);
|
|
}
|
|
break;
|
|
}
|
|
if (pstart - vRecv.begin() > 0)
|
|
printf("\n\nPROCESSMESSAGE SKIPPED %d BYTES\n\n", pstart - vRecv.begin());
|
|
vRecv.erase(vRecv.begin(), pstart);
|
|
|
|
// Read header
|
|
vector<char> vHeaderSave(vRecv.begin(), vRecv.begin() + nHeaderSize);
|
|
CMessageHeader hdr;
|
|
vRecv >> hdr;
|
|
if (!hdr.IsValid())
|
|
{
|
|
printf("\n\nPROCESSMESSAGE: ERRORS IN HEADER %s\n\n\n", hdr.GetCommand().c_str());
|
|
continue;
|
|
}
|
|
string strCommand = hdr.GetCommand();
|
|
|
|
// Message size
|
|
unsigned int nMessageSize = hdr.nMessageSize;
|
|
if (nMessageSize > MAX_SIZE)
|
|
{
|
|
printf("ProcessMessage(%s, %u bytes) : nMessageSize > MAX_SIZE\n", strCommand.c_str(), nMessageSize);
|
|
continue;
|
|
}
|
|
if (nMessageSize > vRecv.size())
|
|
{
|
|
// Rewind and wait for rest of message
|
|
vRecv.insert(vRecv.begin(), vHeaderSave.begin(), vHeaderSave.end());
|
|
break;
|
|
}
|
|
|
|
// Checksum
|
|
uint256 hash = Hash(vRecv.begin(), vRecv.begin() + nMessageSize);
|
|
unsigned int nChecksum = 0;
|
|
memcpy(&nChecksum, &hash, sizeof(nChecksum));
|
|
if (nChecksum != hdr.nChecksum)
|
|
{
|
|
printf("ProcessMessage(%s, %u bytes) : CHECKSUM ERROR nChecksum=%08x hdr.nChecksum=%08x\n",
|
|
strCommand.c_str(), nMessageSize, nChecksum, hdr.nChecksum);
|
|
continue;
|
|
}
|
|
|
|
// Copy message to its own buffer
|
|
CDataStream vMsg(vRecv.begin(), vRecv.begin() + nMessageSize, vRecv.nType, vRecv.nVersion);
|
|
vRecv.ignore(nMessageSize);
|
|
|
|
// Process message
|
|
bool fRet = false;
|
|
try
|
|
{
|
|
CRITICAL_BLOCK(cs_main)
|
|
fRet = ProcessMessage(pfrom, strCommand, vMsg);
|
|
if (fShutdown)
|
|
return true;
|
|
}
|
|
catch (std::ios_base::failure& e)
|
|
{
|
|
if (strstr(e.what(), "end of data"))
|
|
{
|
|
// Allow exceptions from underlength message on vRecv
|
|
printf("ProcessMessage(%s, %u bytes) : Exception '%s' caught, normally caused by a message being shorter than its stated length\n", strCommand.c_str(), nMessageSize, e.what());
|
|
}
|
|
else if (strstr(e.what(), "size too large"))
|
|
{
|
|
// Allow exceptions from overlong size
|
|
printf("ProcessMessage(%s, %u bytes) : Exception '%s' caught\n", strCommand.c_str(), nMessageSize, e.what());
|
|
}
|
|
else
|
|
{
|
|
PrintExceptionContinue(&e, "ProcessMessage()");
|
|
}
|
|
}
|
|
catch (std::exception& e) {
|
|
PrintExceptionContinue(&e, "ProcessMessage()");
|
|
} catch (...) {
|
|
PrintExceptionContinue(NULL, "ProcessMessage()");
|
|
}
|
|
|
|
if (!fRet)
|
|
printf("ProcessMessage(%s, %u bytes) FAILED\n", strCommand.c_str(), nMessageSize);
|
|
}
|
|
|
|
vRecv.Compact();
|
|
return true;
|
|
}
|
|
|
|
|
|
bool SendMessages(CNode* pto, bool fSendTrickle)
|
|
{
|
|
CRITICAL_BLOCK(cs_main)
|
|
{
|
|
// Don't send anything until we get their version message
|
|
if (pto->nVersion == 0)
|
|
return true;
|
|
|
|
// Keep-alive ping
|
|
if (pto->nLastSend && GetTime() - pto->nLastSend > 30 * 60 && pto->vSend.empty())
|
|
pto->PushMessage("ping");
|
|
|
|
// Resend wallet transactions that haven't gotten in a block yet
|
|
ResendWalletTransactions();
|
|
|
|
// Address refresh broadcast
|
|
static int64 nLastRebroadcast;
|
|
if (!IsInitialBlockDownload() && (GetTime() - nLastRebroadcast > 24 * 60 * 60))
|
|
{
|
|
CRITICAL_BLOCK(cs_vNodes)
|
|
{
|
|
BOOST_FOREACH(CNode* pnode, vNodes)
|
|
{
|
|
// Periodically clear setAddrKnown to allow refresh broadcasts
|
|
if (nLastRebroadcast)
|
|
pnode->setAddrKnown.clear();
|
|
|
|
// Rebroadcast our address
|
|
if (!fNoListen && !fUseProxy && addrLocalHost.IsRoutable())
|
|
{
|
|
CAddress addr(addrLocalHost);
|
|
addr.nTime = GetAdjustedTime();
|
|
pnode->PushAddress(addr);
|
|
}
|
|
}
|
|
}
|
|
nLastRebroadcast = GetTime();
|
|
}
|
|
|
|
// Clear out old addresses periodically so it's not too much work at once
|
|
static int64 nLastClear;
|
|
if (nLastClear == 0)
|
|
nLastClear = GetTime();
|
|
if (GetTime() - nLastClear > 10 * 60 && vNodes.size() >= 3)
|
|
{
|
|
nLastClear = GetTime();
|
|
CRITICAL_BLOCK(cs_mapAddresses)
|
|
{
|
|
CAddrDB addrdb;
|
|
int64 nSince = GetAdjustedTime() - 14 * 24 * 60 * 60;
|
|
for (map<vector<unsigned char>, CAddress>::iterator mi = mapAddresses.begin();
|
|
mi != mapAddresses.end();)
|
|
{
|
|
const CAddress& addr = (*mi).second;
|
|
if (addr.nTime < nSince)
|
|
{
|
|
if (mapAddresses.size() < 1000 || GetTime() > nLastClear + 20)
|
|
break;
|
|
addrdb.EraseAddress(addr);
|
|
mapAddresses.erase(mi++);
|
|
}
|
|
else
|
|
mi++;
|
|
}
|
|
}
|
|
}
|
|
|
|
|
|
//
|
|
// Message: addr
|
|
//
|
|
if (fSendTrickle)
|
|
{
|
|
vector<CAddress> vAddr;
|
|
vAddr.reserve(pto->vAddrToSend.size());
|
|
BOOST_FOREACH(const CAddress& addr, pto->vAddrToSend)
|
|
{
|
|
// returns true if wasn't already contained in the set
|
|
if (pto->setAddrKnown.insert(addr).second)
|
|
{
|
|
vAddr.push_back(addr);
|
|
// receiver rejects addr messages larger than 1000
|
|
if (vAddr.size() >= 1000)
|
|
{
|
|
pto->PushMessage("addr", vAddr);
|
|
vAddr.clear();
|
|
}
|
|
}
|
|
}
|
|
pto->vAddrToSend.clear();
|
|
if (!vAddr.empty())
|
|
pto->PushMessage("addr", vAddr);
|
|
}
|
|
|
|
|
|
//
|
|
// Message: inventory
|
|
//
|
|
vector<CInv> vInv;
|
|
vector<CInv> vInvWait;
|
|
CRITICAL_BLOCK(pto->cs_inventory)
|
|
{
|
|
vInv.reserve(pto->vInventoryToSend.size());
|
|
vInvWait.reserve(pto->vInventoryToSend.size());
|
|
BOOST_FOREACH(const CInv& inv, pto->vInventoryToSend)
|
|
{
|
|
if (pto->setInventoryKnown.count(inv))
|
|
continue;
|
|
|
|
// trickle out tx inv to protect privacy
|
|
if (inv.type == MSG_TX && !fSendTrickle)
|
|
{
|
|
// 1/4 of tx invs blast to all immediately
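// (selection below: a salted hash of the txid decides the 1-in-4 that are sent
// immediately; transactions originating from this wallet are always held back
// for the trickle)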
|
|
static uint256 hashSalt;
|
|
if (hashSalt == 0)
|
|
RAND_bytes((unsigned char*)&hashSalt, sizeof(hashSalt));
|
|
uint256 hashRand = inv.hash ^ hashSalt;
|
|
hashRand = Hash(BEGIN(hashRand), END(hashRand));
|
|
bool fTrickleWait = ((hashRand & 3) != 0);
|
|
|
|
// always trickle our own transactions
|
|
if (!fTrickleWait)
|
|
{
|
|
CWalletTx wtx;
|
|
if (GetTransaction(inv.hash, wtx))
|
|
if (wtx.fFromMe)
|
|
fTrickleWait = true;
|
|
}
|
|
|
|
if (fTrickleWait)
|
|
{
|
|
vInvWait.push_back(inv);
|
|
continue;
|
|
}
|
|
}
|
|
|
|
// returns true if wasn't already contained in the set
|
|
if (pto->setInventoryKnown.insert(inv).second)
|
|
{
|
|
vInv.push_back(inv);
|
|
if (vInv.size() >= 1000)
|
|
{
|
|
pto->PushMessage("inv", vInv);
|
|
vInv.clear();
|
|
}
|
|
}
|
|
}
|
|
pto->vInventoryToSend = vInvWait;
|
|
}
|
|
if (!vInv.empty())
|
|
pto->PushMessage("inv", vInv);
|
|
|
|
|
|
//
|
|
// Message: getdata
|
|
//
|
|
vector<CInv> vGetData;
|
|
int64 nNow = GetTime() * 1000000;
|
|
CTxDB txdb("r");
|
|
while (!pto->mapAskFor.empty() && (*pto->mapAskFor.begin()).first <= nNow)
|
|
{
|
|
const CInv& inv = (*pto->mapAskFor.begin()).second;
|
|
if (!AlreadyHave(txdb, inv))
|
|
{
|
|
printf("sending getdata: %s\n", inv.ToString().c_str());
|
|
vGetData.push_back(inv);
|
|
if (vGetData.size() >= 1000)
|
|
{
|
|
pto->PushMessage("getdata", vGetData);
|
|
vGetData.clear();
|
|
}
|
|
}
|
|
mapAlreadyAskedFor[inv] = nNow;
|
|
pto->mapAskFor.erase(pto->mapAskFor.begin());
|
|
}
|
|
if (!vGetData.empty())
|
|
pto->PushMessage("getdata", vGetData);
|
|
|
|
}
|
|
return true;
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
//////////////////////////////////////////////////////////////////////////////
//
// BitcoinMiner
//

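// FormatHashBlocks applies standard SHA-256 message padding in place: it appends
// the 0x80 terminator byte and the big-endian bit length of the message, and
// returns how many 64-byte blocks the padded buffer occupies.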
int static FormatHashBlocks(void* pbuffer, unsigned int len)
{
    unsigned char* pdata = (unsigned char*)pbuffer;
    unsigned int blocks = 1 + ((len + 8) / 64);
    unsigned char* pend = pdata + 64 * blocks;
    memset(pdata + len, 0, 64 * blocks - len);
    pdata[len] = 0x80;
    unsigned int bits = len * 8;
    pend[-1] = (bits >> 0) & 0xff;
    pend[-2] = (bits >> 8) & 0xff;
    pend[-3] = (bits >> 16) & 0xff;
    pend[-4] = (bits >> 24) & 0xff;
    return blocks;
}

static const unsigned int pSHA256InitState[8] =
{0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a, 0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19};

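// SHA256Transform runs a single SHA-256 compression over one 64-byte chunk,
// byte-swapping the input words and seeding the chaining value from pinit
// (either pSHA256InitState or a precomputed midstate). Only ctx.h is read back,
// so the context's length counters are irrelevant here.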
void SHA256Transform(void* pstate, void* pinput, const void* pinit)
{
    SHA256_CTX ctx;
    unsigned char data[64];

    SHA256_Init(&ctx);

    for (int i = 0; i < 16; i++)
        ((uint32_t*)data)[i] = ByteReverse(((uint32_t*)pinput)[i]);

    for (int i = 0; i < 8; i++)
        ctx.h[i] = ((uint32_t*)pinit)[i];

    SHA256_Update(&ctx, data, sizeof(data));
    for (int i = 0; i < 8; i++)
        ((uint32_t*)pstate)[i] = ctx.h[i];
}

//
// ScanHash scans nonces looking for a hash with at least some zero bits.
// It operates on big endian data. Caller does the byte reversing.
// All input buffers are 16-byte aligned. nNonce is usually preserved
// between calls, but periodically or if nNonce is 0xffff0000 or above,
// the block is rebuilt and nNonce starts over at zero.
//
unsigned int static ScanHash_CryptoPP(char* pmidstate, char* pdata, char* phash1, char* phash, unsigned int& nHashesDone)
{
    unsigned int& nNonce = *(unsigned int*)(pdata + 12);
    for (;;)
    {
        // Crypto++ SHA-256
        // Hash pdata using pmidstate as the starting state into
        // preformatted buffer phash1, then hash phash1 into phash
        nNonce++;
        SHA256Transform(phash1, pdata, pmidstate);
        SHA256Transform(phash, phash1, pSHA256InitState);

        // Return the nonce if the hash has at least some zero bits,
        // caller will check if it has enough to reach the target
        // (a cheap pre-filter: the 16 bits at byte offset 28 of phash must be zero)
        if (((unsigned short*)phash)[14] == 0)
            return nNonce;

        // If nothing found after trying for a while, return -1
        if ((nNonce & 0xffff) == 0)
        {
            nHashesDone = 0xffff+1;
            return -1;
        }
    }
}

// Some explaining would be appreciated
class COrphan
{
public:
    CTransaction* ptx;
    set<uint256> setDependsOn;
    double dPriority;

    COrphan(CTransaction* ptxIn)
    {
        ptx = ptxIn;
        dPriority = 0;
    }

    void print() const
    {
        printf("COrphan(hash=%s, dPriority=%.1f)\n", ptx->GetHash().ToString().substr(0,10).c_str(), dPriority);
        BOOST_FOREACH(uint256 hash, setDependsOn)
            printf("   setDependsOn %s\n", hash.ToString().substr(0,10).c_str());
    }
};
|
|
|
|
|
|
uint64 nLastBlockTx = 0;
|
|
uint64 nLastBlockSize = 0;
|
|
|
|
CBlock* CreateNewBlock(CReserveKey& reservekey)
|
|
{
|
|
CBlockIndex* pindexPrev = pindexBest;
|
|
|
|
// Create new block
|
|
auto_ptr<CBlock> pblock(new CBlock());
|
|
if (!pblock.get())
|
|
return NULL;
|
|
|
|
// Create coinbase tx
|
|
CTransaction txNew;
|
|
txNew.vin.resize(1);
|
|
txNew.vin[0].prevout.SetNull();
|
|
txNew.vout.resize(1);
|
|
txNew.vout[0].scriptPubKey << reservekey.GetReservedKey() << OP_CHECKSIG;
|
|
|
|
// Add our coinbase tx as first transaction
|
|
pblock->vtx.push_back(txNew);
|
|
|
|
// Collect memory pool transactions into the block
|
|
int64 nFees = 0;
|
|
CRITICAL_BLOCK(cs_main)
|
|
CRITICAL_BLOCK(cs_mapTransactions)
|
|
{
|
|
CTxDB txdb("r");
|
|
|
|
// Priority order to process transactions
|
|
list<COrphan> vOrphan; // list memory doesn't move
|
|
map<uint256, vector<COrphan*> > mapDependers;
|
|
multimap<double, CTransaction*> mapPriority;
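// mapPriority is keyed by negative priority so that begin() always yields the
// highest-priority transaction. Transactions whose inputs are not found in the
// transaction database (i.e. they depend on other memory-pool transactions) are
// parked in vOrphan/mapDependers and released into mapPriority once everything
// they depend on has been added to the block.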
|
|
for (map<uint256, CTransaction>::iterator mi = mapTransactions.begin(); mi != mapTransactions.end(); ++mi)
|
|
{
|
|
CTransaction& tx = (*mi).second;
|
|
if (tx.IsCoinBase() || !tx.IsFinal())
|
|
continue;
|
|
|
|
COrphan* porphan = NULL;
|
|
double dPriority = 0;
|
|
BOOST_FOREACH(const CTxIn& txin, tx.vin)
|
|
{
|
|
// Read prev transaction
|
|
CTransaction txPrev;
|
|
CTxIndex txindex;
|
|
if (!txPrev.ReadFromDisk(txdb, txin.prevout, txindex))
|
|
{
|
|
// Has to wait for dependencies
|
|
if (!porphan)
|
|
{
|
|
// Use list for automatic deletion
|
|
vOrphan.push_back(COrphan(&tx));
|
|
porphan = &vOrphan.back();
|
|
}
|
|
mapDependers[txin.prevout.hash].push_back(porphan);
|
|
porphan->setDependsOn.insert(txin.prevout.hash);
|
|
continue;
|
|
}
|
|
int64 nValueIn = txPrev.vout[txin.prevout.n].nValue;
|
|
|
|
// Read block header
|
|
int nConf = txindex.GetDepthInMainChain();
|
|
|
|
dPriority += (double)nValueIn * nConf;
|
|
|
|
if (fDebug && GetBoolArg("-printpriority"))
|
|
printf("priority nValueIn=%-12I64d nConf=%-5d dPriority=%-20.1f\n", nValueIn, nConf, dPriority);
|
|
}
|
|
|
|
// Priority is sum(valuein * age) / txsize
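// Illustrative (hypothetical numbers): a single 1 BTC input with 144 confirmations
// in a 250-byte transaction gives dPriority = 100000000 * 144 / 250 = 57,600,000.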
|
|
dPriority /= ::GetSerializeSize(tx, SER_NETWORK);
|
|
|
|
if (porphan)
|
|
porphan->dPriority = dPriority;
|
|
else
|
|
mapPriority.insert(make_pair(-dPriority, &(*mi).second));
|
|
|
|
if (fDebug && GetBoolArg("-printpriority"))
|
|
{
|
|
printf("priority %-20.1f %s\n%s", dPriority, tx.GetHash().ToString().substr(0,10).c_str(), tx.ToString().c_str());
|
|
if (porphan)
|
|
porphan->print();
|
|
printf("\n");
|
|
}
|
|
}
|
|
|
|
// Collect transactions into block
|
|
map<uint256, CTxIndex> mapTestPool;
|
|
uint64 nBlockSize = 1000;
|
|
uint64 nBlockTx = 0;
|
|
int nBlockSigOps = 100;
|
|
while (!mapPriority.empty())
|
|
{
|
|
// Take highest priority transaction off priority queue
|
|
double dPriority = -(*mapPriority.begin()).first;
|
|
CTransaction& tx = *(*mapPriority.begin()).second;
|
|
mapPriority.erase(mapPriority.begin());
|
|
|
|
// Size limits
|
|
unsigned int nTxSize = ::GetSerializeSize(tx, SER_NETWORK);
|
|
if (nBlockSize + nTxSize >= MAX_BLOCK_SIZE_GEN)
|
|
continue;
|
|
|
|
// Legacy limits on sigOps:
|
|
int nTxSigOps = tx.GetLegacySigOpCount();
|
|
if (nBlockSigOps + nTxSigOps >= MAX_BLOCK_SIGOPS)
|
|
continue;
|
|
|
|
// Transaction fee required depends on block size
|
|
bool fAllowFree = (nBlockSize + nTxSize < 4000 || CTransaction::AllowFree(dPriority));
|
|
int64 nMinFee = tx.GetMinFee(nBlockSize, fAllowFree, GMF_BLOCK);
|
|
|
|
// Connecting shouldn't fail due to dependency on other memory pool transactions
|
|
// because we're already processing them in order of dependency
|
|
map<uint256, CTxIndex> mapTestPoolTmp(mapTestPool);
|
|
MapPrevTx mapInputs;
|
|
bool fInvalid;
|
|
if (!tx.FetchInputs(txdb, mapTestPoolTmp, false, true, mapInputs, fInvalid))
|
|
continue;
|
|
|
|
int64 nTxFees = tx.GetValueIn(mapInputs)-tx.GetValueOut();
|
|
if (nTxFees < nMinFee)
|
|
continue;
|
|
|
|
nTxSigOps += tx.GetP2SHSigOpCount(mapInputs);
|
|
if (nBlockSigOps + nTxSigOps >= MAX_BLOCK_SIGOPS)
|
|
continue;
|
|
|
|
if (!tx.ConnectInputs(mapInputs, mapTestPoolTmp, CDiskTxPos(1,1,1), pindexPrev, false, true))
|
|
continue;
|
|
mapTestPoolTmp[tx.GetHash()] = CTxIndex(CDiskTxPos(1,1,1), tx.vout.size());
|
|
swap(mapTestPool, mapTestPoolTmp);
|
|
|
|
// Added
|
|
pblock->vtx.push_back(tx);
|
|
nBlockSize += nTxSize;
|
|
++nBlockTx;
|
|
nBlockSigOps += nTxSigOps;
|
|
nFees += nTxFees;
|
|
|
|
// Add transactions that depend on this one to the priority queue
|
|
uint256 hash = tx.GetHash();
|
|
if (mapDependers.count(hash))
|
|
{
|
|
BOOST_FOREACH(COrphan* porphan, mapDependers[hash])
|
|
{
|
|
if (!porphan->setDependsOn.empty())
|
|
{
|
|
porphan->setDependsOn.erase(hash);
|
|
if (porphan->setDependsOn.empty())
|
|
mapPriority.insert(make_pair(-porphan->dPriority, porphan->ptx));
|
|
}
|
|
}
|
|
}
|
|
}
|
|
|
|
nLastBlockTx = nBlockTx;
|
|
nLastBlockSize = nBlockSize;
|
|
printf("CreateNewBlock(): total size %lu\n", nBlockSize);
|
|
|
|
}
|
|
pblock->vtx[0].vout[0].nValue = GetBlockValue(pindexPrev->nHeight+1, nFees);
|
|
|
|
// Fill in header
|
|
pblock->hashPrevBlock = pindexPrev->GetBlockHash();
|
|
pblock->hashMerkleRoot = pblock->BuildMerkleTree();
|
|
pblock->UpdateTime(pindexPrev);
|
|
pblock->nBits = GetNextWorkRequired(pindexPrev, pblock.get());
|
|
pblock->nNonce = 0;
|
|
|
|
return pblock.release();
|
|
}
|
|
|
|
|
|
void IncrementExtraNonce(CBlock* pblock, CBlockIndex* pindexPrev, unsigned int& nExtraNonce)
|
|
{
|
|
// Update nExtraNonce
|
|
static uint256 hashPrevBlock;
|
|
if (hashPrevBlock != pblock->hashPrevBlock)
|
|
{
|
|
nExtraNonce = 0;
|
|
hashPrevBlock = pblock->hashPrevBlock;
|
|
}
|
|
++nExtraNonce;
|
|
pblock->vtx[0].vin[0].scriptSig = (CScript() << pblock->nTime << CBigNum(nExtraNonce)) + COINBASE_FLAGS;
|
|
assert(pblock->vtx[0].vin[0].scriptSig.size() <= 100);
|
|
|
|
pblock->hashMerkleRoot = pblock->BuildMerkleTree();
|
|
}
|
|
|
|
|
|
void FormatHashBuffers(CBlock* pblock, char* pmidstate, char* pdata, char* phash1)
|
|
{
|
|
//
|
|
// Prebuild hash buffers
|
|
//
|
|
struct
|
|
{
|
|
struct unnamed2
|
|
{
|
|
int nVersion;
|
|
uint256 hashPrevBlock;
|
|
uint256 hashMerkleRoot;
|
|
unsigned int nTime;
|
|
unsigned int nBits;
|
|
unsigned int nNonce;
|
|
}
|
|
block;
|
|
unsigned char pchPadding0[64];
|
|
uint256 hash1;
|
|
unsigned char pchPadding1[64];
|
|
}
|
|
tmp;
|
|
memset(&tmp, 0, sizeof(tmp));
|
|
|
|
tmp.block.nVersion = pblock->nVersion;
|
|
tmp.block.hashPrevBlock = pblock->hashPrevBlock;
|
|
tmp.block.hashMerkleRoot = pblock->hashMerkleRoot;
|
|
tmp.block.nTime = pblock->nTime;
|
|
tmp.block.nBits = pblock->nBits;
|
|
tmp.block.nNonce = pblock->nNonce;
|
|
|
|
FormatHashBlocks(&tmp.block, sizeof(tmp.block));
|
|
FormatHashBlocks(&tmp.hash1, sizeof(tmp.hash1));
|
|
|
|
// Byte swap all the input buffer
|
|
for (int i = 0; i < sizeof(tmp)/4; i++)
|
|
((unsigned int*)&tmp)[i] = ByteReverse(((unsigned int*)&tmp)[i]);
|
|
|
|
// Precalc the first half of the first hash, which stays constant
|
|
SHA256Transform(pmidstate, &tmp.block, pSHA256InitState);
|
|
|
|
memcpy(pdata, &tmp.block, 128);
|
|
memcpy(phash1, &tmp.hash1, 64);
|
|
}
|
|
|
|
|
|
bool CheckWork(CBlock* pblock, CWallet& wallet, CReserveKey& reservekey)
|
|
{
|
|
uint256 hash = pblock->GetHash();
|
|
uint256 hashTarget = CBigNum().SetCompact(pblock->nBits).getuint256();
|
|
|
|
if (hash > hashTarget)
|
|
return false;
|
|
|
|
//// debug print
|
|
printf("BitcoinMiner:\n");
|
|
printf("proof-of-work found \n hash: %s \ntarget: %s\n", hash.GetHex().c_str(), hashTarget.GetHex().c_str());
|
|
pblock->print();
|
|
printf("%s ", DateTimeStrFormat("%x %H:%M", GetTime()).c_str());
|
|
printf("generated %s\n", FormatMoney(pblock->vtx[0].vout[0].nValue).c_str());
|
|
|
|
// Found a solution
|
|
CRITICAL_BLOCK(cs_main)
|
|
{
|
|
if (pblock->hashPrevBlock != hashBestChain)
|
|
return error("BitcoinMiner : generated block is stale");
|
|
|
|
// Remove key from key pool
|
|
reservekey.KeepKey();
|
|
|
|
// Track how many getdata requests this block gets
|
|
CRITICAL_BLOCK(wallet.cs_wallet)
|
|
wallet.mapRequestCount[pblock->GetHash()] = 0;
|
|
|
|
// Process this block the same as if we had received it from another node
|
|
if (!ProcessBlock(NULL, pblock))
|
|
return error("BitcoinMiner : ProcessBlock, block not accepted");
|
|
}
|
|
|
|
return true;
|
|
}
|
|
|
|
void static ThreadBitcoinMiner(void* parg);
|
|
|
|
static bool fGenerateBitcoins = false;
|
|
static bool fLimitProcessors = false;
|
|
static int nLimitProcessors = -1;
|
|
|
|
void static BitcoinMiner(CWallet *pwallet)
|
|
{
|
|
printf("BitcoinMiner started\n");
|
|
SetThreadPriority(THREAD_PRIORITY_LOWEST);
|
|
|
|
// Each thread has its own key and counter
|
|
CReserveKey reservekey(pwallet);
|
|
unsigned int nExtraNonce = 0;
|
|
|
|
while (fGenerateBitcoins)
|
|
{
|
|
if (AffinityBugWorkaround(ThreadBitcoinMiner))
|
|
return;
|
|
if (fShutdown)
|
|
return;
|
|
while (vNodes.empty() || IsInitialBlockDownload())
|
|
{
|
|
Sleep(1000);
|
|
if (fShutdown)
|
|
return;
|
|
if (!fGenerateBitcoins)
|
|
return;
|
|
}
|
|
|
|
|
|
//
|
|
// Create new block
|
|
//
|
|
unsigned int nTransactionsUpdatedLast = nTransactionsUpdated;
|
|
CBlockIndex* pindexPrev = pindexBest;
|
|
|
|
auto_ptr<CBlock> pblock(CreateNewBlock(reservekey));
|
|
if (!pblock.get())
|
|
return;
|
|
IncrementExtraNonce(pblock.get(), pindexPrev, nExtraNonce);
|
|
|
|
printf("Running BitcoinMiner with %d transactions in block\n", pblock->vtx.size());
|
|
|
|
|
|
//
|
|
// Prebuild hash buffers
|
|
//
|
|
char pmidstatebuf[32+16]; char* pmidstate = alignup<16>(pmidstatebuf);
|
|
char pdatabuf[128+16]; char* pdata = alignup<16>(pdatabuf);
|
|
char phash1buf[64+16]; char* phash1 = alignup<16>(phash1buf);
|
|
|
|
FormatHashBuffers(pblock.get(), pmidstate, pdata, phash1);
|
|
|
|
unsigned int& nBlockTime = *(unsigned int*)(pdata + 64 + 4);
|
|
unsigned int& nBlockBits = *(unsigned int*)(pdata + 64 + 8);
|
|
unsigned int& nBlockNonce = *(unsigned int*)(pdata + 64 + 12);
|
|
|
|
|
|
//
|
|
// Search
|
|
//
|
|
int64 nStart = GetTime();
|
|
uint256 hashTarget = CBigNum().SetCompact(pblock->nBits).getuint256();
|
|
uint256 hashbuf[2];
|
|
uint256& hash = *alignup<16>(hashbuf);
|
|
loop
|
|
{
|
|
unsigned int nHashesDone = 0;
|
|
unsigned int nNonceFound;
|
|
|
|
// Crypto++ SHA-256
|
|
nNonceFound = ScanHash_CryptoPP(pmidstate, pdata + 64, phash1,
|
|
(char*)&hash, nHashesDone);
|
|
|
|
// Check if something found
|
|
if (nNonceFound != -1)
|
|
{
|
|
for (int i = 0; i < sizeof(hash)/4; i++)
|
|
((unsigned int*)&hash)[i] = ByteReverse(((unsigned int*)&hash)[i]);
|
|
|
|
if (hash <= hashTarget)
|
|
{
|
|
// Found a solution
|
|
pblock->nNonce = ByteReverse(nNonceFound);
|
|
assert(hash == pblock->GetHash());
|
|
|
|
SetThreadPriority(THREAD_PRIORITY_NORMAL);
|
|
CheckWork(pblock.get(), *pwalletMain, reservekey);
|
|
SetThreadPriority(THREAD_PRIORITY_LOWEST);
|
|
break;
|
|
}
|
|
}
|
|
|
|
// Meter hashes/sec
|
|
static int64 nHashCounter;
|
|
if (nHPSTimerStart == 0)
|
|
{
|
|
nHPSTimerStart = GetTimeMillis();
|
|
nHashCounter = 0;
|
|
}
|
|
else
|
|
nHashCounter += nHashesDone;
|
|
if (GetTimeMillis() - nHPSTimerStart > 4000)
|
|
{
|
|
static CCriticalSection cs;
|
|
CRITICAL_BLOCK(cs)
|
|
{
|
|
if (GetTimeMillis() - nHPSTimerStart > 4000)
|
|
{
|
|
dHashesPerSec = 1000.0 * nHashCounter / (GetTimeMillis() - nHPSTimerStart);
|
|
nHPSTimerStart = GetTimeMillis();
|
|
nHashCounter = 0;
|
|
string strStatus = strprintf(" %.0f khash/s", dHashesPerSec/1000.0);
|
|
UIThreadCall(boost::bind(CalledSetStatusBar, strStatus, 0));
|
|
static int64 nLogTime;
|
|
if (GetTime() - nLogTime > 30 * 60)
|
|
{
|
|
nLogTime = GetTime();
|
|
printf("%s ", DateTimeStrFormat("%x %H:%M", GetTime()).c_str());
|
|
printf("hashmeter %3d CPUs %6.0f khash/s\n", vnThreadsRunning[THREAD_MINER], dHashesPerSec/1000.0);
|
|
}
|
|
}
|
|
}
|
|
}
|
|
|
|
// Check for stop or if block needs to be rebuilt
|
|
if (fShutdown)
|
|
return;
|
|
if (!fGenerateBitcoins)
|
|
return;
|
|
if (fLimitProcessors && vnThreadsRunning[THREAD_MINER] > nLimitProcessors)
|
|
return;
|
|
if (vNodes.empty())
|
|
break;
|
|
if (nBlockNonce >= 0xffff0000)
|
|
break;
|
|
if (nTransactionsUpdated != nTransactionsUpdatedLast && GetTime() - nStart > 60)
|
|
break;
|
|
if (pindexPrev != pindexBest)
|
|
break;
|
|
|
|
// Update nTime every few seconds
|
|
pblock->UpdateTime(pindexPrev);
|
|
nBlockTime = ByteReverse(pblock->nTime);
|
|
if (fTestNet)
|
|
{
|
|
// Changing pblock->nTime can change work required on testnet:
|
|
nBlockBits = ByteReverse(pblock->nBits);
|
|
hashTarget = CBigNum().SetCompact(pblock->nBits).getuint256();
|
|
}
|
|
}
|
|
}
|
|
}
|
|
|
|
void static ThreadBitcoinMiner(void* parg)
|
|
{
|
|
CWallet* pwallet = (CWallet*)parg;
|
|
try
|
|
{
|
|
vnThreadsRunning[THREAD_MINER]++;
|
|
BitcoinMiner(pwallet);
|
|
vnThreadsRunning[THREAD_MINER]--;
|
|
}
|
|
catch (std::exception& e) {
|
|
vnThreadsRunning[THREAD_MINER]--;
|
|
PrintException(&e, "ThreadBitcoinMiner()");
|
|
} catch (...) {
|
|
vnThreadsRunning[THREAD_MINER]--;
|
|
PrintException(NULL, "ThreadBitcoinMiner()");
|
|
}
|
|
UIThreadCall(boost::bind(CalledSetStatusBar, "", 0));
|
|
nHPSTimerStart = 0;
|
|
if (vnThreadsRunning[THREAD_MINER] == 0)
|
|
dHashesPerSec = 0;
|
|
printf("ThreadBitcoinMiner exiting, %d threads remaining\n", vnThreadsRunning[THREAD_MINER]);
|
|
}
|
|
|
|
|
|
void GenerateBitcoins(bool fGenerate, CWallet* pwallet)
|
|
{
|
|
fGenerateBitcoins = fGenerate;
|
|
nLimitProcessors = GetArg("-genproclimit", -1);
|
|
if (nLimitProcessors == 0)
|
|
fGenerateBitcoins = false;
|
|
fLimitProcessors = (nLimitProcessors != -1);
|
|
|
|
if (fGenerate)
|
|
{
|
|
int nProcessors = boost::thread::hardware_concurrency();
|
|
printf("%d processors\n", nProcessors);
|
|
if (nProcessors < 1)
|
|
nProcessors = 1;
|
|
if (fLimitProcessors && nProcessors > nLimitProcessors)
|
|
nProcessors = nLimitProcessors;
|
|
int nAddThreads = nProcessors - vnThreadsRunning[THREAD_MINER];
|
|
printf("Starting %d BitcoinMiner threads\n", nAddThreads);
|
|
for (int i = 0; i < nAddThreads; i++)
|
|
{
|
|
if (!CreateThread(ThreadBitcoinMiner, pwallet))
|
|
printf("Error: CreateThread(ThreadBitcoinMiner) failed\n");
|
|
Sleep(10);
|
|
}
|
|
}
|
|
}
|