// Copyright (c) 2009-2010 Satoshi Nakamoto
// Copyright (c) 2009-2015 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.

#include <txdb.h>

#include <chainparams.h>
#include <hash.h>
#include <random.h>
#include <pow.h>
#include <uint256.h>
#include <util.h>
#include <ui_interface.h>
#include <init.h>

#include <stdint.h>

#include <boost/thread.hpp>
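
// Single-character prefixes used to namespace records in the LevelDB-backed
// databases below: every entry is keyed as (prefix, serialized key), so
// related records sort together and can be scanned with a single Seek() on
// the prefix.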
static const char DB_COIN = 'C';
static const char DB_COINS = 'c';
static const char DB_BLOCK_FILES = 'f';
static const char DB_TXINDEX = 't';
static const char DB_ADDRESSINDEX = 'a';
static const char DB_ADDRESSUNSPENTINDEX = 'u';
static const char DB_TIMESTAMPINDEX = 's';
static const char DB_SPENTINDEX = 'p';
static const char DB_BLOCK_INDEX = 'b';

static const char DB_BEST_BLOCK = 'B';
static const char DB_HEAD_BLOCKS = 'H';
static const char DB_FLAG = 'F';
static const char DB_REINDEX_FLAG = 'R';
static const char DB_LAST_BLOCK = 'l';

namespace {

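// Wrapper that serializes a database key for one coin: the DB_COIN prefix
// followed by the outpoint's txid and its VARINT-compressed output index.
// It only borrows the COutPoint it points at, so that object must outlive
// the CoinEntry.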
struct CoinEntry {
    COutPoint* outpoint;
    char key;
    explicit CoinEntry(const COutPoint* ptr) : outpoint(const_cast<COutPoint*>(ptr)), key(DB_COIN) {}

    template<typename Stream>
    void Serialize(Stream &s) const {
        s << key;
        s << outpoint->hash;
        s << VARINT(outpoint->n);
    }

    template<typename Stream>
    void Unserialize(Stream& s) {
        s >> key;
        s >> outpoint->hash;
        s >> VARINT(outpoint->n);
    }
};

}

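// The coins (chainstate) database lives under <datadir>/chainstate. The
// trailing 'true' passed to CDBWrapper is, per the upstream parameter order,
// the obfuscate flag, so coin records are not stored on disk in plain form.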
CCoinsViewDB::CCoinsViewDB(size_t nCacheSize, bool fMemory, bool fWipe) : db(GetDataDir() / "chainstate", nCacheSize, fMemory, fWipe, true)
{
}

bool CCoinsViewDB::GetCoin(const COutPoint &outpoint, Coin &coin) const {
    return db.Read(CoinEntry(&outpoint), coin);
}

bool CCoinsViewDB::HaveCoin(const COutPoint &outpoint) const {
    return db.Exists(CoinEntry(&outpoint));
}

uint256 CCoinsViewDB::GetBestBlock() const {
    uint256 hashBestChain;
    if (!db.Read(DB_BEST_BLOCK, hashBestChain))
        return uint256();
    return hashBestChain;
}

std::vector<uint256> CCoinsViewDB::GetHeadBlocks() const {
    std::vector<uint256> vhashHeadBlocks;
    if (!db.Read(DB_HEAD_BLOCKS, vhashHeadBlocks)) {
        return std::vector<uint256>();
    }
    return vhashHeadBlocks;
}

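// Flush a batch of cache entries to disk. To stay crash-consistent, the
// first write replaces DB_BEST_BLOCK with a DB_HEAD_BLOCKS marker naming the
// new and old tips; only after every coin record has been written is the
// marker erased and DB_BEST_BLOCK restored, so an interrupted flush can be
// detected and replayed on startup.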
bool CCoinsViewDB::BatchWrite(CCoinsMap &mapCoins, const uint256 &hashBlock) {
    CDBBatch batch(db);
    size_t count = 0;
    size_t changed = 0;
    size_t batch_size = (size_t)gArgs.GetArg("-dbbatchsize", nDefaultDbBatchSize);
    int crash_simulate = gArgs.GetArg("-dbcrashratio", 0);
    assert(!hashBlock.IsNull());

    uint256 old_tip = GetBestBlock();
    if (old_tip.IsNull()) {
        // We may be in the middle of replaying.
        std::vector<uint256> old_heads = GetHeadBlocks();
        if (old_heads.size() == 2) {
            assert(old_heads[0] == hashBlock);
            old_tip = old_heads[1];
        }
    }

    // In the first batch, mark the database as being in the middle of a
    // transition from old_tip to hashBlock.
    // A vector is used for future extensibility, as we may want to support
    // interrupting after partial writes from multiple independent reorgs.
    batch.Erase(DB_BEST_BLOCK);
    batch.Write(DB_HEAD_BLOCKS, std::vector<uint256>{hashBlock, old_tip});

    for (CCoinsMap::iterator it = mapCoins.begin(); it != mapCoins.end();) {
        if (it->second.flags & CCoinsCacheEntry::DIRTY) {
            CoinEntry entry(&it->first);
            if (it->second.coin.IsSpent())
                batch.Erase(entry);
            else
                batch.Write(entry, it->second.coin);
            changed++;
        }
        count++;
        CCoinsMap::iterator itOld = it++;
        mapCoins.erase(itOld);
        if (batch.SizeEstimate() > batch_size) {
            LogPrint(BCLog::COINDB, "Writing partial batch of %.2f MiB\n", batch.SizeEstimate() * (1.0 / 1048576.0));
            db.WriteBatch(batch);
            batch.Clear();
            if (crash_simulate) {
                static FastRandomContext rng;
                if (rng.randrange(crash_simulate) == 0) {
                    LogPrintf("Simulating a crash. Goodbye.\n");
                    _Exit(0);
                }
            }
        }
    }

    // In the last batch, mark the database as consistent with hashBlock again.
    batch.Erase(DB_HEAD_BLOCKS);
    batch.Write(DB_BEST_BLOCK, hashBlock);

    LogPrint(BCLog::COINDB, "Writing final batch of %.2f MiB\n", batch.SizeEstimate() * (1.0 / 1048576.0));
    bool ret = db.WriteBatch(batch);
    LogPrint(BCLog::COINDB, "Committed %u changed transaction outputs (out of %u) to coin database...\n", (unsigned int)changed, (unsigned int)count);
    return ret;
}

size_t CCoinsViewDB::EstimateSize() const
{
    return db.EstimateSize(DB_COIN, (char)(DB_COIN+1));
}

CBlockTreeDB::CBlockTreeDB(size_t nCacheSize, bool fMemory, bool fWipe) : CDBWrapper(gArgs.IsArgSet("-blocksdir") ? GetDataDir() / "blocks" / "index" : GetBlocksDir() / "index", nCacheSize, fMemory, fWipe), mapHasTxIndexCache(10000, 20000) {
}

bool CBlockTreeDB::ReadBlockFileInfo(int nFile, CBlockFileInfo &info) {
    return Read(std::make_pair(DB_BLOCK_FILES, nFile), info);
}

bool CBlockTreeDB::WriteReindexing(bool fReindexing) {
    if (fReindexing)
        return Write(DB_REINDEX_FLAG, '1');
    else
        return Erase(DB_REINDEX_FLAG);
}

bool CBlockTreeDB::ReadReindexing(bool &fReindexing) {
    fReindexing = Exists(DB_REINDEX_FLAG);
    return true;
}

bool CBlockTreeDB::ReadLastBlockFile(int &nFile) {
    return Read(DB_LAST_BLOCK, nFile);
}

CCoinsViewCursor *CCoinsViewDB::Cursor() const
{
    CCoinsViewDBCursor *i = new CCoinsViewDBCursor(const_cast<CDBWrapper&>(db).NewIterator(), GetBestBlock());
    /* It seems that there are no "const iterators" for LevelDB. Since we
       only need read operations on it, use a const-cast to get around
       that restriction. */
    i->pcursor->Seek(DB_COIN);
    // Cache key of first record
    if (i->pcursor->Valid()) {
        CoinEntry entry(&i->keyTmp.second);
        i->pcursor->GetKey(entry);
        i->keyTmp.first = entry.key;
    } else {
        i->keyTmp.first = 0; // Make sure Valid() and GetKey() return false
    }
    return i;
}

bool CCoinsViewDBCursor::GetKey(COutPoint &key) const
{
    // Return cached key
    if (keyTmp.first == DB_COIN) {
        key = keyTmp.second;
        return true;
    }
    return false;
}

bool CCoinsViewDBCursor::GetValue(Coin &coin) const
{
    return pcursor->GetValue(coin);
}

unsigned int CCoinsViewDBCursor::GetValueSize() const
{
    return pcursor->GetValueSize();
}

bool CCoinsViewDBCursor::Valid() const
{
    return keyTmp.first == DB_COIN;
}

void CCoinsViewDBCursor::Next()
{
    pcursor->Next();
    CoinEntry entry(&keyTmp.second);
    if (!pcursor->Valid() || !pcursor->GetKey(entry)) {
        keyTmp.first = 0; // Invalidate cached key after last record so that Valid() and GetKey() return false
    } else {
        keyTmp.first = entry.key;
    }
}

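// Persist block-file metadata, the last block file number and the given
// CDiskBlockIndex records in one batch; the 'true' passed to WriteBatch
// requests a synchronous write (the fSync flag upstream) so the block index
// on disk cannot lag the block files.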
bool CBlockTreeDB::WriteBatchSync(const std::vector<std::pair<int, const CBlockFileInfo*> >& fileInfo, int nLastFile, const std::vector<const CBlockIndex*>& blockinfo) {
    CDBBatch batch(*this);
    for (std::vector<std::pair<int, const CBlockFileInfo*> >::const_iterator it=fileInfo.begin(); it != fileInfo.end(); it++) {
        batch.Write(std::make_pair(DB_BLOCK_FILES, it->first), *it->second);
    }
    batch.Write(DB_LAST_BLOCK, nLastFile);
    for (std::vector<const CBlockIndex*>::const_iterator it=blockinfo.begin(); it != blockinfo.end(); it++) {
        batch.Write(std::make_pair(DB_BLOCK_INDEX, (*it)->GetBlockHash()), CDiskBlockIndex(*it));
    }
    return WriteBatch(batch, true);
}

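// Existence checks against the transaction index are cached in
// mapHasTxIndexCache (guarded by cs) so repeated lookups for the same txid
// do not have to touch LevelDB each time; the scoped block releases the lock
// while the database itself is queried.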
bool CBlockTreeDB::HasTxIndex(const uint256& txid) {
    {
        LOCK(cs);
        auto it = mapHasTxIndexCache.find(txid);
        if (it != mapHasTxIndexCache.end()) {
            return it->second;
        }
    }
    bool r = Exists(std::make_pair(DB_TXINDEX, txid));
    LOCK(cs);
    mapHasTxIndexCache.insert(std::make_pair(txid, r));
    return r;
}

bool CBlockTreeDB::ReadTxIndex(const uint256 &txid, CDiskTxPos &pos) {
    bool r = Read(std::make_pair(DB_TXINDEX, txid), pos);
    LOCK(cs);
    mapHasTxIndexCache.insert_or_update(std::make_pair(txid, r));
    return r;
}

bool CBlockTreeDB::WriteTxIndex(const std::vector<std::pair<uint256, CDiskTxPos> >&vect) {
    CDBBatch batch(*this);
    for (std::vector<std::pair<uint256,CDiskTxPos> >::const_iterator it=vect.begin(); it!=vect.end(); it++)
        batch.Write(std::make_pair(DB_TXINDEX, it->first), it->second);
    bool ret = WriteBatch(batch);
    LOCK(cs);
    for (auto& p : vect) {
        mapHasTxIndexCache.insert_or_update(std::make_pair(p.first, true));
    }
    return ret;
}

bool CBlockTreeDB::ReadSpentIndex(CSpentIndexKey &key, CSpentIndexValue &value) {
    return Read(std::make_pair(DB_SPENTINDEX, key), value);
}

bool CBlockTreeDB::UpdateSpentIndex(const std::vector<std::pair<CSpentIndexKey, CSpentIndexValue> >&vect) {
    CDBBatch batch(*this);
    for (std::vector<std::pair<CSpentIndexKey,CSpentIndexValue> >::const_iterator it=vect.begin(); it!=vect.end(); it++) {
        if (it->second.IsNull()) {
            batch.Erase(std::make_pair(DB_SPENTINDEX, it->first));
        } else {
            batch.Write(std::make_pair(DB_SPENTINDEX, it->first), it->second);
        }
    }
    return WriteBatch(batch);
}

bool CBlockTreeDB::UpdateAddressUnspentIndex(const std::vector<std::pair<CAddressUnspentKey, CAddressUnspentValue > >&vect) {
    CDBBatch batch(*this);
    for (std::vector<std::pair<CAddressUnspentKey, CAddressUnspentValue> >::const_iterator it=vect.begin(); it!=vect.end(); it++) {
        if (it->second.IsNull()) {
            batch.Erase(std::make_pair(DB_ADDRESSUNSPENTINDEX, it->first));
        } else {
            batch.Write(std::make_pair(DB_ADDRESSUNSPENTINDEX, it->first), it->second);
        }
    }
    return WriteBatch(batch);
}

bool CBlockTreeDB::ReadAddressUnspentIndex(uint160 addressHash, int type,
                                           std::vector<std::pair<CAddressUnspentKey, CAddressUnspentValue> > &unspentOutputs) {

    std::unique_ptr<CDBIterator> pcursor(NewIterator());

    pcursor->Seek(std::make_pair(DB_ADDRESSUNSPENTINDEX, CAddressIndexIteratorKey(type, addressHash)));

    while (pcursor->Valid()) {
        boost::this_thread::interruption_point();
        std::pair<char,CAddressUnspentKey> key;
        if (pcursor->GetKey(key) && key.first == DB_ADDRESSUNSPENTINDEX && key.second.hashBytes == addressHash) {
            CAddressUnspentValue nValue;
            if (pcursor->GetValue(nValue)) {
                unspentOutputs.push_back(std::make_pair(key.second, nValue));
                pcursor->Next();
            } else {
                return error("failed to get address unspent value");
            }
        } else {
            break;
        }
    }

    return true;
}

bool CBlockTreeDB::WriteAddressIndex(const std::vector<std::pair<CAddressIndexKey, CAmount > >&vect) {
    CDBBatch batch(*this);
    for (std::vector<std::pair<CAddressIndexKey, CAmount> >::const_iterator it=vect.begin(); it!=vect.end(); it++)
        batch.Write(std::make_pair(DB_ADDRESSINDEX, it->first), it->second);
    return WriteBatch(batch);
}

bool CBlockTreeDB::EraseAddressIndex(const std::vector<std::pair<CAddressIndexKey, CAmount > >&vect) {
    CDBBatch batch(*this);
    for (std::vector<std::pair<CAddressIndexKey, CAmount> >::const_iterator it=vect.begin(); it!=vect.end(); it++)
        batch.Erase(std::make_pair(DB_ADDRESSINDEX, it->first));
    return WriteBatch(batch);
}

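// Scan the address index for one (type, addressHash) pair. When a positive
// start/end height range is given, the scan begins at a height-qualified key
// and stops once a record's blockHeight exceeds 'end'; otherwise all entries
// for the address are returned.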
bool CBlockTreeDB::ReadAddressIndex(uint160 addressHash, int type,
                                    std::vector<std::pair<CAddressIndexKey, CAmount> > &addressIndex,
                                    int start, int end) {

    std::unique_ptr<CDBIterator> pcursor(NewIterator());

    if (start > 0 && end > 0) {
        pcursor->Seek(std::make_pair(DB_ADDRESSINDEX, CAddressIndexIteratorHeightKey(type, addressHash, start)));
    } else {
        pcursor->Seek(std::make_pair(DB_ADDRESSINDEX, CAddressIndexIteratorKey(type, addressHash)));
    }

    while (pcursor->Valid()) {
        boost::this_thread::interruption_point();
        std::pair<char,CAddressIndexKey> key;
        if (pcursor->GetKey(key) && key.first == DB_ADDRESSINDEX && key.second.hashBytes == addressHash) {
            if (end > 0 && key.second.blockHeight > end) {
                break;
            }
            CAmount nValue;
            if (pcursor->GetValue(nValue)) {
                addressIndex.push_back(std::make_pair(key.second, nValue));
                pcursor->Next();
            } else {
                return error("failed to get address index value");
            }
        } else {
            break;
        }
    }

    return true;
}

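// The timestamp index stores one (timestamp, block hash) key per block with a
// dummy value; ReadTimestampIndex seeks to 'low' and collects block hashes
// until a key's timestamp exceeds 'high'.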
bool CBlockTreeDB::WriteTimestampIndex(const CTimestampIndexKey &timestampIndex) {
    CDBBatch batch(*this);
    batch.Write(std::make_pair(DB_TIMESTAMPINDEX, timestampIndex), 0);
    return WriteBatch(batch);
}

bool CBlockTreeDB::ReadTimestampIndex(const unsigned int &high, const unsigned int &low, std::vector<uint256> &hashes) {

    std::unique_ptr<CDBIterator> pcursor(NewIterator());

    pcursor->Seek(std::make_pair(DB_TIMESTAMPINDEX, CTimestampIndexIteratorKey(low)));

    while (pcursor->Valid()) {
        boost::this_thread::interruption_point();
        std::pair<char, CTimestampIndexKey> key;
        if (pcursor->GetKey(key) && key.first == DB_TIMESTAMPINDEX && key.second.timestamp <= high) {
            hashes.push_back(key.second.blockHash);
            pcursor->Next();
        } else {
            break;
        }
    }

    return true;
}

bool CBlockTreeDB::WriteFlag(const std::string &name, bool fValue) {
    return Write(std::make_pair(DB_FLAG, name), fValue ? '1' : '0');
}

bool CBlockTreeDB::ReadFlag(const std::string &name, bool &fValue) {
    char ch;
    if (!Read(std::make_pair(DB_FLAG, name), ch))
        return false;
    fValue = ch == '1';
    return true;
}

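// Rebuild the in-memory block index: iterate every DB_BLOCK_INDEX record,
// materialize a CBlockIndex via insertBlockIndex() for the block and its
// parent, copy the on-disk fields across, and reject entries whose header
// fails CheckProofOfWork.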
bool CBlockTreeDB::LoadBlockIndexGuts(const Consensus::Params& consensusParams, std::function<CBlockIndex*(const uint256&)> insertBlockIndex)
{
    std::unique_ptr<CDBIterator> pcursor(NewIterator());

    pcursor->Seek(std::make_pair(DB_BLOCK_INDEX, uint256()));

    // Load mapBlockIndex
    while (pcursor->Valid()) {
        boost::this_thread::interruption_point();
        std::pair<char, uint256> key;
        if (pcursor->GetKey(key) && key.first == DB_BLOCK_INDEX) {
            CDiskBlockIndex diskindex;
            if (pcursor->GetValue(diskindex)) {
                // Construct block index object
                CBlockIndex* pindexNew = insertBlockIndex(diskindex.GetBlockHash());
                pindexNew->pprev = insertBlockIndex(diskindex.hashPrev);
                pindexNew->nHeight = diskindex.nHeight;
                pindexNew->nFile = diskindex.nFile;
                pindexNew->nDataPos = diskindex.nDataPos;
                pindexNew->nUndoPos = diskindex.nUndoPos;
                pindexNew->nVersion = diskindex.nVersion;
                pindexNew->hashMerkleRoot = diskindex.hashMerkleRoot;
                pindexNew->nTime = diskindex.nTime;
                pindexNew->nBits = diskindex.nBits;
                pindexNew->nNonce = diskindex.nNonce;
                pindexNew->nStatus = diskindex.nStatus;
                pindexNew->nTx = diskindex.nTx;

                if (!CheckProofOfWork(pindexNew->GetBlockHash(), pindexNew->nBits, consensusParams))
                    return error("%s: CheckProofOfWork failed: %s", __func__, pindexNew->ToString());

                pcursor->Next();
            } else {
                return error("%s: failed to read value", __func__);
            }
        } else {
            break;
        }
    }

    return true;
}

namespace {

//! Legacy class to deserialize pre-pertxout database entries without reindex.
class CCoins
{
public:
    //! whether transaction is a coinbase
    bool fCoinBase;

    //! unspent transaction outputs; spent outputs are .IsNull(); spent outputs at the end of the array are dropped
    std::vector<CTxOut> vout;

    //! at which height this transaction was included in the active block chain
    int nHeight;

    //! empty constructor
    CCoins() : fCoinBase(false), vout(0), nHeight(0) { }

    template<typename Stream>
    void Unserialize(Stream &s) {
        unsigned int nCode = 0;
        // version
        unsigned int nVersionDummy;
        ::Unserialize(s, VARINT(nVersionDummy));
        // header code
        ::Unserialize(s, VARINT(nCode));
        fCoinBase = nCode & 1;
        std::vector<bool> vAvail(2, false);
        vAvail[0] = (nCode & 2) != 0;
        vAvail[1] = (nCode & 4) != 0;
        unsigned int nMaskCode = (nCode / 8) + ((nCode & 6) != 0 ? 0 : 1);
        // spentness bitmask
        while (nMaskCode > 0) {
            unsigned char chAvail = 0;
            ::Unserialize(s, chAvail);
            for (unsigned int p = 0; p < 8; p++) {
                bool f = (chAvail & (1 << p)) != 0;
                vAvail.push_back(f);
            }
            if (chAvail != 0)
                nMaskCode--;
        }
        // txouts themself
        vout.assign(vAvail.size(), CTxOut());
        for (unsigned int i = 0; i < vAvail.size(); i++) {
            if (vAvail[i])
                ::Unserialize(s, CTxOutCompressor(vout[i]));
        }
        // coinbase height
        ::Unserialize(s, VARINT(nHeight, VarIntMode::NONNEGATIVE_SIGNED));
    }
};

}

/** Upgrade the database from older formats.
 *
 * Currently implemented: from the per-tx utxo model (0.8..0.14.x) to per-txout.
 */
bool CCoinsViewDB::Upgrade() {
    std::unique_ptr<CDBIterator> pcursor(db.NewIterator());
    pcursor->Seek(std::make_pair(DB_COINS, uint256()));
    if (!pcursor->Valid()) {
        return true;
    }

    int64_t count = 0;
    LogPrintf("Upgrading utxo-set database...\n");
    LogPrintf("[0%%]..."); /* Continued */
    uiInterface.ShowProgress(_("Upgrading UTXO database"), 0, true);
    size_t batch_size = 1 << 24;
    CDBBatch batch(db);
    int reportDone = 0;
    std::pair<unsigned char, uint256> key;
    std::pair<unsigned char, uint256> prev_key = {DB_COINS, uint256()};
    while (pcursor->Valid()) {
        boost::this_thread::interruption_point();
        if (ShutdownRequested()) {
            break;
        }
        if (pcursor->GetKey(key) && key.first == DB_COINS) {
            if (count++ % 256 == 0) {
                uint32_t high = 0x100 * *key.second.begin() + *(key.second.begin() + 1);
                int percentageDone = (int)(high * 100.0 / 65536.0 + 0.5);
                uiInterface.ShowProgress(_("Upgrading UTXO database"), percentageDone, true);
                if (reportDone < percentageDone/10) {
                    // report max. every 10% step
                    LogPrintf("[%d%%]...", percentageDone); /* Continued */
                    reportDone = percentageDone/10;
                }
            }
            CCoins old_coins;
            if (!pcursor->GetValue(old_coins)) {
                return error("%s: cannot parse CCoins record", __func__);
            }
            COutPoint outpoint(key.second, 0);
            for (size_t i = 0; i < old_coins.vout.size(); ++i) {
                if (!old_coins.vout[i].IsNull() && !old_coins.vout[i].scriptPubKey.IsUnspendable()) {
                    Coin newcoin(std::move(old_coins.vout[i]), old_coins.nHeight, old_coins.fCoinBase);
                    outpoint.n = i;
                    CoinEntry entry(&outpoint);
                    batch.Write(entry, newcoin);
                }
            }
            batch.Erase(key);
            if (batch.SizeEstimate() > batch_size) {
                db.WriteBatch(batch);
                batch.Clear();
                db.CompactRange(prev_key, key);
                prev_key = key;
            }
            pcursor->Next();
        } else {
            break;
        }
    }
    db.WriteBatch(batch);
    db.CompactRange({DB_COINS, uint256()}, key);
    uiInterface.ShowProgress("", 100, false);
    LogPrintf("[%s].\n", ShutdownRequested() ? "CANCELLED" : "DONE");
    return !ShutdownRequested();
}
|