merge bitcoin#16981: Improve runtime performance of --reindex

This commit is contained in:
Kittywhiskers Van Gogh 2024-10-13 16:46:11 +00:00
parent e531dff5f7
commit 7d9ff96091
No known key found for this signature in database
GPG Key ID: 30CD0C065E5C4AAD
6 changed files with 238 additions and 37 deletions

View File

@ -35,6 +35,7 @@ bench_bench_dash_SOURCES = \
bench/ccoins_caching.cpp \ bench/ccoins_caching.cpp \
bench/gcs_filter.cpp \ bench/gcs_filter.cpp \
bench/hashpadding.cpp \ bench/hashpadding.cpp \
bench/load_external.cpp \
bench/merkle_root.cpp \ bench/merkle_root.cpp \
bench/mempool_eviction.cpp \ bench/mempool_eviction.cpp \
bench/mempool_stress.cpp \ bench/mempool_stress.cpp \

View File

@ -0,0 +1,63 @@
// Copyright (c) 2022 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or https://www.opensource.org/licenses/mit-license.php.
#include <bench/bench.h>
#include <bench/data.h>
#include <chainparams.h>
#include <test/util/setup_common.h>
#include <validation.h>
/**
* The LoadExternalBlockFile() function is used during -reindex and -loadblock.
*
 * Create a test file that's similar to a datadir/blocks/blk?????.dat file.
 * It contains around 134 copies of the same block (typical size of real block files).
* For each block in the file, LoadExternalBlockFile() won't find its parent,
* and so will skip the block. (In the real system, it will re-read the block
* from disk later when it encounters its parent.)
*
* This benchmark measures the performance of deserializing the block (or just
* its header, beginning with PR 16981).
*/
static void LoadExternalBlockFile(benchmark::Bench& bench)
{
    const auto testing_setup{MakeNoLogFileContext<const TestingSetup>(CBaseChainParams::MAIN)};

    // Create a single block as in the blocks files (magic bytes, block size,
    // block data) as a stream object.
    const fs::path blkfile{testing_setup.get()->m_path_root / "blk.dat"};
    CDataStream ss(SER_DISK, 0);
    auto params{Params()};
    ss << params.MessageStart();
    ss << static_cast<uint32_t>(benchmark::data::block813851.size());
    // We can't use the streaming serialization (ss << benchmark::data::block813851)
    // because that first writes a compact size.
    ss.write(MakeByteSpan(benchmark::data::block813851));

    // Create the test file.
    {
        // "wb+" is "binary, O_RDWR | O_CREAT | O_TRUNC".
        FILE* file{fsbridge::fopen(blkfile, "wb+")};
        // fopen() can fail (e.g. permissions, disk full); don't deref a null FILE*.
        if (!file) {
            throw std::runtime_error("open test file for writing failed\n");
        }
        // Make the test block file about 128 MB in length.
        for (size_t i = 0; i < MAX_BLOCKFILE_SIZE / ss.size(); ++i) {
            if (fwrite(ss.data(), 1, ss.size(), file) != ss.size()) {
                fclose(file);
                throw std::runtime_error("write to test file failed\n");
            }
        }
        fclose(file);
    }

    CChainState& chainstate{testing_setup->m_node.chainman->ActiveChainstate()};
    std::multimap<uint256, FlatFilePos> blocks_with_unknown_parent;
    FlatFilePos pos;
    bench.run([&] {
        // "rb" is "binary, O_RDONLY", positioned to the start of the file.
        // The file will be closed by LoadExternalBlockFile().
        FILE* file{fsbridge::fopen(blkfile, "rb")};
        if (!file) {
            throw std::runtime_error("open test file for reading failed\n");
        }
        chainstate.LoadExternalBlockFile(file, &pos, &blocks_with_unknown_parent);
    });
    fs::remove(blkfile);
}
BENCHMARK(LoadExternalBlockFile);

View File

@ -641,7 +641,6 @@ private:
uint64_t nRewind; //!< how many bytes we guarantee to rewind uint64_t nRewind; //!< how many bytes we guarantee to rewind
std::vector<std::byte> vchBuf; //!< the buffer std::vector<std::byte> vchBuf; //!< the buffer
protected:
//! read data from the source to fill the buffer //! read data from the source to fill the buffer
bool Fill() { bool Fill() {
unsigned int pos = nSrcPos % vchBuf.size(); unsigned int pos = nSrcPos % vchBuf.size();
@ -659,6 +658,28 @@ protected:
return true; return true;
} }
//! Advance the stream's read pointer (m_read_pos) by up to 'length' bytes,
//! filling the buffer from the file so that at least one byte is available.
//! Return a pointer to the available buffer data and the number of bytes
//! (which may be less than the requested length) that may be accessed
//! beginning at that pointer.
//! Throws std::ios_base::failure if the advance would cross the read limit
//! set via SetLimit().
std::pair<std::byte*, size_t> AdvanceStream(size_t length)
{
assert(m_read_pos <= nSrcPos);
// Enforce the caller-imposed transfer limit before touching the buffer.
if (m_read_pos + length > nReadLimit) {
throw std::ios_base::failure("Attempt to position past buffer limit");
}
// If there are no bytes available, read from the file.
if (m_read_pos == nSrcPos && length > 0) Fill();
// Offset of the read pointer within the circular buffer.
size_t buffer_offset{static_cast<size_t>(m_read_pos % vchBuf.size())};
// Contiguous bytes before the buffer wraps around to index 0.
size_t buffer_available{static_cast<size_t>(vchBuf.size() - buffer_offset)};
// Bytes actually loaded from the source so far; the returned span must not
// extend past data that has been read from the file.
size_t bytes_until_source_pos{static_cast<size_t>(nSrcPos - m_read_pos)};
// The usable span is bounded by the request, the buffer wrap point, and
// the amount of data available — whichever is smallest.
size_t advance{std::min({length, buffer_available, bytes_until_source_pos})};
m_read_pos += advance;
return std::make_pair(&vchBuf[buffer_offset], advance);
}
public: public:
CBufferedFile(FILE* fileIn, uint64_t nBufSize, uint64_t nRewindIn, int nTypeIn, int nVersionIn) CBufferedFile(FILE* fileIn, uint64_t nBufSize, uint64_t nRewindIn, int nTypeIn, int nVersionIn)
: nType(nTypeIn), nVersion(nVersionIn), nSrcPos(0), m_read_pos(0), nReadLimit(std::numeric_limits<uint64_t>::max()), nRewind(nRewindIn), vchBuf(nBufSize, std::byte{0}) : nType(nTypeIn), nVersion(nVersionIn), nSrcPos(0), m_read_pos(0), nReadLimit(std::numeric_limits<uint64_t>::max()), nRewind(nRewindIn), vchBuf(nBufSize, std::byte{0})
@ -696,24 +717,21 @@ public:
//! read a number of bytes //! read a number of bytes
void read(Span<std::byte> dst) void read(Span<std::byte> dst)
{ {
if (dst.size() + m_read_pos > nReadLimit) {
throw std::ios_base::failure("Read attempted past buffer limit");
}
while (dst.size() > 0) { while (dst.size() > 0) {
if (m_read_pos == nSrcPos) auto [buffer_pointer, length]{AdvanceStream(dst.size())};
Fill(); memcpy(dst.data(), buffer_pointer, length);
unsigned int pos = m_read_pos % vchBuf.size(); dst = dst.subspan(length);
size_t nNow = dst.size();
if (nNow + pos > vchBuf.size())
nNow = vchBuf.size() - pos;
if (nNow + m_read_pos > nSrcPos)
nNow = nSrcPos - m_read_pos;
memcpy(dst.data(), &vchBuf[pos], nNow);
m_read_pos += nNow;
dst = dst.subspan(nNow);
} }
} }
//! Move the read position ahead in the stream to the given position.
//! Use SetPos() to back up in the stream, not SkipTo().
void SkipTo(const uint64_t file_pos)
{
    assert(file_pos >= m_read_pos);
    // Advance through the buffer (refilling from the file as needed) until
    // the read pointer reaches the requested position; AdvanceStream() moves
    // m_read_pos forward by some amount on each call.
    for (uint64_t remaining{file_pos - m_read_pos}; remaining > 0; remaining = file_pos - m_read_pos) {
        AdvanceStream(remaining);
    }
}
//! return the current reading position //! return the current reading position
uint64_t GetPos() const { uint64_t GetPos() const {
return m_read_pos; return m_read_pos;

View File

@ -251,7 +251,7 @@ BOOST_AUTO_TEST_CASE(streams_buffered_file)
BOOST_CHECK(false); BOOST_CHECK(false);
} catch (const std::exception& e) { } catch (const std::exception& e) {
BOOST_CHECK(strstr(e.what(), BOOST_CHECK(strstr(e.what(),
"Read attempted past buffer limit") != nullptr); "Attempt to position past buffer limit") != nullptr);
} }
// The default argument removes the limit completely. // The default argument removes the limit completely.
BOOST_CHECK(bf.SetLimit()); BOOST_CHECK(bf.SetLimit());
@ -320,7 +320,7 @@ BOOST_AUTO_TEST_CASE(streams_buffered_file)
BOOST_CHECK(!bf.SetPos(0)); BOOST_CHECK(!bf.SetPos(0));
// But we should now be positioned at least as far back as allowed // But we should now be positioned at least as far back as allowed
// by the rewind window (relative to our farthest read position, 40). // by the rewind window (relative to our farthest read position, 40).
BOOST_CHECK(bf.GetPos() <= 30); BOOST_CHECK(bf.GetPos() <= 30U);
// We can explicitly close the file, or the destructor will do it. // We can explicitly close the file, or the destructor will do it.
bf.fclose(); bf.fclose();
@ -328,6 +328,55 @@ BOOST_AUTO_TEST_CASE(streams_buffered_file)
fs::remove("streams_test_tmp"); fs::remove("streams_test_tmp");
} }
BOOST_AUTO_TEST_CASE(streams_buffered_file_skip)
{
    fs::path streams_test_filename = m_args.GetDataDirBase() / "streams_test_tmp";
    FILE* file = fsbridge::fopen(streams_test_filename, "w+b");
    // The value at each offset is the byte offset (e.g. byte 1 in the file has the value 0x01).
    for (uint8_t val = 0; val < 40; ++val) {
        fwrite(&val, 1, 1, file);
    }
    rewind(file);

    // The buffer is 25 bytes, allow rewinding 10 bytes.
    CBufferedFile bf(file, 25, 10, 222, 333);

    uint8_t byte_read;
    // Skipping is like bf >> (7-byte-variable): data is pulled from the file
    // into the buffer, but it is not copied out to us.
    bf.SkipTo(7);
    BOOST_CHECK_EQUAL(bf.GetPos(), 7U);
    bf >> byte_read;
    BOOST_CHECK_EQUAL(byte_read, 7);

    // The bytes in the buffer up to offset 7 are valid and can be read.
    BOOST_CHECK(bf.SetPos(0));
    bf >> byte_read;
    BOOST_CHECK_EQUAL(byte_read, 0);
    bf >> byte_read;
    BOOST_CHECK_EQUAL(byte_read, 1);

    bf.SkipTo(11);
    bf >> byte_read;
    BOOST_CHECK_EQUAL(byte_read, 11);

    // SkipTo() honors the transfer limit; we can't position beyond the limit.
    bf.SetLimit(13);
    try {
        bf.SkipTo(14);
        BOOST_CHECK(false);
    } catch (const std::exception& e) {
        BOOST_CHECK(strstr(e.what(), "Attempt to position past buffer limit") != nullptr);
    }

    // We can position exactly to the transfer limit.
    bf.SkipTo(13);
    BOOST_CHECK_EQUAL(bf.GetPos(), 13U);

    bf.fclose();
    fs::remove(streams_test_filename);
}
BOOST_AUTO_TEST_CASE(streams_buffered_file_rand) BOOST_AUTO_TEST_CASE(streams_buffered_file_rand)
{ {
// Make this test deterministic. // Make this test deterministic.
@ -358,7 +407,7 @@ BOOST_AUTO_TEST_CASE(streams_buffered_file_rand)
// sizes; the boundaries of the objects can interact arbitrarily // sizes; the boundaries of the objects can interact arbitrarily
// with the CBufferFile's internal buffer. These first three // with the CBufferFile's internal buffer. These first three
// cases simulate objects of various sizes (1, 2, 5 bytes). // cases simulate objects of various sizes (1, 2, 5 bytes).
switch (InsecureRandRange(5)) { switch (InsecureRandRange(6)) {
case 0: { case 0: {
uint8_t a[1]; uint8_t a[1];
if (currentPos + 1 > fileSize) if (currentPos + 1 > fileSize)
@ -396,6 +445,16 @@ BOOST_AUTO_TEST_CASE(streams_buffered_file_rand)
break; break;
} }
case 3: { case 3: {
// SkipTo is similar to the "read" cases above, except
// we don't receive the data.
size_t skip_length{static_cast<size_t>(InsecureRandRange(5))};
if (currentPos + skip_length > fileSize) continue;
bf.SetLimit(currentPos + skip_length);
bf.SkipTo(currentPos + skip_length);
currentPos += skip_length;
break;
}
case 4: {
// Find a byte value (that is at or ahead of the current position). // Find a byte value (that is at or ahead of the current position).
size_t find = currentPos + InsecureRandRange(8); size_t find = currentPos + InsecureRandRange(8);
if (find >= fileSize) if (find >= fileSize)
@ -412,7 +471,7 @@ BOOST_AUTO_TEST_CASE(streams_buffered_file_rand)
currentPos++; currentPos++;
break; break;
} }
case 4: { case 5: {
size_t requestPos = InsecureRandRange(maxPos + 4); size_t requestPos = InsecureRandRange(maxPos + 4);
bool okay = bf.SetPos(requestPos); bool okay = bf.SetPos(requestPos);
// The new position may differ from the requested position // The new position may differ from the requested position

View File

@ -4603,6 +4603,8 @@ void CChainState::LoadExternalBlockFile(
unsigned int nMaxBlockSize = MaxBlockSize(); unsigned int nMaxBlockSize = MaxBlockSize();
// This takes over fileIn and calls fclose() on it in the CBufferedFile destructor // This takes over fileIn and calls fclose() on it in the CBufferedFile destructor
CBufferedFile blkdat(fileIn, 2*nMaxBlockSize, nMaxBlockSize+8, SER_DISK, CLIENT_VERSION); CBufferedFile blkdat(fileIn, 2*nMaxBlockSize, nMaxBlockSize+8, SER_DISK, CLIENT_VERSION);
// nRewind indicates where to resume scanning in case something goes wrong,
// such as a block fails to deserialize.
uint64_t nRewind = blkdat.GetPos(); uint64_t nRewind = blkdat.GetPos();
while (!blkdat.eof()) { while (!blkdat.eof()) {
if (ShutdownRequested()) return; if (ShutdownRequested()) return;
@ -4626,28 +4628,30 @@ void CChainState::LoadExternalBlockFile(
continue; continue;
} catch (const std::exception&) { } catch (const std::exception&) {
// no valid block header found; don't complain // no valid block header found; don't complain
// (this happens at the end of every blk.dat file)
break; break;
} }
try { try {
// read block // read block header
uint64_t nBlockPos = blkdat.GetPos(); const uint64_t nBlockPos{blkdat.GetPos()};
if (dbp) if (dbp)
dbp->nPos = nBlockPos; dbp->nPos = nBlockPos;
blkdat.SetLimit(nBlockPos + nSize); blkdat.SetLimit(nBlockPos + nSize);
std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>(); CBlockHeader header;
CBlock& block = *pblock; blkdat >> header;
blkdat >> block; const uint256 hash{header.GetHash()};
nRewind = blkdat.GetPos(); // Skip the rest of this block (this may read from disk into memory); position to the marker before the
// next block, but it's still possible to rewind to the start of the current block (without a disk read).
uint256 hash = block.GetHash(); nRewind = nBlockPos + nSize;
blkdat.SkipTo(nRewind);
{ {
LOCK(cs_main); LOCK(cs_main);
// detect out of order blocks, and store them for later // detect out of order blocks, and store them for later
if (hash != m_params.GetConsensus().hashGenesisBlock && !m_blockman.LookupBlockIndex(block.hashPrevBlock)) { if (hash != m_params.GetConsensus().hashGenesisBlock && !m_blockman.LookupBlockIndex(header.hashPrevBlock)) {
LogPrint(BCLog::REINDEX, "%s: Out of order block %s, parent %s not known\n", __func__, hash.ToString(), LogPrint(BCLog::REINDEX, "%s: Out of order block %s, parent %s not known\n", __func__, hash.ToString(),
block.hashPrevBlock.ToString()); header.hashPrevBlock.ToString());
if (dbp && blocks_with_unknown_parent) { if (dbp && blocks_with_unknown_parent) {
blocks_with_unknown_parent->emplace(block.hashPrevBlock, *dbp); blocks_with_unknown_parent->emplace(header.hashPrevBlock, *dbp);
} }
continue; continue;
} }
@ -4655,6 +4659,12 @@ void CChainState::LoadExternalBlockFile(
// process in case the block isn't known yet // process in case the block isn't known yet
const CBlockIndex* pindex = m_blockman.LookupBlockIndex(hash); const CBlockIndex* pindex = m_blockman.LookupBlockIndex(hash);
if (!pindex || (pindex->nStatus & BLOCK_HAVE_DATA) == 0) { if (!pindex || (pindex->nStatus & BLOCK_HAVE_DATA) == 0) {
// This block can be processed immediately; rewind to its start, read and deserialize it.
blkdat.SetPos(nBlockPos);
std::shared_ptr<CBlock> pblock{std::make_shared<CBlock>()};
blkdat >> *pblock;
nRewind = blkdat.GetPos();
BlockValidationState state; BlockValidationState state;
if (AcceptBlock(pblock, state, nullptr, true, dbp, nullptr)) { if (AcceptBlock(pblock, state, nullptr, true, dbp, nullptr)) {
nLoaded++; nLoaded++;

View File

@ -7,9 +7,12 @@
- Start a single node and generate 3 blocks. - Start a single node and generate 3 blocks.
- Stop the node and restart it with -reindex. Verify that the node has reindexed up to block 3. - Stop the node and restart it with -reindex. Verify that the node has reindexed up to block 3.
- Stop the node and restart it with -reindex-chainstate. Verify that the node has reindexed up to block 3. - Stop the node and restart it with -reindex-chainstate. Verify that the node has reindexed up to block 3.
- Verify that out-of-order blocks are correctly processed, see LoadExternalBlockFile()
""" """
import os
from test_framework.test_framework import BitcoinTestFramework from test_framework.test_framework import BitcoinTestFramework
from test_framework.p2p import MAGIC_BYTES
from test_framework.util import assert_equal from test_framework.util import assert_equal
@ -27,6 +30,50 @@ class ReindexTest(BitcoinTestFramework):
assert_equal(self.nodes[0].getblockcount(), blockcount) # start_node is blocking on reindex assert_equal(self.nodes[0].getblockcount(), blockcount) # start_node is blocking on reindex
self.log.info("Success") self.log.info("Success")
# Check that blocks can be processed out of order
def out_of_order(self):
    # The previous test created 24 blocks
    assert_equal(self.nodes[0].getblockcount(), 24)
    self.stop_nodes()

    # In this test environment, blocks will always be in order (since
    # we're generating them rather than getting them from peers), so to
    # test out-of-order handling, swap blocks 1 and 2 on disk.
    blk0 = os.path.join(self.nodes[0].datadir, self.nodes[0].chain, 'blocks', 'blk00000.dat')
    with open(blk0, 'r+b') as bf:
        # Read at least the first few blocks (including genesis)
        data = bf.read(2000)

        # Find the offsets of blocks 2, 3, and 4 (the first 3 blocks beyond
        # genesis) by searching for the regtest marker bytes (see
        # pchMessageStart); the block payload starts right after the marker.
        def next_block_start(search_from):
            return data.find(MAGIC_BYTES["regtest"], search_from) + 4

        genesis_start = next_block_start(0)
        assert_equal(genesis_start, 4)
        b2_start = next_block_start(genesis_start)
        b3_start = next_block_start(b2_start)
        b4_start = next_block_start(b3_start)

        # Blocks 2 and 3 should be the same size.
        assert_equal(b3_start - b2_start, b4_start - b3_start)

        # Swap the second and third blocks (don't disturb the genesis block).
        bf.seek(b2_start)
        bf.write(data[b3_start:b4_start])
        bf.write(data[b2_start:b3_start])

    # The reindexing code should detect and accommodate out of order blocks.
    with self.nodes[0].assert_debug_log([
        'LoadExternalBlockFile: Out of order block',
        'LoadExternalBlockFile: Processing out of order child',
    ]):
        self.start_nodes([["-reindex"]])

    # All blocks should be accepted and processed.
    assert_equal(self.nodes[0].getblockcount(), 24)
def run_test(self): def run_test(self):
for txindex in [0, 1]: for txindex in [0, 1]:
self.reindex(False, txindex) self.reindex(False, txindex)
@ -34,5 +81,8 @@ class ReindexTest(BitcoinTestFramework):
self.reindex(False, txindex) self.reindex(False, txindex)
self.reindex(True, txindex) self.reindex(True, txindex)
self.out_of_order()
if __name__ == '__main__': if __name__ == '__main__':
ReindexTest().main() ReindexTest().main()