// dash/src/dbwrapper.cpp

// Copyright (c) 2012-2015 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include <dbwrapper.h>
#include <memory>
#include <random.h>
#include <leveldb/cache.h>
#include <leveldb/env.h>
#include <leveldb/filter_policy.h>
#include <memenv.h>
#include <stdint.h>
#include <algorithm>

class CBitcoinLevelDBLogger : public leveldb::Logger {
public:
    // This code is adapted from posix_logger.h, which is why it is using vsprintf.
    // Please do not do this in normal code
    void Logv(const char * format, va_list ap) override {
        if (!LogAcceptCategory(BCLog::LEVELDB)) {
            return;
        }
        char buffer[500];
        for (int iter = 0; iter < 2; iter++) {
            char* base;
            int bufsize;
            if (iter == 0) {
                bufsize = sizeof(buffer);
                base = buffer;
            } else {
                bufsize = 30000;
                base = new char[bufsize];
            }
            char* p = base;
            char* limit = base + bufsize;

            // Print the message
            if (p < limit) {
                va_list backup_ap;
                va_copy(backup_ap, ap);
                // Do not use vsnprintf elsewhere in bitcoin source code, see above.
                p += vsnprintf(p, limit - p, format, backup_ap);
                va_end(backup_ap);
            }

            // Truncate to available space if necessary
            if (p >= limit) {
                if (iter == 0) {
                    continue; // Try again with larger buffer
                } else {
                    p = limit - 1;
                }
            }

            // Add newline if necessary
            if (p == base || p[-1] != '\n') {
                *p++ = '\n';
            }

            assert(p <= limit);
            base[std::min(bufsize - 1, (int)(p - base))] = '\0';
            LogPrintf("leveldb: %s", base); /* Continued */
            if (base != buffer) {
                delete[] base;
            }
            break;
        }
    }
};

static void SetMaxOpenFiles(leveldb::Options *options) {
    // On most platforms the default setting of max_open_files (which is 1000)
    // is optimal. On Windows using a large file count is OK because the handles
    // do not interfere with select() loops. On 64-bit Unix hosts this value is
    // also OK, because up to that amount LevelDB will use an mmap
    // implementation that does not use extra file descriptors (the fds are
    // closed after being mmap'ed).
    //
    // Increasing the value beyond the default is dangerous because LevelDB will
    // fall back to a non-mmap implementation when the file count is too large.
    // On 32-bit Unix hosts we should decrease the value because the handles use
    // up real fds, and we want to avoid fd exhaustion issues.
    //
    // See PR #12495 for further discussion.
    int default_open_files = options->max_open_files;
#ifndef WIN32
    if (sizeof(void*) < 8) {
        options->max_open_files = 64;
    }
#endif
    LogPrint(BCLog::LEVELDB, "LevelDB using max_open_files=%d (default=%d)\n",
             options->max_open_files, default_open_files);
}

static leveldb::Options GetOptions(size_t nCacheSize)
{
    leveldb::Options options;
    options.block_cache = leveldb::NewLRUCache(nCacheSize / 2);
    options.write_buffer_size = nCacheSize / 4; // up to two write buffers may be held in memory simultaneously
    options.filter_policy = leveldb::NewBloomFilterPolicy(10);
    options.compression = leveldb::kNoCompression;
    options.info_log = new CBitcoinLevelDBLogger();
    if (leveldb::kMajorVersion > 1 || (leveldb::kMajorVersion == 1 && leveldb::kMinorVersion >= 16)) {
        // LevelDB versions before 1.16 consider short writes to be corruption. Only trigger error
        // on corruption in later versions.
        options.paranoid_checks = true;
    }
    SetMaxOpenFiles(&options);
    return options;
}
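
// Illustrative sizing example (not part of the build): a wrapper created with
// nCacheSize = 64 MiB gets a 32 MiB LevelDB block cache and a 16 MiB write buffer
// from GetOptions() above; since up to two write buffers may be held in memory at
// once, total memory use stays roughly within the requested budget (32 + 2*16 MiB).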
CDBWrapper::CDBWrapper(const fs::path& path, size_t nCacheSize, bool fMemory, bool fWipe, bool obfuscate)
    : m_name{path.stem().string()}
{
    penv = nullptr;
    readoptions.verify_checksums = true;
    iteroptions.verify_checksums = true;
    iteroptions.fill_cache = false;
    syncoptions.sync = true;
    options = GetOptions(nCacheSize);
    options.create_if_missing = true;
    if (fMemory) {
        penv = leveldb::NewMemEnv(leveldb::Env::Default());
        options.env = penv;
    } else {
        if (fWipe) {
            LogPrintf("Wiping LevelDB in %s\n", path.string());
            leveldb::Status result = leveldb::DestroyDB(path.string(), options);
            dbwrapper_private::HandleError(result);
        }
        TryCreateDirectories(path);
        LogPrintf("Opening LevelDB in %s\n", path.string());
    }
    leveldb::Status status = leveldb::DB::Open(options, path.string(), &pdb);
    dbwrapper_private::HandleError(status);
    LogPrintf("Opened LevelDB successfully\n");

    if (gArgs.GetBoolArg("-forcecompactdb", false)) {
        LogPrintf("Starting database compaction of %s\n", path.string());
        pdb->CompactRange(nullptr, nullptr);
        LogPrintf("Finished database compaction of %s\n", path.string());
    }

    // The base-case obfuscation key, which is a noop.
    obfuscate_key = std::vector<unsigned char>(OBFUSCATE_KEY_NUM_BYTES, '\000');

    bool key_exists = Read(OBFUSCATE_KEY_KEY, obfuscate_key);

    if (!key_exists && obfuscate && IsEmpty()) {
        // Initialize non-degenerate obfuscation if it won't upset
        // existing, non-obfuscated data.
        std::vector<unsigned char> new_key = CreateObfuscateKey();

        // Write `new_key` so we don't obfuscate the key with itself
        Write(OBFUSCATE_KEY_KEY, new_key);
        obfuscate_key = new_key;

        LogPrintf("Wrote new obfuscate key for %s: %s\n", path.string(), HexStr(obfuscate_key));
    }

    LogPrintf("Using obfuscation key for %s: %s\n", path.string(), HexStr(obfuscate_key));
}
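
// Usage note (illustrative): the -forcecompactdb option read above can be passed on
// the node's command line or in the config file to force a one-time compaction of
// each database opened through this wrapper at startup.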

CDBWrapper::~CDBWrapper()
{
    delete pdb;
    pdb = nullptr;
    delete options.filter_policy;
    options.filter_policy = nullptr;
    delete options.info_log;
    options.info_log = nullptr;
    delete options.block_cache;
    options.block_cache = nullptr;
    delete penv;
    options.env = nullptr;
}

bool CDBWrapper::WriteBatch(CDBBatch& batch, bool fSync)
{
    const bool log_memory = LogAcceptCategory(BCLog::LEVELDB);
    double mem_before = 0;
    if (log_memory) {
        mem_before = DynamicMemoryUsage() / 1024.0 / 1024;
    }
    leveldb::Status status = pdb->Write(fSync ? syncoptions : writeoptions, &batch.batch);
    dbwrapper_private::HandleError(status);
    if (log_memory) {
        double mem_after = DynamicMemoryUsage() / 1024.0 / 1024;
        LogPrint(BCLog::LEVELDB, "WriteBatch memory usage: db=%s, before=%.1fMiB, after=%.1fMiB\n",
                 m_name, mem_before, mem_after);
    }
    return true;
}
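
// Usage sketch (illustrative only; CDBBatch and its Write/Erase templates are
// declared in dbwrapper.h, and the key/value shown here are hypothetical):
//
//     CDBWrapper db(GetDataDir() / "example_db", 1 << 20); // 1 MiB cache
//     CDBBatch batch(db);
//     batch.Write(std::make_pair('k', some_hash), some_value);
//     db.WriteBatch(batch, /* fSync= */ true); // applied as one atomic LevelDB write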

size_t CDBWrapper::DynamicMemoryUsage() const {
    std::string memory;
    if (!pdb->GetProperty("leveldb.approximate-memory-usage", &memory)) {
        LogPrint(BCLog::LEVELDB, "Failed to get approximate-memory-usage property\n");
        return 0;
    }
    return stoul(memory);
}

// Prefixed with null character to avoid collisions with other keys
//
// We must use a string constructor which specifies length so that we copy
// past the null-terminator.
const std::string CDBWrapper::OBFUSCATE_KEY_KEY("\000obfuscate_key", 14);

const unsigned int CDBWrapper::OBFUSCATE_KEY_NUM_BYTES = 8;

/**
 * Returns a vector of 8 random bytes suitable for use as an obfuscating XOR key.
 */
std::vector<unsigned char> CDBWrapper::CreateObfuscateKey() const
{
    unsigned char buff[OBFUSCATE_KEY_NUM_BYTES];
    GetRandBytes(buff, OBFUSCATE_KEY_NUM_BYTES);
    return std::vector<unsigned char>(&buff[0], &buff[OBFUSCATE_KEY_NUM_BYTES]);
}
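
// Minimal sketch of how the obfuscation key is applied to serialized values (the
// actual implementation lives in dbwrapper.h, which XORs the value stream against
// the key; this standalone helper is illustrative only):
//
//     void XorObfuscate(std::vector<unsigned char>& data,
//                       const std::vector<unsigned char>& key)
//     {
//         if (key.empty()) return;
//         for (size_t i = 0, j = 0; i != data.size(); i++) {
//             data[i] ^= key[j++];
//             if (j == key.size()) j = 0; // the key repeats over the data
//         }
//     }
//
// Because the base-case key set in the constructor is all zero bytes, XOR-ing with
// it leaves the data unchanged, which is why it is described as a noop.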

bool CDBWrapper::IsEmpty()
{
    std::unique_ptr<CDBIterator> it(NewIterator());
    it->SeekToFirst();
    return !(it->Valid());
}

CDBIterator::~CDBIterator() { delete piter; }
bool CDBIterator::Valid() const { return piter->Valid(); }
void CDBIterator::SeekToFirst() { piter->SeekToFirst(); }
void CDBIterator::Next() { piter->Next(); }
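
// Usage sketch (illustrative only; NewIterator() and the GetKey/GetValue templates
// are declared in dbwrapper.h, and the key type here is hypothetical):
//
//     std::unique_ptr<CDBIterator> it(db.NewIterator());
//     for (it->SeekToFirst(); it->Valid(); it->Next()) {
//         std::pair<char, uint256> key;
//         if (it->GetKey(key)) {
//             // inspect key, or call it->GetValue(...) for the deserialized value
//         }
//     }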

namespace dbwrapper_private {

void HandleError(const leveldb::Status& status)
{
    if (status.ok())
        return;
    const std::string errmsg = "Fatal LevelDB error: " + status.ToString();
    LogPrintf("%s\n", errmsg);
    LogPrintf("You can use -debug=leveldb to get more complete diagnostic messages\n");
    throw dbwrapper_error(errmsg);
}
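
// Sketch of how a caller might surface this error (illustrative only; assumes a
// CDBWrapper instance named db and that dbwrapper_error derives from std::runtime_error):
//
//     try {
//         db.WriteBatch(batch, true);
//     } catch (const dbwrapper_error& e) {
//         LogPrintf("Database write failed: %s\n", e.what());
//         // the database may be unusable; callers typically abort or shut down
//     }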

const std::vector<unsigned char>& GetObfuscateKey(const CDBWrapper &w)
{
    return w.obfuscate_key;
}
} // namespace dbwrapper_private