// Copyright (c) 2012-2015 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.

#include <dbwrapper.h>

#include <memory>
#include <random.h>

#include <leveldb/cache.h>
#include <leveldb/env.h>
#include <leveldb/filter_policy.h>
#include <memenv.h>
#include <stdint.h>
#include <algorithm>

fix: add missing includes and remove obsolete includes (#5562)

## Issue being fixed or feature implemented
Some headers and modules use objects from the STL without including the
corresponding header directly, which causes compilation failures on some
platforms with specific compilers, such as #5554.

## What was done?
Added missing includes and removed obsolete includes for `optional`,
`deque`, `tuple`, `unordered_set`, `unordered_map`, `set` and `atomic`.
Note that this PR doesn't cover all cases, only those where an include is
obviously missing or obviously obsolete.
Most of the changes belong to Dash-specific code; original Bitcoin code is
left untouched, such as the missing `<map>` in `src/psbt.h`.
I used this script to get a list of files/headers that look suspicious,
e.g. `./headers-scanner.sh std::optional optional`:
```bash
#!/bin/bash
set -e
function check_includes() {
    obj=$1
    header=$2
    file=$3
    used=0
    included=0
    grep "$obj" "$file" >/dev/null 2>/dev/null && used=1
    grep "include <$header>" "$file" >/dev/null 2>/dev/null && included=1
    if [ $used == 1 ] && [ $included == 0 ]
    then echo "missing <$header> in $file"
    fi
    if [ $used == 0 ] && [ $included == 1 ]
    then echo "obsolete <$header> in $file"
    fi
}
export -f check_includes
obj=$1
header=$2
find src \( -name '*.h' -or -name '*.cpp' -or -name '*.hpp' \) -exec bash -c 'check_includes "$0" "$1" "$2"' "$obj" "$header" {} \;
```

## How Has This Been Tested?
Built code locally.

## Breaking Changes
n/a

## Checklist:
- [x] I have performed a self-review of my own code
- [ ] I have commented my code, particularly in hard-to-understand areas
- [ ] I have added or updated relevant unit/integration/functional/e2e tests
- [ ] I have made corresponding changes to the documentation
- [x] I have assigned this pull request to a milestone

#include <optional>

class CBitcoinLevelDBLogger : public leveldb::Logger {
public:
    // This code is adapted from posix_logger.h, which is why it is using vsprintf.
    // Please do not do this in normal code
    void Logv(const char * format, va_list ap) override {
        if (!LogAcceptCategory(BCLog::LEVELDB)) {
            return;
        }
        char buffer[500];
        for (int iter = 0; iter < 2; iter++) {
            char* base;
            int bufsize;
            if (iter == 0) {
                bufsize = sizeof(buffer);
                base = buffer;
            } else {
                bufsize = 30000;
                base = new char[bufsize];
            }
            char* p = base;
            char* limit = base + bufsize;

            // Print the message
            if (p < limit) {
                va_list backup_ap;
                va_copy(backup_ap, ap);
                // Do not use vsnprintf elsewhere in bitcoin source code, see above.
                p += vsnprintf(p, limit - p, format, backup_ap);
                va_end(backup_ap);
            }

            // Truncate to available space if necessary
            if (p >= limit) {
                if (iter == 0) {
                    continue; // Try again with larger buffer
                } else {
                    p = limit - 1;
                }
            }

            // Add newline if necessary
            if (p == base || p[-1] != '\n') {
                *p++ = '\n';
            }

            assert(p <= limit);
            base[std::min(bufsize - 1, (int)(p - base))] = '\0';
            LogPrintf("leveldb: %s", base); /* Continued */
            if (base != buffer) {
                delete[] base;
            }
            break;
        }
    }
};
|
2012-09-03 19:05:30 +02:00
|
|
|
|
Merge #12495: Increase LevelDB max_open_files
ccedbaf Increase LevelDB max_open_files unless on 32-bit Unix. (Evan Klitzke)
Pull request description:
Currently we set `max_open_files = 64` on all architectures due to concerns about file descriptor exhaustion. This is extremely expensive due to how LevelDB is designed.
When a LevelDB file handle is opened, a bloom filter and block index are decoded, and some CRCs are checked. Bloom filters and block indexes in open table handles can be checked purely in memory. This means that when doing a key lookup, if a given table file may contain a given key, all of the lookup operations can happen completely in RAM until the block itself is fetched. In the common case fetching the block is one disk seek, because the block index stores its physical offset. This is the ideal case, and what we want to happen as often as possible.
If a table file handle is not open in the table cache, then in addition to the regular system calls to open the file, the block index and bloom filter need to be decoded before they can be checked. This is expensive and is something we want to avoid.
The current setting of 64 file handles means that on a synced node, only about 4% of key lookups can be satisifed by table file handles that are actually open and in memory.
The original concerns about file descriptor exhaustion are unwarranted on most systems because:
* On 64-bit POSIX hosts LevelDB will open up to 1000 file descriptors using `mmap()`, and it does not retain an open file descriptor for such files.
* On Windows non-socket files do not interfere with the main network `select()` loop, so the same fd exhaustion issues do not apply there.
This change keeps the default `max_open_files` value (which is 1000) on all systems except 32-bit POSIX hosts (which do not use `mmap()`). Open file handles use about 20 KB of memory (for the block index), so the extra file handles do not cause much memory overhead. At most 1000 will be open, and a fully synced node right now has about 1500 such files.
Profile of `loadblk` thread before changes: https://monad.io/maxopenfiles-master.svg
Profile of `loadblk` thread after changes: https://monad.io/maxopenfiles-increase.svg
Tree-SHA512: de54f77d57e9f8999eaf8d12592aab5b02f5877be8fa727a1f42cf02da2693ce25846445eb19eb138ce4e5045d1c65e14054df72faf3ff32c7655c9cfadd27a9
2018-03-29 14:56:44 +02:00
static void SetMaxOpenFiles(leveldb::Options *options) {
    // On most platforms the default setting of max_open_files (which is 1000)
    // is optimal. On Windows using a large file count is OK because the handles
    // do not interfere with select() loops. On 64-bit Unix hosts this value is
    // also OK, because up to that amount LevelDB will use an mmap
    // implementation that does not use extra file descriptors (the fds are
    // closed after being mmap'ed).
    //
    // Increasing the value beyond the default is dangerous because LevelDB will
    // fall back to a non-mmap implementation when the file count is too large.
    // On 32-bit Unix host we should decrease the value because the handles use
    // up real fds, and we want to avoid fd exhaustion issues.
    //
    // See PR #12495 for further discussion.
    int default_open_files = options->max_open_files;
#ifndef WIN32
    if (sizeof(void*) < 8) {
        options->max_open_files = 64;
    }
#endif
    LogPrint(BCLog::LEVELDB, "LevelDB using max_open_files=%d (default=%d)\n",
             options->max_open_files, default_open_files);
}

static leveldb::Options GetOptions(size_t nCacheSize)
{
    leveldb::Options options;
    options.block_cache = leveldb::NewLRUCache(nCacheSize / 2);
    options.write_buffer_size = nCacheSize / 4; // up to two write buffers may be held in memory simultaneously
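    // Example: with nCacheSize = 8 MiB, the block cache gets 4 MiB and each write
    // buffer 2 MiB, so the cache plus two full write buffers stay within the budget.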
|
2012-09-03 19:05:30 +02:00
|
|
|
options.filter_policy = leveldb::NewBloomFilterPolicy(10);
|
|
|
|
options.compression = leveldb::kNoCompression;
|
2017-03-18 10:58:08 +01:00
|
|
|
options.info_log = new CBitcoinLevelDBLogger();
|
2014-05-12 12:24:22 +02:00
|
|
|
if (leveldb::kMajorVersion > 1 || (leveldb::kMajorVersion == 1 && leveldb::kMinorVersion >= 16)) {
|
|
|
|
// LevelDB versions before 1.16 consider short writes to be corruption. Only trigger error
|
|
|
|
// on corruption in later versions.
|
|
|
|
options.paranoid_checks = true;
|
|
|
|
}
|
    SetMaxOpenFiles(&options);
    return options;
}

CDBWrapper::CDBWrapper(const fs::path& path, size_t nCacheSize, bool fMemory, bool fWipe, bool obfuscate)
    : m_name{path.stem().string()}
{
    penv = nullptr;
    readoptions.verify_checksums = true;
    iteroptions.verify_checksums = true;
    iteroptions.fill_cache = false;
    syncoptions.sync = true;
    options = GetOptions(nCacheSize);
    options.create_if_missing = true;
    if (fMemory) {
        penv = leveldb::NewMemEnv(leveldb::Env::Default());
        options.env = penv;
    } else {
        if (fWipe) {
            LogPrintf("Wiping LevelDB in %s\n", path.string());
            leveldb::Status result = leveldb::DestroyDB(path.string(), options);
            dbwrapper_private::HandleError(result);
        }
        TryCreateDirectories(path);
        LogPrintf("Opening LevelDB in %s\n", path.string());
    }
    leveldb::Status status = leveldb::DB::Open(options, path.string(), &pdb);
    dbwrapper_private::HandleError(status);
    LogPrintf("Opened LevelDB successfully\n");

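    // Optionally run a full compaction across the whole key range at startup (-forcecompactdb).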
    if (gArgs.GetBoolArg("-forcecompactdb", false)) {
        LogPrintf("Starting database compaction of %s\n", path.string());
        pdb->CompactRange(nullptr, nullptr);
        LogPrintf("Finished database compaction of %s\n", path.string());
    }

    // The base-case obfuscation key, which is a noop.
    obfuscate_key = std::vector<unsigned char>(OBFUSCATE_KEY_NUM_BYTES, '\000');

    bool key_exists = Read(OBFUSCATE_KEY_KEY, obfuscate_key);

    if (!key_exists && obfuscate && IsEmpty()) {
        // Initialize non-degenerate obfuscation if it won't upset
        // existing, non-obfuscated data.
        std::vector<unsigned char> new_key = CreateObfuscateKey();

        // Write `new_key` so we don't obfuscate the key with itself
        Write(OBFUSCATE_KEY_KEY, new_key);
        obfuscate_key = new_key;

        LogPrintf("Wrote new obfuscate key for %s: %s\n", path.string(), HexStr(obfuscate_key));
    }

    LogPrintf("Using obfuscation key for %s: %s\n", path.string(), HexStr(obfuscate_key));
}

CDBWrapper::~CDBWrapper()
{
    delete pdb;
    pdb = nullptr;
    delete options.filter_policy;
    options.filter_policy = nullptr;
    delete options.info_log;
    options.info_log = nullptr;
    delete options.block_cache;
    options.block_cache = nullptr;
    delete penv;
    options.env = nullptr;
}

Merge #12604: Add DynamicMemoryUsage() to CDBWrapper to estimate LevelDB memory use

741f0177c Add DynamicMemoryUsage() to LevelDB (Evan Klitzke)

Pull request description:

This adds a new method `CDBWrapper::DynamicMemoryUsage()` similar to Bitcoin's existing methods of the same name. It's implemented by asking LevelDB for the information, and then parsing the string response. I've also added logging to `CDBWrapper::WriteBatch()` to track this information:
```
$ tail -f ~/.bitcoin/testnet3/debug.log | grep WriteBatch
2018-03-05 19:34:55 WriteBatch memory usage: db=chainstate, before=0.0MiB, after=0.0MiB
2018-03-05 19:35:17 WriteBatch memory usage: db=index, before=0.0MiB, after=0.0MiB
2018-03-05 19:35:17 WriteBatch memory usage: db=chainstate, before=0.0MiB, after=8.0MiB
2018-03-05 19:35:22 WriteBatch memory usage: db=index, before=0.0MiB, after=0.0MiB
2018-03-05 19:35:22 WriteBatch memory usage: db=chainstate, before=8.0MiB, after=17.0MiB
2018-03-05 19:35:26 WriteBatch memory usage: db=index, before=0.0MiB, after=0.0MiB
2018-03-05 19:35:27 WriteBatch memory usage: db=chainstate, before=9.0MiB, after=18.0MiB
2018-03-05 19:35:40 WriteBatch memory usage: db=index, before=0.0MiB, after=0.0MiB
2018-03-05 19:35:41 WriteBatch memory usage: db=chainstate, before=9.0MiB, after=7.0MiB
2018-03-05 19:35:52 WriteBatch memory usage: db=index, before=0.0MiB, after=0.0MiB
2018-03-05 19:35:52 WriteBatch memory usage: db=chainstate, before=7.0MiB, after=9.0MiB
^C
```
As LevelDB doesn't seem to provide a way to get the database name, I've also added a new `m_name` field to the `CDBWrapper`. This is necessary because we have multiple LevelDB databases (two now, and possibly more later, e.g. #11857).

I am using this information in other branches where I'm experimenting with changing LevelDB buffer sizes.

Tree-SHA512: 7ea8ff5484bb07ef806af17d000c74ccca27d2e0f6c3229e12d93818f00874553335d87428482bd8acbcae81ea35aef2a243326f9fccbfac25989323d24391b4

bool CDBWrapper::WriteBatch(CDBBatch& batch, bool fSync)
{
    const bool log_memory = LogAcceptCategory(BCLog::LEVELDB);
    double mem_before = 0;
    if (log_memory) {
        mem_before = DynamicMemoryUsage() / 1024.0 / 1024;
    }
    leveldb::Status status = pdb->Write(fSync ? syncoptions : writeoptions, &batch.batch);
    dbwrapper_private::HandleError(status);
    if (log_memory) {
        double mem_after = DynamicMemoryUsage() / 1024.0 / 1024;
        LogPrint(BCLog::LEVELDB, "WriteBatch memory usage: db=%s, before=%.1fMiB, after=%.1fMiB\n",
                 m_name, mem_before, mem_after);
    }
    return true;
}

size_t CDBWrapper::DynamicMemoryUsage() const
{
    std::string memory;
    std::optional<size_t> parsed;
    if (!pdb->GetProperty("leveldb.approximate-memory-usage", &memory) || !(parsed = ToIntegral<size_t>(memory))) {
        LogPrint(BCLog::LEVELDB, "Failed to get approximate-memory-usage property\n");
        return 0;
    }
    return parsed.value();
}

// Prefixed with null character to avoid collisions with other keys
//
// We must use a string constructor which specifies length so that we copy
// past the null-terminator.
const std::string CDBWrapper::OBFUSCATE_KEY_KEY("\000obfuscate_key", 14);

const unsigned int CDBWrapper::OBFUSCATE_KEY_NUM_BYTES = 8;

/**
 * Returns a string (consisting of 8 random bytes) suitable for use as an
 * obfuscating XOR key.
 */
std::vector<unsigned char> CDBWrapper::CreateObfuscateKey() const
{
    unsigned char buff[OBFUSCATE_KEY_NUM_BYTES];
    GetRandBytes(buff, OBFUSCATE_KEY_NUM_BYTES);
    return std::vector<unsigned char>(&buff[0], &buff[OBFUSCATE_KEY_NUM_BYTES]);
}
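
The key produced above is consumed by the Read/Write helpers declared in dbwrapper.h, which XOR the serialized value bytes against it. As a rough sketch of that technique only (the helper name here is hypothetical, not part of the real API), the 8-byte key simply repeats across the value:

```cpp
// Illustration only: roll a short XOR key over a byte vector in place.
static void XorSketch(std::vector<unsigned char>& data,
                      const std::vector<unsigned char>& key)
{
    if (key.empty()) return;
    for (size_t i = 0, j = 0; i != data.size(); i++) {
        data[i] ^= key[j++];
        if (j == key.size()) j = 0; // wrap back to the start of the key
    }
}
```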

bool CDBWrapper::IsEmpty()
{
    std::unique_ptr<CDBIterator> it(NewIterator());
    it->SeekToFirst();
    return !(it->Valid());
}

CDBIterator::~CDBIterator() { delete piter; }
bool CDBIterator::Valid() const { return piter->Valid(); }
void CDBIterator::SeekToFirst() { piter->SeekToFirst(); }
void CDBIterator::Next() { piter->Next(); }

namespace dbwrapper_private {

void HandleError(const leveldb::Status& status)
{
    if (status.ok())
        return;
    const std::string errmsg = "Fatal LevelDB error: " + status.ToString();
    LogPrintf("%s\n", errmsg);
    LogPrintf("You can use -debug=leveldb to get more complete diagnostic messages\n");
    throw dbwrapper_error(errmsg);
}

const std::vector<unsigned char>& GetObfuscateKey(const CDBWrapper &w)
{
    return w.obfuscate_key;
}

} // namespace dbwrapper_private
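
A minimal usage sketch (illustration only, not part of this file): how a caller might open a wrapper and batch writes, assuming the templated `Read`/`Write` helpers and the `CDBBatch` class declared in dbwrapper.h:

```cpp
// Open a purely in-memory database (fMemory = true) with a 1 MiB cache and obfuscation on.
CDBWrapper db(fs::path("example"), /*nCacheSize=*/1 << 20,
              /*fMemory=*/true, /*fWipe=*/false, /*obfuscate=*/true);

// Single reads and writes go through the templated helpers.
db.Write(std::string("key"), std::string("value"));
std::string value;
if (db.Read(std::string("key"), value)) {
    LogPrintf("read back: %s\n", value);
}

// Several updates can be grouped into one atomic LevelDB write.
CDBBatch batch(db);
batch.Write(std::string("a"), std::string("1"));
batch.Write(std::string("b"), std::string("2"));
db.WriteBatch(batch, /*fSync=*/true);
```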