Merge pull request #3741 from PastaPastaPasta/backports-0.17-pr19

Backports 0.17 pr19

Commit: ab48933ab4

configure.ac (28 lines changed)
@@ -238,6 +238,12 @@ if test "x$enable_miner" = xyes; then
  AC_DEFINE(ENABLE_MINER, 1, [Define this symbol if in-wallet miner should be enabled])
fi

# Enable different -fsanitize options
AC_ARG_WITH([sanitizers],
    [AS_HELP_STRING([--with-sanitizers],
                    [comma separated list of extra sanitizers to build with (default is none enabled)])],
    [use_sanitizers=$withval])

# Enable gprof profiling
AC_ARG_ENABLE([gprof],
            [AS_HELP_STRING([--enable-gprof],
@@ -291,6 +297,26 @@ fi
# Needed for MinGW targets when debug symbols are enabled as compiled objects get very large
AX_CHECK_COMPILE_FLAG([-Wa,-mbig-obj], [CXXFLAGS="$CXXFLAGS -Wa,-mbig-obj"],,,)

if test x$use_sanitizers != x; then
  # First check if the compiler accepts flags. If an incompatible pair like
  # -fsanitize=address,thread is used here, this check will fail. This will also
  # fail if a bad argument is passed, e.g. -fsanitize=undfeined
  AX_CHECK_COMPILE_FLAG(
    [[-fsanitize=$use_sanitizers]],
    [[SANITIZER_CXXFLAGS=-fsanitize=$use_sanitizers]],
    [AC_MSG_ERROR([compiler did not accept requested flags])])

  # Some compilers (e.g. GCC) require additional libraries like libasan,
  # libtsan, libubsan, etc. Make sure linking still works with the sanitize
  # flag. This is a separate check so we can give a better error message when
  # the sanitize flags are supported by the compiler but the actual sanitizer
  # libs are missing.
  AX_CHECK_LINK_FLAG(
    [[-fsanitize=$use_sanitizers]],
    [[SANITIZER_LDFLAGS=-fsanitize=$use_sanitizers]],
    [AC_MSG_ERROR([linker did not accept requested flags, you are missing required libraries])])
fi

ERROR_CXXFLAGS=
if test "x$enable_werror" = "xyes"; then
  if test "x$CXXFLAG_WERROR" = "x"; then
@@ -1415,6 +1441,8 @@ AC_SUBST(HARDENED_CPPFLAGS)
AC_SUBST(HARDENED_LDFLAGS)
AC_SUBST(PIC_FLAGS)
AC_SUBST(PIE_FLAGS)
AC_SUBST(SANITIZER_CXXFLAGS)
AC_SUBST(SANITIZER_LDFLAGS)
AC_SUBST(SSE42_CXXFLAGS)
AC_SUBST(SSE41_CXXFLAGS)
AC_SUBST(AVX2_CXXFLAGS)
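The compile check and the link check above are deliberately separate: a compiler can accept a `-fsanitize=` flag while linking still fails because the runtime library is not installed. A rough sketch of what the two autoconf macros boil down to (the conftest program is illustrative, not the literal one autoconf generates):

```bash
# Sketch: what --with-sanitizers=address makes configure verify.
echo 'int main() { return 0; }' > conftest.cpp

# 1) Compile-only check: does the compiler accept the flag at all?
g++ -fsanitize=address -c conftest.cpp -o conftest.o

# 2) Link check: are the runtime libs (e.g. libasan for GCC) present?
g++ -fsanitize=address conftest.o -o conftest
```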
doc/developer-notes.md

@@ -1,6 +1,43 @@
Developer Notes
===============

<!-- markdown-toc start -->
**Table of Contents**

- [Developer Notes](#developer-notes)
    - [Coding Style](#coding-style)
        - [Doxygen comments](#doxygen-comments)
    - [Development tips and tricks](#development-tips-and-tricks)
        - [Compiling for debugging](#compiling-for-debugging)
        - [Compiling for gprof profiling](#compiling-for-gprof-profiling)
        - [debug.log](#debuglog)
        - [Testnet and Regtest modes](#testnet-and-regtest-modes)
        - [DEBUG_LOCKORDER](#debug_lockorder)
        - [Valgrind suppressions file](#valgrind-suppressions-file)
        - [Compiling for test coverage](#compiling-for-test-coverage)
    - [Locking/mutex usage notes](#lockingmutex-usage-notes)
    - [Threads](#threads)
    - [Ignoring IDE/editor files](#ignoring-ideeditor-files)
- [Development guidelines](#development-guidelines)
    - [General Dash Core](#general-dash-core)
    - [Wallet](#wallet)
    - [General C++](#general-c)
    - [C++ data structures](#c-data-structures)
    - [Strings and formatting](#strings-and-formatting)
    - [Variable names](#variable-names)
    - [Threads and synchronization](#threads-and-synchronization)
    - [Source code organization](#source-code-organization)
    - [GUI](#gui)
    - [Subtrees](#subtrees)
    - [Git and GitHub tips](#git-and-github-tips)
    - [Scripted diffs](#scripted-diffs)
    - [RPC interface guidelines](#rpc-interface-guidelines)

<!-- markdown-toc end -->

Coding Style
---------------

Various coding styles have been used during the history of the codebase,
and the result is not very consistent. However, we're now trying to converge to
a single style, which is specified below. When writing patches, favor the new
@@ -141,43 +178,44 @@ Documentation can be generated with `make docs` and cleaned up with `make clean-
Development tips and tricks
---------------------------

**compiling for debugging**
### Compiling for debugging

Run configure with the --enable-debug option, then make. Or run configure with
CXXFLAGS="-g -ggdb -O0" or whatever debug flags you need.
Run configure with `--enable-debug` to add additional compiler flags that
produce better debugging builds.
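Both forms mentioned in this section look like this in practice:

```bash
# Let configure pick debug-friendly flags
./configure --enable-debug && make

# Or pass explicit debug flags, as in the old wording above
./configure CXXFLAGS="-g -ggdb -O0" && make
```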

**compiling for gprof profiling**
### Compiling for gprof profiling

Run configure with the --enable-gprof option, then make.
Run configure with the `--enable-gprof` option, then make.
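A plausible end-to-end workflow (gprof writes `gmon.out` into the working directory when the instrumented binary exits cleanly; the exact run is up to you):

```bash
./configure --enable-gprof && make
src/dashd            # exercise the node, then shut it down cleanly
gprof src/dashd gmon.out > profile.txt
```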
**debug.log**
### debug.log

If the code is behaving strangely, take a look in the debug.log file in the data directory;
error and debugging messages are written there.

The -debug=... command-line option controls debugging; running with just -debug or -debug=1 will turn
The `-debug=...` command-line option controls debugging; running with just `-debug` or `-debug=1` will turn
on all categories (and give you a very large debug.log file).

The Qt code routes qDebug() output to debug.log under category "qt": run with -debug=qt
The Qt code routes `qDebug()` output to debug.log under category "qt": run with `-debug=qt`
to see it.
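For example (assuming the usual in-tree binary paths; the Qt binary is needed for the "qt" category):

```bash
# All categories: very verbose, very large debug.log
src/dashd -debug

# Only the Qt output mentioned above
src/qt/dash-qt -debug=qt
```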
**testnet and regtest modes**
### Testnet and Regtest modes

Run with the -testnet option to run with "play coins" on the test network, if you
Run with the `-testnet` option to run with "play coins" on the test network, if you
are testing multi-machine code that needs to operate across the internet.

If you are testing something that can run on one machine, run with the -regtest option.
In regression test mode, blocks can be created on-demand; see test/functional/ for tests
that run in -regtest mode.
If you are testing something that can run on one machine, run with the `-regtest` option.
In regression test mode, blocks can be created on-demand; see [test/functional/](/test/functional) for tests
that run in `-regtest` mode.
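A minimal regtest session might look like this (RPC names as of the era of this backport; `generate` mines blocks on demand):

```bash
src/dashd -regtest -daemon
src/dash-cli -regtest generate 10   # create 10 blocks instantly
src/dash-cli -regtest stop
```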
**DEBUG_LOCKORDER**
### DEBUG_LOCKORDER

Dash Core is a multithreaded application, and deadlocks or other multithreading bugs
can be very difficult to track down. Compiling with -DDEBUG_LOCKORDER (configure
CXXFLAGS="-DDEBUG_LOCKORDER -g") inserts run-time checks to keep track of which locks
are held, and adds warnings to the debug.log file if inconsistencies are detected.
Dash Core is a multi-threaded application, and deadlocks or other
multi-threading bugs can be very difficult to track down. The `--enable-debug`
configure option adds `-DDEBUG_LOCKORDER` to the compiler flags. This inserts
run-time checks to keep track of which locks are held, and adds warnings to the
debug.log file if inconsistencies are detected.
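As a hypothetical illustration (names invented for the example), this is the pattern those run-time checks flag: the same two locks acquired in opposite orders by different threads.

```cpp
// Hypothetical example, not code from this diff: two critical sections
// acquired in opposite orders by two threads. With -DDEBUG_LOCKORDER the
// second ordering produces a lock-order-inconsistency report in debug.log.
CCriticalSection cs_first;
CCriticalSection cs_second;

void ThreadA()
{
    LOCK2(cs_first, cs_second); // order: first, then second
    // ... touch shared state ...
}

void ThreadB()
{
    LOCK2(cs_second, cs_first); // opposite order: flagged by DEBUG_LOCKORDER
    // ... touch shared state ...
}
```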
**Valgrind suppressions file**
### Valgrind suppressions file

Valgrind is a programming tool for memory debugging, memory leak detection, and
profiling. The repo contains a Valgrind suppressions file
@@ -192,7 +230,7 @@ $ valgrind --suppressions=contrib/valgrind.supp --leak-check=full \
$ valgrind -v --leak-check=full src/dashd -printtoconsole
```

**compiling for test coverage**
### Compiling for test coverage

LCOV can be used to generate a test coverage report based upon `make check`
execution. LCOV must be installed on your system (e.g. the `lcov` package
@@ -208,22 +246,73 @@ make cov
# A coverage report will now be accessible at `./test_dash.coverage/index.html`.
```

**Sanitizers**

Dash Core can be compiled with various "sanitizers" enabled, which add
instrumentation for issues regarding things like memory safety, thread race
conditions, or undefined behavior. This is controlled with the
`--with-sanitizers` configure flag, which should be a comma separated list of
sanitizers to enable. The sanitizer list should correspond to supported
`-fsanitize=` options in your compiler. These sanitizers have runtime overhead,
so they are most useful when testing changes or producing debugging builds.

Some examples:

```bash
# Enable both the address sanitizer and the undefined behavior sanitizer
./configure --with-sanitizers=address,undefined

# Enable the thread sanitizer
./configure --with-sanitizers=thread
```

If you are compiling with GCC you will typically need to install corresponding
"san" libraries to actually compile with these flags, e.g. libasan for the
address sanitizer, libtsan for the thread sanitizer, and libubsan for the
undefined sanitizer. If you are missing required libraries, the configure script
will fail with a linker error when testing the sanitizer flags.

The test suite should pass cleanly with the `thread` and `undefined` sanitizers,
but there are a number of known problems when using the `address` sanitizer. The
address sanitizer is known to fail in
[sha256_sse4::Transform](/src/crypto/sha256_sse4.cpp) which makes it unusable
unless you also use `--disable-asm` when running configure. We would like to fix
sanitizer issues, so please send pull requests if you can fix any errors found
by the address sanitizer (or any other sanitizer).

Not all sanitizer options can be enabled at the same time, e.g. trying to build
with `--with-sanitizers=address,thread` will fail in the configure script as
these sanitizers are mutually incompatible. Refer to your compiler manual to
learn more about these options and which sanitizers are supported by your
compiler.

Additional resources:

* [AddressSanitizer](https://clang.llvm.org/docs/AddressSanitizer.html)
* [LeakSanitizer](https://clang.llvm.org/docs/LeakSanitizer.html)
* [MemorySanitizer](https://clang.llvm.org/docs/MemorySanitizer.html)
* [ThreadSanitizer](https://clang.llvm.org/docs/ThreadSanitizer.html)
* [UndefinedBehaviorSanitizer](https://clang.llvm.org/docs/UndefinedBehaviorSanitizer.html)
* [GCC Instrumentation Options](https://gcc.gnu.org/onlinedocs/gcc/Instrumentation-Options.html)
* [Google Sanitizers Wiki](https://github.com/google/sanitizers/wiki)
* [Issue #12691: Enable -fsanitize flags in Travis](https://github.com/bitcoin/bitcoin/issues/12691)
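Sanitizer behavior can also be tuned at run time through environment variables documented in the resources above; for instance, when running the unit test binary:

```bash
# Ask UBSan for stack traces and make ASan stop at the first error
UBSAN_OPTIONS=print_stacktrace=1 ASAN_OPTIONS=abort_on_error=1 src/test/test_dash
```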
Locking/mutex usage notes
-------------------------

The code is multi-threaded, and uses mutexes and the
LOCK/TRY_LOCK macros to protect data structures.
`LOCK` and `TRY_LOCK` macros to protect data structures.

Deadlocks due to inconsistent lock ordering (thread 1 locks cs_main
and then cs_wallet, while thread 2 locks them in the opposite order:
result, deadlock as each waits for the other to release its lock) are
a problem. Compile with -DDEBUG_LOCKORDER to get lock order
inconsistencies reported in the debug.log file.
Deadlocks due to inconsistent lock ordering (thread 1 locks `cs_main` and then
`cs_wallet`, while thread 2 locks them in the opposite order: result, deadlock
as each waits for the other to release its lock) are a problem. Compile with
`-DDEBUG_LOCKORDER` (or use `--enable-debug`) to get lock order inconsistencies
reported in the debug.log file.

Re-architecting the core code so there are better-defined interfaces
between the various components is a goal, with any necessary locking
done by the components (e.g. see the self-contained CKeyStore class
and its cs_KeyStore lock for example).
done by the components (e.g. see the self-contained `CKeyStore` class
and its `cs_KeyStore` lock for example).

Threads
-------
@@ -618,7 +707,10 @@ its upstream repository.
Current subtrees include:

- src/leveldb
  - Upstream at https://github.com/google/leveldb ; Maintained by Google, but open important PRs to Core to avoid delay
  - Upstream at https://github.com/google/leveldb ; Maintained by Google, but
    open important PRs to Core to avoid delay.
  - **Note**: Follow the instructions in [Upgrading LevelDB](#upgrading-leveldb) when
    merging upstream changes to the leveldb subtree.

- src/libsecp256k1
  - Upstream at https://github.com/bitcoin-core/secp256k1/ ; actively maintaned by Core contributors.
@@ -629,6 +721,52 @@ Current subtrees include:
- src/univalue
  - Upstream at https://github.com/jgarzik/univalue ; report important PRs to Core to avoid delay.

Upgrading LevelDB
---------------------

Extra care must be taken when upgrading LevelDB. This section explains issues
you must be aware of.

### File Descriptor Counts

In most configurations we use the default LevelDB value for `max_open_files`,
which is 1000 at the time of this writing. If LevelDB actually uses this many
file descriptors it will cause problems with Bitcoin's `select()` loop, because
it may cause new sockets to be created where the fd value is >= 1024. For this
reason, on 64-bit Unix systems we rely on an internal LevelDB optimization that
uses `mmap()` + `close()` to open table files without actually retaining
references to the table file descriptors. If you are upgrading LevelDB, you must
sanity check the changes to make sure that this assumption remains valid.

In addition to reviewing the upstream changes in `env_posix.cc`, you can use `lsof` to
check this. For example, on Linux this command will show open `.ldb` file counts:

```bash
$ lsof -p $(pidof dashd) |\
    awk 'BEGIN { fd=0; mem=0; } /ldb$/ { if ($4 == "mem") mem++; else fd++ } END { printf "mem = %s, fd = %s\n", mem, fd}'
mem = 119, fd = 0
```

The `mem` value shows how many files are mmap'ed, and the `fd` value shows you
how many file descriptors these files are using. You should check that `fd` is a
small number (usually 0 on 64-bit hosts).

See the notes in the `SetMaxOpenFiles()` function in `dbwrapper.cc` for more
details.

### Consensus Compatibility

It is possible for LevelDB changes to inadvertently change consensus
compatibility between nodes. This happened in Bitcoin 0.8 (when LevelDB was
first introduced). When upgrading LevelDB you should review the upstream changes
to check for issues affecting consensus compatibility.

For example, if LevelDB had a bug that accidentally prevented a key from being
returned in an edge case, and that bug was fixed upstream, the bug "fix" would
be an incompatible consensus change. In this situation the correct behavior
would be to revert the upstream fix before applying the updates to Bitcoin's
copy of LevelDB. In general you should be wary of any upstream changes affecting
what data is returned from LevelDB queries.

Git and GitHub tips
---------------------
src/Makefile.am

@@ -5,8 +5,8 @@

DIST_SUBDIRS = secp256k1 univalue

AM_LDFLAGS = $(PTHREAD_CFLAGS) $(LIBTOOL_LDFLAGS) $(HARDENED_LDFLAGS) $(GPROF_LDFLAGS)
AM_CXXFLAGS = $(HARDENED_CXXFLAGS) $(ERROR_CXXFLAGS) $(GPROF_CXXFLAGS)
AM_LDFLAGS = $(PTHREAD_CFLAGS) $(LIBTOOL_LDFLAGS) $(HARDENED_LDFLAGS) $(GPROF_LDFLAGS) $(SANITIZER_LDFLAGS)
AM_CXXFLAGS = $(HARDENED_CXXFLAGS) $(ERROR_CXXFLAGS) $(GPROF_CXXFLAGS) $(SANITIZER_CXXFLAGS)
AM_CPPFLAGS = $(HARDENED_CPPFLAGS)
AM_LIBTOOLFLAGS = --preserve-dup-deps
EXTRA_LIBRARIES =
src/dbwrapper.cpp

@@ -72,6 +72,31 @@ public:
    }
};

static void SetMaxOpenFiles(leveldb::Options *options) {
    // On most platforms the default setting of max_open_files (which is 1000)
    // is optimal. On Windows using a large file count is OK because the handles
    // do not interfere with select() loops. On 64-bit Unix hosts this value is
    // also OK, because up to that amount LevelDB will use an mmap
    // implementation that does not use extra file descriptors (the fds are
    // closed after being mmaped).
    //
    // Increasing the value beyond the default is dangerous because LevelDB will
    // fall back to a non-mmap implementation when the file count is too large.
    // On 32-bit Unix host we should decrease the value because the handles use
    // up real fds, and we want to avoid fd exhaustion issues.
    //
    // See PR #12495 for further discussion.

    int default_open_files = options->max_open_files;
#ifndef WIN32
    if (sizeof(void*) < 8) {
        options->max_open_files = 64;
    }
#endif
    LogPrint(BCLog::LEVELDB, "LevelDB using max_open_files=%d (default=%d)\n",
             options->max_open_files, default_open_files);
}

static leveldb::Options GetOptions(size_t nCacheSize)
{
    leveldb::Options options;
@@ -79,13 +104,13 @@ static leveldb::Options GetOptions(size_t nCacheSize)
    options.write_buffer_size = nCacheSize / 4; // up to two write buffers may be held in memory simultaneously
    options.filter_policy = leveldb::NewBloomFilterPolicy(10);
    options.compression = leveldb::kNoCompression;
    options.max_open_files = 64;
    options.info_log = new CBitcoinLevelDBLogger();
    if (leveldb::kMajorVersion > 1 || (leveldb::kMajorVersion == 1 && leveldb::kMinorVersion >= 16)) {
        // LevelDB versions before 1.16 consider short writes to be corruption. Only trigger error
        // on corruption in later versions.
        options.paranoid_checks = true;
    }
    SetMaxOpenFiles(&options);
    return options;
}

@@ -160,12 +185,12 @@ bool CDBWrapper::WriteBatch(CDBBatch& batch, bool fSync)
    const bool log_memory = LogAcceptCategory(BCLog::LEVELDB);
    double mem_before = 0;
    if (log_memory) {
        mem_before = DynamicMemoryUsage() / 1024 / 1024;
        mem_before = DynamicMemoryUsage() / 1024.0 / 1024;
    }
    leveldb::Status status = pdb->Write(fSync ? syncoptions : writeoptions, &batch.batch);
    dbwrapper_private::HandleError(status);
    if (log_memory) {
        double mem_after = DynamicMemoryUsage() / 1024 / 1024;
        double mem_after = DynamicMemoryUsage() / 1024.0 / 1024;
        LogPrint(BCLog::LEVELDB, "WriteBatch memory usage: db=%s, before=%.1fMiB, after=%.1fMiB\n",
                 m_name, mem_before, mem_after);
    }
src/init.cpp

@@ -456,6 +456,7 @@ std::string HelpMessage(HelpMessageMode mode)
    strUsage += HelpMessageOpt("-?", _("Print this help message and exit"));
    strUsage += HelpMessageOpt("-alertnotify=<cmd>", _("Execute command when a relevant alert is received or we see a really long fork (%s in cmd is replaced by message)"));
    strUsage += HelpMessageOpt("-assumevalid=<hex>", strprintf(_("If this block is in the chain assume that it and its ancestors are valid and potentially skip their script verification (0 to verify all, default: %s, testnet: %s)"), defaultChainParams->GetConsensus().defaultAssumeValid.GetHex(), testnetChainParams->GetConsensus().defaultAssumeValid.GetHex()));
    strUsage += HelpMessageOpt("-blocksdir=<dir>", _("Specify blocks directory (default: <datadir>/blocks)"));
    strUsage += HelpMessageOpt("-blocknotify=<cmd>", _("Execute command when the best block changes (%s in cmd is replaced by block hash)"));
    strUsage += HelpMessageOpt("-blockreconstructionextratxn=<n>", strprintf(_("Extra transactions to keep in memory for compact block reconstructions (default: %u)"), DEFAULT_BLOCK_RECONSTRUCTION_EXTRA_TXN));
    if (showDebug)
@@ -734,7 +735,7 @@ void CleanupBlockRevFiles()
    // Remove the rev files immediately and insert the blk file paths into an
    // ordered map keyed by block file index.
    LogPrintf("Removing unusable blk?????.dat and rev?????.dat files for -reindex with -prune\n");
    fs::path blocksdir = GetDataDir() / "blocks";
    fs::path blocksdir = GetBlocksDir();
    for (fs::directory_iterator it(blocksdir); it != fs::directory_iterator(); it++) {
        if (fs::is_regular_file(*it) &&
            it->path().filename().string().length() == 12 &&
@@ -1111,6 +1112,10 @@ bool AppInitParameterInteraction()

    // also see: InitParameterInteraction()

    if (!fs::is_directory(GetBlocksDir(false))) {
        return InitError(strprintf(_("Specified blocks directory \"%s\" does not exist."), gArgs.GetArg("-blocksdir", "").c_str()));
    }

    // if using block pruning, then disallow txindex and require disabling governance validation
    if (gArgs.GetArg("-prune", 0)) {
        if (gArgs.GetBoolArg("-txindex", DEFAULT_TXINDEX))
@@ -2206,7 +2211,7 @@ bool AppInitMain()

    // ********************************************************* Step 11: import blocks

    if (!CheckDiskSpace())
    if (!CheckDiskSpace() && !CheckDiskSpace(0, true))
        return false;

    // Either install a handler to notify us when genesis activates, or set fHaveGenesis directly.
src/keystore.h

@@ -10,35 +10,31 @@
#include <key.h>
#include <pubkey.h>
#include <script/script.h>
#include <script/sign.h>
#include <script/standard.h>
#include <sync.h>

#include <boost/signals2/signal.hpp>

/** A virtual base class for key stores */
class CKeyStore
class CKeyStore : public SigningProvider
{
protected:
    mutable CCriticalSection cs_KeyStore;

public:
    virtual ~CKeyStore() {}

    //! Add a key to the store.
    virtual bool AddKeyPubKey(const CKey &key, const CPubKey &pubkey) =0;
    virtual bool AddKey(const CKey &key);

    //! Check whether a key corresponding to a given address is present in the store.
    virtual bool HaveKey(const CKeyID &address) const =0;
    virtual bool GetKey(const CKeyID &address, CKey& keyOut) const =0;
    virtual std::set<CKeyID> GetKeys() const =0;
    virtual bool GetPubKey(const CKeyID &address, CPubKey& vchPubKeyOut) const =0;

    //! Support for BIP 0013 : see https://github.com/bitcoin/bips/blob/master/bip-0013.mediawiki
    virtual bool AddCScript(const CScript& redeemScript) =0;
    virtual bool HaveCScript(const CScriptID &hash) const =0;
    virtual std::set<CScriptID> GetCScripts() const =0;
    virtual bool GetCScript(const CScriptID &hash, CScript& redeemScriptOut) const =0;

    //! Support for Watch-only addresses
    virtual bool AddWatchOnly(const CScript &dest) =0;
src/qt/dash.cpp

@@ -597,7 +597,7 @@ int main(int argc, char *argv[])
    if (!Intro::pickDataDirectory(*node))
        return EXIT_SUCCESS;

    /// 6. Determine availability of data directory and parse dash.conf
    /// 6. Determine availability of data and blocks directory and parse dash.conf
    /// - Do not call GetDataDir(true) before this step finishes
    if (!fs::is_directory(GetDataDir(false)))
    {
src/rest.cpp (38 lines changed)

@@ -487,26 +487,28 @@ static bool rest_getutxos(HTTPRequest* req, const std::string& strURIPart)
    std::vector<bool> hits;
    bitmap.resize((vOutPoints.size() + 7) / 8);
    {
        LOCK2(cs_main, mempool.cs);

        CCoinsView viewDummy;
        CCoinsViewCache view(&viewDummy);

        CCoinsViewCache& viewChain = *pcoinsTip;
        CCoinsViewMemPool viewMempool(&viewChain, mempool);

        if (fCheckMemPool)
            view.SetBackend(viewMempool); // switch cache backend to db+mempool in case user likes to query mempool

        for (size_t i = 0; i < vOutPoints.size(); i++) {
            bool hit = false;
            Coin coin;
            if (view.GetCoin(vOutPoints[i], coin) && !mempool.isSpent(vOutPoints[i])) {
                hit = true;
                outs.emplace_back(std::move(coin));
    auto process_utxos = [&vOutPoints, &outs, &hits](const CCoinsView& view, const CTxMemPool& mempool) {
        for (const COutPoint& vOutPoint : vOutPoints) {
            Coin coin;
            bool hit = !mempool.isSpent(vOutPoint) && view.GetCoin(vOutPoint, coin);
            hits.push_back(hit);
            if (hit) outs.emplace_back(std::move(coin));
        }
    };

            hits.push_back(hit);
    if (fCheckMemPool) {
        // use db+mempool as cache backend in case user likes to query mempool
        LOCK2(cs_main, mempool.cs);
        CCoinsViewCache& viewChain = *pcoinsTip;
        CCoinsViewMemPool viewMempool(&viewChain, mempool);
        process_utxos(viewMempool, mempool);
    } else {
        LOCK(cs_main); // no need to lock mempool!
        process_utxos(*pcoinsTip, CTxMemPool());
    }

    for (size_t i = 0; i < hits.size(); ++i) {
        const bool hit = hits[i];
        bitmapStringRepresentation.append(hit ? "1" : "0"); // form a binary string representation (human-readable for json output)
        bitmap[i / 8] |= ((uint8_t)hit) << (i % 8);
    }
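The two code paths above correspond to the two URI shapes exercised by the functional test near the end of this diff; against a local node started with `-rest`, the queries look roughly like this (host, port, and txid are placeholders, assuming default ports):

```bash
# Confirmed UTXOs only (no mempool view)
curl http://127.0.0.1:9998/rest/getutxos/<txid>-0.json

# Same outpoint, but also consulting the mempool
curl http://127.0.0.1:9998/rest/getutxos/checkmempool/<txid>-0.json
```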
src/script/sign.cpp

@@ -6,7 +6,6 @@
#include <script/sign.h>

#include <key.h>
#include <keystore.h>
#include <policy/policy.h>
#include <primitives/transaction.h>
#include <script/standard.h>
@@ -15,12 +14,12 @@

typedef std::vector<unsigned char> valtype;

TransactionSignatureCreator::TransactionSignatureCreator(const CKeyStore* keystoreIn, const CTransaction* txToIn, unsigned int nInIn, const CAmount& amountIn, int nHashTypeIn) : BaseSignatureCreator(keystoreIn), txTo(txToIn), nIn(nInIn), nHashType(nHashTypeIn), amount(amountIn), checker(txTo, nIn, amountIn) {}
TransactionSignatureCreator::TransactionSignatureCreator(const SigningProvider* provider, const CTransaction* txToIn, unsigned int nInIn, const CAmount& amountIn, int nHashTypeIn) : BaseSignatureCreator(provider), txTo(txToIn), nIn(nInIn), nHashType(nHashTypeIn), amount(amountIn), checker(txTo, nIn, amountIn) {}

bool TransactionSignatureCreator::CreateSig(std::vector<unsigned char>& vchSig, const CKeyID& address, const CScript& scriptCode, SigVersion sigversion) const
{
    CKey key;
    if (!keystore->GetKey(address, key))
    if (!m_provider->GetKey(address, key))
        return false;

    uint256 hash = SignatureHash(scriptCode, *txTo, nIn, nHashType, amount, sigversion);
@@ -86,12 +85,12 @@ static bool SignStep(const BaseSignatureCreator& creator, const CScript& scriptP
    else
    {
        CPubKey vch;
        creator.KeyStore().GetPubKey(keyID, vch);
        creator.Provider().GetPubKey(keyID, vch);
        ret.push_back(ToByteVector(vch));
    }
    return true;
case TX_SCRIPTHASH:
    if (creator.KeyStore().GetCScript(uint160(vSolutions[0]), scriptRet)) {
    if (creator.Provider().GetCScript(uint160(vSolutions[0]), scriptRet)) {
        ret.push_back(std::vector<unsigned char>(scriptRet.begin(), scriptRet.end()));
        return true;
    }
@@ -164,12 +163,12 @@ void UpdateTransaction(CMutableTransaction& tx, unsigned int nIn, const Signatur
    tx.vin[nIn].scriptSig = data.scriptSig;
}

bool SignSignature(const CKeyStore &keystore, const CScript& fromPubKey, CMutableTransaction& txTo, unsigned int nIn, const CAmount& amount, int nHashType)
bool SignSignature(const SigningProvider &provider, const CScript& fromPubKey, CMutableTransaction& txTo, unsigned int nIn, const CAmount& amount, int nHashType)
{
    assert(nIn < txTo.vin.size());

    CTransaction txToConst(txTo);
    TransactionSignatureCreator creator(&keystore, &txToConst, nIn, amount, nHashType);
    TransactionSignatureCreator creator(&provider, &txToConst, nIn, amount, nHashType);

    SignatureData sigdata;
    bool ret = ProduceSignature(creator, fromPubKey, sigdata);
@@ -177,14 +176,14 @@ bool SignSignature(const CKeyStore &keystore, const CScript& fromPubKey, CMutabl
    return ret;
}

bool SignSignature(const CKeyStore &keystore, const CTransaction& txFrom, CMutableTransaction& txTo, unsigned int nIn, int nHashType)
bool SignSignature(const SigningProvider &provider, const CTransaction& txFrom, CMutableTransaction& txTo, unsigned int nIn, int nHashType)
{
    assert(nIn < txTo.vin.size());
    CTxIn& txin = txTo.vin[nIn];
    assert(txin.prevout.n < txFrom.vout.size());
    const CTxOut& txout = txFrom.vout[txin.prevout.n];

    return SignSignature(keystore, txout.scriptPubKey, txTo, nIn, txout.nValue, nHashType);
    return SignSignature(provider, txout.scriptPubKey, txTo, nIn, txout.nValue, nHashType);
}

static std::vector<valtype> CombineMultisig(const CScript& scriptPubKey, const BaseSignatureChecker& checker,
src/script/sign.h

@@ -8,21 +8,32 @@

#include <script/interpreter.h>

class CKey;
class CKeyID;
class CKeyStore;
class CScript;
class CScriptID;
class CTransaction;

struct CMutableTransaction;

/** An interface to be implemented by keystores that support signing. */
class SigningProvider
{
public:
    virtual ~SigningProvider() {}
    virtual bool GetCScript(const CScriptID &scriptid, CScript& script) const =0;
    virtual bool GetPubKey(const CKeyID &address, CPubKey& pubkey) const =0;
    virtual bool GetKey(const CKeyID &address, CKey& key) const =0;
};

/** Virtual base class for signature creators. */
class BaseSignatureCreator {
protected:
    const CKeyStore* keystore;
    const SigningProvider* m_provider;

public:
    explicit BaseSignatureCreator(const CKeyStore* keystoreIn) : keystore(keystoreIn) {}
    const CKeyStore& KeyStore() const { return *keystore; };
    explicit BaseSignatureCreator(const SigningProvider* provider) : m_provider(provider) {}
    const SigningProvider& Provider() const { return *m_provider; }
    virtual ~BaseSignatureCreator() {}
    virtual const BaseSignatureChecker& Checker() const =0;

@@ -39,7 +50,7 @@ class TransactionSignatureCreator : public BaseSignatureCreator {
    const TransactionSignatureChecker checker;

public:
    TransactionSignatureCreator(const CKeyStore* keystoreIn, const CTransaction* txToIn, unsigned int nInIn, const CAmount& amountIn, int nHashTypeIn=SIGHASH_ALL);
    TransactionSignatureCreator(const SigningProvider* provider, const CTransaction* txToIn, unsigned int nInIn, const CAmount& amountIn, int nHashTypeIn=SIGHASH_ALL);
    const BaseSignatureChecker& Checker() const override{ return checker; }
    bool CreateSig(std::vector<unsigned char>& vchSig, const CKeyID& keyid, const CScript& scriptCode, SigVersion sigversion) const override;
};
@@ -48,13 +59,13 @@ class MutableTransactionSignatureCreator : public TransactionSignatureCreator {
    CTransaction tx;

public:
    MutableTransactionSignatureCreator(const CKeyStore* keystoreIn, const CMutableTransaction* txToIn, unsigned int nInIn, const CAmount& amount, int nHashTypeIn) : TransactionSignatureCreator(keystoreIn, &tx, nInIn, amount, nHashTypeIn), tx(*txToIn) {}
    MutableTransactionSignatureCreator(const SigningProvider* provider, const CMutableTransaction* txToIn, unsigned int nInIn, const CAmount& amount, int nHashTypeIn) : TransactionSignatureCreator(provider, &tx, nInIn, amount, nHashTypeIn), tx(*txToIn) {}
};

/** A signature creator that just produces 72-byte empty signatures. */
class DummySignatureCreator : public BaseSignatureCreator {
public:
    explicit DummySignatureCreator(const CKeyStore* keystoreIn) : BaseSignatureCreator(keystoreIn) {}
    explicit DummySignatureCreator(const SigningProvider* provider) : BaseSignatureCreator(provider) {}
    const BaseSignatureChecker& Checker() const override;
    bool CreateSig(std::vector<unsigned char>& vchSig, const CKeyID& keyid, const CScript& scriptCode, SigVersion sigversion) const override;
};
@@ -70,8 +81,8 @@ struct SignatureData {
bool ProduceSignature(const BaseSignatureCreator& creator, const CScript& scriptPubKey, SignatureData& sigdata);

/** Produce a script signature for a transaction. */
bool SignSignature(const CKeyStore &keystore, const CScript& fromPubKey, CMutableTransaction& txTo, unsigned int nIn, const CAmount& amount, int nHashType);
bool SignSignature(const CKeyStore& keystore, const CTransaction& txFrom, CMutableTransaction& txTo, unsigned int nIn, int nHashType);
bool SignSignature(const SigningProvider &provider, const CScript& fromPubKey, CMutableTransaction& txTo, unsigned int nIn, const CAmount& amount, int nHashType);
bool SignSignature(const SigningProvider &provider, const CTransaction& txFrom, CMutableTransaction& txTo, unsigned int nIn, int nHashType);

/** Combine two script signatures using a generic signature checker, intelligently, possibly with OP_0 placeholders. */
SignatureData CombineSignatures(const CScript& scriptPubKey, const BaseSignatureChecker& checker, const SignatureData& scriptSig1, const SignatureData& scriptSig2);
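For context, the three pure virtual methods are all that `SignSignature`/`ProduceSignature` need from a backend; a minimal in-memory provider (hypothetical, not part of this diff) could look like:

```cpp
// Hypothetical sketch: a SigningProvider backed by flat maps. It only
// exercises the interface declared above; CKeyStore remains the real
// implementation used by the wallet.
class MapSigningProvider : public SigningProvider
{
    std::map<CKeyID, CKey> keys;
    std::map<CScriptID, CScript> scripts;

public:
    bool GetKey(const CKeyID& address, CKey& key) const override
    {
        auto it = keys.find(address);
        if (it == keys.end()) return false;
        key = it->second;
        return true;
    }
    bool GetPubKey(const CKeyID& address, CPubKey& pubkey) const override
    {
        CKey key;
        if (!GetKey(address, key)) return false;
        pubkey = key.GetPubKey(); // derive the pubkey from the private key
        return true;
    }
    bool GetCScript(const CScriptID& scriptid, CScript& script) const override
    {
        auto it = scripts.find(scriptid);
        if (it == scripts.end()) return false;
        script = it->second;
        return true;
    }
};
```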
src/txdb.cpp

@@ -151,7 +151,7 @@ size_t CCoinsViewDB::EstimateSize() const
    return db.EstimateSize(DB_COIN, (char)(DB_COIN+1));
}

CBlockTreeDB::CBlockTreeDB(size_t nCacheSize, bool fMemory, bool fWipe) : CDBWrapper(GetDataDir() / "blocks" / "index", nCacheSize, fMemory, fWipe), mapHasTxIndexCache(10000, 20000) {
CBlockTreeDB::CBlockTreeDB(size_t nCacheSize, bool fMemory, bool fWipe) : CDBWrapper(gArgs.IsArgSet("-blocksdir") ? GetDataDir() / "blocks" / "index" : GetBlocksDir() / "index", nCacheSize, fMemory, fWipe), mapHasTxIndexCache(10000, 20000) {
}

bool CBlockTreeDB::ReadBlockFileInfo(int nFile, CBlockFileInfo &info) {
src/txmempool.cpp

@@ -340,7 +340,7 @@ CTxMemPool::CTxMemPool(CBlockPolicyEstimator* estimator) :
    nCheckFrequency = 0;
}

bool CTxMemPool::isSpent(const COutPoint& outpoint)
bool CTxMemPool::isSpent(const COutPoint& outpoint) const
{
    LOCK(cs);
    return mapNextTx.count(outpoint);
src/txmempool.h

@@ -589,7 +589,7 @@ public:
    void _clear() EXCLUSIVE_LOCKS_REQUIRED(cs); //lock free
    bool CompareDepthAndScore(const uint256& hasha, const uint256& hashb);
    void queryHashes(std::vector<uint256>& vtxid);
    bool isSpent(const COutPoint& outpoint);
    bool isSpent(const COutPoint& outpoint) const;
    unsigned int GetTransactionsUpdated() const;
    void AddTransactionsUpdated(unsigned int n);
    /**
src/util.cpp (33 lines changed)

@@ -609,10 +609,41 @@ fs::path GetDefaultDataDir()
#endif
}

static fs::path g_blocks_path_cached;
static fs::path g_blocks_path_cache_net_specific;
static fs::path pathCached;
static fs::path pathCachedNetSpecific;
static CCriticalSection csPathCached;

const fs::path &GetBlocksDir(bool fNetSpecific)
{

    LOCK(csPathCached);

    fs::path &path = fNetSpecific ? g_blocks_path_cache_net_specific : g_blocks_path_cached;

    // This can be called during exceptions by LogPrintf(), so we cache the
    // value so we don't have to do memory allocations after that.
    if (!path.empty())
        return path;

    if (gArgs.IsArgSet("-blocksdir")) {
        path = fs::system_complete(gArgs.GetArg("-blocksdir", ""));
        if (!fs::is_directory(path)) {
            path = "";
            return path;
        }
    } else {
        path = GetDataDir(false);
    }
    if (fNetSpecific)
        path /= BaseParams().DataDir();

    path /= "blocks";
    fs::create_directories(path);
    return path;
}

const fs::path &GetDataDir(bool fNetSpecific)
{

@@ -659,6 +690,8 @@ void ClearDatadirCache()

    pathCached = fs::path();
    pathCachedNetSpecific = fs::path();
    g_blocks_path_cached = fs::path();
    g_blocks_path_cache_net_specific = fs::path();
}

fs::path GetConfigFile(const std::string& confPath)
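With `GetBlocksDir()` in place, the block files can live on a different volume than the rest of the datadir; per the `init.cpp` check earlier in this diff, the directory must already exist. An illustrative invocation (paths are made up):

```bash
mkdir -p /mnt/bigdisk/dash-blocks
src/dashd -blocksdir=/mnt/bigdisk/dash-blocks
```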
src/util.h

@@ -105,6 +105,7 @@ void ReleaseDirectoryLocks();

bool TryCreateDirectories(const fs::path& p);
fs::path GetDefaultDataDir();
const fs::path &GetBlocksDir(bool fNetSpecific = true);
const fs::path &GetDataDir(bool fNetSpecific = true);
fs::path GetBackupsDir();
void ClearDatadirCache();
src/validation.cpp

@@ -2474,7 +2474,7 @@ bool static FlushStateToDisk(const CChainParams& chainparams, CValidationState &
    // Write blocks and block index to disk.
    if (fDoFullFlush || fPeriodicWrite) {
        // Depend on nMinDiskSpace to ensure we can write block index
        if (!CheckDiskSpace(0))
        if (!CheckDiskSpace(0, true))
            return state.Error("out of disk space");
        // First make sure all block and undo data is flushed to disk.
        FlushBlockFile();
@@ -3373,7 +3373,7 @@ static bool FindBlockPos(CDiskBlockPos &pos, unsigned int nAddSize, unsigned int
    if (nNewChunks > nOldChunks) {
        if (fPruneMode)
            fCheckForPruning = true;
        if (CheckDiskSpace(nNewChunks * BLOCKFILE_CHUNK_SIZE - pos.nPos)) {
        if (CheckDiskSpace(nNewChunks * BLOCKFILE_CHUNK_SIZE - pos.nPos, true)) {
            FILE *file = OpenBlockFile(pos);
            if (file) {
                LogPrintf("Pre-allocating up to position 0x%x in blk%05u.dat\n", nNewChunks * BLOCKFILE_CHUNK_SIZE, pos.nFile);
@@ -3406,7 +3406,7 @@ static bool FindUndoPos(CValidationState &state, int nFile, CDiskBlockPos &pos,
    if (nNewChunks > nOldChunks) {
        if (fPruneMode)
            fCheckForPruning = true;
        if (CheckDiskSpace(nNewChunks * UNDOFILE_CHUNK_SIZE - pos.nPos)) {
        if (CheckDiskSpace(nNewChunks * UNDOFILE_CHUNK_SIZE - pos.nPos, true)) {
            FILE *file = OpenUndoFile(pos);
            if (file) {
                LogPrintf("Pre-allocating up to position 0x%x in rev%05u.dat\n", nNewChunks * UNDOFILE_CHUNK_SIZE, pos.nFile);
@@ -4056,9 +4056,9 @@ static void FindFilesToPrune(std::set<int>& setFilesToPrune, uint64_t nPruneAfte
             nLastBlockWeCanPrune, count);
}

bool CheckDiskSpace(uint64_t nAdditionalBytes)
bool CheckDiskSpace(uint64_t nAdditionalBytes, bool blocks_dir)
{
    uint64_t nFreeBytesAvailable = fs::space(GetDataDir()).available;
    uint64_t nFreeBytesAvailable = fs::space(blocks_dir ? GetBlocksDir() : GetDataDir()).available;

    // Check for nMinDiskSpace bytes (currently 50MB)
    if (nFreeBytesAvailable < nMinDiskSpace + nAdditionalBytes)
@@ -4101,7 +4101,7 @@ static FILE* OpenUndoFile(const CDiskBlockPos &pos, bool fReadOnly) {

fs::path GetBlockPosFilename(const CDiskBlockPos &pos, const char *prefix)
{
    return GetDataDir() / "blocks" / strprintf("%s%05u.dat", prefix, pos.nFile);
    return GetBlocksDir() / strprintf("%s%05u.dat", prefix, pos.nFile);
}

CBlockIndex * CChainState::InsertBlockIndex(const uint256& hash)
src/validation.h

@@ -258,7 +258,7 @@ bool ProcessNewBlock(const CChainParams& chainparams, const std::shared_ptr<cons
bool ProcessNewBlockHeaders(const std::vector<CBlockHeader>& block, CValidationState& state, const CChainParams& chainparams, const CBlockIndex** ppindex = nullptr, CBlockHeader* first_invalid = nullptr) LOCKS_EXCLUDED(cs_main);

/** Check whether enough disk space is available for an incoming block */
bool CheckDiskSpace(uint64_t nAdditionalBytes = 0);
bool CheckDiskSpace(uint64_t nAdditionalBytes = 0, bool blocks_dir = false);
/** Open a block file (blk?????.dat) */
FILE* OpenBlockFile(const CDiskBlockPos &pos, bool fReadOnly = false);
/** Translation to a filesystem path */
src/wallet/wallet.cpp

@@ -5188,7 +5188,7 @@ CWallet* CWallet::CreateWalletFromFile(const std::string& name, const fs::path&

    // Top up the keypool
    if (!walletInstance->TopUpKeyPool()) {
        return error(_("Unable to generate initial keys") += "\n");
        return error(_("Unable to generate initial keys"));
    }

    walletInstance->SetBestChain(chainActive.GetLocator());
test/functional/feature_blocksdir.py (new executable file, 37 lines)

@@ -0,0 +1,37 @@
#!/usr/bin/env python3
# Copyright (c) 2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the blocksdir option.
"""

import os
import shutil

from test_framework.test_framework import BitcoinTestFramework, initialize_datadir


class BlocksdirTest(BitcoinTestFramework):
    def set_test_params(self):
        self.setup_clean_chain = True
        self.num_nodes = 1

    def run_test(self):
        self.stop_node(0)
        shutil.rmtree(self.nodes[0].datadir)
        initialize_datadir(self.options.tmpdir, 0)
        self.log.info("Starting with non exiting blocksdir ...")
        blocksdir_path = os.path.join(self.options.tmpdir, 'blocksdir')
        self.assert_start_raises_init_error(0, ["-blocksdir=" + blocksdir_path], 'Error: Specified blocks directory "' +
                                            blocksdir_path + '" does not exist.')
        os.mkdir(blocksdir_path)
        self.log.info("Starting with exiting blocksdir ...")
        self.start_node(0, ["-blocksdir=" + blocksdir_path])
        self.log.info("mining blocks..")
        self.nodes[0].generate(10)
        assert os.path.isfile(os.path.join(blocksdir_path, "regtest", "blocks", "blk00000.dat"))
        assert os.path.isdir(os.path.join(self.nodes[0].datadir, "regtest", "blocks", "index"))


if __name__ == '__main__':
    BlocksdirTest().main()
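Like any functional test, the new file can be run directly or through the runner it is registered with below:

```bash
test/functional/feature_blocksdir.py
# or, via the runner:
test/functional/test_runner.py feature_blocksdir.py
```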
test/functional/interface_rest.py

@@ -84,7 +84,7 @@ class RESTTest (BitcoinTestFramework):
        #######################################
        # GETUTXOS: query an unspent outpoint #
        #######################################
        json_request = '/checkmempool/'+txid+'-'+str(n)
        json_request = '/'+txid+'-'+str(n)
        json_string = http_get_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json')
        json_obj = json.loads(json_string)

@@ -99,7 +99,7 @@ class RESTTest (BitcoinTestFramework):
        #################################################
        # GETUTXOS: now query an already spent outpoint #
        #################################################
        json_request = '/checkmempool/'+vintx+'-0'
        json_request = '/'+vintx+'-0'
        json_string = http_get_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json')
        json_obj = json.loads(json_string)

@@ -116,7 +116,7 @@ class RESTTest (BitcoinTestFramework):
        ##################################################
        # GETUTXOS: now check both with the same request #
        ##################################################
        json_request = '/checkmempool/'+txid+'-'+str(n)+'/'+vintx+'-0'
        json_request = '/'+txid+'-'+str(n)+'/'+vintx+'-0'
        json_string = http_get_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json')
        json_obj = json.loads(json_string)
        assert_equal(len(json_obj['utxos']), 1)
@@ -150,23 +150,48 @@ class RESTTest (BitcoinTestFramework):
        txid = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.1)
        json_string = http_get_call(url.hostname, url.port, '/rest/tx/'+txid+self.FORMAT_SEPARATOR+"json")
        json_obj = json.loads(json_string)
        vintx = json_obj['vin'][0]['txid'] # get the vin to later check for utxo (should be spent by then)
        # get the spent output to later check for utxo (should be spent by then)
        spent = '{}-{}'.format(json_obj['vin'][0]['txid'], json_obj['vin'][0]['vout'])
        # get n of 0.1 outpoint
        n = 0
        for vout in json_obj['vout']:
            if vout['value'] == 0.1:
                n = vout['n']
        spending = '{}-{}'.format(txid, n)

        json_request = '/'+txid+'-'+str(n)
        json_request = '/'+spending
        json_string = http_get_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json')
        json_obj = json.loads(json_string)
        assert_equal(len(json_obj['utxos']), 0) #there should be an outpoint because it has just added to the mempool
        assert_equal(len(json_obj['utxos']), 0) #there should be no outpoint because it has just added to the mempool

        json_request = '/checkmempool/'+txid+'-'+str(n)
        json_request = '/checkmempool/'+spending
        json_string = http_get_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json')
        json_obj = json.loads(json_string)
        assert_equal(len(json_obj['utxos']), 1) #there should be an outpoint because it has just added to the mempool

        json_request = '/'+spent
        json_string = http_get_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json')
        json_obj = json.loads(json_string)
        assert_equal(len(json_obj['utxos']), 1) #there should be an outpoint because its spending tx is not confirmed

        json_request = '/checkmempool/'+spent
        json_string = http_get_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json')
        json_obj = json.loads(json_string)
        assert_equal(len(json_obj['utxos']), 0) #there should be no outpoint because it has just spent (by mempool tx)

        self.nodes[0].generate(1)
        self.sync_all()

        json_request = '/'+spending
        json_string = http_get_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json')
        json_obj = json.loads(json_string)
        assert_equal(len(json_obj['utxos']), 1) #there should be an outpoint because it was mined

        json_request = '/checkmempool/'+spending
        json_string = http_get_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json')
        json_obj = json.loads(json_string)
        assert_equal(len(json_obj['utxos']), 1) #there should be an outpoint because it was mined

        #do some invalid requests
        json_request = '{"checkmempool'
        response = http_post_call(url.hostname, url.port, '/rest/getutxos'+self.FORMAT_SEPARATOR+'json', json_request, True)
test/functional/test_runner.py

@@ -157,6 +157,7 @@ BASE_SCRIPTS= [
    'p2p_unrequested_blocks.py',
    'feature_logging.py',
    'p2p_node_network_limited.py',
    'feature_blocksdir.py',
    'feature_config_args.py',
    'feature_help.py',
    # Don't append tests at the end to avoid merge conflicts
@@ -411,7 +412,7 @@ def run_tests(*, test_list, src_dir, build_dir, tmpdir, jobs=1, enable_coverage=
def print_results(test_results, max_len_name, runtime):
    results = "\n" + BOLD[1] + "%s | %s | %s\n\n" % ("TEST".ljust(max_len_name), "STATUS ", "DURATION") + BOLD[0]

    test_results.sort(key=lambda result: result.name.lower())
    test_results.sort(key=TestResult.sort_key)
    all_passed = True
    time_sum = 0

@@ -422,7 +423,11 @@ def print_results(test_results, max_len_name, runtime):
        results += str(test_result)

    status = TICK + "Passed" if all_passed else CROSS + "Failed"
    if not all_passed:
        results += RED[1]
    results += BOLD[1] + "\n%s | %s | %s s (accumulated) \n" % ("ALL".ljust(max_len_name), status.ljust(9), time_sum) + BOLD[0]
    if not all_passed:
        results += RED[0]
    results += "Runtime: %s s\n" % (runtime)
    print(results)

@@ -511,6 +516,14 @@ class TestResult():
        self.time = time
        self.padding = 0

    def sort_key(self):
        if self.status == "Passed":
            return 0, self.name.lower()
        elif self.status == "Failed":
            return 2, self.name.lower()
        elif self.status == "Skipped":
            return 1, self.name.lower()

    def __repr__(self):
        if self.status == "Passed":
            color = BLUE