Merge pull request #4679 from Munkybooty/backports-0.19-pr12
backport: 0.19 pr12
Commit: bb4be52b48
@ -5,6 +5,10 @@ The package "mylib" will be used here as an example

General tips:

- mylib_foo is written as $(package)_foo in order to make recipes more similar.
- Secondary dependency packages relative to the bitcoin binaries/libraries (i.e.
  those not in `ALLOWED_LIBRARIES` in `contrib/devtools/symbol-check.py`) don't
  need to be shared and should be built statically whenever possible. See
  [below](#secondary-dependencies) for more details.

## Identifiers

Each package is required to define at least these variables:

@ -146,3 +150,34 @@ $($(package)_config_opts) will be appended.

Most autotools projects can be properly staged using:

    $(MAKE) DESTDIR=$($(package)_staging_dir) install

## Build outputs:

In general, the output of a depends package should not contain any libtool
archives. Instead, the package should output `.pc` (`pkg-config`) files where
possible.

From the [Gentoo Wiki entry](https://wiki.gentoo.org/wiki/Project:Quality_Assurance/Handling_Libtool_Archives):

> Libtool pulls in all direct and indirect dependencies into the .la files it
> creates. This leads to massive overlinking, which is toxic to the Gentoo
> ecosystem, as it leads to a massive number of unnecessary rebuilds.
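A minimal sketch of how this plays out in practice (the staging path is hypothetical; the package recipes in this PR achieve the same thing by running `rm lib/*.la` in their postprocess step):

```shell
# Hypothetical staging prefix, for illustration only: drop any libtool archives
# from the staged output and rely on the generated pkg-config files instead.
find /path/to/staging/lib -name '*.la' -delete
ls /path/to/staging/lib/pkgconfig/*.pc
```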
## Secondary dependencies:

Secondary dependency packages relative to the bitcoin binaries/libraries (i.e.
those not in `ALLOWED_LIBRARIES` in `contrib/devtools/symbol-check.py`) don't
need to be shared and should be built statically whenever possible. This
improves general build reliability as illustrated by the following example:

When linking an executable against a shared library `libprimary` that has its
own shared dependency `libsecondary`, we may need to specify the path to
`libsecondary` on the link command using the `-rpath/-rpath-link` options; it is
not sufficient to just link against `libprimary`.

For us, it's much easier to just link a static `libsecondary` into a shared
`libprimary`. Especially because in our case, we are linking against a dummy
`libprimary` anyway that we'll throw away. We don't care whether the end user has a
static or dynamic `libsecondary`; that's not our concern. With a static
`libsecondary`, when we need to link `libprimary` into our executable, there's no
dependency chain to worry about as `libprimary` has all the symbols.
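As a rough illustration of the linking difference (the compiler invocation and paths here are hypothetical, not part of the depends system):

```shell
# libsecondary is shared: the final link must also be told where to find it.
g++ main.o -L/path/to/primary -lprimary -Wl,-rpath-link,/path/to/secondary

# libsecondary is linked statically into libprimary: no extra search path is
# needed, because libprimary already carries all of libsecondary's symbols.
g++ main.o -L/path/to/primary -lprimary
```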
@ -20,3 +20,7 @@ endef
|
||||
define $(package)_stage_cmds
|
||||
$(MAKE) DESTDIR=$($(package)_staging_dir) install
|
||||
endef
|
||||
|
||||
define $(package)_postprocess_cmds
|
||||
rm lib/*.la
|
||||
endef
|
||||
|
@ -6,7 +6,7 @@ $(package)_sha256_hash=6049ddd5f3f3e2618f615f1faeda0a115104423a7996b7aa73e2f36e3
|
||||
$(package)_dependencies=expat
|
||||
|
||||
define $(package)_set_vars
|
||||
$(package)_config_opts=--disable-tests --disable-doxygen-docs --disable-xml-docs --disable-static --without-x
|
||||
$(package)_config_opts=--disable-tests --disable-doxygen-docs --disable-xml-docs --disable-shared --without-x
|
||||
endef
|
||||
|
||||
define $(package)_config_cmds
|
||||
@ -21,3 +21,7 @@ define $(package)_stage_cmds
|
||||
$(MAKE) -C dbus DESTDIR=$($(package)_staging_dir) install-libLTLIBRARIES install-dbusincludeHEADERS install-nodist_dbusarchincludeHEADERS && \
|
||||
$(MAKE) DESTDIR=$($(package)_staging_dir) install-pkgconfigDATA
|
||||
endef
|
||||
|
||||
define $(package)_postprocess_cmds
|
||||
rm lib/*.la
|
||||
endef
|
||||
|
@ -5,7 +5,8 @@ $(package)_file_name=$(package)-$($(package)_version).tar.bz2
|
||||
$(package)_sha256_hash=cbc9102f4a31a8dafd42d642e9a3aa31e79a0aedaa1f6efd2795ebc83174ec18
|
||||
|
||||
define $(package)_set_vars
|
||||
$(package)_config_opts=--disable-static --without-docbook --without-tests --without-examples
|
||||
$(package)_config_opts=--disable-shared --without-docbook --without-tests --without-examples
|
||||
$(package)_config_opts_linux=--with-pic
|
||||
endef
|
||||
|
||||
define $(package)_config_cmds
|
||||
@ -19,3 +20,7 @@ endef
|
||||
define $(package)_stage_cmds
|
||||
$(MAKE) DESTDIR=$($(package)_staging_dir) install
|
||||
endef
|
||||
|
||||
define $(package)_postprocess_cmds
|
||||
rm lib/*.la
|
||||
endef
|
||||
|
@ -26,3 +26,7 @@ endef
|
||||
define $(package)_stage_cmds
|
||||
$(MAKE) DESTDIR=$($(package)_staging_dir) install
|
||||
endef
|
||||
|
||||
define $(package)_postprocess_cmds
|
||||
rm lib/*.la
|
||||
endef
|
||||
|
@ -20,3 +20,7 @@ endef
|
||||
define $(package)_stage_cmds
|
||||
$(MAKE) DESTDIR=$($(package)_staging_dir) install
|
||||
endef
|
||||
|
||||
define $(package)_postprocess_cmds
|
||||
rm lib/*.la
|
||||
endef
|
||||
|
@ -22,3 +22,6 @@ define $(package)_stage_cmds
|
||||
$(MAKE) DESTDIR=$($(package)_staging_dir) install
|
||||
endef
|
||||
|
||||
define $(package)_postprocess_cmds
|
||||
rm lib/*.la
|
||||
endef
|
||||
|
@ -27,3 +27,7 @@ endef
|
||||
define $(package)_stage_cmds
|
||||
$(MAKE) DESTDIR=$($(package)_staging_dir) install
|
||||
endef
|
||||
|
||||
define $(package)_postprocess_cmds
|
||||
rm lib/*.la
|
||||
endef
|
||||
|
@ -35,4 +35,5 @@ define $(package)_stage_cmds
|
||||
endef
|
||||
|
||||
define $(package)_postprocess_cmds
|
||||
rm lib/*.la
|
||||
endef
|
||||
|
@ -44,5 +44,5 @@ define $(package)_stage_cmds
|
||||
endef
|
||||
|
||||
define $(package)_postprocess_cmds
|
||||
rm -rf share/man share/doc
|
||||
rm -rf share/man share/doc lib/*.la
|
||||
endef
|
||||
|
@ -26,3 +26,7 @@ endef
|
||||
define $(package)_stage_cmds
|
||||
$(MAKE) DESTDIR=$($(package)_staging_dir) install
|
||||
endef
|
||||
|
||||
define $(package)_postprocess_cmds
|
||||
rm lib/*.la
|
||||
endef
|
||||
|
@ -31,5 +31,5 @@ endef
|
||||
|
||||
define $(package)_postprocess_cmds
|
||||
sed -i.old "s/ -lstdc++//" lib/pkgconfig/libzmq.pc && \
|
||||
rm -rf bin share
|
||||
rm -rf bin share lib/*.la
|
||||
endef
|
||||
|
@ -1,73 +1,78 @@

macOS Build Instructions and Notes
====================================
The commands in this guide should be executed in a Terminal application.
The built-in one is located in `/Applications/Utilities/Terminal.app`.
# macOS Build Instructions and Notes

Preparation
-----------
The commands in this guide should be executed in a Terminal application.
The built-in one is located in
```
/Applications/Utilities/Terminal.app
```

## Preparation
Install the macOS command line tools:

`xcode-select --install`
```shell
xcode-select --install
```

When the popup appears, click `Install`.

Then install [Homebrew](https://brew.sh).

Base build dependencies
-----------------------
## Base build dependencies

```bash
```shell
brew install automake libtool pkg-config libnatpmp
```

If you want to build the disk image with `make deploy` (.dmg / optional), you need RSVG:
```bash
```shell
brew install librsvg
```

Building
--------
## Building

It's possible that your `PATH` environment variable contains some problematic strings, run
```bash
```shell
export PATH=$(echo "$PATH" | sed -e '/\\/!s/ /\\ /g') # fix whitespaces
```

Next, follow the instructions in [build-generic](build-generic.md)

Disable-wallet mode
--------------------
When the intention is to run only a P2P node without a wallet, Dash Core may be compiled in
disable-wallet mode with:

    ./configure --disable-wallet
## `disable-wallet` mode
When the intention is to run only a P2P node without a wallet, Dash Core may be
compiled in `disable-wallet` mode with:
```shell
./configure --disable-wallet
```

In this case there is no dependency on Berkeley DB 4.8.

Mining is also possible in disable-wallet mode using the `getblocktemplate` RPC call.

Running
-------
## Running

Dash Core is now available at `./src/dashd`

Before running, you may create an empty configuration file:
```shell
mkdir -p "/Users/${USER}/Library/Application Support/DashCore"

mkdir -p "/Users/${USER}/Library/Application Support/DashCore"
touch "/Users/${USER}/Library/Application Support/DashCore/dash.conf"

touch "/Users/${USER}/Library/Application Support/DashCore/dash.conf"
chmod 600 "/Users/${USER}/Library/Application Support/DashCore/dash.conf"
```

chmod 600 "/Users/${USER}/Library/Application Support/DashCore/dash.conf"

The first time you run dashd, it will start downloading the blockchain. This process could take many hours, or even days on slower than average systems.
The first time you run dashd, it will start downloading the blockchain. This process could
take many hours, or even days on slower than average systems.

You can monitor the download process by looking at the debug.log file:
```shell
tail -f $HOME/Library/Application\ Support/DashCore/debug.log
```

tail -f $HOME/Library/Application\ Support/DashCore/debug.log
## Other commands:

Other commands:
-------

./src/dashd -daemon # Starts the dash daemon.
./src/dash-cli --help # Outputs a list of command-line options.
./src/dash-cli help # Outputs a list of RPC commands when the daemon is running.
```shell
./src/dashd -daemon # Starts the dash daemon.
./src/dash-cli --help # Outputs a list of command-line options.
./src/dash-cli help # Outputs a list of RPC commands when the daemon is running.
```
3
doc/release-notes-14802.md
Normal file

@ -0,0 +1,3 @@

RPC changes
-----------
The `getblockstats` RPC is faster for fee calculation by using BlockUndo data. Also, `-txindex` is no longer required and `getblockstats` works for all non-pruned blocks.
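For example, fee statistics can now be queried from a node running without `-txindex` (the block height and selected stats below are illustrative):

```shell
# Works for any non-pruned block; -txindex is no longer required for fee stats.
./src/dash-cli getblockstats 150000 '["avgfee","avgfeerate","utxo_size_inc"]'
```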
@ -41,7 +41,7 @@ class LockImpl : public Chain::Lock, public UniqueLock<CCriticalSection>
|
||||
{
|
||||
Optional<int> getHeight() override
|
||||
{
|
||||
LockAnnotation lock(::cs_main);
|
||||
LockAssertion lock(::cs_main);
|
||||
int height = ::ChainActive().Height();
|
||||
if (height >= 0) {
|
||||
return height;
|
||||
@ -50,7 +50,7 @@ class LockImpl : public Chain::Lock, public UniqueLock<CCriticalSection>
|
||||
}
|
||||
Optional<int> getBlockHeight(const uint256& hash) override
|
||||
{
|
||||
LockAnnotation lock(::cs_main);
|
||||
LockAssertion lock(::cs_main);
|
||||
CBlockIndex* block = LookupBlockIndex(hash);
|
||||
if (block && ::ChainActive().Contains(block)) {
|
||||
return block->nHeight;
|
||||
@ -59,34 +59,34 @@ class LockImpl : public Chain::Lock, public UniqueLock<CCriticalSection>
|
||||
}
|
||||
uint256 getBlockHash(int height) override
|
||||
{
|
||||
LockAnnotation lock(::cs_main);
|
||||
LockAssertion lock(::cs_main);
|
||||
CBlockIndex* block = ::ChainActive()[height];
|
||||
assert(block != nullptr);
|
||||
return block->GetBlockHash();
|
||||
}
|
||||
int64_t getBlockTime(int height) override
|
||||
{
|
||||
LockAnnotation lock(::cs_main);
|
||||
LockAssertion lock(::cs_main);
|
||||
CBlockIndex* block = ::ChainActive()[height];
|
||||
assert(block != nullptr);
|
||||
return block->GetBlockTime();
|
||||
}
|
||||
int64_t getBlockMedianTimePast(int height) override
|
||||
{
|
||||
LockAnnotation lock(::cs_main);
|
||||
LockAssertion lock(::cs_main);
|
||||
CBlockIndex* block = ::ChainActive()[height];
|
||||
assert(block != nullptr);
|
||||
return block->GetMedianTimePast();
|
||||
}
|
||||
bool haveBlockOnDisk(int height) override
|
||||
{
|
||||
LockAnnotation lock(::cs_main);
|
||||
LockAssertion lock(::cs_main);
|
||||
CBlockIndex* block = ::ChainActive()[height];
|
||||
return block && ((block->nStatus & BLOCK_HAVE_DATA) != 0) && block->nTx > 0;
|
||||
}
|
||||
Optional<int> findFirstBlockWithTimeAndHeight(int64_t time, int height, uint256* hash) override
|
||||
{
|
||||
LockAnnotation lock(::cs_main);
|
||||
LockAssertion lock(::cs_main);
|
||||
CBlockIndex* block = ::ChainActive().FindEarliestAtLeast(time, height);
|
||||
if (block) {
|
||||
if (hash) *hash = block->GetBlockHash();
|
||||
@ -96,7 +96,7 @@ class LockImpl : public Chain::Lock, public UniqueLock<CCriticalSection>
|
||||
}
|
||||
Optional<int> findPruned(int start_height, Optional<int> stop_height) override
|
||||
{
|
||||
LockAnnotation lock(::cs_main);
|
||||
LockAssertion lock(::cs_main);
|
||||
if (::fPruneMode) {
|
||||
CBlockIndex* block = stop_height ? ::ChainActive()[*stop_height] : ::ChainActive().Tip();
|
||||
while (block && block->nHeight >= start_height) {
|
||||
@ -110,7 +110,7 @@ class LockImpl : public Chain::Lock, public UniqueLock<CCriticalSection>
|
||||
}
|
||||
Optional<int> findFork(const uint256& hash, Optional<int>* height) override
|
||||
{
|
||||
LockAnnotation lock(::cs_main);
|
||||
LockAssertion lock(::cs_main);
|
||||
const CBlockIndex* block = LookupBlockIndex(hash);
|
||||
const CBlockIndex* fork = block ? ::ChainActive().FindFork(block) : nullptr;
|
||||
if (height) {
|
||||
@ -127,12 +127,12 @@ class LockImpl : public Chain::Lock, public UniqueLock<CCriticalSection>
|
||||
}
|
||||
CBlockLocator getTipLocator() override
|
||||
{
|
||||
LockAnnotation lock(::cs_main);
|
||||
LockAssertion lock(::cs_main);
|
||||
return ::ChainActive().GetLocator();
|
||||
}
|
||||
Optional<int> findLocatorFork(const CBlockLocator& locator) override
|
||||
{
|
||||
LockAnnotation lock(::cs_main);
|
||||
LockAssertion lock(::cs_main);
|
||||
if (CBlockIndex* fork = FindForkInGlobalIndex(::ChainActive(), locator)) {
|
||||
return fork->nHeight;
|
||||
}
|
||||
@ -140,7 +140,7 @@ class LockImpl : public Chain::Lock, public UniqueLock<CCriticalSection>
|
||||
}
|
||||
bool checkFinalTx(const CTransaction& tx) override
|
||||
{
|
||||
LockAnnotation lock(::cs_main);
|
||||
LockAssertion lock(::cs_main);
|
||||
return CheckFinalTx(tx);
|
||||
}
|
||||
|
||||
|
@ -122,7 +122,7 @@ void TestGUI(interfaces::Node& node)
|
||||
}
|
||||
{
|
||||
auto locked_chain = wallet->chain().lock();
|
||||
LockAnnotation lock(::cs_main);
|
||||
LockAssertion lock(::cs_main);
|
||||
|
||||
WalletRescanReserver reserver(wallet.get());
|
||||
reserver.reserve();
|
||||
|
@ -26,6 +26,7 @@
|
||||
#include <streams.h>
|
||||
#include <sync.h>
|
||||
#include <txmempool.h>
|
||||
#include <undo.h>
|
||||
#include <util/strencodings.h>
|
||||
#include <util/validation.h>
|
||||
#include <util/system.h>
|
||||
@ -950,6 +951,19 @@ static CBlock GetBlockChecked(const CBlockIndex* pblockindex)
|
||||
return block;
|
||||
}
|
||||
|
||||
static CBlockUndo GetUndoChecked(const CBlockIndex* pblockindex)
|
||||
{
|
||||
CBlockUndo blockUndo;
|
||||
if (IsBlockPruned(pblockindex)) {
|
||||
throw JSONRPCError(RPC_MISC_ERROR, "Undo data not available (pruned data)");
|
||||
}
|
||||
|
||||
if (!UndoReadFromDisk(blockUndo, pblockindex)) {
|
||||
throw JSONRPCError(RPC_MISC_ERROR, "Can't read undo data from disk");
|
||||
}
|
||||
|
||||
return blockUndo;
|
||||
}
|
||||
|
||||
static UniValue getmerkleblocks(const JSONRPCRequest& request)
|
||||
{
|
||||
@ -1966,8 +1980,7 @@ static UniValue getblockstats(const JSONRPCRequest& request)
|
||||
{
|
||||
RPCHelpMan{"getblockstats",
|
||||
"\nCompute per block statistics for a given window. All amounts are in duffs.\n"
|
||||
"It won't work for some heights with pruning.\n"
|
||||
"It won't work without -txindex for utxo_size_inc, *fee or *feerate stats.\n",
|
||||
"It won't work for some heights with pruning.\n",
|
||||
{
|
||||
{"hash_or_height", RPCArg::Type::NUM, RPCArg::Optional::NO, "The block hash or height of the target block", "", {"", "string or numeric"}},
|
||||
{"stats", RPCArg::Type::ARR, /* default */ "all values", "Values to plot (see result below)",
|
||||
@ -2065,6 +2078,7 @@ static UniValue getblockstats(const JSONRPCRequest& request)
|
||||
}
|
||||
|
||||
const CBlock block = GetBlockChecked(pindex);
|
||||
const CBlockUndo blockUndo = GetUndoChecked(pindex);
|
||||
|
||||
const bool do_all = stats.size() == 0; // Calculate everything if nothing selected (default)
|
||||
const bool do_mediantxsize = do_all || stats.count("mediantxsize") != 0;
|
||||
@ -2076,10 +2090,6 @@ static UniValue getblockstats(const JSONRPCRequest& request)
|
||||
const bool do_calculate_size = do_all || do_mediantxsize ||
|
||||
SetHasKeys(stats, "total_size", "avgtxsize", "mintxsize", "maxtxsize", "avgfeerate", "feerate_percentiles", "minfeerate", "maxfeerate");
|
||||
|
||||
if (loop_inputs && !g_txindex) {
|
||||
throw JSONRPCError(RPC_INVALID_PARAMETER, "One or more of the selected stats requires -txindex enabled");
|
||||
}
|
||||
|
||||
CAmount maxfee = 0;
|
||||
CAmount maxfeerate = 0;
|
||||
CAmount minfee = MAX_MONEY;
|
||||
@ -2096,7 +2106,8 @@ static UniValue getblockstats(const JSONRPCRequest& request)
|
||||
std::vector<std::pair<CAmount, int64_t>> feerate_array;
|
||||
std::vector<int64_t> txsize_array;
|
||||
|
||||
for (const auto& tx : block.vtx) {
|
||||
for (size_t i = 0; i < block.vtx.size(); ++i) {
|
||||
const auto& tx = block.vtx.at(i);
|
||||
outputs += tx->vout.size();
|
||||
|
||||
CAmount tx_total_out = 0;
|
||||
@ -2128,14 +2139,9 @@ static UniValue getblockstats(const JSONRPCRequest& request)
|
||||
|
||||
if (loop_inputs) {
|
||||
CAmount tx_total_in = 0;
|
||||
for (const CTxIn& in : tx->vin) {
|
||||
CTransactionRef tx_in;
|
||||
uint256 hashBlock;
|
||||
if (!GetTransaction(in.prevout.hash, tx_in, Params().GetConsensus(), hashBlock)) {
|
||||
throw JSONRPCError(RPC_INTERNAL_ERROR, std::string("Unexpected internal error (tx index seems corrupt)"));
|
||||
}
|
||||
|
||||
CTxOut prevoutput = tx_in->vout[in.prevout.n];
|
||||
const auto& txundo = blockUndo.vtxundo.at(i - 1);
|
||||
for (const Coin& coin: txundo.vprevout) {
|
||||
const CTxOut& prevoutput = coin.out;
|
||||
|
||||
tx_total_in += prevoutput.nValue;
|
||||
utxo_size_inc -= GetSerializeSize(prevoutput, SER_NETWORK, PROTOCOL_VERSION) + PER_UTXO_OVERHEAD;
|
||||
|
14
src/sync.h
@ -351,4 +351,18 @@ public:
|
||||
}
|
||||
};
|
||||
|
||||
// Utility class for indicating to compiler thread analysis that a mutex is
|
||||
// locked (when it couldn't be determined otherwise).
|
||||
struct SCOPED_LOCKABLE LockAssertion
|
||||
{
|
||||
template <typename Mutex>
|
||||
explicit LockAssertion(Mutex& mutex) EXCLUSIVE_LOCK_FUNCTION(mutex)
|
||||
{
|
||||
#ifdef DEBUG_LOCKORDER
|
||||
AssertLockHeld(mutex);
|
||||
#endif
|
||||
}
|
||||
~LockAssertion() UNLOCK_FUNCTION() {}
|
||||
};
|
||||
|
||||
#endif // BITCOIN_SYNC_H
|
||||
|
@ -36,5 +36,37 @@ std::string getnewaddress(CWallet& w);
|
||||
/** Returns the generated coin */
|
||||
CTxIn generatetoaddress(const NodeContext&, const std::string& address);
|
||||
|
||||
/**
|
||||
* Increment a string. Useful to enumerate all fixed length strings with
|
||||
* characters in [min_char, max_char].
|
||||
*/
|
||||
template <typename CharType, size_t StringLength>
|
||||
bool NextString(CharType (&string)[StringLength], CharType min_char, CharType max_char)
|
||||
{
|
||||
for (CharType& elem : string) {
|
||||
bool has_next = elem != max_char;
|
||||
elem = elem < min_char || elem >= max_char ? min_char : CharType(elem + 1);
|
||||
if (has_next) return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
/**
|
||||
* Iterate over string values and call function for each string without
|
||||
* successive duplicate characters.
|
||||
*/
|
||||
template <typename CharType, size_t StringLength, typename Fn>
|
||||
void ForEachNoDup(CharType (&string)[StringLength], CharType min_char, CharType max_char, Fn&& fn) {
|
||||
for (bool has_next = true; has_next; has_next = NextString(string, min_char, max_char)) {
|
||||
int prev = -1;
|
||||
bool skip_string = false;
|
||||
for (CharType c : string) {
|
||||
if (c == prev) skip_string = true;
|
||||
if (skip_string || c < min_char || c > max_char) break;
|
||||
prev = c;
|
||||
}
|
||||
if (!skip_string) fn();
|
||||
}
|
||||
}
|
||||
|
||||
#endif // BITCOIN_TEST_UTIL_H
|
||||
|
@ -6,6 +6,7 @@
|
||||
|
||||
#include <clientversion.h>
|
||||
#include <sync.h>
|
||||
#include <test/util.h>
|
||||
#include <util/getuniquepath.h>
|
||||
#include <util/strencodings.h>
|
||||
#include <util/string.h>
|
||||
@ -176,6 +177,9 @@ struct TestArgsManager : public ArgsManager
|
||||
AddArg(arg.first, "", arg.second, OptionsCategory::OPTIONS);
|
||||
}
|
||||
}
|
||||
using ArgsManager::ReadConfigStream;
|
||||
using ArgsManager::cs_args;
|
||||
using ArgsManager::m_network;
|
||||
};
|
||||
|
||||
BOOST_AUTO_TEST_CASE(util_ParseParameters)
|
||||
@ -668,6 +672,306 @@ BOOST_AUTO_TEST_CASE(util_GetChainName)
|
||||
BOOST_CHECK_THROW(test_args.GetChainName(), std::runtime_error);
|
||||
}
|
||||
|
||||
// Test different ways settings can be merged, and verify results. This test can
|
||||
// be used to confirm that updates to settings code don't change behavior
|
||||
// unintentionally.
|
||||
//
|
||||
// The test covers:
|
||||
//
|
||||
// - Combining different setting actions. Possible actions are: configuring a
|
||||
// setting, negating a setting (adding "-no" prefix), and configuring/negating
|
||||
// settings in a network section (adding "main." or "test." prefixes).
|
||||
//
|
||||
// - Combining settings from command line arguments and a config file.
|
||||
//
|
||||
// - Combining SoftSet and ForceSet calls.
|
||||
//
|
||||
// - Testing "main" and "test" network values to make sure settings from network
|
||||
// sections are applied and to check for mainnet-specific behaviors like
|
||||
// inheriting settings from the default section.
|
||||
//
|
||||
// - Testing network-specific settings like "-wallet", that may be ignored
|
||||
// outside a network section, and non-network specific settings like "-server"
|
||||
// that aren't sensitive to the network.
|
||||
//
|
||||
struct ArgsMergeTestingSetup : public BasicTestingSetup {
|
||||
//! Max number of actions to sequence together. Can decrease this when
|
||||
//! debugging to make test results easier to understand.
|
||||
static constexpr int MAX_ACTIONS = 3;
|
||||
|
||||
enum Action { NONE, SET, NEGATE, SECTION_SET, SECTION_NEGATE };
|
||||
using ActionList = Action[MAX_ACTIONS];
|
||||
|
||||
//! Enumerate all possible test configurations.
|
||||
template <typename Fn>
|
||||
void ForEachMergeSetup(Fn&& fn)
|
||||
{
|
||||
ActionList arg_actions = {};
|
||||
ForEachNoDup(arg_actions, SET, SECTION_NEGATE, [&] {
|
||||
ActionList conf_actions = {};
|
||||
ForEachNoDup(conf_actions, SET, SECTION_NEGATE, [&] {
|
||||
for (bool soft_set : {false, true}) {
|
||||
for (bool force_set : {false, true}) {
|
||||
for (const std::string& section : {CBaseChainParams::MAIN, CBaseChainParams::TESTNET}) {
|
||||
for (const std::string& network : {CBaseChainParams::MAIN, CBaseChainParams::TESTNET}) {
|
||||
for (bool net_specific : {false, true}) {
|
||||
fn(arg_actions, conf_actions, soft_set, force_set, section, network, net_specific);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
//! Translate actions into a list of <key>=<value> setting strings.
|
||||
std::vector<std::string> GetValues(const ActionList& actions,
|
||||
const std::string& section,
|
||||
const std::string& name,
|
||||
const std::string& value_prefix)
|
||||
{
|
||||
std::vector<std::string> values;
|
||||
int suffix = 0;
|
||||
for (Action action : actions) {
|
||||
if (action == NONE) break;
|
||||
std::string prefix;
|
||||
if (action == SECTION_SET || action == SECTION_NEGATE) prefix = section + ".";
|
||||
if (action == SET || action == SECTION_SET) {
|
||||
for (int i = 0; i < 2; ++i) {
|
||||
values.push_back(prefix + name + "=" + value_prefix + std::to_string(++suffix));
|
||||
}
|
||||
}
|
||||
if (action == NEGATE || action == SECTION_NEGATE) {
|
||||
values.push_back(prefix + "no" + name + "=1");
|
||||
}
|
||||
}
|
||||
return values;
|
||||
}
|
||||
};
|
||||
|
||||
// Regression test covering different ways config settings can be merged. The
|
||||
// test parses and merges settings, representing the results as strings that get
|
||||
// compared against an expected hash. To debug, the result strings can be dumped
|
||||
// to a file (see comments below).
|
||||
BOOST_FIXTURE_TEST_CASE(util_ArgsMerge, ArgsMergeTestingSetup)
|
||||
{
|
||||
CHash256 out_sha;
|
||||
FILE* out_file = nullptr;
|
||||
if (const char* out_path = getenv("ARGS_MERGE_TEST_OUT")) {
|
||||
out_file = fsbridge::fopen(out_path, "w");
|
||||
if (!out_file) throw std::system_error(errno, std::generic_category(), "fopen failed");
|
||||
}
|
||||
|
||||
ForEachMergeSetup([&](const ActionList& arg_actions, const ActionList& conf_actions, bool soft_set, bool force_set,
|
||||
const std::string& section, const std::string& network, bool net_specific) {
|
||||
TestArgsManager parser;
|
||||
LOCK(parser.cs_args);
|
||||
|
||||
std::string desc = "net=";
|
||||
desc += network;
|
||||
parser.m_network = network;
|
||||
|
||||
const std::string& name = net_specific ? "wallet" : "server";
|
||||
const std::string key = "-" + name;
|
||||
parser.AddArg(key, name, ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
|
||||
if (net_specific) parser.SetNetworkOnlyArg(key);
|
||||
|
||||
auto args = GetValues(arg_actions, section, name, "a");
|
||||
std::vector<const char*> argv = {"ignored"};
|
||||
for (auto& arg : args) {
|
||||
arg.insert(0, "-");
|
||||
desc += " ";
|
||||
desc += arg;
|
||||
argv.push_back(arg.c_str());
|
||||
}
|
||||
std::string error;
|
||||
BOOST_CHECK(parser.ParseParameters(argv.size(), argv.data(), error));
|
||||
BOOST_CHECK_EQUAL(error, "");
|
||||
|
||||
std::string conf;
|
||||
for (auto& conf_val : GetValues(conf_actions, section, name, "c")) {
|
||||
desc += " ";
|
||||
desc += conf_val;
|
||||
conf += conf_val;
|
||||
conf += "\n";
|
||||
}
|
||||
std::istringstream conf_stream(conf);
|
||||
BOOST_CHECK(parser.ReadConfigStream(conf_stream, "filepath", error));
|
||||
BOOST_CHECK_EQUAL(error, "");
|
||||
|
||||
if (soft_set) {
|
||||
desc += " soft";
|
||||
parser.SoftSetArg(key, "soft1");
|
||||
parser.SoftSetArg(key, "soft2");
|
||||
}
|
||||
|
||||
if (force_set) {
|
||||
desc += " force";
|
||||
parser.ForceSetArg(key, "force1");
|
||||
parser.ForceSetArg(key, "force2");
|
||||
}
|
||||
|
||||
desc += " || ";
|
||||
|
||||
if (!parser.IsArgSet(key)) {
|
||||
desc += "unset";
|
||||
BOOST_CHECK(!parser.IsArgNegated(key));
|
||||
BOOST_CHECK_EQUAL(parser.GetArg(key, "default"), "default");
|
||||
BOOST_CHECK(parser.GetArgs(key).empty());
|
||||
} else if (parser.IsArgNegated(key)) {
|
||||
desc += "negated";
|
||||
BOOST_CHECK_EQUAL(parser.GetArg(key, "default"), "0");
|
||||
BOOST_CHECK(parser.GetArgs(key).empty());
|
||||
} else {
|
||||
desc += parser.GetArg(key, "default");
|
||||
desc += " |";
|
||||
for (const auto& arg : parser.GetArgs(key)) {
|
||||
desc += " ";
|
||||
desc += arg;
|
||||
}
|
||||
}
|
||||
|
||||
std::set<std::string> ignored = parser.GetUnsuitableSectionOnlyArgs();
|
||||
if (!ignored.empty()) {
|
||||
desc += " | ignored";
|
||||
for (const auto& arg : ignored) {
|
||||
desc += " ";
|
||||
desc += arg;
|
||||
}
|
||||
}
|
||||
|
||||
desc += "\n";
|
||||
|
||||
out_sha.Write(MakeUCharSpan(desc));
|
||||
if (out_file) {
|
||||
BOOST_REQUIRE(fwrite(desc.data(), 1, desc.size(), out_file) == desc.size());
|
||||
}
|
||||
});
|
||||
|
||||
if (out_file) {
|
||||
if (fclose(out_file)) throw std::system_error(errno, std::generic_category(), "fclose failed");
|
||||
out_file = nullptr;
|
||||
}
|
||||
|
||||
unsigned char out_sha_bytes[CSHA256::OUTPUT_SIZE];
|
||||
out_sha.Finalize(out_sha_bytes);
|
||||
std::string out_sha_hex = HexStr(out_sha_bytes);
|
||||
|
||||
// If check below fails, should manually dump the results with:
|
||||
//
|
||||
// ARGS_MERGE_TEST_OUT=results.txt ./test_dash --run_test=util_tests/util_ArgsMerge
|
||||
//
|
||||
// And verify diff against previous results to make sure the changes are expected.
|
||||
//
|
||||
// Results file is formatted like:
|
||||
//
|
||||
// <input> || <IsArgSet/IsArgNegated/GetArg output> | <GetArgs output> | <GetUnsuitable output>
|
||||
BOOST_CHECK_EQUAL(out_sha_hex, "b835eef5977d69114eb039a976201f8c7121f34fe2b7ea2b73cafb516e5c9dc8");
|
||||
}
|
||||
|
||||
// Similar test as above, but for ArgsManager::GetChainName function.
|
||||
struct ChainMergeTestingSetup : public BasicTestingSetup {
|
||||
static constexpr int MAX_ACTIONS = 2;
|
||||
|
||||
enum Action { NONE, ENABLE_TEST, DISABLE_TEST, NEGATE_TEST, ENABLE_REG, DISABLE_REG, NEGATE_REG };
|
||||
using ActionList = Action[MAX_ACTIONS];
|
||||
|
||||
//! Enumerate all possible test configurations.
|
||||
template <typename Fn>
|
||||
void ForEachMergeSetup(Fn&& fn)
|
||||
{
|
||||
ActionList arg_actions = {};
|
||||
ForEachNoDup(arg_actions, ENABLE_TEST, NEGATE_REG, [&] {
|
||||
ActionList conf_actions = {};
|
||||
ForEachNoDup(conf_actions, ENABLE_TEST, NEGATE_REG, [&] { fn(arg_actions, conf_actions); });
|
||||
});
|
||||
}
|
||||
};
|
||||
|
||||
BOOST_FIXTURE_TEST_CASE(util_ChainMerge, ChainMergeTestingSetup)
|
||||
{
|
||||
CHash256 out_sha;
|
||||
FILE* out_file = nullptr;
|
||||
if (const char* out_path = getenv("CHAIN_MERGE_TEST_OUT")) {
|
||||
out_file = fsbridge::fopen(out_path, "w");
|
||||
if (!out_file) throw std::system_error(errno, std::generic_category(), "fopen failed");
|
||||
}
|
||||
|
||||
ForEachMergeSetup([&](const ActionList& arg_actions, const ActionList& conf_actions) {
|
||||
TestArgsManager parser;
|
||||
LOCK(parser.cs_args);
|
||||
parser.AddArg("-regtest", "regtest", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
|
||||
parser.AddArg("-testnet", "testnet", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
|
||||
|
||||
auto arg = [](Action action) { return action == ENABLE_TEST ? "-testnet=1" :
|
||||
action == DISABLE_TEST ? "-testnet=0" :
|
||||
action == NEGATE_TEST ? "-notestnet=1" :
|
||||
action == ENABLE_REG ? "-regtest=1" :
|
||||
action == DISABLE_REG ? "-regtest=0" :
|
||||
action == NEGATE_REG ? "-noregtest=1" : nullptr; };
|
||||
|
||||
std::string desc;
|
||||
std::vector<const char*> argv = {"ignored"};
|
||||
for (Action action : arg_actions) {
|
||||
const char* argstr = arg(action);
|
||||
if (!argstr) break;
|
||||
argv.push_back(argstr);
|
||||
desc += " ";
|
||||
desc += argv.back();
|
||||
}
|
||||
std::string error;
|
||||
BOOST_CHECK(parser.ParseParameters(argv.size(), argv.data(), error));
|
||||
BOOST_CHECK_EQUAL(error, "");
|
||||
|
||||
std::string conf;
|
||||
for (Action action : conf_actions) {
|
||||
const char* argstr = arg(action);
|
||||
if (!argstr) break;
|
||||
desc += " ";
|
||||
desc += argstr + 1;
|
||||
conf += argstr + 1;
|
||||
}
|
||||
std::istringstream conf_stream(conf);
|
||||
BOOST_CHECK(parser.ReadConfigStream(conf_stream, "filepath", error));
|
||||
BOOST_CHECK_EQUAL(error, "");
|
||||
|
||||
desc += " || ";
|
||||
try {
|
||||
desc += parser.GetChainName();
|
||||
} catch (const std::runtime_error& e) {
|
||||
desc += "error: ";
|
||||
desc += e.what();
|
||||
}
|
||||
desc += "\n";
|
||||
|
||||
out_sha.Write(MakeUCharSpan(desc));
|
||||
if (out_file) {
|
||||
BOOST_REQUIRE(fwrite(desc.data(), 1, desc.size(), out_file) == desc.size());
|
||||
}
|
||||
});
|
||||
|
||||
if (out_file) {
|
||||
if (fclose(out_file)) throw std::system_error(errno, std::generic_category(), "fclose failed");
|
||||
out_file = nullptr;
|
||||
}
|
||||
|
||||
unsigned char out_sha_bytes[CSHA256::OUTPUT_SIZE];
|
||||
out_sha.Finalize(out_sha_bytes);
|
||||
std::string out_sha_hex = HexStr(out_sha_bytes);
|
||||
|
||||
// If check below fails, should manually dump the results with:
|
||||
//
|
||||
// CHAIN_MERGE_TEST_OUT=results.txt ./test_dash --run_test=util_tests/util_ChainMerge
|
||||
//
|
||||
// And verify diff against previous results to make sure the changes are expected.
|
||||
//
|
||||
// Results file is formatted like:
|
||||
//
|
||||
// <input> || <output>
|
||||
BOOST_CHECK_EQUAL(out_sha_hex, "3e70723862e346ed6e9b48d8efa13d4d56334c0b73fbf3c3a6ac8b8f4d914f65");
|
||||
}
|
||||
|
||||
BOOST_AUTO_TEST_CASE(util_FormatMoney)
|
||||
{
|
||||
BOOST_CHECK_EQUAL(FormatMoney(0), "0.00");
|
||||
|
@ -74,15 +74,4 @@ public:
|
||||
~StdLockGuard() UNLOCK_FUNCTION() {}
|
||||
};
|
||||
|
||||
// Utility class for indicating to compiler thread analysis that a mutex is
|
||||
// locked (when it couldn't be determined otherwise).
|
||||
struct SCOPED_LOCKABLE LockAnnotation
|
||||
{
|
||||
template <typename Mutex>
|
||||
explicit LockAnnotation(Mutex& mutex) EXCLUSIVE_LOCK_FUNCTION(mutex)
|
||||
{
|
||||
}
|
||||
~LockAnnotation() UNLOCK_FUNCTION() {}
|
||||
};
|
||||
|
||||
#endif // BITCOIN_THREADSAFETY_H
|
||||
|
@ -917,7 +917,7 @@ UniValue dumpwallet(const JSONRPCRequest& request)
|
||||
CWallet* const pwallet = wallet.get();
|
||||
|
||||
auto locked_chain = pwallet->chain().lock();
|
||||
LockAnnotation lock(::cs_main);
|
||||
LockAssertion lock(::cs_main);
|
||||
LOCK(pwallet->cs_wallet);
|
||||
|
||||
EnsureWalletIsUnlocked(pwallet);
|
||||
|
@ -1576,7 +1576,7 @@ static UniValue listsinceblock(const JSONRPCRequest& request)
|
||||
pwallet->BlockUntilSyncedToCurrentChain();
|
||||
|
||||
auto locked_chain = pwallet->chain().lock();
|
||||
LockAnnotation lock(::cs_main);
|
||||
LockAssertion lock(::cs_main);
|
||||
LOCK(pwallet->cs_wallet);
|
||||
|
||||
// The way the 'height' is initialized is just a workaround for the gcc bug #47679 since version 4.6.0.
|
||||
|
@ -47,7 +47,7 @@ BOOST_FIXTURE_TEST_CASE(scan_for_wallet_transactions, TestChain100Setup)
|
||||
NodeContext node;
|
||||
auto chain = interfaces::MakeChain(node);
|
||||
auto locked_chain = chain->lock();
|
||||
LockAnnotation lock(::cs_main);
|
||||
LockAssertion lock(::cs_main);
|
||||
|
||||
// Verify ScanForWalletTransactions accommodates a null start block.
|
||||
{
|
||||
@ -143,7 +143,7 @@ BOOST_FIXTURE_TEST_CASE(importmulti_rescan, TestChain100Setup)
|
||||
NodeContext node;
|
||||
auto chain = interfaces::MakeChain(node);
|
||||
auto locked_chain = chain->lock();
|
||||
LockAnnotation lock(::cs_main);
|
||||
LockAssertion lock(::cs_main);
|
||||
|
||||
// Prune the older block file.
|
||||
PruneOneBlockFile(oldTip->GetBlockPos().nFile);
|
||||
@ -211,7 +211,7 @@ BOOST_FIXTURE_TEST_CASE(importwallet_rescan, TestChain100Setup)
|
||||
NodeContext node;
|
||||
auto chain = interfaces::MakeChain(node);
|
||||
auto locked_chain = chain->lock();
|
||||
LockAnnotation lock(::cs_main);
|
||||
LockAssertion lock(::cs_main);
|
||||
|
||||
std::string backup_file = (GetDataDir() / "wallet.backup").string();
|
||||
|
||||
@ -270,7 +270,7 @@ BOOST_FIXTURE_TEST_CASE(coin_mark_dirty_immature_credit, TestChain100Setup)
|
||||
CWalletTx wtx(&wallet, m_coinbase_txns.back());
|
||||
|
||||
auto locked_chain = chain->lock();
|
||||
LockAnnotation lock(::cs_main);
|
||||
LockAssertion lock(::cs_main);
|
||||
LOCK(wallet.cs_wallet);
|
||||
wallet.SetLastBlockProcessed(::ChainActive().Height(), ::ChainActive().Tip()->GetBlockHash());
|
||||
|
||||
@ -295,8 +295,8 @@ static int64_t AddTx(CWallet& wallet, uint32_t lockTime, int64_t mockTime, int64
|
||||
SetMockTime(mockTime);
|
||||
CBlockIndex* block = nullptr;
|
||||
if (blockTime > 0) {
|
||||
LockAnnotation lock(::cs_main); // for mapBlockIndex
|
||||
auto locked_chain = wallet.chain().lock();
|
||||
LockAssertion lock(::cs_main);
|
||||
auto inserted = ::BlockIndex().emplace(GetRandHash(), new CBlockIndex);
|
||||
assert(inserted.second);
|
||||
const uint256& hash = inserted.first->first;
|
||||
|
@ -1308,6 +1308,7 @@ void CWallet::LoadToWallet(CWalletTx& wtxIn)
|
||||
// If wallet doesn't have a chain (e.g wallet-tool), lock can't be taken.
|
||||
auto locked_chain = LockChain();
|
||||
if (locked_chain) {
|
||||
LockAssertion lock(::cs_main);
|
||||
Optional<int> block_height = locked_chain->getBlockHeight(wtxIn.m_confirm.hashBlock);
|
||||
if (block_height) {
|
||||
// Update cached block height variable since it not stored in the
|
||||
@ -1499,7 +1500,7 @@ bool CWallet::AbandonTransaction(const uint256& hashTx)
|
||||
void CWallet::MarkConflicted(const uint256& hashBlock, int conflicting_height, const uint256& hashTx)
|
||||
{
|
||||
auto locked_chain = chain().lock();
|
||||
LockAnnotation lock(::cs_main);
|
||||
LockAssertion lock(::cs_main);
|
||||
LOCK(cs_wallet); // check WalletBatch::LoadWallet()
|
||||
|
||||
int conflictconfirms = (m_last_block_processed_height - conflicting_height + 1) * -1;
|
||||
@ -4147,6 +4148,9 @@ DBErrors CWallet::LoadWallet(bool& fFirstRunRet)
|
||||
// tx status. If lock can't be taken (e.g wallet-tool), tx confirmation
|
||||
// status may be not reliable.
|
||||
auto locked_chain = LockChain();
|
||||
if (locked_chain) {
|
||||
LockAssertion lock(::cs_main);
|
||||
}
|
||||
LOCK(cs_wallet);
|
||||
|
||||
fFirstRunRet = false;
|
||||
|
@ -153,13 +153,57 @@ enum WalletFlags : uint64_t {
|
||||
|
||||
static constexpr uint64_t g_known_wallet_flags = WALLET_FLAG_DISABLE_PRIVATE_KEYS | WALLET_FLAG_BLANK_WALLET | WALLET_FLAG_KEY_ORIGIN_METADATA;
|
||||
|
||||
/** A key pool entry */
|
||||
/** A key from a CWallet's keypool
|
||||
*
|
||||
* The wallet holds several keypools. These are sets of keys that have not
|
||||
* yet been used to provide addresses or receive change.
|
||||
*
|
||||
* The Bitcoin Core wallet was originally a collection of unrelated private
|
||||
* keys with their associated addresses. If a non-HD wallet generated a
|
||||
* key/address, gave that address out and then restored a backup from before
|
||||
* that key's generation, then any funds sent to that address would be
|
||||
* lost definitively.
|
||||
*
|
||||
* The keypool was implemented to avoid this scenario (commit: 10384941). The
|
||||
* wallet would generate a set of keys (100 by default). When a new public key
|
||||
* was required, either to give out as an address or to use in a change output,
|
||||
* it would be drawn from the keypool. The keypool would then be topped up to
|
||||
* maintain 100 keys. This ensured that as long as the wallet hadn't used more
|
||||
* than 100 keys since the previous backup, all funds would be safe, since a
|
||||
* restored wallet would be able to scan for all owned addresses.
|
||||
*
|
||||
* A keypool also allowed encrypted wallets to give out addresses without
|
||||
* having to be decrypted to generate a new private key.
|
||||
*
|
||||
* With the introduction of HD wallets (commit: f1902510), the keypool
|
||||
* essentially became an address look-ahead pool. Restoring old backups can no
|
||||
* longer definitively lose funds as long as the addresses used were from the
|
||||
* wallet's HD seed (since all private keys can be rederived from the seed).
|
||||
* However, if many addresses were used since the backup, then the wallet may
|
||||
* not know how far ahead in the HD chain to look for its addresses. The
|
||||
* keypool is used to implement a 'gap limit'. The keypool maintains a set of
|
||||
* keys (by default 1000) ahead of the last used key and scans for the
|
||||
* addresses of those keys. This avoids the risk of not seeing transactions
|
||||
* involving the wallet's addresses, or of re-using the same address.
|
||||
*
|
||||
* There is an external keypool (for addresses to hand out) and an internal keypool
|
||||
* (for change addresses).
|
||||
*
|
||||
* Keypool keys are stored in the wallet/keystore's keymap. The keypool data is
|
||||
* stored as sets of indexes in the wallet (setInternalKeyPool and
|
||||
* setExternalKeyPool), and a map from the key to the
|
||||
* index (m_pool_key_to_index). The CKeyPool object is used to
|
||||
* serialize/deserialize the pool data to/from the database.
|
||||
*/
|
||||
class CKeyPool
|
||||
{
|
||||
public:
|
||||
//! The time at which the key was generated. Set in AddKeypoolPubKeyWithDB
|
||||
int64_t nTime;
|
||||
//! The public key
|
||||
CPubKey vchPubKey;
|
||||
bool fInternal; // for change outputs
|
||||
//! Whether this keypool entry is in the internal keypool (for change outputs)
|
||||
bool fInternal;
|
||||
|
||||
CKeyPool();
|
||||
CKeyPool(const CPubKey& vchPubKeyIn, bool fInternalIn);
|
||||
@ -192,6 +236,58 @@ public:
|
||||
}
|
||||
};
|
||||
|
||||
/** A wrapper to reserve a key from a wallet keypool
|
||||
*
|
||||
* CReserveKey is used to reserve a key from the keypool. It is passed around
|
||||
* during the CreateTransaction/CommitTransaction procedure.
|
||||
*
|
||||
* Instantiating a CReserveKey does not reserve a keypool key. To do so,
|
||||
* GetReservedKey() needs to be called on the object. Once a key has been
|
||||
* reserved, call KeepKey() on the CReserveKey object to make sure it is not
|
||||
* returned to the keypool. Call ReturnKey() to return the key to the keypool
|
||||
* so it can be re-used (for example, if the key was used in a new transaction
|
||||
* and that transaction was not completed and needed to be aborted).
|
||||
*
|
||||
* If a key is reserved and KeepKey() is not called, then the key will be
|
||||
* returned to the keypool when the CReserveObject goes out of scope.
|
||||
*/
|
||||
class CReserveKey final : public CReserveScript
|
||||
{
|
||||
protected:
|
||||
//! The wallet to reserve the keypool key from
|
||||
CWallet* pwallet;
|
||||
//! The index of the key in the keypool
|
||||
int64_t nIndex{-1};
|
||||
//! The public key
|
||||
CPubKey vchPubKey;
|
||||
//! Whether this is from the internal (change output) keypool
|
||||
bool fInternal{false};
|
||||
|
||||
public:
|
||||
//! Construct a CReserveKey object. This does NOT reserve a key from the keypool yet
|
||||
explicit CReserveKey(CWallet* pwalletIn)
|
||||
{
|
||||
pwallet = pwalletIn;
|
||||
}
|
||||
|
||||
CReserveKey(const CReserveKey&) = delete;
|
||||
CReserveKey& operator=(const CReserveKey&) = delete;
|
||||
|
||||
//! Destructor. If a key has been reserved and not KeepKey'ed, it will be returned to the keypool
|
||||
~CReserveKey()
|
||||
{
|
||||
ReturnKey();
|
||||
}
|
||||
|
||||
//! Reserve a key from the keypool
|
||||
bool GetReservedKey(CPubKey &pubkey, bool fInternalIn /*= false*/);
|
||||
//! Return a key to the keypool
|
||||
void ReturnKey();
|
||||
//! Keep the key. Do not return it to the keypool when this object goes out of scope
|
||||
void KeepKey();
|
||||
void KeepScript() override { KeepKey(); }
|
||||
};
|
||||
|
||||
/** Address book data */
|
||||
class CAddressBookData
|
||||
{
|
||||
@ -1366,35 +1462,6 @@ public:
|
||||
*/
|
||||
void MaybeResendWalletTxs();
|
||||
|
||||
/** A key allocated from the key pool. */
|
||||
class CReserveKey final : public CReserveScript
|
||||
{
|
||||
protected:
|
||||
CWallet* pwallet;
|
||||
int64_t nIndex{-1};
|
||||
CPubKey vchPubKey;
|
||||
bool fInternal{false};
|
||||
|
||||
public:
|
||||
explicit CReserveKey(CWallet* pwalletIn)
|
||||
{
|
||||
pwallet = pwalletIn;
|
||||
}
|
||||
|
||||
CReserveKey(const CReserveKey&) = delete;
|
||||
CReserveKey& operator=(const CReserveKey&) = delete;
|
||||
|
||||
~CReserveKey()
|
||||
{
|
||||
ReturnKey();
|
||||
}
|
||||
|
||||
void ReturnKey();
|
||||
bool GetReservedKey(CPubKey &pubkey, bool fInternalIn /*= false*/);
|
||||
void KeepKey();
|
||||
void KeepScript() override { KeepKey(); }
|
||||
};
|
||||
|
||||
/** RAII object to check and reserve a wallet rescan */
|
||||
class WalletRescanReserver
|
||||
{
|
||||
|
@ -510,7 +510,9 @@ DBErrors WalletBatch::LoadWallet(CWallet* pwallet)
|
||||
DBErrors result = DBErrors::LOAD_OK;
|
||||
|
||||
auto locked_chain = pwallet->LockChain();
|
||||
LockAnnotation lock(::cs_main);
|
||||
if (locked_chain) {
|
||||
LockAssertion lock(::cs_main);
|
||||
}
|
||||
LOCK(pwallet->cs_wallet);
|
||||
try {
|
||||
int nMinVersion = 0;
|
||||
|
@ -153,6 +153,7 @@
|
||||
1,
|
||||
1
|
||||
],
|
||||
"height": 102,
|
||||
"ins": 1,
|
||||
"maxfee": 192,
|
||||
"maxfeerate": 1,
|
||||
|
@ -20,18 +20,6 @@ class GetblockstatsTest(BitcoinTestFramework):
|
||||
|
||||
start_height = 101
|
||||
max_stat_pos = 2
|
||||
STATS_NEED_TXINDEX = [
|
||||
'avgfee',
|
||||
'avgfeerate',
|
||||
'maxfee',
|
||||
'maxfeerate',
|
||||
'medianfee',
|
||||
'feerate_percentiles',
|
||||
'minfee',
|
||||
'minfeerate',
|
||||
'totalfee',
|
||||
'utxo_size_inc',
|
||||
]
|
||||
|
||||
def add_options(self, parser):
|
||||
parser.add_argument('--gen-test-data', dest='gen_test_data',
|
||||
@ -44,23 +32,25 @@ class GetblockstatsTest(BitcoinTestFramework):
|
||||
|
||||
# def set_test_params(self):
|
||||
def set_test_params(self):
|
||||
self.num_nodes = 2
|
||||
self.extra_args = [['-txindex'], ['-txindex=0', '-paytxfee=0.003']]
|
||||
self.num_nodes = 1
|
||||
self.setup_clean_chain = True
|
||||
|
||||
def get_stats(self):
|
||||
return [self.nodes[0].getblockstats(hash_or_height=self.start_height + i) for i in range(self.max_stat_pos+1)]
|
||||
|
||||
def generate_test_data(self, filename):
|
||||
self.nodes[0].setmocktime(self.mocktime)
|
||||
self.nodes[0].generate(101)
|
||||
|
||||
self.nodes[0].sendtoaddress(address=self.nodes[1].getnewaddress(), amount=10, subtractfeefromamount=True)
|
||||
address = self.nodes[0].get_deterministic_priv_key().address
|
||||
self.nodes[0].sendtoaddress(address=address, amount=10, subtractfeefromamount=True)
|
||||
self.nodes[0].generate(1)
|
||||
self.sync_all()
|
||||
|
||||
self.nodes[0].sendtoaddress(address=self.nodes[0].getnewaddress(), amount=10, subtractfeefromamount=True)
|
||||
self.nodes[0].sendtoaddress(address=self.nodes[0].getnewaddress(), amount=10, subtractfeefromamount=False)
|
||||
self.nodes[1].sendtoaddress(address=self.nodes[0].getnewaddress(), amount=1, subtractfeefromamount=True)
|
||||
self.nodes[0].sendtoaddress(address=address, amount=10, subtractfeefromamount=True)
|
||||
self.nodes[0].sendtoaddress(address=address, amount=10, subtractfeefromamount=False)
|
||||
self.nodes[0].settxfee(amount=0.003)
|
||||
self.nodes[0].sendtoaddress(address=address, amount=1, subtractfeefromamount=True)
|
||||
self.sync_all()
|
||||
self.nodes[0].generate(1)
|
||||
|
||||
@ -92,11 +82,12 @@ class GetblockstatsTest(BitcoinTestFramework):
|
||||
|
||||
# Set the timestamps from the file so that the nodes can get out of Initial Block Download
|
||||
self.nodes[0].setmocktime(self.mocktime)
|
||||
self.nodes[1].setmocktime(self.mocktime)
|
||||
self.sync_all()
|
||||
|
||||
for b in blocks:
|
||||
self.nodes[0].submitblock(b)
|
||||
|
||||
|
||||
def run_test(self):
|
||||
test_data = os.path.join(TESTSDIR, self.options.test_data)
|
||||
if self.options.gen_test_data:
|
||||
@ -106,9 +97,6 @@ class GetblockstatsTest(BitcoinTestFramework):
|
||||
|
||||
self.sync_all()
|
||||
stats = self.get_stats()
|
||||
expected_stats_noindex = []
|
||||
for stat_row in stats:
|
||||
expected_stats_noindex.append({k: v for k, v in stat_row.items() if k not in self.STATS_NEED_TXINDEX})
|
||||
|
||||
# Make sure all valid statistics are included but nothing else is
|
||||
expected_keys = self.expected_stats[0].keys()
|
||||
@ -126,10 +114,6 @@ class GetblockstatsTest(BitcoinTestFramework):
|
||||
stats_by_hash = self.nodes[0].getblockstats(hash_or_height=blockhash)
|
||||
assert_equal(stats_by_hash, self.expected_stats[i])
|
||||
|
||||
# Check with the node that has no txindex
|
||||
stats_no_txindex = self.nodes[1].getblockstats(hash_or_height=blockhash, stats=list(expected_stats_noindex[i].keys()))
|
||||
assert_equal(stats_no_txindex, expected_stats_noindex[i])
|
||||
|
||||
# Make sure each stat can be queried on its own
|
||||
for stat in expected_keys:
|
||||
for i in range(self.max_stat_pos+1):
|
||||
@ -167,12 +151,6 @@ class GetblockstatsTest(BitcoinTestFramework):
|
||||
# Make sure we aren't always returning inv_sel_stat as the culprit stat
|
||||
assert_raises_rpc_error(-8, 'Invalid selected statistic aaa%s' % inv_sel_stat,
|
||||
self.nodes[0].getblockstats, hash_or_height=1, stats=['minfee' , 'aaa%s' % inv_sel_stat])
|
||||
|
||||
assert_raises_rpc_error(-8, 'One or more of the selected stats requires -txindex enabled',
|
||||
self.nodes[1].getblockstats, hash_or_height=1)
|
||||
assert_raises_rpc_error(-8, 'One or more of the selected stats requires -txindex enabled',
|
||||
self.nodes[1].getblockstats, hash_or_height=self.start_height + self.max_stat_pos)
|
||||
|
||||
# Mainchain's genesis block shouldn't be found on regtest
|
||||
assert_raises_rpc_error(-5, 'Block not found', self.nodes[0].getblockstats,
|
||||
hash_or_height='000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f')
|
||||
|
@ -190,6 +190,8 @@ class WalletTest(BitcoinTestFramework):
|
||||
self.log.info('Put txs back into mempool of node 1 (not node 0)')
|
||||
self.nodes[0].invalidateblock(block_reorg)
|
||||
self.nodes[1].invalidateblock(block_reorg)
|
||||
self.sync_blocks()
|
||||
self.nodes[0].syncwithvalidationinterfacequeue()
|
||||
assert_equal(self.nodes[0].getbalance(minconf=0), 0) # wallet txs not in the mempool are untrusted
|
||||
self.nodes[0].generatetoaddress(1, ADDRESS_WATCHONLY)
|
||||
assert_equal(self.nodes[0].getbalance(minconf=0), 0) # wallet txs not in the mempool are untrusted
|
||||
|