Merge pull request #4845 from Munkybooty/backports-0.20-pr3

Backports 0.20 pr3

commit ea957a73a0
@@ -6,7 +6,14 @@
 # Example use:
 # $ valgrind --suppressions=contrib/valgrind.supp src/test/test_dash
 # $ valgrind --suppressions=contrib/valgrind.supp --leak-check=full \
-#       --show-leak-kinds=all src/test/test_dash --log_level=test_suite
+#       --show-leak-kinds=all src/test/test_dash
+#
+# To create suppressions for found issues, use the --gen-suppressions=all option:
+# $ valgrind --suppressions=contrib/valgrind.supp --leak-check=full \
+#       --show-leak-kinds=all --gen-suppressions=all --show-reachable=yes \
+#       --error-limit=no src/test/test_dash
+#
+# Note that suppressions may depend on OS and/or library versions.
 {
     Suppress libstdc++ warning - https://gcc.gnu.org/bugzilla/show_bug.cgi?id=65434
     Memcheck:Leak
@@ -26,6 +33,14 @@
     obj:*/libdb_cxx-*.so
     fun:__log_put_record
 }
+{
+    Suppress libdb warning
+    Memcheck:Param
+    pwrite64(buf)
+    fun:pwrite
+    fun:__os_io
+    obj:*/libdb_cxx-*.so
+}
 {
     Suppress leveldb warning (leveldb::InitModule()) - https://github.com/google/leveldb/issues/113
     Memcheck:Leak
@@ -41,3 +56,57 @@
     ...
     fun:_ZN7leveldbL14InitDefaultEnvEv
 }
+{
+    Suppress wcsnrtombs glibc SSE4 warning (could be related: https://stroika.atlassian.net/browse/STK-626)
+    Memcheck:Addr16
+    fun:__wcsnlen_sse4_1
+    fun:wcsnrtombs
+}
+{
+    Suppress boost::filesystem warning (fixed in boost 1.70: https://github.com/boostorg/filesystem/commit/bbe9d1771e5d679b3f10c42a58fc81f7e8c024a9)
+    Memcheck:Cond
+    fun:_ZN5boost10filesystem6detail28directory_iterator_incrementERNS0_18directory_iteratorEPNS_6system10error_codeE
+    fun:_ZN5boost10filesystem6detail28directory_iterator_constructERNS0_18directory_iteratorERKNS0_4pathEPNS_6system10error_codeE
+    obj:*/libboost_filesystem.so.*
+}
+{
+    Suppress boost::filesystem warning (could be related: https://stackoverflow.com/questions/9830182/function-boostfilesystemcomplete-being-reported-as-possible-memory-leak-by-v)
+    Memcheck:Leak
+    match-leak-kinds: reachable
+    fun:_Znwm
+    fun:_ZN5boost10filesystem8absoluteERKNS0_4pathES3_
+}
+{
+    Suppress boost still reachable memory warning
+    Memcheck:Leak
+    match-leak-kinds: reachable
+    fun:_Znwm
+    ...
+    fun:_M_construct_aux<char*>
+    fun:_M_construct<char*>
+    fun:basic_string
+    fun:path
+}
+{
+    Suppress LogInstance still reachable memory warning
+    Memcheck:Leak
+    match-leak-kinds: reachable
+    fun:_Znwm
+    fun:_Z11LogInstancev
+}
+{
+    Suppress secp256k1_context_create still reachable memory warning
+    Memcheck:Leak
+    match-leak-kinds: reachable
+    fun:malloc
+    ...
+    fun:secp256k1_context_create
+}
+{
+    Suppress BCLog::Logger::StartLogging() still reachable memory warning
+    Memcheck:Leak
+    match-leak-kinds: reachable
+    fun:malloc
+    ...
+    fun:_ZN5BCLog6Logger12StartLoggingEv
+}
@@ -19,7 +19,11 @@ BIPs that are implemented by Dash Core (up-to-date up to **v18.0**):
 * [`BIP 65`](https://github.com/bitcoin/bips/blob/master/bip-0065.mediawiki): The CHECKLOCKTIMEVERIFY softfork was merged in **v0.12.0** ([PR #6351](https://github.com/bitcoin/bitcoin/pull/6351)), and backported to **v0.11.2** and **v0.10.4**. Mempool-only CLTV was added in [PR #6124](https://github.com/bitcoin/bitcoin/pull/6124).
 * [`BIP 66`](https://github.com/bitcoin/bips/blob/master/bip-0066.mediawiki): The strict DER rules and associated version 3 blocks have been implemented since **v0.10.0** ([PR #5713](https://github.com/bitcoin/bitcoin/pull/5713)).
 * [`BIP 68`](https://github.com/bitcoin/bips/blob/master/bip-0068.mediawiki): Sequence locks have been implemented as of **v0.12.1** ([PR #7184](https://github.com/bitcoin/bitcoin/pull/7184)), and have been activated since *block 419328*.
-* [`BIP 70`](https://github.com/bitcoin/bips/blob/master/bip-0070.mediawiki) [`71`](https://github.com/bitcoin/bips/blob/master/bip-0071.mediawiki) [`72`](https://github.com/bitcoin/bips/blob/master/bip-0072.mediawiki): Payment Protocol support has been available in Bitcoin Core GUI since **v0.9.0** ([PR #5216](https://github.com/bitcoin/bitcoin/pull/5216)). Support can be optionally disabled at build time since **v0.18.0** ([PR 4350](https://github.com/dashpay/dash/pull/4350)).
+* [`BIP 70`](https://github.com/bitcoin/bips/blob/master/bip-0070.mediawiki) [`71`](https://github.com/bitcoin/bips/blob/master/bip-0071.mediawiki) [`72`](https://github.com/bitcoin/bips/blob/master/bip-0072.mediawiki):
+  Payment Protocol support has been available in Dash Core GUI since **v0.9.0** ([PR #5216](https://github.com/bitcoin/bitcoin/pull/5216)).
+  Support can be optionally disabled at build time since **v0.18.0** ([PR 14451](https://github.com/bitcoin/bitcoin/pull/14451)),
+  and it is disabled by default at build time since **v0.19.0** ([PR #15584](https://github.com/bitcoin/bitcoin/pull/15584)).
+  It has been removed as of **v0.20.0** ([PR 17165](https://github.com/bitcoin/bitcoin/pull/17165)).
 * [`BIP 90`](https://github.com/bitcoin/bips/blob/master/bip-0090.mediawiki): Trigger mechanism for activation of BIPs 34, 65, and 66 has been simplified to block height checks since **v0.14.0** ([PR #8391](https://github.com/bitcoin/bitcoin/pull/8391)).
 * [`BIP 111`](https://github.com/bitcoin/bips/blob/master/bip-0111.mediawiki): `NODE_BLOOM` service bit added, and enforced for all peer versions as of **v0.13.0** ([PR #6579](https://github.com/bitcoin/bitcoin/pull/6579) and [PR #6641](https://github.com/bitcoin/bitcoin/pull/6641)).
 * [`BIP 112`](https://github.com/bitcoin/bips/blob/master/bip-0112.mediawiki): The CHECKSEQUENCEVERIFY opcode has been implemented since **v0.12.1** ([PR #7524](https://github.com/bitcoin/bitcoin/pull/7524)) and has been activated since *block 419328*.
@@ -453,7 +453,7 @@ Threads
 
 - ThreadScriptCheck : Verifies block scripts.
 
-- ThreadImport : Loads blocks from blk*.dat files or bootstrap.dat.
+- ThreadImport : Loads blocks from `blk*.dat` files or `-loadblock=<file>`.
 
 - ThreadDNSAddressSeed : Loads addresses of peers from the DNS.
 
doc/release-notes-15954.md | 4 (new file)
@@ -0,0 +1,4 @@
+Configuration option changes
+-----------------------------
+
+Importing blocks upon startup via the `bootstrap.dat` file no longer occurs by default. The file must now be specified with `-loadblock=<file>`.
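For illustration, a functional-test-style sketch of the new startup flow, in the same idiom as the tests touched below; the node index and file location are assumptions for the example, not something this PR adds:

    import os

    # bootstrap.dat is no longer picked up automatically from the datadir, so a
    # node that should import it must be pointed at the file explicitly (e.g.
    # inside a test's run_test method).
    bootstrap_path = os.path.join(self.nodes[0].datadir, "bootstrap.dat")  # assumed location
    self.restart_node(0, extra_args=["-loadblock=" + bootstrap_path])
    # The import still happens in ThreadImport during startup, via the same code
    # path as any other -loadblock=<file> argument.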
src/init.cpp | 18
@@ -521,7 +521,7 @@ void SetupServerArgs(NodeContext& node)
     argsman.AddArg("-dbcache=<n>", strprintf("Maximum database cache size <n> MiB (%d to %d, default: %d). In addition, unused mempool memory is shared for this cache (see -maxmempool).", nMinDbCache, nMaxDbCache, nDefaultDbCache), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
     argsman.AddArg("-debuglogfile=<file>", strprintf("Specify location of debug log file. Relative paths will be prefixed by a net-specific datadir location. (-nodebuglogfile to disable; default: %s)", DEFAULT_DEBUGLOGFILE), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
     argsman.AddArg("-includeconf=<file>", "Specify additional configuration file, relative to the -datadir path (only useable from configuration file, not command line)", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
-    argsman.AddArg("-loadblock=<file>", "Imports blocks from external blk000??.dat file on startup", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
+    argsman.AddArg("-loadblock=<file>", "Imports blocks from external file on startup", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
     argsman.AddArg("-maxmempool=<n>", strprintf("Keep the transaction memory pool below <n> megabytes (default: %u)", DEFAULT_MAX_MEMPOOL_SIZE), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
     argsman.AddArg("-maxorphantxsize=<n>", strprintf("Maximum total size of all orphan transactions in megabytes (default: %u)", DEFAULT_MAX_ORPHAN_TRANSACTIONS_SIZE), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
     argsman.AddArg("-maxrecsigsage=<n>", strprintf("Number of seconds to keep LLMQ recovery sigs (default: %u)", llmq::DEFAULT_MAX_RECOVERED_SIGS_AGE), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
@@ -912,22 +912,6 @@ static void ThreadImport(ChainstateManager& chainman, std::vector<fs::path> vImp
         LoadGenesisBlock(chainparams);
     }
 
-    // hardcoded $DATADIR/bootstrap.dat
-    fs::path pathBootstrap = GetDataDir() / "bootstrap.dat";
-    if (fs::exists(pathBootstrap)) {
-        FILE *file = fsbridge::fopen(pathBootstrap, "rb");
-        if (file) {
-            fs::path pathBootstrapOld = GetDataDir() / "bootstrap.dat.old";
-            LogPrintf("Importing bootstrap.dat...\n");
-            LoadExternalBlockFile(chainparams, file);
-            if (!RenameOver(pathBootstrap, pathBootstrapOld)) {
-                throw std::runtime_error("Rename failed");
-            }
-        } else {
-            LogPrintf("Warning: Could not open bootstrap file %s\n", pathBootstrap.string());
-        }
-    }
-
     // -loadblock=
     for (const fs::path& path : vImportFiles) {
         FILE *file = fsbridge::fopen(path, "rb");
@@ -16,8 +16,6 @@
 
 #include <map>
 
-#include <boost/thread.hpp>
-
 static uint64_t GetBogoSize(const CScript& scriptPubKey)
 {
     return 32 /* txid */ +
@@ -47,8 +47,6 @@
 
 #include <univalue.h>
 
-#include <boost/thread/thread.hpp> // boost::thread::interrupt
-
 #include <mutex>
 #include <condition_variable>
 #include <merkleblock.h>
@@ -1620,7 +1620,7 @@ static UniValue listsinceblock(const JSONRPCRequest& request)
     for (const std::pair<const uint256, CWalletTx>& pairWtx : pwallet->mapWallet) {
         CWalletTx tx = pairWtx.second;
 
-        if (depth == -1 || tx.GetDepthInMainChain() < depth) {
+        if (depth == -1 || abs(tx.GetDepthInMainChain()) < depth) {
             ListTransactions(pwallet, tx, 0, true, transactions, filter, nullptr /* filter_label */);
         }
     }
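The switch to abs() matters because GetDepthInMainChain() returns a negative value for conflicted transactions, so without it such transactions always passed the depth cutoff. A hedged test-framework sketch of the wallet-side symptom (conflicted_txid is illustrative, not from this PR):

    # A wallet transaction whose double-spend has been mined reports negative
    # confirmations, the RPC-visible counterpart of a negative GetDepthInMainChain().
    conflicted = self.nodes[2].gettransaction(conflicted_txid)
    assert conflicted["confirmations"] < 0
    # With abs(), listsinceblock can now exclude conflicts that predate the
    # requested cutoff block; the double_spends_filtered() test added below
    # exercises exactly that.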
@@ -10,13 +10,19 @@ from test_framework.messages import COIN
 from test_framework.test_framework import BitcoinTestFramework
 from test_framework.util import assert_equal, assert_raises_rpc_error, satoshi_round
 
+# default limits
 MAX_ANCESTORS = 25
 MAX_DESCENDANTS = 25
+# custom limits for node1
+MAX_ANCESTORS_CUSTOM = 5
 
 class MempoolPackagesTest(BitcoinTestFramework):
     def set_test_params(self):
         self.num_nodes = 2
-        self.extra_args = [["-maxorphantxsize=1000"], ["-maxorphantxsize=1000", "-limitancestorcount=5"]]
+        self.extra_args = [
+            ["-maxorphantxsize=1000"],
+            ["-maxorphantxsize=1000", "-limitancestorcount={}".format(MAX_ANCESTORS_CUSTOM)],
+        ]
 
     def skip_test_if_missing_module(self):
         self.skip_if_no_wallet()
@@ -184,7 +190,14 @@ class MempoolPackagesTest(BitcoinTestFramework):
             assert_equal(mempool[x]['descendantfees'], descendant_fees * COIN + 2000)
             assert_equal(mempool[x]['fees']['descendant'], descendant_fees+satoshi_round(0.00002))
 
-        # TODO: check that node1's mempool is as expected
+        # Check that node1's mempool is as expected (-> custom ancestor limit)
+        mempool0 = self.nodes[0].getrawmempool(False)
+        mempool1 = self.nodes[1].getrawmempool(False)
+        assert_equal(len(mempool1), MAX_ANCESTORS_CUSTOM)
+        assert set(mempool1).issubset(set(mempool0))
+        for tx in chain[:MAX_ANCESTORS_CUSTOM]:
+            assert tx in mempool1
+        # TODO: more detailed check of node1's mempool (fees etc.)
 
         # TODO: test ancestor size limits
 
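As an aside on the custom limit checked above, a node started with -limitancestorcount also rejects transactions submitted to it directly once a chain would exceed the limit. A hedged sketch in the same test idiom (next_tx_hex is illustrative, not part of this PR):

    # A transaction that would acquire more than MAX_ANCESTORS_CUSTOM in-mempool
    # ancestors on node1 is rejected outright when submitted over RPC.
    assert_raises_rpc_error(-26, "too-long-mempool-chain",
                            self.nodes[1].sendrawtransaction, next_tx_hex)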
@@ -4,7 +4,6 @@
 # file COPYING or http://www.opensource.org/licenses/mit-license.php.
 """Test node responses to invalid network messages."""
 import asyncio
-import os
 import struct
 import sys
 
@@ -66,27 +65,21 @@ class InvalidMessagesTest(BitcoinTestFramework):
         msg_at_size = msg_unrecognized(str_data="b" * valid_data_limit)
         assert len(msg_at_size.serialize()) == msg_limit
 
-        increase_allowed = 0.5
-        if [s for s in os.environ.get("BITCOIN_CONFIG", "").split(" ") if "--with-sanitizers" in s and "address" in s]:
-            increase_allowed = 3.5
-        with node.assert_memory_usage_stable(increase_allowed=increase_allowed):
-            self.log.info(
-                "Sending a bunch of large, junk messages to test "
-                "memory exhaustion. May take a bit...")
+        self.log.info("Sending a bunch of large, junk messages to test memory exhaustion. May take a bit...")
 
         # Run a bunch of times to test for memory exhaustion.
         for _ in range(80):
             node.p2p.send_message(msg_at_size)
 
         # Check that, even though the node is being hammered by nonsense from one
         # connection, it can still service other peers in a timely way.
         for _ in range(20):
             conn2.sync_with_ping(timeout=2)
 
         # Peer 1, despite serving up a bunch of nonsense, should still be connected.
         self.log.info("Waiting for node to drop junk messages.")
         node.p2p.sync_with_ping(timeout=320)
         assert node.p2p.is_connected
 
         #
         # 1.
@@ -29,6 +29,9 @@ class RawTransactionsTest(BitcoinTestFramework):
         self.num_nodes = 4
         self.setup_clean_chain = True
         self.extra_args = [['-usehd=0']] * self.num_nodes
+        # This test isn't testing tx relay. Set whitelist on the peers for
+        # instant tx relay.
+        self.extra_args = [['-whitelist=127.0.0.1']] * self.num_nodes
 
     def skip_test_if_missing_module(self):
         self.skip_if_no_wallet()
@@ -457,8 +460,7 @@ class RawTransactionsTest(BitcoinTestFramework):
 
         # send 12 DASH to msig addr
         self.nodes[0].sendtoaddress(mSigObj, 12)
-        self.sync_all()
-        self.nodes[1].generate(1)
+        self.nodes[0].generate(1)
         self.sync_all()
 
         oldBalance = self.nodes[1].getbalance()
@@ -469,8 +471,7 @@ class RawTransactionsTest(BitcoinTestFramework):
 
         signedTx = self.nodes[2].signrawtransactionwithwallet(fundedTx['hex'])
         self.nodes[2].sendrawtransaction(signedTx['hex'])
-        self.sync_all()
-        self.nodes[1].generate(1)
+        self.nodes[2].generate(1)
         self.sync_all()
 
         # Make sure funds are received at node1.
@@ -480,22 +481,6 @@ class RawTransactionsTest(BitcoinTestFramework):
         self.log.info("Test fundrawtxn with locked wallet")
 
         self.nodes[1].encryptwallet("test")
-        self.stop_nodes()
-
-        self.start_nodes()
-        # This test is not meant to test fee estimation and we'd like
-        # to be sure all txns are sent at a consistent desired feerate.
-        for node in self.nodes:
-            node.settxfee(self.min_relay_tx_fee)
-
-        connect_nodes(self.nodes[0], 1)
-        connect_nodes(self.nodes[1], 2)
-        connect_nodes(self.nodes[0], 2)
-        connect_nodes(self.nodes[0], 3)
-        # Again lock the watchonly UTXO or nodes[0] may spend it, because
-        # lockunspent is memory-only and thus lost on restart.
-        self.nodes[0].lockunspent(False, [{"txid": self.watchonly_txid, "vout": self.watchonly_vout}])
-        self.sync_all()
 
         # Drain the keypool.
         self.nodes[1].getnewaddress()
@@ -535,8 +520,7 @@ class RawTransactionsTest(BitcoinTestFramework):
 
         # Empty node1, send some small coins from node0 to node1.
         self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), self.nodes[1].getbalance(), "", "", True)
-        self.sync_all()
-        self.nodes[0].generate(1)
+        self.nodes[1].generate(1)
         self.sync_all()
 
         for i in range(0,20):
@@ -564,8 +548,7 @@ class RawTransactionsTest(BitcoinTestFramework):
 
         # Again, empty node1, send some small coins from node0 to node1.
         self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), self.nodes[1].getbalance(), "", "", True)
-        self.sync_all()
-        self.nodes[0].generate(1)
+        self.nodes[1].generate(1)
         self.sync_all()
 
         for i in range(0,20):
@@ -582,8 +565,7 @@ class RawTransactionsTest(BitcoinTestFramework):
         fundedTx = self.nodes[1].fundrawtransaction(rawtx)
         fundedAndSignedTx = self.nodes[1].signrawtransactionwithwallet(fundedTx['hex'])
         self.nodes[1].sendrawtransaction(fundedAndSignedTx['hex'])
-        self.sync_all()
-        self.nodes[0].generate(1)
+        self.nodes[1].generate(1)
         self.sync_all()
         assert_equal(oldBalance+Decimal('500.19000000'), self.nodes[0].getbalance()) #0.19+block reward
 
@@ -148,28 +148,6 @@ class TestNode():
         ]
         return PRIV_KEYS[self.index]
 
-    def get_mem_rss_kilobytes(self):
-        """Get the memory usage (RSS) per `ps`.
-
-        If process is stopped or `ps` is unavailable, return None.
-        """
-        if not (self.running and self.process):
-            self.log.warning("Couldn't get memory usage; process isn't running.")
-            return None
-
-        try:
-            return int(subprocess.check_output(
-                "ps h -o rss {}".format(self.process.pid),
-                shell=True, stderr=subprocess.DEVNULL).strip())
-
-        # Catching `Exception` broadly to avoid failing on platforms where ps
-        # isn't installed or doesn't work as expected, e.g. OpenBSD.
-        #
-        # We could later use something like `psutils` to work across platforms.
-        except Exception:
-            self.log.exception("Unable to get memory usage")
-            return None
-
     def _node_msg(self, msg: str) -> str:
         """Return a modified msg that identifies this node by its index as a debugging aid."""
         return "[node %d] %s" % (self.index, msg)
@@ -358,33 +336,6 @@ class TestNode():
             time.sleep(0.05)
         self._raise_assertion_error('Expected messages "{}" does not partially match log:\n\n{}\n\n'.format(str(expected_msgs), print_log))
 
-    @contextlib.contextmanager
-    def assert_memory_usage_stable(self, *, increase_allowed=0.03):
-        """Context manager that allows the user to assert that a node's memory usage (RSS)
-        hasn't increased beyond some threshold percentage.
-
-        Args:
-            increase_allowed (float): the fractional increase in memory allowed until failure;
-                e.g. `0.12` for up to 12% increase allowed.
-        """
-        before_memory_usage = self.get_mem_rss_kilobytes()
-
-        yield
-
-        after_memory_usage = self.get_mem_rss_kilobytes()
-
-        if not (before_memory_usage and after_memory_usage):
-            self.log.warning("Unable to detect memory usage (RSS) - skipping memory check.")
-            return
-
-        perc_increase_memory_usage = 1 - (float(before_memory_usage) / after_memory_usage)
-
-        if perc_increase_memory_usage > increase_allowed:
-            self._raise_assertion_error(
-                "Memory usage increased over threshold of {:.3f}% from {} to {} ({:.3f}%)".format(
-                    increase_allowed * 100, before_memory_usage, after_memory_usage,
-                    perc_increase_memory_usage * 100))
-
     @contextlib.contextmanager
     def profile_with_perf(self, profile_name):
         """
@@ -397,7 +397,8 @@ def main():
 def run_tests(*, test_list, src_dir, build_dir, tmpdir, jobs=1, enable_coverage=False, args=None, combined_logs_len=0,failfast=False, runs_ci=False):
     args = args or []
 
-    # Warn if dashd is already running (unix only)
+    # Warn if dashd is already running
+    # pidof might fail or return an empty string if bitcoind is not running
     try:
         pidof_output = subprocess.check_output(["pidof", "dashd"])
         if not (pidof_output is None or pidof_output == b''):
@@ -5,13 +5,17 @@
 """Test the listsincelast RPC."""
 
 from test_framework.test_framework import BitcoinTestFramework
+from test_framework.messages import BIP125_SEQUENCE_NUMBER
 from test_framework.util import (
     assert_array_result,
     assert_equal,
     assert_raises_rpc_error,
     connect_nodes,
+    isolate_node,
+    reconnect_isolated_node,
 )
 
+from decimal import Decimal
 
 class ListSinceBlockTest(BitcoinTestFramework):
     def set_test_params(self):
@@ -33,6 +37,7 @@ class ListSinceBlockTest(BitcoinTestFramework):
         self.test_reorg()
         self.test_double_spend()
         self.test_double_send()
+        self.double_spends_filtered()
 
     def test_no_blockhash(self):
         txid = self.nodes[2].sendtoaddress(self.nodes[0].getnewaddress(), 1)
@@ -289,5 +294,65 @@ class ListSinceBlockTest(BitcoinTestFramework):
             if tx['txid'] == txid1:
                 assert_equal(tx['confirmations'], 2)
 
+    def double_spends_filtered(self):
+        '''
+        `listsinceblock` was returning conflicted transactions even if they
+        occurred before the specified cutoff blockhash
+        '''
+        spending_node = self.nodes[2]
+        double_spending_node = self.nodes[3]
+        dest_address = spending_node.getnewaddress()
+
+        tx_input = dict(
+            sequence=BIP125_SEQUENCE_NUMBER, **next(u for u in spending_node.listunspent()))
+        rawtx = spending_node.createrawtransaction(
+            [tx_input], {dest_address: tx_input["amount"] - Decimal("0.00051000"),
+                         spending_node.getrawchangeaddress(): Decimal("0.00050000")})
+        double_rawtx = spending_node.createrawtransaction(
+            [tx_input], {dest_address: tx_input["amount"] - Decimal("0.00052000"),
+                         spending_node.getrawchangeaddress(): Decimal("0.00050000")})
+
+        isolate_node(double_spending_node)
+
+        signedtx = spending_node.signrawtransactionwithwallet(rawtx)
+        orig_tx_id = spending_node.sendrawtransaction(signedtx["hex"])
+        original_tx = spending_node.gettransaction(orig_tx_id)
+
+        double_signedtx = spending_node.signrawtransactionwithwallet(double_rawtx)
+        dbl_tx_id = double_spending_node.sendrawtransaction(double_signedtx["hex"])
+        double_tx = double_spending_node.getrawtransaction(dbl_tx_id, 1)
+        lastblockhash = double_spending_node.generate(1)[0]
+
+        reconnect_isolated_node(double_spending_node, 2)
+        self.sync_all()
+        spending_node.invalidateblock(lastblockhash)
+
+        # check that both transactions exist
+        block_hash = spending_node.listsinceblock(
+            spending_node.getblockhash(spending_node.getblockcount()))
+        original_found = False
+        double_found = False
+        for tx in block_hash['transactions']:
+            if tx['txid'] == original_tx['txid']:
+                original_found = True
+            if tx['txid'] == double_tx['txid']:
+                double_found = True
+        assert_equal(original_found, True)
+        assert_equal(double_found, True)
+
+        lastblockhash = spending_node.generate(1)[0]
+
+        # check that neither transaction exists
+        block_hash = spending_node.listsinceblock(lastblockhash)
+        original_found = False
+        double_found = False
+        for tx in block_hash['transactions']:
+            if tx['txid'] == original_tx['txid']:
+                original_found = True
+            if tx['txid'] == double_tx['txid']:
+                double_found = True
+        assert_equal(original_found, False)
+        assert_equal(double_found, False)
+
 if __name__ == '__main__':
     ListSinceBlockTest().main()
@@ -71,7 +71,6 @@ EXPECTED_BOOST_INCLUDES=(
     boost/test/unit_test.hpp
     boost/thread.hpp
     boost/thread/condition_variable.hpp
-    boost/thread/thread.hpp
     boost/variant.hpp
     boost/variant/apply_visitor.hpp
     boost/variant/static_visitor.hpp