Mirror of https://github.com/dashpay/dash.git, synced 2024-12-24 19:42:46 +01:00
Merge #6352: backport: trivial 2024 10 23 pr9
168e5e4a50
Merge bitcoin/bitcoin#28877: bench: Update nanobench to 4.3.11 (fanquake) 417c86b949
Merge bitcoin/bitcoin#28105: doc: Clarify that -fstack-reuse=all bugs exist on all versions of GCC (fanquake) a620cccd81
Merge bitcoin/bitcoin#26970: test: fix immediate tx relay in wallet_groups.py (merge-script) f115d9c27f
Merge bitcoin/bitcoin#27061: doc: Document affected gcc versions for -fstack-reuse=none workaround (fanquake) 6889a8db29
Merge bitcoin/bitcoin#27056: doc: use arch agnostic clang path in fuzzing doc (macOS) (MarcoFalke) 97858384ec
Merge bitcoin/bitcoin#21995: build: Make dependency package archive timestamps deterministic (fanquake) c4760bb32e
Merge bitcoin/bitcoin#27030: Update nanobench to version v4.3.10 (fanquake) a7e3c2c916
Merge bitcoin-core/gui#705: doc: Fix comment about how wallet txs are sorted (Hennadii Stepanov) 44e6c9e902
Merge bitcoin/bitcoin#27004: test: Use std::unique_ptr over manual delete in coins_tests (fanquake) 2ab1989a39
Merge bitcoin/bitcoin#27010: refactor: use `Hash` helpers for double-SHA256 calculations (MarcoFalke) c681aaad30
Merge bitcoin/bitcoin#22811: build: Fix depends build system when working with subtargets (fanquake) d1b7386374
Merge bitcoin/bitcoin#26930: fuzz: Actually use mocked mempool in tx_pool target (MarcoFalke) cd53a195a6
Merge bitcoin/bitcoin#26873: doc: add databases/py-sqlite3 to FreeBSD test suite deps (fanquake) 8cc5f11a2f
Merge bitcoin/bitcoin#26506: refactor: rpc: use convenience fn to auto parse non-string parameters (MarcoFalke) 662302c42b
Merge bitcoin/bitcoin#26805: tests: Use unique port for ZMQ tests to allow for multiple test instances (MarcoFalke) 66a3981a7a
Merge bitcoin/bitcoin#24279: build: Make `$(package)_*_env` available to all `$(package)_*_cmds` (fanquake) 3261092f85
Merge bitcoin/bitcoin#26520: doc: test: update/fix TestShell example instructions (fanquake) 5f78859562
Merge bitcoin/bitcoin#25248: refactor: Add LIFETIMEBOUND / -Wdangling-gsl to Assert() (fanquake) 459425776c
Merge bitcoin/bitcoin#26229: test: Use proper Boost macros instead of assertions (MacroFake) 3be81a2d4c
Merge bitcoin/bitcoin#25915: test: Fix wallet_balance intermittent issue (Andrew Chow) da1d3f2654
Merge bitcoin/bitcoin#25663: tracing: do not use `coin` after move in `CCoinsViewCache::AddCoin` (MacroFake)

Pull request description:

## Issue being fixed or feature implemented
Batch of trivial backports.

## What was done?
See commits.

## How Has This Been Tested?
Built locally; the large combined merge passed tests locally.

## Breaking Changes
Should be none.

## Checklist:
- [ ] I have performed a self-review of my own code
- [ ] I have commented my code, particularly in hard-to-understand areas
- [ ] I have added or updated relevant unit/integration/functional/e2e tests
- [ ] I have made corresponding changes to the documentation
- [x] I have assigned this pull request to a milestone _(for repository code-owners and collaborators only)_

ACKs for top commit:
  UdjinM6: utACK 168e5e4a50
Tree-SHA512: 3099e09bc500a86bffafea0db136e3213f69b69e7af74304c171780e56ff1ff4c973a228962cf80aec62158ded19365d6f8506ef202a15751a43851574f082e2
This commit is contained in: commit e43d75bca2
@@ -963,7 +963,8 @@ if test x$TARGET_OS != xwindows; then
AX_CHECK_COMPILE_FLAG([-fPIC],[PIC_FLAGS="-fPIC"])
fi

dnl All versions of gcc that we commonly use for building are subject to bug
dnl Currently all versions of gcc are subject to a class of bugs, see the
dnl gccbug_90348 test case (only reproduces on GCC 11 and earlier) and the related bugs of
dnl https://gcc.gnu.org/bugzilla/show_bug.cgi?id=90348. To work around that, set
dnl -fstack-reuse=none for all gcc builds. (Only gcc understands this flag)
AX_CHECK_COMPILE_FLAG([-fstack-reuse=none],[HARDENED_CXXFLAGS="$HARDENED_CXXFLAGS -fstack-reuse=none"])
@@ -76,7 +76,7 @@ $(1)_extracted=$$($(1)_extract_dir)/.stamp_extracted
$(1)_preprocessed=$$($(1)_extract_dir)/.stamp_preprocessed
$(1)_cleaned=$$($(1)_extract_dir)/.stamp_cleaned
$(1)_built=$$($(1)_build_dir)/.stamp_built
$(1)_configured=$$($(1)_build_dir)/.stamp_configured
$(1)_configured=$(host_prefix)/.$(1)_stamp_configured
$(1)_staged=$$($(1)_staging_dir)/.stamp_staged
$(1)_postprocessed=$$($(1)_staging_prefix_dir)/.stamp_postprocessed
$(1)_download_path_fixed=$(subst :,\:,$$($(1)_download_path))
@@ -87,9 +87,9 @@ $(1)_download_path_fixed=$(subst :,\:,$$($(1)_download_path))
$(1)_fetch_cmds ?= $(call fetch_file,$(1),$(subst \:,:,$$($(1)_download_path_fixed)),$$($(1)_download_file),$($(1)_file_name),$($(1)_sha256_hash))
$(1)_extract_cmds ?= mkdir -p $$($(1)_extract_dir) && echo "$$($(1)_sha256_hash) $$($(1)_source)" > $$($(1)_extract_dir)/.$$($(1)_file_name).hash && $(build_SHA256SUM) -c $$($(1)_extract_dir)/.$$($(1)_file_name).hash && $(build_TAR) --no-same-owner --strip-components=1 -xf $$($(1)_source)
$(1)_preprocess_cmds ?= true
$(1)_build_cmds ?=
$(1)_config_cmds ?=
$(1)_stage_cmds ?=
$(1)_build_cmds ?= true
$(1)_config_cmds ?= true
$(1)_stage_cmds ?= true
$(1)_set_vars ?=
@@ -137,6 +137,7 @@ $(1)_config_env+=$($(1)_config_env_$(host_arch)_$(host_os)) $($(1)_config_env_$(

$(1)_config_env+=PKG_CONFIG_LIBDIR=$($($(1)_type)_prefix)/lib/pkgconfig
$(1)_config_env+=PKG_CONFIG_PATH=$($($(1)_type)_prefix)/share/pkgconfig
$(1)_config_env+=PKG_CONFIG_SYSROOT_DIR=/
$(1)_config_env+=CMAKE_MODULE_PATH=$($($(1)_type)_prefix)/lib/cmake
$(1)_config_env+=PATH=$(build_prefix)/bin:$(PATH)
$(1)_build_env+=PATH=$(build_prefix)/bin:$(PATH)
@@ -220,18 +221,18 @@ $($(1)_preprocessed): | $($(1)_extracted)
$($(1)_configured): | $($(1)_dependencies) $($(1)_preprocessed)
echo Configuring $(1)...
rm -rf $(host_prefix); mkdir -p $(host_prefix)/lib; cd $(host_prefix); $(foreach package,$($(1)_all_dependencies), $(build_TAR) --no-same-owner -xf $($(package)_cached); )
mkdir -p $$(@D)
+{ cd $$(@D); $($(1)_config_env) $($(1)_config_cmds); } $$($(1)_logging)
mkdir -p $$($(1)_build_dir)
+{ cd $$($(1)_build_dir); export $($(1)_config_env); $($(1)_config_cmds); } $$($(1)_logging)
touch $$@
$($(1)_built): | $($(1)_configured)
echo Building $(1)...
mkdir -p $$(@D)
+{ cd $$(@D); $($(1)_build_env) $($(1)_build_cmds); } $$($(1)_logging)
+{ cd $$(@D); export $($(1)_build_env); $($(1)_build_cmds); } $$($(1)_logging)
touch $$@
$($(1)_staged): | $($(1)_built)
echo Staging $(1)...
mkdir -p $($(1)_staging_dir)/$(host_prefix)
+{ cd $($(1)_build_dir); $($(1)_stage_env) $($(1)_stage_cmds); } $$($(1)_logging)
+{ cd $($(1)_build_dir); export $($(1)_stage_env); $($(1)_stage_cmds); } $$($(1)_logging)
rm -rf $($(1)_extract_dir)
touch $$@
$($(1)_postprocessed): | $($(1)_staged)
@@ -240,7 +241,9 @@ $($(1)_postprocessed): | $($(1)_staged)
touch $$@
$($(1)_cached): | $($(1)_dependencies) $($(1)_postprocessed)
echo Caching $(1)...
cd $$($(1)_staging_dir)/$(host_prefix); find . | sort | $(build_TAR) --no-recursion -czf $$($(1)_staging_dir)/$$(@F) -T -
cd $$($(1)_staging_dir)/$(host_prefix); \
find . ! -name '.stamp_postprocessed' -print0 | TZ=UTC xargs -0r touch -h -m -t 200001011200; \
find . ! -name '.stamp_postprocessed' | LC_ALL=C sort | $(build_TAR) --numeric-owner --no-recursion -czf $$($(1)_staging_dir)/$$(@F) -T -
mkdir -p $$(@D)
rm -rf $$(@D) && mkdir -p $$(@D)
mv $$($(1)_staging_dir)/$$(@F) $$(@)
@@ -36,6 +36,7 @@ $(package)_extra_sources = $($(package)_qttranslations_file_name)
$(package)_extra_sources += $($(package)_qttools_file_name)

define $(package)_set_vars
$(package)_config_env = QT_MAC_SDK_NO_VERSION_CHECK=1
$(package)_config_opts_release = -release
$(package)_config_opts_release += -silent
$(package)_config_opts_debug = -debug
@@ -277,9 +278,6 @@ define $(package)_preprocess_cmds
endef

define $(package)_config_cmds
export PKG_CONFIG_SYSROOT_DIR=/ && \
export PKG_CONFIG_LIBDIR=$(host_prefix)/lib/pkgconfig && \
export QT_MAC_SDK_NO_VERSION_CHECK=1 && \
cd qtbase && \
./configure -top-level $($(package)_config_opts)
endef
@@ -96,7 +96,7 @@ There is an included test suite that is useful for testing code changes when dev
To run the test suite (recommended), you will need to have Python 3 installed:

```bash
pkg install python3
pkg install python3 databases/py-sqlite3
```

---
@@ -129,10 +129,10 @@ You may also need to take care of giving the correct path for `clang` and
`clang++`, like `CC=/path/to/clang CXX=/path/to/clang++` if the non-system
`clang` does not come first in your path.

Full configure that was tested on macOS Catalina with `brew` installed `llvm`:
Full configure that was tested on macOS with `brew` installed `llvm`:

```sh
./configure --enable-fuzz --with-sanitizers=fuzzer,address,undefined CC=/usr/local/opt/llvm/bin/clang CXX=/usr/local/opt/llvm/bin/clang++ --disable-asm
./configure --enable-fuzz --with-sanitizers=fuzzer,address,undefined --disable-asm CC=$(brew --prefix llvm)/bin/clang CXX=$(brew --prefix llvm)/bin/clang++
```

Read the [libFuzzer documentation](https://llvm.org/docs/LibFuzzer.html) for more information. This [libFuzzer tutorial](https://github.com/google/fuzzing/blob/master/tutorial/libFuzzerTutorial.md) might also be of interest.
File diff suppressed because it is too large
@@ -259,21 +259,10 @@ bool BlockFilter::BuildParams(GCSFilter::Params& params) const

uint256 BlockFilter::GetHash() const
{
const std::vector<unsigned char>& data = GetEncodedFilter();

uint256 result;
CHash256().Write(data).Finalize(result);
return result;
return Hash(GetEncodedFilter());
}

uint256 BlockFilter::ComputeHeader(const uint256& prev_header) const
{
const uint256& filter_hash = GetHash();

uint256 result;
CHash256()
.Write(filter_hash)
.Write(prev_header)
.Finalize(result);
return result;
return Hash(GetHash(), prev_header);
}
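The two rewrites above lean on the `Hash()` convenience overloads from `src/hash.h`, which wrap the `CHash256` Write/Finalize chain. A small, hedged sketch of how the single- and two-argument forms are used (it assumes the Bitcoin Core headers; `DoubleSha256Demo` is a hypothetical function, not project code):

```cpp
// Hedged sketch, assuming the Bitcoin Core source tree is on the include path.
#include <hash.h>     // Hash(), CHash256
#include <uint256.h>

#include <vector>

// Hypothetical helper: Hash(x) is the double-SHA256 of x, Hash(x, y) the
// double-SHA256 of x concatenated with y (the pattern used by ComputeHeader()).
uint256 DoubleSha256Demo(const std::vector<unsigned char>& data, const uint256& prev_header)
{
    const uint256 filter_hash{Hash(data)};
    return Hash(filter_hash, prev_header);
}
```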
@@ -102,9 +102,9 @@ void CCoinsViewCache::AddCoin(const COutPoint &outpoint, Coin&& coin, bool possi
TRACE5(utxocache, add,
outpoint.hash.data(),
(uint32_t)outpoint.n,
(uint32_t)coin.nHeight,
(int64_t)coin.out.nValue,
(bool)coin.IsCoinBase());
(uint32_t)it->second.coin.nHeight,
(int64_t)it->second.coin.out.nValue,
(bool)it->second.coin.IsCoinBase());
}

void CCoinsViewCache::EmplaceCoinInternalDANGER(COutPoint&& outpoint, Coin&& coin) {
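The only change here is that the tracepoint now reads height, amount, and coinbase flag from the inserted map entry (`it->second.coin`) rather than from `coin`, which has already been moved from. A minimal, hypothetical illustration of that hazard, using `std::string` in place of `Coin`:

```cpp
// Hypothetical illustration of use-after-move; not project code.
#include <cassert>
#include <string>
#include <utility>

int main()
{
    std::string coin = "height=100";
    std::string stored = std::move(coin);  // like emplacing the Coin into the cache map

    // Reading `coin` now mirrors the old TRACE5 arguments: it is in a valid but
    // unspecified state, so the traced values could be empty or stale.
    // Reading the stored copy mirrors the fixed code (it->second.coin.*).
    assert(stored == "height=100");
    return 0;
}
```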
@@ -159,9 +159,7 @@ bool BlockFilterIndex::ReadFilterFromDisk(const FlatFilePos& pos, const uint256&
std::vector<uint8_t> encoded_filter;
try {
filein >> block_hash >> encoded_filter;
uint256 result;
CHash256().Write(encoded_filter).Finalize(result);
if (result != hash) return error("Checksum mismatch in filter decode.");
if (Hash(encoded_filter) != hash) return error("Checksum mismatch in filter decode.");
filter = BlockFilter(GetFilterType(), block_hash, std::move(encoded_filter), /*skip_decode_check=*/true);
}
catch (const std::exception& e) {
@@ -243,8 +243,7 @@ bool CKey::VerifyPubKey(const CPubKey& pubkey) const {
unsigned char rnd[8];
std::string str = "Bitcoin key verification\n";
GetRandBytes(rnd);
uint256 hash;
CHash256().Write(MakeUCharSpan(str)).Write(rnd).Finalize(hash);
uint256 hash{Hash(str, rnd)};
std::vector<unsigned char> vchSig;
Sign(hash, vchSig);
return pubkey.Verify(hash, vchSig);
@@ -91,10 +91,7 @@ public:

TransactionTableModel *parent;

/* Local cache of wallet.
* As it is in the same order as the CWallet, by definition
* this is sorted by sha256.
*/
//! Local cache of wallet sorted by transaction hash
QList<TransactionRecord> cachedWallet;

/** True when model finishes loading all wallet transactions on start */
@@ -251,11 +251,16 @@ private:
public:
CRPCConvertTable();

bool convert(const std::string& method, int idx) {
return (members.count(std::make_pair(method, idx)) > 0);
/** Return arg_value as UniValue, and first parse it if it is a non-string parameter */
UniValue ArgToUniValue(const std::string& arg_value, const std::string& method, int param_idx)
{
return members.count(std::make_pair(method, param_idx)) > 0 ? ParseNonRFCJSONValue(arg_value) : arg_value;
}
bool convert(const std::string& method, const std::string& name) {
return (membersByName.count(std::make_pair(method, name)) > 0);

/** Return arg_value as UniValue, and first parse it if it is a non-string parameter */
UniValue ArgToUniValue(const std::string& arg_value, const std::string& method, const std::string& param_name)
{
return membersByName.count(std::make_pair(method, param_name)) > 0 ? ParseNonRFCJSONValue(arg_value) : arg_value;
}
};
@@ -287,14 +292,7 @@ UniValue RPCConvertValues(const std::string &strMethod, const std::vector<std::s

for (unsigned int idx = 0; idx < strParams.size(); idx++) {
const std::string& strVal = strParams[idx];

if (!rpcCvtTable.convert(strMethod, idx)) {
// insert string value directly
params.push_back(strVal);
} else {
// parse string as JSON, insert bool/number/object/etc. value
params.push_back(ParseNonRFCJSONValue(strVal));
}
params.push_back(rpcCvtTable.ArgToUniValue(strVal, strMethod, idx));
}

return params;
@@ -308,7 +306,7 @@ UniValue RPCConvertNamedValues(const std::string &strMethod, const std::vector<s
for (const std::string &s: strParams) {
size_t pos = s.find('=');
if (pos == std::string::npos) {
positional_args.push_back(rpcCvtTable.convert(strMethod, positional_args.size()) ? ParseNonRFCJSONValue(s) : s);
positional_args.push_back(rpcCvtTable.ArgToUniValue(s, strMethod, positional_args.size()));
continue;
}

@@ -318,13 +316,7 @@ UniValue RPCConvertNamedValues(const std::string &strMethod, const std::vector<s
// Intentionally overwrite earlier named values with later ones as a
// convenience for scripts and command line users that want to merge
// options.
if (!rpcCvtTable.convert(strMethod, name)) {
// insert string value directly
params.pushKV(name, value);
} else {
// parse string as JSON, insert bool/number/object/etc. value
params.pushKV(name, ParseNonRFCJSONValue(value));
}
params.pushKV(name, rpcCvtTable.ArgToUniValue(value, strMethod, name));
}

if (!positional_args.empty()) {
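Both conversion paths now go through `ArgToUniValue`, which JSON-parses a parameter only if it is registered as non-string and otherwise passes the raw string through. A standalone sketch of that lookup-or-passthrough idea, with hypothetical names and a tagged string standing in for `UniValue`/`ParseNonRFCJSONValue`:

```cpp
// Standalone sketch of the convert-table pattern; names and types are simplified stand-ins.
#include <iostream>
#include <set>
#include <string>
#include <utility>

// Parameters registered here are treated as JSON; everything else stays a plain string.
static const std::set<std::pair<std::string, int>> g_non_string_params{
    {"getblockhash", 0},  // the height argument is a number
};

std::string ArgToValue(const std::string& arg_value, const std::string& method, int param_idx)
{
    // The real code returns a UniValue and calls ParseNonRFCJSONValue(); tagging stands in here.
    return g_non_string_params.count({method, param_idx}) ? "<json> " + arg_value
                                                          : "<string> " + arg_value;
}

int main()
{
    std::cout << ArgToValue("1000", "getblockhash", 0) << '\n';      // <json> 1000
    std::cout << ArgToValue("mylabel", "getnewaddress", 0) << '\n';  // <string> mylabel
    return 0;
}
```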
@@ -27,7 +27,7 @@ BOOST_AUTO_TEST_CASE(file)
" { \"version\": 1, \"ban_created\": 0, \"banned_until\": 778, \"address\": \"1.0.0.0/8\" }"
"] }",
};
assert(WriteBinaryFile(banlist_path + ".json", entries_write));
BOOST_REQUIRE(WriteBinaryFile(banlist_path + ".json", entries_write));
{
// The invalid entries will be dropped, but the valid one remains
ASSERT_DEBUG_LOG("Dropping entry with unparseable address or subnet (aaaaaaaaa) from ban list");
@@ -35,7 +35,7 @@ BOOST_AUTO_TEST_CASE(file)
BanMan banman{banlist_path, /*client_interface=*/nullptr, /*default_ban_time=*/0};
banmap_t entries_read;
banman.GetBanned(entries_read);
assert(entries_read.size() == 1);
BOOST_CHECK_EQUAL(entries_read.size(), 1);
}
}
}
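Replacing `assert` with Boost macros means a failure is reported through the test framework, with expected vs. actual values, instead of aborting the whole binary. A small self-contained sketch, not taken from this diff:

```cpp
// Minimal Boost.Test example (hypothetical). BOOST_CHECK_EQUAL reports a mismatch and
// lets the suite keep running; a plain assert() would abort with no such report.
#define BOOST_TEST_MODULE boost_macro_demo
#include <boost/test/included/unit_test.hpp>

#include <cstddef>

BOOST_AUTO_TEST_CASE(boost_vs_assert)
{
    const std::size_t entries_read = 1;
    BOOST_REQUIRE(entries_read > 0);      // hard precondition: stop this test case if it fails
    BOOST_CHECK_EQUAL(entries_read, 1U);  // soft check: record expected vs. actual, continue
}
```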
@@ -132,8 +132,8 @@ void SimulationTest(CCoinsView* base, bool fake_best_block)
std::map<COutPoint, Coin> result;

// The cache stack.
std::vector<CCoinsViewCacheTest*> stack; // A stack of CCoinsViewCaches on top.
stack.push_back(new CCoinsViewCacheTest(base)); // Start with one cache.
std::vector<std::unique_ptr<CCoinsViewCacheTest>> stack; // A stack of CCoinsViewCaches on top.
stack.push_back(std::make_unique<CCoinsViewCacheTest>(base)); // Start with one cache.

// Use a limited set of random transaction ids, so we do test overwriting entries.
std::vector<uint256> txids;
@@ -219,7 +219,7 @@ void SimulationTest(CCoinsView* base, bool fake_best_block)
found_an_entry = true;
}
}
for (const CCoinsViewCacheTest *test : stack) {
for (const auto& test : stack) {
test->SelfTest();
}
}
@@ -242,18 +242,17 @@ void SimulationTest(CCoinsView* base, bool fake_best_block)
bool should_erase = InsecureRandRange(4) < 3;
BOOST_CHECK(should_erase ? stack.back()->Flush() : stack.back()->Sync());
flushed_without_erase |= !should_erase;
delete stack.back();
stack.pop_back();
}
if (stack.size() == 0 || (stack.size() < 4 && InsecureRandBool())) {
//Add a new cache
CCoinsView* tip = base;
if (stack.size() > 0) {
tip = stack.back();
tip = stack.back().get();
} else {
removed_all_caches = true;
}
stack.push_back(new CCoinsViewCacheTest(tip));
stack.push_back(std::make_unique<CCoinsViewCacheTest>(tip));
if (stack.size() == 4) {
reached_4_caches = true;
}
@@ -261,12 +260,6 @@ void SimulationTest(CCoinsView* base, bool fake_best_block)
}
}

// Clean up the stack.
while (stack.size() > 0) {
delete stack.back();
stack.pop_back();
}

// Verify coverage.
BOOST_CHECK(removed_all_caches);
BOOST_CHECK(reached_4_caches);
@@ -322,8 +315,8 @@ BOOST_AUTO_TEST_CASE(updatecoins_simulation_test)

// The cache stack.
CCoinsViewTest base; // A CCoinsViewTest at the bottom.
std::vector<CCoinsViewCacheTest*> stack; // A stack of CCoinsViewCaches on top.
stack.push_back(new CCoinsViewCacheTest(&base)); // Start with one cache.
std::vector<std::unique_ptr<CCoinsViewCacheTest>> stack; // A stack of CCoinsViewCaches on top.
stack.push_back(std::make_unique<CCoinsViewCacheTest>(&base)); // Start with one cache.

// Track the txids we've used in various sets
std::set<COutPoint> coinbase_coins;
@@ -488,25 +481,18 @@ BOOST_AUTO_TEST_CASE(updatecoins_simulation_test)
// Every 100 iterations, change the cache stack.
if (stack.size() > 0 && InsecureRandBool() == 0) {
BOOST_CHECK(stack.back()->Flush());
delete stack.back();
stack.pop_back();
}
if (stack.size() == 0 || (stack.size() < 4 && InsecureRandBool())) {
CCoinsView* tip = &base;
if (stack.size() > 0) {
tip = stack.back();
tip = stack.back().get();
}
stack.push_back(new CCoinsViewCacheTest(tip));
stack.push_back(std::make_unique<CCoinsViewCacheTest>(tip));
}
}
}

// Clean up the stack.
while (stack.size() > 0) {
delete stack.back();
stack.pop_back();
}

// Verify coverage.
BOOST_CHECK(spent_a_duplicate_coinbase);
@@ -920,7 +906,7 @@ Coin MakeCoin()
void TestFlushBehavior(
CCoinsViewCacheTest* view,
CCoinsViewDB& base,
std::vector<CCoinsViewCacheTest*>& all_caches,
std::vector<std::unique_ptr<CCoinsViewCacheTest>>& all_caches,
bool do_erasing_flush)
{
CAmount value;
@@ -931,7 +917,7 @@ void TestFlushBehavior(
auto flush_all = [&all_caches](bool erase) {
// Flush in reverse order to ensure that flushes happen from children up.
for (auto i = all_caches.rbegin(); i != all_caches.rend(); ++i) {
auto cache = *i;
auto& cache = *i;
// hashBlock must be filled before flushing to disk; value is
// unimportant here. This is normally done during connect/disconnect block.
cache->SetBestBlock(InsecureRand256());
@@ -1087,19 +1073,13 @@ BOOST_AUTO_TEST_CASE(ccoins_flush_behavior)
{
// Create two in-memory caches atop a leveldb view.
CCoinsViewDB base{"test", /*nCacheSize=*/ 1 << 23, /*fMemory=*/ true, /*fWipe=*/ false};
std::vector<CCoinsViewCacheTest*> caches;
caches.push_back(new CCoinsViewCacheTest(&base));
caches.push_back(new CCoinsViewCacheTest(caches.back()));
std::vector<std::unique_ptr<CCoinsViewCacheTest>> caches;
caches.push_back(std::make_unique<CCoinsViewCacheTest>(&base));
caches.push_back(std::make_unique<CCoinsViewCacheTest>(caches.back().get()));

for (CCoinsViewCacheTest* view : caches) {
TestFlushBehavior(view, base, caches, /*do_erasing_flush=*/ false);
TestFlushBehavior(view, base, caches, /*do_erasing_flush=*/ true);
}

// Clean up the caches.
while (caches.size() > 0) {
delete caches.back();
caches.pop_back();
for (const auto& view : caches) {
TestFlushBehavior(view.get(), base, caches, /*do_erasing_flush=*/false);
TestFlushBehavior(view.get(), base, caches, /*do_erasing_flush=*/true);
}
}
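Throughout these hunks the raw-pointer cache stack becomes a `std::vector<std::unique_ptr<...>>`, which is why the manual `delete`/`pop_back` cleanup loops disappear. A minimal sketch of that ownership model, using a hypothetical `Cache` stand-in rather than the real `CCoinsViewCacheTest`:

```cpp
// Hypothetical stand-in type; shows the unique_ptr ownership the tests switch to.
#include <memory>
#include <vector>

struct Cache {
    explicit Cache(Cache* parent) : m_parent(parent) {}
    Cache* m_parent;  // non-owning pointer to the view below this one
};

int main()
{
    std::vector<std::unique_ptr<Cache>> stack;
    stack.push_back(std::make_unique<Cache>(nullptr));             // bottom of the stack
    stack.push_back(std::make_unique<Cache>(stack.back().get()));  // child views the top via .get()

    stack.pop_back();  // erasing an element destroys that cache; no explicit delete
    return 0;
}                      // remaining caches are destroyed here, replacing the old cleanup loop
```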
@@ -297,7 +297,7 @@ FUZZ_TARGET_INIT(tx_pool, initialize_tx_pool)
{
FuzzedDataProvider fuzzed_data_provider(buffer.data(), buffer.size());
const auto& node = g_setup->m_node;
auto& chainstate = node.chainman->ActiveChainstate();
auto& chainstate{static_cast<DummyChainState&>(node.chainman->ActiveChainstate())};

MockTime(fuzzed_data_provider, chainstate);
SetMempoolConstraints(*node.args, fuzzed_data_provider);
@@ -315,6 +315,8 @@ FUZZ_TARGET_INIT(tx_pool, initialize_tx_pool)
CTxMemPool tx_pool_{/* estimator */ nullptr, /* check_ratio */ 1};
MockedTxPool& tx_pool = *static_cast<MockedTxPool*>(&tx_pool_);

chainstate.SetMempool(&tx_pool);

LIMITED_WHILE(fuzzed_data_provider.ConsumeBool(), 300)
{
const auto mut_tx = ConsumeTransaction(fuzzed_data_provider, txids);
@@ -205,8 +205,7 @@ BOOST_AUTO_TEST_CASE(key_key_negation)
unsigned char rnd[8];
std::string str = "Bitcoin key verification\n";
GetRandBytes(rnd);
uint256 hash;
CHash256().Write(MakeUCharSpan(str)).Write(rnd).Finalize(hash);
uint256 hash{Hash(str, rnd)};

// import the static test key
CKey key = DecodeSecret(strSecret1C);
@@ -60,7 +60,7 @@ static void MerkleComputation(const std::vector<uint256>& leaves, uint256* proot
}
}
mutated |= (inner[level] == h);
CHash256().Write(inner[level]).Write(h).Finalize(h);
h = Hash(inner[level], h);
}
// Store the resulting hash at inner position level.
inner[level] = h;
@@ -86,7 +86,7 @@ static void MerkleComputation(const std::vector<uint256>& leaves, uint256* proot
if (pbranch && matchh) {
pbranch->push_back(h);
}
CHash256().Write(h).Write(h).Finalize(h);
h = Hash(h, h);
// Increment count to the value it would have if two entries at this
// level had existed.
count += (((uint32_t)1) << level);
@@ -101,7 +101,7 @@ static void MerkleComputation(const std::vector<uint256>& leaves, uint256* proot
matchh = true;
}
}
CHash256().Write(inner[level]).Write(h).Finalize(h);
h = Hash(inner[level], h);
level++;
}
}
@@ -123,6 +123,11 @@ BOOST_AUTO_TEST_CASE(util_check)

// Check nested Asserts
BOOST_CHECK_EQUAL(Assert((Assert(x).test() ? 3 : 0)), 3);

// Check -Wdangling-gsl does not trigger when copying the int. (It would
// trigger on "const int&")
const int nine{*Assert(std::optional<int>{9})};
BOOST_CHECK_EQUAL(9, nine);
}

BOOST_AUTO_TEST_CASE(util_criticalsection)
@@ -9,6 +9,7 @@
#include <config/bitcoin-config.h>
#endif

#include <attributes.h>
#include <tinyformat.h>

#include <stdexcept>
@@ -24,7 +25,7 @@ class NonFatalCheckError : public std::runtime_error

/** Helper for CHECK_NONFATAL() */
template <typename T>
T&& inline_check_non_fatal(T&& val, const char* file, int line, const char* func, const char* assertion)
T&& inline_check_non_fatal(LIFETIMEBOUND T&& val, const char* file, int line, const char* func, const char* assertion)
{
if (!(val)) {
throw NonFatalCheckError(
@@ -56,7 +57,7 @@ void assertion_fail(const char* file, int line, const char* func, const char* as

/** Helper for Assert()/Assume() */
template <bool IS_ASSERT, typename T>
T&& inline_assertion_check(T&& val, [[maybe_unused]] const char* file, [[maybe_unused]] int line, [[maybe_unused]] const char* func, [[maybe_unused]] const char* assertion)
T&& inline_assertion_check(LIFETIMEBOUND T&& val, [[maybe_unused]] const char* file, [[maybe_unused]] int line, [[maybe_unused]] const char* func, [[maybe_unused]] const char* assertion)
{
if constexpr (IS_ASSERT
#ifdef ABORT_ON_FAILED_ASSUME
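`LIFETIMEBOUND` (clang's `[[clang::lifetimebound]]`, provided via `attributes.h`) marks the parameter whose lifetime the returned reference depends on, so clang can warn when a caller binds that reference to something longer-lived than the temporary. A hedged, standalone sketch of the idea, with a hypothetical `passthrough` helper rather than the project's `Assert`:

```cpp
// Hypothetical demo of [[clang::lifetimebound]]; gcc ignores the attribute, while clang may
// diagnose a dangling reference at the commented-out line.
#include <optional>
#include <utility>

#if defined(__clang__)
#define LIFETIMEBOUND [[clang::lifetimebound]]
#else
#define LIFETIMEBOUND
#endif

template <typename T>
T&& passthrough(LIFETIMEBOUND T&& val)
{
    return std::forward<T>(val);
}

int main()
{
    // const std::optional<int>& dangling = passthrough(std::optional<int>{9});  // may warn: reference outlives the temporary
    const int copied = *passthrough(std::optional<int>{9});  // copying the int is fine (cf. util_tests)
    return copied == 9 ? 0 : 1;
}
```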
@@ -16,6 +16,7 @@ from test_framework.messages import (
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
p2p_port,
)
from test_framework.netutil import test_ipv6_local
from time import sleep
@@ -102,6 +103,7 @@ class ZMQTest (BitcoinTestFramework):
# This test isn't testing txn relay/timing, so set whitelist on the
# peers for instant txn relay. This speeds up the test run time 2-3x.
self.extra_args = [["-whitelist=noban@127.0.0.1"]] * self.num_nodes
self.zmq_port_base = p2p_port(self.num_nodes + 1)

def skip_test_if_missing_module(self):
self.skip_if_no_py3_zmq()
@@ -177,7 +179,7 @@ class ZMQTest (BitcoinTestFramework):
self.restart_node(0, ["-zmqpubrawtx=foo", "-zmqpubhashtx=bar"])
self.zmq_context = zmq.Context()

address = 'tcp://127.0.0.1:28332'
address = f"tcp://127.0.0.1:{self.zmq_port_base}"
subs = self.setup_zmq_test([(topic, address) for topic in ["hashblock", "hashtx", "rawblock", "rawtx"]])

hashblock = subs[0]
@@ -248,7 +250,7 @@ class ZMQTest (BitcoinTestFramework):
self.log.info("Skipping reorg test because wallet is disabled")
return

address = 'tcp://127.0.0.1:28333'
address = f"tcp://127.0.0.1:{self.zmq_port_base}"

# Should only notify the tip if a reorg occurs
hashblock, hashtx = self.setup_zmq_test(
@@ -302,7 +304,7 @@ class ZMQTest (BitcoinTestFramework):
<32-byte hash>A<8-byte LE uint> : Transactionhash added mempool
"""
self.log.info("Testing 'sequence' publisher")
[seq] = self.setup_zmq_test([("sequence", "tcp://127.0.0.1:28333")])
[seq] = self.setup_zmq_test([("sequence", f"tcp://127.0.0.1:{self.zmq_port_base}")])
self.disconnect_nodes(0, 1)

# Mempool sequence number starts at 1
@@ -435,7 +437,7 @@ class ZMQTest (BitcoinTestFramework):
return

self.log.info("Testing 'mempool sync' usage of sequence notifier")
[seq] = self.setup_zmq_test([("sequence", "tcp://127.0.0.1:28333")])
[seq] = self.setup_zmq_test([("sequence", f"tcp://127.0.0.1:{self.zmq_port_base}")])

# In-memory counter, should always start at 1
next_mempool_seq = self.nodes[0].getrawmempool(mempool_sequence=True)["mempool_sequence"]
@@ -537,8 +539,8 @@ class ZMQTest (BitcoinTestFramework):
# chain lengths on node0 and node1; for this test we only need node0, so
# we can disable syncing blocks on the setup)
subscribers = self.setup_zmq_test([
("hashblock", "tcp://127.0.0.1:28334"),
("hashblock", "tcp://127.0.0.1:28335"),
("hashblock", f"tcp://127.0.0.1:{self.zmq_port_base + 1}"),
("hashblock", f"tcp://127.0.0.1:{self.zmq_port_base + 2}"),
], sync_blocks=False)

# Generate 1 block in nodes[0] and receive all notifications
@@ -555,7 +557,7 @@ class ZMQTest (BitcoinTestFramework):
self.log.info("Testing IPv6")
# Set up subscriber using IPv6 loopback address
subscribers = self.setup_zmq_test([
("hashblock", "tcp://[::1]:28332")
("hashblock", f"tcp://[::1]:{self.zmq_port_base}")
], ipv6=True)

# Generate 1 block in nodes[0]
@@ -93,8 +93,10 @@ We now let the first node generate 101 regtest blocks, and direct the coinbase
rewards to a wallet address owned by the mining node.

```
>>> test.nodes[0].createwallet('default')
{'name': 'default', 'warning': 'Empty string given as passphrase, wallet will not be encrypted.'}
>>> address = test.nodes[0].getnewaddress()
>>> test.self.generatetoaddress(nodes[0], 101, address)
>>> test.generatetoaddress(test.nodes[0], 101, address)
['2b98dd0044aae6f1cca7f88a0acf366a4bfe053c7f7b00da3c0d115f03d67efb', ...
```
Since the two nodes are both initialized by default to establish an outbound
@@ -266,7 +266,6 @@ class WalletTest(BitcoinTestFramework):
self.nodes[1].invalidateblock(block_reorg)
assert_equal(self.nodes[0].getbalance(minconf=0), 0) # wallet txs not in the mempool are untrusted
self.generatetoaddress(self.nodes[0], 1, ADDRESS_WATCHONLY, sync_fun=self.no_op)
assert_equal(self.nodes[0].getbalance(minconf=0), 0) # wallet txs not in the mempool are untrusted

# Now confirm tx_orig
self.restart_node(1, ['-persistmempool=0', '-checklevel=0'])
@@ -33,6 +33,11 @@ class WalletGroupTest(BitcoinTestFramework):

def run_test(self):
self.log.info("Setting up")
# To take full use of immediate tx relay, all nodes need to be reachable
# via inbound peers, i.e. connect first to last to close the circle
# (the default test network topology looks like this:
# node0 <-- node1 <-- node2 <-- node3 <-- node4 <-- node5)
self.connect_nodes(0, self.num_nodes - 1)
# Mine some coins
self.generate(self.nodes[0], COINBASE_MATURITY + 1)