Mirror of https://github.com/dashpay/dash.git
Merge pull request #5051 from Munkybooty/backports-0.20-pr8

Backports 0.20 pr8

Commit: fa84fc5634
@@ -257,6 +257,12 @@ after_success:
name: 'x86_64 Linux [GOAL: install] [bionic] [no wallet]'
env: >-
FILE_ENV="./ci/test/00_setup_env_native_fuzz.sh"

- stage: test
name: 'x86_64 Linux [GOAL: install] [bionic] [no depends, only system libs, fuzzers under valgrind]'
env: >-
FILE_ENV="./ci/test/00_setup_env_native_fuzz_with_valgrind.sh"

- stage: test
env: >-
FILE_ENV="./ci/test/00_setup_env_native_nowallet.sh"

ci/test/00_setup_env_native_fuzz_with_valgrind.sh (new file, 18 lines)
@@ -0,0 +1,18 @@
#!/usr/bin/env bash
#
# Copyright (c) 2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.

export LC_ALL=C.UTF-8

export CONTAINER_NAME=ci_native_fuzz_valgrind
export PACKAGES="clang-8 llvm-8 python3 libevent-dev bsdmainutils libboost-system-dev libboost-filesystem-dev libboost-chrono-dev libboost-test-dev libboost-thread-dev valgrind"
export NO_DEPENDS=1
export RUN_UNIT_TESTS=false
export RUN_FUNCTIONAL_TESTS=false
export RUN_FUZZ_TESTS=true
export FUZZ_TESTS_CONFIG="--valgrind"
export GOAL="install"
export BITCOIN_CONFIG="--enable-fuzz --with-sanitizers=fuzzer CC=clang-8 CXX=clang++-8"
# Use clang-8, instead of default clang on bionic, which is clang-6 and does not come with libfuzzer on aarch64

@@ -23,6 +23,8 @@ Then install [Homebrew](https://brew.sh).
brew install automake libtool pkg-config libnatpmp
```

If you run into issues, check [Homebrew's troubleshooting page](https://docs.brew.sh/Troubleshooting).

If you want to build the disk image with `make deploy` (.dmg / optional), you need RSVG:
```shell
brew install librsvg

doc/release-notes-17264.md (new file, 4 lines)
@@ -0,0 +1,4 @@
Updated RPCs
------------

- `walletprocesspsbt` and `walletcreatefundedpsbt` now include BIP 32 derivation paths by default for public keys if we know them. This can be disabled by setting `bip32derivs` to `false`.

@@ -608,7 +608,7 @@ void SetupServerArgs(NodeContext& node)
argsman.AddArg("-whitebind=<[permissions@]addr>", "Bind to given address and whitelist peers connecting to it. "
"Use [host]:port notation for IPv6. Allowed permissions are bloomfilter (allow requesting BIP37 filtered blocks and transactions), "
"noban (do not ban for misbehavior), "
"forcerelay (relay even non-standard transactions), "
"forcerelay (relay transactions that are already in the mempool; implies relay), "
"relay (relay even in -blocksonly mode), "
"and mempool (allow requesting BIP35 mempool contents). "
"Specify multiple permissions separated by commas (default: noban,mempool,relay). Can be specified multiple times.", ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
@@ -753,7 +753,7 @@ void SetupServerArgs(NodeContext& node)
argsman.AddArg("-datacarriersize", strprintf("Maximum size of data in data carrier transactions we relay and mine (default: %u)", MAX_OP_RETURN_RELAY), ArgsManager::ALLOW_ANY, OptionsCategory::NODE_RELAY);
argsman.AddArg("-minrelaytxfee=<amt>", strprintf("Fees (in %s/kB) smaller than this are considered zero fee for relaying, mining and transaction creation (default: %s)",
CURRENCY_UNIT, FormatMoney(DEFAULT_MIN_RELAY_TX_FEE)), ArgsManager::ALLOW_ANY, OptionsCategory::NODE_RELAY);
argsman.AddArg("-whitelistforcerelay", strprintf("Add 'forcerelay' permission to whitelisted inbound peers with default permissions. This will relay transactions even if the transactions were already in the mempool or violate local relay policy. (default: %d)", DEFAULT_WHITELISTFORCERELAY), ArgsManager::ALLOW_ANY, OptionsCategory::NODE_RELAY);
argsman.AddArg("-whitelistforcerelay", strprintf("Add 'forcerelay' permission to whitelisted inbound peers with default permissions. This will relay transactions even if the transactions were already in the mempool. (default: %d)", DEFAULT_WHITELISTFORCERELAY), ArgsManager::ALLOW_ANY, OptionsCategory::NODE_RELAY);
argsman.AddArg("-whitelistrelay", strprintf("Add 'relay' permission to whitelisted inbound peers with default permissions. This will accept relayed transactions even when not relaying transactions (default: %d)", DEFAULT_WHITELISTRELAY), ArgsManager::ALLOW_ANY, OptionsCategory::NODE_RELAY);

argsman.AddArg("-blockmaxsize=<n>", strprintf("Set maximum block size in bytes (default: %d)", DEFAULT_BLOCK_MAX_SIZE), ArgsManager::ALLOW_ANY, OptionsCategory::BLOCK_CREATION);

@@ -18,7 +18,7 @@ enum NetPermissionFlags
PF_BLOOMFILTER = (1U << 1),
// Relay and accept transactions from this peer, even if -blocksonly is true
PF_RELAY = (1U << 3),
// Always relay transactions from this peer, even if already in mempool or rejected from policy
// Always relay transactions from this peer, even if already in mempool
// Keep parameter interaction: forcerelay implies relay
PF_FORCERELAY = (1U << 2) | PF_RELAY,
// Can't be banned/disconnected/discouraged for misbehavior

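Because PF_FORCERELAY folds the relay bit into its value, granting forcerelay automatically grants relay. A self-contained sketch of that bitmask interaction (flag values copied from the hunk above; the HasFlag helper and main() are illustrative, not part of the patch):

```cpp
#include <cassert>
#include <cstdint>

// Flag values as declared in the net_permissions.h hunk above.
enum NetPermissionFlags : uint32_t {
    PF_BLOOMFILTER = (1U << 1),
    PF_RELAY       = (1U << 3),
    // forcerelay implies relay because the relay bit is folded in
    PF_FORCERELAY  = (1U << 2) | PF_RELAY,
};

// Illustrative helper: true if every bit of `f` is present in `flags`.
static bool HasFlag(uint32_t flags, NetPermissionFlags f)
{
    return (flags & f) == static_cast<uint32_t>(f);
}

int main()
{
    uint32_t peer_flags = PF_FORCERELAY;    // e.g. granted via -whitelist=forcerelay@...
    assert(HasFlag(peer_flags, PF_RELAY));  // relay is implied automatically
    assert(!HasFlag(peer_flags, PF_BLOOMFILTER));
    return 0;
}
```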
@@ -1167,17 +1167,6 @@ bool IsBanned(NodeId pnode)
return false;
}

/**
* Returns true if the given validation state result may result in a peer
* banning/disconnecting us. We use this to determine which unaccepted
* transactions from a whitelisted peer that we can safely relay.
*/
static bool TxRelayMayResultInDisconnect(const CValidationState& state)
{
assert(IsTransactionReason(state.GetReason()));
return state.GetReason() == ValidationInvalidReason::CONSENSUS;
}

/**
* Potentially mark a node discouraged based on the contents of a CValidationState object
*
@@ -1187,10 +1176,9 @@ static bool TxRelayMayResultInDisconnect(const CValidationState& state)
* txs, the peer should not be punished. See BIP 152.
*
* @return Returns true if the peer was punished (probably disconnected)
*
* Changes here may need to be reflected in TxRelayMayResultInDisconnect().
*/
static bool MaybePunishNode(NodeId nodeid, const CValidationState& state, bool via_compact_block, const std::string& message = "") {
static bool MaybePunishNode(NodeId nodeid, const CValidationState& state, bool via_compact_block, const std::string& message = "")
{
switch (state.GetReason()) {
case ValidationInvalidReason::NONE:
break;
@@ -1254,12 +1242,6 @@ static bool MaybePunishNode(NodeId nodeid, const CValidationState& state, bool v
}








//////////////////////////////////////////////////////////////////////////////
//
// blockchain -> download logic notification
@@ -3469,14 +3451,11 @@ bool ProcessMessage(CNode* pfrom, const std::string& msg_type, CDataStream& vRec

if (pfrom->HasPermission(PF_FORCERELAY)) {
// Always relay transactions received from whitelisted peers, even
// if they were already in the mempool or rejected from it due
// to policy, allowing the node to function as a gateway for
// if they were already in the mempool,
// allowing the node to function as a gateway for
// nodes hidden behind it.
//
// Never relay transactions that might result in being
// disconnected (or banned).
if (state.IsInvalid() && TxRelayMayResultInDisconnect(state)) {
LogPrintf("Not relaying invalid transaction %s from whitelisted peer=%d (%s)\n", tx.GetHash().ToString(), pfrom->GetId(), FormatStateMessage(state));
if (!mempool.exists(tx.GetHash())) {
LogPrintf("Not relaying non-mempool transaction %s from whitelisted peer=%d\n", tx.GetHash().ToString(), pfrom->GetId());
} else {
LogPrintf("Force relaying tx %s from whitelisted peer=%d\n", tx.GetHash().ToString(), pfrom->GetId());
RelayTransaction(tx.GetHash(), *connman);

@@ -10,6 +10,7 @@

#include <algorithm>
#include <atomic>
#include <cstring>
#include <ios>
#include <limits>
#include <list>
@@ -127,27 +128,31 @@ template<typename Stream> inline uint64_t ser_readdata64(Stream &s)
}
inline uint64_t ser_double_to_uint64(double x)
{
union { double x; uint64_t y; } tmp;
tmp.x = x;
return tmp.y;
uint64_t tmp;
std::memcpy(&tmp, &x, sizeof(x));
static_assert(sizeof(tmp) == sizeof(x), "double and uint64_t assumed to have the same size");
return tmp;
}
inline uint32_t ser_float_to_uint32(float x)
{
union { float x; uint32_t y; } tmp;
tmp.x = x;
return tmp.y;
uint32_t tmp;
std::memcpy(&tmp, &x, sizeof(x));
static_assert(sizeof(tmp) == sizeof(x), "float and uint32_t assumed to have the same size");
return tmp;
}
inline double ser_uint64_to_double(uint64_t y)
{
union { double x; uint64_t y; } tmp;
tmp.y = y;
return tmp.x;
double tmp;
std::memcpy(&tmp, &y, sizeof(y));
static_assert(sizeof(tmp) == sizeof(y), "double and uint64_t assumed to have the same size");
return tmp;
}
inline float ser_uint32_to_float(uint32_t y)
{
union { float x; uint32_t y; } tmp;
tmp.y = y;
return tmp.x;
float tmp;
std::memcpy(&tmp, &y, sizeof(y));
static_assert(sizeof(tmp) == sizeof(y), "float and uint32_t assumed to have the same size");
return tmp;
}

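These helpers replace union type punning, which is undefined behavior in C++, with std::memcpy, which compilers optimize down to a plain bit copy. A self-contained sketch of the same pattern with a round-trip check (the names here are illustrative, not taken from serialize.h):

```cpp
#include <cassert>
#include <cstdint>
#include <cstring>

// Bit-cast a double to its raw 64-bit representation via memcpy (well-defined),
// mirroring the approach taken in the serialize.h hunk above.
inline uint64_t DoubleToBits(double x)
{
    static_assert(sizeof(double) == sizeof(uint64_t), "double and uint64_t assumed to have the same size");
    uint64_t bits;
    std::memcpy(&bits, &x, sizeof(x));
    return bits;
}

inline double BitsToDouble(uint64_t bits)
{
    double x;
    std::memcpy(&x, &bits, sizeof(bits));
    return x;
}

int main()
{
    const double value = 1.5;
    // The round trip must reproduce the original value bit-for-bit.
    assert(BitsToDouble(DoubleToBits(value)) == value);
    return 0;
}
```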
@@ -77,10 +77,12 @@ std::string FormatISO8601DateTime(int64_t nTime) {
struct tm ts;
time_t time_val = nTime;
#ifdef HAVE_GMTIME_R
gmtime_r(&time_val, &ts);
if (gmtime_r(&time_val, &ts) == nullptr) {
#else
gmtime_s(&ts, &time_val);
if (gmtime_s(&ts, &time_val) != 0) {
#endif
return {};
}
return strprintf("%04i-%02i-%02iT%02i:%02i:%02iZ", ts.tm_year + 1900, ts.tm_mon + 1, ts.tm_mday, ts.tm_hour, ts.tm_min, ts.tm_sec);
}

@@ -88,10 +90,12 @@ std::string FormatISO8601Date(int64_t nTime) {
struct tm ts;
time_t time_val = nTime;
#ifdef HAVE_GMTIME_R
gmtime_r(&time_val, &ts);
if (gmtime_r(&time_val, &ts) == nullptr) {
#else
gmtime_s(&ts, &time_val);
if (gmtime_s(&ts, &time_val) != 0) {
#endif
return {};
}
return strprintf("%04i-%02i-%02i", ts.tm_year + 1900, ts.tm_mon + 1, ts.tm_mday);
}

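With this change the formatters return an empty string when gmtime_r/gmtime_s rejects the input, instead of printing an uninitialized struct tm. A self-contained sketch of the same defensive pattern (the helper name is illustrative, and _WIN32 stands in here for the build system's HAVE_GMTIME_R check):

```cpp
#include <cstdint>
#include <cstdio>
#include <ctime>
#include <string>

// Format a UNIX timestamp as a UTC "YYYY-MM-DD" string, returning "" if the
// conversion fails (for example when the value is out of range for time_t).
std::string FormatUTCDate(int64_t unix_time)
{
    std::tm ts{};
    std::time_t time_val = unix_time;
#ifdef _WIN32
    if (gmtime_s(&ts, &time_val) != 0) return {};
#else
    if (gmtime_r(&time_val, &ts) == nullptr) return {};
#endif
    char buf[16];
    std::snprintf(buf, sizeof(buf), "%04i-%02i-%02i", ts.tm_year + 1900, ts.tm_mon + 1, ts.tm_mday);
    return buf;
}
```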
@@ -27,6 +27,6 @@
bool& complete,
int sighash_type = 1 /* SIGHASH_ALL */,
bool sign = true,
bool bip32derivs = false);
bool bip32derivs = true);

#endif // BITCOIN_WALLET_PSBTWALLET_H

@@ -3133,7 +3133,7 @@ static UniValue listunspent(const JSONRPCRequest& request)
CTxDestination address;
const CScript& scriptPubKey = out.tx->tx->vout[out.i].scriptPubKey;
bool fValidAddress = ExtractDestination(scriptPubKey, address);
bool reused = avoid_reuse && pwallet->IsUsedDestination(address);
bool reused = avoid_reuse && pwallet->IsSpentKey(address);

if (destinations.size() && (!fValidAddress || !destinations.count(address)))
continue;
@@ -3866,7 +3866,7 @@ UniValue walletprocesspsbt(const JSONRPCRequest& request)
" \"ALL|ANYONECANPAY\"\n"
" \"NONE|ANYONECANPAY\"\n"
" \"SINGLE|ANYONECANPAY\""},
{"bip32derivs", RPCArg::Type::BOOL, /* default */ "false", "If true, includes the BIP 32 derivation paths for public keys if we know them"},
{"bip32derivs", RPCArg::Type::BOOL, /* default */ "true", "Include BIP 32 derivation paths for public keys if we know them"},
},
RPCResult{
RPCResult::Type::OBJ, "", "",
@@ -3902,7 +3902,7 @@ UniValue walletprocesspsbt(const JSONRPCRequest& request)

// Fill transaction with our data and also sign
bool sign = request.params[1].isNull() ? true : request.params[1].get_bool();
bool bip32derivs = request.params[3].isNull() ? false : request.params[3].get_bool();
bool bip32derivs = request.params[3].isNull() ? true : request.params[3].get_bool();
bool complete = true;
const TransactionError err = FillPSBT(pwallet, psbtx, complete, nHashType, sign, bip32derivs);
if (err != TransactionError::OK) {
@@ -3975,7 +3975,7 @@ UniValue walletcreatefundedpsbt(const JSONRPCRequest& request)
" \"CONSERVATIVE\""},
},
"options"},
{"bip32derivs", RPCArg::Type::BOOL, /* default */ "false", "If true, includes the BIP 32 derivation paths for public keys if we know them"},
{"bip32derivs", RPCArg::Type::BOOL, /* default */ "true", "Include BIP 32 derivation paths for public keys if we know them"},
},
RPCResult{
RPCResult::Type::OBJ, "", "",
@@ -4013,7 +4013,7 @@ UniValue walletcreatefundedpsbt(const JSONRPCRequest& request)
PartiallySignedTransaction psbtx{rawTx};

// Fill transaction with out data but don't sign
bool bip32derivs = request.params[4].isNull() ? false : request.params[4].get_bool();
bool bip32derivs = request.params[4].isNull() ? true : request.params[4].get_bool();
bool complete = true;
const TransactionError err = FillPSBT(pwallet, psbtx, complete, 1, false, bip32derivs);
if (err != TransactionError::OK) {

@@ -777,7 +777,7 @@ void CWallet::MarkDirty()
fAnonymizableTallyCachedNonDenom = false;
}

void CWallet::SetUsedDestinationState(const uint256& hash, unsigned int n, bool used, std::set<CTxDestination>& tx_destinations)
void CWallet::SetSpentKeyState(const uint256& hash, unsigned int n, bool used, std::set<CTxDestination>& tx_destinations)
{
const CWalletTx* srctx = GetWalletTx(hash);
if (!srctx) return;
@@ -797,17 +797,17 @@ void CWallet::SetUsedDestinationState(const uint256& hash, unsigned int n, bool
}
}

bool CWallet::IsUsedDestination(const CTxDestination& dst) const
bool CWallet::IsSpentKey(const CTxDestination& dst) const
{
LOCK(cs_wallet);
return IsMine(dst) && GetDestData(dst, "used", nullptr);
}

bool CWallet::IsUsedDestination(const uint256& hash, unsigned int n) const
bool CWallet::IsSpentKey(const uint256& hash, unsigned int n) const
{
CTxDestination dst;
const CWalletTx* srctx = GetWalletTx(hash);
return srctx && ExtractDestination(srctx->tx->vout[n].scriptPubKey, dst) && IsUsedDestination(dst);
return srctx && ExtractDestination(srctx->tx->vout[n].scriptPubKey, dst) && IsSpentKey(dst);
}

bool CWallet::AddToWallet(const CWalletTx& wtxIn, bool fFlushOnClose)
@@ -824,7 +824,7 @@ bool CWallet::AddToWallet(const CWalletTx& wtxIn, bool fFlushOnClose)

for (const CTxIn& txin : wtxIn.tx->vin) {
const COutPoint& op = txin.prevout;
SetUsedDestinationState(op.hash, op.n, true, tx_destinations);
SetSpentKeyState(op.hash, op.n, true, tx_destinations);
}

MarkDestinationsDirty(tx_destinations);
@@ -2102,7 +2102,7 @@ CAmount CWalletTx::GetAvailableCredit(bool fUseCache, const isminefilter& filter
uint256 hashTx = GetHash();
for (unsigned int i = 0; i < tx->vout.size(); i++)
{
if (!pwallet->IsSpent(hashTx, i) && (allow_used_addresses || !pwallet->IsUsedDestination(hashTx, i)))
if (!pwallet->IsSpent(hashTx, i) && (allow_used_addresses || !pwallet->IsSpentKey(hashTx, i)))
{
const CTxOut &txout = tx->vout[i];
nCredit += pwallet->GetCredit(txout, filter);
@@ -2546,7 +2546,7 @@ void CWallet::AvailableCoins(std::vector<COutput> &vCoins, bool fOnlySafe, const
continue;
}

if (!allow_used_addresses && IsUsedDestination(wtxid, i)) {
if (!allow_used_addresses && IsSpentKey(wtxid, i)) {
continue;
}

@@ -907,9 +907,9 @@ public:
bool IsSpent(const uint256& hash, unsigned int n) const EXCLUSIVE_LOCKS_REQUIRED(cs_wallet);

// Whether this or any UTXO with the same CTxDestination has been spent.
bool IsUsedDestination(const CTxDestination& dst) const;
bool IsUsedDestination(const uint256& hash, unsigned int n) const;
void SetUsedDestinationState(const uint256& hash, unsigned int n, bool used, std::set<CTxDestination>& tx_destinations);
bool IsSpentKey(const CTxDestination& dst) const;
bool IsSpentKey(const uint256& hash, unsigned int n) const;
void SetSpentKeyState(const uint256& hash, unsigned int n, bool used, std::set<CTxDestination>& tx_destinations);

std::vector<OutputGroup> GroupOutputs(const std::vector<COutput>& outputs, bool single_coin) const;

@@ -60,7 +60,7 @@ class BIP65Test(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.extra_args = [[
'-whitelist=127.0.0.1',
'-whitelist=noban@127.0.0.1',
'-dip3params=9000:9000',
'-par=1', # Use only one script thread to get the exact reject reason for testing
'-acceptnonstdtxn=1', # cltv_invalidate is nonstandard

@@ -41,6 +41,7 @@ bip112txs_vary_nSequence_9 - 16 txs with nSequence relative_locktimes of 9 evaluated against 10 OP_CSV OP_DROP
bip112txs_vary_OP_CSV - 16 txs with nSequence = 10 evaluated against varying {relative_locktimes of 10} OP_CSV OP_DROP
bip112txs_vary_OP_CSV_9 - 16 txs with nSequence = 9 evaluated against varying {relative_locktimes of 10} OP_CSV OP_DROP
bip112tx_special - test negative argument to OP_CSV
bip112tx_emptystack - test empty stack (= no argument) OP_CSV
"""
from decimal import Decimal
from itertools import product
@@ -61,6 +62,8 @@ from test_framework.util import (
hex_str_to_bytes,
)

TESTING_TX_COUNT = 83 # Number of testing transactions: 1 BIP113 tx, 16 BIP68 txs, 66 BIP112 txs (see comments above)
COINBASE_BLOCK_COUNT = TESTING_TX_COUNT # Number of coinbase blocks we need to generate as inputs for our txs
BASE_RELATIVE_LOCKTIME = 10
SEQ_DISABLE_FLAG = 1 << 31
SEQ_RANDOM_HIGH_BIT = 1 << 25
@@ -99,6 +102,13 @@ def create_bip112special(node, input, txversion, address):
signtx.vin[0].scriptSig = CScript([-1, OP_CHECKSEQUENCEVERIFY, OP_DROP] + list(CScript(signtx.vin[0].scriptSig)))
return signtx

def create_bip112emptystack(node, input, txversion, address):
tx = create_transaction(node, input, address, amount=Decimal("499.98"))
tx.nVersion = txversion
signtx = sign_transaction(node, tx)
signtx.vin[0].scriptSig = CScript([OP_CHECKSEQUENCEVERIFY] + list(CScript(signtx.vin[0].scriptSig)))
return signtx

def send_generic_input_tx(node, coinbases, address):
return node.sendrawtransaction(ToHex(sign_transaction(node, create_transaction(node, node.getblock(coinbases.pop())['tx'][0], address, amount=Decimal("499.99")))))

@@ -144,7 +154,12 @@ class BIP68_112_113Test(BitcoinTestFramework):
self.setup_clean_chain = True
# Must also set '-maxtipage=600100' to allow syncing from very old blocks
# and '-dip3params=2000:2000' to create pre-dip3 blocks only
self.extra_args = [['-whitelist=127.0.0.1', '-blockversion=4', '-maxtipage=600100', '-dip3params=2000:2000']]
self.extra_args = [[
'-whitelist=noban@127.0.0.1',
'-blockversion=4',
'-maxtipage=600100', '-dip3params=2000:2000',
'-par=1', # Use only one script thread to get the exact reject reason for testing
]]
self.supports_cli = False

def setup_network(self):
@@ -173,28 +188,33 @@ class BIP68_112_113Test(BitcoinTestFramework):
block.solve()
return block

def send_blocks(self, blocks, success=True):
def send_blocks(self, blocks, success=True, reject_reason=None):
"""Sends blocks to test node. Syncs and verifies that tip has advanced to most recent block.

Call with success = False if the tip shouldn't advance to the most recent block."""
self.nodes[0].p2p.send_blocks_and_test(blocks, self.nodes[0], success=success)
self.nodes[0].p2p.send_blocks_and_test(blocks, self.nodes[0], success=success, reject_reason=reject_reason)

def run_test(self):
self.nodes[0].add_p2p_connection(P2PDataStore())

self.log.info("Generate blocks in the past for coinbase outputs.")
self.coinbase_blocks = self.nodes[0].generate(1 + 16 + 2 * 32 + 1) # 82 blocks generated for inputs
self.coinbase_blocks = self.nodes[0].generate(COINBASE_BLOCK_COUNT) # blocks generated for inputs
# set time so that there was enough time to build up to 1000 blocks 10 minutes apart on top of the last one
# without worrying about getting into the future
self.nodes[0].setmocktime(TIME_GENESIS_BLOCK + 600 * 1000 + 100)
self.tipheight = 82 # height of the next block to build
self.tipheight = COINBASE_BLOCK_COUNT # height of the next block to build
self.last_block_time = TIME_GENESIS_BLOCK
self.tip = int(self.nodes[0].getbestblockhash(), 16)
self.nodeaddress = self.nodes[0].getnewaddress()

# TODO: uncomment the code below when bitcoin#16060 is backported,
# should go right below `# Activation height is hardcoded` line
# # We advance to block height five below BIP112 activation for the following tests
# test_blocks = self.generate_blocks(CSV_ACTIVATION_HEIGHT-5 - COINBASE_BLOCK_COUNT)

self.log.info("Test that the csv softfork is DEFINED")
assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], 'defined')
test_blocks = self.generate_blocks(61, 4)
test_blocks = self.generate_blocks(60, 4)
self.send_blocks(test_blocks)

self.log.info("Advance from DEFINED to STARTED, height = 143")
@@ -255,6 +275,8 @@ class BIP68_112_113Test(BitcoinTestFramework):

# 1 special input with -1 OP_CSV OP_DROP (actually will be prepended to spending scriptSig)
bip112specialinput = send_generic_input_tx(self.nodes[0], self.coinbase_blocks, self.nodeaddress)
# 1 special input with (empty stack) OP_CSV (actually will be prepended to spending scriptSig)
bip112emptystackinput = send_generic_input_tx(self.nodes[0],self.coinbase_blocks, self.nodeaddress)

# 1 normal input
bip113input = send_generic_input_tx(self.nodes[0], self.coinbase_blocks, self.nodeaddress)
@@ -265,7 +287,7 @@ class BIP68_112_113Test(BitcoinTestFramework):
self.tip = int(inputblockhash, 16)
self.tipheight += 1
self.last_block_time += 600
assert_equal(len(self.nodes[0].getblock(inputblockhash, True)["tx"]), 82 + 1)
assert_equal(len(self.nodes[0].getblock(inputblockhash, True)["tx"]), TESTING_TX_COUNT + 1)

# 2 more version 4 blocks
test_blocks = self.generate_blocks(2, 4)
@@ -303,6 +325,9 @@ class BIP68_112_113Test(BitcoinTestFramework):
# -1 OP_CSV OP_DROP input
bip112tx_special_v1 = create_bip112special(self.nodes[0], bip112specialinput, 1, self.nodeaddress)
bip112tx_special_v2 = create_bip112special(self.nodes[0], bip112specialinput, 2, self.nodeaddress)
# (empty stack) OP_CSV input
bip112tx_emptystack_v1 = create_bip112emptystack(self.nodes[0], bip112emptystackinput, 1, self.nodeaddress)
bip112tx_emptystack_v2 = create_bip112emptystack(self.nodes[0], bip112emptystackinput, 2, self.nodeaddress)

self.log.info("TESTING")

@@ -310,11 +335,12 @@ class BIP68_112_113Test(BitcoinTestFramework):
self.log.info("Test version 1 txs")

success_txs = []
# add BIP113 tx and -1 CSV tx
# BIP113 tx, -1 CSV tx and empty stack CSV tx should succeed
bip113tx_v1.nLockTime = self.last_block_time - 600 * 5 # = MTP of prior block (not <) but < time put on current block
bip113signed1 = sign_transaction(self.nodes[0], bip113tx_v1)
success_txs.append(bip113signed1)
success_txs.append(bip112tx_special_v1)
success_txs.append(bip112tx_emptystack_v1)
# add BIP 68 txs
success_txs.extend(all_rlt_txs(bip68txs_v1))
# add BIP 112 with seq=10 txs
@@ -329,11 +355,12 @@ class BIP68_112_113Test(BitcoinTestFramework):
self.log.info("Test version 2 txs")

success_txs = []
# add BIP113 tx and -1 CSV tx
# BIP113 tx, -1 CSV tx and empty stack CSV tx should succeed
bip113tx_v2.nLockTime = self.last_block_time - 600 * 5 # = MTP of prior block (not <) but < time put on current block
bip113signed2 = sign_transaction(self.nodes[0], bip113tx_v2)
success_txs.append(bip113signed2)
success_txs.append(bip112tx_special_v2)
success_txs.append(bip112tx_emptystack_v2)
# add BIP 68 txs
success_txs.extend(all_rlt_txs(bip68txs_v2))
# add BIP 112 with seq=10 txs
@@ -420,8 +447,10 @@ class BIP68_112_113Test(BitcoinTestFramework):
self.log.info("BIP 112 tests")
self.log.info("Test version 1 txs")

# -1 OP_CSV tx should fail
# -1 OP_CSV tx and (empty stack) OP_CSV tx should fail
self.send_blocks([self.create_test_block([bip112tx_special_v1])], success=False)
self.send_blocks([self.create_test_block([bip112tx_emptystack_v1])], success=False,
reject_reason='non-mandatory-script-verify-flag (Operation not valid with the current stack size)')
# If SEQUENCE_LOCKTIME_DISABLE_FLAG is set in argument to OP_CSV, version 1 txs should still pass

success_txs = [tx['tx'] for tx in bip112txs_vary_OP_CSV_v1 if tx['sdf']]
@@ -439,8 +468,10 @@ class BIP68_112_113Test(BitcoinTestFramework):

self.log.info("Test version 2 txs")

# -1 OP_CSV tx should fail
# -1 OP_CSV tx and (empty stack) OP_CSV tx should fail
self.send_blocks([self.create_test_block([bip112tx_special_v2])], success=False)
self.send_blocks([self.create_test_block([bip112tx_emptystack_v2])], success=False,
reject_reason='non-mandatory-script-verify-flag (Operation not valid with the current stack size)')

# If SEQUENCE_LOCKTIME_DISABLE_FLAG is set in argument to OP_CSV, version 2 txs should pass (all sequence locks are met)
success_txs = [tx['tx'] for tx in bip112txs_vary_OP_CSV_v2 if tx['sdf']]
@@ -484,7 +515,5 @@ class BIP68_112_113Test(BitcoinTestFramework):
self.send_blocks([self.create_test_block(time_txs)])
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())

# TODO: Test empty stack fails

if __name__ == '__main__':
BIP68_112_113Test().main()

@@ -45,7 +45,7 @@ def unDERify(tx):
class BIP66Test(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.extra_args = [['-whitelist=127.0.0.1', '-dip3params=9000:9000', '-par=1', '-enablebip61']] # Use only one script thread to get the exact reject reason for testing
self.extra_args = [['-whitelist=noban@127.0.0.1', '-dip3params=9000:9000', '-par=1', '-enablebip61']] # Use only one script thread to get the exact reject reason for testing
self.setup_clean_chain = True
self.rpc_timeout = 240

@@ -125,9 +125,9 @@ class EstimateFeeTest(BitcoinTestFramework):
self.num_nodes = 3
# mine non-standard txs (e.g. txs with "dust" outputs)
self.extra_args = [
["-acceptnonstdtxn=1", "-maxorphantxsize=1000", "-whitelist=127.0.0.1"],
["-acceptnonstdtxn=1", "-blockmaxsize=17000", "-maxorphantxsize=1000", "-whitelist=127.0.0.1"],
["-acceptnonstdtxn=1", "-blockmaxsize=8000", "-maxorphantxsize=1000", "-whitelist=127.0.0.1"]
["-acceptnonstdtxn=1", "-maxorphantxsize=1000", "-whitelist=noban@127.0.0.1"],
["-acceptnonstdtxn=1", "-blockmaxsize=17000", "-maxorphantxsize=1000", "-whitelist=noban@127.0.0.1"],
["-acceptnonstdtxn=1", "-blockmaxsize=8000", "-maxorphantxsize=1000", "-whitelist=noban@127.0.0.1"]
]

def skip_test_if_missing_module(self):

@@ -145,10 +145,9 @@ class MaxUploadTest(BitcoinTestFramework):

self.nodes[0].disconnect_p2ps()

#stop and start node 0 with 1MB maxuploadtarget, whitelist 127.0.0.1
self.log.info("Restarting nodes with -whitelist=127.0.0.1")
self.log.info("Restarting node 0 with noban permission and 1MB maxuploadtarget")
self.stop_node(0)
self.start_node(0, ["-whitelist=127.0.0.1", "-maxuploadtarget=1", "-blockmaxsize=999000", "-maxtipage="+str(2*60*60*24*7), "-mocktime="+str(current_mocktime)])
self.start_node(0, ["-whitelist=noban@127.0.0.1", "-maxuploadtarget=1", "-blockmaxsize=999000", "-maxtipage="+str(2*60*60*24*7), "-mocktime="+str(current_mocktime)])

# Reconnect to self.nodes[0]
self.nodes[0].add_p2p_connection(TestP2PConn())

@@ -8,20 +8,34 @@ from decimal import Decimal

from test_framework.messages import COIN
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal, assert_raises_rpc_error, satoshi_round
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
satoshi_round,
wait_until,
)

# default limits
MAX_ANCESTORS = 25
MAX_DESCENDANTS = 25
# custom limits for node1
MAX_ANCESTORS_CUSTOM = 5
MAX_DESCENDANTS_CUSTOM = 10
assert MAX_DESCENDANTS_CUSTOM >= MAX_ANCESTORS_CUSTOM

class MempoolPackagesTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.extra_args = [
["-maxorphantxsize=1000"],
["-maxorphantxsize=1000", "-limitancestorcount={}".format(MAX_ANCESTORS_CUSTOM)],
[
"-maxorphantxsize=1000",
"-whitelist=noban@127.0.0.1", # immediate tx relay
],
[
"-maxorphantxsize=1000",
"-limitancestorcount={}".format(MAX_ANCESTORS_CUSTOM),
"-limitdescendantcount={}".format(MAX_DESCENDANTS_CUSTOM),
],
]

def skip_test_if_missing_module(self):
@@ -215,9 +229,11 @@ class MempoolPackagesTest(BitcoinTestFramework):
transaction_package.append({'txid': txid, 'vout': i, 'amount': sent_value})

# Sign and send up to MAX_DESCENDANT transactions chained off the parent tx
chain = [] # save sent txs for the purpose of checking node1's mempool later (see below)
for i in range(MAX_DESCENDANTS - 1):
utxo = transaction_package.pop(0)
(txid, sent_value) = self.chain_transaction(self.nodes[0], utxo['txid'], utxo['vout'], utxo['amount'], fee, 10)
chain.append(txid)
if utxo['txid'] is parent_transaction:
tx_children.append(txid)
for j in range(10):
@@ -234,7 +250,21 @@ class MempoolPackagesTest(BitcoinTestFramework):
utxo = transaction_package.pop(0)
assert_raises_rpc_error(-26, "too-long-mempool-chain", self.chain_transaction, self.nodes[0], utxo['txid'], utxo['vout'], utxo['amount'], fee, 10)

# TODO: check that node1's mempool is as expected
# Check that node1's mempool is as expected, containing:
# - txs from previous ancestor test (-> custom ancestor limit)
# - parent tx for descendant test
# - txs chained off parent tx (-> custom descendant limit)
wait_until(lambda: len(self.nodes[1].getrawmempool(False)) ==
MAX_ANCESTORS_CUSTOM + 1 + MAX_DESCENDANTS_CUSTOM, timeout=10)
mempool0 = self.nodes[0].getrawmempool(False)
mempool1 = self.nodes[1].getrawmempool(False)
assert set(mempool1).issubset(set(mempool0))
assert parent_transaction in mempool1
for tx in chain[:MAX_DESCENDANTS_CUSTOM]:
assert tx in mempool1
for tx in chain[MAX_DESCENDANTS_CUSTOM:]:
assert tx not in mempool1
# TODO: more detailed check of node1's mempool (fees etc.)

# TODO: test descendant size limits

@@ -23,7 +23,7 @@ class InvalidBlockRequestTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.setup_clean_chain = True
self.extra_args = [["-whitelist=127.0.0.1"]]
self.extra_args = [["-whitelist=noban@127.0.0.1"]]

def run_test(self):
# Add p2p connection to node0

@@ -7,20 +7,33 @@
Test that permissions are correctly calculated and applied
"""

from test_framework.address import ADDRESS_BCRT1_P2SH_OP_TRUE
from test_framework.messages import (
CTransaction,
FromHex,
)
from test_framework.mininode import P2PDataStore
from test_framework.script import (
CScript,
OP_TRUE,
)
from test_framework.test_node import ErrorMatch
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
p2p_port,
wait_until,
)


class P2PPermissionsTests(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.setup_clean_chain = True
self.extra_args = [[],[]]

def run_test(self):
self.check_tx_relay()

self.checkpermission(
# default permissions (no specific permissions)
["-whitelist=127.0.0.1"],
@@ -53,9 +66,9 @@ class P2PPermissionsTests(BitcoinTestFramework):
ip_port = "127.0.0.1:{}".format(p2p_port(1))
self.replaceinconfig(1, "bind=127.0.0.1", "whitebind=bloomfilter,forcerelay@" + ip_port)
self.checkpermission(
["-whitelist=noban@127.0.0.1" ],
["-whitelist=noban@127.0.0.1"],
# Check parameter interaction forcerelay should activate relay
["noban", "bloomfilter", "forcerelay", "relay" ],
["noban", "bloomfilter", "forcerelay", "relay"],
False)
self.replaceinconfig(1, "whitebind=bloomfilter,forcerelay@" + ip_port, "bind=127.0.0.1")

@@ -88,6 +101,55 @@ class P2PPermissionsTests(BitcoinTestFramework):
self.nodes[1].assert_start_raises_init_error(["-whitelist=noban@127.0.0.1:230"], "Invalid netmask specified in", match=ErrorMatch.PARTIAL_REGEX)
self.nodes[1].assert_start_raises_init_error(["-whitebind=noban@127.0.0.1/10"], "Cannot resolve -whitebind address", match=ErrorMatch.PARTIAL_REGEX)

def check_tx_relay(self):
block_op_true = self.nodes[0].getblock(self.nodes[0].generatetoaddress(100, ADDRESS_BCRT1_P2SH_OP_TRUE)[0])
self.sync_all()

self.log.debug("Create a connection from a whitelisted wallet that rebroadcasts raw txs")
# A python mininode is needed to send the raw transaction directly. If a full node was used, it could only
# rebroadcast via the inv-getdata mechanism. However, even for whitelisted connections, a full node would
# currently not request a txid that is already in the mempool.
self.restart_node(1, extra_args=["-whitelist=forcerelay@127.0.0.1"])
p2p_rebroadcast_wallet = self.nodes[1].add_p2p_connection(P2PDataStore())

self.log.debug("Send a tx from the wallet initially")
tx = FromHex(
CTransaction(),
self.nodes[0].createrawtransaction(
inputs=[{
'txid': block_op_true['tx'][0],
'vout': 0,
}], outputs=[{
ADDRESS_BCRT1_P2SH_OP_TRUE: 5,
}]),
)
tx.vin[0].scriptSig = CScript([CScript([OP_TRUE])])
txid = tx.rehash()

self.log.debug("Wait until tx is in node[1]'s mempool")
p2p_rebroadcast_wallet.send_txs_and_test([tx], self.nodes[1])

self.log.debug("Check that node[1] will send the tx to node[0] even though it is already in the mempool")
self.connect_nodes(1, 0)

def in_mempool():
self.bump_mocktime(1)
return txid in self.nodes[0].getrawmempool()

with self.nodes[1].assert_debug_log(["Force relaying tx {} from whitelisted peer=0".format(txid)]):
p2p_rebroadcast_wallet.send_txs_and_test([tx], self.nodes[1])
wait_until(in_mempool)

self.log.debug("Check that node[1] will not send an invalid tx to node[0]")
tx.vout[0].nValue += 1
txid = tx.rehash()
p2p_rebroadcast_wallet.send_txs_and_test(
[tx],
self.nodes[1],
success=False,
reject_reason='Not relaying non-mempool transaction {} from whitelisted peer=0'.format(txid),
)

def checkpermission(self, args, expectedPermissions, whitelisted):
self.restart_node(1, args)
self.connect_nodes(0, 1)
@@ -100,9 +162,10 @@ class P2PPermissionsTests(BitcoinTestFramework):

def replaceinconfig(self, nodeid, old, new):
with open(self.nodes[nodeid].bitcoinconf, encoding="utf8") as f:
newText=f.read().replace(old, new)
newText = f.read().replace(old, new)
with open(self.nodes[nodeid].bitcoinconf, 'w', encoding="utf8") as f:
f.write(newText)


if __name__ == '__main__':
P2PPermissionsTests().main()

@@ -30,7 +30,7 @@ class RawTransactionsTest(BitcoinTestFramework):
self.extra_args = [['-usehd=0']] * self.num_nodes
# This test isn't testing tx relay. Set whitelist on the peers for
# instant tx relay.
self.extra_args = [['-whitelist=127.0.0.1']] * self.num_nodes
self.extra_args = [['-whitelist=noban@127.0.0.1']] * self.num_nodes

def skip_test_if_missing_module(self):
self.skip_if_no_wallet()

@@ -133,12 +133,20 @@ class PSBTTest(BitcoinTestFramework):
psbt_orig = self.nodes[0].createpsbt([{"txid":txid1, "vout":vout1}, {"txid":txid2, "vout":vout2}], {self.nodes[0].getnewaddress():25.999})

# Update psbts, should only have data for one input and not the other
psbt1 = self.nodes[1].walletprocesspsbt(psbt_orig)['psbt']
psbt1 = self.nodes[1].walletprocesspsbt(psbt_orig, False, "ALL")['psbt']
psbt1_decoded = self.nodes[0].decodepsbt(psbt1)
assert psbt1_decoded['inputs'][0] and not psbt1_decoded['inputs'][1]
psbt2 = self.nodes[2].walletprocesspsbt(psbt_orig)['psbt']
# Check that BIP32 path was added
assert "bip32_derivs" in psbt1_decoded['inputs'][0]
psbt2 = self.nodes[2].walletprocesspsbt(psbt_orig, False, "ALL", False)['psbt']
psbt2_decoded = self.nodes[0].decodepsbt(psbt2)
assert not psbt2_decoded['inputs'][0] and psbt2_decoded['inputs'][1]
# Check that BIP32 paths were not added
assert "bip32_derivs" not in psbt2_decoded['inputs'][1]

# Sign PSBTs (workaround issue #18039)
psbt1 = self.nodes[1].walletprocesspsbt(psbt_orig)['psbt']
psbt2 = self.nodes[2].walletprocesspsbt(psbt_orig)['psbt']

# Combine, finalize, and send the psbts
combined = self.nodes[0].combinepsbt([psbt1, psbt2])

@@ -15,6 +15,7 @@ from .util import hex_str_to_bytes
# Note unlike in bitcoin, this address isn't bech32 since we don't (at this time) support bech32.
ADDRESS_BCRT1_UNSPENDABLE = 'yVg3NBUHNEhgDceqwVUjsZHreC5PBHnUo9'
ADDRESS_BCRT1_UNSPENDABLE_DESCRIPTOR = 'addr(yVg3NBUHNEhgDceqwVUjsZHreC5PBHnUo9)#e5kt0jtk'
ADDRESS_BCRT1_P2SH_OP_TRUE = '8zJctvfrzGZ5s1zQ3kagwyW1DsPYSQ4V2P'

chars = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'

@@ -75,7 +75,7 @@ class AvoidReuseTest(BitcoinTestFramework):
self.num_nodes = 2
# This test isn't testing txn relay/timing, so set whitelist on the
# peers for instant txn relay. This speeds up the test run time 2-3x.
self.extra_args = [["-whitelist=127.0.0.1"]] * self.num_nodes
self.extra_args = [["-whitelist=noban@127.0.0.1"]] * self.num_nodes

def skip_test_if_missing_module(self):
self.skip_if_no_wallet()

@@ -45,10 +45,10 @@ class WalletBackupTest(BitcoinTestFramework):
# nodes 1, 2,3 are spenders, let's give them a keypool=100
# whitelist all peers to speed up tx relay / mempool sync
self.extra_args = [
["-keypool=100", "-whitelist=127.0.0.1"],
["-keypool=100", "-whitelist=127.0.0.1"],
["-keypool=100", "-whitelist=127.0.0.1"],
["-whitelist=127.0.0.1"]
["-whitelist=noban@127.0.0.1", "-keypool=100"],
["-whitelist=noban@127.0.0.1", "-keypool=100"],
["-whitelist=noban@127.0.0.1", "-keypool=100"],
["-whitelist=noban@127.0.0.1"],
]
self.rpc_timeout = 120

@@ -267,6 +267,13 @@ def run_once(*, fuzz_pool, corpus, test_list, build_dir, use_valgrind):
logging.debug(output)
try:
result.check_returncode()
except subprocess.CalledProcessError as e:
if e.stdout:
logging.info(e.stdout)
if e.stderr:
logging.info(e.stderr)
logging.info("Target \"{}\" failed with exit code {}: {}".format(t, e.returncode, " ".join(args)))
sys.exit(1)
except subprocess.CalledProcessError as e:
if e.stdout:
logging.info(e.stdout)