Merge pull request #4334 from linuxsh2/bp-19
Backports v0.19 (16767, 16646, 16470, 16329, 16234, 16059, 15968, 15866, 15755, 15617, 15466, 15491)
commit 39e34e2b52

 configure.ac | 85
@@ -200,6 +200,12 @@ AC_ARG_ENABLE([glibc-back-compat],
   [use_glibc_compat=$enableval],
   [use_glibc_compat=no])
 
+AC_ARG_ENABLE([threadlocal],
+  [AS_HELP_STRING([--enable-threadlocal],
+  [enable features that depend on the c++ thread_local keyword (currently just thread names in debug logs). (default is to enabled if there is platform support and glibc-back-compat is not enabled)])],
+  [use_thread_local=$enableval],
+  [use_thread_local=auto])
+
 AC_ARG_ENABLE([asm],
   [AS_HELP_STRING([--disable-asm],
   [disable assembly routines (enabled by default)])],
@@ -881,42 +887,49 @@ AC_LINK_IFELSE([AC_LANG_SOURCE([
   ]
 )
 
-TEMP_LDFLAGS="$LDFLAGS"
-LDFLAGS="$TEMP_LDFLAGS $PTHREAD_CFLAGS"
-AC_MSG_CHECKING([for thread_local support])
-AC_LINK_IFELSE([AC_LANG_SOURCE([
-  #include <thread>
-  static thread_local int foo = 0;
-  static void run_thread() { foo++;}
-  int main(){
-  for(int i = 0; i < 10; i++) { std::thread(run_thread).detach();}
-  return foo;
-  }
-  ])],
-  [
-    case $host in
-      *mingw*)
-        # mingw32's implementation of thread_local has also been shown to behave
-        # erroneously under concurrent usage; see:
-        # https://gist.github.com/jamesob/fe9a872051a88b2025b1aa37bfa98605
-        AC_MSG_RESULT(no)
-        ;;
-      *darwin*)
-        # TODO enable thread_local on later versions of Darwin where it is
-        # supported (per https://stackoverflow.com/a/29929949)
-        AC_MSG_RESULT(no)
-        ;;
-      *)
-        AC_DEFINE(HAVE_THREAD_LOCAL,1,[Define if thread_local is supported.])
-        AC_MSG_RESULT(yes)
-        ;;
-    esac
-  ],
-  [
-    AC_MSG_RESULT(no)
-  ]
-)
-LDFLAGS="$TEMP_LDFLAGS"
+if test "x$use_thread_local" = xyes || { test "x$use_thread_local" = xauto && test "x$use_glibc_compat" = xno; }; then
+  TEMP_LDFLAGS="$LDFLAGS"
+  LDFLAGS="$TEMP_LDFLAGS $PTHREAD_CFLAGS"
+  AC_MSG_CHECKING([for thread_local support])
+  AC_LINK_IFELSE([AC_LANG_SOURCE([
+    #include <thread>
+    static thread_local int foo = 0;
+    static void run_thread() { foo++;}
+    int main(){
+    for(int i = 0; i < 10; i++) { std::thread(run_thread).detach();}
+    return foo;
+    }
+    ])],
+    [
+      case $host in
+        *mingw*)
+          # mingw32's implementation of thread_local has also been shown to behave
+          # erroneously under concurrent usage; see:
+          # https://gist.github.com/jamesob/fe9a872051a88b2025b1aa37bfa98605
+          AC_MSG_RESULT(no)
+          ;;
+        *darwin*)
+          # TODO enable thread_local on later versions of Darwin where it is
+          # supported (per https://stackoverflow.com/a/29929949)
+          AC_MSG_RESULT(no)
+          ;;
+        *freebsd*)
+          # FreeBSD's implementation of thread_local is also buggy (per
+          # https://groups.google.com/d/msg/bsdmailinglist/22ncTZAbDp4/Dii_pII5AwAJ)
+          AC_MSG_RESULT(no)
+          ;;
+        *)
+          AC_DEFINE(HAVE_THREAD_LOCAL,1,[Define if thread_local is supported.])
+          AC_MSG_RESULT(yes)
+          ;;
+      esac
+    ],
+    [
+      AC_MSG_RESULT(no)
+    ]
+  )
+  LDFLAGS="$TEMP_LDFLAGS"
+fi
 
 dnl check for gmtime_r(), fallback to gmtime_s() if that is unavailable
 dnl fail if neither are available.
@@ -2503,6 +2503,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
             if (addr.nTime <= 100000000 || addr.nTime > nNow + 10 * 60)
                 addr.nTime = nNow - 5 * 24 * 60 * 60;
             pfrom->AddAddressKnown(addr);
+            if (g_banman->IsBanned(addr)) continue; // Do not process banned addresses beyond remembering we received them
             bool fReachable = IsReachable(addr);
             if (addr.nTime > nSince && !pfrom->fGetAddr && vAddr.size() <= 10 && addr.IsRoutable())
             {
@@ -3459,8 +3460,11 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
         pfrom->vAddrToSend.clear();
         std::vector<CAddress> vAddr = connman->GetAddresses();
         FastRandomContext insecure_rand;
-        for (const CAddress &addr : vAddr)
-            pfrom->PushAddress(addr, insecure_rand);
+        for (const CAddress &addr : vAddr) {
+            if (!g_banman->IsBanned(addr)) {
+                pfrom->PushAddress(addr, insecure_rand);
+            }
+        }
         return true;
     }
 
@@ -1484,7 +1484,6 @@ UniValue combinepsbt(const JSONRPCRequest& request)
         throw JSONRPCTransactionError(error);
     }
 
-    UniValue result(UniValue::VOBJ);
     CDataStream ssTx(SER_NETWORK, PROTOCOL_VERSION);
     ssTx << merged_psbt;
     return EncodeBase64(ssTx.str());
@@ -9,6 +9,11 @@
 #include <atomic>
 #include <thread>
 
+#if (defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__DragonFly__))
+#include <pthread.h>
+#include <pthread_np.h>
+#endif
+
 #include <util/threadnames.h>
 
 #ifdef HAVE_SYS_PRCTL_H
@@ -82,6 +82,14 @@ bool IsWalletLoaded(const fs::path& wallet_path)
     return database && database->IsDatabaseLoaded(database_filename);
 }
 
+fs::path WalletDataFilePath(const fs::path& wallet_path)
+{
+    fs::path env_directory;
+    std::string database_filename;
+    SplitWalletPath(wallet_path, env_directory, database_filename);
+    return env_directory / database_filename;
+}
+
 /**
  * @param[in] wallet_path Path to wallet directory. Or (for backwards compatibility only) a path to a berkeley btree data file inside a wallet directory.
  * @param[out] database_filename Filename of berkeley btree data file inside the wallet directory.
@@ -108,6 +108,9 @@ public:
 /** Return whether a wallet database is currently loaded. */
 bool IsWalletLoaded(const fs::path& wallet_path);
 
+/** Given a wallet directory path or legacy file path, return path to main data file in the wallet database. */
+fs::path WalletDataFilePath(const fs::path& wallet_path);
+
 /** Get BerkeleyEnvironment and database filename given a wallet path. */
 std::shared_ptr<BerkeleyEnvironment> GetWalletEnv(const fs::path& wallet_path, std::string& database_filename);
 
@@ -4981,7 +4981,7 @@ bool CWallet::Verify(const WalletLocation& location, bool salvage_wallet, std::s
 
 std::shared_ptr<CWallet> CWallet::CreateWalletFromFile(const WalletLocation& location, uint64_t wallet_creation_flags)
 {
-    const std::string& walletFile = location.GetName();
+    const std::string& walletFile = WalletDataFilePath(location.GetPath()).string();
 
     // needed to restore wallet transaction meta data after -zapwallettxes
     std::vector<CWalletTx> vWtx;
@@ -581,8 +581,15 @@ ReadKeyValue(CWallet* pwallet, CDataStream& ssKey, CDataStream& ssValue,
                  strType != "minversion"){
             wss.m_unknown_records++;
         }
-    } catch (...)
-    {
+    } catch (const std::exception& e) {
+        if (strErr.empty()) {
+            strErr = e.what();
+        }
+        return false;
+    } catch (...) {
+        if (strErr.empty()) {
+            strErr = "Caught unknown exception in ReadKeyValue";
+        }
         return false;
     }
     return true;
@@ -57,9 +57,23 @@ class BIP65Test(BitcoinTestFramework):
         self.extra_args = [['-whitelist=127.0.0.1', '-dip3params=9000:9000']]
         self.setup_clean_chain = True
 
+    def test_cltv_info(self, *, is_active):
+        assert_equal(
+            next(s for s in self.nodes[0].getblockchaininfo()['softforks'] if s['id'] == 'bip65'),
+            {
+                "id": "bip65",
+                "version": 4,
+                "reject": {
+                    "status": is_active
+                }
+            },
+        )
+
     def run_test(self):
         self.nodes[0].add_p2p_connection(P2PInterface())
 
+        self.test_cltv_info(is_active=False)
+
         self.log.info("Mining %d blocks", CLTV_HEIGHT - 2)
         self.coinbase_txids = [self.nodes[0].getblock(b)['tx'][0] for b in self.nodes[0].generate(CLTV_HEIGHT - 2)]
         self.nodeaddress = self.nodes[0].getnewaddress()
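Note: the new test_cltv_info() helper keys off the 'softforks' array returned by the getblockchaininfo RPC. A minimal, self-contained sketch of the same lookup is below; the sample array is hypothetical and only mirrors the shape the helper expects, not output from a real node.

# Hypothetical sample of getblockchaininfo()['softforks']; the values mirror
# the expectation encoded in test_cltv_info() above, not real node output.
softforks = [
    {"id": "bip34", "version": 2, "reject": {"status": True}},
    {"id": "bip66", "version": 3, "reject": {"status": True}},
    {"id": "bip65", "version": 4, "reject": {"status": False}},
]

# Same selection expression the helper uses: pick the entry whose id is 'bip65'.
bip65 = next(s for s in softforks if s['id'] == 'bip65')
assert bip65 == {"id": "bip65", "version": 4, "reject": {"status": False}}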
@@ -79,7 +93,9 @@ class BIP65Test(BitcoinTestFramework):
         block.hashMerkleRoot = block.calc_merkle_root()
         block.solve()
 
+        self.test_cltv_info(is_active=False)
         self.nodes[0].p2p.send_and_ping(msg_block(block))
+        self.test_cltv_info(is_active=False)  # Not active as of current tip, but next block must obey rules
         assert_equal(self.nodes[0].getbestblockhash(), block.hash)
 
         self.log.info("Test that blocks must now be at least version 4")
@@ -137,7 +153,9 @@ class BIP65Test(BitcoinTestFramework):
         block.hashMerkleRoot = block.calc_merkle_root()
         block.solve()
 
+        self.test_cltv_info(is_active=False)  # Not active as of current tip, but next block must obey rules
         self.nodes[0].p2p.send_and_ping(msg_block(block))
+        self.test_cltv_info(is_active=True)  # Active as of current tip
         assert_equal(int(self.nodes[0].getbestblockhash(), 16), block.sha256)
 
 
@@ -21,6 +21,16 @@ class ConfArgsTest(BitcoinTestFramework):
         with open(os.path.join(self.nodes[0].datadir, 'dash.conf'), 'a', encoding='utf-8') as conf:
             conf.write('includeconf={}\n'.format(inc_conf_file_path))
 
+        self.nodes[0].assert_start_raises_init_error(
+            expected_msg='Error parsing command line arguments: Invalid parameter -dash_cli',
+            extra_args=['-dash_cli=1'],
+        )
+        with open(inc_conf_file_path, 'w', encoding='utf-8') as conf:
+            conf.write('dash_conf=1\n')
+        with self.nodes[0].assert_debug_log(expected_msgs=['Ignoring unknown configuration value dash_conf']):
+            self.start_node(0)
+        self.stop_node(0)
+
         with open(inc_conf_file_path, 'w', encoding='utf-8') as conf:
             conf.write('-dash=1\n')
         self.nodes[0].assert_start_raises_init_error(expected_msg='Error reading configuration file: parse error on line 1: -dash=1, options in configuration file must be specified without leading -')
@@ -45,9 +45,23 @@ class BIP66Test(BitcoinTestFramework):
         self.extra_args = [['-whitelist=127.0.0.1', '-dip3params=9000:9000']]
         self.setup_clean_chain = True
 
+    def test_dersig_info(self, *, is_active):
+        assert_equal(
+            next(s for s in self.nodes[0].getblockchaininfo()['softforks'] if s['id'] == 'bip66'),
+            {
+                "id": "bip66",
+                "version": 3,
+                "reject": {
+                    "status": is_active
+                }
+            },
+        )
+
     def run_test(self):
         self.nodes[0].add_p2p_connection(P2PInterface())
 
+        self.test_dersig_info(is_active=False)
+
         self.log.info("Mining %d blocks", DERSIG_HEIGHT - 2)
         self.coinbase_txids = [self.nodes[0].getblock(b)['tx'][0] for b in self.nodes[0].generate(DERSIG_HEIGHT - 2)]
         self.nodeaddress = self.nodes[0].getnewaddress()
@@ -68,7 +82,9 @@ class BIP66Test(BitcoinTestFramework):
         block.rehash()
         block.solve()
 
+        self.test_dersig_info(is_active=False)
         self.nodes[0].p2p.send_and_ping(msg_block(block))
+        self.test_dersig_info(is_active=False)  # Not active as of current tip, but next block must obey rules
         assert_equal(self.nodes[0].getbestblockhash(), block.hash)
 
         self.log.info("Test that blocks must now be at least version 3")
@@ -131,8 +147,11 @@ class BIP66Test(BitcoinTestFramework):
         block.rehash()
         block.solve()
 
+        self.test_dersig_info(is_active=False)  # Not active as of current tip, but next block must obey rules
         self.nodes[0].p2p.send_and_ping(msg_block(block))
+        self.test_dersig_info(is_active=True)  # Active as of current tip
         assert_equal(int(self.nodes[0].getbestblockhash(), 16), block.sha256)
 
+
 if __name__ == '__main__':
     BIP66Test().main()
@@ -422,6 +422,7 @@ class P2PInterface(P2PConnection):
 
     def wait_for_tx(self, txid, timeout=60):
         def test_function():
+            assert self.is_connected
             if not self.last_message.get('tx'):
                 return False
             return self.last_message['tx'].tx.rehash() == txid
@@ -429,11 +430,15 @@ class P2PInterface(P2PConnection):
         wait_until(test_function, timeout=timeout, lock=mininode_lock)
 
     def wait_for_block(self, blockhash, timeout=60):
-        test_function = lambda: self.last_message.get("block") and self.last_message["block"].block.rehash() == blockhash
+        def test_function():
+            assert self.is_connected
+            return self.last_message.get("block") and self.last_message["block"].block.rehash() == blockhash
+
         wait_until(test_function, timeout=timeout, lock=mininode_lock)
 
     def wait_for_header(self, blockhash, timeout=60):
         def test_function():
+            assert self.is_connected
             last_headers = self.last_message.get('headers')
             if not last_headers:
                 return False
@@ -448,7 +453,11 @@ class P2PInterface(P2PConnection):
         value must be explicitly cleared before calling this method, or this will return
         immediately with success. TODO: change this method to take a hash value and only
         return true if the correct block/tx has been requested."""
-        test_function = lambda: self.last_message.get("getdata")
+
+        def test_function():
+            assert self.is_connected
+            return self.last_message.get("getdata")
+
         wait_until(test_function, timeout=timeout, lock=mininode_lock)
 
     def wait_for_getheaders(self, timeout=60):
@@ -458,20 +467,30 @@ class P2PInterface(P2PConnection):
         value must be explicitly cleared before calling this method, or this will return
         immediately with success. TODO: change this method to take a hash value and only
         return true if the correct block header has been requested."""
-        test_function = lambda: self.last_message.get("getheaders")
+
+        def test_function():
+            assert self.is_connected
+            return self.last_message.get("getheaders")
+
         wait_until(test_function, timeout=timeout, lock=mininode_lock)
 
     def wait_for_inv(self, expected_inv, timeout=60):
         """Waits for an INV message and checks that the first inv object in the message was as expected."""
         if len(expected_inv) > 1:
             raise NotImplementedError("wait_for_inv() will only verify the first inv object")
-        test_function = lambda: self.last_message.get("inv") and \
+
+        def test_function():
+            assert self.is_connected
+            return self.last_message.get("inv") and \
                 self.last_message["inv"].inv[0].type == expected_inv[0].type and \
                 self.last_message["inv"].inv[0].hash == expected_inv[0].hash
+
         wait_until(test_function, timeout=timeout, lock=mininode_lock)
 
     def wait_for_verack(self, timeout=60):
-        test_function = lambda: self.message_count["verack"]
+        def test_function():
+            return self.message_count["verack"]
+
         wait_until(test_function, timeout=timeout, lock=mininode_lock)
 
     # Message sending helper functions
@@ -483,7 +502,11 @@ class P2PInterface(P2PConnection):
     # Sync up with the node
     def sync_with_ping(self, timeout=60):
         self.send_message(msg_ping(nonce=self.ping_counter))
-        test_function = lambda: self.last_message.get("pong") and self.last_message["pong"].nonce == self.ping_counter
+
+        def test_function():
+            assert self.is_connected
+            return self.last_message.get("pong") and self.last_message["pong"].nonce == self.ping_counter
+
         wait_until(test_function, timeout=timeout, lock=mininode_lock)
         self.ping_counter += 1
 
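Note: each helper in the hunks above is converted from a bare lambda into a named test_function that asserts self.is_connected first, so a dropped connection fails the test immediately instead of silently burning the whole wait_until timeout. A minimal, self-contained sketch of that shape is below; wait_until here is a simplified stand-in for the framework helper (the real one also takes lock=mininode_lock), and FakePeer is a hypothetical stand-in for P2PInterface.

import time

def wait_until(predicate, *, timeout=60, sleep=0.05):
    # Simplified stand-in for the framework's wait_until(): poll the predicate
    # until it returns a truthy value or the timeout expires.
    deadline = time.time() + timeout
    while time.time() < deadline:
        if predicate():
            return
        time.sleep(sleep)
    raise AssertionError("predicate not satisfied within {}s".format(timeout))

class FakePeer:
    # Hypothetical stand-in for P2PInterface: just enough state for the sketch.
    def __init__(self):
        self.is_connected = True
        self.last_message = {}

    def wait_for_getdata(self, timeout=60):
        # Same shape as the reworked helpers: a named predicate that checks
        # the connection is still up before inspecting last_message, so a
        # disconnect raises right away rather than timing out.
        def test_function():
            assert self.is_connected
            return self.last_message.get("getdata")

        wait_until(test_function, timeout=timeout)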
|
@@ -336,6 +336,7 @@ def initialize_datadir(dirname, n, chain):
         f.write("discover=0\n")
         f.write("listenonion=0\n")
         f.write("printtoconsole=0\n")
+        f.write("upnp=0\n")
     os.makedirs(os.path.join(datadir, 'stderr'), exist_ok=True)
     os.makedirs(os.path.join(datadir, 'stdout'), exist_ok=True)
     return datadir
@@ -506,6 +506,11 @@ class TestHandler:
                               log_stderr))
         if not self.jobs:
             raise IndexError('pop from empty list')
+
+        # Print remaining running jobs when all jobs have been started.
+        if not self.test_list:
+            print("Remaining jobs: [{}]".format(", ".join(j[0] for j in self.jobs)))
+
         dot_count = 0
         while True:
             # Return first proc that finishes
@@ -161,11 +161,12 @@ class ImportRescanTest(BitcoinTestFramework):
         timestamp = self.nodes[0].getblockheader(self.nodes[0].getbestblockhash())["time"]
         set_node_times(self.nodes, timestamp + TIMESTAMP_WINDOW + 1)
         self.nodes[0].generate(1)
-        self.sync_blocks()
+        self.sync_all()
 
         # For each variation of wallet key import, invoke the import RPC and
         # check the results from getbalance and listtransactions.
         for variant in IMPORT_VARIANTS:
+            self.log.info('Run import for variant {}'.format(variant))
             variant.expect_disabled = variant.rescan == Rescan.yes and variant.prune and variant.call == Call.single
             expect_rescan = variant.rescan == Rescan.yes and not variant.expect_disabled
             variant.node = self.nodes[2 + IMPORT_NODES.index(ImportNode(variant.prune, expect_rescan))]
@@ -187,10 +188,11 @@ class ImportRescanTest(BitcoinTestFramework):
         # Generate a block containing the new transactions.
         self.nodes[0].generate(1)
         assert_equal(self.nodes[0].getrawmempool(), [])
-        self.sync_blocks()
+        self.sync_all()
 
         # Check the latest results from getbalance and listtransactions.
         for variant in IMPORT_VARIANTS:
+            self.log.info('Run check for variant {}'.format(variant))
             if not variant.expect_disabled:
                 variant.expected_balance += variant.sent_amount
                 variant.expected_txs += 1
@@ -9,6 +9,11 @@
 
 export LC_ALL=C
 
+if ! command -v codespell > /dev/null; then
+    echo "Skipping spell check linting since codespell is not installed."
+    exit 0
+fi
+
 IGNORE_WORDS_FILE=test/lint/lint-spelling.ignore-words.txt
 if ! codespell --check-filenames --disable-colors --quiet-level=7 --ignore-words=${IGNORE_WORDS_FILE} $(git ls-files -- ":(exclude)build-aux/m4/" ":(exclude)contrib/seeds/*.txt" ":(exclude)depends/" ":(exclude)doc/release-notes/" ":(exclude)src/crypto/" ":(exclude)src/leveldb/" ":(exclude)src/qt/locale/" ":(exclude)src/secp256k1/" ":(exclude)src/univalue/"); then
     echo "^ Warning: codespell identified likely spelling errors. Any false positives? Add them to the list of ignored words in ${IGNORE_WORDS_FILE}"