Mirror of https://github.com/dashpay/dash.git
Merge pull request #5858 from knst/bp-v21-p22
backport: bitcoin#20112, #20115, #20258, #20285, #20294, #20299, #20315, #20328, #20339, #20368, #20390
commit cb09d35fc6

.cirrus.yml (70 lines changed)
@@ -8,7 +8,7 @@ container:
  memory: 8G  # Set to 8GB to avoid OOM. https://cirrus-ci.org/guide/linux/#linux-containers
  kvm: true  # Use kvm to avoid spurious CI failures in the default virtualization cluster, see https://github.com/bitcoin/bitcoin/issues/20093
env:
-  PACKAGE_MANAGER_INSTALL : "apt-get update && apt-get install -y"
+  PACKAGE_MANAGER_INSTALL: "apt-get update && apt-get install -y"
  MAKEJOBS: "-j4"
  DANGER_RUN_CI_ON_HOST: "1"  # Containers will be discarded after the run, so there is no risk that the ci scripts modify the system
  TEST_RUNNER_PORT_MIN: "14000"  # Must be larger than 12321, which is used for the http cache. See https://cirrus-ci.org/guide/writing-tasks/#http-cache
@@ -19,6 +19,7 @@ env:

# https://cirrus-ci.org/guide/tips-and-tricks/#sharing-configuration-between-tasks
global_task_template: &GLOBAL_TASK_TEMPLATE
  skip: $CIRRUS_REPO_FULL_NAME == "bitcoin-core/gui" && $CIRRUS_PR == ""  # No need to run on the read-only mirror, unless it is a PR. https://cirrus-ci.org/guide/writing-tasks/#conditional-task-execution
  ccache_cache:
    folder: "/tmp/ccache_dir"
  depends_built_cache:
@@ -28,6 +29,7 @@ global_task_template: &GLOBAL_TASK_TEMPLATE
  depends_releases_cache:
    folder: "/tmp/cirrus-ci-build/releases"
  merge_base_script:
    - if [ "$CIRRUS_PR" = "" ]; then exit 0; fi
    - bash -c "$PACKAGE_MANAGER_INSTALL git"
    - git fetch $CIRRUS_REPO_CLONE_URL $CIRRUS_BASE_BRANCH
    - git config --global user.email "ci@ci.ci"
@@ -37,6 +39,30 @@ global_task_template: &GLOBAL_TASK_TEMPLATE
    - ./ci/test_run_all.sh

task:
  name: 'ARM [GOAL: install] [buster] [unit tests, no functional tests]'
  << : *GLOBAL_TASK_TEMPLATE
  container:
    image: debian:buster
  env:
    FILE_ENV: "./ci/test/00_setup_env_arm.sh"

task:
  name: 'Win64 [GOAL: deploy] [unit tests, no gui, no boost::process, no functional tests]'
  << : *GLOBAL_TASK_TEMPLATE
  container:
    image: ubuntu:bionic
  env:
    FILE_ENV: "./ci/test/00_setup_env_win64.sh"

task:
  name: 'x86_64 Linux [GOAL: install] [focal] [previous releases, uses qt5 dev package and some depends packages] [unsigned char]'
  << : *GLOBAL_TASK_TEMPLATE
  container:
    image: ubuntu:focal
  env:
    FILE_ENV: "./ci/test/00_setup_env_native_qt5.sh"

task:
  name: 'x86_64 Linux [GOAL: install] [focal] [depends, sanitizers: thread (TSan), no gui]'
  << : *GLOBAL_TASK_TEMPLATE
@@ -48,6 +74,14 @@ task:
    MAKEJOBS: "-j8"
    FILE_ENV: "./ci/test/00_setup_env_native_tsan.sh"

task:
  name: 'x86_64 Linux [GOAL: install] [focal] [depends, sanitizers: memory (MSan)]'
  << : *GLOBAL_TASK_TEMPLATE
  container:
    image: ubuntu:focal
  env:
    FILE_ENV: "./ci/test/00_setup_env_native_msan.sh"

task:
  name: 'x86_64 Linux [GOAL: install] [focal] [no depends, only system libs, sanitizers: address/leak (ASan + LSan) + undefined (UBSan) + integer]'
  << : *GLOBAL_TASK_TEMPLATE
@@ -57,9 +91,39 @@ task:
    FILE_ENV: "./ci/test/00_setup_env_native_asan.sh"

task:
-  name: 'x86_64 Linux [GOAL: install] [focal] [no depends, only system libs, fuzzers under valgrind]'
+  name: 'x86_64 Linux [GOAL: install] [focal] [no depends, only system libs, sanitizers: fuzzer,address,undefined]'
  << : *GLOBAL_TASK_TEMPLATE
  container:
    image: ubuntu:focal
  env:
-    FILE_ENV: "./ci/test/00_setup_env_native_fuzz_with_valgrind.sh"
+    FILE_ENV: "./ci/test/00_setup_env_native_fuzz.sh"

task:
  name: 'x86_64 Linux [GOAL: install] [focal] [multiprocess]'
  << : *GLOBAL_TASK_TEMPLATE
  container:
    image: ubuntu:focal
  env:
    FILE_ENV: "./ci/test/00_setup_env_native_multiprocess.sh"

task:
  name: 'macOS 10.12 [GOAL: deploy] [no functional tests]'
  << : *GLOBAL_TASK_TEMPLATE
  container:
    image: ubuntu:focal
  env:
    FILE_ENV: "./ci/test/00_setup_env_mac.sh"

task:
  name: 'macOS 10.14 native [GOAL: install] [GUI] [no depends]'
  macos_brew_addon_script:
    - brew install boost libevent berkeley-db4 qt miniupnpc ccache zeromq qrencode sqlite libtool automake pkg-config gnu-getopt
  << : *GLOBAL_TASK_TEMPLATE
  osx_instance:
    # Use latest image, but hardcode version to avoid silent upgrades (and breaks)
    image: catalina-xcode-12.1  # https://cirrus-ci.org/guide/macOS
  env:
    DANGER_RUN_CI_ON_HOST: "true"
    CI_USE_APT_INSTALL: "no"
    PACKAGE_MANAGER_INSTALL: "echo"  # Nothing to do
    FILE_ENV: "./ci/test/00_setup_env_mac_host.sh"
.travis.yml (60 lines changed)

@@ -185,67 +185,7 @@ after_success:
      script:
        - set -o errexit; source ./ci/lint/06_script.sh

    - stage: test
      name: 'ARM [GOAL: install] [focal] [unit tests, no functional tests]'
      arch: arm64  # Can disable QEMU_USER_CMD and run the tests natively without qemu
      env: >-
        FILE_ENV="./ci/test/00_setup_env_arm.sh"
        QEMU_USER_CMD=""

    - stage: test
      name: 'S390x [GOAL: install] [focal] [unit tests, functional tests]'
      arch: s390x  # Can disable QEMU_USER_CMD and run the tests natively without qemu
      env: >-
        FILE_ENV="./ci/test/00_setup_env_s390x.sh"
        QEMU_USER_CMD=""

    - stage: test
      name: 'Win64 [GOAL: deploy] [unit tests, no gui, no boost::process, no functional tests]'
      env: >-
        FILE_ENV="./ci/test/00_setup_env_win64.sh"

    - stage: test
      name: 'x86_64 Linux [GOAL: install] [focal] [previous releases, uses qt5 dev package and some depends packages] [unsigned char]'
      env: >-
        FILE_ENV="./ci/test/00_setup_env_native_qt5.sh"
      # x86_64 Linux (xenial, no depends, only system libs, sanitizers: thread (TSan))

    - stage: test
      name: 'x86_64 Linux [GOAL: install] [focal] [no depends, only system libs, sanitizers: fuzzer,address,undefined]'
      env: >-
        FILE_ENV="./ci/test/00_setup_env_native_fuzz.sh"

    - stage: test
      name: 'x86_64 Linux [GOAL: install] [focal] [multiprocess]'
      env: >-
        FILE_ENV="./ci/test/00_setup_env_native_multiprocess.sh"

    - stage: test
      name: 'x86_64 Linux [GOAL: install] [focal] [no wallet]'
      env: >-
        FILE_ENV="./ci/test/00_setup_env_native_nowallet.sh"

    - stage: test
      name: 'macOS 10.12 [GOAL: deploy] [no functional tests]'
      env: >-
        FILE_ENV="./ci/test/00_setup_env_mac.sh"

    - stage: test
      name: 'macOS 10.14 native [GOAL: install] [GUI] [no depends]'
      os: osx
      # Use the most recent version:
      # Xcode 11.3.1, macOS 10.14, SDK 10.15
      # https://docs.travis-ci.com/user/reference/osx/#macos-version
      osx_image: xcode11.3
      addons:
        homebrew:
          packages:
            - berkeley-db4
            - miniupnpc
            - qrencode
            - ccache
            - zeromq
      env: >-
        DANGER_RUN_CI_ON_HOST=true
        CI_USE_APT_INSTALL=no
        FILE_ENV="./ci/test/00_setup_env_mac_host.sh"
@@ -29,8 +29,8 @@ if [ "$DIRECT_WINE_EXEC_TESTS" = "true" ]; then
  wine ./src/test/test_dash.exe
else
  if [ "$RUN_UNIT_TESTS_SEQUENTIAL" = "true" ]; then
-    ./src/test/test_dash --catch_system_errors=no -l test_suite
+    ${TEST_RUNNER_ENV} ./src/test/test_dash --catch_system_errors=no -l test_suite
  else
-    make $MAKEJOBS check VERBOSE=1
+    ${TEST_RUNNER_ENV} make $MAKEJOBS check VERBOSE=1
  fi
fi

@@ -11,12 +11,9 @@ export HOST=x86_64-apple-darwin
export PIP_PACKAGES="zmq lief"
export GOAL="install"
export BITCOIN_CONFIG="--with-gui --enable-reduce-exports --disable-miner --enable-werror --with-boost-process"
export CI_OS_NAME="macos"
export NO_DEPENDS=1
export OSX_SDK=""
export CCACHE_SIZE=300M

export RUN_SECURITY_TESTS="true"
if [ "$TRAVIS_REPO_SLUG" != "dashpay/dash" ]; then
  export RUN_FUNCTIONAL_TESTS="false"
  export EXPECTED_TESTS_DURATION_IN_SECONDS=200
fi

@@ -19,6 +19,7 @@ fi
# Use debian to avoid 404 apt errors
export CONTAINER_NAME=ci_s390x
export RUN_UNIT_TESTS=true
export TEST_RUNNER_ENV="LC_ALL=C"
export RUN_FUNCTIONAL_TESTS=true
export GOAL="install"
export BITCOIN_CONFIG="--enable-reduce-exports --disable-gui-tests --with-boost-process"  # GUI tests disabled for now, see https://github.com/bitcoin/bitcoin/issues/23730

@@ -8,7 +8,7 @@ export LC_ALL=C.UTF-8

export CONTAINER_NAME=ci_win64
export HOST=x86_64-w64-mingw32
-export PACKAGES="python3 nsis g++-mingw-w64-x86-64 wine-binfmt wine64"
+export PACKAGES="python3 nsis g++-mingw-w64-x86-64 wine-binfmt wine64 file"
export DPKG_ADD_ARCH="i386"
export RUN_FUNCTIONAL_TESTS=false
export RUN_SECURITY_TESTS="false"

@@ -13,8 +13,8 @@ if [[ $QEMU_USER_CMD == qemu-s390* ]]; then
  export LC_ALL=C
fi

-if [ "$TRAVIS_OS_NAME" == "osx" ]; then
-  ${CI_RETRY_EXE} pip3 install $PIP_PACKAGES
+if [ "$CI_OS_NAME" == "macos" ]; then
+  IN_GETOPT_BIN="/usr/local/opt/gnu-getopt/bin/getopt" ${CI_RETRY_EXE} pip3 install --user $PIP_PACKAGES
fi

# Create folders that are mounted into the docker

@@ -26,9 +26,7 @@ export LSAN_OPTIONS="suppressions=${BASE_BUILD_DIR}/test/sanitizer_suppressions/
export TSAN_OPTIONS="suppressions=${BASE_BUILD_DIR}/test/sanitizer_suppressions/tsan:halt_on_error=1"
export UBSAN_OPTIONS="suppressions=${BASE_BUILD_DIR}/test/sanitizer_suppressions/ubsan:print_stacktrace=1:halt_on_error=1:report_error_type=1"
env | grep -E '^(BASE_|QEMU_|CCACHE_|LC_ALL|BOOST_TEST_RANDOM|DEBIAN_FRONTEND|CONFIG_SHELL|(ASAN|LSAN|TSAN|UBSAN)_OPTIONS|PREVIOUS_RELEASES_DIR))' | tee /tmp/env
-if [[ $HOST = *-mingw32 ]]; then
-  DOCKER_ADMIN="--cap-add SYS_ADMIN"
-elif [[ $BITCOIN_CONFIG = *--with-sanitizers=*address* ]]; then  # If ran with (ASan + LSan), Docker needs access to ptrace (https://github.com/google/sanitizers/issues/764)
+if [[ $BITCOIN_CONFIG = *--with-sanitizers=*address* ]]; then  # If ran with (ASan + LSan), Docker needs access to ptrace (https://github.com/google/sanitizers/issues/764)
  DOCKER_ADMIN="--cap-add SYS_PTRACE"
fi

@@ -72,16 +70,16 @@ elif [ "$CI_USE_APT_INSTALL" != "no" ]; then
  fi
fi

-if [ "$TRAVIS_OS_NAME" == "osx" ]; then
+if [ "$CI_OS_NAME" == "macos" ]; then
  top -l 1 -s 0 | awk ' /PhysMem/ {print}'
  echo "Number of CPUs: $(sysctl -n hw.logicalcpu)"
else
  DOCKER_EXEC free -m -h
  DOCKER_EXEC echo "Number of CPUs \(nproc\):" \$\(nproc\)
  DOCKER_EXEC echo $(lscpu | grep Endian)
  DOCKER_EXEC echo "Free disk space:"
  DOCKER_EXEC df -h
fi
DOCKER_EXEC echo "Free disk space:"
DOCKER_EXEC df -h

if [ ! -d ${DIR_QA_ASSETS} ]; then
if [ "$RUN_FUZZ_TESTS" = "true" ]; then

@@ -7,7 +7,7 @@
export LC_ALL=C.UTF-8

# Make sure default datadir does not exist and is never read by creating a dummy file
-if [ "$TRAVIS_OS_NAME" == "osx" ]; then
+if [ "$CI_OS_NAME" == "macos" ]; then
  echo > $HOME/Library/Application\ Support/DashCore
else
  DOCKER_EXEC echo \> \$HOME/.dashcore
ci/test/wrap-wine.sh (new executable file, 20 lines)

@@ -0,0 +1,20 @@
#!/usr/bin/env bash
#
# Copyright (c) 2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.

export LC_ALL=C.UTF-8

for b_name in {"${BASE_OUTDIR}/bin"/*,src/secp256k1/*tests,src/univalue/{no_nul,test_json,unitester,object}}.exe; do
  # shellcheck disable=SC2044
  for b in $(find "${BASE_ROOT_DIR}" -executable -type f -name "$(basename $b_name)"); do
    if (file "$b" | grep "Windows"); then
      echo "Wrap $b ..."
      mv "$b" "${b}_orig"
      echo '#!/usr/bin/env bash' > "$b"
      echo "wine64 \"${b}_orig\" \"\$@\"" >> "$b"
      chmod +x "$b"
    fi
  done
done
@@ -16,11 +16,13 @@
#include <rpc/protocol.h>
#include <rpc/request.h>
#include <stacktraces.h>
#include <tinyformat.h>
#include <util/strencodings.h>
#include <util/system.h>
#include <util/translation.h>
#include <util/url.h>

#include <algorithm>
#include <cmath>
#include <functional>
#include <memory>

@@ -306,12 +308,12 @@ class NetinfoRequestHandler : public BaseRequestHandler
{
private:
    static constexpr int8_t UNKNOWN_NETWORK{-1};
-   static constexpr size_t m_networks_size{3};
+   static constexpr uint8_t m_networks_size{3};
    const std::array<std::string, m_networks_size> m_networks{{"ipv4", "ipv6", "onion"}};
    std::array<std::array<uint16_t, m_networks_size + 2>, 3> m_counts{{{}}}; //!< Peer counts by (in/out/total, networks/total/block-relay)
    int8_t NetworkStringToId(const std::string& str) const
    {
-       for (size_t i = 0; i < m_networks_size; ++i) {
+       for (uint8_t i = 0; i < m_networks_size; ++i) {
            if (str == m_networks.at(i)) return i;
        }
        return UNKNOWN_NETWORK;
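Note: the m_counts member documented in the hunk above is a small fixed-size matrix indexed by direction (in/out/total) and by network id, with two extra columns for the all-networks total and the block-relay-only count. The following standalone sketch is not part of the diff; it only illustrates that indexing scheme, with names mirroring the diff and all surrounding code assumed.

#include <array>
#include <cstdint>
#include <string>

static constexpr uint8_t NETWORKS_SIZE{3};
static constexpr int8_t UNKNOWN_NETWORK{-1};
static const std::array<std::string, NETWORKS_SIZE> NETWORKS{{"ipv4", "ipv6", "onion"}};

// Rows: 0 = inbound, 1 = outbound, 2 = total.
// Columns: one per network, then an all-networks total, then a block-relay-only count.
static std::array<std::array<uint16_t, NETWORKS_SIZE + 2>, 3> g_counts{{{}}};

static int8_t NetworkStringToId(const std::string& str)
{
    for (uint8_t i = 0; i < NETWORKS_SIZE; ++i) {
        if (str == NETWORKS.at(i)) return i;
    }
    return UNKNOWN_NETWORK;
}

static void CountPeer(const std::string& network, bool is_outbound, bool is_block_relay)
{
    const int8_t net_id{NetworkStringToId(network)};
    if (net_id == UNKNOWN_NETWORK) return;
    for (const size_t row : {is_outbound ? size_t{1} : size_t{0}, size_t{2}}) {  // direction row, then totals row
        ++g_counts.at(row).at(net_id);         // per-network column
        ++g_counts.at(row).at(NETWORKS_SIZE);  // all-networks total column
        if (is_block_relay) ++g_counts.at(row).at(NETWORKS_SIZE + 1);  // block-relay-only column
    }
}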
@@ -323,21 +325,22 @@ private:
    bool IsVersionSelected() const { return m_details_level == 3 || m_details_level == 4; }
    bool m_is_asmap_on{false};
    size_t m_max_addr_length{0};
+   size_t m_max_age_length{4};
    size_t m_max_id_length{2};
    struct Peer {
-       int id;
-       int mapped_as;
-       int version;
-       int64_t conn_time;
+       std::string addr;
+       std::string sub_version;
+       std::string network;
+       std::string age;
+       double min_ping;
+       double ping;
        int64_t last_blck;
        int64_t last_recv;
        int64_t last_send;
        int64_t last_trxn;
-       double min_ping;
-       double ping;
-       std::string addr;
-       std::string network;
-       std::string sub_version;
+       int id;
+       int mapped_as;
+       int version;
        bool is_block_relay;
        bool is_outbound;
        bool operator<(const Peer& rhs) const { return std::tie(is_outbound, min_ping) < std::tie(rhs.is_outbound, rhs.min_ping); }
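Note: the Peer ordering above is the common std::tie idiom: build two tuples of references and compare them lexicographically, so inbound peers (is_outbound == false) sort before outbound ones and ties are broken by the lowest minimum ping. A self-contained sketch of the same idiom with made-up data follows; it is illustrative only and not taken from the diff.

#include <algorithm>
#include <string>
#include <tuple>
#include <vector>

struct Peer {
    std::string addr;
    double min_ping;
    bool is_outbound;
    // Lexicographic comparison: direction first (false < true), then min_ping.
    bool operator<(const Peer& rhs) const
    {
        return std::tie(is_outbound, min_ping) < std::tie(rhs.is_outbound, rhs.min_ping);
    }
};

int main()
{
    std::vector<Peer> peers{{"a", 0.25, true}, {"b", 0.03, false}, {"c", 0.10, true}};
    std::sort(peers.begin(), peers.end());  // uses Peer::operator<
    // Order is now: b (inbound, 0.03), c (outbound, 0.10), a (outbound, 0.25).
}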
@@ -346,9 +349,16 @@ private:
    std::string ChainToString() const
    {
        if (gArgs.GetChainName() == CBaseChainParams::TESTNET) return " testnet";
        if (gArgs.GetChainName() == CBaseChainParams::DEVNET) return " devnet";
        if (gArgs.GetChainName() == CBaseChainParams::REGTEST) return " regtest";
        return "";
    }
    std::string PingTimeToString(double seconds) const
    {
        if (seconds < 0) return "";
        const double milliseconds{round(1000 * seconds)};
        return milliseconds > 999999 ? "-" : ToString(milliseconds);
    }
    const UniValue NetinfoHelp()
    {
        return std::string{
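Note: the PingTimeToString helper introduced above appears to normalize a ping value in seconds to a millisecond column entry: empty for an unknown (negative) ping and "-" when the rounded value would overflow the column. The standalone re-creation below is only an approximation of that behaviour (the project's version relies on its own ToString), shown to make the expected outputs concrete.

#include <cmath>
#include <cstdint>
#include <iostream>
#include <string>

std::string PingTimeToString(double seconds)
{
    if (seconds < 0) return "";                              // unknown ping: print nothing
    const double milliseconds{std::round(1000 * seconds)};
    if (milliseconds > 999999) return "-";                   // would not fit the fixed-width column
    return std::to_string(static_cast<int64_t>(milliseconds));
}

int main()
{
    std::cout << PingTimeToString(0.0421) << "\n";  // "42"
    std::cout << PingTimeToString(-1.0) << "\n";    // ""
    std::cout << PingTimeToString(1500.0) << "\n";  // "-"
}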
@@ -471,10 +481,12 @@ public:
            const double min_ping{peer["minping"].isNull() ? -1 : peer["minping"].get_real()};
            const double ping{peer["pingtime"].isNull() ? -1 : peer["pingtime"].get_real()};
            const std::string addr{peer["addr"].get_str()};
+           const std::string age{conn_time == 0 ? "" : ToString((m_time_now - conn_time) / 60)};
            const std::string sub_version{peer["subver"].get_str()};
-           m_peers.push_back({peer_id, mapped_as, version, conn_time, last_blck, last_recv, last_send, last_trxn, min_ping, ping, addr, network, sub_version, is_block_relay, is_outbound});
-           m_max_id_length = std::max(ToString(peer_id).length(), m_max_id_length);
+           m_peers.push_back({addr, sub_version, network, age, min_ping, ping, last_blck, last_recv, last_send, last_trxn, peer_id, mapped_as, version, is_block_relay, is_outbound});
            m_max_addr_length = std::max(addr.length() + 1, m_max_addr_length);
+           m_max_age_length = std::max(age.length(), m_max_age_length);
+           m_max_id_length = std::max(ToString(peer_id).length(), m_max_id_length);
            m_is_asmap_on |= (mapped_as != 0);
        }
    }
@@ -485,23 +497,24 @@ public:
        // Report detailed peer connections list sorted by direction and minimum ping time.
        if (DetailsRequested() && !m_peers.empty()) {
            std::sort(m_peers.begin(), m_peers.end());
-           result += "<-> relay net mping ping send recv txn blk uptime ";
+           result += strprintf("<-> relay net mping ping send recv txn blk %*s ", m_max_age_length, "age");
            if (m_is_asmap_on) result += " asmap ";
            result += strprintf("%*s %-*s%s\n", m_max_id_length, "id", IsAddressSelected() ? m_max_addr_length : 0, IsAddressSelected() ? "address" : "", IsVersionSelected() ? "version" : "");
            for (const Peer& peer : m_peers) {
                std::string version{ToString(peer.version) + peer.sub_version};
                result += strprintf(
-                   "%3s %5s %5s%6s%7s%5s%5s%5s%5s%7s%*i %*s %-*s%s\n",
+                   "%3s %5s %5s%7s%7s%5s%5s%5s%5s %*s%*i %*s %-*s%s\n",
                    peer.is_outbound ? "out" : "in",
                    peer.is_block_relay ? "block" : "full",
                    peer.network,
-                   peer.min_ping == -1 ? "" : ToString(round(1000 * peer.min_ping)),
-                   peer.ping == -1 ? "" : ToString(round(1000 * peer.ping)),
+                   PingTimeToString(peer.min_ping),
+                   PingTimeToString(peer.ping),
                    peer.last_send == 0 ? "" : ToString(m_time_now - peer.last_send),
                    peer.last_recv == 0 ? "" : ToString(m_time_now - peer.last_recv),
                    peer.last_trxn == 0 ? "" : ToString((m_time_now - peer.last_trxn) / 60),
                    peer.last_blck == 0 ? "" : ToString((m_time_now - peer.last_blck) / 60),
-                   peer.conn_time == 0 ? "" : ToString((m_time_now - peer.conn_time) / 60),
+                   m_max_age_length, // variable spacing
+                   peer.age,
                    m_is_asmap_on ? 7 : 0, // variable spacing
                    m_is_asmap_on && peer.mapped_as != 0 ? ToString(peer.mapped_as) : "",
                    m_max_id_length, // variable spacing
@@ -510,24 +523,28 @@ public:
                    IsAddressSelected() ? peer.addr : "",
                    IsVersionSelected() && version != "0" ? version : "");
            }
-           result += " ms ms sec sec min min min\n\n";
+           result += strprintf(" ms ms sec sec min min %*s\n\n", m_max_age_length, "min");
        }

        // Report peer connection totals by type.
        result += " ipv4 ipv6 onion total block-relay\n";
        const std::array<std::string, 3> rows{{"in", "out", "total"}};
-       for (size_t i = 0; i < m_networks_size; ++i) {
+       for (uint8_t i = 0; i < m_networks_size; ++i) {
            result += strprintf("%-5s %5i %5i %5i %5i %5i\n", rows.at(i), m_counts.at(i).at(0), m_counts.at(i).at(1), m_counts.at(i).at(2), m_counts.at(i).at(m_networks_size), m_counts.at(i).at(m_networks_size + 1));
        }

        // Report local addresses, ports, and scores.
        result += "\nLocal addresses";
-       const UniValue& local_addrs{networkinfo["localaddresses"]};
+       const std::vector<UniValue>& local_addrs{networkinfo["localaddresses"].getValues()};
        if (local_addrs.empty()) {
            result += ": n/a\n";
        } else {
-           for (const UniValue& addr : local_addrs.getValues()) {
-               result += strprintf("\n%-40i port %5i score %6i", addr["address"].get_str(), addr["port"].get_int(), addr["score"].get_int());
+           size_t max_addr_size{0};
+           for (const UniValue& addr : local_addrs) {
+               max_addr_size = std::max(addr["address"].get_str().length() + 1, max_addr_size);
+           }
+           for (const UniValue& addr : local_addrs) {
+               result += strprintf("\n%-*s port %6i score %6i", max_addr_size, addr["address"].get_str(), addr["port"].get_int(), addr["score"].get_int());
            }
        }
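Note: several of the format strings above use a '*' width specifier so that a column's width is supplied at run time (m_max_age_length, m_max_id_length, max_addr_size) and grows with the longest value seen. The generic printf-style illustration below shows only that mechanism; it is independent of the project's strprintf and uses invented sample data.

#include <algorithm>
#include <cstdio>
#include <string>
#include <vector>

int main()
{
    const std::vector<std::string> addrs{"127.0.0.1:9999", "203.0.113.7:9999"};

    // First pass: find the widest address so the column is exactly wide enough.
    std::size_t max_addr{0};
    for (const auto& a : addrs) max_addr = std::max(a.size() + 1, max_addr);

    // Second pass: "%-*s" consumes an int width argument, then left-aligns the string in it.
    for (const auto& a : addrs) {
        std::printf("%-*s port %6i score %6i\n", static_cast<int>(max_addr), a.c_str(), 9999, 4);
    }
}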
@@ -1201,7 +1201,7 @@ BOOST_FIXTURE_TEST_CASE(wallet_disableprivkeys, TestChain100Setup)
    BOOST_CHECK(!wallet->GetNewDestination("", dest, error));
}

-//! Test CreateWalletFromFile function and its behavior handling potential race
+//! Test CWallet::Create() and its behavior handling potential race
//! conditions if it's called the same time an incoming transaction shows up in
//! the mempool or a new block.
//!
@@ -1219,7 +1219,7 @@ BOOST_FIXTURE_TEST_CASE(wallet_disableprivkeys, TestChain100Setup)
//! wallet rescan and notifications are immediately synced, to verify the wallet
//! must already have a handler in place for them, and there's no gap after
//! rescanning where new transactions in new blocks could be lost.
-BOOST_FIXTURE_TEST_CASE(CreateWalletFromFile, TestChain100Setup)
+BOOST_FIXTURE_TEST_CASE(CreateWallet, TestChain100Setup)
{
    gArgs.ForceSetArg("-unsafesqlitesync", "1");
    // Create new wallet with known key and unload it.

@@ -1362,11 +1362,6 @@ public:
    const CKeyingMaterial& GetEncryptionKey() const override;
    bool HasEncryptionKeys() const override;

-   // Temporary LegacyScriptPubKeyMan accessors and aliases.
-   friend class LegacyScriptPubKeyMan;
-
-   std::unique_ptr<LegacyScriptPubKeyMan> m_spk_man;
-
    /** Get last block processed height */
    int GetLastBlockHeight() const EXCLUSIVE_LOCKS_REQUIRED(cs_wallet)
    {
@@ -19,7 +19,6 @@ from test_framework.test_framework import DashTestFramework
from test_framework.util import (
    assert_approx,
    assert_equal,
-   assert_greater_than_or_equal,
    assert_greater_than,
    assert_raises_rpc_error,
    p2p_port,
@@ -73,34 +72,20 @@ class NetTest(DashTestFramework):

    def test_getnettotals(self):
        self.log.info("Test getnettotals")
-       # getnettotals totalbytesrecv and totalbytessent should be
-       # consistent with getpeerinfo. Since the RPC calls are not atomic,
-       # and messages might have been recvd or sent between RPC calls, call
-       # getnettotals before and after and verify that the returned values
-       # from getpeerinfo are bounded by those values.
+       # Test getnettotals and getpeerinfo by doing a ping. The bytes
+       # sent/received should increase by at least the size of one ping (32
+       # bytes) and one pong (32 bytes).
        net_totals_before = self.nodes[0].getnettotals()
-       peer_info = self.nodes[0].getpeerinfo()
-       net_totals_after = self.nodes[0].getnettotals()
-       assert_equal(len(peer_info), 3)
-       peers_recv = sum([peer['bytesrecv'] for peer in peer_info])
-       peers_sent = sum([peer['bytessent'] for peer in peer_info])
+       peer_info_before = self.nodes[0].getpeerinfo()

-       assert_greater_than_or_equal(peers_recv, net_totals_before['totalbytesrecv'])
-       assert_greater_than_or_equal(net_totals_after['totalbytesrecv'], peers_recv)
-       assert_greater_than_or_equal(peers_sent, net_totals_before['totalbytessent'])
-       assert_greater_than_or_equal(net_totals_after['totalbytessent'], peers_sent)
-
-       # test getnettotals and getpeerinfo by doing a ping
-       # the bytes sent/received should change
-       # note ping and pong are 32 bytes each
        self.nodes[0].ping()
-       self.wait_until(lambda: (self.nodes[0].getnettotals()['totalbytessent'] >= net_totals_after['totalbytessent'] + 32 * 2), timeout=1)
-       self.wait_until(lambda: (self.nodes[0].getnettotals()['totalbytesrecv'] >= net_totals_after['totalbytesrecv'] + 32 * 2), timeout=1)
+       self.wait_until(lambda: (self.nodes[0].getnettotals()['totalbytessent'] >= net_totals_before['totalbytessent'] + 32 * 2), timeout=1)
+       self.wait_until(lambda: (self.nodes[0].getnettotals()['totalbytesrecv'] >= net_totals_before['totalbytesrecv'] + 32 * 2), timeout=1)

-       peer_info_after_ping = self.nodes[0].getpeerinfo()
-       for before, after in zip(peer_info, peer_info_after_ping):
-           assert_greater_than_or_equal(after['bytesrecv_per_msg'].get('pong', 0), before['bytesrecv_per_msg'].get('pong', 0) + 32)
-           assert_greater_than_or_equal(after['bytessent_per_msg'].get('ping', 0), before['bytessent_per_msg'].get('ping', 0) + 32)
+       for peer_before in peer_info_before:
+           peer_after = lambda: next(p for p in self.nodes[0].getpeerinfo() if p['id'] == peer_before['id'])
+           self.wait_until(lambda: peer_after()['bytesrecv_per_msg'].get('pong', 0) >= peer_before['bytesrecv_per_msg'].get('pong', 0) + 32, timeout=1)
+           self.wait_until(lambda: peer_after()['bytessent_per_msg'].get('ping', 0) >= peer_before['bytessent_per_msg'].get('ping', 0) + 32, timeout=1)

    def test_getnetworkinfo(self):
        self.log.info("Test getnetworkinfo")

@@ -3,7 +3,6 @@
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test that the wallet resends transactions periodically."""
-import time

from test_framework.blocktools import create_block, create_coinbase
from test_framework.messages import ToHex
@@ -11,6 +10,7 @@ from test_framework.p2p import P2PTxInvStore
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal


class ResendWalletTransactionsTest(BitcoinTestFramework):
    def set_test_params(self):
        self.num_nodes = 1
@@ -27,10 +27,10 @@ class ResendWalletTransactionsTest(BitcoinTestFramework):
        txid = node.sendtoaddress(node.getnewaddress(), 1)

        # Wallet rebroadcast is first scheduled 1 sec after startup (see
-       # nNextResend in ResendWalletTransactions()). Sleep for just over a
-       # second to be certain that it has been called before the first
+       # nNextResend in ResendWalletTransactions()). Tell scheduler to call
+       # MaybeResendWalletTxn now to initialize nNextResend before the first
        # setmocktime call below.
-       time.sleep(1.1)
+       node.mockscheduler(1)

        # Can take a few seconds due to transaction trickling
        def wait_p2p():
@@ -60,16 +60,16 @@ class ResendWalletTransactionsTest(BitcoinTestFramework):
        twelve_hrs = 12 * 60 * 60
        two_min = 2 * 60
        node.setmocktime(self.mocktime + twelve_hrs - two_min)
        self.mocktime = self.mocktime + twelve_hrs - two_min
-       time.sleep(2)  # ensure enough time has passed for rebroadcast attempt to occur
+       node.mockscheduler(1)  # Tell scheduler to call MaybeResendWalletTxn now
        assert_equal(int(txid, 16) in peer_second.get_invs(), False)

        self.log.info("Bump time & check that transaction is rebroadcast")
        # Transaction should be rebroadcast approximately 24 hours in the future,
        # but can range from 12-36. So bump 36 hours to be sure.
        node.setmocktime(self.mocktime + 36 * 60 * 60)
        # Tell scheduler to call MaybeResendWalletTxn now.
        node.mockscheduler(1)
        with node.assert_debug_log(['ResendWalletTransactions: resubmit 1 unconfirmed transactions']):
            node.setmocktime(self.mocktime + 36 * 60 * 60)
            # Tell scheduler to call MaybeResendWalletTxn now.
            node.mockscheduler(1)
            # Give some time for trickle to occur
            node.setmocktime(self.mocktime + 36 * 60 * 60 + 600)
            peer_second.wait_for_broadcast([txid])
@@ -38,7 +38,7 @@ deadlock:CConnman::ForNode
deadlock:CConnman::GetNodeStats
deadlock:CChainState::ConnectTip
deadlock:UpdateTip
-deadlock:wallet_tests::CreateWalletFromFile
+deadlock:wallet_tests::CreateWallet

# WalletBatch (unidentified deadlock)
deadlock:WalletBatch