Merge pull request #5858 from knst/bp-v21-p22

backport: bitcoin#20112, #20115, #20258, #20285, #20294, #20299, #20315, #20328, #20339, #20368, #20390
Commit: cb09d35fc6
Author: PastaPastaPasta, 2024-02-05 10:20:59 -06:00, committed by GitHub
GPG Key ID: B5690EEEBB952194 (no known key found for this signature in database)
15 changed files with 161 additions and 144 deletions

View File

@@ -8,7 +8,7 @@ container:
   memory: 8G  # Set to 8GB to avoid OOM. https://cirrus-ci.org/guide/linux/#linux-containers
   kvm: true  # Use kvm to avoid spurious CI failures in the default virtualization cluster, see https://github.com/bitcoin/bitcoin/issues/20093
 env:
-  PACKAGE_MANAGER_INSTALL : "apt-get update && apt-get install -y"
+  PACKAGE_MANAGER_INSTALL: "apt-get update && apt-get install -y"
   MAKEJOBS: "-j4"
   DANGER_RUN_CI_ON_HOST: "1"  # Containers will be discarded after the run, so there is no risk that the ci scripts modify the system
   TEST_RUNNER_PORT_MIN: "14000"  # Must be larger than 12321, which is used for the http cache. See https://cirrus-ci.org/guide/writing-tasks/#http-cache
@@ -19,6 +19,7 @@ env:
 # https://cirrus-ci.org/guide/tips-and-tricks/#sharing-configuration-between-tasks
 global_task_template: &GLOBAL_TASK_TEMPLATE
+  skip: $CIRRUS_REPO_FULL_NAME == "bitcoin-core/gui" && $CIRRUS_PR == ""  # No need to run on the read-only mirror, unless it is a PR. https://cirrus-ci.org/guide/writing-tasks/#conditional-task-execution
   ccache_cache:
     folder: "/tmp/ccache_dir"
   depends_built_cache:
@@ -28,6 +29,7 @@ global_task_template: &GLOBAL_TASK_TEMPLATE
   depends_releases_cache:
     folder: "/tmp/cirrus-ci-build/releases"
   merge_base_script:
+    - if [ "$CIRRUS_PR" = "" ]; then exit 0; fi
     - bash -c "$PACKAGE_MANAGER_INSTALL git"
     - git fetch $CIRRUS_REPO_CLONE_URL $CIRRUS_BASE_BRANCH
     - git config --global user.email "ci@ci.ci"
@@ -37,6 +39,30 @@ global_task_template: &GLOBAL_TASK_TEMPLATE
     - ./ci/test_run_all.sh

+task:
+  name: 'ARM [GOAL: install] [buster] [unit tests, no functional tests]'
+  << : *GLOBAL_TASK_TEMPLATE
+  container:
+    image: debian:buster
+  env:
+    FILE_ENV: "./ci/test/00_setup_env_arm.sh"
+
+task:
+  name: 'Win64 [GOAL: deploy] [unit tests, no gui, no boost::process, no functional tests]'
+  << : *GLOBAL_TASK_TEMPLATE
+  container:
+    image: ubuntu:bionic
+  env:
+    FILE_ENV: "./ci/test/00_setup_env_win64.sh"
+
+task:
+  name: 'x86_64 Linux [GOAL: install] [focal] [previous releases, uses qt5 dev package and some depends packages] [unsigned char]'
+  << : *GLOBAL_TASK_TEMPLATE
+  container:
+    image: ubuntu:focal
+  env:
+    FILE_ENV: "./ci/test/00_setup_env_native_qt5.sh"
+
 task:
   name: 'x86_64 Linux [GOAL: install] [focal] [depends, sanitizers: thread (TSan), no gui]'
   << : *GLOBAL_TASK_TEMPLATE
@@ -48,6 +74,14 @@ task:
     MAKEJOBS: "-j8"
     FILE_ENV: "./ci/test/00_setup_env_native_tsan.sh"

+task:
+  name: 'x86_64 Linux [GOAL: install] [focal] [depends, sanitizers: memory (MSan)]'
+  << : *GLOBAL_TASK_TEMPLATE
+  container:
+    image: ubuntu:focal
+  env:
+    FILE_ENV: "./ci/test/00_setup_env_native_msan.sh"
+
 task:
   name: 'x86_64 Linux [GOAL: install] [focal] [no depends, only system libs, sanitizers: address/leak (ASan + LSan) + undefined (UBSan) + integer]'
   << : *GLOBAL_TASK_TEMPLATE
@@ -57,9 +91,39 @@ task:
     FILE_ENV: "./ci/test/00_setup_env_native_asan.sh"

 task:
-  name: 'x86_64 Linux [GOAL: install] [focal] [no depends, only system libs, fuzzers under valgrind]'
+  name: 'x86_64 Linux [GOAL: install] [focal] [no depends, only system libs, sanitizers: fuzzer,address,undefined]'
   << : *GLOBAL_TASK_TEMPLATE
   container:
     image: ubuntu:focal
   env:
-    FILE_ENV: "./ci/test/00_setup_env_native_fuzz_with_valgrind.sh"
+    FILE_ENV: "./ci/test/00_setup_env_native_fuzz.sh"
+
+task:
+  name: 'x86_64 Linux [GOAL: install] [focal] [multiprocess]'
+  << : *GLOBAL_TASK_TEMPLATE
+  container:
+    image: ubuntu:focal
+  env:
+    FILE_ENV: "./ci/test/00_setup_env_native_multiprocess.sh"
+
+task:
+  name: 'macOS 10.12 [GOAL: deploy] [no functional tests]'
+  << : *GLOBAL_TASK_TEMPLATE
+  container:
+    image: ubuntu:focal
+  env:
+    FILE_ENV: "./ci/test/00_setup_env_mac.sh"
+
+task:
+  name: 'macOS 10.14 native [GOAL: install] [GUI] [no depends]'
+  macos_brew_addon_script:
+    - brew install boost libevent berkeley-db4 qt miniupnpc ccache zeromq qrencode sqlite libtool automake pkg-config gnu-getopt
+  << : *GLOBAL_TASK_TEMPLATE
+  osx_instance:
+    # Use latest image, but hardcode version to avoid silent upgrades (and breaks)
+    image: catalina-xcode-12.1  # https://cirrus-ci.org/guide/macOS
+  env:
+    DANGER_RUN_CI_ON_HOST: "true"
+    CI_USE_APT_INSTALL: "no"
+    PACKAGE_MANAGER_INSTALL: "echo"  # Nothing to do
+    FILE_ENV: "./ci/test/00_setup_env_mac_host.sh"
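Each Cirrus task above only selects a setup script via FILE_ENV and then runs ./ci/test_run_all.sh, so a task can also be reproduced outside Cirrus. A minimal sketch, assuming Docker is available and the ci/README.md workflow applies unchanged to this repository:

    # Reproduce the Win64 task defined above from the repository root (sketch, not part of the diff):
    MAKEJOBS="-j4" FILE_ENV="./ci/test/00_setup_env_win64.sh" ./ci/test_run_all.sh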

View File

@@ -185,67 +185,7 @@ after_success:
       script:
         - set -o errexit; source ./ci/lint/06_script.sh

-    - stage: test
-      name: 'ARM [GOAL: install] [focal] [unit tests, no functional tests]'
-      arch: arm64  # Can disable QEMU_USER_CMD and run the tests natively without qemu
-      env: >-
-        FILE_ENV="./ci/test/00_setup_env_arm.sh"
-        QEMU_USER_CMD=""
-
-    - stage: test
-      name: 'S390x [GOAL: install] [focal] [unit tests, functional tests]'
-      arch: s390x  # Can disable QEMU_USER_CMD and run the tests natively without qemu
-      env: >-
-        FILE_ENV="./ci/test/00_setup_env_s390x.sh"
-        QEMU_USER_CMD=""
-
-    - stage: test
-      name: 'Win64 [GOAL: deploy] [unit tests, no gui, no boost::process, no functional tests]'
-      env: >-
-        FILE_ENV="./ci/test/00_setup_env_win64.sh"
-
-    - stage: test
-      name: 'x86_64 Linux [GOAL: install] [focal] [previous releases, uses qt5 dev package and some depends packages] [unsigned char]'
-      env: >-
-        FILE_ENV="./ci/test/00_setup_env_native_qt5.sh"
-
-    # x86_64 Linux (xenial, no depends, only system libs, sanitizers: thread (TSan))
-    - stage: test
-      name: 'x86_64 Linux [GOAL: install] [focal] [no depends, only system libs, sanitizers: fuzzer,address,undefined]'
-      env: >-
-        FILE_ENV="./ci/test/00_setup_env_native_fuzz.sh"
-
-    - stage: test
-      name: 'x86_64 Linux [GOAL: install] [focal] [multiprocess]'
-      env: >-
-        FILE_ENV="./ci/test/00_setup_env_native_multiprocess.sh"
-
     - stage: test
       name: 'x86_64 Linux [GOAL: install] [focal] [no wallet]'
       env: >-
         FILE_ENV="./ci/test/00_setup_env_native_nowallet.sh"

-    - stage: test
-      name: 'macOS 10.12 [GOAL: deploy] [no functional tests]'
-      env: >-
-        FILE_ENV="./ci/test/00_setup_env_mac.sh"
-
-    - stage: test
-      name: 'macOS 10.14 native [GOAL: install] [GUI] [no depends]'
-      os: osx
-      # Use the most recent version:
-      # Xcode 11.3.1, macOS 10.14, SDK 10.15
-      # https://docs.travis-ci.com/user/reference/osx/#macos-version
-      osx_image: xcode11.3
-      addons:
-        homebrew:
-          packages:
-            - berkeley-db4
-            - miniupnpc
-            - qrencode
-            - ccache
-            - zeromq
-      env: >-
-        DANGER_RUN_CI_ON_HOST=true
-        CI_USE_APT_INSTALL=no
-        FILE_ENV="./ci/test/00_setup_env_mac_host.sh"

View File

@@ -29,8 +29,8 @@ if [ "$DIRECT_WINE_EXEC_TESTS" = "true" ]; then
     wine ./src/test/test_dash.exe
 else
     if [ "$RUN_UNIT_TESTS_SEQUENTIAL" = "true" ]; then
-        ./src/test/test_dash --catch_system_errors=no -l test_suite
+        ${TEST_RUNNER_ENV} ./src/test/test_dash --catch_system_errors=no -l test_suite
     else
-        make $MAKEJOBS check VERBOSE=1
+        ${TEST_RUNNER_ENV} make $MAKEJOBS check VERBOSE=1
     fi
 fi

View File

@@ -11,12 +11,9 @@ export HOST=x86_64-apple-darwin
 export PIP_PACKAGES="zmq lief"
 export GOAL="install"
 export BITCOIN_CONFIG="--with-gui --enable-reduce-exports --disable-miner --enable-werror --with-boost-process"
+export CI_OS_NAME="macos"
 export NO_DEPENDS=1
 export OSX_SDK=""
 export CCACHE_SIZE=300M
 export RUN_SECURITY_TESTS="true"
-if [ "$TRAVIS_REPO_SLUG" != "dashpay/dash" ]; then
-  export RUN_FUNCTIONAL_TESTS="false"
-  export EXPECTED_TESTS_DURATION_IN_SECONDS=200
-fi

View File

@@ -19,6 +19,7 @@ fi
 # Use debian to avoid 404 apt errors
 export CONTAINER_NAME=ci_s390x
 export RUN_UNIT_TESTS=true
+export TEST_RUNNER_ENV="LC_ALL=C"
 export RUN_FUNCTIONAL_TESTS=true
 export GOAL="install"
 export BITCOIN_CONFIG="--enable-reduce-exports --disable-gui-tests --with-boost-process"  # GUI tests disabled for now, see https://github.com/bitcoin/bitcoin/issues/23730

View File

@@ -8,7 +8,7 @@ export LC_ALL=C.UTF-8
 export CONTAINER_NAME=ci_win64
 export HOST=x86_64-w64-mingw32
-export PACKAGES="python3 nsis g++-mingw-w64-x86-64 wine-binfmt wine64"
+export PACKAGES="python3 nsis g++-mingw-w64-x86-64 wine-binfmt wine64 file"
 export DPKG_ADD_ARCH="i386"
 export RUN_FUNCTIONAL_TESTS=false
 export RUN_SECURITY_TESTS="false"

View File

@@ -13,8 +13,8 @@ if [[ $QEMU_USER_CMD == qemu-s390* ]]; then
   export LC_ALL=C
 fi

-if [ "$TRAVIS_OS_NAME" == "osx" ]; then
-  ${CI_RETRY_EXE} pip3 install $PIP_PACKAGES
+if [ "$CI_OS_NAME" == "macos" ]; then
+  IN_GETOPT_BIN="/usr/local/opt/gnu-getopt/bin/getopt" ${CI_RETRY_EXE} pip3 install --user $PIP_PACKAGES
 fi

 # Create folders that are mounted into the docker
@@ -26,9 +26,7 @@ export LSAN_OPTIONS="suppressions=${BASE_BUILD_DIR}/test/sanitizer_suppressions/
 export TSAN_OPTIONS="suppressions=${BASE_BUILD_DIR}/test/sanitizer_suppressions/tsan:halt_on_error=1"
 export UBSAN_OPTIONS="suppressions=${BASE_BUILD_DIR}/test/sanitizer_suppressions/ubsan:print_stacktrace=1:halt_on_error=1:report_error_type=1"
 env | grep -E '^(BASE_|QEMU_|CCACHE_|LC_ALL|BOOST_TEST_RANDOM|DEBIAN_FRONTEND|CONFIG_SHELL|(ASAN|LSAN|TSAN|UBSAN)_OPTIONS|PREVIOUS_RELEASES_DIR))' | tee /tmp/env
-if [[ $HOST = *-mingw32 ]]; then
-  DOCKER_ADMIN="--cap-add SYS_ADMIN"
-elif [[ $BITCOIN_CONFIG = *--with-sanitizers=*address* ]]; then # If ran with (ASan + LSan), Docker needs access to ptrace (https://github.com/google/sanitizers/issues/764)
+if [[ $BITCOIN_CONFIG = *--with-sanitizers=*address* ]]; then # If ran with (ASan + LSan), Docker needs access to ptrace (https://github.com/google/sanitizers/issues/764)
   DOCKER_ADMIN="--cap-add SYS_PTRACE"
 fi
@@ -72,16 +70,16 @@ elif [ "$CI_USE_APT_INSTALL" != "no" ]; then
   fi
 fi

-if [ "$TRAVIS_OS_NAME" == "osx" ]; then
+if [ "$CI_OS_NAME" == "macos" ]; then
   top -l 1 -s 0 | awk ' /PhysMem/ {print}'
   echo "Number of CPUs: $(sysctl -n hw.logicalcpu)"
 else
   DOCKER_EXEC free -m -h
   DOCKER_EXEC echo "Number of CPUs \(nproc\):" \$\(nproc\)
   DOCKER_EXEC echo $(lscpu | grep Endian)
-  DOCKER_EXEC echo "Free disk space:"
-  DOCKER_EXEC df -h
 fi
+DOCKER_EXEC echo "Free disk space:"
+DOCKER_EXEC df -h

 if [ ! -d ${DIR_QA_ASSETS} ]; then
   if [ "$RUN_FUZZ_TESTS" = "true" ]; then

View File

@@ -7,7 +7,7 @@
 export LC_ALL=C.UTF-8

 # Make sure default datadir does not exist and is never read by creating a dummy file
-if [ "$TRAVIS_OS_NAME" == "osx" ]; then
+if [ "$CI_OS_NAME" == "macos" ]; then
   echo > $HOME/Library/Application\ Support/DashCore
 else
   DOCKER_EXEC echo \> \$HOME/.dashcore

ci/test/wrap-wine.sh (new executable file, +20 lines)
View File

@@ -0,0 +1,20 @@
+#!/usr/bin/env bash
+#
+# Copyright (c) 2020 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+export LC_ALL=C.UTF-8
+
+for b_name in {"${BASE_OUTDIR}/bin"/*,src/secp256k1/*tests,src/univalue/{no_nul,test_json,unitester,object}}.exe; do
+    # shellcheck disable=SC2044
+    for b in $(find "${BASE_ROOT_DIR}" -executable -type f -name "$(basename $b_name)"); do
+        if (file "$b" | grep "Windows"); then
+            echo "Wrap $b ..."
+            mv "$b" "${b}_orig"
+            echo '#!/usr/bin/env bash' > "$b"
+            echo "wine64 \"${b}_orig\" \"\$@\"" >> "$b"
+            chmod +x "$b"
+        fi
+    done
+done
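For illustration, after wrap-wine.sh has run, each Windows test binary is replaced by a two-line shell stub that re-executes the renamed original under wine64; the path below is only an example:

    #!/usr/bin/env bash
    wine64 "/path/to/build/src/test/test_dash.exe_orig" "$@"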

View File

@@ -16,11 +16,13 @@
 #include <rpc/protocol.h>
 #include <rpc/request.h>
 #include <stacktraces.h>
+#include <tinyformat.h>
 #include <util/strencodings.h>
 #include <util/system.h>
 #include <util/translation.h>
 #include <util/url.h>

+#include <algorithm>
 #include <cmath>
 #include <functional>
 #include <memory>
@@ -306,12 +308,12 @@ class NetinfoRequestHandler : public BaseRequestHandler
 {
 private:
     static constexpr int8_t UNKNOWN_NETWORK{-1};
-    static constexpr size_t m_networks_size{3};
+    static constexpr uint8_t m_networks_size{3};
     const std::array<std::string, m_networks_size> m_networks{{"ipv4", "ipv6", "onion"}};
     std::array<std::array<uint16_t, m_networks_size + 2>, 3> m_counts{{{}}}; //!< Peer counts by (in/out/total, networks/total/block-relay)
     int8_t NetworkStringToId(const std::string& str) const
     {
-        for (size_t i = 0; i < m_networks_size; ++i) {
+        for (uint8_t i = 0; i < m_networks_size; ++i) {
             if (str == m_networks.at(i)) return i;
         }
         return UNKNOWN_NETWORK;
@@ -323,21 +325,22 @@ private:
     bool IsVersionSelected() const { return m_details_level == 3 || m_details_level == 4; }
     bool m_is_asmap_on{false};
     size_t m_max_addr_length{0};
+    size_t m_max_age_length{4};
     size_t m_max_id_length{2};
     struct Peer {
-        int id;
-        int mapped_as;
-        int version;
-        int64_t conn_time;
+        std::string addr;
+        std::string sub_version;
+        std::string network;
+        std::string age;
+        double min_ping;
+        double ping;
         int64_t last_blck;
         int64_t last_recv;
         int64_t last_send;
         int64_t last_trxn;
-        double min_ping;
-        double ping;
-        std::string addr;
-        std::string network;
-        std::string sub_version;
+        int id;
+        int mapped_as;
+        int version;
         bool is_block_relay;
         bool is_outbound;
         bool operator<(const Peer& rhs) const { return std::tie(is_outbound, min_ping) < std::tie(rhs.is_outbound, rhs.min_ping); }
@@ -346,9 +349,16 @@ private:
     std::string ChainToString() const
     {
         if (gArgs.GetChainName() == CBaseChainParams::TESTNET) return " testnet";
+        if (gArgs.GetChainName() == CBaseChainParams::DEVNET) return " devnet";
         if (gArgs.GetChainName() == CBaseChainParams::REGTEST) return " regtest";
         return "";
     }
+    std::string PingTimeToString(double seconds) const
+    {
+        if (seconds < 0) return "";
+        const double milliseconds{round(1000 * seconds)};
+        return milliseconds > 999999 ? "-" : ToString(milliseconds);
+    }
     const UniValue NetinfoHelp()
     {
         return std::string{
@@ -471,10 +481,12 @@ public:
             const double min_ping{peer["minping"].isNull() ? -1 : peer["minping"].get_real()};
             const double ping{peer["pingtime"].isNull() ? -1 : peer["pingtime"].get_real()};
             const std::string addr{peer["addr"].get_str()};
+            const std::string age{conn_time == 0 ? "" : ToString((m_time_now - conn_time) / 60)};
             const std::string sub_version{peer["subver"].get_str()};
-            m_peers.push_back({peer_id, mapped_as, version, conn_time, last_blck, last_recv, last_send, last_trxn, min_ping, ping, addr, network, sub_version, is_block_relay, is_outbound});
-            m_max_id_length = std::max(ToString(peer_id).length(), m_max_id_length);
+            m_peers.push_back({addr, sub_version, network, age, min_ping, ping, last_blck, last_recv, last_send, last_trxn, peer_id, mapped_as, version, is_block_relay, is_outbound});
             m_max_addr_length = std::max(addr.length() + 1, m_max_addr_length);
+            m_max_age_length = std::max(age.length(), m_max_age_length);
+            m_max_id_length = std::max(ToString(peer_id).length(), m_max_id_length);
             m_is_asmap_on |= (mapped_as != 0);
         }
     }
@@ -485,23 +497,24 @@ public:
         // Report detailed peer connections list sorted by direction and minimum ping time.
         if (DetailsRequested() && !m_peers.empty()) {
            std::sort(m_peers.begin(), m_peers.end());
-            result += "<-> relay net mping ping send recv txn blk uptime ";
+            result += strprintf("<-> relay net mping ping send recv txn blk %*s ", m_max_age_length, "age");
            if (m_is_asmap_on) result += " asmap ";
            result += strprintf("%*s %-*s%s\n", m_max_id_length, "id", IsAddressSelected() ? m_max_addr_length : 0, IsAddressSelected() ? "address" : "", IsVersionSelected() ? "version" : "");
            for (const Peer& peer : m_peers) {
                std::string version{ToString(peer.version) + peer.sub_version};
                result += strprintf(
-                    "%3s %5s %5s%6s%7s%5s%5s%5s%5s%7s%*i %*s %-*s%s\n",
+                    "%3s %5s %5s%7s%7s%5s%5s%5s%5s %*s%*i %*s %-*s%s\n",
                    peer.is_outbound ? "out" : "in",
                    peer.is_block_relay ? "block" : "full",
                    peer.network,
-                    peer.min_ping == -1 ? "" : ToString(round(1000 * peer.min_ping)),
-                    peer.ping == -1 ? "" : ToString(round(1000 * peer.ping)),
+                    PingTimeToString(peer.min_ping),
+                    PingTimeToString(peer.ping),
                    peer.last_send == 0 ? "" : ToString(m_time_now - peer.last_send),
                    peer.last_recv == 0 ? "" : ToString(m_time_now - peer.last_recv),
                    peer.last_trxn == 0 ? "" : ToString((m_time_now - peer.last_trxn) / 60),
                    peer.last_blck == 0 ? "" : ToString((m_time_now - peer.last_blck) / 60),
-                    peer.conn_time == 0 ? "" : ToString((m_time_now - peer.conn_time) / 60),
+                    m_max_age_length, // variable spacing
+                    peer.age,
                    m_is_asmap_on ? 7 : 0, // variable spacing
                    m_is_asmap_on && peer.mapped_as != 0 ? ToString(peer.mapped_as) : "",
                    m_max_id_length, // variable spacing
@@ -510,24 +523,28 @@ public:
                    IsAddressSelected() ? peer.addr : "",
                    IsVersionSelected() && version != "0" ? version : "");
            }
-            result += " ms ms sec sec min min min\n\n";
+            result += strprintf(" ms ms sec sec min min %*s\n\n", m_max_age_length, "min");
        }

        // Report peer connection totals by type.
        result += " ipv4 ipv6 onion total block-relay\n";
        const std::array<std::string, 3> rows{{"in", "out", "total"}};
-        for (size_t i = 0; i < m_networks_size; ++i) {
+        for (uint8_t i = 0; i < m_networks_size; ++i) {
            result += strprintf("%-5s %5i %5i %5i %5i %5i\n", rows.at(i), m_counts.at(i).at(0), m_counts.at(i).at(1), m_counts.at(i).at(2), m_counts.at(i).at(m_networks_size), m_counts.at(i).at(m_networks_size + 1));
        }

        // Report local addresses, ports, and scores.
        result += "\nLocal addresses";
-        const UniValue& local_addrs{networkinfo["localaddresses"]};
+        const std::vector<UniValue>& local_addrs{networkinfo["localaddresses"].getValues()};
        if (local_addrs.empty()) {
            result += ": n/a\n";
        } else {
-            for (const UniValue& addr : local_addrs.getValues()) {
-                result += strprintf("\n%-40i port %5i score %6i", addr["address"].get_str(), addr["port"].get_int(), addr["score"].get_int());
+            size_t max_addr_size{0};
+            for (const UniValue& addr : local_addrs) {
+                max_addr_size = std::max(addr["address"].get_str().length() + 1, max_addr_size);
+            }
+            for (const UniValue& addr : local_addrs) {
+                result += strprintf("\n%-*s port %6i score %6i", max_addr_size, addr["address"].get_str(), addr["port"].get_int(), addr["score"].get_int());
            }
        }
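These bitcoin-cli changes only affect the -netinfo peer dashboard: connection age gets its own right-aligned column sized by m_max_age_length, and PingTimeToString() prints an empty field for unknown pings and "-" once a ping would exceed 999999 ms. A usage sketch, assuming a dash-cli built from this branch and a reachable node:

    dash-cli -netinfo     # summary: peer counts per network plus local addresses
    dash-cli -netinfo 4   # full detail level: per-peer rows including the address and version columns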

View File

@@ -1201,7 +1201,7 @@ BOOST_FIXTURE_TEST_CASE(wallet_disableprivkeys, TestChain100Setup)
     BOOST_CHECK(!wallet->GetNewDestination("", dest, error));
 }

-//! Test CreateWalletFromFile function and its behavior handling potential race
+//! Test CWallet::Create() and its behavior handling potential race
 //! conditions if it's called the same time an incoming transaction shows up in
 //! the mempool or a new block.
 //!
@@ -1219,7 +1219,7 @@ BOOST_FIXTURE_TEST_CASE(wallet_disableprivkeys, TestChain100Setup)
 //! wallet rescan and notifications are immediately synced, to verify the wallet
 //! must already have a handler in place for them, and there's no gap after
 //! rescanning where new transactions in new blocks could be lost.
-BOOST_FIXTURE_TEST_CASE(CreateWalletFromFile, TestChain100Setup)
+BOOST_FIXTURE_TEST_CASE(CreateWallet, TestChain100Setup)
 {
     gArgs.ForceSetArg("-unsafesqlitesync", "1");
     // Create new wallet with known key and unload it.

View File

@@ -1362,11 +1362,6 @@
     const CKeyingMaterial& GetEncryptionKey() const override;
     bool HasEncryptionKeys() const override;

-    // Temporary LegacyScriptPubKeyMan accessors and aliases.
-    friend class LegacyScriptPubKeyMan;
-    std::unique_ptr<LegacyScriptPubKeyMan> m_spk_man;
-
     /** Get last block processed height */
     int GetLastBlockHeight() const EXCLUSIVE_LOCKS_REQUIRED(cs_wallet)
     {

View File

@@ -19,7 +19,6 @@ from test_framework.test_framework import DashTestFramework
 from test_framework.util import (
     assert_approx,
     assert_equal,
-    assert_greater_than_or_equal,
     assert_greater_than,
     assert_raises_rpc_error,
     p2p_port,
@@ -73,34 +72,20 @@ class NetTest(DashTestFramework):
     def test_getnettotals(self):
         self.log.info("Test getnettotals")
-        # getnettotals totalbytesrecv and totalbytessent should be
-        # consistent with getpeerinfo. Since the RPC calls are not atomic,
-        # and messages might have been recvd or sent between RPC calls, call
-        # getnettotals before and after and verify that the returned values
-        # from getpeerinfo are bounded by those values.
+        # Test getnettotals and getpeerinfo by doing a ping. The bytes
+        # sent/received should increase by at least the size of one ping (32
+        # bytes) and one pong (32 bytes).
         net_totals_before = self.nodes[0].getnettotals()
-        peer_info = self.nodes[0].getpeerinfo()
-        net_totals_after = self.nodes[0].getnettotals()
-        assert_equal(len(peer_info), 3)
-        peers_recv = sum([peer['bytesrecv'] for peer in peer_info])
-        peers_sent = sum([peer['bytessent'] for peer in peer_info])
-        assert_greater_than_or_equal(peers_recv, net_totals_before['totalbytesrecv'])
-        assert_greater_than_or_equal(net_totals_after['totalbytesrecv'], peers_recv)
-        assert_greater_than_or_equal(peers_sent, net_totals_before['totalbytessent'])
-        assert_greater_than_or_equal(net_totals_after['totalbytessent'], peers_sent)
-
-        # test getnettotals and getpeerinfo by doing a ping
-        # the bytes sent/received should change
-        # note ping and pong are 32 bytes each
+        peer_info_before = self.nodes[0].getpeerinfo()
+
         self.nodes[0].ping()
-        self.wait_until(lambda: (self.nodes[0].getnettotals()['totalbytessent'] >= net_totals_after['totalbytessent'] + 32 * 2), timeout=1)
-        self.wait_until(lambda: (self.nodes[0].getnettotals()['totalbytesrecv'] >= net_totals_after['totalbytesrecv'] + 32 * 2), timeout=1)
+        self.wait_until(lambda: (self.nodes[0].getnettotals()['totalbytessent'] >= net_totals_before['totalbytessent'] + 32 * 2), timeout=1)
+        self.wait_until(lambda: (self.nodes[0].getnettotals()['totalbytesrecv'] >= net_totals_before['totalbytesrecv'] + 32 * 2), timeout=1)

-        peer_info_after_ping = self.nodes[0].getpeerinfo()
-        for before, after in zip(peer_info, peer_info_after_ping):
-            assert_greater_than_or_equal(after['bytesrecv_per_msg'].get('pong', 0), before['bytesrecv_per_msg'].get('pong', 0) + 32)
-            assert_greater_than_or_equal(after['bytessent_per_msg'].get('ping', 0), before['bytessent_per_msg'].get('ping', 0) + 32)
+        for peer_before in peer_info_before:
+            peer_after = lambda: next(p for p in self.nodes[0].getpeerinfo() if p['id'] == peer_before['id'])
+            self.wait_until(lambda: peer_after()['bytesrecv_per_msg'].get('pong', 0) >= peer_before['bytesrecv_per_msg'].get('pong', 0) + 32, timeout=1)
+            self.wait_until(lambda: peer_after()['bytessent_per_msg'].get('ping', 0) >= peer_before['bytessent_per_msg'].get('ping', 0) + 32, timeout=1)

     def test_getnetworkinfo(self):
         self.log.info("Test getnetworkinfo")
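The rewritten getnettotals check polls until the expected growth is observed instead of comparing two snapshots that are not atomic; a ping and its pong are 32 bytes each. The same accounting can be spot-checked by hand against a running node (a sketch, assuming dash-cli and at least one connected peer):

    dash-cli getnettotals | grep totalbytes   # note totalbytessent / totalbytesrecv
    dash-cli ping                             # queue a ping to every connected peer
    sleep 2
    dash-cli getnettotals | grep totalbytes   # both totals should have grown by at least 32 bytes per peer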

View File

@@ -3,7 +3,6 @@
 # Distributed under the MIT software license, see the accompanying
 # file COPYING or http://www.opensource.org/licenses/mit-license.php.
 """Test that the wallet resends transactions periodically."""
-import time

 from test_framework.blocktools import create_block, create_coinbase
 from test_framework.messages import ToHex
@@ -11,6 +10,7 @@ from test_framework.p2p import P2PTxInvStore
 from test_framework.test_framework import BitcoinTestFramework
 from test_framework.util import assert_equal

+
 class ResendWalletTransactionsTest(BitcoinTestFramework):
     def set_test_params(self):
         self.num_nodes = 1
@@ -27,10 +27,10 @@ class ResendWalletTransactionsTest(BitcoinTestFramework):
         txid = node.sendtoaddress(node.getnewaddress(), 1)

         # Wallet rebroadcast is first scheduled 1 sec after startup (see
-        # nNextResend in ResendWalletTransactions()). Sleep for just over a
-        # second to be certain that it has been called before the first
+        # nNextResend in ResendWalletTransactions()). Tell scheduler to call
+        # MaybeResendWalletTxn now to initialize nNextResend before the first
         # setmocktime call below.
-        time.sleep(1.1)
+        node.mockscheduler(1)

         # Can take a few seconds due to transaction trickling
         def wait_p2p():
@@ -60,16 +60,16 @@ class ResendWalletTransactionsTest(BitcoinTestFramework):
         twelve_hrs = 12 * 60 * 60
         two_min = 2 * 60
         node.setmocktime(self.mocktime + twelve_hrs - two_min)
-        self.mocktime = self.mocktime + twelve_hrs - two_min
-        time.sleep(2) # ensure enough time has passed for rebroadcast attempt to occur
+        node.mockscheduler(1)  # Tell scheduler to call MaybeResendWalletTxn now
         assert_equal(int(txid, 16) in peer_second.get_invs(), False)

         self.log.info("Bump time & check that transaction is rebroadcast")
         # Transaction should be rebroadcast approximately 24 hours in the future,
         # but can range from 12-36. So bump 36 hours to be sure.
-        node.setmocktime(self.mocktime + 36 * 60 * 60)
-        # Tell scheduler to call MaybeResendWalletTxn now.
-        node.mockscheduler(1)
+        with node.assert_debug_log(['ResendWalletTransactions: resubmit 1 unconfirmed transactions']):
+            node.setmocktime(self.mocktime + 36 * 60 * 60)
+            # Tell scheduler to call MaybeResendWalletTxn now.
+            node.mockscheduler(1)
         # Give some time for trickle to occur
         node.setmocktime(self.mocktime + 36 * 60 * 60 + 600)
         peer_second.wait_for_broadcast([txid])

View File

@@ -38,7 +38,7 @@ deadlock:CConnman::ForNode
 deadlock:CConnman::GetNodeStats
 deadlock:CChainState::ConnectTip
 deadlock:UpdateTip
-deadlock:wallet_tests::CreateWalletFromFile
+deadlock:wallet_tests::CreateWallet

 # WalletBatch (unidentified deadlock)
 deadlock:WalletBatch
deadlock:WalletBatch deadlock:WalletBatch