9c54cb16de

581b343d5bf517510ab0236583ca96628751177d Add in/out connections to cli -getinfo (Jon Atack)
d9cc13e88d096c1a171159c01cbb96444f7f8d7f UNIX_EPOCH_TIME fixup in rpc getnettotals (Jon Atack)
1ab49b81cf32b6ef9e312a0a8ac45c68a3262f0d Add in/out connections to rpc getnetworkinfo (Jon Atack)

Pull request description:

  This is basic info that is present in the GUI that I've been wishing to have exposed via the RPC and CLI without needing a bash workaround or script. For human users it would also be useful to have it in `-getinfo`.

  `bitcoin-cli getnetworkinfo`
  ```
  "connections": 15,
  "connections_in": 6,
  "connections_out": 9,
  ```

  `bitcoin-cli -getinfo`
  ```
  "connections": {
    "in": 6,
    "out": 9,
    "total": 15
  },
  ```

  Update the tests, RPC help, and release notes for the changes. Also fix up the `getnettotals` timemillis help while touching `rpc/net.cpp`.

  -----

  Reviewers can manually test this PR by [building from source](https://jonatack.github.io/articles/how-to-compile-bitcoin-core-and-run-the-tests), launching bitcoind, and then running `bitcoin-cli -getinfo`, `bitcoin-cli getnetworkinfo`, `bitcoin-cli help getnetworkinfo`, and `bitcoin-cli help getnettotals` (for the UNIX epoch time change).

ACKs for top commit:
  eriknylund:
    tACK [581b343](581b343d5b) on master at [a0a422c](a0a422c34c), ran unit & functional tests and confirmed changes on an existing datadir ✌️
  benthecarman:
    tACK `581b343`
  willcl-ark:
    tACK for 581b343d5bf517510ab0236583ca96628751177d, this time rebased onto master at 862fde88be706adb20a211178253636442c3ae00.
  shesek:
    tACK `581b343`. This provides what I needed, thanks!
  n-thumann:
    tACK 581b343 on master at a0a422c, ran unit & functional tests and confirmed changes on an existing datadir ✌️

Tree-SHA512: 08dd3ac8fefae401bd8253ff3ac027603c528eeccba53cedcb127771316173a7052fce44af8fa33ac98ebc4cf2a2b11cdefd949995d55e9b9a5942b876d00dc5
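To see the new counters outside the test suite, here is a minimal sketch (not part of this PR) that reads them over the CLI. It assumes a locally running node with `dash-cli` on PATH and default RPC credentials; the `rpc()` helper is purely illustrative:

```
#!/usr/bin/env python3
"""Print the connection counters exposed by getnetworkinfo (sketch)."""
import json
import subprocess


def rpc(command):
    # Shell out to dash-cli and parse its JSON reply.
    result = subprocess.run(["dash-cli", command], capture_output=True, text=True, check=True)
    return json.loads(result.stdout)


info = rpc("getnetworkinfo")
print("total connections:", info["connections"])
print("inbound:          ", info["connections_in"])
print("outbound:         ", info["connections_out"])
```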
247 lines · 12 KiB · Python · Executable File
#!/usr/bin/env python3
# Copyright (c) 2017-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test RPC calls related to net.

Tests correspond to code in rpc/net.cpp.
"""

from test_framework.p2p import P2PInterface
import test_framework.messages
from test_framework.messages import (
    NODE_NETWORK,
)

from itertools import product

from test_framework.test_framework import DashTestFramework
from test_framework.util import (
    assert_approx,
    assert_equal,
    assert_greater_than,
    assert_raises_rpc_error,
    p2p_port,
)


def assert_net_servicesnames(servicesflag, servicenames):
    """Utility that checks if all flags are correctly decoded in
    `getpeerinfo` and `getnetworkinfo`.

    :param servicesflag: The services as an integer.
    :param servicenames: The list of decoded service names, as strings.
    """
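    # For example, servicenames ['NETWORK', 'BLOOM'] rebuilds
    # NODE_NETWORK | NODE_BLOOM = 1 | 4 = 5, which must equal servicesflag.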
    servicesflag_generated = 0
    for servicename in servicenames:
        servicesflag_generated |= getattr(test_framework.messages, 'NODE_' + servicename)
    assert servicesflag_generated == servicesflag


class NetTest(DashTestFramework):
    def set_test_params(self):
        self.set_dash_test_params(3, 1, fast_dip3_enforcement=True)
        self.supports_cli = False

    def run_test(self):
        # Get out of IBD for the getpeerinfo tests.
        self.nodes[0].generate(101)
        # Wait for one ping/pong round trip to finish so that we can be sure there is
        # no further chatter between nodes for some time; in particular, the exchange
        # of messages like getheaders and friends causes test failures here.
        self.nodes[0].ping()
        self.wait_until(lambda: all(['pingtime' in n for n in self.nodes[0].getpeerinfo()]))
        self.log.info('Connect nodes both ways')
        self.connect_nodes(0, 1)
        self.connect_nodes(1, 0)
        self.sync_all()

        self.test_connection_count()
        self.test_getpeerinfo()
        self.test_getnettotals()
        self.test_getnetworkinfo()
        self.test_getaddednodeinfo()
        self.test_service_flags()
        self.test_getnodeaddresses()

    def test_connection_count(self):
        self.log.info("Test getconnectioncount")
        # `connect_nodes` connected nodes 0 and 1 to each other above, and node0 was
        # also connected to node2 (a masternode) during network setup.
        assert_equal(self.nodes[0].getconnectioncount(), 3)

    def test_getnettotals(self):
        self.log.info("Test getnettotals")
        # Test getnettotals and getpeerinfo by doing a ping. The bytes
        # sent/received should increase by at least the size of one ping (32
        # bytes) and one pong (32 bytes).
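        # (Each ping/pong message is a 24-byte P2P header plus an 8-byte nonce
        # payload, hence the 32-byte lower bound.)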
        net_totals_before = self.nodes[0].getnettotals()
        peer_info_before = self.nodes[0].getpeerinfo()

        self.nodes[0].ping()
        self.wait_until(lambda: (self.nodes[0].getnettotals()['totalbytessent'] >= net_totals_before['totalbytessent'] + 32 * 2), timeout=1)
        self.wait_until(lambda: (self.nodes[0].getnettotals()['totalbytesrecv'] >= net_totals_before['totalbytesrecv'] + 32 * 2), timeout=1)

        for peer_before in peer_info_before:
            peer_after = lambda: next(p for p in self.nodes[0].getpeerinfo() if p['id'] == peer_before['id'])
            self.wait_until(lambda: peer_after()['bytesrecv_per_msg'].get('pong', 0) >= peer_before['bytesrecv_per_msg'].get('pong', 0) + 32, timeout=1)
            self.wait_until(lambda: peer_after()['bytessent_per_msg'].get('ping', 0) >= peer_before['bytessent_per_msg'].get('ping', 0) + 32, timeout=1)

    def test_getnetworkinfo(self):
        self.log.info("Test getnetworkinfo")
        info = self.nodes[0].getnetworkinfo()
        assert_equal(self.nodes[0].getnetworkinfo()['networkactive'], True)
        assert_equal(info['networkactive'], True)
        assert_equal(info['connections'], 3)
        assert_equal(info['connections_in'], 2)
        assert_equal(info['connections_out'], 1)
        assert_equal(info['connections_mn'], 0)
        assert_equal(info['connections_mn_in'], 0)
        assert_equal(info['connections_mn_out'], 0)

        with self.nodes[0].assert_debug_log(expected_msgs=['SetNetworkActive: false\n']):
            self.nodes[0].setnetworkactive(state=False)
        assert_equal(self.nodes[0].getnetworkinfo()['networkactive'], False)
        # Wait a bit for all sockets to close
        self.wait_until(lambda: self.nodes[0].getnetworkinfo()['connections'] == 0, timeout=3)
        self.wait_until(lambda: self.nodes[1].getnetworkinfo()['connections'] == 0, timeout=3)

        with self.nodes[0].assert_debug_log(expected_msgs=['SetNetworkActive: true\n']):
            self.nodes[0].setnetworkactive(state=True)
        # Connect nodes both ways.
        self.connect_nodes(0, 1)
        self.connect_nodes(1, 0)

        info = self.nodes[1].getnetworkinfo()
        assert_equal(info['networkactive'], True)
        assert_equal(info['connections'], 2)
        assert_equal(info['connections_in'], 1)
        assert_equal(info['connections_out'], 1)
        assert_equal(info['connections_mn'], 0)
        assert_equal(info['connections_mn_in'], 0)
        assert_equal(info['connections_mn_out'], 0)

        # check the `servicesnames` field
        network_info = [node.getnetworkinfo() for node in self.nodes]
        for info in network_info:
            assert_net_servicesnames(int(info["localservices"], 0x10), info["localservicesnames"])

        # Check dynamically generated networks list in getnetworkinfo help output.
        assert "(ipv4, ipv6, onion, i2p)" in self.nodes[0].help("getnetworkinfo")

        self.log.info('Test extended connections info')
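        # Connecting node1 to node2 (a masternode) should add one outbound connection
        # for node1 and, once the handshake completes, be reflected in the
        # masternode-specific counters checked below.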
        self.connect_nodes(1, 2)
        self.nodes[1].ping()
        self.wait_until(lambda: all(['pingtime' in n for n in self.nodes[1].getpeerinfo()]))
        assert_equal(self.nodes[1].getnetworkinfo()['connections'], 3)
        assert_equal(self.nodes[1].getnetworkinfo()['connections_in'], 1)
        assert_equal(self.nodes[1].getnetworkinfo()['connections_out'], 2)
        assert_equal(self.nodes[1].getnetworkinfo()['connections_mn'], 1)
        assert_equal(self.nodes[1].getnetworkinfo()['connections_mn_in'], 0)
        assert_equal(self.nodes[1].getnetworkinfo()['connections_mn_out'], 1)

    def test_getaddednodeinfo(self):
        self.log.info("Test getaddednodeinfo")
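        # getaddednodeinfo only reports addresses registered via addnode 'add'
        # (whether or not a connection is currently open), so the list starts empty.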
        assert_equal(self.nodes[0].getaddednodeinfo(), [])
        # add a node (node2) to node0
        ip_port = "127.0.0.1:{}".format(p2p_port(2))
        self.nodes[0].addnode(node=ip_port, command='add')
        # check that the node has indeed been added
        added_nodes = self.nodes[0].getaddednodeinfo(ip_port)
        assert_equal(len(added_nodes), 1)
        assert_equal(added_nodes[0]['addednode'], ip_port)
        # check that the node cannot be added again
        assert_raises_rpc_error(-23, "Node already added", self.nodes[0].addnode, node=ip_port, command='add')
        # check that the node can be removed
        self.nodes[0].addnode(node=ip_port, command='remove')
        assert_equal(self.nodes[0].getaddednodeinfo(), [])
        # check that trying to remove the node again returns an error
        assert_raises_rpc_error(-24, "Node could not be removed", self.nodes[0].addnode, node=ip_port, command='remove')
        # check that a non-existent node returns an error
        assert_raises_rpc_error(-24, "Node has not been added", self.nodes[0].getaddednodeinfo, '1.1.1.1')

    def test_getpeerinfo(self):
        self.log.info("Test getpeerinfo")
        # Create a few getpeerinfo last_block/last_transaction values.
        if self.is_wallet_compiled():
            self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 1)
            self.nodes[1].generate(1)
            self.sync_all()
        time_now = self.mocktime
        peer_info = [x.getpeerinfo() for x in self.nodes]
        # Verify last_block and last_transaction keys/values.
        for node, peer, field in product(range(self.num_nodes - self.mn_count), range(2), ['last_block', 'last_transaction']):
            assert field in peer_info[node][peer].keys()
            if peer_info[node][peer][field] != 0:
                assert_approx(peer_info[node][peer][field], time_now, vspan=60)
        # check both sides of the bidirectional connection between nodes
        # the address bound to on one side will be the source address for the other node
        assert_equal(peer_info[0][0]['addrbind'], peer_info[1][0]['addr'])
        assert_equal(peer_info[1][0]['addrbind'], peer_info[0][0]['addr'])
        # check the `servicesnames` field
        for info in peer_info:
            assert_net_servicesnames(int(info[0]["services"], 0x10), info[0]["servicesnames"])

        # Check dynamically generated networks list in getpeerinfo help output.
        assert "(ipv4, ipv6, onion, i2p, not_publicly_routable)" in self.nodes[0].help("getpeerinfo")
        # This part differs slightly from the Bitcoin implementation. That is expected,
        # because connections are created a bit differently during network setup here,
        # and more connections are created during the test itself to exercise the
        # masternode-specific stats.
        assert_equal(peer_info[0][0]['connection_type'], 'inbound')
        assert_equal(peer_info[0][1]['connection_type'], 'inbound')
        assert_equal(peer_info[0][2]['connection_type'], 'manual')

        assert_equal(peer_info[1][0]['connection_type'], 'manual')
        assert_equal(peer_info[1][1]['connection_type'], 'inbound')

        assert_equal(peer_info[2][0]['connection_type'], 'manual')

    def test_service_flags(self):
        self.log.info("Test service flags")
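        # Bits 4 and 63 are not mapped to any named service flag, so both should be
        # decoded as UNKNOWN[2^n] entries in servicesnames.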
        self.nodes[0].add_p2p_connection(P2PInterface(), services=(1 << 4) | (1 << 63))
        assert_equal(['UNKNOWN[2^4]', 'UNKNOWN[2^63]'], self.nodes[0].getpeerinfo()[-1]['servicesnames'])
        self.nodes[0].disconnect_p2ps()

    def test_getnodeaddresses(self):
        self.log.info("Test getnodeaddresses")
        self.nodes[0].add_p2p_connection(P2PInterface())

        # Add some addresses to the Address Manager over RPC. Due to the way
        # bucket and bucket position are calculated, some of these addresses
        # will collide.
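        # The generated addresses run from 0.0.1.1 up to 39.15.1.1 (i >> 8 gives the
        # first octet, i % 256 the second), so all 10000 inserted addresses are distinct.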
        imported_addrs = []
        for i in range(10000):
            first_octet = i >> 8
            second_octet = i % 256
            a = "{}.{}.1.1".format(first_octet, second_octet)
            imported_addrs.append(a)
            self.nodes[0].addpeeraddress(a, 8333)

        # Obtain addresses via rpc call and check they are the ones sent in before.
        #
        # Maximum possible addresses in addrman is 10000, although actual
        # number will usually be less due to bucket and bucket position
        # collisions.
        node_addresses = self.nodes[0].getnodeaddresses(0)
        assert_greater_than(len(node_addresses), 5000)
        assert_greater_than(10000, len(node_addresses))
        for a in node_addresses:
            assert_equal(a["time"], self.mocktime)
            assert_equal(a["services"], NODE_NETWORK)
            assert a["address"] in imported_addrs
            assert_equal(a["port"], 8333)

        node_addresses = self.nodes[0].getnodeaddresses(1)
        assert_equal(len(node_addresses), 1)

        assert_raises_rpc_error(-8, "Address count out of range", self.nodes[0].getnodeaddresses, -1)

        # addrman's size cannot be known reliably after insertion, as hash collisions
        # may occur, so only test that requesting a large number of addresses returns
        # fewer than that.
        LARGE_REQUEST_COUNT = 10000
        node_addresses = self.nodes[0].getnodeaddresses(LARGE_REQUEST_COUNT)
        assert_greater_than(LARGE_REQUEST_COUNT, len(node_addresses))


if __name__ == '__main__':
    NetTest().main()