dash/test/functional/rpc_net.py
MarcoFalke 4143e359f7
(Partial) Merge #19725: [RPC] Add connection type to getpeerinfo, improve logs
a512925e19a70d7f6b80ac530a169f45ffaafa1c [doc] Release notes (Amiti Uttarwar)
50f94b34a33c954f6e207f509c93d33267a5c3e2 [rpc] Deprecate getpeerinfo addnode field (Amiti Uttarwar)
df091b9b509f0b10e4315c0bfa2da0cc0c31c22f [refactor] Rename test file to allow any getpeerinfo deprecations. (Amiti Uttarwar)
395acfa83a5436790c1a722a5609ac9d48df235f [rpc] Add connection type to getpeerinfo RPC, update tests (Amiti Uttarwar)
49c10a9ca40967d28ae16dfea9cccc6f3a6624a1 [log] Add connection type to log statement (Amiti Uttarwar)

Pull request description:

  After #19316, we can more directly expose information about the connection type on the `getpeerinfo` RPC. Doing so also makes the existing addnode field redundant, so this PR begins the process of deprecating this field.

  This PR also includes one commit that improves a log message, as both use a shared function to return the connection type as a string.

  Suggested by sdaftuar: https://github.com/bitcoin/bitcoin/pull/19316#discussion_r468001604 & https://github.com/bitcoin/bitcoin/pull/19316#discussion_r468018093
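
  For example, the new field can be read directly from the `getpeerinfo` result; a minimal sketch,
  assuming a test-framework node object `node` (illustrative only, not code from this PR):

      # Sketch: list each peer's address and the connection type ('inbound',
      # 'manual', etc.) now reported by getpeerinfo.
      for peer in node.getpeerinfo():
          print(peer['addr'], peer['connection_type'])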

ACKs for top commit:
  jnewbery:
    Code review ACK a512925e19a70d7f6b80ac530a169f45ffaafa1c.
  sipa:
    utACK a512925e19a70d7f6b80ac530a169f45ffaafa1c
  guggero:
    Tested and code review ACK a512925e.
  MarcoFalke:
    cr ACK a512925e19a70d7f6b80ac530a169f45ffaafa1c 🌇
  promag:
    Code review ACK a512925e19a70d7f6b80ac530a169f45ffaafa1c.

Tree-SHA512: 601a7a38aee235ee59aca690784f886dc2ae4e418b2e6422c4b58cd597376c00f74910f66920b08a08a0bec28bf8022e71a1435785ff6ba8a188954261aba78e
2024-01-09 08:15:36 -06:00

#!/usr/bin/env python3
# Copyright (c) 2017-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test RPC calls related to net.
Tests correspond to code in rpc/net.cpp.
"""
from test_framework.test_framework import DashTestFramework
from test_framework.util import (
assert_equal,
assert_greater_than_or_equal,
assert_greater_than,
assert_raises_rpc_error,
p2p_port,
wait_until,
)
from test_framework.mininode import P2PInterface
import test_framework.messages
from test_framework.messages import (
NODE_NETWORK,
)
def assert_net_servicesnames(servicesflag, servicenames):
"""Utility that checks if all flags are correctly decoded in
`getpeerinfo` and `getnetworkinfo`.
:param servicesflag: The services as an integer.
:param servicenames: The list of decoded services names, as strings.
"""
servicesflag_generated = 0
for servicename in servicenames:
servicesflag_generated |= getattr(test_framework.messages, 'NODE_' + servicename)
assert servicesflag_generated == servicesflag
class NetTest(DashTestFramework):
def set_test_params(self):
self.set_dash_test_params(3, 1, fast_dip3_enforcement=True)
self.supports_cli = False
def run_test(self):
# Wait for one ping/pong to finish so that we can be sure that there is no chatter between nodes for some time
# Especially the exchange of messages like getheaders and friends causes test failures here
self.nodes[0].ping()
wait_until(lambda: all(['pingtime' in n for n in self.nodes[0].getpeerinfo()]))
        self.log.info('Connect nodes both ways')
        self.connect_nodes(0, 1)
        self.connect_nodes(1, 0)
        self.sync_all()

        self._test_connection_count()
        self._test_getnettotals()
        self._test_getnetworkinfo()
        self._test_getaddednodeinfo()
        self._test_getpeerinfo()
        self.test_service_flags()
        self._test_getnodeaddresses()

    def _test_connection_count(self):
        # connect_nodes connects each node to the other
        # and node0 was also connected to node2 (a masternode)
        # during network setup
        assert_equal(self.nodes[0].getconnectioncount(), 3)

    def _test_getnettotals(self):
        # getnettotals totalbytesrecv and totalbytessent should be
        # consistent with getpeerinfo. Since the RPC calls are not atomic,
        # and messages might have been recvd or sent between RPC calls, call
        # getnettotals before and after and verify that the returned values
        # from getpeerinfo are bounded by those values.
        net_totals_before = self.nodes[0].getnettotals()
        peer_info = self.nodes[0].getpeerinfo()
        net_totals_after = self.nodes[0].getnettotals()
        assert_equal(len(peer_info), 3)
        peers_recv = sum([peer['bytesrecv'] for peer in peer_info])
        peers_sent = sum([peer['bytessent'] for peer in peer_info])

        assert_greater_than_or_equal(peers_recv, net_totals_before['totalbytesrecv'])
        assert_greater_than_or_equal(net_totals_after['totalbytesrecv'], peers_recv)
        assert_greater_than_or_equal(peers_sent, net_totals_before['totalbytessent'])
        assert_greater_than_or_equal(net_totals_after['totalbytessent'], peers_sent)

        # test getnettotals and getpeerinfo by doing a ping
        # the bytes sent/received should change
        # note ping and pong are 32 bytes each
        self.nodes[0].ping()
        wait_until(lambda: (self.nodes[0].getnettotals()['totalbytessent'] >= net_totals_after['totalbytessent'] + 32 * 2), timeout=1)
        wait_until(lambda: (self.nodes[0].getnettotals()['totalbytesrecv'] >= net_totals_after['totalbytesrecv'] + 32 * 2), timeout=1)

        peer_info_after_ping = self.nodes[0].getpeerinfo()
        for before, after in zip(peer_info, peer_info_after_ping):
            assert_greater_than_or_equal(after['bytesrecv_per_msg'].get('pong', 0), before['bytesrecv_per_msg'].get('pong', 0) + 32)
            assert_greater_than_or_equal(after['bytessent_per_msg'].get('ping', 0), before['bytessent_per_msg'].get('ping', 0) + 32)

    def _test_getnetworkinfo(self):
        assert_equal(self.nodes[0].getnetworkinfo()['networkactive'], True)
        assert_equal(self.nodes[0].getnetworkinfo()['connections'], 3)

        self.nodes[0].setnetworkactive(state=False)
        assert_equal(self.nodes[0].getnetworkinfo()['networkactive'], False)
        # Wait a bit for all sockets to close
        wait_until(lambda: self.nodes[0].getnetworkinfo()['connections'] == 0, timeout=3)
        wait_until(lambda: self.nodes[1].getnetworkinfo()['connections'] == 0, timeout=3)

        self.nodes[0].setnetworkactive(state=True)
        self.log.info('Connect nodes both ways')
        self.connect_nodes(0, 1)
        self.connect_nodes(1, 0)

        assert_equal(self.nodes[0].getnetworkinfo()['networkactive'], True)
        assert_equal(self.nodes[0].getnetworkinfo()['connections'], 2)

        # check the `servicesnames` field
        network_info = [node.getnetworkinfo() for node in self.nodes]
        for info in network_info:
            assert_net_servicesnames(int(info["localservices"], 0x10), info["localservicesnames"])

        # Check dynamically generated networks list in getnetworkinfo help output.
        assert "(ipv4, ipv6, onion, i2p)" in self.nodes[0].help("getnetworkinfo")

        self.log.info('Test extended connections info')
        self.connect_nodes(1, 2)
        self.nodes[1].ping()
        wait_until(lambda: all(['pingtime' in n for n in self.nodes[1].getpeerinfo()]))
        assert_equal(self.nodes[1].getnetworkinfo()['connections'], 3)
        assert_equal(self.nodes[1].getnetworkinfo()['inboundconnections'], 1)
        assert_equal(self.nodes[1].getnetworkinfo()['outboundconnections'], 2)
        assert_equal(self.nodes[1].getnetworkinfo()['mnconnections'], 1)
        assert_equal(self.nodes[1].getnetworkinfo()['inboundmnconnections'], 0)
        assert_equal(self.nodes[1].getnetworkinfo()['outboundmnconnections'], 1)

    def _test_getaddednodeinfo(self):
        assert_equal(self.nodes[0].getaddednodeinfo(), [])
        # add a node (node2) to node0
        ip_port = "127.0.0.1:{}".format(p2p_port(2))
        self.nodes[0].addnode(node=ip_port, command='add')
        # check that the node has indeed been added
        added_nodes = self.nodes[0].getaddednodeinfo(ip_port)
        assert_equal(len(added_nodes), 1)
        assert_equal(added_nodes[0]['addednode'], ip_port)
        # check that node cannot be added again
        assert_raises_rpc_error(-23, "Node already added", self.nodes[0].addnode, node=ip_port, command='add')
        # check that node can be removed
        self.nodes[0].addnode(node=ip_port, command='remove')
        assert_equal(self.nodes[0].getaddednodeinfo(), [])
        # check that trying to remove the node again returns an error
        assert_raises_rpc_error(-24, "Node could not be removed", self.nodes[0].addnode, node=ip_port, command='remove')
        # check that a non-existent node returns an error
        assert_raises_rpc_error(-24, "Node has not been added", self.nodes[0].getaddednodeinfo, '1.1.1.1')

    def _test_getpeerinfo(self):
        peer_info = [x.getpeerinfo() for x in self.nodes]
        # check both sides of bidirectional connection between nodes
        # the address bound to on one side will be the source address for the other node
        assert_equal(peer_info[0][0]['addrbind'], peer_info[1][0]['addr'])
        assert_equal(peer_info[1][0]['addrbind'], peer_info[0][0]['addr'])
        # check the `servicesnames` field
        for info in peer_info:
            assert_net_servicesnames(int(info[0]["services"], 0x10), info[0]["servicesnames"])
        # Check dynamically generated networks list in getpeerinfo help output.
        assert "(ipv4, ipv6, onion, i2p, not_publicly_routable)" in self.nodes[0].help("getpeerinfo")
        # This part differs slightly from the Bitcoin implementation. This is expected because we create
        # connections during network setup a bit differently too.
        # We also create more connections during the test itself to test masternode-specific stats.
        assert_equal(peer_info[0][0]['connection_type'], 'manual')
        assert_equal(peer_info[0][1]['connection_type'], 'inbound')
        assert_equal(peer_info[1][0]['connection_type'], 'inbound')
        assert_equal(peer_info[1][1]['connection_type'], 'manual')
        assert_equal(peer_info[1][2]['connection_type'], 'manual')
        assert_equal(peer_info[2][0]['connection_type'], 'inbound')

    def test_service_flags(self):
        self.nodes[0].add_p2p_connection(P2PInterface(), services=(1 << 4) | (1 << 63))
        assert_equal(['UNKNOWN[2^4]', 'UNKNOWN[2^63]'], self.nodes[0].getpeerinfo()[-1]['servicesnames'])
        self.nodes[0].disconnect_p2ps()

    def _test_getnodeaddresses(self):
        self.nodes[0].add_p2p_connection(P2PInterface())

        # Add some addresses to the Address Manager over RPC. Due to the way
        # bucket and bucket position are calculated, some of these addresses
        # will collide.
        imported_addrs = []
        for i in range(10000):
            first_octet = i >> 8
            second_octet = i % 256
            a = "{}.{}.1.1".format(first_octet, second_octet)
            imported_addrs.append(a)
            self.nodes[0].addpeeraddress(a, 8333)

        # Obtain addresses via rpc call and check they were ones sent in before.
        #
        # Maximum possible addresses in addrman is 10000, although actual
        # number will usually be less due to bucket and bucket position
        # collisions.
        node_addresses = self.nodes[0].getnodeaddresses(0)
        assert_greater_than(len(node_addresses), 5000)
        assert_greater_than(10000, len(node_addresses))
        for a in node_addresses:
            assert_equal(a["time"], self.mocktime)
            assert_equal(a["services"], NODE_NETWORK)
            assert a["address"] in imported_addrs
            assert_equal(a["port"], 8333)

        node_addresses = self.nodes[0].getnodeaddresses(1)
        assert_equal(len(node_addresses), 1)

        assert_raises_rpc_error(-8, "Address count out of range", self.nodes[0].getnodeaddresses, -1)

        # addrman's size cannot be known reliably after insertion, as hash collisions may occur
        # so only test that requesting a large number of addresses returns less than that
        LARGE_REQUEST_COUNT = 10000
        node_addresses = self.nodes[0].getnodeaddresses(LARGE_REQUEST_COUNT)
        assert_greater_than(LARGE_REQUEST_COUNT, len(node_addresses))


if __name__ == '__main__':
    NetTest().main()