Merge #19177: test: Fix and clean p2p_invalid_messages functional tests

af2a145e575f23c64909e6cf1fb323c603bda7ca Refactor resource exhaustion test (Troy Giorshev)
5c4648d17ba18e4194959963994cc6b37053f127 Fix "invalid message size" test (Troy Giorshev)
ff1e7b884447a5ba10553b2d964625f94e255bdc Move size limits to module-global (Troy Giorshev)
57890abf2c7919eddfec36178b1136cd44ffe883 Remove two unneeded tests (Troy Giorshev)

Pull request description:

  This PR touches only the p2p_invalid_messages.py functional test module and accomplishes two main goals.  First, it fixes the "invalid message size" test, which previously built a message that was invalid for multiple reasons.  Second, it refactors the file into a single consistent style; the file appears to have originally had two authors, with differing styles and some test duplication.

  It should now be easier and quicker to understand this module, anticipating the upcoming [BIP324](https://github.com/bitcoin/bitcoin/pull/18242) and [AltNet](https://github.com/bitcoin/bitcoin/issues/18989) changes.

  This should probably go in ahead of #19107, but the two are not strictly related.
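
  For context, a minimal, self-contained sketch of the framing the description refers to (the `MAGIC` value and the `build_raw_message` helper are illustrative only, not the test framework's own code): the old test forged the header's length field, so the message was rejected both for announcing an oversized payload and for a header that no longer matched the payload behind it, while the fixed test sends a message whose only defect is that its payload exceeds the limit.

  ```python
  import hashlib
  import struct

  MAGIC = b"\xfa\xbf\xb5\xda"      # placeholder start bytes; real networks differ
  MSG_LIMIT = 3 * 1024 * 1024      # mirrors MAX_PROTOCOL_MESSAGE_LENGTH in this test module

  def build_raw_message(command, payload, fake_length=None):
      """Frame a payload with the 24-byte v1 header: magic, command, length, checksum."""
      checksum = hashlib.sha256(hashlib.sha256(payload).digest()).digest()[:4]
      length = len(payload) if fake_length is None else fake_length
      header = MAGIC + command.encode().ljust(12, b"\x00") + struct.pack("<I", length) + checksum
      return header + payload

  # Old approach: a tiny, otherwise valid message whose length field is overwritten with
  # MAX_SIZE + 1 -- invalid both because the announced size is over the limit and because
  # the header no longer describes the payload that follows.
  old_style = build_raw_message("badmsg", b"d", fake_length=0x02000000 + 1)

  # New approach: a self-consistent header over a payload that is simply too large, so the
  # node rejects it for exactly one reason and disconnects the peer.
  new_style = build_raw_message("badmsg", b"d" * (MSG_LIMIT + 1))
  ```

  The framework's `build_message()` performs this framing for the test, which is why the fixed `test_size` only needs to oversize `str_data` before sending the result with `send_raw_message()`.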

ACKs for top commit:
  jnewbery:
    ACK af2a145e575f23c64909e6cf1fb323c603bda7ca
  MarcoFalke:
    re-ACK af2a145e57 🍦

Tree-SHA512: 9b57561e142c5eaefac5665f7355c8651670400b4db1a89525d2dfdd20e872d6873c4f6175c4222b6f5a8e5210cf5d6a52da69b925b673a2e2ac30a15d670d1c
MarcoFalke 2020-06-12 15:29:48 -04:00 committed by pasta
parent 54501fb5fb
commit 94680f4ee3


@@ -4,9 +4,6 @@
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test node responses to invalid network messages."""
import struct
import sys
from test_framework.messages import (
CBlockHeader,
@@ -27,6 +24,8 @@ from test_framework.util import (
wait_until,
)
MSG_LIMIT = 3 * 1024 * 1024 # 3MB, per MAX_PROTOCOL_MESSAGE_LENGTH
VALID_DATA_LIMIT = MSG_LIMIT - 5 # Account for the 5-byte length prefix
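# The serialized payload carries a 5-byte length prefix (a CompactSize: 0xFE marker plus a
# uint32 for data this large), so subtracting 5 keeps the full payload exactly at MSG_LIMIT.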
class msg_unrecognized:
"""Nonsensical message. Modeled after similar types in test_framework.messages."""
@@ -49,115 +48,13 @@ class InvalidMessagesTest(BitcoinTestFramework):
self.setup_clean_chain = True
def run_test(self):
"""
. Test msg header
0. Send a bunch of large (3MB) messages of an unrecognized type. Check to see
that it isn't an effective DoS against the node.
1. Send an oversized (3MB+) message and check that we're disconnected.
2. Send a few messages with an incorrect data size in the header, ensure the
messages are ignored.
"""
self.test_buffer()
self.test_magic_bytes()
self.test_checksum()
self.test_size()
self.test_msgtype()
self.test_large_inv()
node = self.nodes[0]
self.node = node
node.add_p2p_connection(P2PDataStore())
conn2 = node.add_p2p_connection(P2PDataStore())
msg_limit = 3 * 1024 * 1024 # 3MB, per MAX_PROTOCOL_MESSAGE_LENGTH
valid_data_limit = msg_limit - 5 # Account for the 4-byte length prefix
#
# 0.
#
# Send as large a message as is valid, ensure we aren't disconnected but
# also can't exhaust resources.
#
msg_at_size = msg_unrecognized(str_data="b" * valid_data_limit)
assert len(msg_at_size.serialize()) == msg_limit
self.log.info("Sending a bunch of large, junk messages to test memory exhaustion. May take a bit...")
# Run a bunch of times to test for memory exhaustion.
for _ in range(80):
node.p2p.send_message(msg_at_size)
# Check that, even though the node is being hammered by nonsense from one
# connection, it can still service other peers in a timely way.
for _ in range(20):
conn2.sync_with_ping(timeout=2)
# Peer 1, despite serving up a bunch of nonsense, should still be connected.
self.log.info("Waiting for node to drop junk messages.")
node.p2p.sync_with_ping(timeout=400)
assert node.p2p.is_connected
#
# 1.
#
# Send an oversized message, ensure we're disconnected.
#
# Under macOS this test is skipped due to an unexpected error code
# returned from the closing socket which python/asyncio does not
# yet know how to handle.
#
if sys.platform != 'darwin':
msg_over_size = msg_unrecognized(str_data="b" * (valid_data_limit + 1))
assert len(msg_over_size.serialize()) == (msg_limit + 1)
# An unknown message type (or *any* message type) over
# MAX_PROTOCOL_MESSAGE_LENGTH should result in a disconnect.
node.p2p.send_message(msg_over_size)
node.p2p.wait_for_disconnect(timeout=4)
node.disconnect_p2ps()
conn = node.add_p2p_connection(P2PDataStore())
conn.wait_for_verack()
else:
self.log.info("Skipping test p2p_invalid_messages/1 (oversized message) under macOS")
#
# 2.
#
# Send messages with an incorrect data size in the header.
#
actual_size = 100
msg = msg_unrecognized(str_data="b" * actual_size)
# TODO: handle larger-than cases. I haven't been able to pin down what behavior to expect.
for wrong_size in (2, 77, 78, 79):
self.log.info("Sending a message with incorrect size of {}".format(wrong_size))
# Unmodified message should submit okay.
node.p2p.send_and_ping(msg)
# A message lying about its data size results in a disconnect when the incorrect
# data size is less than the actual size.
#
# TODO: why does behavior change at 78 bytes?
#
node.p2p.send_raw_message(self._tweak_msg_data_size(msg, wrong_size))
# For some reason unknown to me, we sometimes have to push additional data to the
# peer in order for it to realize a disconnect.
try:
node.p2p.send_message(msg_ping(nonce=123123))
except IOError:
pass
node.p2p.wait_for_disconnect(timeout=10)
node.disconnect_p2ps()
node.add_p2p_connection(P2PDataStore())
# Node is still up.
conn = node.add_p2p_connection(P2PDataStore())
self.test_resource_exhaustion()
def test_buffer(self):
self.log.info("Test message with header split across two buffers, should be received")
@@ -202,14 +99,10 @@ class InvalidMessagesTest(BitcoinTestFramework):
def test_size(self):
conn = self.nodes[0].add_p2p_connection(P2PDataStore())
with self.nodes[0].assert_debug_log(['HEADER ERROR - SIZE (badmsg, 33554433 bytes)']):
msg = conn.build_message(msg_unrecognized(str_data="d"))
cut_len = (
4 + # magic
12 # msgtype
)
# modify len to MAX_SIZE + 1
msg = msg[:cut_len] + struct.pack("<I", 0x02000000 + 1) + msg[cut_len + 4:]
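# Note: forging the length field makes this message invalid for more than one reason --
# the announced size exceeds MAX_SIZE and the header no longer matches the 1-byte payload
# behind it.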
with self.nodes[0].assert_debug_log(['']):
# Create a message with oversized payload
msg = msg_unrecognized(str_data="d"*(VALID_DATA_LIMIT + 1))
msg = conn.build_message(msg)
self.nodes[0].p2p.send_raw_message(msg)
conn.wait_for_disconnect(timeout=5)
self.nodes[0].disconnect_p2ps()
@@ -238,25 +131,28 @@
conn.send_and_ping(msg)
self.nodes[0].disconnect_p2ps()
def _tweak_msg_data_size(self, message, wrong_size):
"""
Return a raw message based on another message but with an incorrect data size in
the message header.
"""
raw_msg = self.node.p2p.build_message(message)
def test_resource_exhaustion(self):
conn = self.nodes[0].add_p2p_connection(P2PDataStore())
conn2 = self.nodes[0].add_p2p_connection(P2PDataStore())
msg_at_size = msg_unrecognized(str_data="b" * VALID_DATA_LIMIT)
assert len(msg_at_size.serialize()) == MSG_LIMIT
bad_size_bytes = struct.pack("<I", wrong_size)
num_header_bytes_before_size = 4 + 12
self.log.info("Sending a bunch of large, junk messages to test memory exhaustion. May take a bit...")
# Replace the correct data size in the message with an incorrect one.
raw_msg_with_wrong_size = (
raw_msg[:num_header_bytes_before_size] +
bad_size_bytes +
raw_msg[(num_header_bytes_before_size + len(bad_size_bytes)):]
)
assert len(raw_msg) == len(raw_msg_with_wrong_size)
# Run a bunch of times to test for memory exhaustion.
for _ in range(80):
conn.send_message(msg_at_size)
return raw_msg_with_wrong_size
# Check that, even though the node is being hammered by nonsense from one
# connection, it can still service other peers in a timely way.
for _ in range(20):
conn2.sync_with_ping(timeout=2)
# Peer 1, despite serving up a bunch of nonsense, should still be connected.
self.log.info("Waiting for node to drop junk messages.")
conn.sync_with_ping(timeout=400)
assert conn.is_connected
self.nodes[0].disconnect_p2ps()
if __name__ == '__main__':