mirror of https://github.com/dashpay/dash.git
merge bitcoin#29006: fix v2 transport intermittent test failure

parent d0804d4bf0
commit 1a293c7cc5
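The intermittent failure comes from the pattern the diff below removes: snapshot the peer count, connect, then wait for the count to grow by one. If an unrelated earlier peer disconnects in that window, the count never grows and the wait times out. A minimal self-contained sketch of the failure mode (a plain list stands in for `getpeerinfo` output; nothing here is test-framework API):

    # Sketch: why "wait until len(getpeerinfo()) == num_peers + 1" can hang.
    peers = [{"id": 0}, {"id": 1}]   # two pre-existing peers
    num_peers = len(peers)           # snapshot: 2

    peers.append({"id": 2})          # the peer under test connects...
    peers.pop(0)                     # ...while an unrelated old peer drops

    # Count-based wait: still 2 peers, never num_peers + 1 -> timeout.
    assert len(peers) != num_peers + 1
    # Id-based wait: ids only ever increase, so the new peer is visible.
    assert peers[-1]["id"] > 1
    print("count check would hang; highest-id check succeeds")

The diff below therefore replaces every count-based wait with a `wait_for_new_peer` context manager keyed on the highest peer id, added to `TestNode` in the second file.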
--- a/test/functional/p2p_v2_transport.py
+++ b/test/functional/p2p_v2_transport.py
@@ -133,9 +133,8 @@ class V2TransportTest(BitcoinTestFramework):
         V1_PREFIX = MAGIC_BYTES[self.chain] + b"version\x00\x00\x00\x00\x00"
         assert_equal(len(V1_PREFIX), 16)
         with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
-            num_peers = len(self.nodes[0].getpeerinfo())
-            s.connect(("127.0.0.1", p2p_port(0)))
-            self.wait_until(lambda: len(self.nodes[0].getpeerinfo()) == num_peers + 1)
+            with self.nodes[0].wait_for_new_peer():
+                s.connect(("127.0.0.1", p2p_port(0)))
             s.sendall(V1_PREFIX[:-1])
             assert_equal(self.nodes[0].getpeerinfo()[-1]["transport_protocol_type"], "detecting")
             s.sendall(bytes([V1_PREFIX[-1]])) # send out last prefix byte
@@ -144,22 +143,23 @@ class V2TransportTest(BitcoinTestFramework):
         # Check wrong network prefix detection (hits if the next 12 bytes correspond to a v1 version message)
         wrong_network_magic_prefix = MAGIC_BYTES["mainnet"] + V1_PREFIX[4:]
         with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
-            s.connect(("127.0.0.1", p2p_port(0)))
+            with self.nodes[0].wait_for_new_peer():
+                s.connect(("127.0.0.1", p2p_port(0)))
             with self.nodes[0].assert_debug_log(["V2 transport error: V1 peer with wrong MessageStart"]):
                 s.sendall(wrong_network_magic_prefix + b"somepayload")
 
         # Check detection of missing garbage terminator (hits after fixed amount of data if terminator never matches garbage)
         MAX_KEY_GARB_AND_GARBTERM_LEN = 64 + 4095 + 16
         with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
-            num_peers = len(self.nodes[0].getpeerinfo())
-            s.connect(("127.0.0.1", p2p_port(0)))
-            self.wait_until(lambda: len(self.nodes[0].getpeerinfo()) == num_peers + 1)
+            with self.nodes[0].wait_for_new_peer():
+                s.connect(("127.0.0.1", p2p_port(0)))
             s.sendall(b'\x00' * (MAX_KEY_GARB_AND_GARBTERM_LEN - 1))
             self.wait_until(lambda: self.nodes[0].getpeerinfo()[-1]["bytesrecv"] == MAX_KEY_GARB_AND_GARBTERM_LEN - 1)
             with self.nodes[0].assert_debug_log(["V2 transport error: missing garbage terminator"]):
+                peer_id = self.nodes[0].getpeerinfo()[-1]["id"]
                 s.sendall(b'\x00') # send out last byte
                 # should disconnect immediately
-                self.wait_until(lambda: len(self.nodes[0].getpeerinfo()) == num_peers)
+                self.wait_until(lambda: not peer_id in [p["id"] for p in self.nodes[0].getpeerinfo()])
 
 
 if __name__ == '__main__':
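The same hunk also fixes the disconnect check at the end of the garbage-terminator test: instead of waiting for the peer count to fall back to `num_peers`, it records the id of the peer under test and waits for that specific id to vanish from `getpeerinfo`. A small self-contained illustration of why the id check is robust under unrelated churn (again, a plain list stands in for the RPC output):

    peers = [{"id": 3}, {"id": 7}]   # id 7 is the peer under test
    peer_id = peers[-1]["id"]
    initial_count = len(peers)

    peers.pop(0)                     # unrelated peer leaves...
    peers.append({"id": 8})          # ...and a brand-new peer joins

    # Count-based check misfires: the count is back to its initial value
    # even though the peer under test (id 7) is still connected.
    assert len(peers) == initial_count
    assert peer_id in [p["id"] for p in peers]

    peers = [p for p in peers if p["id"] != peer_id]  # now id 7 disconnects
    assert peer_id not in [p["id"] for p in peers]    # id check fires only here
    print("disconnect of the specific peer detected")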
--- a/test/functional/test_framework/test_node.py
+++ b/test/functional/test_framework/test_node.py
@@ -490,6 +490,24 @@ class TestNode():
                     'Expected messages "{}" does not partially match log:\n\n{}\n\n'.format(
                         str(expected_msgs), print_log))
 
+    @contextlib.contextmanager
+    def wait_for_new_peer(self, timeout=5):
+        """
+        Wait until the node is connected to at least one new peer. We detect this
+        by watching for an increased highest peer id, using the `getpeerinfo` RPC call.
+        Note that the simpler approach of only accounting for the number of peers
+        suffers from race conditions, as disconnects from unrelated previous peers
+        could happen anytime in-between.
+        """
+        def get_highest_peer_id():
+            peer_info = self.getpeerinfo()
+            return peer_info[-1]["id"] if peer_info else -1
+
+        initial_peer_id = get_highest_peer_id()
+        yield
+        wait_until_helper(lambda: get_highest_peer_id() > initial_peer_id,
+                          timeout=timeout, timeout_factor=self.timeout_factor)
+
     @contextlib.contextmanager
     def profile_with_perf(self, profile_name: str):
         """
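For reference, here is a hedged, self-contained sketch of the pattern the new helper implements, runnable outside the test framework. `FakeNode` and its methods are invented stand-ins for a node's `getpeerinfo` RPC, and a simple busy-wait replaces `wait_until_helper`; only the record-id/yield/wait-for-higher-id structure mirrors the code added above:

    import contextlib
    import time

    class FakeNode:
        """Illustrative stand-in for a node exposing getpeerinfo (not framework code)."""
        def __init__(self):
            self._next_id = 0
            self.peers = []          # list of {"id": int}, sorted by id

        def connect(self):
            self.peers.append({"id": self._next_id})
            self._next_id += 1

        def disconnect_oldest(self):
            self.peers.pop(0)

        def getpeerinfo(self):
            return list(self.peers)

    @contextlib.contextmanager
    def wait_for_new_peer(node, timeout=5):
        # Same structure as the diff above: record the highest peer id, run
        # the body, then wait for a strictly higher id. Ids only ever
        # increase, so a higher id proves a new peer arrived no matter what
        # else disconnected in the meantime.
        def highest_id():
            info = node.getpeerinfo()
            return info[-1]["id"] if info else -1

        before = highest_id()
        yield
        deadline = time.monotonic() + timeout
        while highest_id() <= before:
            if time.monotonic() > deadline:
                raise AssertionError("no new peer within timeout")
            time.sleep(0.05)

    node = FakeNode()
    node.connect()                   # pre-existing peer (id 0)
    with wait_for_new_peer(node):
        node.disconnect_oldest()     # unrelated disconnect mid-wait...
        node.connect()               # ...cannot mask the new peer (id 1)
    print("new peer detected")       # a raw peer-count wait could hang here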