Mirror of https://github.com/dashpay/dash.git (synced 2024-12-25 03:52:49 +01:00)
Merge #6239: feat: increase the number of block headers able to be downloaded at once to 8000 in protocol version 70235

48c7f98b1a doc: drop trailing whitespace (pasta)
697743d77b test: add missing import (UdjinM6)
cfe99fd289 docs: add release notes for 6239 (pasta)
a6bbaacfaa fix: GetHeadersLimit is used for getheaders(2) and headers(2), refactor it to accept `compressed` instead of `msg_type` (UdjinM6)
b224f3f6ca bump p2p_version in tests (PastaPastaPasta)
b423f42aae refactor: sort imports (UdjinM6)
f6c68ba71b refactor: simplify _compute_requested_block_headers (UdjinM6)
07876b2c4a use `MAX_HEADERS_UNCOMPRESSED_RESULT` not `MAX_HEADERS_UNCOMPRESSED_RESULTS`; use `MAX_HEADERS_UNCOMPRESSED_RESULT` in RPC to avoid breaking changes (pasta)
b137280df4 change to _COMPRESSED or _UNCOMPRESSED (pasta)
303bc7af99 fix: increase it for headers2 only (UdjinM6)
e23410ffdd trivial: rename `MAX_HEADERS_RESULTS_NEW` to `MAX_HEADERS_RESULTS` (Kittywhiskers Van Gogh)
bcf0320691 trivial: move the headers limit determination to `GetHeadersLimit()` (Kittywhiskers Van Gogh)
993c7c0f90 feat: increase the number of block headers able to be downloaded at once to 8000 in protocol version `70234` (pasta)

Pull request description:

  ## Issue being fixed or feature implemented
  We did some testing quite a while ago that found that sending 8000 headers at a time could speed things up, but we wanted to wait until compressed headers were implemented. Well, they've been implemented!

  ## What was done?
  Bump 2000 -> 8000, triggered by protocol version.

  ## How Has This Been Tested?
  It hasn't yet; we should set up a few nodes running this and sync them from each other.

  ## Breaking Changes
  New protocol version. Not breaking, but I should probably add release notes.

  ## Checklist:
  _Go over all the following points, and put an `x` in all the boxes that apply._
  - [ ] I have performed a self-review of my own code
  - [ ] I have commented my code, particularly in hard-to-understand areas
  - [ ] I have added or updated relevant unit/integration/functional/e2e tests
  - [ ] I have made corresponding changes to the documentation
  - [x] I have assigned this pull request to a milestone _(for repository code-owners and collaborators only)_

ACKs for top commit:
  UdjinM6:
    light ACK 48c7f98b1a
  knst:
    utACK 48c7f98b1a

Tree-SHA512: 54c68b9496131ab7f32504d44398d776a151df809d0120d093bbabb18904a783bd9b58796820209f5d75552df5476e30eaa09d68f7c5057882f94b5766a64f4c
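To make the gating concrete before the diff: the new 8000-header limit only applies when both sides have negotiated protocol `70235` or later and the compressed `headers2` path is in use; everything else keeps the old 2000 limit. A minimal Python sketch of that selection, mirroring the `GetHeadersLimit()` helper added below (`choose_headers_limit` is an illustrative name, not code from the PR):

```python
# Minimal sketch of the version-gated limit, mirroring GetHeadersLimit() in the diff below.
# choose_headers_limit() is a hypothetical helper, not part of the PR.

INCREASE_MAX_HEADERS2_VERSION = 70235   # protocol version introduced by this PR
MAX_HEADERS_UNCOMPRESSED_RESULT = 2000  # limit for headers/getheaders
MAX_HEADERS_COMPRESSED_RESULT = 8000    # limit for headers2/getheaders2

def choose_headers_limit(common_version: int, compressed: bool) -> int:
    """Return the per-message header limit negotiated with a peer."""
    if common_version >= INCREASE_MAX_HEADERS2_VERSION and compressed:
        return MAX_HEADERS_COMPRESSED_RESULT
    return MAX_HEADERS_UNCOMPRESSED_RESULT

assert choose_headers_limit(70235, compressed=True) == 8000
assert choose_headers_limit(70235, compressed=False) == 2000  # uncompressed stays at 2000
assert choose_headers_limit(70234, compressed=True) == 2000   # older peers keep the old limit
```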
Commit 312134e39e

doc/release-notes-6239.md (new file, +5 lines)
```diff
@@ -0,0 +1,5 @@
+P2P and Network Changes
+-----------------------
+
+The max number of compressed block headers which can be requested at once has been increased from 2000 to 8000. This
+change activates with the protocol version `70235` and only applies to compressed block headers.
```
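For a rough sense of the benefit: header sync is a sequential request/response exchange, so the batch size bounds the number of round trips to a peer. A back-of-the-envelope sketch, using an illustrative chain height rather than a figure from this PR:

```python
# Back-of-the-envelope round-trip count for header sync; the chain height is illustrative only.
import math

chain_height = 2_000_000  # assumed order of magnitude, not taken from the PR

round_trips_old = math.ceil(chain_height / 2000)  # one getheaders2/headers2 exchange per 2000 headers
round_trips_new = math.ceil(chain_height / 8000)  # per 8000 headers once protocol 70235 is negotiated

print(round_trips_old, round_trips_new)  # 1000 vs 250 sequential exchanges
```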
src/net_processing.cpp

```diff
@@ -1041,6 +1041,15 @@ static bool IsLimitedPeer(const Peer& peer)
             (peer.m_their_services & NODE_NETWORK_LIMITED));
 }
 
+/** Get maximum number of headers that can be included in one batch */
+static uint16_t GetHeadersLimit(const CNode& pfrom, bool compressed)
+{
+    if (pfrom.GetCommonVersion() >= INCREASE_MAX_HEADERS2_VERSION && compressed) {
+        return MAX_HEADERS_COMPRESSED_RESULT;
+    }
+    return MAX_HEADERS_UNCOMPRESSED_RESULT;
+}
+
 static void PushInv(Peer& peer, const CInv& inv)
 {
     // Dash always initializes m_tx_relay
@@ -2963,8 +2972,9 @@ void PeerManagerImpl::ProcessHeadersMessage(CNode& pfrom, Peer& peer,
     }
 
     // Consider fetching more headers.
-    if (nCount == MAX_HEADERS_RESULTS) {
-        std::string msg_type = UsesCompressedHeaders(peer) ? NetMsgType::GETHEADERS2 : NetMsgType::GETHEADERS;
+    const bool uses_compressed = UsesCompressedHeaders(peer);
+    const std::string msg_type = uses_compressed ? NetMsgType::GETHEADERS2 : NetMsgType::GETHEADERS;
+    if (nCount == GetHeadersLimit(pfrom, uses_compressed)) {
         // Headers message had its maximum size; the peer may have more headers.
         if (MaybeSendGetHeaders(pfrom, msg_type, m_chainman.ActiveChain().GetLocator(pindexLast), peer)) {
             LogPrint(BCLog::NET, "more %s (%d) to end to peer=%d (startheight:%d)\n",
@@ -2972,7 +2982,7 @@ void PeerManagerImpl::ProcessHeadersMessage(CNode& pfrom, Peer& peer,
         }
     }
 
-    UpdatePeerStateForReceivedHeaders(pfrom, pindexLast, received_new_header, nCount == MAX_HEADERS_RESULTS);
+    UpdatePeerStateForReceivedHeaders(pfrom, pindexLast, received_new_header, nCount == GetHeadersLimit(pfrom, uses_compressed));
 
     // Consider immediately downloading blocks.
     HeadersDirectFetchBlocks(pfrom, peer, pindexLast);
@@ -4066,8 +4076,8 @@ void PeerManagerImpl::ProcessMessage(
         pindex = m_chainman.ActiveChain().Next(pindex);
     }
 
-    const auto send_headers = [this /* for m_connman */, &hashStop, &pindex, &nodestate, &pfrom, &msgMaker](auto msg_type, auto& v_headers, auto callback) {
-        int nLimit = MAX_HEADERS_RESULTS;
+    const auto send_headers = [this /* for m_connman */, &hashStop, &pindex, &nodestate, &pfrom, &msgMaker](auto msg_type_internal, auto& v_headers, auto callback) {
+        int nLimit = GetHeadersLimit(pfrom, msg_type_internal == NetMsgType::HEADERS2);
         for (; pindex; pindex = m_chainman.ActiveChain().Next(pindex)) {
             v_headers.push_back(callback(pindex));
 
@@ -4087,7 +4097,7 @@ void PeerManagerImpl::ProcessMessage(
         // will re-announce the new block via headers (or compact blocks again)
         // in the SendMessages logic.
         nodestate->pindexBestHeaderSent = pindex ? pindex : m_chainman.ActiveChain().Tip();
-        m_connman.PushMessage(&pfrom, msgMaker.Make(msg_type, v_headers));
+        m_connman.PushMessage(&pfrom, msgMaker.Make(msg_type_internal, v_headers));
     };
 
     LogPrint(BCLog::NET, "%s %d to %s from peer=%d\n", msg_type, (pindex ? pindex->nHeight : -1), hashStop.IsNull() ? "end" : hashStop.ToString(), pfrom.GetId());
@@ -4575,7 +4585,7 @@ void PeerManagerImpl::ProcessMessage(
 
     // Bypass the normal CBlock deserialization, as we don't want to risk deserializing 2000 full blocks.
    unsigned int nCount = ReadCompactSize(vRecv);
-    if (nCount > MAX_HEADERS_RESULTS) {
+    if (nCount > GetHeadersLimit(pfrom, msg_type == NetMsgType::HEADERS2)) {
        Misbehaving(pfrom.GetId(), 20, strprintf("headers message size = %u", nCount));
        return;
    }
@@ -6010,4 +6020,4 @@ bool PeerManagerImpl::SendMessages(CNode* pto)
     }
     } // release cs_main
     return true;
-}
+}
```
src/rpc/blockchain.cpp

```diff
@@ -997,7 +997,7 @@ static RPCHelpMan getblockheaders()
                 "If verbose is true, each item is an Object with information about a single blockheader.\n",
                 {
                     {"blockhash", RPCArg::Type::STR_HEX, RPCArg::Optional::NO, "The block hash"},
-                    {"count", RPCArg::Type::NUM, /* default */ strprintf("%s", MAX_HEADERS_RESULTS), ""},
+                    {"count", RPCArg::Type::NUM, /* default */ strprintf("%s", MAX_HEADERS_UNCOMPRESSED_RESULT), ""},
                     {"verbose", RPCArg::Type::BOOL, /* default */ "true", "true for a json object, false for the hex-encoded data"},
                 },
                 {
@@ -1055,11 +1055,11 @@ static RPCHelpMan getblockheaders()
        throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Block not found");
    }
 
-    int nCount = MAX_HEADERS_RESULTS;
+    int nCount = MAX_HEADERS_UNCOMPRESSED_RESULT;
    if (!request.params[1].isNull())
        nCount = request.params[1].get_int();
 
-    if (nCount <= 0 || nCount > (int)MAX_HEADERS_RESULTS)
+    if (nCount <= 0 || nCount > (int)MAX_HEADERS_UNCOMPRESSED_RESULT)
        throw JSONRPCError(RPC_INVALID_PARAMETER, "Count is out of range");
 
    bool fVerbose = true;
@@ -1135,7 +1135,7 @@ static RPCHelpMan getmerkleblocks()
                {
                    {"filter", RPCArg::Type::STR_HEX, RPCArg::Optional::NO, "The hex-encoded bloom filter"},
                    {"blockhash", RPCArg::Type::STR_HEX, RPCArg::Optional::NO, "The block hash"},
-                   {"count", RPCArg::Type::NUM, /* default */ strprintf("%s", MAX_HEADERS_RESULTS), ""},
+                   {"count", RPCArg::Type::NUM, /* default */ strprintf("%s", MAX_HEADERS_UNCOMPRESSED_RESULT), ""},
                },
                RPCResult{
                    RPCResult::Type::ARR, "", "",
@@ -1164,11 +1164,11 @@ static RPCHelpMan getmerkleblocks()
        throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Block not found");
    }
 
-    int nCount = MAX_HEADERS_RESULTS;
+    int nCount = MAX_HEADERS_UNCOMPRESSED_RESULT;
    if (!request.params[2].isNull())
        nCount = request.params[2].get_int();
 
-    if (nCount <= 0 || nCount > (int)MAX_HEADERS_RESULTS) {
+    if (nCount <= 0 || nCount > (int)MAX_HEADERS_UNCOMPRESSED_RESULT) {
        throw JSONRPCError(RPC_INVALID_PARAMETER, "Count is out of range");
    }
 
```
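Note that the RPC side only renames the constant: `getblockheaders` and `getmerkleblocks` keep defaulting to, and capping at, 2000, so existing callers see no behavior change. A small Python sketch of that unchanged validation (`clamp_count` is an illustrative stand-in for the RPC's inline checks):

```python
# Sketch of the count-parameter handling kept by the rename: default and upper bound stay at the
# uncompressed limit (2000). clamp_count() is illustrative, not an RPC in the codebase.
MAX_HEADERS_UNCOMPRESSED_RESULT = 2000

def clamp_count(requested=None):
    """Mirror the diff's validation: default to the limit, reject out-of-range values."""
    count = MAX_HEADERS_UNCOMPRESSED_RESULT if requested is None else requested
    if count <= 0 or count > MAX_HEADERS_UNCOMPRESSED_RESULT:
        raise ValueError("Count is out of range")
    return count

assert clamp_count() == 2000   # default unchanged for existing callers
assert clamp_count(500) == 500
```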
src/validation.h

```diff
@@ -85,7 +85,8 @@ static const int MAX_SCRIPTCHECK_THREADS = 15;
 static const int DEFAULT_SCRIPTCHECK_THREADS = 0;
 /** Number of headers sent in one getheaders result. We rely on the assumption that if a peer sends
  *  less than this number, we reached its tip. Changing this value is a protocol upgrade. */
-static const unsigned int MAX_HEADERS_RESULTS = 2000;
+static const unsigned int MAX_HEADERS_UNCOMPRESSED_RESULT = 2000;
+static const unsigned int MAX_HEADERS_COMPRESSED_RESULT = 8000;
 
 static const int64_t DEFAULT_MAX_TIP_AGE = 6 * 60 * 60; // ~144 blocks behind -> 2 x fork detection time, was 24 * 60 * 60 in bitcoin
```
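The comment kept on these constants describes the sync loop's stop condition: a batch smaller than the limit means we have reached the peer's tip, a full batch means more headers are likely available. Since the limit is now per-peer and per-message-type, that check has to compare against the negotiated limit rather than a single constant, which is why `GetHeadersLimit()` is threaded through `ProcessHeadersMessage` above. A small sketch under that reading (function name is illustrative):

```python
# Sketch of the "reached the peer's tip" inference described by the validation.h comment.
# peer_may_have_more_headers() is an illustrative name, not code from the PR.
def peer_may_have_more_headers(received_count: int, negotiated_limit: int) -> bool:
    """A full batch was likely truncated by the limit; a short batch means the tip was reached."""
    return received_count == negotiated_limit

assert peer_may_have_more_headers(8000, 8000)      # full compressed batch: request more
assert not peer_may_have_more_headers(1500, 8000)  # short batch: tip reached
assert peer_may_have_more_headers(2000, 2000)      # full uncompressed batch: request more
```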
src/version.h

```diff
@@ -11,7 +11,7 @@
  */
 
-static const int PROTOCOL_VERSION = 70234;
+static const int PROTOCOL_VERSION = 70235;
 
 //! initial proto version, to be increased after version/verack negotiation
 static const int INIT_PROTO_VERSION = 209;
@@ -58,6 +58,9 @@ static const int NO_LEGACY_ISLOCK_PROTO_VERSION = 70231;
 //! Inventory type for DSQ messages added
 static const int DSQ_INV_VERSION = 70234;
 
+//! Maximum header count for HEADERS2 message was increased from 2000 to 8000 in this version
+static const int INCREASE_MAX_HEADERS2_VERSION = 70235;
+
 // Make sure that none of the values above collide with `ADDRV2_FORMAT`.
 
 #endif // BITCOIN_VERSION_H
```
test/functional/p2p_invalid_messages.py

```diff
@@ -10,11 +10,13 @@ from test_framework.messages import (
     CInv,
     msg_ping,
     ser_string,
-    MAX_HEADERS_RESULTS,
+    MAX_HEADERS_COMPRESSED_RESULT,
+    MAX_HEADERS_UNCOMPRESSED_RESULT,
     MAX_INV_SIZE,
     MAX_PROTOCOL_MESSAGE_LENGTH,
     msg_getdata,
     msg_headers,
+    msg_headers2,
     msg_inv,
     MSG_TX,
     msg_version,
@@ -153,9 +155,13 @@ class InvalidMessagesTest(BitcoinTestFramework):
         self.test_oversized_msg(msg_getdata([CInv(MSG_TX, 1)] * size), size)
 
     def test_oversized_headers_msg(self):
-        size = MAX_HEADERS_RESULTS + 1
+        size = MAX_HEADERS_UNCOMPRESSED_RESULT + 1
         self.test_oversized_msg(msg_headers([CBlockHeader()] * size), size)
 
+    def test_oversized_headers2_msg(self):
+        size = MAX_HEADERS_COMPRESSED_RESULT + 1
+        self.test_oversized_msg(msg_headers2([CBlockHeader()] * size), size)
+
     def test_resource_exhaustion(self):
         self.log.info("Test node stays up despite many large junk messages")
         conn = self.nodes[0].add_p2p_connection(P2PDataStore())
```
test/functional/test_framework/messages.py

```diff
@@ -42,7 +42,8 @@ MAX_MONEY = 21000000 * COIN
 BIP125_SEQUENCE_NUMBER = 0xfffffffd  # Sequence number that is BIP 125 opt-in and BIP 68-opt-out
 
 MAX_PROTOCOL_MESSAGE_LENGTH = 3 * 1024 * 1024  # Maximum length of incoming protocol messages
-MAX_HEADERS_RESULTS = 2000  # Number of headers sent in one getheaders result
+MAX_HEADERS_UNCOMPRESSED_RESULT = 2000  # Number of headers sent in one getheaders result
+MAX_HEADERS_COMPRESSED_RESULT = 8000  # Number of headers2 sent in one getheaders2 result
 MAX_INV_SIZE = 50000  # Maximum number of entries in an 'inv' protocol message
 
 NODE_NETWORK = (1 << 0)
```
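As a sanity check on the new constants against `MAX_PROTOCOL_MESSAGE_LENGTH`: assuming roughly 81 bytes per uncompressed headers entry (an 80-byte header plus a 1-byte transaction count) and that `headers2` entries are no larger than that, even a worst-case 8000-entry batch stays far below the 3 MiB message cap. The per-entry byte figure is an assumption for illustration, not taken from this diff:

```python
# Rough size check against the 3 MiB protocol message cap; 81 bytes per entry is an assumption
# (80-byte header + 1-byte tx count) and headers2 entries are typically smaller after compression.
MAX_PROTOCOL_MESSAGE_LENGTH = 3 * 1024 * 1024

uncompressed_batch = 2000 * 81     # 162,000 bytes for a full `headers` payload
compressed_worst_case = 8000 * 81  # 648,000 bytes even if headers2 achieved no compression at all

assert uncompressed_batch < MAX_PROTOCOL_MESSAGE_LENGTH
assert compressed_worst_case < MAX_PROTOCOL_MESSAGE_LENGTH
```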
test/functional/test_framework/p2p.py

```diff
@@ -31,7 +31,8 @@ import threading
 from test_framework.messages import (
     CBlockHeader,
     CompressibleBlockHeader,
-    MAX_HEADERS_RESULTS,
+    MAX_HEADERS_COMPRESSED_RESULT,
+    MAX_HEADERS_UNCOMPRESSED_RESULT,
     NODE_HEADERS_COMPRESSED,
     msg_addr,
     msg_addrv2,
@@ -91,8 +92,8 @@ logger = logging.getLogger("TestFramework.p2p")
 # The minimum P2P version that this test framework supports
 MIN_P2P_VERSION_SUPPORTED = 60001
 # The P2P version that this test framework implements and sends in its `version` message
-# Version 70231 drops supports for legacy InstantSend locks
-P2P_VERSION = 70231
+# Version 70235 increased max header count for HEADERS2 message from 2000 to 8000
+P2P_VERSION = 70235
 # The services that this test framework offers in its `version` message
 P2P_SERVICES = NODE_NETWORK | NODE_HEADERS_COMPRESSED
 # The P2P user agent string that this test framework sends in its `version` message
@@ -712,7 +713,7 @@ class P2PDataStore(P2PInterface):
         else:
             logger.debug('getdata message type {} received.'.format(hex(inv.type)))
 
-    def _compute_requested_block_headers(self, locator, hash_stop):
+    def _compute_requested_block_headers(self, locator, hash_stop, max_results):
         # Assume that the most recent block added is the tip
         if not self.block_store:
             return
@@ -733,14 +734,14 @@ class P2PDataStore(P2PInterface):
                 break
 
         # Truncate the list if there are too many headers
-        headers_list = headers_list[:-MAX_HEADERS_RESULTS - 1:-1]
+        headers_list = headers_list[:-max_results - 1:-1]
 
         return headers_list
 
     def on_getheaders2(self, message):
         """Search back through our block store for the locator, and reply with a compressed headers message if found."""
 
-        headers_list = self._compute_requested_block_headers(message.locator, message.hashstop)
+        headers_list = self._compute_requested_block_headers(message.locator, message.hashstop, MAX_HEADERS_COMPRESSED_RESULT)
         compressible_headers_list = [CompressibleBlockHeader(h) for h in headers_list] if headers_list else None
         response = msg_headers2(compressible_headers_list)
 
@@ -750,7 +751,7 @@ class P2PDataStore(P2PInterface):
     def on_getheaders(self, message):
         """Search back through our block store for the locator, and reply with a headers message if found."""
 
-        headers_list = self._compute_requested_block_headers(message.locator, message.hashstop)
+        headers_list = self._compute_requested_block_headers(message.locator, message.hashstop, MAX_HEADERS_UNCOMPRESSED_RESULT)
         response = msg_headers(headers_list)
 
         if response is not None:
```
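The truncation slice in `_compute_requested_block_headers` is easy to misread. Because the test framework builds `headers_list` newest-first while walking back from the tip, `headers_list[:-max_results - 1:-1]` keeps only the `max_results` entries closest to the locator and returns them oldest-first, matching the ascending order of a headers reply. A standalone illustration with integers standing in for headers:

```python
# Standalone illustration of the truncation slice used by _compute_requested_block_headers.
# Integers stand in for block headers; the list is built newest-first (tip at index 0).
headers_list = [105, 104, 103, 102, 101]  # heights, newest first
max_results = 3

truncated = headers_list[:-max_results - 1:-1]
assert truncated == [101, 102, 103]  # the oldest max_results headers, returned oldest-first
```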