Mirror of https://github.com/dashpay/dash.git
Merge pull request #4355 from kittywhiskers/bfilters
merge bitcoin#16787, #17474, #17687, #18165, #18877, #18960, #19010, #19044, #19070: block filters
Commit 4ffd42de63
@@ -4,6 +4,7 @@
#include <mutex>
#include <sstream>
#include <set>

#include <blockfilter.h>
#include <crypto/siphash.h>

@@ -221,15 +222,14 @@ bool BlockFilterTypeByName(const std::string& name, BlockFilterType& filter_type
    return false;
}

const std::vector<BlockFilterType>& AllBlockFilterTypes()
const std::set<BlockFilterType>& AllBlockFilterTypes()
{
    static std::vector<BlockFilterType> types;
    static std::set<BlockFilterType> types;

    static std::once_flag flag;
    std::call_once(flag, []() {
        types.reserve(g_filter_types.size());
        for (auto entry : g_filter_types) {
            types.push_back(entry.first);
            types.insert(entry.first);
        }
    });
@@ -7,6 +7,7 @@
#include <stdint.h>
#include <string>
#include <set>
#include <unordered_set>
#include <vector>

@@ -96,7 +97,7 @@ const std::string& BlockFilterTypeName(BlockFilterType filter_type);
bool BlockFilterTypeByName(const std::string& name, BlockFilterType& filter_type);

/** Get a list of known filter types. */
const std::vector<BlockFilterType>& AllBlockFilterTypes();
const std::set<BlockFilterType>& AllBlockFilterTypes();

/** Get a comma-separated list of known filter type names. */
const std::string& ListBlockFilterTypes();

@@ -142,8 +143,8 @@ public:

    template <typename Stream>
    void Serialize(Stream& s) const {
        s << m_block_hash
          << static_cast<uint8_t>(m_filter_type)
        s << static_cast<uint8_t>(m_filter_type)
          << m_block_hash
          << m_filter.GetEncoded();
    }

@@ -152,8 +153,8 @@ public:
        std::vector<unsigned char> encoded_filter;
        uint8_t filter_type;

        s >> m_block_hash
          >> filter_type
        s >> filter_type
          >> m_block_hash
          >> encoded_filter;

        m_filter_type = static_cast<BlockFilterType>(filter_type);
@@ -32,6 +32,12 @@ constexpr char DB_FILTER_POS = 'P';
constexpr unsigned int MAX_FLTR_FILE_SIZE = 0x1000000; // 16 MiB
/** The pre-allocation chunk size for fltr?????.dat files */
constexpr unsigned int FLTR_FILE_CHUNK_SIZE = 0x100000; // 1 MiB
/** Maximum size of the cfheaders cache
 *  We have a limit to prevent a bug in filling this cache
 *  potentially turning into an OOM. At 2000 entries, this cache
 *  is big enough for a 2,000,000 length block chain, which
 *  we should be enough until ~2047. */
constexpr size_t CF_HEADERS_CACHE_MAX_SZ{2000};

namespace {

@@ -384,13 +390,32 @@ bool BlockFilterIndex::LookupFilter(const CBlockIndex* block_index, BlockFilter&
    return ReadFilterFromDisk(entry.pos, filter_out);
}

bool BlockFilterIndex::LookupFilterHeader(const CBlockIndex* block_index, uint256& header_out) const
bool BlockFilterIndex::LookupFilterHeader(const CBlockIndex* block_index, uint256& header_out)
{
    LOCK(m_cs_headers_cache);

    bool is_checkpoint{block_index->nHeight % CFCHECKPT_INTERVAL == 0};

    if (is_checkpoint) {
        // Try to find the block in the headers cache if this is a checkpoint height.
        auto header = m_headers_cache.find(block_index->GetBlockHash());
        if (header != m_headers_cache.end()) {
            header_out = header->second;
            return true;
        }
    }

    DBVal entry;
    if (!LookupOne(*m_db, block_index, entry)) {
        return false;
    }

    if (is_checkpoint &&
        m_headers_cache.size() < CF_HEADERS_CACHE_MAX_SZ) {
        // Add to the headers cache if this is a checkpoint height.
        m_headers_cache.emplace(block_index->GetBlockHash(), entry.header);
    }

    header_out = entry.header;
    return true;
}
@@ -10,6 +10,14 @@
#include <flatfile.h>
#include <index/base.h>

/** Interval between compact filter checkpoints. See BIP 157. */
static constexpr int CFCHECKPT_INTERVAL = 1000;

struct FilterHeaderHasher
{
    size_t operator()(const uint256& hash) const { return ReadLE64(hash.begin()); }
};

/**
 * BlockFilterIndex is used to store and retrieve block filters, hashes, and headers for a range of
 * blocks by height. An index is constructed for each supported filter type with its own database

@@ -30,6 +38,10 @@ private:
    bool ReadFilterFromDisk(const FlatFilePos& pos, BlockFilter& filter) const;
    size_t WriteFilterToDisk(FlatFilePos& pos, const BlockFilter& filter);

    Mutex m_cs_headers_cache;
    /** cache of block hash to filter header, to avoid disk access when responding to getcfcheckpt. */
    std::unordered_map<uint256, uint256, FilterHeaderHasher> m_headers_cache GUARDED_BY(m_cs_headers_cache);

protected:
    bool Init() override;

@@ -54,7 +66,7 @@ public:
    bool LookupFilter(const CBlockIndex* block_index, BlockFilter& filter_out) const;

    /** Get a single filter header by block. */
    bool LookupFilterHeader(const CBlockIndex* block_index, uint256& header_out) const;
    bool LookupFilterHeader(const CBlockIndex* block_index, uint256& header_out);

    /** Get a range of filters between two heights on a chain. */
    bool LookupFilterRange(int start_height, const CBlockIndex* stop_index,
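Checkpoint headers are spaced every CFCHECKPT_INTERVAL blocks, so a getcfcheckpt response carries one filter header per full interval at or below the stop block. A minimal sketch of that relationship (not part of this diff; the height arithmetic mirrors the ProcessGetCFCheckPt handler added later in this changeset):

    # Sketch: how many checkpoint headers a getcfcheckpt response should contain.
    CFCHECKPT_INTERVAL = 1000  # same value as the constant above

    def expected_checkpoint_count(stop_height):
        """One filter header per full 1000-block interval at or below stop_height."""
        return stop_height // CFCHECKPT_INTERVAL

    assert expected_checkpoint_count(999) == 0
    assert expected_checkpoint_count(1000) == 1   # the functional test below expects exactly this
    assert expected_checkpoint_count(2000) == 2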
src/init.cpp

@@ -82,6 +82,7 @@
#include <stdint.h>
#include <stdio.h>
#include <set>

#include <bls/bls.h>

@@ -552,6 +553,7 @@ void SetupServerArgs()
    gArgs.AddArg("-maxuploadtarget=<n>", strprintf("Tries to keep outbound traffic under the given target (in MiB per 24h), 0 = no limit (default: %d)", DEFAULT_MAX_UPLOAD_TARGET), false, OptionsCategory::CONNECTION);
    gArgs.AddArg("-onion=<ip:port>", "Use separate SOCKS5 proxy to reach peers via Tor hidden services, set -noonion to disable (default: -proxy)", false, OptionsCategory::CONNECTION);
    gArgs.AddArg("-onlynet=<net>", "Make outgoing connections only through network <net> (ipv4, ipv6 or onion). Incoming connections are not affected by this option. This option can be specified multiple times to allow multiple networks.", false, OptionsCategory::CONNECTION);
    gArgs.AddArg("-peerblockfilters", strprintf("Serve compact block filters to peers per BIP 157 (default: %u)", DEFAULT_PEERBLOCKFILTERS), false, OptionsCategory::CONNECTION);
    gArgs.AddArg("-peerbloomfilters", strprintf("Support filtering of blocks and transaction with bloom filters (default: %u)", DEFAULT_PEERBLOOMFILTERS), false, OptionsCategory::CONNECTION);
    gArgs.AddArg("-peertimeout=<n>", strprintf("Specify p2p connection timeout in seconds. This option determines the amount of time a peer may be inactive before the connection to it is dropped. (minimum: 1, default: %d)", DEFAULT_PEER_CONNECT_TIMEOUT), false, OptionsCategory::CONNECTION);
    gArgs.AddArg("-permitbaremultisig", strprintf("Relay non-P2SH multisig (default: %u)", DEFAULT_PERMIT_BAREMULTISIG), false, OptionsCategory::CONNECTION);

@@ -1210,7 +1212,7 @@ int nUserMaxConnections;
int nFD;
ServiceFlags nLocalServices = ServiceFlags(NODE_NETWORK | NODE_NETWORK_LIMITED);
int64_t peer_connect_timeout;
std::vector<BlockFilterType> g_enabled_filter_types;
std::set<BlockFilterType> g_enabled_filter_types;

} // namespace

@@ -1294,16 +1296,24 @@ bool AppInitParameterInteraction()
        g_enabled_filter_types = AllBlockFilterTypes();
    } else if (blockfilterindex_value != "0") {
        const std::vector<std::string> names = gArgs.GetArgs("-blockfilterindex");
        g_enabled_filter_types.reserve(names.size());
        for (const auto& name : names) {
            BlockFilterType filter_type;
            if (!BlockFilterTypeByName(name, filter_type)) {
                return InitError(strprintf(_("Unknown -blockfilterindex value %s."), name));
            }
            g_enabled_filter_types.push_back(filter_type);
            g_enabled_filter_types.insert(filter_type);
        }
    }

    // Signal NODE_COMPACT_FILTERS if peerblockfilters and basic filters index are both enabled.
    if (gArgs.GetBoolArg("-peerblockfilters", DEFAULT_PEERBLOCKFILTERS)) {
        if (g_enabled_filter_types.count(BlockFilterType::BASIC_FILTER) != 1) {
            return InitError(_("Cannot set -peerblockfilters without -blockfilterindex."));
        }

        nLocalServices = ServiceFlags(nLocalServices | NODE_COMPACT_FILTERS);
    }

    // if using block pruning, then disallow txindex and require disabling governance validation
    if (gArgs.GetArg("-prune", 0)) {
        if (gArgs.GetBoolArg("-txindex", DEFAULT_TXINDEX))
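For reference (an illustrative invocation, not part of this diff), serving filters to peers therefore requires enabling both options, which is exactly the pair of flags the functional test below passes to node 0:

    dashd -blockfilterindex -peerblockfilters

With only -peerblockfilters set, startup fails with the "Cannot set -peerblockfilters without -blockfilterindex" error added above.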
@@ -9,9 +9,12 @@
#include <banman.h>
#include <arith_uint256.h>
#include <blockencodings.h>
#include <blockfilter.h>
#include <chainparams.h>
#include <consensus/validation.h>
#include <hash.h>
#include <index/blockfilterindex.h>
#include <validation.h>
#include <merkleblock.h>
#include <netmessagemaker.h>
#include <netbase.h>

@@ -149,6 +152,10 @@ static const unsigned int INVENTORY_BROADCAST_INTERVAL = 5;
 * Limits the impact of low-fee transaction floods.
 * We have 4 times smaller block times in Dash, so we need to push 4 times more invs per 1MB. */
static constexpr unsigned int INVENTORY_BROADCAST_MAX_PER_1MB_BLOCK = 4 * 7 * INVENTORY_BROADCAST_INTERVAL;
/** Maximum number of compact filters that may be requested with one getcfilters. See BIP 157. */
static constexpr uint32_t MAX_GETCFILTERS_SIZE = 1000;
/** Maximum number of cf hashes that may be requested with one getcfheaders. See BIP 157. */
static constexpr uint32_t MAX_GETCFHEADERS_SIZE = 2000;

struct COrphanTx {
    // When modifying, adapt the copy of this definition in tests/DoS_tests.

@@ -2107,6 +2114,222 @@ void static ProcessOrphanTx(CConnman* connman, std::set<uint256>& orphan_work_se
    }
}

/**
 * Validation logic for compact filters request handling.
 *
 * May disconnect from the peer in the case of a bad request.
 *
 * @param[in]   peer            The peer that we received the request from
 * @param[in]   chain_params    Chain parameters
 * @param[in]   filter_type     The filter type the request is for. Must be basic filters.
 * @param[in]   start_height    The start height for the request
 * @param[in]   stop_hash       The stop_hash for the request
 * @param[in]   max_height_diff The maximum number of items permitted to request, as specified in BIP 157
 * @param[out]  stop_index      The CBlockIndex for the stop_hash block, if the request can be serviced.
 * @param[out]  filter_index    The filter index, if the request can be serviced.
 * @return                      True if the request can be serviced.
 */
static bool PrepareBlockFilterRequest(CNode& peer, const CChainParams& chain_params,
                                      BlockFilterType filter_type, uint32_t start_height,
                                      const uint256& stop_hash, uint32_t max_height_diff,
                                      const CBlockIndex*& stop_index,
                                      BlockFilterIndex*& filter_index)
{
    const bool supported_filter_type =
        (filter_type == BlockFilterType::BASIC_FILTER &&
         (peer.GetLocalServices() & NODE_COMPACT_FILTERS));
    if (!supported_filter_type) {
        LogPrint(BCLog::NET, "peer %d requested unsupported block filter type: %d\n",
                 peer.GetId(), static_cast<uint8_t>(filter_type));
        peer.fDisconnect = true;
        return false;
    }

    {
        LOCK(cs_main);
        stop_index = LookupBlockIndex(stop_hash);

        // Check that the stop block exists and the peer would be allowed to fetch it.
        if (!stop_index || !BlockRequestAllowed(stop_index, chain_params.GetConsensus())) {
            LogPrint(BCLog::NET, "peer %d requested invalid block hash: %s\n",
                     peer.GetId(), stop_hash.ToString());
            peer.fDisconnect = true;
            return false;
        }
    }

    uint32_t stop_height = stop_index->nHeight;
    if (start_height > stop_height) {
        LogPrint(BCLog::NET, "peer %d sent invalid getcfilters/getcfheaders with " /* Continued */
                 "start height %d and stop height %d\n",
                 peer.GetId(), start_height, stop_height);
        peer.fDisconnect = true;
        return false;
    }
    if (stop_height - start_height >= max_height_diff) {
        LogPrint(BCLog::NET, "peer %d requested too many cfilters/cfheaders: %d / %d\n",
                 peer.GetId(), stop_height - start_height + 1, max_height_diff);
        peer.fDisconnect = true;
        return false;
    }

    filter_index = GetBlockFilterIndex(filter_type);
    if (!filter_index) {
        LogPrint(BCLog::NET, "Filter index for supported type %s not found\n", BlockFilterTypeName(filter_type));
        return false;
    }

    return true;
}

/**
 * Handle a cfilters request.
 *
 * May disconnect from the peer in the case of a bad request.
 *
 * @param[in]   peer            The peer that we received the request from
 * @param[in]   vRecv           The raw message received
 * @param[in]   chain_params    Chain parameters
 * @param[in]   connman         Pointer to the connection manager
 */
static void ProcessGetCFilters(CNode& peer, CDataStream& vRecv, const CChainParams& chain_params,
                               CConnman& connman)
{
    uint8_t filter_type_ser;
    uint32_t start_height;
    uint256 stop_hash;

    vRecv >> filter_type_ser >> start_height >> stop_hash;

    const BlockFilterType filter_type = static_cast<BlockFilterType>(filter_type_ser);

    const CBlockIndex* stop_index;
    BlockFilterIndex* filter_index;
    if (!PrepareBlockFilterRequest(peer, chain_params, filter_type, start_height, stop_hash,
                                   MAX_GETCFILTERS_SIZE, stop_index, filter_index)) {
        return;
    }

    std::vector<BlockFilter> filters;
    if (!filter_index->LookupFilterRange(start_height, stop_index, filters)) {
        LogPrint(BCLog::NET, "Failed to find block filter in index: filter_type=%s, start_height=%d, stop_hash=%s\n",
                 BlockFilterTypeName(filter_type), start_height, stop_hash.ToString());
        return;
    }

    for (const auto& filter : filters) {
        CSerializedNetMsg msg = CNetMsgMaker(peer.GetSendVersion())
            .Make(NetMsgType::CFILTER, filter);
        connman.PushMessage(&peer, std::move(msg));
    }
}

/**
 * Handle a cfheaders request.
 *
 * May disconnect from the peer in the case of a bad request.
 *
 * @param[in]   peer            The peer that we received the request from
 * @param[in]   vRecv           The raw message received
 * @param[in]   chain_params    Chain parameters
 * @param[in]   connman         Pointer to the connection manager
 */
static void ProcessGetCFHeaders(CNode& peer, CDataStream& vRecv, const CChainParams& chain_params,
                                CConnman& connman)
{
    uint8_t filter_type_ser;
    uint32_t start_height;
    uint256 stop_hash;

    vRecv >> filter_type_ser >> start_height >> stop_hash;

    const BlockFilterType filter_type = static_cast<BlockFilterType>(filter_type_ser);

    const CBlockIndex* stop_index;
    BlockFilterIndex* filter_index;
    if (!PrepareBlockFilterRequest(peer, chain_params, filter_type, start_height, stop_hash,
                                   MAX_GETCFHEADERS_SIZE, stop_index, filter_index)) {
        return;
    }

    uint256 prev_header;
    if (start_height > 0) {
        const CBlockIndex* const prev_block =
            stop_index->GetAncestor(static_cast<int>(start_height - 1));
        if (!filter_index->LookupFilterHeader(prev_block, prev_header)) {
            LogPrint(BCLog::NET, "Failed to find block filter header in index: filter_type=%s, block_hash=%s\n",
                     BlockFilterTypeName(filter_type), prev_block->GetBlockHash().ToString());
            return;
        }
    }

    std::vector<uint256> filter_hashes;
    if (!filter_index->LookupFilterHashRange(start_height, stop_index, filter_hashes)) {
        LogPrint(BCLog::NET, "Failed to find block filter hashes in index: filter_type=%s, start_height=%d, stop_hash=%s\n",
                 BlockFilterTypeName(filter_type), start_height, stop_hash.ToString());
        return;
    }

    CSerializedNetMsg msg = CNetMsgMaker(peer.GetSendVersion())
        .Make(NetMsgType::CFHEADERS,
              filter_type_ser,
              stop_index->GetBlockHash(),
              prev_header,
              filter_hashes);
    connman.PushMessage(&peer, std::move(msg));
}

/**
 * Handle a getcfcheckpt request.
 *
 * May disconnect from the peer in the case of a bad request.
 *
 * @param[in]   peer            The peer that we received the request from
 * @param[in]   vRecv           The raw message received
 * @param[in]   chain_params    Chain parameters
 * @param[in]   connman         Pointer to the connection manager
 */
static void ProcessGetCFCheckPt(CNode& peer, CDataStream& vRecv, const CChainParams& chain_params,
                                CConnman& connman)
{
    uint8_t filter_type_ser;
    uint256 stop_hash;

    vRecv >> filter_type_ser >> stop_hash;

    const BlockFilterType filter_type = static_cast<BlockFilterType>(filter_type_ser);

    const CBlockIndex* stop_index;
    BlockFilterIndex* filter_index;
    if (!PrepareBlockFilterRequest(peer, chain_params, filter_type, /*start_height=*/0, stop_hash,
                                   /*max_height_diff=*/std::numeric_limits<uint32_t>::max(),
                                   stop_index, filter_index)) {
        return;
    }

    std::vector<uint256> headers(stop_index->nHeight / CFCHECKPT_INTERVAL);

    // Populate headers.
    const CBlockIndex* block_index = stop_index;
    for (int i = headers.size() - 1; i >= 0; i--) {
        int height = (i + 1) * CFCHECKPT_INTERVAL;
        block_index = block_index->GetAncestor(height);

        if (!filter_index->LookupFilterHeader(block_index, headers[i])) {
            LogPrint(BCLog::NET, "Failed to find block filter header in index: filter_type=%s, block_hash=%s\n",
                     BlockFilterTypeName(filter_type), block_index->GetBlockHash().ToString());
            return;
        }
    }

    CSerializedNetMsg msg = CNetMsgMaker(peer.GetSendVersion())
        .Make(NetMsgType::CFCHECKPT,
              filter_type_ser,
              stop_index->GetBlockHash(),
              headers);
    connman.PushMessage(&peer, std::move(msg));
}

std::string RejectCodeToString(const unsigned char code)
{
    if (code == REJECT_MALFORMED)

@@ -3639,6 +3862,21 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
        return true;
    }

    if (strCommand == NetMsgType::GETCFILTERS) {
        ProcessGetCFilters(*pfrom, vRecv, chainparams, *connman);
        return true;
    }

    if (strCommand == NetMsgType::GETCFHEADERS) {
        ProcessGetCFHeaders(*pfrom, vRecv, chainparams, *connman);
        return true;
    }

    if (strCommand == NetMsgType::GETCFCHECKPT) {
        ProcessGetCFCheckPt(*pfrom, vRecv, chainparams, *connman);
        return true;
    }

    if (strCommand == NetMsgType::MNLISTDIFF) {
        // we have never requested this
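A rough sketch of the client side of the cfheaders exchange handled above, using only helpers this changeset adds to the Python test framework (hash256, ser_uint256, uint256_from_str from test_framework.messages); it mirrors compute_last_header in test/functional/p2p_blockfilters.py. Per BIP 157, each filter header is hash256(filter_hash || previous_filter_header):

    # Sketch: fold a cfheaders response back into the final filter header.
    from test_framework.messages import hash256, ser_uint256, uint256_from_str

    def last_filter_header(prev_header, filter_hashes):
        """Chain the filter hashes onto prev_header and return the last filter header."""
        header = ser_uint256(prev_header)
        for filter_hash in filter_hashes:
            header = hash256(ser_uint256(filter_hash) + header)
        return uint256_from_str(header)

    # Usage against a cfheaders reply received on a P2PInterface connection `peer` (assumed):
    #   response = peer.last_message['cfheaders']
    #   assert last_filter_header(response.prev_header, response.hashes) == expected_header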
@@ -21,6 +21,7 @@ static const unsigned int DEFAULT_BLOCK_RECONSTRUCTION_EXTRA_TXN = 100;
/** Default for BIP61 (sending reject messages) */
static constexpr bool DEFAULT_ENABLE_BIP61 = true;
static const bool DEFAULT_PEERBLOOMFILTERS = true;
static const bool DEFAULT_PEERBLOCKFILTERS = false;

class PeerLogicValidation final : public CValidationInterface, public NetEventsInterface {
private:
@@ -38,6 +38,12 @@ const char *SENDCMPCT="sendcmpct";
const char *CMPCTBLOCK="cmpctblock";
const char *GETBLOCKTXN="getblocktxn";
const char *BLOCKTXN="blocktxn";
const char *GETCFILTERS="getcfilters";
const char *CFILTER="cfilter";
const char *GETCFHEADERS="getcfheaders";
const char *CFHEADERS="cfheaders";
const char *GETCFCHECKPT="getcfcheckpt";
const char *CFCHECKPT="cfcheckpt";
// Dash message types
const char *LEGACYTXLOCKREQUEST="ix";
const char *SPORK="spork";

@@ -108,6 +114,12 @@ const static std::string allNetMessageTypes[] = {
    NetMsgType::CMPCTBLOCK,
    NetMsgType::GETBLOCKTXN,
    NetMsgType::BLOCKTXN,
    NetMsgType::GETCFILTERS,
    NetMsgType::CFILTER,
    NetMsgType::GETCFHEADERS,
    NetMsgType::CFHEADERS,
    NetMsgType::GETCFCHECKPT,
    NetMsgType::CFCHECKPT,
    // Dash message types
    // NOTE: do NOT include non-implmented here, we want them to be "Unknown command" in ProcessMessage()
    NetMsgType::LEGACYTXLOCKREQUEST,

@@ -305,3 +317,28 @@ const std::vector<std::string> &getAllNetMessageTypes()
{
    return allNetMessageTypesVec;
}

std::string serviceFlagToStr(const uint64_t mask, const int bit)
{
    switch (ServiceFlags(mask)) {
    case NODE_NONE: abort();  // impossible
    case NODE_NETWORK:         return "NETWORK";
    case NODE_GETUTXO:         return "GETUTXO";
    case NODE_BLOOM:           return "BLOOM";
    case NODE_XTHIN:           return "XTHIN";
    case NODE_COMPACT_FILTERS: return "COMPACT_FILTERS";
    case NODE_NETWORK_LIMITED: return "NETWORK_LIMITED";
    // Not using default, so we get warned when a case is missing
    }

    std::ostringstream stream;
    stream.imbue(std::locale::classic());
    stream << "UNKNOWN[";
    if (bit < 8) {
        stream << mask;
    } else {
        stream << "2^" << bit;
    }
    stream << "]";
    return stream.str();
}
@@ -216,6 +216,42 @@ extern const char *GETBLOCKTXN;
 * @since protocol version 70209 as described by BIP 152
 */
extern const char *BLOCKTXN;
/**
 * getcfilters requests compact filters for a range of blocks.
 * Only available with service bit NODE_COMPACT_FILTERS as described by
 * BIP 157 & 158.
 */
extern const char* GETCFILTERS;
/**
 * cfilter is a response to a getcfilters request containing a single compact
 * filter.
 */
extern const char* CFILTER;
/**
 * getcfheaders requests a compact filter header and the filter hashes for a
 * range of blocks, which can then be used to reconstruct the filter headers
 * for those blocks.
 * Only available with service bit NODE_COMPACT_FILTERS as described by
 * BIP 157 & 158.
 */
extern const char* GETCFHEADERS;
/**
 * cfheaders is a response to a getcfheaders request containing a filter header
 * and a vector of filter hashes for each subsequent block in the requested range.
 */
extern const char* CFHEADERS;
/**
 * getcfcheckpt requests evenly spaced compact filter headers, enabling
 * parallelized download and validation of the headers between them.
 * Only available with service bit NODE_COMPACT_FILTERS as described by
 * BIP 157 & 158.
 */
extern const char *GETCFCHECKPT;
/**
 * cfcheckpt is a response to a getcfcheckpt request containing a vector of
 * evenly spaced filter headers for blocks on the requested chain.
 */
extern const char *CFCHECKPT;

// Dash message types
// NOTE: do NOT declare non-implmented here, we don't want them to be exposed to the outside

@@ -263,6 +299,7 @@ const std::vector<std::string> &getAllNetMessageTypes();

/** nServices flags */
enum ServiceFlags : uint64_t {
    // NOTE: When adding here, be sure to update serviceFlagToStr too
    // Nothing
    NODE_NONE = 0,
    // NODE_NETWORK means that the node is capable of serving the complete block chain. It is currently

@@ -279,6 +316,9 @@ enum ServiceFlags : uint64_t {
    // NODE_XTHIN means the node supports Xtreme Thinblocks
    // If this is turned off then the node will not service nor make xthin requests
    NODE_XTHIN = (1 << 4),
    // NODE_COMPACT_FILTERS means the node will service basic block filter requests.
    // See BIP157 and BIP158 for details on how this is implemented.
    NODE_COMPACT_FILTERS = (1 << 6),
    // NODE_NETWORK_LIMITED means the same as NODE_NETWORK with the limitation of only
    // serving the last 288 blocks
    // See BIP159 for details on how this is implemented.

@@ -293,6 +333,8 @@ enum ServiceFlags : uint64_t {
    // BIP process.
};

std::string serviceFlagToStr(uint64_t mask, int bit);

/**
 * Gets the set of service flags which are "desirable" for a given peer.
 *
@@ -1725,28 +1725,11 @@ QString formatServicesStr(quint64 mask)
{
    QStringList strList;

    // Just scan the last 8 bits for now.
    for (int i = 0; i < 8; i++) {
        uint64_t check = 1 << i;
    for (int i = 0; i < 64; i++) {
        uint64_t check = 1ull << i;
        if (mask & check)
        {
            switch (check)
            {
            case NODE_NETWORK:
                strList.append("NETWORK");
                break;
            case NODE_GETUTXO:
                strList.append("GETUTXO");
                break;
            case NODE_BLOOM:
                strList.append("BLOOM");
                break;
            case NODE_XTHIN:
                strList.append("XTHIN");
                break;
            default:
                strList.append(QString("%1[%2]").arg("UNKNOWN").arg(check));
            }
            strList.append(QString::fromStdString(serviceFlagToStr(mask, i)));
        }
    }

@@ -90,6 +90,10 @@ static UniValue getpeerinfo(const JSONRPCRequest& request)
            " \"verified_pubkey_hash\": h, (hex) Only present when the peer is a masternode and successfully\n"
            " authenticated via MNAUTH. In this case, this field contains the\n"
            " hash of the masternode's operator public key\n"
            " \"servicesnames\":[ (array) the services offered, in human-readable form\n"
            " \"SERVICE_NAME\", (string) the service name if it is recognised\n"
            " ...\n"
            " ],\n"
            " \"relaytxes\":true|false, (boolean) Whether peer has asked us to relay transactions to it\n"
            " \"lastsend\": ttt, (numeric) The time in seconds since epoch (Jan 1 1970 GMT) of the last send\n"
            " \"lastrecv\": ttt, (numeric) The time in seconds since epoch (Jan 1 1970 GMT) of the last receive\n"

@@ -162,6 +166,7 @@ static UniValue getpeerinfo(const JSONRPCRequest& request)
        if (!stats.verifiedPubKeyHash.IsNull()) {
            obj.pushKV("verified_pubkey_hash", stats.verifiedPubKeyHash.ToString());
        }
        obj.pushKV("servicesnames", GetServicesNames(stats.nServices));
        obj.pushKV("relaytxes", stats.fRelayTxes);
        obj.pushKV("lastsend", stats.nLastSend);
        obj.pushKV("lastrecv", stats.nLastRecv);

@@ -458,6 +463,10 @@ static UniValue getnetworkinfo(const JSONRPCRequest& request)
            " \"subversion\": \"/Dash Core:x.x.x.x/\", (string) the server subversion string\n"
            " \"protocolversion\": xxxxx, (numeric) the protocol version\n"
            " \"localservices\": \"xxxxxxxxxxxxxxxx\", (string) the services we offer to the network\n"
            " \"localservicesnames\": [ (array) the services we offer to the network, in human-readable form\n"
            " \"SERVICE_NAME\", (string) the service name\n"
            " ...\n"
            " ],\n"
            " \"localrelay\": true|false, (bool) true if transaction relay is requested from peers\n"
            " \"timeoffset\": xxxxx, (numeric) the time offset\n"
            " \"connections\": xxxxx, (numeric) the number of connections\n"

@@ -496,8 +505,11 @@ static UniValue getnetworkinfo(const JSONRPCRequest& request)
    obj.pushKV("buildversion", FormatFullVersion());
    obj.pushKV("subversion", strSubVersion);
    obj.pushKV("protocolversion",PROTOCOL_VERSION);
    if(g_connman)
        obj.pushKV("localservices", strprintf("%016x", g_connman->GetLocalServices()));
    if (g_connman) {
        ServiceFlags services = g_connman->GetLocalServices();
        obj.pushKV("localservices", strprintf("%016x", services));
        obj.pushKV("localservicesnames", GetServicesNames(services));
    }
    obj.pushKV("localrelay", g_relay_txes);
    obj.pushKV("timeoffset", GetTimeOffset());
    if (g_connman) {
@@ -224,3 +224,17 @@ UniValue JSONRPCTransactionError(TransactionError terr, const std::string& err_s
    }
}

UniValue GetServicesNames(ServiceFlags services)
{
    const uint64_t services_n = services;
    UniValue servicesNames(UniValue::VARR);

    for (int i = 0; i < 64; ++i) {
        const uint64_t mask = 1ull << i;
        if (services_n & mask) {
            servicesNames.push_back(serviceFlagToStr(mask, i));
        }
    }

    return servicesNames;
}
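As an illustrative example (assumed output shape, not taken from this diff), a node that signals NODE_COMPACT_FILTERS would now report the flag by name over RPC:

    $ dash-cli getnetworkinfo
    ...
    "localservices": "0000000000000445",
    "localservicesnames": [
      "NETWORK",
      "BLOOM",
      "COMPACT_FILTERS",
      "NETWORK_LIMITED"
    ],
    ...

The names come from serviceFlagToStr above; the example mask 0x445 is simply the sum of those four bits (1 | 1<<2 | 1<<6 | 1<<10).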
@@ -7,6 +7,7 @@

#include <node/transaction.h>
#include <pubkey.h>
#include <protocol.h>
#include <rpc/protocol.h>
#include <script/standard.h>
#include <univalue.h>

@@ -28,6 +29,9 @@ UniValue DescribeAddress(const CTxDestination& dest);
//! Parse a confirm target option and raise an RPC error if it is invalid.
unsigned int ParseConfirmTarget(const UniValue& value);

/** Returns, given services flags, a list of humanly readable (known) network services */
UniValue GetServicesNames(ServiceFlags services);

struct RPCArg {
    enum class Type {
        OBJ,
test/functional/p2p_blockfilters.py (new executable file, 259 lines)

@@ -0,0 +1,259 @@
#!/usr/bin/env python3
# Copyright (c) 2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Tests NODE_COMPACT_FILTERS (BIP 157/158).

Tests that a node configured with -blockfilterindex and -peerblockfilters signals
NODE_COMPACT_FILTERS and can serve cfilters, cfheaders and cfcheckpts.
"""

from test_framework.messages import (
    FILTER_TYPE_BASIC,
    NODE_COMPACT_FILTERS,
    hash256,
    msg_getcfcheckpt,
    msg_getcfheaders,
    msg_getcfilters,
    ser_uint256,
    uint256_from_str,
)
from test_framework.mininode import P2PInterface
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
    assert_equal,
    connect_nodes,
    disconnect_nodes,
    wait_until,
)

class CFiltersClient(P2PInterface):
    def __init__(self):
        super().__init__()
        # Store the cfilters received.
        self.cfilters = []

    def pop_cfilters(self):
        cfilters = self.cfilters
        self.cfilters = []
        return cfilters

    def on_cfilter(self, message):
        """Store cfilters received in a list."""
        self.cfilters.append(message)

class CompactFiltersTest(BitcoinTestFramework):
    def set_test_params(self):
        self.setup_clean_chain = True
        self.rpc_timeout = 480
        self.num_nodes = 2
        self.extra_args = [
            ["-blockfilterindex", "-peerblockfilters"],
            ["-blockfilterindex"],
        ]

    def run_test(self):
        # Node 0 supports COMPACT_FILTERS, node 1 does not.
        node0 = self.nodes[0].add_p2p_connection(CFiltersClient())
        node1 = self.nodes[1].add_p2p_connection(CFiltersClient())

        # Nodes 0 & 1 share the same first 999 blocks in the chain.
        self.nodes[0].generate(999)
        self.sync_blocks(timeout=600)

        # Stale blocks by disconnecting nodes 0 & 1, mining, then reconnecting
        disconnect_nodes(self.nodes[0], 1)

        self.nodes[0].generate(1)
        wait_until(lambda: self.nodes[0].getblockcount() == 1000)
        stale_block_hash = self.nodes[0].getblockhash(1000)

        self.nodes[1].generate(1001)
        wait_until(lambda: self.nodes[1].getblockcount() == 2000)

        # Check that nodes have signalled NODE_COMPACT_FILTERS correctly.
        assert node0.nServices & NODE_COMPACT_FILTERS != 0
        assert node1.nServices & NODE_COMPACT_FILTERS == 0

        # Check that the localservices is as expected.
        assert int(self.nodes[0].getnetworkinfo()['localservices'], 16) & NODE_COMPACT_FILTERS != 0
        assert int(self.nodes[1].getnetworkinfo()['localservices'], 16) & NODE_COMPACT_FILTERS == 0

        self.log.info("get cfcheckpt on chain to be re-orged out.")
        request = msg_getcfcheckpt(
            filter_type=FILTER_TYPE_BASIC,
            stop_hash=int(stale_block_hash, 16)
        )
        node0.send_and_ping(message=request)
        response = node0.last_message['cfcheckpt']
        assert_equal(response.filter_type, request.filter_type)
        assert_equal(response.stop_hash, request.stop_hash)
        assert_equal(len(response.headers), 1)

        self.log.info("Reorg node 0 to a new chain.")
        connect_nodes(self.nodes[0], 1)
        self.sync_blocks(timeout=600)

        main_block_hash = self.nodes[0].getblockhash(1000)
        assert main_block_hash != stale_block_hash, "node 0 chain did not reorganize"

        self.log.info("Check that peers can fetch cfcheckpt on active chain.")
        tip_hash = self.nodes[0].getbestblockhash()
        request = msg_getcfcheckpt(
            filter_type=FILTER_TYPE_BASIC,
            stop_hash=int(tip_hash, 16)
        )
        node0.send_and_ping(request)
        response = node0.last_message['cfcheckpt']
        assert_equal(response.filter_type, request.filter_type)
        assert_equal(response.stop_hash, request.stop_hash)

        main_cfcheckpt = self.nodes[0].getblockfilter(main_block_hash, 'basic')['header']
        tip_cfcheckpt = self.nodes[0].getblockfilter(tip_hash, 'basic')['header']
        assert_equal(
            response.headers,
            [int(header, 16) for header in (main_cfcheckpt, tip_cfcheckpt)]
        )

        self.log.info("Check that peers can fetch cfcheckpt on stale chain.")
        request = msg_getcfcheckpt(
            filter_type=FILTER_TYPE_BASIC,
            stop_hash=int(stale_block_hash, 16)
        )
        node0.send_and_ping(request)
        response = node0.last_message['cfcheckpt']

        stale_cfcheckpt = self.nodes[0].getblockfilter(stale_block_hash, 'basic')['header']
        assert_equal(
            response.headers,
            [int(header, 16) for header in (stale_cfcheckpt,)]
        )

        self.log.info("Check that peers can fetch cfheaders on active chain.")
        request = msg_getcfheaders(
            filter_type=FILTER_TYPE_BASIC,
            start_height=1,
            stop_hash=int(main_block_hash, 16)
        )
        node0.send_and_ping(request)
        response = node0.last_message['cfheaders']
        main_cfhashes = response.hashes
        assert_equal(len(main_cfhashes), 1000)
        assert_equal(
            compute_last_header(response.prev_header, response.hashes),
            int(main_cfcheckpt, 16)
        )

        self.log.info("Check that peers can fetch cfheaders on stale chain.")
        request = msg_getcfheaders(
            filter_type=FILTER_TYPE_BASIC,
            start_height=1,
            stop_hash=int(stale_block_hash, 16)
        )
        node0.send_and_ping(request)
        response = node0.last_message['cfheaders']
        stale_cfhashes = response.hashes
        assert_equal(len(stale_cfhashes), 1000)
        assert_equal(
            compute_last_header(response.prev_header, response.hashes),
            int(stale_cfcheckpt, 16)
        )

        self.log.info("Check that peers can fetch cfilters.")
        stop_hash = self.nodes[0].getblockhash(10)
        request = msg_getcfilters(
            filter_type=FILTER_TYPE_BASIC,
            start_height=1,
            stop_hash=int(stop_hash, 16)
        )
        node0.send_message(request)
        node0.sync_with_ping()
        response = node0.pop_cfilters()
        assert_equal(len(response), 10)

        self.log.info("Check that cfilter responses are correct.")
        for cfilter, cfhash, height in zip(response, main_cfhashes, range(1, 11)):
            block_hash = self.nodes[0].getblockhash(height)
            assert_equal(cfilter.filter_type, FILTER_TYPE_BASIC)
            assert_equal(cfilter.block_hash, int(block_hash, 16))
            computed_cfhash = uint256_from_str(hash256(cfilter.filter_data))
            assert_equal(computed_cfhash, cfhash)

        self.log.info("Check that peers can fetch cfilters for stale blocks.")
        request = msg_getcfilters(
            filter_type=FILTER_TYPE_BASIC,
            start_height=1000,
            stop_hash=int(stale_block_hash, 16)
        )
        node0.send_message(request)
        node0.sync_with_ping()
        response = node0.pop_cfilters()
        assert_equal(len(response), 1)

        cfilter = response[0]
        assert_equal(cfilter.filter_type, FILTER_TYPE_BASIC)
        assert_equal(cfilter.block_hash, int(stale_block_hash, 16))
        computed_cfhash = uint256_from_str(hash256(cfilter.filter_data))
        assert_equal(computed_cfhash, stale_cfhashes[999])

        self.log.info("Requests to node 1 without NODE_COMPACT_FILTERS results in disconnection.")
        requests = [
            msg_getcfcheckpt(
                filter_type=FILTER_TYPE_BASIC,
                stop_hash=int(main_block_hash, 16)
            ),
            msg_getcfheaders(
                filter_type=FILTER_TYPE_BASIC,
                start_height=1000,
                stop_hash=int(main_block_hash, 16)
            ),
            msg_getcfilters(
                filter_type=FILTER_TYPE_BASIC,
                start_height=1000,
                stop_hash=int(main_block_hash, 16)
            ),
        ]
        for request in requests:
            node1 = self.nodes[1].add_p2p_connection(P2PInterface())
            node1.send_message(request)
            node1.wait_for_disconnect()

        self.log.info("Check that invalid requests result in disconnection.")
        requests = [
            # Requesting too many filters results in disconnection.
            msg_getcfilters(
                filter_type=FILTER_TYPE_BASIC,
                start_height=0,
                stop_hash=int(main_block_hash, 16)
            ),
            # Requesting too many filter headers results in disconnection.
            msg_getcfheaders(
                filter_type=FILTER_TYPE_BASIC,
                start_height=0,
                stop_hash=int(tip_hash, 16)
            ),
            # Requesting unknown filter type results in disconnection.
            msg_getcfcheckpt(
                filter_type=255,
                stop_hash=int(main_block_hash, 16)
            ),
            # Requesting unknown hash results in disconnection.
            msg_getcfcheckpt(
                filter_type=FILTER_TYPE_BASIC,
                stop_hash=123456789,
            ),
        ]
        for request in requests:
            node0 = self.nodes[0].add_p2p_connection(P2PInterface())
            node0.send_message(request)
            node0.wait_for_disconnect()

def compute_last_header(prev_header, hashes):
    """Compute the last filter header from a starting header and a sequence of filter hashes."""
    header = ser_uint256(prev_header)
    for filter_hash in hashes:
        header = hash256(ser_uint256(filter_hash) + header)
    return uint256_from_str(header)

if __name__ == '__main__':
    CompactFiltersTest().main()
@@ -46,8 +46,11 @@ BIP125_SEQUENCE_NUMBER = 0xfffffffd # Sequence number that is BIP 125 opt-in an
NODE_NETWORK = (1 << 0)
# NODE_GETUTXO = (1 << 1)
NODE_BLOOM = (1 << 2)
NODE_COMPACT_FILTERS = (1 << 6)
NODE_NETWORK_LIMITED = (1 << 10)

FILTER_TYPE_BASIC = 0

# Serialization/deserialization tools
def sha256(s):
    return hashlib.new('sha256', s).digest()

@@ -1880,3 +1883,153 @@ class msg_qdata:
    def __repr__(self):
        return "msg_qdata(error=%d, quorum_vvec=%d, enc_contributions=%d)" % (self.error, len(self.quorum_vvec),
                                                                              len(self.enc_contributions))

class msg_getcfilters:
    __slots__ = ("filter_type", "start_height", "stop_hash")
    command = b"getcfilters"

    def __init__(self, filter_type, start_height, stop_hash):
        self.filter_type = filter_type
        self.start_height = start_height
        self.stop_hash = stop_hash

    def deserialize(self, f):
        self.filter_type = struct.unpack("<B", f.read(1))[0]
        self.start_height = struct.unpack("<I", f.read(4))[0]
        self.stop_hash = deser_uint256(f)

    def serialize(self):
        r = b""
        r += struct.pack("<B", self.filter_type)
        r += struct.pack("<I", self.start_height)
        r += ser_uint256(self.stop_hash)
        return r

    def __repr__(self):
        return "msg_getcfilters(filter_type={:#x}, start_height={}, stop_hash={:x})".format(
            self.filter_type, self.start_height, self.stop_hash)

class msg_cfilter:
    __slots__ = ("filter_type", "block_hash", "filter_data")
    command = b"cfilter"

    def __init__(self, filter_type=None, block_hash=None, filter_data=None):
        self.filter_type = filter_type
        self.block_hash = block_hash
        self.filter_data = filter_data

    def deserialize(self, f):
        self.filter_type = struct.unpack("<B", f.read(1))[0]
        self.block_hash = deser_uint256(f)
        self.filter_data = deser_string(f)

    def serialize(self):
        r = b""
        r += struct.pack("<B", self.filter_type)
        r += ser_uint256(self.block_hash)
        r += ser_string(self.filter_data)
        return r

    def __repr__(self):
        return "msg_cfilter(filter_type={:#x}, block_hash={:x})".format(
            self.filter_type, self.block_hash)

class msg_getcfheaders:
    __slots__ = ("filter_type", "start_height", "stop_hash")
    command = b"getcfheaders"

    def __init__(self, filter_type, start_height, stop_hash):
        self.filter_type = filter_type
        self.start_height = start_height
        self.stop_hash = stop_hash

    def deserialize(self, f):
        self.filter_type = struct.unpack("<B", f.read(1))[0]
        self.start_height = struct.unpack("<I", f.read(4))[0]
        self.stop_hash = deser_uint256(f)

    def serialize(self):
        r = b""
        r += struct.pack("<B", self.filter_type)
        r += struct.pack("<I", self.start_height)
        r += ser_uint256(self.stop_hash)
        return r

    def __repr__(self):
        return "msg_getcfheaders(filter_type={:#x}, start_height={}, stop_hash={:x})".format(
            self.filter_type, self.start_height, self.stop_hash)

class msg_cfheaders:
    __slots__ = ("filter_type", "stop_hash", "prev_header", "hashes")
    command = b"cfheaders"

    def __init__(self, filter_type=None, stop_hash=None, prev_header=None, hashes=None):
        self.filter_type = filter_type
        self.stop_hash = stop_hash
        self.prev_header = prev_header
        self.hashes = hashes

    def deserialize(self, f):
        self.filter_type = struct.unpack("<B", f.read(1))[0]
        self.stop_hash = deser_uint256(f)
        self.prev_header = deser_uint256(f)
        self.hashes = deser_uint256_vector(f)

    def serialize(self):
        r = b""
        r += struct.pack("<B", self.filter_type)
        r += ser_uint256(self.stop_hash)
        r += ser_uint256(self.prev_header)
        r += ser_uint256_vector(self.hashes)
        return r

    def __repr__(self):
        return "msg_cfheaders(filter_type={:#x}, stop_hash={:x})".format(
            self.filter_type, self.stop_hash)

class msg_getcfcheckpt:
    __slots__ = ("filter_type", "stop_hash")
    command = b"getcfcheckpt"

    def __init__(self, filter_type, stop_hash):
        self.filter_type = filter_type
        self.stop_hash = stop_hash

    def deserialize(self, f):
        self.filter_type = struct.unpack("<B", f.read(1))[0]
        self.stop_hash = deser_uint256(f)

    def serialize(self):
        r = b""
        r += struct.pack("<B", self.filter_type)
        r += ser_uint256(self.stop_hash)
        return r

    def __repr__(self):
        return "msg_getcfcheckpt(filter_type={:#x}, stop_hash={:x})".format(
            self.filter_type, self.stop_hash)

class msg_cfcheckpt:
    __slots__ = ("filter_type", "stop_hash", "headers")
    command = b"cfcheckpt"

    def __init__(self, filter_type=None, stop_hash=None, headers=None):
        self.filter_type = filter_type
        self.stop_hash = stop_hash
        self.headers = headers

    def deserialize(self, f):
        self.filter_type = struct.unpack("<B", f.read(1))[0]
        self.stop_hash = deser_uint256(f)
        self.headers = deser_uint256_vector(f)

    def serialize(self):
        r = b""
        r += struct.pack("<B", self.filter_type)
        r += ser_uint256(self.stop_hash)
        r += ser_uint256_vector(self.headers)
        return r

    def __repr__(self):
        return "msg_cfcheckpt(filter_type={:#x}, stop_hash={:x})".format(
            self.filter_type, self.stop_hash)
@@ -30,6 +30,9 @@ from test_framework.messages import (
    msg_addrv2,
    msg_block,
    msg_blocktxn,
    msg_cfcheckpt,
    msg_cfheaders,
    msg_cfilter,
    msg_clsig,
    msg_cmpctblock,
    msg_getaddr,

@@ -72,6 +75,9 @@ MESSAGEMAP = {
    b"addrv2": msg_addrv2,
    b"block": msg_block,
    b"blocktxn": msg_blocktxn,
    b"cfcheckpt": msg_cfcheckpt,
    b"cfheaders": msg_cfheaders,
    b"cfilter": msg_cfilter,
    b"cmpctblock": msg_cmpctblock,
    b"getaddr": msg_getaddr,
    b"getblocks": msg_getblocks,

@@ -368,6 +374,9 @@ class P2PInterface(P2PConnection):
    def on_addrv2(self, message): pass
    def on_block(self, message): pass
    def on_blocktxn(self, message): pass
    def on_cfcheckpt(self, message): pass
    def on_cfheaders(self, message): pass
    def on_cfilter(self, message): pass
    def on_cmpctblock(self, message): pass
    def on_feefilter(self, message): pass
    def on_getaddr(self, message): pass
@@ -191,6 +191,7 @@ BASE_SCRIPTS = [
    'feature_dip0020_activation.py',
    'feature_uacomment.py',
    'p2p_unrequested_blocks.py',
    'p2p_blockfilters.py',
    'feature_asmap.py',
    'feature_includeconf.py',
    'rpc_scantxoutset.py',