mirror of
https://github.com/dashpay/dash.git
synced 2024-12-30 22:35:51 +01:00
18950f923e
* Allow sub-batch verification in CBLSInsecureBatchVerifier * Implement batch verification of CDKGDebugStatus messages * Use uint8_t for statusBitset in CDKGDebugMemberStatus and CDKGDebugSessionStatus No need to waste one byte per member and per LLMQ type. * Reserve 4k of buffer for CSerializedNetMsg buffer Profiling has shown that a lot of time is spent in resizing the data vector when large messages are involved. * Remove nHeight from CDKGDebugStatus This field changes every block and causes all masternodes to propagate its status for every block, even if nothing DKG related has changed. * Leave out session statuses when we're not a member of that session Otherwise MNs which are not members of DKG sessions will spam the network * Remove receivedFinalCommitment from CDKGDebugSessionStatus This is not bound to a session and thus is prone to spam the network when final commitments are propagated in the finalization phase. * Add "minableCommitments" to "quorum dkgstatus" * Hold cs_main while calling GetMinableCommitment * Abort processing of pending debug messages when spork18 gets disabled * Don't ask for debug messages when we've already seen them "statuses" only contains the current messages but none of the old messages, so nodes kept re-requesting old messages.
169 lines
5.0 KiB
C++
169 lines
5.0 KiB
C++
// Copyright (c) 2018-2019 The Dash Core developers
|
|
// Distributed under the MIT/X11 software license, see the accompanying
|
|
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
|
|
|
|
#ifndef DASH_CRYPTO_BLS_BATCHVERIFIER_H
|
|
#define DASH_CRYPTO_BLS_BATCHVERIFIER_H
|
|
|
|
#include "bls.h"

#include <map>
#include <set>
#include <vector>
|
|
|
|
template<typename SourceId, typename MessageId>
|
|
class CBLSInsecureBatchVerifier
|
|
{
|
|
private:
|
|
struct Message {
|
|
MessageId msgId;
|
|
uint256 msgHash;
|
|
CBLSSignature sig;
|
|
CBLSPublicKey pubKey;
|
|
};
|
|
|
|
typedef std::map<MessageId, Message> MessageMap;
|
|
typedef typename MessageMap::iterator MessageMapIterator;
|
|
typedef std::map<SourceId, std::vector<MessageMapIterator>> MessagesBySourceMap;
|
|
|
|
bool perMessageFallback;
|
|
size_t subBatchSize;
|
|
|
|
MessageMap messages;
|
|
MessagesBySourceMap messagesBySource;
|
|
|
|
public:
|
|
std::set<SourceId> badSources;
|
|
std::set<MessageId> badMessages;
|
|
|
|
public:
|
|
CBLSInsecureBatchVerifier(bool _perMessageFallback, size_t _subBatchSize = 0) :
|
|
perMessageFallback(_perMessageFallback),
|
|
subBatchSize(_subBatchSize)
|
|
{
|
|
}
|
|
|
|
void PushMessage(const SourceId& sourceId, const MessageId& msgId, const uint256& msgHash, const CBLSSignature& sig, const CBLSPublicKey& pubKey)
|
|
{
|
|
assert(sig.IsValid() && pubKey.IsValid());
|
|
|
|
auto it = messages.emplace(msgId, Message{msgId, msgHash, sig, pubKey}).first;
|
|
messagesBySource[sourceId].emplace_back(it);
|
|
|
|
if (subBatchSize != 0 && messages.size() >= subBatchSize) {
|
|
Verify();
|
|
ClearMessages();
|
|
}
|
|
}
|
|
|
|
void ClearMessages()
|
|
{
|
|
messages.clear();
|
|
messagesBySource.clear();
|
|
}
|
|
|
|
void Verify()
|
|
{
|
|
std::map<uint256, std::vector<MessageMapIterator>> byMessageHash;
|
|
|
|
for (auto it = messages.begin(); it != messages.end(); ++it) {
|
|
byMessageHash[it->second.msgHash].emplace_back(it);
|
|
}
|
|
|
|
if (VerifyBatch(byMessageHash)) {
|
|
// full batch is valid
|
|
return;
|
|
}
|
|
|
|
// revert to per-source verification
|
|
for (const auto& p : messagesBySource) {
|
|
bool batchValid = false;
|
|
|
|
// no need to verify it again if there was just one source
|
|
if (messagesBySource.size() != 1) {
|
|
byMessageHash.clear();
|
|
for (auto it = p.second.begin(); it != p.second.end(); ++it) {
|
|
byMessageHash[(*it)->second.msgHash].emplace_back(*it);
|
|
}
|
|
batchValid = VerifyBatch(byMessageHash);
|
|
}
|
|
if (!batchValid) {
|
|
badSources.emplace(p.first);
|
|
|
|
if (perMessageFallback) {
|
|
// revert to per-message verification
|
|
if (p.second.size() == 1) {
|
|
// no need to re-verify a single message
|
|
badMessages.emplace(p.second[0]->second.msgId);
|
|
} else {
|
|
for (const auto& msgIt : p.second) {
|
|
if (badMessages.count(msgIt->first)) {
|
|
// same message might be invalid from different source, so no need to re-verify it
|
|
continue;
|
|
}
|
|
|
|
const auto& msg = msgIt->second;
|
|
if (!msg.sig.VerifyInsecure(msg.pubKey, msg.msgHash)) {
|
|
badMessages.emplace(msg.msgId);
|
|
}
|
|
}
|
|
}
|
|
}
|
|
}
|
|
}
|
|
}
|
|
|
|
private:
|
|
bool VerifyBatch(const std::map<uint256, std::vector<MessageMapIterator>>& byMessageHash)
|
|
{
|
|
CBLSSignature aggSig;
|
|
std::vector<uint256> msgHashes;
|
|
std::vector<CBLSPublicKey> pubKeys;
|
|
std::set<MessageId> dups;
|
|
|
|
msgHashes.reserve(messages.size());
|
|
pubKeys.reserve(messages.size());
|
|
|
|
for (const auto& p : byMessageHash) {
|
|
const auto& msgHash = p.first;
|
|
|
|
CBLSPublicKey aggPubKey;
|
|
|
|
for (const auto& msgIt : p.second) {
|
|
const auto& msg = msgIt->second;
|
|
|
|
if (!dups.emplace(msg.msgId).second) {
|
|
continue;
|
|
}
|
|
|
|
if (!aggSig.IsValid()) {
|
|
aggSig = msg.sig;
|
|
} else {
|
|
aggSig.AggregateInsecure(msg.sig);
|
|
}
|
|
|
|
if (!aggPubKey.IsValid()) {
|
|
aggPubKey = msg.pubKey;
|
|
} else {
|
|
aggPubKey.AggregateInsecure(msg.pubKey);
|
|
}
|
|
}
|
|
|
|
if (!aggSig.IsValid()) {
|
|
// only duplicates for this msgHash
|
|
continue;
|
|
}
|
|
|
|
msgHashes.emplace_back(msgHash);
|
|
pubKeys.emplace_back(aggPubKey);
|
|
}
|
|
|
|
if (msgHashes.empty()) {
|
|
return true;
|
|
}
|
|
|
|
return aggSig.VerifyInsecureAggregated(pubKeys, msgHashes);
|
|
}
|
|
};
|
|
|
|
#endif //DASH_CRYPTO_BLS_BATCHVERIFIER_H
|