2015-12-13 14:51:43 +01:00
|
|
|
// Copyright (c) 2012-2015 The Bitcoin Core developers
|
2014-10-31 01:43:19 +01:00
|
|
|
// Distributed under the MIT software license, see the accompanying
|
2012-08-13 05:26:27 +02:00
|
|
|
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
|
|
|
|
|
2020-03-19 23:46:56 +01:00
|
|
|
#include <bloom.h>
|
2013-04-13 07:13:08 +02:00
|
|
|
|
2020-03-19 23:46:56 +01:00
|
|
|
#include <primitives/transaction.h>
|
|
|
|
#include <evo/specialtx.h>
|
|
|
|
#include <evo/providertx.h>
|
2022-08-02 19:14:25 +02:00
|
|
|
#include <logging.h>
|
2020-03-19 23:46:56 +01:00
|
|
|
#include <hash.h>
|
|
|
|
#include <script/script.h>
|
|
|
|
#include <script/standard.h>
|
|
|
|
#include <random.h>
|
|
|
|
#include <streams.h>
|
2012-08-13 05:26:27 +02:00
|
|
|
|
2013-04-13 07:13:08 +02:00
|
|
|
#include <math.h>
|
|
|
|
#include <stdlib.h>
|
|
|
|
|
2012-08-13 05:26:27 +02:00
|
|
|
#define LN2SQUARED 0.4804530139182014246671025263266649717305529515945455
|
|
|
|
#define LN2 0.6931471805599453094172321214581765680755001343602552
|
|
|
|
|
2017-05-18 10:08:56 +02:00
|
|
|
// Construct a BIP37 bloom filter sized for nElements entries at roughly the
// requested false-positive rate nFPRate; nTweakIn seeds the hash family and
// nFlagsIn selects the BLOOM_UPDATE_* behavior used by IsRelevantAndUpdate.
CBloomFilter::CBloomFilter(const unsigned int nElements, const double nFPRate, const unsigned int nTweakIn, unsigned char nFlagsIn) :
    /**
     * The ideal size for a bloom filter with a given number of elements and false positive rate is:
     * - nElements * log(fp rate) / ln(2)^2
     * We ignore filter parameters which will create a bloom filter larger than the protocol limits
     */
    vData(std::min((unsigned int)(-1 / LN2SQUARED * nElements * log(nFPRate)), MAX_BLOOM_FILTER_SIZE * 8) / 8),
    /**
     * The ideal number of hash functions is filter size * ln(2) / number of elements
     * Again, we ignore filter parameters which will create a bloom filter with more hash functions than the protocol limits
     * See https://en.wikipedia.org/wiki/Bloom_filter for an explanation of these formulas
     */
    nHashFuncs(std::min((unsigned int)(vData.size() * 8 / nElements * LN2), MAX_HASH_FUNCS)),
    nTweak(nTweakIn),
    nFlags(nFlagsIn)
{
}
|
|
|
|
|
2012-08-13 05:26:27 +02:00
|
|
|
inline unsigned int CBloomFilter::Hash(unsigned int nHashNum, const std::vector<unsigned char>& vDataToHash) const
|
|
|
|
{
|
|
|
|
// 0xFBA4C795 chosen as it guarantees a reasonable bit difference between nHashNum values.
|
2012-11-02 23:33:50 +01:00
|
|
|
return MurmurHash3(nHashNum * 0xFBA4C795 + nTweak, vDataToHash) % (vData.size() * 8);
|
2012-08-13 05:26:27 +02:00
|
|
|
}
|
|
|
|
|
2017-01-30 13:13:07 +01:00
|
|
|
void CBloomFilter::insert(const std::vector<unsigned char>& vKey)
|
2012-08-13 05:26:27 +02:00
|
|
|
{
|
Merge #18806: net: remove is{Empty,Full} flags from CBloomFilter, clarify CVE fix
1ad8ea2b73134bdd8d6b50704a019d47ad2191d8 net: remove is{Empty,Full} flags from CBloomFilter, clarify CVE fix (Sebastian Falbesoner)
Pull request description:
The BIP37 bloom filter class `CBloomFilter` contains two flags `isEmpty`/`isFull` together with an update method with the purpose to, according to the comments, "avoid wasting cpu", i.e. the mechanism should serve as an optimization for the trivial cases of empty (all bits zero) or full (all bits one) filters.
However, the real reason of adding those flags (introduced with commit https://github.com/bitcoin/bitcoin/commit/37c6389c5a0ca63ae3573440ecdfe95d28ad8f07 by gmaxwell) was a _covert fix_ of [CVE-2013-5700](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2013-5700), a vulnerability that allowed a divide-by-zero remote node crash.
According to gmaxwell himself (https://github.com/bitcoin/bitcoin/pull/9060#issuecomment-257749165):
> the IsEmpty/IsFull optimizations were largely a pretextual optimization intended to make unexploitable a remote crash vulnerability (integer division by zero) that existed in the original bloom filtering code without disclosing it. I'm doubtful that they are all that useful. :)
For more information on how to trigger this crash, see PR https://github.com/bitcoin/bitcoin/pull/18515 which contains a detailled description and a regression test. It has also been discussed on a [recent PR club meeting on fuzzing](https://bitcoincore.reviews/18521.html).
The covert fix code already led to issues and PR based on the wrong assumption that the flags are there for optimization reasons (see #16886 and #16922). This PR gets rid of the flags and the update method and just focuses on the CVE fix itself, i.e. it can be seen as a revert of the covert fix commit modulo the actual fix.
ACKs for top commit:
meshcollider:
utACK 1ad8ea2b73134bdd8d6b50704a019d47ad2191d8
laanwj:
Concept and code review ACK 1ad8ea2b73134bdd8d6b50704a019d47ad2191d8
jkczyz:
ACK 1ad8ea2b73134bdd8d6b50704a019d47ad2191d8
MarcoFalke:
ACK 1ad8ea2b73134bdd8d6b50704a019d47ad2191d8
fjahr:
Code review ACK 1ad8ea2b73134bdd8d6b50704a019d47ad2191d8
Tree-SHA512: 29f7ff9faece0285e11e16c024851f5bcb772dec64118ccc3f9067ec256267ec8e1b1e3105c7de2a72fd122c3b085e8fc840ab8f4e49813f1cc7a444df1867f7
2020-05-06 09:06:16 +02:00
|
|
|
if (vData.empty()) // Avoid divide-by-zero (CVE-2013-5700)
|
2013-02-25 02:36:59 +01:00
|
|
|
return;
|
2012-08-13 05:26:27 +02:00
|
|
|
for (unsigned int i = 0; i < nHashFuncs; i++)
|
|
|
|
{
|
|
|
|
unsigned int nIndex = Hash(i, vKey);
|
|
|
|
// Sets bit nIndex of vData
|
2014-03-20 05:21:23 +01:00
|
|
|
vData[nIndex >> 3] |= (1 << (7 & nIndex));
|
2012-08-13 05:26:27 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
void CBloomFilter::insert(const COutPoint& outpoint)
|
|
|
|
{
|
|
|
|
CDataStream stream(SER_NETWORK, PROTOCOL_VERSION);
|
|
|
|
stream << outpoint;
|
2017-01-30 13:13:07 +01:00
|
|
|
std::vector<unsigned char> data(stream.begin(), stream.end());
|
2012-08-13 05:26:27 +02:00
|
|
|
insert(data);
|
|
|
|
}
|
|
|
|
|
|
|
|
void CBloomFilter::insert(const uint256& hash)
|
|
|
|
{
|
2017-01-30 13:13:07 +01:00
|
|
|
std::vector<unsigned char> data(hash.begin(), hash.end());
|
2012-08-13 05:26:27 +02:00
|
|
|
insert(data);
|
|
|
|
}
|
|
|
|
|
2017-01-30 13:13:07 +01:00
|
|
|
bool CBloomFilter::contains(const std::vector<unsigned char>& vKey) const
|
2012-08-13 05:26:27 +02:00
|
|
|
{
|
Merge #18806: net: remove is{Empty,Full} flags from CBloomFilter, clarify CVE fix
1ad8ea2b73134bdd8d6b50704a019d47ad2191d8 net: remove is{Empty,Full} flags from CBloomFilter, clarify CVE fix (Sebastian Falbesoner)
Pull request description:
The BIP37 bloom filter class `CBloomFilter` contains two flags `isEmpty`/`isFull` together with an update method with the purpose to, according to the comments, "avoid wasting cpu", i.e. the mechanism should serve as an optimization for the trivial cases of empty (all bits zero) or full (all bits one) filters.
However, the real reason of adding those flags (introduced with commit https://github.com/bitcoin/bitcoin/commit/37c6389c5a0ca63ae3573440ecdfe95d28ad8f07 by gmaxwell) was a _covert fix_ of [CVE-2013-5700](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2013-5700), a vulnerability that allowed a divide-by-zero remote node crash.
According to gmaxwell himself (https://github.com/bitcoin/bitcoin/pull/9060#issuecomment-257749165):
> the IsEmpty/IsFull optimizations were largely a pretextual optimization intended to make unexploitable a remote crash vulnerability (integer division by zero) that existed in the original bloom filtering code without disclosing it. I'm doubtful that they are all that useful. :)
For more information on how to trigger this crash, see PR https://github.com/bitcoin/bitcoin/pull/18515 which contains a detailled description and a regression test. It has also been discussed on a [recent PR club meeting on fuzzing](https://bitcoincore.reviews/18521.html).
The covert fix code already led to issues and PR based on the wrong assumption that the flags are there for optimization reasons (see #16886 and #16922). This PR gets rid of the flags and the update method and just focuses on the CVE fix itself, i.e. it can be seen as a revert of the covert fix commit modulo the actual fix.
ACKs for top commit:
meshcollider:
utACK 1ad8ea2b73134bdd8d6b50704a019d47ad2191d8
laanwj:
Concept and code review ACK 1ad8ea2b73134bdd8d6b50704a019d47ad2191d8
jkczyz:
ACK 1ad8ea2b73134bdd8d6b50704a019d47ad2191d8
MarcoFalke:
ACK 1ad8ea2b73134bdd8d6b50704a019d47ad2191d8
fjahr:
Code review ACK 1ad8ea2b73134bdd8d6b50704a019d47ad2191d8
Tree-SHA512: 29f7ff9faece0285e11e16c024851f5bcb772dec64118ccc3f9067ec256267ec8e1b1e3105c7de2a72fd122c3b085e8fc840ab8f4e49813f1cc7a444df1867f7
2020-05-06 09:06:16 +02:00
|
|
|
if (vData.empty()) // Avoid divide-by-zero (CVE-2013-5700)
|
2013-02-25 02:36:59 +01:00
|
|
|
return true;
|
2012-08-13 05:26:27 +02:00
|
|
|
for (unsigned int i = 0; i < nHashFuncs; i++)
|
|
|
|
{
|
|
|
|
unsigned int nIndex = Hash(i, vKey);
|
|
|
|
// Checks bit nIndex of vData
|
2014-03-20 05:21:23 +01:00
|
|
|
if (!(vData[nIndex >> 3] & (1 << (7 & nIndex))))
|
2012-08-13 05:26:27 +02:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
bool CBloomFilter::contains(const COutPoint& outpoint) const
|
|
|
|
{
|
|
|
|
CDataStream stream(SER_NETWORK, PROTOCOL_VERSION);
|
|
|
|
stream << outpoint;
|
2017-01-30 13:13:07 +01:00
|
|
|
std::vector<unsigned char> data(stream.begin(), stream.end());
|
2012-08-13 05:26:27 +02:00
|
|
|
return contains(data);
|
|
|
|
}
|
|
|
|
|
|
|
|
bool CBloomFilter::contains(const uint256& hash) const
|
|
|
|
{
|
2017-01-30 13:13:07 +01:00
|
|
|
std::vector<unsigned char> data(hash.begin(), hash.end());
|
2012-08-13 05:26:27 +02:00
|
|
|
return contains(data);
|
|
|
|
}
|
|
|
|
|
2019-03-21 21:45:27 +01:00
|
|
|
bool CBloomFilter::contains(const uint160& hash) const
|
|
|
|
{
|
|
|
|
std::vector<unsigned char> data(hash.begin(), hash.end());
|
|
|
|
return contains(data);
|
|
|
|
}
|
|
|
|
|
2012-08-13 05:26:27 +02:00
|
|
|
bool CBloomFilter::IsWithinSizeConstraints() const
|
|
|
|
{
|
|
|
|
return vData.size() <= MAX_BLOOM_FILTER_SIZE && nHashFuncs <= MAX_HASH_FUNCS;
|
|
|
|
}
|
|
|
|
|
2019-03-21 21:45:27 +01:00
|
|
|
// Match if the filter contains any arbitrary script data element in script
|
|
|
|
bool CBloomFilter::CheckScript(const CScript &script) const
|
|
|
|
{
|
|
|
|
CScript::const_iterator pc = script.begin();
|
|
|
|
std::vector<unsigned char> data;
|
|
|
|
while (pc < script.end()) {
|
|
|
|
opcodetype opcode;
|
|
|
|
if (!script.GetOp(pc, opcode, data))
|
|
|
|
break;
|
|
|
|
if (data.size() != 0 && contains(data))
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
// If the transaction is a special transaction that has a registration
// transaction hash, test the registration transaction hash.
// If the transaction is a special transaction with any public keys or any
// public key hashes test them.
// If the transaction is a special transaction with payout addresses test
// the hash160 of those addresses.
// Filter is updated only if it has BLOOM_UPDATE_ALL flag to be able to have
// simple SPV wallets that doesn't work with DIP2 transactions (multicoin
// wallets, etc.)
bool CBloomFilter::CheckSpecialTransactionMatchesAndUpdate(const CTransaction &tx)
{
    // DIP2 special transactions are version 3 with a non-normal type.
    if(tx.nVersion != 3 || tx.nType == TRANSACTION_NORMAL) {
        return false; // it is not a special transaction
    }
    switch(tx.nType) {
    case(TRANSACTION_PROVIDER_REGISTER): {
        // ProRegTx: match on collateral outpoint, owner/voting key IDs,
        // or the payout script.
        CProRegTx proTx;
        if (GetTxPayload(tx, proTx)) {
            if(contains(proTx.collateralOutpoint) ||
               contains(proTx.keyIDOwner) ||
               contains(proTx.keyIDVoting) ||
               CheckScript(proTx.scriptPayout)) {
                // Insert this tx's hash so follow-up provider transactions
                // referencing it (via proTxHash) will also match.
                if ((nFlags & BLOOM_UPDATE_MASK) == BLOOM_UPDATE_ALL)
                    insert(tx.GetHash());
                return true;
            }
        }
        // Unparseable payload or no field matched.
        return false;
    }
    case(TRANSACTION_PROVIDER_UPDATE_SERVICE): {
        // ProUpServTx: match on the referenced registration tx hash or the
        // operator payout script.
        CProUpServTx proTx;
        if (GetTxPayload(tx, proTx)) {
            if(contains(proTx.proTxHash)) {
                return true;
            }
            if(CheckScript(proTx.scriptOperatorPayout)) {
                // Track the masternode by its registration hash from now on.
                if ((nFlags & BLOOM_UPDATE_MASK) == BLOOM_UPDATE_ALL)
                    insert(proTx.proTxHash);
                return true;
            }
        }
        return false;
    }
    case(TRANSACTION_PROVIDER_UPDATE_REGISTRAR): {
        // ProUpRegTx: match on the registration tx hash, the new voting key,
        // or the new payout script.
        CProUpRegTx proTx;
        if (GetTxPayload(tx, proTx)) {
            if(contains(proTx.proTxHash))
                return true;
            if(contains(proTx.keyIDVoting) ||
               CheckScript(proTx.scriptPayout)) {
                // Track the masternode by its registration hash from now on.
                if ((nFlags & BLOOM_UPDATE_MASK) == BLOOM_UPDATE_ALL)
                    insert(proTx.proTxHash);
                return true;
            }
        }
        return false;
    }
    case(TRANSACTION_PROVIDER_UPDATE_REVOKE): {
        // ProUpRevTx: only the referenced registration tx hash is relevant.
        CProUpRevTx proTx;
        if (GetTxPayload(tx, proTx)) {
            if(contains(proTx.proTxHash))
                return true;
        }
        return false;
    }
    case(TRANSACTION_COINBASE):
    case(TRANSACTION_QUORUM_COMMITMENT):
    case(TRANSACTION_MNHF_SIGNAL):
        // No additional checks for this transaction types
        return false;
    }

    // Unknown special tx type: log once per occurrence and treat as no match.
    LogPrintf("Unknown special transaction type in Bloom filter check.\n");
    return false;
}
|
|
|
|
|
2014-06-09 10:02:00 +02:00
|
|
|
// BIP37 relevance test for a whole transaction. Returns true if the filter
// matches the tx hash, any output script data element, any spent outpoint,
// any scriptSig data element, or any DIP2 special-transaction field. Depending
// on nFlags, matched outputs are inserted into the filter so that future
// spends of them will also match.
bool CBloomFilter::IsRelevantAndUpdate(const CTransaction& tx)
{
    bool fFound = false;
    // Match if the filter contains the hash of tx
    // for finding tx when they appear in a block
    if (vData.empty()) // zero-size = "match-all" filter
        return true;
    const uint256& hash = tx.GetHash();
    if (contains(hash))
        fFound = true;

    // Check additional matches for special transactions
    // NOTE: may insert into the filter even when fFound is already true.
    fFound = fFound || CheckSpecialTransactionMatchesAndUpdate(tx);

    for (unsigned int i = 0; i < tx.vout.size(); i++)
    {
        const CTxOut& txout = tx.vout[i];
        // Match if the filter contains any arbitrary script data element in any scriptPubKey in tx
        // If this matches, also add the specific output that was matched.
        // This means clients don't have to update the filter themselves when a new relevant tx
        // is discovered in order to find spending transactions, which avoids round-tripping and race conditions.
        if(CheckScript(txout.scriptPubKey)) {
            fFound = true;
            if ((nFlags & BLOOM_UPDATE_MASK) == BLOOM_UPDATE_ALL)
                insert(COutPoint(hash, i));
            else if ((nFlags & BLOOM_UPDATE_MASK) == BLOOM_UPDATE_P2PUBKEY_ONLY)
            {
                // Only track outputs whose script type pays directly to a
                // pubkey (P2PK or bare multisig).
                std::vector<std::vector<unsigned char> > vSolutions;
                TxoutType type = Solver(txout.scriptPubKey, vSolutions);
                if (type == TxoutType::PUBKEY || type == TxoutType::MULTISIG) {
                    insert(COutPoint(hash, i));
                }
            }
        }
    }

    // All filter updates happen above; the input checks below are read-only.
    if (fFound)
        return true;

    for (const CTxIn& txin : tx.vin)
    {
        // Match if the filter contains an outpoint tx spends
        if (contains(txin.prevout))
            return true;

        // Match if the filter contains any arbitrary script data element in any scriptSig in tx
        if(CheckScript(txin.scriptSig))
            return true;
    }

    return false;
}
|
2013-08-19 05:21:06 +02:00
|
|
|
|
2017-05-18 10:08:56 +02:00
|
|
|
// Construct a rolling bloom filter that remembers at least the last nElements
// inserted keys with false-positive rate fpRate; older entries are expired in
// generations as new ones arrive.
CRollingBloomFilter::CRollingBloomFilter(const unsigned int nElements, const double fpRate)
{
    double logFpRate = log(fpRate);
    /* The optimal number of hash functions is log(fpRate) / log(0.5), but
     * restrict it to the range 1-50. */
    nHashFuncs = std::max(1, std::min((int)round(logFpRate / log(0.5)), 50));
    /* In this rolling bloom filter, we'll store between 2 and 3 generations of nElements / 2 entries. */
    nEntriesPerGeneration = (nElements + 1) / 2;
    uint32_t nMaxElements = nEntriesPerGeneration * 3;
    /* The maximum fpRate = pow(1.0 - exp(-nHashFuncs * nMaxElements / nFilterBits), nHashFuncs)
     * => pow(fpRate, 1.0 / nHashFuncs) = 1.0 - exp(-nHashFuncs * nMaxElements / nFilterBits)
     * => 1.0 - pow(fpRate, 1.0 / nHashFuncs) = exp(-nHashFuncs * nMaxElements / nFilterBits)
     * => log(1.0 - pow(fpRate, 1.0 / nHashFuncs)) = -nHashFuncs * nMaxElements / nFilterBits
     * => nFilterBits = -nHashFuncs * nMaxElements / log(1.0 - pow(fpRate, 1.0 / nHashFuncs))
     * => nFilterBits = -nHashFuncs * nMaxElements / log(1.0 - exp(logFpRate / nHashFuncs))
     */
    uint32_t nFilterBits = (uint32_t)ceil(-1.0 * nHashFuncs * nMaxElements / log(1.0 - exp(logFpRate / nHashFuncs)));
    data.clear();
    /* For each data element we need to store 2 bits. If both bits are 0, the
     * bit is treated as unset. If the bits are (01), (10), or (11), the bit is
     * treated as set in generation 1, 2, or 3 respectively.
     * These bits are stored in separate integers: position P corresponds to bit
     * (P & 63) of the integers data[(P >> 6) * 2] and data[(P >> 6) * 2 + 1]. */
    data.resize(((nFilterBits + 63) / 64) << 1);
    // reset() randomizes nTweak and zeroes the bitmap.
    reset();
}
|
|
|
|
|
2015-12-03 13:35:55 +01:00
|
|
|
/* Similar to CBloomFilter::Hash */
|
2016-05-09 08:31:14 +02:00
|
|
|
static inline uint32_t RollingBloomHash(unsigned int nHashNum, uint32_t nTweak, const std::vector<unsigned char>& vDataToHash) {
|
|
|
|
return MurmurHash3(nHashNum * 0xFBA4C795 + nTweak, vDataToHash);
|
2015-12-03 13:35:55 +01:00
|
|
|
}
|
|
|
|
|
Collection of minor performance optimizations (#2855)
* Merge #13176: Improve CRollingBloomFilter performance: replace modulus with FastMod
9aac9f90d5e56752cc6cbfac48063ad29a01143c replace modulus with FastMod (Martin Ankerl)
Pull request description:
Not sure if this is optimization is necessary, but anyway I have some spare time so here it is. This replaces the slow modulo operation with a much faster 64bit multiplication & shift. This works when the hash is uniformly distributed between 0 and 2^32-1. This speeds up the benchmark by a factor of about 1.3:
```
RollingBloom, 5, 1500000, 3.73733, 4.97569e-07, 4.99002e-07, 4.98372e-07 # before
RollingBloom, 5, 1500000, 2.86842, 3.81630e-07, 3.83730e-07, 3.82473e-07 # FastMod
```
Be aware that this changes the internal data of the filter, so this should probably
not be used for CBloomFilter because of interoperability problems.
Tree-SHA512: 04104f3fb09f56c9d14458a6aad919aeb0a5af944e8ee6a31f00e93c753e22004648c1cd65bf36752b6addec528d19fb665c27b955ce1666a85a928e17afa47a
* Use unordered_map in CSporkManager
In one of my profiling sessions with many InstantSend transactions
happening, calls into CSporkManager added up to about 1% of total CPU time.
This is easily avoidable by using unordered maps.
* Use std::unordered_map instead of std::map in limitedmap
* Use unordered_set for CNode::setAskFor
* Add serialization support for unordered maps and sets
* Use unordered_map for mapArgs and mapMultiArgs
* Let limitedmap prune in batches and use unordered_multimap
Due to the batched pruning, there is no need to maintain an ordered map
of values anymore. Only when nPruneAfterSize, there is a need to create
a temporary ordered vector of values to figure out what can be removed.
* Instead of using a multimap for mapAskFor, use a vector which we sort on demand
CNode::AskFor will now push entries into an initially unordered vector
instead of an ordered multimap. Only when we later want to use vecAskFor in
SendMessages, we sort the vector.
The vector will actually be mostly sorted in most cases as insertion order
usually mimics the desired ordering. Only the last few entries might need
some shuffling around. Doing the sort on-demand should be less wasteful
then trying to maintain correct order all the time.
* Fix compilation of tests
* Fix limitedmap tests
* Rename limitedmap to unordered_limitedmap to ensure backports conflict
This ensures that future backports that depends on limitedmap's ordering
conflict so that we are made aware of needed action.
* Fix compilation error on Travis
2019-04-11 14:42:14 +02:00
|
|
|
|
|
|
|
// A replacement for x % n. This assumes that x and n are 32bit integers, and x is a uniformly random distributed 32bit value
// which should be the case for a good hash.
// See https://lemire.me/blog/2016/06/27/a-fast-alternative-to-the-modulo-reduction/
static inline uint32_t FastMod(uint32_t x, size_t n) {
    // Multiply-and-shift maps a uniform 32-bit x onto [0, n) without division.
    const uint64_t wide = (uint64_t)x * (uint64_t)n;
    return (uint32_t)(wide >> 32);
}
|
|
|
|
|
2015-04-24 19:14:45 +02:00
|
|
|
// Insert a key, tagging its bits with the current generation. When a
// generation fills up, advance to the next one and expire the oldest third
// of entries.
void CRollingBloomFilter::insert(const std::vector<unsigned char>& vKey)
{
    if (nEntriesThisGeneration == nEntriesPerGeneration) {
        nEntriesThisGeneration = 0;
        // Generations cycle through 1, 2, 3 (0 means "unset").
        nGeneration++;
        if (nGeneration == 4) {
            nGeneration = 1;
        }
        // All-ones when the corresponding bit of nGeneration is set,
        // all-zeros otherwise (unsigned wrap-around of 0 - 1).
        uint64_t nGenerationMask1 = 0 - (uint64_t)(nGeneration & 1);
        uint64_t nGenerationMask2 = 0 - (uint64_t)(nGeneration >> 1);
        /* Wipe old entries that used this generation number. */
        for (uint32_t p = 0; p < data.size(); p += 2) {
            uint64_t p1 = data[p], p2 = data[p + 1];
            // mask has a 1 wherever the stored 2-bit generation differs from
            // nGeneration; positions equal to nGeneration are cleared to 0.
            uint64_t mask = (p1 ^ nGenerationMask1) | (p2 ^ nGenerationMask2);
            data[p] = p1 & mask;
            data[p + 1] = p2 & mask;
        }
    }
    nEntriesThisGeneration++;

    for (int n = 0; n < nHashFuncs; n++) {
        uint32_t h = RollingBloomHash(n, nTweak, vKey);
        // Low 6 bits select the bit within a 64-bit word.
        int bit = h & 0x3F;
        /* FastMod works with the upper bits of h, so it is safe to ignore that the lower bits of h are already used for bit. */
        uint32_t pos = FastMod(h, data.size());
        /* The lowest bit of pos is ignored, and set to zero for the first bit, and to one for the second. */
        data[pos & ~1] = (data[pos & ~1] & ~(((uint64_t)1) << bit)) | ((uint64_t)(nGeneration & 1)) << bit;
        data[pos | 1] = (data[pos | 1] & ~(((uint64_t)1) << bit)) | ((uint64_t)(nGeneration >> 1)) << bit;
    }
}
|
|
|
|
|
2015-07-17 12:42:43 +02:00
|
|
|
void CRollingBloomFilter::insert(const uint256& hash)
|
|
|
|
{
|
2017-01-30 13:13:07 +01:00
|
|
|
std::vector<unsigned char> vData(hash.begin(), hash.end());
|
2016-09-27 13:25:42 +02:00
|
|
|
insert(vData);
|
2015-07-17 12:42:43 +02:00
|
|
|
}
|
|
|
|
|
2015-04-24 19:14:45 +02:00
|
|
|
// Probabilistic membership test over all still-live generations: returns
// false only if some probed bit is unset in both generation planes.
bool CRollingBloomFilter::contains(const std::vector<unsigned char>& vKey) const
{
    for (int n = 0; n < nHashFuncs; n++) {
        uint32_t h = RollingBloomHash(n, nTweak, vKey);
        // Low 6 bits select the bit within a 64-bit word; FastMod on the
        // upper bits selects the word pair.
        int bit = h & 0x3F;
        uint32_t pos = FastMod(h, data.size());
        /* If the relevant bit is not set in either data[pos & ~1] or data[pos | 1], the filter does not contain vKey */
        if (!(((data[pos & ~1] | data[pos | 1]) >> bit) & 1)) {
            return false;
        }
    }
    return true;
}
|
|
|
|
|
2015-07-17 12:42:43 +02:00
|
|
|
bool CRollingBloomFilter::contains(const uint256& hash) const
|
|
|
|
{
|
2017-01-30 13:13:07 +01:00
|
|
|
std::vector<unsigned char> vData(hash.begin(), hash.end());
|
2016-09-27 13:25:42 +02:00
|
|
|
return contains(vData);
|
2015-07-17 12:42:43 +02:00
|
|
|
}
|
|
|
|
|
2015-07-27 18:58:00 +02:00
|
|
|
void CRollingBloomFilter::reset()
|
2015-04-24 19:14:45 +02:00
|
|
|
{
|
2015-12-03 13:35:55 +01:00
|
|
|
nTweak = GetRand(std::numeric_limits<unsigned int>::max());
|
|
|
|
nEntriesThisGeneration = 0;
|
|
|
|
nGeneration = 1;
|
Merge #16073: refactor: Improve CRollingBloomFilter::reset by using std::fill
df9e15f092c18a8047f09307576c2b77b9c8d01c refactor: Improve CRollingBloomFilter::reset by using std::fill (João Barbosa)
d2dbc7da26e1ca40200521c05a0b1ca75578acd2 bench: Add benchmark for CRollingBloomFilter::reset (João Barbosa)
Pull request description:
Cleaner code. Also improves performance with `--enable-debug` (which is meaningless to non-developers).
Before:
```
# Benchmark, evals, iterations, total, min, max, median
RollingBloomReset, 5, 150, 19.3008, 0.0254917, 0.0259195, 0.0257395
```
After:
```
# Benchmark, evals, iterations, total, min, max, median
RollingBloomReset, 5, 150, 5.43269, 0.00720651, 0.00729697, 0.00724854
```
ACKs for commit df9e15:
MarcoFalke:
re-utACK df9e15f092
jamesob:
re-utACK https://github.com/bitcoin/bitcoin/pull/16073/commits/df9e15f092c18a8047f09307576c2b77b9c8d01c
Tree-SHA512: 22038411dfd41afad77b17a3da9ee04476ffbd4d215dcf47bdd9f14588759bc328a55d958dcebc2036b52ce4c56f79b1284eae11e56ddfaf21f0b2ee1c6a914a
2019-05-22 21:05:50 +02:00
|
|
|
std::fill(data.begin(), data.end(), 0);
|
2015-04-24 19:14:45 +02:00
|
|
|
}
|