Mirror of https://github.com/dashpay/dash.git
Synced 2024-12-24 11:32:46 +01:00

Merge pull request #5785 from ogabrielides/20.0.3-release

backport: v20.0.3 backports and release

This commit is contained in commit c50a7d1a7b.
configure.ac

@@ -1,7 +1,7 @@
 AC_PREREQ([2.69])
 define(_CLIENT_VERSION_MAJOR, 20)
 define(_CLIENT_VERSION_MINOR, 0)
-define(_CLIENT_VERSION_BUILD, 2)
+define(_CLIENT_VERSION_BUILD, 3)
 define(_CLIENT_VERSION_RC, 0)
 define(_CLIENT_VERSION_IS_RELEASE, true)
 define(_COPYRIGHT_YEAR, 2023)
doc/release-notes.md

@@ -1,4 +1,4 @@
-# Dash Core version v20.0.2
+# Dash Core version v20.0.3

 Release is now available from:

@@ -37,18 +37,22 @@ reindex or re-sync the whole chain.

 ## Masternode fix

-A problem has been fixed in the old quorum data cleanup mechanism. It was slowing down masternodes during DKG sessions and causing them to get PoSe scored.
+Memory usage during the old quorum data cleanup mechanism has been reduced.

-## Testnet Crash
+## Wallet fix

-A fix has been implemented for the reported crash that could occur when upgrading from v19.x to v20.0.0 after v20 activation without re-indexing.
+A fix has been implemented for a reported wallet decryption issue.
+
+## RPC changes
+
+In `getspecialtxes`, the `instantlock` and `chainlock` fields now reflect actual values.

 ## Other changes

 Implemented improvements in GitHub CI and the build system for macOS. Fixed compilation issues on FreeBSD.


-# v20.0.2 Change log
+# v20.0.3 Change log

 See detailed [set of changes][set-of-changes].

@@ -86,6 +90,7 @@ Dash Core tree 0.12.1.x was a fork of Bitcoin Core tree 0.12.

 These releases are considered obsolete. Old release notes can be found here:

+- [v20.0.2](https://github.com/dashpay/dash/blob/master/doc/release-notes/dash/release-notes-20.0.2.md) released December/06/2023
 - [v20.0.1](https://github.com/dashpay/dash/blob/master/doc/release-notes/dash/release-notes-20.0.1.md) released November/18/2023
 - [v20.0.0](https://github.com/dashpay/dash/blob/master/doc/release-notes/dash/release-notes-20.0.0.md) released November/15/2023
 - [v19.3.0](https://github.com/dashpay/dash/blob/master/doc/release-notes/dash/release-notes-19.3.0.md) released July/31/2023

@@ -130,4 +135,4 @@ These releases are considered obsolete. Old release notes can be found here:
 - [v0.10.x](https://github.com/dashpay/dash/blob/master/doc/release-notes/dash/release-notes-0.10.0.md) released Sep/25/2014
 - [v0.9.x](https://github.com/dashpay/dash/blob/master/doc/release-notes/dash/release-notes-0.9.0.md) released Mar/13/2014

-[set-of-changes]: https://github.com/dashpay/dash/compare/v20.0.1...dashpay:v20.0.2
+[set-of-changes]: https://github.com/dashpay/dash/compare/v20.0.2...dashpay:v20.0.3
doc/release-notes/dash/release-notes-20.0.2.md (new file, 133 lines)
@@ -0,0 +1,133 @@
# Dash Core version v20.0.2

Release is now available from:

<https://www.dash.org/downloads/#wallets>

This is a new patch version release, bringing small bug fixes and build system enhancements.

This release is optional for all nodes.

Please report bugs using the issue tracker at GitHub:

<https://github.com/dashpay/dash/issues>


# Upgrading and downgrading

## How to Upgrade

If you are running an older version, shut it down. Wait until it has completely
shut down (which might take a few minutes for older versions), then run the
installer (on Windows) or just copy over /Applications/Dash-Qt (on Mac) or
dashd/dash-qt (on Linux). If you upgrade after DIP0003 activation and you were
using version < 0.13 you will have to reindex (start with -reindex-chainstate
or -reindex) to make sure your wallet has all the new data synced. Upgrading
from version 0.13 should not require any additional actions.

## Downgrade warning

### Downgrade to a version < v19.2.0

Downgrading to a version older than v19.2.0 is not supported due to changes
in the evodb database. If you need to use an older version, you must either
reindex or re-sync the whole chain.

# Notable changes

## Masternode fix

A problem has been fixed in the old quorum data cleanup mechanism. It was slowing down masternodes during DKG sessions and causing them to get PoSe scored.

## Testnet Crash

A fix has been implemented for the reported crash that could occur when upgrading from v19.x to v20.0.0 after v20 activation without re-indexing.

## Other changes

Implemented improvements in GitHub CI and the build system for macOS. Fixed compilation issues on FreeBSD.


# v20.0.2 Change log

See detailed [set of changes][set-of-changes].

# Credits

Thanks to everyone who directly contributed to this release:

- Konstantin Akimov (knst)
- Odysseas Gabrielides (ogabrielides)
- PastaPastaPasta
- UdjinM6

As well as everyone that submitted issues, reviewed pull requests and helped
debug the release candidates.

# Older releases

Dash was previously known as Darkcoin.

Darkcoin tree 0.8.x was a fork of Litecoin tree 0.8; its original name was XCoin,
which was first released on Jan/18/2014.

Darkcoin tree 0.9.x was the open source implementation of masternodes based on
the 0.8.x tree and was first released on Mar/13/2014.

Darkcoin tree 0.10.x used to be the closed source implementation of Darksend
which was released open source on Sep/25/2014.

Dash Core tree 0.11.x was a fork of Bitcoin Core tree 0.9;
Darkcoin was rebranded to Dash.

Dash Core tree 0.12.0.x was a fork of Bitcoin Core tree 0.10.

Dash Core tree 0.12.1.x was a fork of Bitcoin Core tree 0.12.

These releases are considered obsolete. Old release notes can be found here:

- [v20.0.1](https://github.com/dashpay/dash/blob/master/doc/release-notes/dash/release-notes-20.0.1.md) released November/18/2023
- [v20.0.0](https://github.com/dashpay/dash/blob/master/doc/release-notes/dash/release-notes-20.0.0.md) released November/15/2023
- [v19.3.0](https://github.com/dashpay/dash/blob/master/doc/release-notes/dash/release-notes-19.3.0.md) released July/31/2023
- [v19.2.0](https://github.com/dashpay/dash/blob/master/doc/release-notes/dash/release-notes-19.2.0.md) released June/19/2023
- [v19.1.0](https://github.com/dashpay/dash/blob/master/doc/release-notes/dash/release-notes-19.1.0.md) released May/22/2023
- [v19.0.0](https://github.com/dashpay/dash/blob/master/doc/release-notes/dash/release-notes-19.0.0.md) released Apr/14/2023
- [v18.2.2](https://github.com/dashpay/dash/blob/master/doc/release-notes/dash/release-notes-18.2.2.md) released Mar/21/2023
- [v18.2.1](https://github.com/dashpay/dash/blob/master/doc/release-notes/dash/release-notes-18.2.1.md) released Jan/17/2023
- [v18.2.0](https://github.com/dashpay/dash/blob/master/doc/release-notes/dash/release-notes-18.2.0.md) released Jan/01/2023
- [v18.1.1](https://github.com/dashpay/dash/blob/master/doc/release-notes/dash/release-notes-18.1.1.md) released January/08/2023
- [v18.1.0](https://github.com/dashpay/dash/blob/master/doc/release-notes/dash/release-notes-18.1.0.md) released October/09/2022
- [v18.0.2](https://github.com/dashpay/dash/blob/master/doc/release-notes/dash/release-notes-18.0.2.md) released October/09/2022
- [v18.0.1](https://github.com/dashpay/dash/blob/master/doc/release-notes/dash/release-notes-18.0.1.md) released August/17/2022
- [v0.17.0.3](https://github.com/dashpay/dash/blob/master/doc/release-notes/dash/release-notes-0.17.0.3.md) released June/07/2021
- [v0.17.0.2](https://github.com/dashpay/dash/blob/master/doc/release-notes/dash/release-notes-0.17.0.2.md) released May/19/2021
- [v0.16.1.1](https://github.com/dashpay/dash/blob/master/doc/release-notes/dash/release-notes-0.16.1.1.md) released November/17/2020
- [v0.16.1.0](https://github.com/dashpay/dash/blob/master/doc/release-notes/dash/release-notes-0.16.1.0.md) released November/14/2020
- [v0.16.0.1](https://github.com/dashpay/dash/blob/master/doc/release-notes/dash/release-notes-0.16.0.1.md) released September/30/2020
- [v0.15.0.0](https://github.com/dashpay/dash/blob/master/doc/release-notes/dash/release-notes-0.15.0.0.md) released February/18/2020
- [v0.14.0.5](https://github.com/dashpay/dash/blob/master/doc/release-notes/dash/release-notes-0.14.0.5.md) released December/08/2019
- [v0.14.0.4](https://github.com/dashpay/dash/blob/master/doc/release-notes/dash/release-notes-0.14.0.4.md) released November/22/2019
- [v0.14.0.3](https://github.com/dashpay/dash/blob/master/doc/release-notes/dash/release-notes-0.14.0.3.md) released August/15/2019
- [v0.14.0.2](https://github.com/dashpay/dash/blob/master/doc/release-notes/dash/release-notes-0.14.0.2.md) released July/4/2019
- [v0.14.0.1](https://github.com/dashpay/dash/blob/master/doc/release-notes/dash/release-notes-0.14.0.1.md) released May/31/2019
- [v0.14.0](https://github.com/dashpay/dash/blob/master/doc/release-notes/dash/release-notes-0.14.0.md) released May/22/2019
- [v0.13.3](https://github.com/dashpay/dash/blob/master/doc/release-notes/dash/release-notes-0.13.3.md) released Apr/04/2019
- [v0.13.2](https://github.com/dashpay/dash/blob/master/doc/release-notes/dash/release-notes-0.13.2.md) released Mar/15/2019
- [v0.13.1](https://github.com/dashpay/dash/blob/master/doc/release-notes/dash/release-notes-0.13.1.md) released Feb/9/2019
- [v0.13.0](https://github.com/dashpay/dash/blob/master/doc/release-notes/dash/release-notes-0.13.0.md) released Jan/14/2019
- [v0.12.3.4](https://github.com/dashpay/dash/blob/master/doc/release-notes/dash/release-notes-0.12.3.4.md) released Dec/14/2018
- [v0.12.3.3](https://github.com/dashpay/dash/blob/master/doc/release-notes/dash/release-notes-0.12.3.3.md) released Sep/19/2018
- [v0.12.3.2](https://github.com/dashpay/dash/blob/master/doc/release-notes/dash/release-notes-0.12.3.2.md) released Jul/09/2018
- [v0.12.3.1](https://github.com/dashpay/dash/blob/master/doc/release-notes/dash/release-notes-0.12.3.1.md) released Jul/03/2018
- [v0.12.2.3](https://github.com/dashpay/dash/blob/master/doc/release-notes/dash/release-notes-0.12.2.3.md) released Jan/12/2018
- [v0.12.2.2](https://github.com/dashpay/dash/blob/master/doc/release-notes/dash/release-notes-0.12.2.2.md) released Dec/17/2017
- [v0.12.2](https://github.com/dashpay/dash/blob/master/doc/release-notes/dash/release-notes-0.12.2.md) released Nov/08/2017
- [v0.12.1](https://github.com/dashpay/dash/blob/master/doc/release-notes/dash/release-notes-0.12.1.md) released Feb/06/2017
- [v0.12.0](https://github.com/dashpay/dash/blob/master/doc/release-notes/dash/release-notes-0.12.0.md) released Aug/15/2015
- [v0.11.2](https://github.com/dashpay/dash/blob/master/doc/release-notes/dash/release-notes-0.11.2.md) released Mar/04/2015
- [v0.11.1](https://github.com/dashpay/dash/blob/master/doc/release-notes/dash/release-notes-0.11.1.md) released Feb/10/2015
- [v0.11.0](https://github.com/dashpay/dash/blob/master/doc/release-notes/dash/release-notes-0.11.0.md) released Jan/15/2015
- [v0.10.x](https://github.com/dashpay/dash/blob/master/doc/release-notes/dash/release-notes-0.10.0.md) released Sep/25/2014
- [v0.9.x](https://github.com/dashpay/dash/blob/master/doc/release-notes/dash/release-notes-0.9.0.md) released Mar/13/2014

[set-of-changes]: https://github.com/dashpay/dash/compare/v20.0.1...dashpay:v20.0.2
src/llmq/dkgsessionmgr.cpp

@@ -465,10 +465,6 @@ void CDKGSessionManager::CleanupOldContributions() const
     const auto prefixes = {DB_VVEC, DB_SKCONTRIB, DB_ENC_CONTRIB};

     for (const auto& params : Params().GetConsensus().llmqs) {
-        // For how many blocks recent DKG info should be kept
-        const int MAX_CYCLES = params.useRotation ? params.keepOldKeys / params.signingActiveQuorumCount : params.keepOldKeys;
-        const int MAX_STORE_DEPTH = MAX_CYCLES * params.dkgInterval;
-
         LogPrint(BCLog::LLMQ, "CDKGSessionManager::%s -- looking for old entries for llmq type %d\n", __func__, ToUnderlying(params.type));

         CDBBatch batch(*db);
@@ -486,7 +482,7 @@ void CDKGSessionManager::CleanupOldContributions() const
             }
             cnt_all++;
             const CBlockIndex* pindexQuorum = m_chainstate.m_blockman.LookupBlockIndex(std::get<2>(k));
-            if (pindexQuorum == nullptr || m_chainstate.m_chain.Tip()->nHeight - pindexQuorum->nHeight > MAX_STORE_DEPTH) {
+            if (pindexQuorum == nullptr || m_chainstate.m_chain.Tip()->nHeight - pindexQuorum->nHeight > utils::max_store_depth(params)) {
                 // not found or too old
                 batch.Erase(k);
                 cnt_old++;
src/llmq/quorums.cpp

@@ -201,8 +201,6 @@ CQuorumManager::CQuorumManager(CBLSWorker& _blsWorker, CChainState& chainstate,
     m_peerman(peerman)
 {
     utils::InitQuorumsCache(mapQuorumsCache, false);
-    utils::InitQuorumsCache(scanQuorumsCache, false);
-
     quorumThreadInterrupt.reset();
 }

@@ -364,7 +362,7 @@ void CQuorumManager::CheckQuorumConnections(const Consensus::LLMQParams& llmqPar
     }
 }

-CQuorumPtr CQuorumManager::BuildQuorumFromCommitment(const Consensus::LLMQType llmqType, gsl::not_null<const CBlockIndex*> pQuorumBaseBlockIndex) const
+CQuorumPtr CQuorumManager::BuildQuorumFromCommitment(const Consensus::LLMQType llmqType, gsl::not_null<const CBlockIndex*> pQuorumBaseBlockIndex, bool populate_cache) const
 {
     const uint256& quorumHash{pQuorumBaseBlockIndex->GetBlockHash()};
     uint256 minedBlockHash;
@@ -394,7 +392,7 @@ CQuorumPtr CQuorumManager::BuildQuorumFromCommitment(const Consensus::LLMQType l
         }
     }

-    if (hasValidVvec) {
+    if (hasValidVvec && populate_cache) {
         // pre-populate caches in the background
         // recovering public key shares is quite expensive and would result in serious lags for the first few signing
         // sessions if the shares would be calculated on-demand
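The new populate_cache flag threads from GetQuorum() through BuildQuorumFromCommitment() so that bulk scans of old quorums can skip the expensive background cache populator, while a default argument keeps existing call sites unchanged. A minimal standalone sketch of this default-argument pattern (illustrative only; the names here are made up and do not appear in the diff):

```cpp
// Sketch: a defaulted flag lets bulk callers skip an expensive side effect
// without touching any existing call site.
#include <cstdio>

struct QuorumSketch {};

QuorumSketch build_quorum_sketch(bool populate_cache)
{
    QuorumSketch q;
    if (populate_cache) {
        // stands in for StartCachePopulatorThread(): costly key-share
        // recovery that only the most recent quorums benefit from
        std::printf("starting cache populator\n");
    }
    return q;
}

// Existing callers keep the old behavior via the default argument.
QuorumSketch get_quorum_sketch(bool populate_cache = true)
{
    return build_quorum_sketch(populate_cache);
}

int main()
{
    get_quorum_sketch();      // default: populate, as before
    get_quorum_sketch(false); // bulk scan of old quorums: skip the side effect
    return 0;
}
```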
@@ -503,14 +501,45 @@ std::vector<CQuorumCPtr> CQuorumManager::ScanQuorums(Consensus::LLMQType llmqTyp
         return {};
     }

-    const CBlockIndex* pIndexScanCommitments{pindexStart};
+    gsl::not_null<const CBlockIndex*> pindexStore{pindexStart};
+    const auto& llmq_params_opt = GetLLMQParams(llmqType);
+    assert(llmq_params_opt.has_value());
+
+    // Quorum sets can only change during the mining phase of DKG.
+    // Find the closest known block index.
+    const int quorumCycleStartHeight = pindexStart->nHeight - (pindexStart->nHeight % llmq_params_opt->dkgInterval);
+    const int quorumCycleMiningStartHeight = quorumCycleStartHeight + llmq_params_opt->dkgMiningWindowStart;
+    const int quorumCycleMiningEndHeight = quorumCycleStartHeight + llmq_params_opt->dkgMiningWindowEnd;
+
+    if (pindexStart->nHeight < quorumCycleMiningStartHeight) {
+        // too early for this cycle, use the previous one
+        // bail out if it's below genesis block
+        if (quorumCycleMiningEndHeight < llmq_params_opt->dkgInterval) return {};
+        pindexStore = pindexStart->GetAncestor(quorumCycleMiningEndHeight - llmq_params_opt->dkgInterval);
+    } else if (pindexStart->nHeight > quorumCycleMiningEndHeight) {
+        // we are past the mining phase of this cycle, use it
+        pindexStore = pindexStart->GetAncestor(quorumCycleMiningEndHeight);
+    }
+    // everything else is inside the mining phase of this cycle, no pindexStore adjustment needed
+
+    gsl::not_null<const CBlockIndex*> pIndexScanCommitments{pindexStore};
     size_t nScanCommitments{nCountRequested};
     std::vector<CQuorumCPtr> vecResultQuorums;

     {
         LOCK(cs_scan_quorums);
+        if (scanQuorumsCache.empty()) {
+            for (const auto& llmq : Params().GetConsensus().llmqs) {
+                // NOTE: We store it for each block hash in the DKG mining phase here
+                // and not for a single quorum hash per quorum like we do for other caches.
+                // And we only do this for max_cycles() of the most recent quorums
+                // because signing by old quorums requires the exact quorum hash to be specified
+                // and quorum scanning isn't needed there.
+                scanQuorumsCache.try_emplace(llmq.type, utils::max_cycles(llmq, llmq.keepOldConnections) * (llmq.dkgMiningWindowEnd - llmq.dkgMiningWindowStart));
+            }
+        }
         auto& cache = scanQuorumsCache[llmqType];
-        bool fCacheExists = cache.get(pindexStart->GetBlockHash(), vecResultQuorums);
+        bool fCacheExists = cache.get(pindexStore->GetBlockHash(), vecResultQuorums);
         if (fCacheExists) {
             // We have exactly what requested so just return it
             if (vecResultQuorums.size() == nCountRequested) {
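The height arithmetic above anchors the cache key to a stable block: quorum sets only change during the DKG mining window, so every block between two windows maps to the same pindexStore and hits the same cache entry. A standalone restatement of the height selection with made-up parameters (dkgInterval = 24, mining window 10..18; these values are illustrative, not taken from this diff):

```cpp
// Sketch of the pindexStore height selection in ScanQuorums() above.
#include <cassert>

struct LLMQParamsSketch {
    int dkgInterval;
    int dkgMiningWindowStart;
    int dkgMiningWindowEnd;
};

// Returns the height whose block hash the scan cache is keyed on,
// or -1 when the previous cycle would fall below the genesis block.
int store_height_sketch(int start_height, const LLMQParamsSketch& p)
{
    const int cycle_start = start_height - (start_height % p.dkgInterval);
    const int mining_start = cycle_start + p.dkgMiningWindowStart;
    const int mining_end = cycle_start + p.dkgMiningWindowEnd;

    if (start_height < mining_start) {
        // too early for this cycle, use the previous one
        if (mining_end < p.dkgInterval) return -1; // below genesis
        return mining_end - p.dkgInterval;
    }
    if (start_height > mining_end) return mining_end; // past the mining phase
    return start_height; // inside the mining phase: no adjustment
}

int main()
{
    constexpr LLMQParamsSketch p{24, 10, 18};
    // cycle start for height 100 is 96, so the window spans 106..114
    assert(store_height_sketch(100, p) == 90);  // before the window: previous cycle (114 - 24)
    assert(store_height_sketch(110, p) == 110); // inside the window: unchanged
    assert(store_height_sketch(118, p) == 114); // past the window: window end
    return 0;
}
```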
@@ -524,17 +553,17 @@ std::vector<CQuorumCPtr> CQuorumManager::ScanQuorums(Consensus::LLMQType llmqTyp
             // scanning for the rests
             if (!vecResultQuorums.empty()) {
                 nScanCommitments -= vecResultQuorums.size();
+                // bail out if it's below genesis block
+                if (vecResultQuorums.back()->m_quorum_base_block_index->pprev == nullptr) return {};
                 pIndexScanCommitments = vecResultQuorums.back()->m_quorum_base_block_index->pprev;
             }
         } else {
-            // If there is nothing in cache request at least cache.max_size() because this gets cached then later
-            nScanCommitments = std::max(nCountRequested, cache.max_size());
+            // If there is nothing in cache request at least keepOldConnections because this gets cached then later
+            nScanCommitments = std::max(nCountRequested, static_cast<size_t>(llmq_params_opt->keepOldConnections));
         }
     }

     // Get the block indexes of the mined commitments to build the required quorums from
-    const auto& llmq_params_opt = GetLLMQParams(llmqType);
-    assert(llmq_params_opt.has_value());
     std::vector<const CBlockIndex*> pQuorumBaseBlockIndexes{ llmq_params_opt->useRotation ?
             quorumBlockProcessor.GetMinedCommitmentsIndexedUntilBlock(llmqType, pIndexScanCommitments, nScanCommitments) :
             quorumBlockProcessor.GetMinedCommitmentsUntilBlock(llmqType, pIndexScanCommitments, nScanCommitments)
@@ -543,7 +572,9 @@ std::vector<CQuorumCPtr> CQuorumManager::ScanQuorums(Consensus::LLMQType llmqTyp

     for (auto& pQuorumBaseBlockIndex : pQuorumBaseBlockIndexes) {
         assert(pQuorumBaseBlockIndex);
-        auto quorum = GetQuorum(llmqType, pQuorumBaseBlockIndex);
+        // populate cache for keepOldConnections most recent quorums only
+        bool populate_cache = vecResultQuorums.size() < llmq_params_opt->keepOldConnections;
+        auto quorum = GetQuorum(llmqType, pQuorumBaseBlockIndex, populate_cache);
         assert(quorum != nullptr);
         vecResultQuorums.emplace_back(quorum);
     }
@@ -551,10 +582,12 @@ std::vector<CQuorumCPtr> CQuorumManager::ScanQuorums(Consensus::LLMQType llmqTyp
     const size_t nCountResult{vecResultQuorums.size()};
     if (nCountResult > 0) {
         LOCK(cs_scan_quorums);
-        // Don't cache more than cache.max_size() elements
+        // Don't cache more than keepOldConnections elements
+        // because signing by old quorums requires the exact quorum hash
+        // to be specified and quorum scanning isn't needed there.
         auto& cache = scanQuorumsCache[llmqType];
-        const size_t nCacheEndIndex = std::min(nCountResult, cache.max_size());
-        cache.emplace(pindexStart->GetBlockHash(), {vecResultQuorums.begin(), vecResultQuorums.begin() + nCacheEndIndex});
+        const size_t nCacheEndIndex = std::min(nCountResult, static_cast<size_t>(llmq_params_opt->keepOldConnections));
+        cache.emplace(pindexStore->GetBlockHash(), {vecResultQuorums.begin(), vecResultQuorums.begin() + nCacheEndIndex});
     }
     // Don't return more than nCountRequested elements
     const size_t nResultEndIndex = std::min(nCountResult, nCountRequested);
@@ -571,7 +604,7 @@ CQuorumCPtr CQuorumManager::GetQuorum(Consensus::LLMQType llmqType, const uint25
     return GetQuorum(llmqType, pQuorumBaseBlockIndex);
 }

-CQuorumCPtr CQuorumManager::GetQuorum(Consensus::LLMQType llmqType, gsl::not_null<const CBlockIndex*> pQuorumBaseBlockIndex) const
+CQuorumCPtr CQuorumManager::GetQuorum(Consensus::LLMQType llmqType, gsl::not_null<const CBlockIndex*> pQuorumBaseBlockIndex, bool populate_cache) const
 {
     auto quorumHash = pQuorumBaseBlockIndex->GetBlockHash();

@@ -586,7 +619,7 @@ CQuorumCPtr CQuorumManager::GetQuorum(Consensus::LLMQType llmqType, gsl::not_nul
         return pQuorum;
     }

-    return BuildQuorumFromCommitment(llmqType, pQuorumBaseBlockIndex);
+    return BuildQuorumFromCommitment(llmqType, pQuorumBaseBlockIndex, populate_cache);
 }

 size_t CQuorumManager::GetQuorumRecoveryStartOffset(const CQuorumCPtr pQuorum, const CBlockIndex* pIndex) const
@@ -819,7 +852,10 @@ void CQuorumManager::StartCachePopulatorThread(const CQuorumCPtr pQuorum) const
     }

     cxxtimer::Timer t(true);
-    LogPrint(BCLog::LLMQ, "CQuorumManager::StartCachePopulatorThread -- start\n");
+    LogPrint(BCLog::LLMQ, "CQuorumManager::StartCachePopulatorThread -- type=%d height=%d hash=%s start\n",
+             ToUnderlying(pQuorum->params.type),
+             pQuorum->m_quorum_base_block_index->nHeight,
+             pQuorum->m_quorum_base_block_index->GetBlockHash().ToString());

     // when then later some other thread tries to get keys, it will be much faster
     workerPool.push([pQuorum, t, this](int threadId) {
@@ -831,7 +867,11 @@ void CQuorumManager::StartCachePopulatorThread(const CQuorumCPtr pQuorum) const
                 pQuorum->GetPubKeyShare(i);
             }
         }
-        LogPrint(BCLog::LLMQ, "CQuorumManager::StartCachePopulatorThread -- done. time=%d\n", t.count());
+        LogPrint(BCLog::LLMQ, "CQuorumManager::StartCachePopulatorThread -- type=%d height=%d hash=%s done. time=%d\n",
+                 ToUnderlying(pQuorum->params.type),
+                 pQuorum->m_quorum_base_block_index->nHeight,
+                 pQuorum->m_quorum_base_block_index->GetBlockHash().ToString(),
+                 t.count());
     });
 }
@@ -1023,13 +1063,31 @@ void CQuorumManager::StartCleanupOldQuorumDataThread(const CBlockIndex* pIndex)
     workerPool.push([pIndex, t, this](int threadId) {
         std::set<uint256> dbKeysToSkip;

+        if (LOCK(cs_cleanup); cleanupQuorumsCache.empty()) {
+            utils::InitQuorumsCache(cleanupQuorumsCache, false);
+        }
         for (const auto& params : Params().GetConsensus().llmqs) {
             if (quorumThreadInterrupt) {
                 break;
             }
-            for (const auto& pQuorum : ScanQuorums(params.type, pIndex, params.keepOldKeys)) {
-                dbKeysToSkip.insert(MakeQuorumKey(*pQuorum));
+            LOCK(cs_cleanup);
+            auto& cache = cleanupQuorumsCache[params.type];
+            const CBlockIndex* pindex_loop{pIndex};
+            std::set<uint256> quorum_keys;
+            while (pindex_loop != nullptr && pIndex->nHeight - pindex_loop->nHeight < utils::max_store_depth(params)) {
+                uint256 quorum_key;
+                if (cache.get(pindex_loop->GetBlockHash(), quorum_key)) {
+                    quorum_keys.insert(quorum_key);
+                    if (quorum_keys.size() >= params.keepOldKeys) break; // extra safety belt
+                }
+                pindex_loop = pindex_loop->pprev;
+            }
+            for (const auto& pQuorum : ScanQuorums(params.type, pIndex, params.keepOldKeys - quorum_keys.size())) {
+                const uint256 quorum_key = MakeQuorumKey(*pQuorum);
+                quorum_keys.insert(quorum_key);
+                cache.insert(pQuorum->m_quorum_base_block_index->GetBlockHash(), quorum_key);
             }
+            dbKeysToSkip.merge(quorum_keys);
         }

         if (!quorumThreadInterrupt) {
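The memory reduction mentioned in the release notes appears to come from this hunk: instead of rebuilding a full CQuorum object for every one of the keepOldKeys old quorums on each cleanup pass, the new cleanupQuorumsCache remembers each quorum's DB key per block hash, and the expensive ScanQuorums() call only runs for the remainder. A simplified standalone sketch of the cache walk (the types here are stand-ins, not the real uint256 or unordered_lru_cache):

```cpp
// Sketch: walk back from the tip collecting cached quorum DB keys, so the
// next cleanup pass only rescans quorums it has not seen before.
#include <cstddef>
#include <map>
#include <set>
#include <string>
#include <vector>

using BlockHash = std::string; // stands in for uint256
using QuorumKey = std::string; // stands in for uint256

std::map<BlockHash, QuorumKey> cleanup_cache_sketch; // stands in for the LRU cache

std::set<QuorumKey> collect_keys_sketch(const std::vector<BlockHash>& chain_from_tip,
                                        std::size_t keep_old_keys)
{
    std::set<QuorumKey> keys;
    for (const auto& hash : chain_from_tip) {
        // every cache hit is a quorum we can skip rebuilding
        if (auto it = cleanup_cache_sketch.find(hash); it != cleanup_cache_sketch.end()) {
            keys.insert(it->second);
            if (keys.size() >= keep_old_keys) break; // extra safety belt
        }
    }
    return keys;
}
```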
src/llmq/quorums.h

@@ -231,6 +231,8 @@ private:
    mutable std::map<Consensus::LLMQType, unordered_lru_cache<uint256, CQuorumPtr, StaticSaltedHasher>> mapQuorumsCache GUARDED_BY(cs_map_quorums);
    mutable RecursiveMutex cs_scan_quorums;
    mutable std::map<Consensus::LLMQType, unordered_lru_cache<uint256, std::vector<CQuorumCPtr>, StaticSaltedHasher>> scanQuorumsCache GUARDED_BY(cs_scan_quorums);
+    mutable Mutex cs_cleanup;
+    mutable std::map<Consensus::LLMQType, unordered_lru_cache<uint256, uint256, StaticSaltedHasher>> cleanupQuorumsCache GUARDED_BY(cs_cleanup);

     mutable ctpl::thread_pool workerPool;
     mutable CThreadInterrupt quorumThreadInterrupt;
@@ -265,10 +267,10 @@ private:
     // all private methods here are cs_main-free
     void CheckQuorumConnections(const Consensus::LLMQParams& llmqParams, const CBlockIndex *pindexNew) const;

-    CQuorumPtr BuildQuorumFromCommitment(Consensus::LLMQType llmqType, gsl::not_null<const CBlockIndex*> pQuorumBaseBlockIndex) const;
+    CQuorumPtr BuildQuorumFromCommitment(Consensus::LLMQType llmqType, gsl::not_null<const CBlockIndex*> pQuorumBaseBlockIndex, bool populate_cache) const;
     bool BuildQuorumContributions(const CFinalCommitmentPtr& fqc, const std::shared_ptr<CQuorum>& quorum) const;

-    CQuorumCPtr GetQuorum(Consensus::LLMQType llmqType, gsl::not_null<const CBlockIndex*> pindex) const;
+    CQuorumCPtr GetQuorum(Consensus::LLMQType llmqType, gsl::not_null<const CBlockIndex*> pindex, bool populate_cache = true) const;
     /// Returns the start offset for the masternode with the given proTxHash. This offset is applied when picking data recovery members of a quorum's
     /// memberlist and is calculated based on a list of all member of all active quorums for the given llmqType in a way that each member
     /// should receive the same number of request if all active llmqType members requests data from one llmqType quorum.
src/llmq/utils.cpp

@@ -1115,6 +1115,7 @@ template void InitQuorumsCache<std::map<Consensus::LLMQType, unordered_lru_cache
 template void InitQuorumsCache<std::map<Consensus::LLMQType, unordered_lru_cache<uint256, std::vector<CQuorumCPtr>, StaticSaltedHasher>>>(std::map<Consensus::LLMQType, unordered_lru_cache<uint256, std::vector<CQuorumCPtr>, StaticSaltedHasher>>& cache, bool limit_by_connections);
 template void InitQuorumsCache<std::map<Consensus::LLMQType, unordered_lru_cache<uint256, std::shared_ptr<llmq::CQuorum>, StaticSaltedHasher, 0ul, 0ul>, std::less<Consensus::LLMQType>, std::allocator<std::pair<Consensus::LLMQType const, unordered_lru_cache<uint256, std::shared_ptr<llmq::CQuorum>, StaticSaltedHasher, 0ul, 0ul>>>>>(std::map<Consensus::LLMQType, unordered_lru_cache<uint256, std::shared_ptr<llmq::CQuorum>, StaticSaltedHasher, 0ul, 0ul>, std::less<Consensus::LLMQType>, std::allocator<std::pair<Consensus::LLMQType const, unordered_lru_cache<uint256, std::shared_ptr<llmq::CQuorum>, StaticSaltedHasher, 0ul, 0ul>>>>&cache, bool limit_by_connections);
 template void InitQuorumsCache<std::map<Consensus::LLMQType, unordered_lru_cache<uint256, int, StaticSaltedHasher>>>(std::map<Consensus::LLMQType, unordered_lru_cache<uint256, int, StaticSaltedHasher>>& cache, bool limit_by_connections);
+template void InitQuorumsCache<std::map<Consensus::LLMQType, unordered_lru_cache<uint256, uint256, StaticSaltedHasher>>>(std::map<Consensus::LLMQType, unordered_lru_cache<uint256, uint256, StaticSaltedHasher>>& cache, bool limit_by_connections);

 } // namespace utils
src/llmq/utils.h

@@ -122,6 +122,17 @@ void IterateNodesRandom(NodesContainer& nodeStates, Continue&& cont, Callback&&
 template <typename CacheType>
 void InitQuorumsCache(CacheType& cache, bool limit_by_connections = true);

+[[ nodiscard ]] static constexpr int max_cycles(const Consensus::LLMQParams& llmqParams, int quorums_count)
+{
+    return llmqParams.useRotation ? quorums_count / llmqParams.signingActiveQuorumCount : quorums_count;
+}
+
+[[ nodiscard ]] static constexpr int max_store_depth(const Consensus::LLMQParams& llmqParams)
+{
+    // For how many blocks recent DKG info should be kept
+    return max_cycles(llmqParams, llmqParams.keepOldKeys) * llmqParams.dkgInterval;
+}
+
 } // namespace utils

 [[ nodiscard ]] const std::optional<Consensus::LLMQParams> GetLLMQParams(Consensus::LLMQType llmqType);
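These helpers centralize the depth computation that was previously computed inline in CDKGSessionManager::CleanupOldContributions() (see the dkgsessionmgr.cpp hunk above) and is now also reused by the cleanup cache in quorums.cpp. A standalone restatement with made-up parameter values, just to show the arithmetic:

```cpp
// Sketch of the max_cycles()/max_store_depth() arithmetic with invented values.
struct ParamsSketch {
    bool useRotation;
    int signingActiveQuorumCount;
    int keepOldKeys;
    int dkgInterval;
};

constexpr int max_cycles_sketch(const ParamsSketch& p, int quorums_count)
{
    return p.useRotation ? quorums_count / p.signingActiveQuorumCount : quorums_count;
}

constexpr int max_store_depth_sketch(const ParamsSketch& p)
{
    // how many blocks of recent DKG info to keep
    return max_cycles_sketch(p, p.keepOldKeys) * p.dkgInterval;
}

// Rotated type: 24 kept keys across 4 active quorums -> 6 cycles of 24 blocks.
constexpr ParamsSketch rotated{true, 4, 24, 24};
static_assert(max_cycles_sketch(rotated, rotated.keepOldKeys) == 6); // 24 / 4
static_assert(max_store_depth_sketch(rotated) == 144);               // 6 * 24

// Non-rotated type: the divisor is skipped entirely.
constexpr ParamsSketch plain{false, 4, 24, 24};
static_assert(max_store_depth_sketch(plain) == 576);                 // 24 * 24
```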
src/random.cpp

@@ -14,16 +14,14 @@
 #include <wincrypt.h>
 #endif
 #include <logging.h> // for LogPrintf()
+#include <randomenv.h>
+#include <support/allocators/secure.h>
 #include <sync.h> // for Mutex
 #include <util/time.h> // for GetTimeMicros()

 #include <stdlib.h>
 #include <thread>

-#include <randomenv.h>
-
-#include <support/allocators/secure.h>
-
 #ifndef WIN32
 #include <fcntl.h>
 #endif
@@ -582,16 +580,6 @@ static void ProcRand(unsigned char* out, int num, RNGLevel level) noexcept
     }
 }

-std::chrono::microseconds GetRandMicros(std::chrono::microseconds duration_max) noexcept
-{
-    return std::chrono::microseconds{GetRand(duration_max.count())};
-}
-
-std::chrono::milliseconds GetRandMillis(std::chrono::milliseconds duration_max) noexcept
-{
-    return std::chrono::milliseconds{GetRand(duration_max.count())};
-}
-
 void GetRandBytes(unsigned char* buf, int num) noexcept { ProcRand(buf, num, RNGLevel::FAST); }
 void GetStrongRandBytes(unsigned char* buf, int num) noexcept { ProcRand(buf, num, RNGLevel::SLOW); }
 void RandAddPeriodic() noexcept { ProcRand(nullptr, 0, RNGLevel::PERIODIC); }
src/random.h (16 lines changed)

@@ -69,9 +69,21 @@
  * Thread-safe.
  */
 void GetRandBytes(unsigned char* buf, int num) noexcept;
+/** Generate a uniform random integer in the range [0..range). Precondition: range > 0 */
 uint64_t GetRand(uint64_t nMax) noexcept;
-std::chrono::microseconds GetRandMicros(std::chrono::microseconds duration_max) noexcept;
-std::chrono::milliseconds GetRandMillis(std::chrono::milliseconds duration_max) noexcept;
+/** Generate a uniform random duration in the range [0..max). Precondition: max.count() > 0 */
+template <typename D>
+D GetRandomDuration(typename std::common_type<D>::type max) noexcept
+// Having the compiler infer the template argument from the function argument
+// is dangerous, because the desired return value generally has a different
+// type than the function argument. So std::common_type is used to force the
+// call site to specify the type of the return value.
+{
+    assert(max.count() > 0);
+    return D{GetRand(max.count())};
+};
+constexpr auto GetRandMicros = GetRandomDuration<std::chrono::microseconds>;
+constexpr auto GetRandMillis = GetRandomDuration<std::chrono::milliseconds>;
 int GetRandInt(int nMax) noexcept;
 uint256 GetRandHash() noexcept;
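A standalone sketch of the std::common_type trick used above (illustrative; std::rand() stands in for GetRand()): because the parameter type is a non-deduced context, callers must spell out the return type explicitly, which is exactly what the comment in the diff is after.

```cpp
#include <cassert>
#include <chrono>
#include <cstdio>
#include <cstdlib>
#include <type_traits>

template <typename D>
D GetRandomDurationSketch(typename std::common_type<D>::type max) noexcept
{
    // std::common_type<D>::type blocks template argument deduction, so the
    // caller must write GetRandomDurationSketch<std::chrono::seconds>(...).
    assert(max.count() > 0);
    return D{std::rand() % max.count()};
}

int main()
{
    using namespace std::chrono;
    // The argument (minutes) converts to the explicitly named return type
    // (seconds); without the explicit <seconds>, this would not compile.
    seconds timeout = GetRandomDurationSketch<seconds>(minutes{5});
    std::printf("timeout: %lld s\n", static_cast<long long>(timeout.count()));
    return 0;
}
```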
src/rpc/blockchain.cpp

@@ -2462,7 +2462,7 @@ static UniValue getspecialtxes(const JSONRPCRequest& request)
     CTxMemPool& mempool = EnsureMemPool(node);
     LLMQContext& llmq_ctx = EnsureLLMQContext(node);

-    uint256 hash(ParseHashV(request.params[0], "blockhash"));
+    uint256 blockhash(ParseHashV(request.params[0], "blockhash"));

     int nTxType = -1;
     if (!request.params[1].isNull()) {
@@ -2491,7 +2491,7 @@ static UniValue getspecialtxes(const JSONRPCRequest& request)
         }
     }

-    const CBlockIndex* pblockindex = chainman.m_blockman.LookupBlockIndex(hash);
+    const CBlockIndex* pblockindex = chainman.m_blockman.LookupBlockIndex(blockhash);
     if (!pblockindex) {
         throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Block not found");
     }
@@ -2519,7 +2519,7 @@ static UniValue getspecialtxes(const JSONRPCRequest& request)
         case 2 :
         {
             UniValue objTx(UniValue::VOBJ);
-            TxToJSON(*tx, uint256(), mempool, chainman.ActiveChainstate(), *llmq_ctx.clhandler, *llmq_ctx.isman, objTx);
+            TxToJSON(*tx, blockhash, mempool, chainman.ActiveChainstate(), *llmq_ctx.clhandler, *llmq_ctx.isman, objTx);
             result.push_back(objTx);
             break;
         }
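This is the RPC fix from the release notes: verbose `getspecialtxes` results previously passed a default-constructed uint256 to TxToJSON, so the `instantlock` and `chainlock` fields stayed at their defaults, presumably because without the containing block the helper cannot resolve the transaction's lock status. A simplified standalone sketch of the idea (stand-in types only; the real lookup goes through the chainlock and instant-send handlers):

```cpp
// Sketch: an all-zero "no block" hash yields default lock fields, while the
// real containing-block hash enables the lookup.
#include <cassert>
#include <string>

struct TxInfoSketch { bool instantlock = false; bool chainlock = false; };

TxInfoSketch tx_to_info_sketch(const std::string& containing_block_hash,
                               const std::string& chainlocked_block_hash)
{
    TxInfoSketch info;
    if (containing_block_hash.empty()) return info; // "null uint256": defaults
    info.chainlock = (containing_block_hash == chainlocked_block_hash);
    // simplification mirroring the functional test below: a transaction in a
    // chainlocked block also reports instantlock = true
    info.instantlock = info.chainlock;
    return info;
}

int main()
{
    assert(!tx_to_info_sketch("", "abc").chainlock);   // old behavior: always false
    assert(tx_to_info_sketch("abc", "abc").chainlock); // new behavior: real status
    return 0;
}
```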
src/rpc/quorums.cpp

@@ -36,7 +36,10 @@ static void quorum_list_help(const JSONRPCRequest& request)
     RPCHelpMan{"quorum list",
         "List of on-chain quorums\n",
         {
-            {"count", RPCArg::Type::NUM, /* default */ "", "Number of quorums to list. Will list active quorums if \"count\" is not specified."},
+            {"count", RPCArg::Type::NUM, /* default */ "",
+                "Number of quorums to list. Will list active quorums if \"count\" is not specified.\n"
+                "Can be CPU/disk heavy when the value is larger than the number of active quorums."
+            },
         },
         RPCResult{
             RPCResult::Type::OBJ, "", "",
@@ -365,8 +368,10 @@ static void quorum_memberof_help(const JSONRPCRequest& request)
         {
             {"proTxHash", RPCArg::Type::STR_HEX, RPCArg::Optional::NO, "ProTxHash of the masternode."},
             {"scanQuorumsCount", RPCArg::Type::NUM, /* default */ "",
-                "Number of quorums to scan for. If not specified,\n"
-                "the active quorum count for each specific quorum type is used."},
+                "Number of quorums to scan for.\n"
+                "If not specified, the active quorum count for each specific quorum type is used.\n"
+                "Can be CPU/disk heavy when the value is larger than the number of active quorums."
+            },
         },
         RPCResults{},
         RPCExamples{""},
src/rpc/server.cpp

@@ -28,8 +28,9 @@ static std::string rpcWarmupStatus GUARDED_BY(g_rpc_warmup_mutex) = "RPC server
 /* Timer-creating functions */
 static RPCTimerInterface* timerInterface = nullptr;
 /* Map of name to timer. */
-static std::map<std::string, std::unique_ptr<RPCTimerBase> > deadlineTimers;
-static bool ExecuteCommand(const CRPCCommand& command, const JSONRPCRequest& request, UniValue& result, bool last_handler, std::multimap<std::string, std::vector<UniValue>> mapPlatformRestrictions);
+static Mutex g_deadline_timers_mutex;
+static std::map<std::string, std::unique_ptr<RPCTimerBase> > deadlineTimers GUARDED_BY(g_deadline_timers_mutex);
+static bool ExecuteCommand(const CRPCCommand& command, const JSONRPCRequest& request, UniValue& result, bool last_handler, const std::multimap<std::string, std::vector<UniValue>>& mapPlatformRestrictions);

 // Any commands submitted by this user will have their commands filtered based on the mapPlatformRestrictions
 static const std::string defaultPlatformUser = "platform-user";
@@ -330,7 +331,7 @@ void InterruptRPC()
 void StopRPC()
 {
     LogPrint(BCLog::RPC, "Stopping RPC\n");
-    deadlineTimers.clear();
+    WITH_LOCK(g_deadline_timers_mutex, deadlineTimers.clear());
     DeleteAuthCookie();
     g_rpcSignals.Stopped();
 }
@@ -502,7 +503,7 @@ UniValue CRPCTable::execute(const JSONRPCRequest &request) const
         throw JSONRPCError(RPC_METHOD_NOT_FOUND, "Method not found");
     }

-static bool ExecuteCommand(const CRPCCommand& command, const JSONRPCRequest& request, UniValue& result, bool last_handler, std::multimap<std::string, std::vector<UniValue>> mapPlatformRestrictions)
+static bool ExecuteCommand(const CRPCCommand& command, const JSONRPCRequest& request, UniValue& result, bool last_handler, const std::multimap<std::string, std::vector<UniValue>>& mapPlatformRestrictions)
 {
     // Before executing the RPC Command, filter commands from platform rpc user
     if (fMasternodeMode && request.authUser == gArgs.GetArg("-platform-user", defaultPlatformUser)) {
@@ -609,6 +610,7 @@ void RPCRunLater(const std::string& name, std::function<void()> func, int64_t nS
 {
     if (!timerInterface)
         throw JSONRPCError(RPC_INTERNAL_ERROR, "No timer handler registered for RPC");
+    LOCK(g_deadline_timers_mutex);
     deadlineTimers.erase(name);
     LogPrint(BCLog::RPC, "queue run of timer %s in %i seconds (using %s)\n", name, nSeconds, timerInterface->Name());
     deadlineTimers.emplace(name, std::unique_ptr<RPCTimerBase>(timerInterface->NewTimer(func, nSeconds*1000)));
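The point of these hunks is that deadlineTimers was mutated from RPC threads without synchronization; it is now GUARDED_BY a dedicated mutex, and every read-modify-write happens under it. A standalone sketch of the same pattern, with std::mutex standing in for the annotated Mutex/WITH_LOCK helpers:

```cpp
#include <map>
#include <memory>
#include <mutex>
#include <string>

struct TimerSketch {}; // stands in for RPCTimerBase

std::mutex g_deadline_timers_mutex_sketch;
std::map<std::string, std::unique_ptr<TimerSketch>> deadline_timers_sketch;

// Mirrors RPCRunLater: the erase/emplace pair runs under the mutex, so a
// timer firing on another thread cannot race the replacement.
void run_later_sketch(const std::string& name)
{
    std::lock_guard<std::mutex> lock(g_deadline_timers_mutex_sketch);
    deadline_timers_sketch.erase(name);
    deadline_timers_sketch.emplace(name, std::make_unique<TimerSketch>());
}

// Mirrors StopRPC's WITH_LOCK(g_deadline_timers_mutex, deadlineTimers.clear()).
void stop_sketch()
{
    std::lock_guard<std::mutex> lock(g_deadline_timers_mutex_sketch);
    deadline_timers_sketch.clear();
}

int main()
{
    run_later_sketch("lockwallet(default)");
    stop_sketch();
    return 0;
}
```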
src/test/random_tests.cpp

@@ -27,6 +27,8 @@ BOOST_AUTO_TEST_CASE(fastrandom_tests)
     for (int i = 10; i > 0; --i) {
         BOOST_CHECK_EQUAL(GetRand(std::numeric_limits<uint64_t>::max()), uint64_t{10393729187455219830U});
         BOOST_CHECK_EQUAL(GetRandInt(std::numeric_limits<int>::max()), int{769702006});
+        BOOST_CHECK_EQUAL(GetRandMicros(std::chrono::hours{1}).count(), 2917185654);
+        BOOST_CHECK_EQUAL(GetRandMillis(std::chrono::hours{1}).count(), 2144374);
     }
     {
         constexpr SteadySeconds time_point{1s};
@@ -66,6 +68,8 @@ BOOST_AUTO_TEST_CASE(fastrandom_tests)
     for (int i = 10; i > 0; --i) {
         BOOST_CHECK(GetRand(std::numeric_limits<uint64_t>::max()) != uint64_t{10393729187455219830U});
         BOOST_CHECK(GetRandInt(std::numeric_limits<int>::max()) != int{769702006});
+        BOOST_CHECK(GetRandMicros(std::chrono::hours{1}) != std::chrono::microseconds{2917185654});
+        BOOST_CHECK(GetRandMillis(std::chrono::hours{1}) != std::chrono::milliseconds{2144374});
     }

     {
@@ -107,7 +111,7 @@ BOOST_AUTO_TEST_CASE(stdrandom_test)
     BOOST_CHECK(x >= 3);
     BOOST_CHECK(x <= 9);

-    std::vector<int> test{1,2,3,4,5,6,7,8,9,10};
+    std::vector<int> test{1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
     std::shuffle(test.begin(), test.end(), ctx);
     for (int j = 1; j <= 10; ++j) {
         BOOST_CHECK(std::find(test.begin(), test.end(), j) != test.end());
@@ -117,7 +121,6 @@ BOOST_AUTO_TEST_CASE(stdrandom_test)
         BOOST_CHECK(std::find(test.begin(), test.end(), j) != test.end());
     }
-    }

 }

 /** Test that Shuffle reaches every permutation with equal probability. */
|
@ -1927,6 +1927,9 @@ static UniValue walletpassphrase(const JSONRPCRequest& request)
|
||||
CWallet* const pwallet = wallet.get();
|
||||
|
||||
int64_t nSleepTime;
|
||||
int64_t relock_time;
|
||||
// Prevent concurrent calls to walletpassphrase with the same wallet.
|
||||
LOCK(pwallet->m_unlock_mutex);
|
||||
{
|
||||
LOCK(pwallet->cs_wallet);
|
||||
|
||||
@ -1975,7 +1978,7 @@ static UniValue walletpassphrase(const JSONRPCRequest& request)
|
||||
pwallet->TopUpKeyPool();
|
||||
|
||||
pwallet->nRelockTime = GetTime() + nSleepTime;
|
||||
|
||||
relock_time = pwallet->nRelockTime;
|
||||
}
|
||||
// rpcRunLater must be called without cs_wallet held otherwise a deadlock
|
||||
// can occur. The deadlock would happen when RPCRunLater removes the
|
||||
@ -1986,9 +1989,11 @@ static UniValue walletpassphrase(const JSONRPCRequest& request)
|
||||
// wallet before the following callback is called. If a valid shared pointer
|
||||
// is acquired in the callback then the wallet is still loaded.
|
||||
std::weak_ptr<CWallet> weak_wallet = wallet;
|
||||
pwallet->chain().rpcRunLater(strprintf("lockwallet(%s)", pwallet->GetName()), [weak_wallet] {
|
||||
pwallet->chain().rpcRunLater(strprintf("lockwallet(%s)", pwallet->GetName()), [weak_wallet, relock_time] {
|
||||
if (auto shared_wallet = weak_wallet.lock()) {
|
||||
LOCK(shared_wallet->cs_wallet);
|
||||
// Skip if this is not the most recent rpcRunLater callback.
|
||||
if (shared_wallet->nRelockTime != relock_time) return;
|
||||
shared_wallet->Lock();
|
||||
shared_wallet->nRelockTime = 0;
|
||||
}
|
||||
|
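The race fixed here: two overlapping walletpassphrase calls each schedule a lockwallet timer, and the first (now stale) timer could relock the wallet while the second unlock was still supposed to be in effect. Capturing relock_time per call lets the callback detect that it is stale and bail out. A standalone sketch with simplified types:

```cpp
#include <cassert>
#include <cstdint>
#include <functional>

struct WalletSketch {
    int64_t relock_time = 0;
    bool locked = true;
};

// Mirrors walletpassphrase: unlock, record the deadline, and hand back a
// timer callback that only relocks if it is still the most recent one.
std::function<void()> schedule_relock_sketch(WalletSketch& w, int64_t now, int64_t sleep_s)
{
    w.locked = false;
    w.relock_time = now + sleep_s;
    const int64_t captured = w.relock_time; // per-call copy, like [weak_wallet, relock_time]
    return [&w, captured] {
        if (w.relock_time != captured) return; // stale timer: skip
        w.locked = true;
        w.relock_time = 0;
    };
}

int main()
{
    WalletSketch w;
    auto first = schedule_relock_sketch(w, /*now=*/1000, /*sleep_s=*/60);
    auto second = schedule_relock_sketch(w, /*now=*/1030, /*sleep_s=*/60);
    first();  // stale callback fires: must be a no-op
    assert(!w.locked);
    second(); // current callback relocks as intended
    assert(w.locked && w.relock_time == 0);
    return 0;
}
```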
src/wallet/wallet.h

@@ -965,8 +965,10 @@ public:
     std::vector<std::string> GetDestValues(const std::string& prefix) const EXCLUSIVE_LOCKS_REQUIRED(cs_wallet);

     //! Holds a timestamp at which point the wallet is scheduled (externally) to be relocked. Caller must arrange for actual relocking to occur via Lock().
-    int64_t nRelockTime = 0;
+    int64_t nRelockTime GUARDED_BY(cs_wallet){0};
+    // Used to prevent concurrent calls to walletpassphrase RPC.
+    Mutex m_unlock_mutex;
     bool Unlock(const SecureString& strWalletPassphrase, bool fForMixingOnly = false, bool accept_no_keys = false);
     bool ChangeWalletPassphrase(const SecureString& strOldWalletPassphrase, const SecureString& strNewWalletPassphrase);
     bool EncryptWallet(const SecureString& strWalletPassphrase);
src/wallet/walletdb.cpp

@@ -388,7 +388,7 @@ ReadKeyValue(CWallet* pwallet, CDataStream& ssKey, CDataStream& ssValue,
             if (!ssValue.eof()) {
                 uint256 checksum;
                 ssValue >> checksum;
-                if ((checksum_valid = Hash(vchPrivKey) != checksum)) {
+                if (!(checksum_valid = Hash(vchPrivKey) == checksum)) {
                     strErr = "Error reading wallet database: Crypted key corrupt";
                     return false;
                 }
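This tiny hunk is the wallet fix from the release notes: the old expression assigned checksum_valid the result of Hash(vchPrivKey) != checksum, so on the success path (checksum matches) the flag ended up false, inverted from what its name and downstream consumers expect; the new form assigns the match result and errors only on mismatch. A standalone sketch of the two predicates:

```cpp
#include <cassert>

int main()
{
    const unsigned hash = 0xABCD, checksum = 0xABCD; // matching checksum
    bool checksum_valid;

    // Old logic: no error is raised, but the flag is false for a *matching*
    // checksum, the inverse of its intended meaning.
    const bool error_old = (checksum_valid = (hash != checksum));
    assert(!error_old && !checksum_valid);

    // New logic: the flag carries the intended meaning; error on mismatch only.
    const bool error_new = !(checksum_valid = (hash == checksum));
    assert(!error_new && checksum_valid);
    return 0;
}
```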
test/functional/feature_llmq_chainlocks.py

@@ -40,8 +40,13 @@ class LLMQChainLocksTest(DashTestFramework):
         self.test_coinbase_best_cl(self.nodes[0], expected_cl_in_cb=False)

         # v20 is active, no quorums, no CLs - null CL in CbTx
-        self.nodes[0].generate(1)
+        nocl_block_hash = self.nodes[0].generate(1)[0]
         self.test_coinbase_best_cl(self.nodes[0], expected_cl_in_cb=True, expected_null_cl=True)
+        cbtx = self.nodes[0].getspecialtxes(nocl_block_hash, 5, 1, 0, 2)[0]
+        assert_equal(cbtx["instantlock"], False)
+        assert_equal(cbtx["instantlock_internal"], False)
+        assert_equal(cbtx["chainlock"], False)
+

         self.nodes[0].sporkupdate("SPORK_17_QUORUM_DKG_ENABLED", 0)
         self.wait_for_sporks_same()
@@ -55,6 +60,12 @@ class LLMQChainLocksTest(DashTestFramework):
         self.wait_for_chainlocked_block_all_nodes(self.nodes[0].getbestblockhash())
         self.test_coinbase_best_cl(self.nodes[0])

+        # ChainLock locks all the blocks below it so nocl_block_hash should be locked too
+        cbtx = self.nodes[0].getspecialtxes(nocl_block_hash, 5, 1, 0, 2)[0]
+        assert_equal(cbtx["instantlock"], True)
+        assert_equal(cbtx["instantlock_internal"], False)
+        assert_equal(cbtx["chainlock"], True)
+
         self.log.info("Mine many blocks, wait for chainlock")
         self.nodes[0].generate(20)
         # We need more time here due to 20 blocks being generated at once