Mirror of https://github.com/dashpay/dash.git
Merge #11022: Basic keypool topup
d34957e [wallet] [tests] Add keypool topup functional test (Jonas Schnelli)
095142d [wallet] keypool mark-used and topup (John Newbery)
c25d90f [wallet] Add HasUnusedKeys() helper (John Newbery)
f2123e3 [wallet] Cache keyid -> keypool id mappings (John Newbery)
83f1ec3 [wallet] Don't hold cs_LastBlockFile while calling setBestChain (John Newbery)
2376bfc [wallet] [moveonly] Move LoadKeyPool to cpp (Matt Corallo)
cab8557 [wallet] [moveonly] Move CAffectedKeysVisitor (Jonas Schnelli)

Pull request description:

This PR contains the first part of #10882:

- if a key from the keypool is used, mark all keys up to that key as used, and then try to top up the keypool
- top up the keypool on startup

Notably, it does not stop the node or prevent the best block from advancing if the keypool drops below a threshold (which means that transactions may be missed and funds lost if restoring from an old HD wallet backup).

Tree-SHA512: ac681fefeaf7ec2aab2fa1da93d12273ea80bd05eb48d7b3b551ea6e5d975dd97ba7de52b7fba52993823280ac4079cc36cf78a27dac708107ebf8fb6326142b
This commit is contained in: commit 653a46dd91
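For orientation before the diff: the mark-used logic relies on keypool entries being kept in ordered std::set<int64_t> index sets, so marking everything "up to" a used key is a prefix erase. The following is only a minimal sketch of that set handling with plain integers and hypothetical names (keypool_indices, used_index); the real CWallet::MarkReserveKeysAsUsed in the diff below additionally erases the entries from the wallet database and from the keyid-to-index cache before topping the pool back up.

// Illustrative sketch (not wallet code): erase every keypool index up to and
// including the index observed as used, relying on std::set ordering.
#include <cstdint>
#include <iostream>
#include <set>

int main()
{
    std::set<std::int64_t> keypool_indices = {1, 2, 3, 4, 5, 6, 7, 8};
    std::int64_t used_index = 5; // hypothetical: index of the key seen in a transaction

    for (auto it = keypool_indices.begin(); it != keypool_indices.end();) {
        if (*it > used_index) break;           // the set is ordered, later keys are unaffected
        it = keypool_indices.erase(it);        // "mark as used" by dropping it from the pool
    }

    // Remaining indices (6, 7, 8) would then be topped back up to the target size.
    for (std::int64_t index : keypool_indices) {
        std::cout << index << '\n';
    }
    return 0;
}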
@@ -1863,95 +1863,100 @@ static bool ConnectBlock(const CBlock& block, CValidationState& state, CBlockInd
  */
 bool static FlushStateToDisk(const CChainParams& chainparams, CValidationState &state, FlushStateMode mode, int nManualPruneHeight) {
     int64_t nMempoolUsage = mempool.DynamicMemoryUsage();
-    LOCK2(cs_main, cs_LastBlockFile);
+    LOCK(cs_main);
     static int64_t nLastWrite = 0;
     static int64_t nLastFlush = 0;
     static int64_t nLastSetChain = 0;
     std::set<int> setFilesToPrune;
     bool fFlushForPrune = false;
+    bool fDoFullFlush = false;
+    int64_t nNow = 0;
     try {
-    if (fPruneMode && (fCheckForPruning || nManualPruneHeight > 0) && !fReindex) {
-        if (nManualPruneHeight > 0) {
-            FindFilesToPruneManual(setFilesToPrune, nManualPruneHeight);
-        } else {
-            FindFilesToPrune(setFilesToPrune, chainparams.PruneAfterHeight());
-            fCheckForPruning = false;
-        }
-        if (!setFilesToPrune.empty()) {
-            fFlushForPrune = true;
-            if (!fHavePruned) {
-                pblocktree->WriteFlag("prunedblockfiles", true);
-                fHavePruned = true;
-            }
-        }
-    }
-    int64_t nNow = GetTimeMicros();
-    // Avoid writing/flushing immediately after startup.
-    if (nLastWrite == 0) {
-        nLastWrite = nNow;
-    }
-    if (nLastFlush == 0) {
-        nLastFlush = nNow;
-    }
-    if (nLastSetChain == 0) {
-        nLastSetChain = nNow;
-    }
-    int64_t nMempoolSizeMax = GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000;
-    int64_t cacheSize = pcoinsTip->DynamicMemoryUsage();
-    int64_t nTotalSpace = nCoinCacheUsage + std::max<int64_t>(nMempoolSizeMax - nMempoolUsage, 0);
-    // The cache is large and we're within 10% and 10 MiB of the limit, but we have time now (not in the middle of a block processing).
-    bool fCacheLarge = mode == FLUSH_STATE_PERIODIC && cacheSize > std::max((9 * nTotalSpace) / 10, nTotalSpace - MAX_BLOCK_COINSDB_USAGE * 1024 * 1024);
-    // The cache is over the limit, we have to write now.
-    bool fCacheCritical = mode == FLUSH_STATE_IF_NEEDED && cacheSize > nTotalSpace;
-    // It's been a while since we wrote the block index to disk. Do this frequently, so we don't need to redownload after a crash.
-    bool fPeriodicWrite = mode == FLUSH_STATE_PERIODIC && nNow > nLastWrite + (int64_t)DATABASE_WRITE_INTERVAL * 1000000;
-    // It's been very long since we flushed the cache. Do this infrequently, to optimize cache usage.
-    bool fPeriodicFlush = mode == FLUSH_STATE_PERIODIC && nNow > nLastFlush + (int64_t)DATABASE_FLUSH_INTERVAL * 1000000;
-    // Combine all conditions that result in a full cache flush.
-    bool fDoFullFlush = (mode == FLUSH_STATE_ALWAYS) || fCacheLarge || fCacheCritical || fPeriodicFlush || fFlushForPrune;
-    // Write blocks and block index to disk.
-    if (fDoFullFlush || fPeriodicWrite) {
-        // Depend on nMinDiskSpace to ensure we can write block index
-        if (!CheckDiskSpace(0))
-            return state.Error("out of disk space");
-        // First make sure all block and undo data is flushed to disk.
-        FlushBlockFile();
-        // Then update all block file information (which may refer to block and undo files).
-        {
-            std::vector<std::pair<int, const CBlockFileInfo*> > vFiles;
-            vFiles.reserve(setDirtyFileInfo.size());
-            for (std::set<int>::iterator it = setDirtyFileInfo.begin(); it != setDirtyFileInfo.end(); ) {
-                vFiles.push_back(std::make_pair(*it, &vinfoBlockFile[*it]));
-                setDirtyFileInfo.erase(it++);
-            }
-            std::vector<const CBlockIndex*> vBlocks;
-            vBlocks.reserve(setDirtyBlockIndex.size());
-            for (std::set<CBlockIndex*>::iterator it = setDirtyBlockIndex.begin(); it != setDirtyBlockIndex.end(); ) {
-                vBlocks.push_back(*it);
-                setDirtyBlockIndex.erase(it++);
-            }
-            if (!pblocktree->WriteBatchSync(vFiles, nLastBlockFile, vBlocks)) {
-                return AbortNode(state, "Failed to write to block index database");
-            }
-        }
-        // Finally remove any pruned files
-        if (fFlushForPrune)
-            UnlinkPrunedFiles(setFilesToPrune);
-        nLastWrite = nNow;
-    }
-    // Flush best chain related state. This can only be done if the blocks / block index write was also done.
-    if (fDoFullFlush) {
-        // Typical Coin structures on disk are around 48 bytes in size.
-        // Pushing a new one to the database can cause it to be written
-        // twice (once in the log, and once in the tables). This is already
-        // an overestimation, as most will delete an existing entry or
-        // overwrite one. Still, use a conservative safety factor of 2.
-        if (!CheckDiskSpace(48 * 2 * 2 * pcoinsTip->GetCacheSize()))
-            return state.Error("out of disk space");
-        // Flush the chainstate (which may refer to block index entries).
-        if (!pcoinsTip->Flush())
-            return AbortNode(state, "Failed to write to coin database");
-        nLastFlush = nNow;
-    }
+    {
+        LOCK(cs_LastBlockFile);
+        if (fPruneMode && (fCheckForPruning || nManualPruneHeight > 0) && !fReindex) {
+            if (nManualPruneHeight > 0) {
+                FindFilesToPruneManual(setFilesToPrune, nManualPruneHeight);
+            } else {
+                FindFilesToPrune(setFilesToPrune, chainparams.PruneAfterHeight());
+                fCheckForPruning = false;
+            }
+            if (!setFilesToPrune.empty()) {
+                fFlushForPrune = true;
+                if (!fHavePruned) {
+                    pblocktree->WriteFlag("prunedblockfiles", true);
+                    fHavePruned = true;
+                }
+            }
+        }
+        nNow = GetTimeMicros();
+        // Avoid writing/flushing immediately after startup.
+        if (nLastWrite == 0) {
+            nLastWrite = nNow;
+        }
+        if (nLastFlush == 0) {
+            nLastFlush = nNow;
+        }
+        if (nLastSetChain == 0) {
+            nLastSetChain = nNow;
+        }
+        int64_t nMempoolSizeMax = GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000;
+        int64_t cacheSize = pcoinsTip->DynamicMemoryUsage();
+        int64_t nTotalSpace = nCoinCacheUsage + std::max<int64_t>(nMempoolSizeMax - nMempoolUsage, 0);
+        // The cache is large and we're within 10% and 10 MiB of the limit, but we have time now (not in the middle of a block processing).
+        bool fCacheLarge = mode == FLUSH_STATE_PERIODIC && cacheSize > std::max((9 * nTotalSpace) / 10, nTotalSpace - MAX_BLOCK_COINSDB_USAGE * 1024 * 1024);
+        // The cache is over the limit, we have to write now.
+        bool fCacheCritical = mode == FLUSH_STATE_IF_NEEDED && cacheSize > nTotalSpace;
+        // It's been a while since we wrote the block index to disk. Do this frequently, so we don't need to redownload after a crash.
+        bool fPeriodicWrite = mode == FLUSH_STATE_PERIODIC && nNow > nLastWrite + (int64_t)DATABASE_WRITE_INTERVAL * 1000000;
+        // It's been very long since we flushed the cache. Do this infrequently, to optimize cache usage.
+        bool fPeriodicFlush = mode == FLUSH_STATE_PERIODIC && nNow > nLastFlush + (int64_t)DATABASE_FLUSH_INTERVAL * 1000000;
+        // Combine all conditions that result in a full cache flush.
+        fDoFullFlush = (mode == FLUSH_STATE_ALWAYS) || fCacheLarge || fCacheCritical || fPeriodicFlush || fFlushForPrune;
+        // Write blocks and block index to disk.
+        if (fDoFullFlush || fPeriodicWrite) {
+            // Depend on nMinDiskSpace to ensure we can write block index
+            if (!CheckDiskSpace(0))
+                return state.Error("out of disk space");
+            // First make sure all block and undo data is flushed to disk.
+            FlushBlockFile();
+            // Then update all block file information (which may refer to block and undo files).
+            {
+                std::vector<std::pair<int, const CBlockFileInfo*> > vFiles;
+                vFiles.reserve(setDirtyFileInfo.size());
+                for (std::set<int>::iterator it = setDirtyFileInfo.begin(); it != setDirtyFileInfo.end(); ) {
+                    vFiles.push_back(std::make_pair(*it, &vinfoBlockFile[*it]));
+                    setDirtyFileInfo.erase(it++);
+                }
+                std::vector<const CBlockIndex*> vBlocks;
+                vBlocks.reserve(setDirtyBlockIndex.size());
+                for (std::set<CBlockIndex*>::iterator it = setDirtyBlockIndex.begin(); it != setDirtyBlockIndex.end(); ) {
+                    vBlocks.push_back(*it);
+                    setDirtyBlockIndex.erase(it++);
+                }
+                if (!pblocktree->WriteBatchSync(vFiles, nLastBlockFile, vBlocks)) {
+                    return AbortNode(state, "Failed to write to block index database");
+                }
+            }
+            // Finally remove any pruned files
+            if (fFlushForPrune)
+                UnlinkPrunedFiles(setFilesToPrune);
+            nLastWrite = nNow;
+        }
+        // Flush best chain related state. This can only be done if the blocks / block index write was also done.
+        if (fDoFullFlush) {
+            // Typical Coin structures on disk are around 48 bytes in size.
+            // Pushing a new one to the database can cause it to be written
+            // twice (once in the log, and once in the tables). This is already
+            // an overestimation, as most will delete an existing entry or
+            // overwrite one. Still, use a conservative safety factor of 2.
+            if (!CheckDiskSpace(48 * 2 * 2 * pcoinsTip->GetCacheSize()))
+                return state.Error("out of disk space");
+            // Flush the chainstate (which may refer to block index entries).
+            if (!pcoinsTip->Flush())
+                return AbortNode(state, "Failed to write to coin database");
+            nLastFlush = nNow;
+        }
+    }
     if (fDoFullFlush || ((mode == FLUSH_STATE_ALWAYS || mode == FLUSH_STATE_PERIODIC) && nNow > nLastSetChain + (int64_t)DATABASE_WRITE_INTERVAL * 1000000)) {
         // Update best block in wallet (so we can detect restored wallets).
@@ -619,9 +619,8 @@ UniValue dumpwallet(const JSONRPCRequest& request)
         throw JSONRPCError(RPC_INVALID_PARAMETER, "Cannot open wallet dump file");

     std::map<CTxDestination, int64_t> mapKeyBirth;
-    std::set<CKeyID> setKeyPool;
+    const std::map<CKeyID, int64_t>& mapKeyPool = pwallet->GetAllReserveKeys();
     pwallet->GetKeyBirthTimes(mapKeyBirth);
-    pwallet->GetAllReserveKeys(setKeyPool);

     // sort time/key pairs
     std::vector<std::pair<int64_t, CKeyID> > vKeyBirth;
@@ -666,7 +665,7 @@ UniValue dumpwallet(const JSONRPCRequest& request)
             file << strprintf("label=%s", EncodeDumpString(pwallet->mapAddressBook[keyid].name));
         } else if (keyid == masterKeyID) {
             file << "hdmaster=1";
-        } else if (setKeyPool.count(keyid)) {
+        } else if (mapKeyPool.count(keyid)) {
             file << "reserve=1";
         } else if (pwallet->mapKeyMetadata[keyid].hdKeypath == "m") {
             file << "inactivehdmaster=1";
@@ -12,6 +12,7 @@
 #include "consensus/consensus.h"
 #include "consensus/validation.h"
 #include "fs.h"
 #include "init.h"
 #include "key.h"
 #include "keystore.h"
 #include "validation.h"
@@ -80,6 +81,38 @@ std::string COutput::ToString() const
     return strprintf("COutput(%s, %d, %d) [%s]", tx->GetHash().ToString(), i, nDepth, FormatMoney(tx->tx->vout[i].nValue));
 }

+class CAffectedKeysVisitor : public boost::static_visitor<void> {
+private:
+    const CKeyStore &keystore;
+    std::vector<CKeyID> &vKeys;
+
+public:
+    CAffectedKeysVisitor(const CKeyStore &keystoreIn, std::vector<CKeyID> &vKeysIn) : keystore(keystoreIn), vKeys(vKeysIn) {}
+
+    void Process(const CScript &script) {
+        txnouttype type;
+        std::vector<CTxDestination> vDest;
+        int nRequired;
+        if (ExtractDestinations(script, type, vDest, nRequired)) {
+            for (const CTxDestination &dest : vDest)
+                boost::apply_visitor(*this, dest);
+        }
+    }
+
+    void operator()(const CKeyID &keyId) {
+        if (keystore.HaveKey(keyId))
+            vKeys.push_back(keyId);
+    }
+
+    void operator()(const CScriptID &scriptId) {
+        CScript script;
+        if (keystore.GetCScript(scriptId, script))
+            Process(script);
+    }
+
+    void operator()(const CNoDestination &none) {}
+};
+
 const CWalletTx* CWallet::GetWalletTx(const uint256& hash) const
 {
     LOCK(cs_wallet);
@@ -1021,6 +1054,30 @@ bool CWallet::AddToWalletIfInvolvingMe(const CTransactionRef& ptx, const CBlockI
         if (fExisted && !fUpdate) return false;
         if (fExisted || IsMine(tx) || IsFromMe(tx))
         {
+            /* Check if any keys in the wallet keypool that were supposed to be unused
+             * have appeared in a new transaction. If so, remove those keys from the keypool.
+             * This can happen when restoring an old wallet backup that does not contain
+             * the most recently created transactions from newer versions of the wallet.
+             */
+
+            // loop through all outputs
+            for (const CTxOut& txout: tx.vout) {
+                // extract addresses and check if they match with an unused keypool key
+                std::vector<CKeyID> vAffected;
+                CAffectedKeysVisitor(*this, vAffected).Process(txout.scriptPubKey);
+                for (const CKeyID &keyid : vAffected) {
+                    std::map<CKeyID, int64_t>::const_iterator mi = m_pool_key_to_index.find(keyid);
+                    if (mi != m_pool_key_to_index.end()) {
+                        LogPrintf("%s: Detected a used keypool key, mark all keypool key up to this key as used\n", __func__);
+                        MarkReserveKeysAsUsed(mi->second);
+
+                        if (!TopUpKeyPool()) {
+                            LogPrintf("%s: Topping up keypool failed (locked wallet)\n", __func__);
+                        }
+                    }
+                }
+            }
+
             CWalletTx wtx(this, ptx);

             // Get merkle branch if transaction was found in a block
@@ -3050,6 +3107,7 @@ DBErrors CWallet::LoadWallet(bool& fFirstRunRet)
             LOCK(cs_wallet);
             setInternalKeyPool.clear();
             setExternalKeyPool.clear();
+            m_pool_key_to_index.clear();
             // Note: can't top-up keypool here, because wallet is locked.
             // User will be prompted to unlock wallet the next operation
             // that requires a new key.
@@ -3079,6 +3137,7 @@ DBErrors CWallet::ZapSelectTx(std::vector<uint256>& vHashIn, std::vector<uint256
         {
             setInternalKeyPool.clear();
             setExternalKeyPool.clear();
+            m_pool_key_to_index.clear();
             // Note: can't top-up keypool here, because wallet is locked.
             // User will be prompted to unlock wallet the next operation
             // that requires a new key.
@@ -3105,6 +3164,7 @@ DBErrors CWallet::ZapWalletTx(std::vector<CWalletTx>& vWtx)
             LOCK(cs_wallet);
             setInternalKeyPool.clear();
             setExternalKeyPool.clear();
+            m_pool_key_to_index.clear();
             // Note: can't top-up keypool here, because wallet is locked.
             // User will be prompted to unlock wallet the next operation
             // that requires a new key.
@@ -3199,6 +3259,8 @@ bool CWallet::NewKeyPool()
     }
     setExternalKeyPool.clear();

+    m_pool_key_to_index.clear();
+
     if (!TopUpKeyPool()) {
         return false;
     }
@@ -3213,6 +3275,25 @@ size_t CWallet::KeypoolCountExternalKeys()
     return setExternalKeyPool.size();
 }

+void CWallet::LoadKeyPool(int64_t nIndex, const CKeyPool &keypool)
+{
+    AssertLockHeld(cs_wallet);
+    if (keypool.fInternal) {
+        setInternalKeyPool.insert(nIndex);
+    } else {
+        setExternalKeyPool.insert(nIndex);
+    }
+    m_max_keypool_index = std::max(m_max_keypool_index, nIndex);
+    m_pool_key_to_index[keypool.vchPubKey.GetID()] = nIndex;
+
+    // If no metadata exists yet, create a default with the pool key's
+    // creation time. Note that this may be overwritten by actually
+    // stored metadata for that key later, which is fine.
+    CKeyID keyid = keypool.vchPubKey.GetID();
+    if (mapKeyMetadata.count(keyid) == 0)
+        mapKeyMetadata[keyid] = CKeyMetadata(keypool.nTime);
+}
+
 bool CWallet::TopUpKeyPool(unsigned int kpSize)
 {
     {
@@ -3249,7 +3330,8 @@ bool CWallet::TopUpKeyPool(unsigned int kpSize)
             assert(m_max_keypool_index < std::numeric_limits<int64_t>::max()); // How in the hell did you use so many keys?
             int64_t index = ++m_max_keypool_index;

-            if (!walletdb.WritePool(index, CKeyPool(GenerateNewKey(walletdb, internal), internal))) {
+            CPubKey pubkey(GenerateNewKey(walletdb, internal));
+            if (!walletdb.WritePool(index, CKeyPool(pubkey, internal))) {
                 throw std::runtime_error(std::string(__func__) + ": writing generated key failed");
             }

@@ -3258,6 +3340,7 @@ bool CWallet::TopUpKeyPool(unsigned int kpSize)
             } else {
                 setExternalKeyPool.insert(index);
             }
+            m_pool_key_to_index[pubkey.GetID()] = index;
         }
         if (missingInternal + missingExternal > 0) {
             LogPrintf("keypool added %d keys (%d internal), size=%u (%u internal)\n", missingInternal + missingExternal, missingInternal, setInternalKeyPool.size() + setExternalKeyPool.size(), setInternalKeyPool.size());
@@ -3299,6 +3382,7 @@ void CWallet::ReserveKeyFromKeyPool(int64_t& nIndex, CKeyPool& keypool, bool fRe
         }

         assert(keypool.vchPubKey.IsValid());
+        m_pool_key_to_index.erase(keypool.vchPubKey.GetID());
         LogPrintf("keypool reserve %d\n", nIndex);
     }
 }
@@ -3311,7 +3395,7 @@ void CWallet::KeepKey(int64_t nIndex)
     LogPrintf("keypool keep %d\n", nIndex);
 }

-void CWallet::ReturnKey(int64_t nIndex, bool fInternal)
+void CWallet::ReturnKey(int64_t nIndex, bool fInternal, const CPubKey& pubkey)
 {
     // Return to key pool
     {
@@ -3321,6 +3405,7 @@ void CWallet::ReturnKey(int64_t nIndex, bool fInternal)
         } else {
             setExternalKeyPool.insert(nIndex);
         }
+        m_pool_key_to_index[pubkey.GetID()] = nIndex;
     }
     LogPrintf("keypool return %d\n", nIndex);
 }
@@ -3550,39 +3635,37 @@ void CReserveKey::KeepKey()
 void CReserveKey::ReturnKey()
 {
     if (nIndex != -1) {
-        pwallet->ReturnKey(nIndex, fInternal);
+        pwallet->ReturnKey(nIndex, fInternal, vchPubKey);
     }
     nIndex = -1;
     vchPubKey = CPubKey();
 }

-static void LoadReserveKeysToSet(std::set<CKeyID>& setAddress, const std::set<int64_t>& setKeyPool, CWalletDB& walletdb) {
-    for (const int64_t& id : setKeyPool)
-    {
+void CWallet::MarkReserveKeysAsUsed(int64_t keypool_id)
+{
+    AssertLockHeld(cs_wallet);
+    bool internal = setInternalKeyPool.count(keypool_id);
+    if (!internal) assert(setExternalKeyPool.count(keypool_id));
+    std::set<int64_t> *setKeyPool = internal ? &setInternalKeyPool : &setExternalKeyPool;
+    auto it = setKeyPool->begin();
+
+    CWalletDB walletdb(*dbw);
+    while (it != std::end(*setKeyPool)) {
+        const int64_t& index = *(it);
+        if (index > keypool_id) break; // set*KeyPool is ordered
+
         CKeyPool keypool;
-        if (!walletdb.ReadPool(id, keypool))
-            throw std::runtime_error(std::string(__func__) + ": read failed");
-        assert(keypool.vchPubKey.IsValid());
-        CKeyID keyID = keypool.vchPubKey.GetID();
-        setAddress.insert(keyID);
+        if (walletdb.ReadPool(index, keypool)) { //TODO: This should be unnecessary
+            m_pool_key_to_index.erase(keypool.vchPubKey.GetID());
+        }
+        walletdb.ErasePool(index);
+        it = setKeyPool->erase(it);
     }
 }

-void CWallet::GetAllReserveKeys(std::set<CKeyID>& setAddress) const
+bool CWallet::HasUnusedKeys(int min_keys) const
 {
-    setAddress.clear();
-
-    CWalletDB walletdb(*dbw);
-
-    LOCK2(cs_main, cs_wallet);
-    LoadReserveKeysToSet(setAddress, setInternalKeyPool, walletdb);
-    LoadReserveKeysToSet(setAddress, setExternalKeyPool, walletdb);
-
-    for (const CKeyID& keyID : setAddress) {
-        if (!HaveKey(keyID)) {
-            throw std::runtime_error(std::string(__func__) + ": unknown key in key pool");
-        }
-    }
+    return setExternalKeyPool.size() >= min_keys && (setInternalKeyPool.size() >= min_keys || !CanSupportFeature(FEATURE_HD_SPLIT));
 }

 void CWallet::GetScriptForMining(std::shared_ptr<CReserveScript> &script)
@@ -3634,38 +3717,6 @@ void CWallet::ListLockedCoins(std::vector<COutPoint>& vOutpts) const

 /** @} */ // end of Actions

-class CAffectedKeysVisitor : public boost::static_visitor<void> {
-private:
-    const CKeyStore &keystore;
-    std::vector<CKeyID> &vKeys;
-
-public:
-    CAffectedKeysVisitor(const CKeyStore &keystoreIn, std::vector<CKeyID> &vKeysIn) : keystore(keystoreIn), vKeys(vKeysIn) {}
-
-    void Process(const CScript &script) {
-        txnouttype type;
-        std::vector<CTxDestination> vDest;
-        int nRequired;
-        if (ExtractDestinations(script, type, vDest, nRequired)) {
-            for (const CTxDestination &dest : vDest)
-                boost::apply_visitor(*this, dest);
-        }
-    }
-
-    void operator()(const CKeyID &keyId) {
-        if (keystore.HaveKey(keyId))
-            vKeys.push_back(keyId);
-    }
-
-    void operator()(const CScriptID &scriptId) {
-        CScript script;
-        if (keystore.GetCScript(scriptId, script))
-            Process(script);
-    }
-
-    void operator()(const CNoDestination &none) {}
-};
-
 void CWallet::GetKeyBirthTimes(std::map<CTxDestination, int64_t> &mapKeyBirth) const {
     AssertLockHeld(cs_wallet); // mapKeyMetadata
     mapKeyBirth.clear();
@@ -3990,6 +4041,9 @@ CWallet* CWallet::CreateWalletFromFile(const std::string walletFile)

     RegisterValidationInterface(walletInstance);

+    // Try to top up keypool. No-op if the wallet is locked.
+    walletInstance->TopUpKeyPool();
+
     CBlockIndex *pindexRescan = chainActive.Genesis();
     if (!GetBoolArg("-rescan", false))
     {
@@ -705,6 +705,7 @@ private:
     std::set<int64_t> setInternalKeyPool;
     std::set<int64_t> setExternalKeyPool;
     int64_t m_max_keypool_index;
+    std::map<CKeyID, int64_t> m_pool_key_to_index;

     int64_t nTimeFirstKey;

@@ -747,22 +748,7 @@ public:
         }
     }

-    void LoadKeyPool(int64_t nIndex, const CKeyPool &keypool)
-    {
-        if (keypool.fInternal) {
-            setInternalKeyPool.insert(nIndex);
-        } else {
-            setExternalKeyPool.insert(nIndex);
-        }
-        m_max_keypool_index = std::max(m_max_keypool_index, nIndex);
-
-        // If no metadata exists yet, create a default with the pool key's
-        // creation time. Note that this may be overwritten by actually
-        // stored metadata for that key later, which is fine.
-        CKeyID keyid = keypool.vchPubKey.GetID();
-        if (mapKeyMetadata.count(keyid) == 0)
-            mapKeyMetadata[keyid] = CKeyMetadata(keypool.nTime);
-    }
+    void LoadKeyPool(int64_t nIndex, const CKeyPool &keypool);

     // Map from Key ID (for regular keys) or Script ID (for watch-only keys) to
     // key metadata.
@@ -828,7 +814,7 @@ public:
     const CWalletTx* GetWalletTx(const uint256& hash) const;

     //! check whether we are allowed to upgrade (or already support) to the named feature
-    bool CanSupportFeature(enum WalletFeature wf) { AssertLockHeld(cs_wallet); return nWalletMaxVersion >= wf; }
+    bool CanSupportFeature(enum WalletFeature wf) const { AssertLockHeld(cs_wallet); return nWalletMaxVersion >= wf; }

     /**
      * populate vCoins with vector of available COutputs.
@@ -990,10 +976,16 @@ public:
     bool TopUpKeyPool(unsigned int kpSize = 0);
     void ReserveKeyFromKeyPool(int64_t& nIndex, CKeyPool& keypool, bool fRequestedInternal);
     void KeepKey(int64_t nIndex);
-    void ReturnKey(int64_t nIndex, bool fInternal);
+    void ReturnKey(int64_t nIndex, bool fInternal, const CPubKey& pubkey);
     bool GetKeyFromPool(CPubKey &key, bool internal = false);
     int64_t GetOldestKeyPoolTime();
-    void GetAllReserveKeys(std::set<CKeyID>& setAddress) const;
+    /**
+     * Marks all keys in the keypool up to and including reserve_key as used.
+     */
+    void MarkReserveKeysAsUsed(int64_t keypool_id);
+    const std::map<CKeyID, int64_t>& GetAllReserveKeys() const { return m_pool_key_to_index; }
+    /** Does the wallet have at least min_keys in the keypool? */
+    bool HasUnusedKeys(int min_keys) const;

     std::set< std::set<CTxDestination> > GetAddressGroupings();
     std::map<CTxDestination, CAmount> GetAddressBalances();
75  test/functional/keypool-topup.py  (new executable file)
@@ -0,0 +1,75 @@
#!/usr/bin/env python3
# Copyright (c) 2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test HD Wallet keypool restore function.

Two nodes. Node1 is under test. Node0 is providing transactions and generating blocks.

- Start node1, shutdown and backup wallet.
- Generate 110 keys (enough to drain the keypool). Store key 90 (in the initial keypool) and key 110 (beyond the initial keypool). Send funds to key 90 and key 110.
- Stop node1, clear the datadir, move wallet file back into the datadir and restart node1.
- connect node1 to node0. Verify that they sync and node1 receives its funds."""
import shutil

from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
    assert_equal,
    connect_nodes_bi,
    sync_blocks,
)

class KeypoolRestoreTest(BitcoinTestFramework):
    def __init__(self):
        super().__init__()
        self.setup_clean_chain = True
        self.num_nodes = 2
        self.extra_args = [['-usehd=0'], ['-usehd=1', '-keypool=100', '-keypoolmin=20']]

    def run_test(self):
        self.tmpdir = self.options.tmpdir
        self.nodes[0].generate(101)

        self.log.info("Make backup of wallet")

        self.stop_node(1)

        shutil.copyfile(self.tmpdir + "/node1/regtest/wallet.dat", self.tmpdir + "/wallet.bak")
        self.nodes[1] = self.start_node(1, self.tmpdir, self.extra_args[1])
        connect_nodes_bi(self.nodes, 0, 1)

        self.log.info("Generate keys for wallet")

        for _ in range(90):
            addr_oldpool = self.nodes[1].getnewaddress()
        for _ in range(20):
            addr_extpool = self.nodes[1].getnewaddress()

        self.log.info("Send funds to wallet")

        self.nodes[0].sendtoaddress(addr_oldpool, 10)
        self.nodes[0].generate(1)
        self.nodes[0].sendtoaddress(addr_extpool, 5)
        self.nodes[0].generate(1)
        sync_blocks(self.nodes)

        self.log.info("Restart node with wallet backup")

        self.stop_node(1)

        shutil.copyfile(self.tmpdir + "/wallet.bak", self.tmpdir + "/node1/regtest/wallet.dat")

        self.log.info("Verify keypool is restored and balance is correct")

        self.nodes[1] = self.start_node(1, self.tmpdir, self.extra_args[1])
        connect_nodes_bi(self.nodes, 0, 1)
        self.sync_all()

        assert_equal(self.nodes[1].getbalance(), 15)
        assert_equal(self.nodes[1].listtransactions()[0]['category'], "receive")

        # Check that we have marked all keys up to the used keypool key as used
        assert_equal(self.nodes[1].validateaddress(self.nodes[1].getnewaddress())['hdkeypath'], "m/0'/0'/111'")

if __name__ == '__main__':
    KeypoolRestoreTest().main()
@@ -79,6 +79,7 @@ BASE_SCRIPTS= [
    'rawtransactions.py',
    'reindex.py',
    # vv Tests less than 30s vv
+    'keypool-topup.py',
    'zmq_test.py',
    'mempool_resurrect_test.py',
    'txn_doublespend.py --mineblock',
@@ -9,7 +9,6 @@ from test_framework.util import (
     assert_equal,
     connect_nodes_bi,
 )
 import os
 import shutil

@@ -72,10 +71,12 @@ class WalletHDTest(BitcoinTestFramework):

         self.log.info("Restore backup ...")
         self.stop_node(1)
-        os.remove(self.options.tmpdir + "/node1/regtest/wallet.dat")
+        # we need to delete the complete regtest directory
+        # otherwise node1 would auto-recover all funds and flag the keypool keys as used
+        shutil.rmtree(tmpdir + "/node1/regtest/blocks")
+        shutil.rmtree(tmpdir + "/node1/regtest/chainstate")
         shutil.copyfile(tmpdir + "/hd.bak", tmpdir + "/node1/regtest/wallet.dat")
         self.nodes[1] = self.start_node(1, self.options.tmpdir, self.extra_args[1])
+        #connect_nodes_bi(self.nodes, 0, 1)

         # Assert that derivation is deterministic
         hd_add_2 = None
@@ -85,11 +86,12 @@ class WalletHDTest(BitcoinTestFramework):
             assert_equal(hd_info_2["hdkeypath"], "m/0'/0'/"+str(_+1)+"'")
             assert_equal(hd_info_2["hdmasterkeyid"], masterkeyid)
         assert_equal(hd_add, hd_add_2)
+        connect_nodes_bi(self.nodes, 0, 1)
+        self.sync_all()

         # Needs rescan
         self.stop_node(1)
         self.nodes[1] = self.start_node(1, self.options.tmpdir, self.extra_args[1] + ['-rescan'])
+        #connect_nodes_bi(self.nodes, 0, 1)
         assert_equal(self.nodes[1].getbalance(), num_hd_adds + 1)

         # send a tx and make sure it's using the internal chain for the change output