Allow re-signing of IS locks when performing retroactive signing (#3219)
* Implement re-signing of InstantSend inputs when TXs come in via blocks
* Use GetAdjustedTime instead of GetTimeMillis in CSigSharesManager. This allows use of mocktime in tests.
* Expose verifiedProRegTxHash in getpeerinfo and implement wait_for_mnauth
* Allow to wait for IS and CL to NOT happen
* Bump timeout for wait_for_instantlock
* Implement tests for retroactive signing of IS and CLs
* Add wait_for_tx function to DashTestFramework
* Add -whitelist=127.0.0.1 to node0
* Use node3 for isolated block generation
* Don't test for non-receipt of TXs on node4/node5
parent b4b9d34675
commit 4c00d98ea6
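For orientation, here is a minimal usage sketch (not part of the diff) of the test helpers this commit adds or extends: wait_for_tx, wait_for_instantlock with an expected flag, and wait_for_mnauth. The class name, node indices, extra args and timeouts are illustrative only, assuming a DashTestFramework-based functional test:

#!/usr/bin/env python3
# Illustrative sketch only -- not part of this commit.
from test_framework.test_framework import DashTestFramework


class ExampleISLockChecks(DashTestFramework):
    def set_test_params(self):
        # 6 nodes / 5 masternodes, mirroring the new llmq-is-retroactive.py test
        self.set_dash_test_params(6, 5, [["-whitelist=127.0.0.1"], [], [], [], [], []], fast_dip3_enforcement=True)

    def run_test(self):
        txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1)
        # New helper: wait until the TX has propagated to a masternode.
        self.wait_for_tx(txid, self.nodes[1])
        # Extended helper: expected=False asserts that an IS lock does NOT
        # appear within the timeout.
        self.wait_for_instantlock(txid, self.nodes[0], False, 5)
        # New helper: wait until node 3 has at least 2 MNAUTH-verified peers.
        self.wait_for_mnauth(self.nodes[3], 2)


if __name__ == '__main__':
    ExampleISLockChecks().main()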
@@ -371,7 +371,7 @@ void CInstantSendManager::InterruptWorkerThread()
     workInterrupt();
 }
 
-bool CInstantSendManager::ProcessTx(const CTransaction& tx, const Consensus::Params& params)
+bool CInstantSendManager::ProcessTx(const CTransaction& tx, bool allowReSigning, const Consensus::Params& params)
 {
     if (!IsInstantSendEnabled()) {
         return true;
@@ -441,7 +441,7 @@ bool CInstantSendManager::ProcessTx(const CTransaction& tx, const Consensus::Par
             return false;
         }
     }
-    if (alreadyVotedCount == ids.size()) {
+    if (!allowReSigning && alreadyVotedCount == ids.size()) {
         LogPrint(BCLog::INSTANTSEND, "CInstantSendManager::%s -- txid=%s: already voted on all inputs, bailing out\n", __func__,
                  tx.GetHash().ToString());
         return true;
@@ -454,9 +454,9 @@ bool CInstantSendManager::ProcessTx(const CTransaction& tx, const Consensus::Par
         auto& in = tx.vin[i];
         auto& id = ids[i];
         inputRequestIds.emplace(id);
-        LogPrint(BCLog::INSTANTSEND, "CInstantSendManager::%s -- txid=%s: trying to vote on input %s with id %s\n", __func__,
-                 tx.GetHash().ToString(), in.prevout.ToStringShort(), id.ToString());
-        if (quorumSigningManager->AsyncSignIfMember(llmqType, id, tx.GetHash())) {
+        LogPrint(BCLog::INSTANTSEND, "CInstantSendManager::%s -- txid=%s: trying to vote on input %s with id %s. allowReSigning=%d\n", __func__,
+                 tx.GetHash().ToString(), in.prevout.ToStringShort(), id.ToString(), allowReSigning);
+        if (quorumSigningManager->AsyncSignIfMember(llmqType, id, tx.GetHash(), allowReSigning)) {
             LogPrint(BCLog::INSTANTSEND, "CInstantSendManager::%s -- txid=%s: voted on input %s with id %s\n", __func__,
                      tx.GetHash().ToString(), in.prevout.ToStringShort(), id.ToString());
         }
@@ -961,7 +961,7 @@ void CInstantSendManager::UpdateWalletTransaction(const CTransactionRef& tx, con
     mempool.AddTransactionsUpdated(1);
 }
 
-void CInstantSendManager::ProcessNewTransaction(const CTransactionRef& tx, const CBlockIndex* pindex)
+void CInstantSendManager::ProcessNewTransaction(const CTransactionRef& tx, const CBlockIndex* pindex, bool allowReSigning)
 {
     if (!IsInstantSendEnabled()) {
         return;
@@ -989,7 +989,7 @@ void CInstantSendManager::ProcessNewTransaction(const CTransactionRef& tx, const
 
     bool chainlocked = pindex && chainLocksHandler->HasChainLock(pindex->nHeight, pindex->GetBlockHash());
     if (islockHash.IsNull() && !chainlocked) {
-        ProcessTx(*tx, Params().GetConsensus());
+        ProcessTx(*tx, allowReSigning, Params().GetConsensus());
     }
 
     LOCK(cs);
@@ -1004,7 +1004,7 @@ void CInstantSendManager::ProcessNewTransaction(const CTransactionRef& tx, const
 
 void CInstantSendManager::TransactionAddedToMempool(const CTransactionRef& tx)
 {
-    ProcessNewTransaction(tx, nullptr);
+    ProcessNewTransaction(tx, nullptr, false);
 }
 
 void CInstantSendManager::BlockConnected(const std::shared_ptr<const CBlock>& pblock, const CBlockIndex* pindex, const std::vector<CTransactionRef>& vtxConflicted)
@@ -1021,7 +1021,7 @@ void CInstantSendManager::BlockConnected(const std::shared_ptr<const CBlock>& pb
     }
 
     for (const auto& tx : pblock->vtx) {
-        ProcessNewTransaction(tx, pindex);
+        ProcessNewTransaction(tx, pindex, true);
     }
 }
 
@@ -1400,7 +1400,7 @@ bool CInstantSendManager::ProcessPendingRetryLockTxs()
                         tx->GetHash().ToString());
             }
 
-            ProcessTx(*tx, Params().GetConsensus());
+            ProcessTx(*tx, false, Params().GetConsensus());
             retryCount++;
         }
 
@@ -120,7 +120,7 @@ public:
     void InterruptWorkerThread();
 
 public:
-    bool ProcessTx(const CTransaction& tx, const Consensus::Params& params);
+    bool ProcessTx(const CTransaction& tx, bool allowReSigning, const Consensus::Params& params);
     bool CheckCanLock(const CTransaction& tx, bool printDebug, const Consensus::Params& params);
     bool CheckCanLock(const COutPoint& outpoint, bool printDebug, const uint256& txHash, CAmount* retValue, const Consensus::Params& params);
     bool IsLocked(const uint256& txHash);
@@ -141,7 +141,7 @@ public:
     void ProcessInstantSendLock(NodeId from, const uint256& hash, const CInstantSendLock& islock);
     void UpdateWalletTransaction(const CTransactionRef& tx, const CInstantSendLock& islock);
 
-    void ProcessNewTransaction(const CTransactionRef& tx, const CBlockIndex* pindex);
+    void ProcessNewTransaction(const CTransactionRef& tx, const CBlockIndex* pindex, bool allowReSigning);
     void TransactionAddedToMempool(const CTransactionRef& tx);
     void BlockConnected(const std::shared_ptr<const CBlock>& pblock, const CBlockIndex* pindex, const std::vector<CTransactionRef>& vtxConflicted);
     void BlockDisconnected(const std::shared_ptr<const CBlock>& pblock, const CBlockIndex* pindexDisconnected);
 
@@ -762,7 +762,7 @@ void CSigningManager::UnregisterRecoveredSigsListener(CRecoveredSigsListener* l)
     recoveredSigsListeners.erase(itRem, recoveredSigsListeners.end());
 }
 
-bool CSigningManager::AsyncSignIfMember(Consensus::LLMQType llmqType, const uint256& id, const uint256& msgHash)
+bool CSigningManager::AsyncSignIfMember(Consensus::LLMQType llmqType, const uint256& id, const uint256& msgHash, bool allowReSign)
 {
     auto& params = Params().GetConsensus().llmqs.at(llmqType);
 
@@ -773,24 +773,31 @@ bool CSigningManager::AsyncSignIfMember(Consensus::LLMQType llmqType, const uint
     {
         LOCK(cs);
 
-        if (db.HasVotedOnId(llmqType, id)) {
+        bool hasVoted = db.HasVotedOnId(llmqType, id);
+        if (hasVoted) {
             uint256 prevMsgHash;
             db.GetVoteForId(llmqType, id, prevMsgHash);
             if (msgHash != prevMsgHash) {
                 LogPrintf("CSigningManager::%s -- already voted for id=%s and msgHash=%s. Not voting on conflicting msgHash=%s\n", __func__,
                         id.ToString(), prevMsgHash.ToString(), msgHash.ToString());
+                return false;
+            } else if (allowReSign) {
+                LogPrint(BCLog::LLMQ, "CSigningManager::%s -- already voted for id=%s and msgHash=%s. Resigning!\n", __func__,
+                        id.ToString(), prevMsgHash.ToString());
+            } else {
+                LogPrint(BCLog::LLMQ, "CSigningManager::%s -- already voted for id=%s and msgHash=%s. Not voting again.\n", __func__,
+                        id.ToString(), prevMsgHash.ToString());
+                return false;
             }
-            return false;
         }
 
         if (db.HasRecoveredSigForId(llmqType, id)) {
             // no need to sign it if we already have a recovered sig
             return true;
         }
-        db.WriteVoteForId(llmqType, id, msgHash);
+        if (!hasVoted) {
+            db.WriteVoteForId(llmqType, id, msgHash);
+        }
     }
 
     int tipHeight;
@@ -814,6 +821,10 @@ bool CSigningManager::AsyncSignIfMember(Consensus::LLMQType llmqType, const uint
         return false;
     }
 
+    if (allowReSign) {
+        // make us re-announce all known shares (other nodes might have run into a timeout)
+        quorumSigSharesManager->ForceReAnnouncement(quorum, llmqType, id, msgHash);
+    }
     quorumSigSharesManager->AsyncSign(quorum, id, msgHash);
 
     return true;
 
@@ -170,7 +170,7 @@ public:
     void RegisterRecoveredSigsListener(CRecoveredSigsListener* l);
     void UnregisterRecoveredSigsListener(CRecoveredSigsListener* l);
 
-    bool AsyncSignIfMember(Consensus::LLMQType llmqType, const uint256& id, const uint256& msgHash);
+    bool AsyncSignIfMember(Consensus::LLMQType llmqType, const uint256& id, const uint256& msgHash, bool allowReSign = false);
     bool HasRecoveredSig(Consensus::LLMQType llmqType, const uint256& id, const uint256& msgHash);
     bool HasRecoveredSigForId(Consensus::LLMQType llmqType, const uint256& id);
     bool HasRecoveredSigForSession(const uint256& signHash);
 
@@ -82,6 +82,13 @@ void CSigSharesInv::Set(uint16_t quorumMember, bool v)
     inv[quorumMember] = v;
 }
 
+void CSigSharesInv::SetAll(bool v)
+{
+    for (size_t i = 0; i < inv.size(); i++) {
+        inv[i] = v;
+    }
+}
+
 std::string CBatchedSigShares::ToInvString() const
 {
     CSigSharesInv inv;
@@ -678,7 +685,7 @@ void CSigSharesManager::ProcessSigShare(NodeId nodeId, const CSigShare& sigShare
     sigSharesToAnnounce.Add(sigShare.GetKey(), true);
 
     // Update the time we've seen the last sigShare
-    timeSeenForSessions[sigShare.GetSignHash()] = GetTimeMillis();
+    timeSeenForSessions[sigShare.GetSignHash()] = GetAdjustedTime();
 
     if (!quorumNodes.empty()) {
         // don't announce and wait for other nodes to request this share and directly send it to them
@@ -777,7 +784,7 @@ void CSigSharesManager::CollectSigSharesToRequest(std::unordered_map<NodeId, std
 {
     AssertLockHeld(cs);
 
-    int64_t now = GetTimeMillis();
+    int64_t now = GetAdjustedTime();
     const size_t maxRequestsForNode = 32;
 
     // avoid requesting from same nodes all the time
@@ -1143,8 +1150,8 @@ CSigShare CSigSharesManager::RebuildSigShare(const CSigSharesNodeState::SessionI
 
 void CSigSharesManager::Cleanup()
 {
-    int64_t now = GetTimeMillis();
-    if (now - lastCleanupTime < 5000) {
+    int64_t now = GetAdjustedTime();
+    if (now - lastCleanupTime < 5) {
         return;
     }
 
@@ -1265,7 +1272,7 @@ void CSigSharesManager::Cleanup()
         nodeStates.erase(nodeId);
     }
 
-    lastCleanupTime = GetTimeMillis();
+    lastCleanupTime = GetAdjustedTime();
 }
 
 void CSigSharesManager::RemoveSigSharesForSession(const uint256& signHash)
@@ -1426,6 +1433,31 @@ void CSigSharesManager::Sign(const CQuorumCPtr& quorum, const uint256& id, const
     ProcessSigShare(-1, sigShare, *g_connman, quorum);
 }
 
+// causes all known sigShares to be re-announced
+void CSigSharesManager::ForceReAnnouncement(const CQuorumCPtr& quorum, Consensus::LLMQType llmqType, const uint256& id, const uint256& msgHash)
+{
+    LOCK(cs);
+    auto signHash = CLLMQUtils::BuildSignHash(llmqType, quorum->qc.quorumHash, id, msgHash);
+    auto sigs = sigShares.GetAllForSignHash(signHash);
+    if (sigs) {
+        for (auto& p : *sigs) {
+            // re-announce every sigshare to every node
+            sigSharesToAnnounce.Add(std::make_pair(signHash, p.first), true);
+        }
+    }
+    for (auto& p : nodeStates) {
+        CSigSharesNodeState& nodeState = p.second;
+        auto session = nodeState.GetSessionBySignHash(signHash);
+        if (!session) {
+            continue;
+        }
+        // pretend that the other node doesn't know about any shares so that we re-announce everything
+        session->knows.SetAll(false);
+        // we need to use a new session id as we don't know if the other node has run into a timeout already
+        session->sendSessionId = (uint32_t)-1;
+    }
+}
+
 void CSigSharesManager::HandleNewRecoveredSig(const llmq::CRecoveredSig& recoveredSig)
 {
     LOCK(cs);
 
@@ -104,6 +104,7 @@ public:
     void Init(size_t size);
     bool IsSet(uint16_t quorumMember) const;
     void Set(uint16_t quorumMember, bool v);
+    void SetAll(bool v);
     void Merge(const CSigSharesInv& inv2);
 
     size_t CountSet() const;
@@ -328,8 +329,8 @@ public:
 
 class CSigSharesManager : public CRecoveredSigsListener
 {
-    static const int64_t SESSION_NEW_SHARES_TIMEOUT = 60 * 1000;
-    static const int64_t SIG_SHARE_REQUEST_TIMEOUT = 5 * 1000;
+    static const int64_t SESSION_NEW_SHARES_TIMEOUT = 60;
+    static const int64_t SIG_SHARE_REQUEST_TIMEOUT = 5;
 
     // we try to keep total message size below 10k
     const size_t MAX_MSGS_CNT_QSIGSESANN = 100;
@@ -376,6 +377,7 @@ public:
 
     void AsyncSign(const CQuorumCPtr& quorum, const uint256& id, const uint256& msgHash);
     void Sign(const CQuorumCPtr& quorum, const uint256& id, const uint256& msgHash);
+    void ForceReAnnouncement(const CQuorumCPtr& quorum, Consensus::LLMQType llmqType, const uint256& id, const uint256& msgHash);
 
     void HandleNewRecoveredSig(const CRecoveredSig& recoveredSig);
 
@@ -716,6 +716,11 @@ void CNode::copyStats(CNodeStats &stats)
     // Leave string empty if addrLocal invalid (not filled in yet)
     CService addrLocalUnlocked = GetAddrLocal();
     stats.addrLocal = addrLocalUnlocked.IsValid() ? addrLocalUnlocked.ToString() : "";
+
+    {
+        LOCK(cs_mnauth);
+        X(verifiedProRegTxHash);
+    }
 }
 #undef X
 
@@ -694,6 +694,8 @@ public:
     CAddress addr;
     // Bind address of our side of the connection
     CAddress addrBind;
+    // In case this is a verified MN, this value is the proTx of the MN
+    uint256 verifiedProRegTxHash;
 };
 
 
@@ -80,6 +80,9 @@ UniValue getpeerinfo(const JSONRPCRequest& request)
             "    \"addrbind\":\"ip:port\",          (string) Bind address of the connection to the peer\n"
             "    \"addrlocal\":\"ip:port\",         (string) Local address as reported by the peer\n"
             "    \"services\":\"xxxxxxxxxxxxxxxx\", (string) The services offered\n"
+            "    \"verified_proregtx_hash\": h,     (hex) Only present when the peer is a masternode and successfully\n"
+            "                                       authenticated via MNAUTH. In this case, this field contains the\n"
+            "                                       protx hash of the masternode\n"
             "    \"relaytxes\":true|false,          (boolean) Whether peer has asked us to relay transactions to it\n"
            "    \"lastsend\": ttt,                 (numeric) The time in seconds since epoch (Jan 1 1970 GMT) of the last send\n"
            "    \"lastrecv\": ttt,                 (numeric) The time in seconds since epoch (Jan 1 1970 GMT) of the last receive\n"
@@ -138,6 +141,9 @@ UniValue getpeerinfo(const JSONRPCRequest& request)
         if (stats.addrBind.IsValid())
             obj.push_back(Pair("addrbind", stats.addrBind.ToString()));
         obj.push_back(Pair("services", strprintf("%016x", stats.nServices)));
+        if (!stats.verifiedProRegTxHash.IsNull()) {
+            obj.push_back(Pair("verified_proregtx_hash", stats.verifiedProRegTxHash.ToString()));
+        }
         obj.push_back(Pair("relaytxes", stats.fRelayTxes));
         obj.push_back(Pair("lastsend", stats.nLastSend));
         obj.push_back(Pair("lastrecv", stats.nLastRecv));
 
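As a hedged illustration of how the new getpeerinfo field can be consumed from a functional test (the helper name and node object are assumptions, not part of the diff), the following mirrors what the wait_for_mnauth helper added further down does:

def count_mnauth_peers(node):
    # Count peers that completed MNAUTH, i.e. that expose a non-empty
    # "verified_proregtx_hash" entry in getpeerinfo (the field added above).
    count = 0
    for peer in node.getpeerinfo():
        if peer.get("verified_proregtx_hash", ""):
            count += 1
    return count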
@@ -129,7 +129,7 @@ class LLMQChainLocksTest(DashTestFramework):
         # for the mined TXs, which will then allow the network to create a CLSIG
         self.log.info("Reenable network on first node and wait for chainlock")
         reconnect_isolated_node(self.nodes[0], 1)
-        self.wait_for_chainlocked_block(self.nodes[0], self.nodes[0].getbestblockhash(), 30)
+        self.wait_for_chainlocked_block(self.nodes[0], self.nodes[0].getbestblockhash(), timeout=30)
 
     def create_chained_txs(self, node, amount):
         txid = node.sendtoaddress(node.getnewaddress(), amount)
 
test/functional/llmq-is-retroactive.py (new executable file, 178 lines)
@@ -0,0 +1,178 @@
+#!/usr/bin/env python3
+# Copyright (c) 2015-2018 The Dash Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+from test_framework.mininode import *
+from test_framework.test_framework import DashTestFramework
+from test_framework.util import sync_blocks, set_node_times, \
+    isolate_node, reconnect_isolated_node
+
+'''
+llmq-is-retroactive.py
+
+Tests retroactive signing
+
+We have 6 nodes where node 0 is the control node, nodes 1-5 are masternodes.
+Mempool inconsistencies are simulated via disconnecting/reconnecting node 3
+and by having a higher relay fee on nodes 4 and 5.
+'''
+
+class LLMQ_IS_RetroactiveSigning(DashTestFramework):
+    def set_test_params(self):
+        # -whitelist is needed to avoid the trickling logic on node0
+        self.set_dash_test_params(6, 5, [["-whitelist=127.0.0.1"], [], [], [], ["-minrelaytxfee=0.001"], ["-minrelaytxfee=0.001"]], fast_dip3_enforcement=True)
+
+    def run_test(self):
+        while self.nodes[0].getblockchaininfo()["bip9_softforks"]["dip0008"]["status"] != "active":
+            self.nodes[0].generate(10)
+        sync_blocks(self.nodes, timeout=60*5)
+
+        self.nodes[0].spork("SPORK_17_QUORUM_DKG_ENABLED", 0)
+        self.nodes[0].spork("SPORK_19_CHAINLOCKS_ENABLED", 0)
+        self.nodes[0].spork("SPORK_2_INSTANTSEND_ENABLED", 0)
+        self.nodes[0].spork("SPORK_3_INSTANTSEND_BLOCK_FILTERING", 0)
+        self.wait_for_sporks_same()
+
+        self.mine_quorum()
+        self.mine_quorum()
+
+        # Make sure that all nodes are chainlocked at the same height before starting actual tests
+        self.wait_for_chainlocked_block_all_nodes(self.nodes[0].getbestblockhash())
+
+        self.log.info("trying normal IS lock")
+        txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1)
+        # 3 nodes should be enough to create an IS lock even if nodes 4 and 5 (which don't have the TX themselves)
+        # are the only "neighbours" in intra-quorum connections for one of them.
+        self.wait_for_instantlock(txid, self.nodes[0])
+        self.bump_mocktime(1)
+        set_node_times(self.nodes, self.mocktime)
+        block = self.nodes[0].generate(1)[0]
+        self.wait_for_chainlocked_block_all_nodes(block)
+
+        self.log.info("testing normal signing with partially known TX")
+        isolate_node(self.nodes[3])
+        txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1)
+        # Make sure nodes 1 and 2 received the TX before we continue,
+        # otherwise it might announce the TX to node 3 when reconnecting
+        self.wait_for_tx(txid, self.nodes[1])
+        self.wait_for_tx(txid, self.nodes[2])
+        reconnect_isolated_node(self.nodes[3], 0)
+        self.wait_for_mnauth(self.nodes[3], 2)
+        # node 3 fully reconnected but the TX wasn't relayed to it, so there should be no IS lock
+        self.wait_for_instantlock(txid, self.nodes[0], False, 5)
+        # push the tx directly via rpc
+        self.nodes[3].sendrawtransaction(self.nodes[0].getrawtransaction(txid))
+        # node 3 should vote on a tx now since it became aware of it via sendrawtransaction
+        # and this should be enough to complete an IS lock
+        self.wait_for_instantlock(txid, self.nodes[0])
+
+        self.log.info("testing retroactive signing with unknown TX")
+        isolate_node(self.nodes[3])
+        rawtx = self.nodes[0].createrawtransaction([], {self.nodes[0].getnewaddress(): 1})
+        rawtx = self.nodes[0].fundrawtransaction(rawtx)['hex']
+        rawtx = self.nodes[0].signrawtransaction(rawtx)['hex']
+        txid = self.nodes[3].sendrawtransaction(rawtx)
+        # Make node 3 consider the TX as safe
+        self.bump_mocktime(10 * 60 + 1)
+        set_node_times(self.nodes, self.mocktime)
+        block = self.nodes[3].generatetoaddress(1, self.nodes[0].getnewaddress())[0]
+        reconnect_isolated_node(self.nodes[3], 0)
+        self.wait_for_chainlocked_block_all_nodes(block)
+        self.nodes[0].setmocktime(self.mocktime)
+
+        self.log.info("testing retroactive signing with partially known TX")
+        isolate_node(self.nodes[3])
+        txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1)
+        # Make sure nodes 1 and 2 received the TX before we continue,
+        # otherwise it might announce the TX to node 3 when reconnecting
+        self.wait_for_tx(txid, self.nodes[1])
+        self.wait_for_tx(txid, self.nodes[2])
+        reconnect_isolated_node(self.nodes[3], 0)
+        self.wait_for_mnauth(self.nodes[3], 2)
+        # node 3 fully reconnected but the TX wasn't relayed to it, so there should be no IS lock
+        self.wait_for_instantlock(txid, self.nodes[0], False, 5)
+        # Make node0 consider the TX as safe
+        self.bump_mocktime(10 * 60 + 1)
+        set_node_times(self.nodes, self.mocktime)
+        block = self.nodes[0].generate(1)[0]
+        self.wait_for_chainlocked_block_all_nodes(block)
+
+        self.log.info("testing retroactive signing with partially known TX and all nodes session timeout")
+        self.test_all_nodes_session_timeout(False)
+        self.log.info("repeating test, but with cycled LLMQs")
+        self.test_all_nodes_session_timeout(True)
+
+        self.log.info("testing retroactive signing with partially known TX and single node session timeout")
+        self.test_single_node_session_timeout(False)
+        self.log.info("repeating test, but with cycled LLMQs")
+        self.test_single_node_session_timeout(True)
+
+    def cycle_llmqs(self):
+        self.mine_quorum()
+        self.mine_quorum()
+        self.wait_for_chainlocked_block_all_nodes(self.nodes[0].getbestblockhash())
+
+    def test_all_nodes_session_timeout(self, do_cycle_llmqs):
+        set_node_times(self.nodes, self.mocktime)
+        isolate_node(self.nodes[3])
+        rawtx = self.nodes[0].createrawtransaction([], {self.nodes[0].getnewaddress(): 1})
+        rawtx = self.nodes[0].fundrawtransaction(rawtx)['hex']
+        rawtx = self.nodes[0].signrawtransaction(rawtx)['hex']
+        txid = self.nodes[0].sendrawtransaction(rawtx)
+        txid = self.nodes[3].sendrawtransaction(rawtx)
+        # Make sure nodes 1 and 2 received the TX before we continue
+        self.wait_for_tx(txid, self.nodes[1])
+        self.wait_for_tx(txid, self.nodes[2])
+        # Make sure signing is done on nodes 1 and 2 (it's async)
+        time.sleep(5)
+        # Make the signing session for the IS lock timeout on nodes 1-3
+        self.bump_mocktime(61)
+        set_node_times(self.nodes, self.mocktime)
+        time.sleep(2) # make sure Cleanup() is called
+        reconnect_isolated_node(self.nodes[3], 0)
+        self.wait_for_mnauth(self.nodes[3], 2)
+        # node 3 fully reconnected but the signing session is already timed out on all nodes, so no IS lock
+        self.wait_for_instantlock(txid, self.nodes[0], False, 5)
+        if do_cycle_llmqs:
+            self.cycle_llmqs()
+            self.wait_for_instantlock(txid, self.nodes[0], False, 5)
+        # Make node 0 consider the TX as safe
+        self.bump_mocktime(10 * 60 + 1)
+        self.nodes[0].setmocktime(self.mocktime)
+        block = self.nodes[0].generate(1)[0]
+        self.wait_for_chainlocked_block_all_nodes(block)
+
+    def test_single_node_session_timeout(self, do_cycle_llmqs):
+        set_node_times(self.nodes, self.mocktime)
+        isolate_node(self.nodes[3])
+        rawtx = self.nodes[0].createrawtransaction([], {self.nodes[0].getnewaddress(): 1})
+        rawtx = self.nodes[0].fundrawtransaction(rawtx)['hex']
+        rawtx = self.nodes[0].signrawtransaction(rawtx)['hex']
+        txid = self.nodes[3].sendrawtransaction(rawtx)
+        time.sleep(2) # make sure signing is done on node 2 (it's async)
+        # Make the signing session for the IS lock timeout on node 3
+        self.bump_mocktime(61)
+        set_node_times(self.nodes, self.mocktime)
+        time.sleep(2) # make sure Cleanup() is called
+        reconnect_isolated_node(self.nodes[3], 0)
+        self.wait_for_mnauth(self.nodes[3], 2)
+        self.nodes[0].sendrawtransaction(rawtx)
+        # Make sure nodes 1 and 2 received the TX
+        self.wait_for_tx(txid, self.nodes[1])
+        self.wait_for_tx(txid, self.nodes[2])
+        # Make sure signing is done on nodes 1 and 2 (it's async)
+        time.sleep(5)
+        # node 3 fully reconnected but the signing session is already timed out on it, so no IS lock
+        self.wait_for_instantlock(txid, self.nodes[0], False, 1)
+        if do_cycle_llmqs:
+            self.cycle_llmqs()
+            self.wait_for_instantlock(txid, self.nodes[0], False, 5)
+        # Make node 0 consider the TX as safe
+        self.bump_mocktime(10 * 60 + 1)
+        self.nodes[0].setmocktime(self.mocktime)
+        block = self.nodes[0].generate(1)[0]
+        self.wait_for_chainlocked_block_all_nodes(block)
+
+if __name__ == '__main__':
+    LLMQ_IS_RetroactiveSigning().main()
@@ -699,26 +699,37 @@ class DashTestFramework(BitcoinTestFramework):
             ret = {**decoded, **ret}
         return ret
 
-    def wait_for_instantlock(self, txid, node):
+    def wait_for_tx(self, txid, node, expected=True, timeout=15):
+        def check_tx():
+            try:
+                return node.getrawtransaction(txid)
+            except:
+                return False
+        if wait_until(check_tx, timeout=timeout, sleep=0.5, do_assert=expected) and not expected:
+            raise AssertionError("waiting unexpectedly succeeded")
+
+    def wait_for_instantlock(self, txid, node, expected=True, timeout=15):
         def check_instantlock():
             try:
                 return node.getrawtransaction(txid, True)["instantlock"]
             except:
                 return False
-        wait_until(check_instantlock, timeout=10, sleep=0.5)
+        if wait_until(check_instantlock, timeout=timeout, sleep=0.5, do_assert=expected) and not expected:
+            raise AssertionError("waiting unexpectedly succeeded")
 
-    def wait_for_chainlocked_block(self, node, block_hash, timeout=15):
+    def wait_for_chainlocked_block(self, node, block_hash, expected=True, timeout=15):
         def check_chainlocked_block():
             try:
                 block = node.getblock(block_hash)
                 return block["confirmations"] > 0 and block["chainlock"]
             except:
                 return False
-        wait_until(check_chainlocked_block, timeout=timeout, sleep=0.1)
+        if wait_until(check_chainlocked_block, timeout=timeout, sleep=0.1, do_assert=expected) and not expected:
+            raise AssertionError("waiting unexpectedly succeeded")
 
     def wait_for_chainlocked_block_all_nodes(self, block_hash, timeout=15):
         for node in self.nodes:
-            self.wait_for_chainlocked_block(node, block_hash, timeout)
+            self.wait_for_chainlocked_block(node, block_hash, timeout=timeout)
 
     def wait_for_best_chainlock(self, node, block_hash, timeout=15):
         wait_until(lambda: node.getbestchainlock()["blockhash"] == block_hash, timeout=timeout, sleep=0.1)
@@ -846,6 +857,17 @@ class DashTestFramework(BitcoinTestFramework):
 
         return new_quorum
 
+    def wait_for_mnauth(self, node, count, timeout=10):
+        def test():
+            pi = node.getpeerinfo()
+            c = 0
+            for p in pi:
+                if "verified_proregtx_hash" in p and p["verified_proregtx_hash"] != "":
+                    c += 1
+            return c >= count
+        wait_until(test, timeout=timeout)
+
 
 class ComparisonTestFramework(BitcoinTestFramework):
     """Test framework for doing p2p comparison testing
 
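A short usage sketch of the extended helpers above (method name and values are illustrative, not part of the diff). Note that because wait_for_chainlocked_block gained an expected parameter before timeout, a positional third argument now binds to expected, which is why callers in this commit switch to timeout=timeout and timeout=30 explicitly:

# Sketch of a check inside a DashTestFramework subclass (illustrative):
def assert_block_not_chainlocked(self, block_hash):
    # The block must NOT become chainlocked on node 0 within 5 seconds;
    # expected=False turns the timeout into the success condition.
    self.wait_for_chainlocked_block(self.nodes[0], block_hash, expected=False, timeout=5)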
@@ -202,27 +202,34 @@ def str_to_b64str(string):
 def satoshi_round(amount):
     return Decimal(amount).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
 
-def wait_until(predicate, *, attempts=float('inf'), timeout=float('inf'), sleep=0.05, lock=None):
+def wait_until(predicate, *, attempts=float('inf'), timeout=float('inf'), sleep=0.05, lock=None, do_assert=True, allow_exception=False):
     if attempts == float('inf') and timeout == float('inf'):
         timeout = 60
     attempt = 0
     timeout += time.time()
 
     while attempt < attempts and time.time() < timeout:
-        if lock:
-            with lock:
-                if predicate():
-                    return
-        else:
-            if predicate():
-                return
+        try:
+            if lock:
+                with lock:
+                    if predicate():
+                        return True
+            else:
+                if predicate():
+                    return True
+        except:
+            if not allow_exception:
+                raise
         attempt += 1
         time.sleep(sleep)
 
-    # Print the cause of the timeout
-    assert_greater_than(attempts, attempt)
-    assert_greater_than(timeout, time.time())
-    raise RuntimeError('Unreachable')
+    if do_assert:
+        # Print the cause of the timeout
+        assert_greater_than(attempts, attempt)
+        assert_greater_than(timeout, time.time())
+        raise RuntimeError('Unreachable')
+    else:
+        return False
 
 # RPC/P2P connection constants and functions
 ############################################
 
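A minimal sketch of the new wait_until semantics, assuming the test framework is on the import path: a truthy predicate result returns True, and with do_assert=False a timeout yields False instead of raising, which is what the expected=False helpers above build on:

import time
from test_framework.util import wait_until

start = time.time()

# Becomes true well before the timeout: wait_until returns True.
assert wait_until(lambda: time.time() > start + 1, timeout=10, do_assert=False)

# Never becomes true: with do_assert=False the timeout returns False instead
# of raising, so "the IS lock / ChainLock did not happen" can be treated as
# the expected outcome by the caller.
assert wait_until(lambda: False, timeout=1, do_assert=False) is False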
@@ -72,6 +72,7 @@ BASE_SCRIPTS= [
     'llmq-chainlocks.py', # NOTE: needs dash_hash to pass
     'llmq-simplepose.py', # NOTE: needs dash_hash to pass
     'llmq-is-cl-conflicts.py', # NOTE: needs dash_hash to pass
+    'llmq-is-retroactive.py', # NOTE: needs dash_hash to pass
     'llmq-dkgerrors.py', # NOTE: needs dash_hash to pass
     'dip4-coinbasemerkleroots.py', # NOTE: needs dash_hash to pass
     # vv Tests less than 60s vv