diff --git a/src/llmq/quorums_instantsend.cpp b/src/llmq/quorums_instantsend.cpp index 32f418e68..c804fe16e 100644 --- a/src/llmq/quorums_instantsend.cpp +++ b/src/llmq/quorums_instantsend.cpp @@ -371,7 +371,7 @@ void CInstantSendManager::InterruptWorkerThread() workInterrupt(); } -bool CInstantSendManager::ProcessTx(const CTransaction& tx, const Consensus::Params& params) +bool CInstantSendManager::ProcessTx(const CTransaction& tx, bool allowReSigning, const Consensus::Params& params) { if (!IsInstantSendEnabled()) { return true; @@ -441,7 +441,7 @@ bool CInstantSendManager::ProcessTx(const CTransaction& tx, const Consensus::Par return false; } } - if (alreadyVotedCount == ids.size()) { + if (!allowReSigning && alreadyVotedCount == ids.size()) { LogPrint(BCLog::INSTANTSEND, "CInstantSendManager::%s -- txid=%s: already voted on all inputs, bailing out\n", __func__, tx.GetHash().ToString()); return true; @@ -454,9 +454,9 @@ bool CInstantSendManager::ProcessTx(const CTransaction& tx, const Consensus::Par auto& in = tx.vin[i]; auto& id = ids[i]; inputRequestIds.emplace(id); - LogPrint(BCLog::INSTANTSEND, "CInstantSendManager::%s -- txid=%s: trying to vote on input %s with id %s\n", __func__, - tx.GetHash().ToString(), in.prevout.ToStringShort(), id.ToString()); - if (quorumSigningManager->AsyncSignIfMember(llmqType, id, tx.GetHash())) { + LogPrint(BCLog::INSTANTSEND, "CInstantSendManager::%s -- txid=%s: trying to vote on input %s with id %s. 
allowReSigning=%d\n", __func__, + tx.GetHash().ToString(), in.prevout.ToStringShort(), id.ToString(), allowReSigning); + if (quorumSigningManager->AsyncSignIfMember(llmqType, id, tx.GetHash(), allowReSigning)) { LogPrint(BCLog::INSTANTSEND, "CInstantSendManager::%s -- txid=%s: voted on input %s with id %s\n", __func__, tx.GetHash().ToString(), in.prevout.ToStringShort(), id.ToString()); } @@ -961,7 +961,7 @@ void CInstantSendManager::UpdateWalletTransaction(const CTransactionRef& tx, con mempool.AddTransactionsUpdated(1); } -void CInstantSendManager::ProcessNewTransaction(const CTransactionRef& tx, const CBlockIndex* pindex) +void CInstantSendManager::ProcessNewTransaction(const CTransactionRef& tx, const CBlockIndex* pindex, bool allowReSigning) { if (!IsInstantSendEnabled()) { return; @@ -989,7 +989,7 @@ void CInstantSendManager::ProcessNewTransaction(const CTransactionRef& tx, const bool chainlocked = pindex && chainLocksHandler->HasChainLock(pindex->nHeight, pindex->GetBlockHash()); if (islockHash.IsNull() && !chainlocked) { - ProcessTx(*tx, Params().GetConsensus()); + ProcessTx(*tx, allowReSigning, Params().GetConsensus()); } LOCK(cs); @@ -1004,7 +1004,7 @@ void CInstantSendManager::ProcessNewTransaction(const CTransactionRef& tx, const void CInstantSendManager::TransactionAddedToMempool(const CTransactionRef& tx) { - ProcessNewTransaction(tx, nullptr); + ProcessNewTransaction(tx, nullptr, false); } void CInstantSendManager::BlockConnected(const std::shared_ptr& pblock, const CBlockIndex* pindex, const std::vector& vtxConflicted) @@ -1021,7 +1021,7 @@ void CInstantSendManager::BlockConnected(const std::shared_ptr& pb } for (const auto& tx : pblock->vtx) { - ProcessNewTransaction(tx, pindex); + ProcessNewTransaction(tx, pindex, true); } } @@ -1400,7 +1400,7 @@ bool CInstantSendManager::ProcessPendingRetryLockTxs() tx->GetHash().ToString()); } - ProcessTx(*tx, Params().GetConsensus()); + ProcessTx(*tx, false, Params().GetConsensus()); retryCount++; } diff --git 
a/src/llmq/quorums_instantsend.h b/src/llmq/quorums_instantsend.h index 50d935ec1..7a1b1d537 100644 --- a/src/llmq/quorums_instantsend.h +++ b/src/llmq/quorums_instantsend.h @@ -120,7 +120,7 @@ public: void InterruptWorkerThread(); public: - bool ProcessTx(const CTransaction& tx, const Consensus::Params& params); + bool ProcessTx(const CTransaction& tx, bool allowReSigning, const Consensus::Params& params); bool CheckCanLock(const CTransaction& tx, bool printDebug, const Consensus::Params& params); bool CheckCanLock(const COutPoint& outpoint, bool printDebug, const uint256& txHash, CAmount* retValue, const Consensus::Params& params); bool IsLocked(const uint256& txHash); @@ -141,7 +141,7 @@ public: void ProcessInstantSendLock(NodeId from, const uint256& hash, const CInstantSendLock& islock); void UpdateWalletTransaction(const CTransactionRef& tx, const CInstantSendLock& islock); - void ProcessNewTransaction(const CTransactionRef& tx, const CBlockIndex* pindex); + void ProcessNewTransaction(const CTransactionRef& tx, const CBlockIndex* pindex, bool allowReSigning); void TransactionAddedToMempool(const CTransactionRef& tx); void BlockConnected(const std::shared_ptr& pblock, const CBlockIndex* pindex, const std::vector& vtxConflicted); void BlockDisconnected(const std::shared_ptr& pblock, const CBlockIndex* pindexDisconnected); diff --git a/src/llmq/quorums_signing.cpp b/src/llmq/quorums_signing.cpp index 3240b1731..e1f9535fb 100644 --- a/src/llmq/quorums_signing.cpp +++ b/src/llmq/quorums_signing.cpp @@ -762,7 +762,7 @@ void CSigningManager::UnregisterRecoveredSigsListener(CRecoveredSigsListener* l) recoveredSigsListeners.erase(itRem, recoveredSigsListeners.end()); } -bool CSigningManager::AsyncSignIfMember(Consensus::LLMQType llmqType, const uint256& id, const uint256& msgHash) +bool CSigningManager::AsyncSignIfMember(Consensus::LLMQType llmqType, const uint256& id, const uint256& msgHash, bool allowReSign) { auto& params = 
Params().GetConsensus().llmqs.at(llmqType); @@ -773,24 +773,31 @@ bool CSigningManager::AsyncSignIfMember(Consensus::LLMQType llmqType, const uint { LOCK(cs); - if (db.HasVotedOnId(llmqType, id)) { + bool hasVoted = db.HasVotedOnId(llmqType, id); + if (hasVoted) { uint256 prevMsgHash; db.GetVoteForId(llmqType, id, prevMsgHash); if (msgHash != prevMsgHash) { LogPrintf("CSigningManager::%s -- already voted for id=%s and msgHash=%s. Not voting on conflicting msgHash=%s\n", __func__, id.ToString(), prevMsgHash.ToString(), msgHash.ToString()); + return false; + } else if (allowReSign) { + LogPrint(BCLog::LLMQ, "CSigningManager::%s -- already voted for id=%s and msgHash=%s. Resigning!\n", __func__, + id.ToString(), prevMsgHash.ToString()); } else { LogPrint(BCLog::LLMQ, "CSigningManager::%s -- already voted for id=%s and msgHash=%s. Not voting again.\n", __func__, id.ToString(), prevMsgHash.ToString()); + return false; } - return false; } if (db.HasRecoveredSigForId(llmqType, id)) { // no need to sign it if we already have a recovered sig return true; } - db.WriteVoteForId(llmqType, id, msgHash); + if (!hasVoted) { + db.WriteVoteForId(llmqType, id, msgHash); + } } int tipHeight; @@ -814,6 +821,10 @@ bool CSigningManager::AsyncSignIfMember(Consensus::LLMQType llmqType, const uint return false; } + if (allowReSign) { + // make us re-announce all known shares (other nodes might have run into a timeout) + quorumSigSharesManager->ForceReAnnouncement(quorum, llmqType, id, msgHash); + } quorumSigSharesManager->AsyncSign(quorum, id, msgHash); return true; diff --git a/src/llmq/quorums_signing.h b/src/llmq/quorums_signing.h index 11c28b24c..44f7f7910 100644 --- a/src/llmq/quorums_signing.h +++ b/src/llmq/quorums_signing.h @@ -170,7 +170,7 @@ public: void RegisterRecoveredSigsListener(CRecoveredSigsListener* l); void UnregisterRecoveredSigsListener(CRecoveredSigsListener* l); - bool AsyncSignIfMember(Consensus::LLMQType llmqType, const uint256& id, const uint256& msgHash); + bool 
AsyncSignIfMember(Consensus::LLMQType llmqType, const uint256& id, const uint256& msgHash, bool allowReSign = false); bool HasRecoveredSig(Consensus::LLMQType llmqType, const uint256& id, const uint256& msgHash); bool HasRecoveredSigForId(Consensus::LLMQType llmqType, const uint256& id); bool HasRecoveredSigForSession(const uint256& signHash); diff --git a/src/llmq/quorums_signing_shares.cpp b/src/llmq/quorums_signing_shares.cpp index dfbe3d935..74f585966 100644 --- a/src/llmq/quorums_signing_shares.cpp +++ b/src/llmq/quorums_signing_shares.cpp @@ -82,6 +82,13 @@ void CSigSharesInv::Set(uint16_t quorumMember, bool v) inv[quorumMember] = v; } +void CSigSharesInv::SetAll(bool v) +{ + for (size_t i = 0; i < inv.size(); i++) { + inv[i] = v; + } +} + std::string CBatchedSigShares::ToInvString() const { CSigSharesInv inv; @@ -678,7 +685,7 @@ void CSigSharesManager::ProcessSigShare(NodeId nodeId, const CSigShare& sigShare sigSharesToAnnounce.Add(sigShare.GetKey(), true); // Update the time we've seen the last sigShare - timeSeenForSessions[sigShare.GetSignHash()] = GetTimeMillis(); + timeSeenForSessions[sigShare.GetSignHash()] = GetAdjustedTime(); if (!quorumNodes.empty()) { // don't announce and wait for other nodes to request this share and directly send it to them @@ -777,7 +784,7 @@ void CSigSharesManager::CollectSigSharesToRequest(std::unordered_mapqc.quorumHash, id, msgHash); + auto sigs = sigShares.GetAllForSignHash(signHash); + if (sigs) { + for (auto& p : *sigs) { + // re-announce every sigshare to every node + sigSharesToAnnounce.Add(std::make_pair(signHash, p.first), true); + } + } + for (auto& p : nodeStates) { + CSigSharesNodeState& nodeState = p.second; + auto session = nodeState.GetSessionBySignHash(signHash); + if (!session) { + continue; + } + // pretend that the other node doesn't know about any shares so that we re-announce everything + session->knows.SetAll(false); + // we need to use a new session id as we don't know if the other node has run into a 
timeout already + session->sendSessionId = (uint32_t)-1; + } +} + void CSigSharesManager::HandleNewRecoveredSig(const llmq::CRecoveredSig& recoveredSig) { LOCK(cs); diff --git a/src/llmq/quorums_signing_shares.h b/src/llmq/quorums_signing_shares.h index c8a4a3f26..e5355737a 100644 --- a/src/llmq/quorums_signing_shares.h +++ b/src/llmq/quorums_signing_shares.h @@ -104,6 +104,7 @@ public: void Init(size_t size); bool IsSet(uint16_t quorumMember) const; void Set(uint16_t quorumMember, bool v); + void SetAll(bool v); void Merge(const CSigSharesInv& inv2); size_t CountSet() const; @@ -328,8 +329,8 @@ public: class CSigSharesManager : public CRecoveredSigsListener { - static const int64_t SESSION_NEW_SHARES_TIMEOUT = 60 * 1000; - static const int64_t SIG_SHARE_REQUEST_TIMEOUT = 5 * 1000; + static const int64_t SESSION_NEW_SHARES_TIMEOUT = 60; + static const int64_t SIG_SHARE_REQUEST_TIMEOUT = 5; // we try to keep total message size below 10k const size_t MAX_MSGS_CNT_QSIGSESANN = 100; @@ -376,6 +377,7 @@ public: void AsyncSign(const CQuorumCPtr& quorum, const uint256& id, const uint256& msgHash); void Sign(const CQuorumCPtr& quorum, const uint256& id, const uint256& msgHash); + void ForceReAnnouncement(const CQuorumCPtr& quorum, Consensus::LLMQType llmqType, const uint256& id, const uint256& msgHash); void HandleNewRecoveredSig(const CRecoveredSig& recoveredSig); diff --git a/src/net.cpp b/src/net.cpp index e4bdd6091..03aee7625 100644 --- a/src/net.cpp +++ b/src/net.cpp @@ -716,6 +716,11 @@ void CNode::copyStats(CNodeStats &stats) // Leave string empty if addrLocal invalid (not filled in yet) CService addrLocalUnlocked = GetAddrLocal(); stats.addrLocal = addrLocalUnlocked.IsValid() ? 
addrLocalUnlocked.ToString() : ""; + + { + LOCK(cs_mnauth); + X(verifiedProRegTxHash); + } } #undef X diff --git a/src/net.h b/src/net.h index e6d1d9a9b..3a405d0f7 100644 --- a/src/net.h +++ b/src/net.h @@ -694,6 +694,8 @@ public: CAddress addr; // Bind address of our side of the connection CAddress addrBind; + // In case this is a verified MN, this value is the proTx of the MN + uint256 verifiedProRegTxHash; }; diff --git a/src/rpc/net.cpp b/src/rpc/net.cpp index 5165d6317..4f50397ca 100644 --- a/src/rpc/net.cpp +++ b/src/rpc/net.cpp @@ -80,6 +80,9 @@ UniValue getpeerinfo(const JSONRPCRequest& request) " \"addrbind\":\"ip:port\", (string) Bind address of the connection to the peer\n" " \"addrlocal\":\"ip:port\", (string) Local address as reported by the peer\n" " \"services\":\"xxxxxxxxxxxxxxxx\", (string) The services offered\n" + " \"verified_proregtx_hash\": h, (hex) Only present when the peer is a masternode and successfully\n" + " authenticated via MNAUTH. In this case, this field contains the\n" + " protx hash of the masternode\n" " \"relaytxes\":true|false, (boolean) Whether peer has asked us to relay transactions to it\n" " \"lastsend\": ttt, (numeric) The time in seconds since epoch (Jan 1 1970 GMT) of the last send\n" " \"lastrecv\": ttt, (numeric) The time in seconds since epoch (Jan 1 1970 GMT) of the last receive\n" @@ -138,6 +141,9 @@ UniValue getpeerinfo(const JSONRPCRequest& request) if (stats.addrBind.IsValid()) obj.push_back(Pair("addrbind", stats.addrBind.ToString())); obj.push_back(Pair("services", strprintf("%016x", stats.nServices))); + if (!stats.verifiedProRegTxHash.IsNull()) { + obj.push_back(Pair("verified_proregtx_hash", stats.verifiedProRegTxHash.ToString())); + } obj.push_back(Pair("relaytxes", stats.fRelayTxes)); obj.push_back(Pair("lastsend", stats.nLastSend)); obj.push_back(Pair("lastrecv", stats.nLastRecv)); diff --git a/test/functional/llmq-chainlocks.py b/test/functional/llmq-chainlocks.py index b08c69dd5..2ebb83286 100755 --- 
a/test/functional/llmq-chainlocks.py +++ b/test/functional/llmq-chainlocks.py @@ -129,7 +129,7 @@ class LLMQChainLocksTest(DashTestFramework): # for the mined TXs, which will then allow the network to create a CLSIG self.log.info("Reenable network on first node and wait for chainlock") reconnect_isolated_node(self.nodes[0], 1) - self.wait_for_chainlocked_block(self.nodes[0], self.nodes[0].getbestblockhash(), 30) + self.wait_for_chainlocked_block(self.nodes[0], self.nodes[0].getbestblockhash(), timeout=30) def create_chained_txs(self, node, amount): txid = node.sendtoaddress(node.getnewaddress(), amount) diff --git a/test/functional/llmq-is-retroactive.py b/test/functional/llmq-is-retroactive.py new file mode 100755 index 000000000..7bdd7b401 --- /dev/null +++ b/test/functional/llmq-is-retroactive.py @@ -0,0 +1,178 @@ +#!/usr/bin/env python3 +# Copyright (c) 2015-2018 The Dash Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. + +from test_framework.mininode import * +from test_framework.test_framework import DashTestFramework +from test_framework.util import sync_blocks, set_node_times, \ + isolate_node, reconnect_isolated_node + +''' +llmq-is-retroactive.py + +Tests retroactive signing + +We have 6 nodes where node 0 is the control node, nodes 1-5 are masternodes. +Mempool inconsistencies are simulated via disconnecting/reconnecting node 3 +and by having a higher relay fee on nodes 4 and 5. 
+''' + +class LLMQ_IS_RetroactiveSigning(DashTestFramework): + def set_test_params(self): + # -whitelist is needed to avoid the trickling logic on node0 + self.set_dash_test_params(6, 5, [["-whitelist=127.0.0.1"], [], [], [], ["-minrelaytxfee=0.001"], ["-minrelaytxfee=0.001"]], fast_dip3_enforcement=True) + + def run_test(self): + while self.nodes[0].getblockchaininfo()["bip9_softforks"]["dip0008"]["status"] != "active": + self.nodes[0].generate(10) + sync_blocks(self.nodes, timeout=60*5) + + self.nodes[0].spork("SPORK_17_QUORUM_DKG_ENABLED", 0) + self.nodes[0].spork("SPORK_19_CHAINLOCKS_ENABLED", 0) + self.nodes[0].spork("SPORK_2_INSTANTSEND_ENABLED", 0) + self.nodes[0].spork("SPORK_3_INSTANTSEND_BLOCK_FILTERING", 0) + self.wait_for_sporks_same() + + self.mine_quorum() + self.mine_quorum() + + # Make sure that all nodes are chainlocked at the same height before starting actual tests + self.wait_for_chainlocked_block_all_nodes(self.nodes[0].getbestblockhash()) + + self.log.info("trying normal IS lock") + txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1) + # 3 nodes should be enough to create an IS lock even if nodes 4 and 5 (which have no tx itself) + # are the only "neighbours" in intra-quorum connections for one of them. 
+ self.wait_for_instantlock(txid, self.nodes[0]) + self.bump_mocktime(1) + set_node_times(self.nodes, self.mocktime) + block = self.nodes[0].generate(1)[0] + self.wait_for_chainlocked_block_all_nodes(block) + + self.log.info("testing normal signing with partially known TX") + isolate_node(self.nodes[3]) + txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1) + # Make sure nodes 1 and 2 received the TX before we continue, + # otherwise it might announce the TX to node 3 when reconnecting + self.wait_for_tx(txid, self.nodes[1]) + self.wait_for_tx(txid, self.nodes[2]) + reconnect_isolated_node(self.nodes[3], 0) + self.wait_for_mnauth(self.nodes[3], 2) + # node 3 fully reconnected but the TX wasn't relayed to it, so there should be no IS lock + self.wait_for_instantlock(txid, self.nodes[0], False, 5) + # push the tx directly via rpc + self.nodes[3].sendrawtransaction(self.nodes[0].getrawtransaction(txid)) + # node 3 should vote on a tx now since it became aware of it via sendrawtransaction + # and this should be enough to complete an IS lock + self.wait_for_instantlock(txid, self.nodes[0]) + + self.log.info("testing retroactive signing with unknown TX") + isolate_node(self.nodes[3]) + rawtx = self.nodes[0].createrawtransaction([], {self.nodes[0].getnewaddress(): 1}) + rawtx = self.nodes[0].fundrawtransaction(rawtx)['hex'] + rawtx = self.nodes[0].signrawtransaction(rawtx)['hex'] + txid = self.nodes[3].sendrawtransaction(rawtx) + # Make node 3 consider the TX as safe + self.bump_mocktime(10 * 60 + 1) + set_node_times(self.nodes, self.mocktime) + block = self.nodes[3].generatetoaddress(1, self.nodes[0].getnewaddress())[0] + reconnect_isolated_node(self.nodes[3], 0) + self.wait_for_chainlocked_block_all_nodes(block) + self.nodes[0].setmocktime(self.mocktime) + + self.log.info("testing retroactive signing with partially known TX") + isolate_node(self.nodes[3]) + txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1) + # Make sure nodes 1 and 2 
received the TX before we continue, + # otherwise it might announce the TX to node 3 when reconnecting + self.wait_for_tx(txid, self.nodes[1]) + self.wait_for_tx(txid, self.nodes[2]) + reconnect_isolated_node(self.nodes[3], 0) + self.wait_for_mnauth(self.nodes[3], 2) + # node 3 fully reconnected but the TX wasn't relayed to it, so there should be no IS lock + self.wait_for_instantlock(txid, self.nodes[0], False, 5) + # Make node0 consider the TX as safe + self.bump_mocktime(10 * 60 + 1) + set_node_times(self.nodes, self.mocktime) + block = self.nodes[0].generate(1)[0] + self.wait_for_chainlocked_block_all_nodes(block) + + self.log.info("testing retroactive signing with partially known TX and all nodes session timeout") + self.test_all_nodes_session_timeout(False) + self.log.info("repeating test, but with cycled LLMQs") + self.test_all_nodes_session_timeout(True) + + self.log.info("testing retroactive signing with partially known TX and single node session timeout") + self.test_single_node_session_timeout(False) + self.log.info("repeating test, but with cycled LLMQs") + self.test_single_node_session_timeout(True) + + def cycle_llmqs(self): + self.mine_quorum() + self.mine_quorum() + self.wait_for_chainlocked_block_all_nodes(self.nodes[0].getbestblockhash()) + + def test_all_nodes_session_timeout(self, do_cycle_llmqs): + set_node_times(self.nodes, self.mocktime) + isolate_node(self.nodes[3]) + rawtx = self.nodes[0].createrawtransaction([], {self.nodes[0].getnewaddress(): 1}) + rawtx = self.nodes[0].fundrawtransaction(rawtx)['hex'] + rawtx = self.nodes[0].signrawtransaction(rawtx)['hex'] + txid = self.nodes[0].sendrawtransaction(rawtx) + txid = self.nodes[3].sendrawtransaction(rawtx) + # Make sure nodes 1 and 2 received the TX before we continue + self.wait_for_tx(txid, self.nodes[1]) + self.wait_for_tx(txid, self.nodes[2]) + # Make sure signing is done on nodes 1 and 2 (it's async) + time.sleep(5) + # Make the signing session for the IS lock timeout on nodes 1-3 + 
self.bump_mocktime(61) + set_node_times(self.nodes, self.mocktime) + time.sleep(2) # make sure Cleanup() is called + reconnect_isolated_node(self.nodes[3], 0) + self.wait_for_mnauth(self.nodes[3], 2) + # node 3 fully reconnected but the signing session is already timed out on all nodes, so no IS lock + self.wait_for_instantlock(txid, self.nodes[0], False, 5) + if do_cycle_llmqs: + self.cycle_llmqs() + self.wait_for_instantlock(txid, self.nodes[0], False, 5) + # Make node 0 consider the TX as safe + self.bump_mocktime(10 * 60 + 1) + self.nodes[0].setmocktime(self.mocktime) + block = self.nodes[0].generate(1)[0] + self.wait_for_chainlocked_block_all_nodes(block) + + def test_single_node_session_timeout(self, do_cycle_llmqs): + set_node_times(self.nodes, self.mocktime) + isolate_node(self.nodes[3]) + rawtx = self.nodes[0].createrawtransaction([], {self.nodes[0].getnewaddress(): 1}) + rawtx = self.nodes[0].fundrawtransaction(rawtx)['hex'] + rawtx = self.nodes[0].signrawtransaction(rawtx)['hex'] + txid = self.nodes[3].sendrawtransaction(rawtx) + time.sleep(2) # make sure signing is done on node 2 (it's async) + # Make the signing session for the IS lock timeout on node 3 + self.bump_mocktime(61) + set_node_times(self.nodes, self.mocktime) + time.sleep(2) # make sure Cleanup() is called + reconnect_isolated_node(self.nodes[3], 0) + self.wait_for_mnauth(self.nodes[3], 2) + self.nodes[0].sendrawtransaction(rawtx) + # Make sure nodes 1 and 2 received the TX + self.wait_for_tx(txid, self.nodes[1]) + self.wait_for_tx(txid, self.nodes[2]) + # Make sure signing is done on nodes 1 and 2 (it's async) + time.sleep(5) + # node 3 fully reconnected but the signing session is already timed out on it, so no IS lock + self.wait_for_instantlock(txid, self.nodes[0], False, 1) + if do_cycle_llmqs: + self.cycle_llmqs() + self.wait_for_instantlock(txid, self.nodes[0], False, 5) + # Make node 0 consider the TX as safe + self.bump_mocktime(10 * 60 + 1) + 
self.nodes[0].setmocktime(self.mocktime) + block = self.nodes[0].generate(1)[0] + self.wait_for_chainlocked_block_all_nodes(block) + +if __name__ == '__main__': + LLMQ_IS_RetroactiveSigning().main() diff --git a/test/functional/test_framework/test_framework.py b/test/functional/test_framework/test_framework.py index dd00d145c..95d1d95d0 100755 --- a/test/functional/test_framework/test_framework.py +++ b/test/functional/test_framework/test_framework.py @@ -699,26 +699,37 @@ class DashTestFramework(BitcoinTestFramework): ret = {**decoded, **ret} return ret - def wait_for_instantlock(self, txid, node): + def wait_for_tx(self, txid, node, expected=True, timeout=15): + def check_tx(): + try: + return node.getrawtransaction(txid) + except: + return False + if wait_until(check_tx, timeout=timeout, sleep=0.5, do_assert=expected) and not expected: + raise AssertionError("waiting unexpectedly succeeded") + + def wait_for_instantlock(self, txid, node, expected=True, timeout=15): def check_instantlock(): try: return node.getrawtransaction(txid, True)["instantlock"] except: return False - wait_until(check_instantlock, timeout=10, sleep=0.5) + if wait_until(check_instantlock, timeout=timeout, sleep=0.5, do_assert=expected) and not expected: + raise AssertionError("waiting unexpectedly succeeded") - def wait_for_chainlocked_block(self, node, block_hash, timeout=15): + def wait_for_chainlocked_block(self, node, block_hash, expected=True, timeout=15): def check_chainlocked_block(): try: block = node.getblock(block_hash) return block["confirmations"] > 0 and block["chainlock"] except: return False - wait_until(check_chainlocked_block, timeout=timeout, sleep=0.1) + if wait_until(check_chainlocked_block, timeout=timeout, sleep=0.1, do_assert=expected) and not expected: + raise AssertionError("waiting unexpectedly succeeded") def wait_for_chainlocked_block_all_nodes(self, block_hash, timeout=15): for node in self.nodes: - self.wait_for_chainlocked_block(node, block_hash, timeout) + 
self.wait_for_chainlocked_block(node, block_hash, timeout=timeout) def wait_for_best_chainlock(self, node, block_hash, timeout=15): wait_until(lambda: node.getbestchainlock()["blockhash"] == block_hash, timeout=timeout, sleep=0.1) @@ -846,6 +857,17 @@ class DashTestFramework(BitcoinTestFramework): return new_quorum + def wait_for_mnauth(self, node, count, timeout=10): + def test(): + pi = node.getpeerinfo() + c = 0 + for p in pi: + if "verified_proregtx_hash" in p and p["verified_proregtx_hash"] != "": + c += 1 + return c >= count + wait_until(test, timeout=timeout) + + class ComparisonTestFramework(BitcoinTestFramework): """Test framework for doing p2p comparison testing diff --git a/test/functional/test_framework/util.py b/test/functional/test_framework/util.py index ffe84177a..ada489e84 100644 --- a/test/functional/test_framework/util.py +++ b/test/functional/test_framework/util.py @@ -202,27 +202,34 @@ def str_to_b64str(string): def satoshi_round(amount): return Decimal(amount).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN) -def wait_until(predicate, *, attempts=float('inf'), timeout=float('inf'), sleep=0.05, lock=None): +def wait_until(predicate, *, attempts=float('inf'), timeout=float('inf'), sleep=0.05, lock=None, do_assert=True, allow_exception=False): if attempts == float('inf') and timeout == float('inf'): timeout = 60 attempt = 0 timeout += time.time() while attempt < attempts and time.time() < timeout: - if lock: - with lock: + try: + if lock: + with lock: + if predicate(): + return True + else: if predicate(): - return - else: - if predicate(): - return + return True + except: + if not allow_exception: + raise attempt += 1 time.sleep(sleep) - # Print the cause of the timeout - assert_greater_than(attempts, attempt) - assert_greater_than(timeout, time.time()) - raise RuntimeError('Unreachable') + if do_assert: + # Print the cause of the timeout + assert_greater_than(attempts, attempt) + assert_greater_than(timeout, time.time()) + raise 
RuntimeError('Unreachable') + else: + return False # RPC/P2P connection constants and functions ############################################ diff --git a/test/functional/test_runner.py b/test/functional/test_runner.py index 4efd05793..613de2a66 100755 --- a/test/functional/test_runner.py +++ b/test/functional/test_runner.py @@ -72,6 +72,7 @@ BASE_SCRIPTS= [ 'llmq-chainlocks.py', # NOTE: needs dash_hash to pass 'llmq-simplepose.py', # NOTE: needs dash_hash to pass 'llmq-is-cl-conflicts.py', # NOTE: needs dash_hash to pass + 'llmq-is-retroactive.py', # NOTE: needs dash_hash to pass 'llmq-dkgerrors.py', # NOTE: needs dash_hash to pass 'dip4-coinbasemerkleroots.py', # NOTE: needs dash_hash to pass # vv Tests less than 60s vv