diff --git a/src/masternode/sync.cpp b/src/masternode/sync.cpp
index b4fc5440bf..fefc12730d 100644
--- a/src/masternode/sync.cpp
+++ b/src/masternode/sync.cpp
@@ -127,7 +127,7 @@ void CMasternodeSync::ProcessTick(const PeerManager& peerman)
 
     // reset the sync process if the last call to this function was more than 60 minutes ago (client was in sleep mode)
     static int64_t nTimeLastProcess = GetTime();
-    if(GetTime() - nTimeLastProcess > 60*60 && !fMasternodeMode) {
+    if (!Params().IsMockableChain() && GetTime() - nTimeLastProcess > 60 * 60 && !fMasternodeMode) {
         LogPrintf("CMasternodeSync::ProcessTick -- WARNING: no actions for too long, restarting sync...\n");
         Reset(true);
         nTimeLastProcess = GetTime();
diff --git a/test/functional/feature_governance.py b/test/functional/feature_governance.py
index 2bc5eb3e09..6d4c6a12a2 100755
--- a/test/functional/feature_governance.py
+++ b/test/functional/feature_governance.py
@@ -88,8 +88,10 @@ class DashGovernanceTest (DashTestFramework):
         assert_equal(len(self.nodes[0].gobject("list-prepared")), 0)
 
         # TODO: drop these extra 80 blocks - doesn't work without them
-        self.nodes[0].generate(80)
-        self.bump_mocktime(80)
+        for _ in range(8):
+            self.bump_mocktime(10)
+            self.nodes[0].generate(10)
+            self.sync_blocks()
 
         self.nodes[0].generate(3)
         self.bump_mocktime(3)
@@ -280,7 +282,7 @@ class DashGovernanceTest (DashTestFramework):
         before = self.nodes[1].gobject("count")["votes"]
 
         # Bump mocktime to let MNs vote again
-        self.bump_mocktime(GOVERNANCE_UPDATE_MIN + 1)
+        self.bump_mocktime(GOVERNANCE_UPDATE_MIN + 1, update_schedulers=False)
 
         # Move another block inside the Superblock maturity window
         with self.nodes[1].assert_debug_log(["CGovernanceManager::VoteGovernanceTriggers"]):
@@ -291,7 +293,7 @@ class DashGovernanceTest (DashTestFramework):
         # Vote count should not change even though MNs are allowed to vote again
         assert_equal(before, self.nodes[1].gobject("count")["votes"])
         # Revert mocktime back to avoid issues in tests below
-        self.bump_mocktime(GOVERNANCE_UPDATE_MIN * -1)
+        self.bump_mocktime(GOVERNANCE_UPDATE_MIN * -1, update_schedulers=False)
 
         block_count = self.nodes[0].getblockcount()
         n = sb_cycle - block_count % sb_cycle
diff --git a/test/functional/feature_governance_cl.py b/test/functional/feature_governance_cl.py
index b75b516cac..99bdf3dfdd 100755
--- a/test/functional/feature_governance_cl.py
+++ b/test/functional/feature_governance_cl.py
@@ -5,11 +5,12 @@
 """Tests governance checks can be skipped for blocks covered by the best chainlock."""
 
 import json
+import time
 
 from test_framework.governance import have_trigger_for_height
 from test_framework.messages import uint256_to_string
 from test_framework.test_framework import DashTestFramework
-from test_framework.util import assert_equal, force_finish_mnsync, satoshi_round
+from test_framework.util import assert_equal, satoshi_round
 
 class DashGovernanceTest (DashTestFramework):
     def set_test_params(self):
@@ -22,7 +23,7 @@ class DashGovernanceTest (DashTestFramework):
             "type": object_type,
             "name": name,
             "start_epoch": proposal_time,
-            "end_epoch": proposal_time + 24 * 60 * 60,
+            "end_epoch": proposal_time + 20 * 156,
             "payment_amount": float(amount),
             "payment_address": payment_address,
             "url": "https://dash.org"
@@ -40,6 +41,8 @@ class DashGovernanceTest (DashTestFramework):
 
     def run_test(self):
         sb_cycle = 20
+        sb_maturity_window = 10
+        sb_immaturity_window = sb_cycle - sb_maturity_window
 
         self.log.info("Make sure ChainLocks are active")
 
@@ -62,7 +65,14 @@ class DashGovernanceTest (DashTestFramework):
         self.nodes[0].sporkupdate("SPORK_9_SUPERBLOCKS_ENABLED", 0)
         self.wait_for_sporks_same()
 
-        self.log.info("Prepare and submit proposals")
+        # Move to the superblock cycle start block
+        n = sb_cycle - self.nodes[0].getblockcount() % sb_cycle
+        for _ in range(n):
+            self.bump_mocktime(156)
+            self.nodes[0].generate(1)
+            self.sync_blocks()
+
+        self.log.info("Prepare proposals")
 
         proposal_time = self.mocktime
         self.p0_payout_address = self.nodes[0].getnewaddress()
@@ -81,6 +91,8 @@ class DashGovernanceTest (DashTestFramework):
         assert_equal(len(self.nodes[0].gobject("list-prepared")), 2)
         assert_equal(len(self.nodes[0].gobject("list")), 0)
 
+        self.log.info("Submit proposals")
+
         self.p0_hash = self.nodes[0].gobject("submit", "0", 1, proposal_time, p0_collateral_prepare["hex"], p0_collateral_prepare["collateralHash"])
         self.p1_hash = self.nodes[0].gobject("submit", "0", 1, proposal_time, p1_collateral_prepare["hex"], p1_collateral_prepare["collateralHash"])
 
@@ -99,31 +111,62 @@ class DashGovernanceTest (DashTestFramework):
 
         assert_equal(len(self.nodes[0].gobject("list", "valid", "triggers")), 0)
 
-        n = sb_cycle - self.nodes[0].getblockcount() % sb_cycle
-        assert n > 1
-
-        # Move remaining n blocks until the next Superblock
-        for _ in range(n - 1):
-            self.nodes[0].generate(1)
+        self.log.info("Move 1 block into sb maturity window")
+        n = sb_immaturity_window - self.nodes[0].getblockcount() % sb_cycle
+        assert n >= 0
+        for _ in range(n + 1):
             self.bump_mocktime(156)
+            self.nodes[0].generate(1)
             self.sync_blocks(self.nodes[0:5])
 
         self.log.info("Wait for new trigger and votes on non-isolated nodes")
-        sb_block_height = self.nodes[0].getblockcount() + 1
-        self.wait_until(lambda: have_trigger_for_height(self.nodes[0:5], sb_block_height))
-        # Mine superblock
-        self.nodes[0].generate(1)
+        sb_block_height = self.nodes[0].getblockcount() // sb_cycle * sb_cycle + sb_cycle
+        assert_equal(sb_block_height % sb_cycle, 0)
+        self.wait_until(lambda: have_trigger_for_height(self.nodes[0:5], sb_block_height), timeout=5)
+
+        n = sb_cycle - self.nodes[0].getblockcount() % sb_cycle
+        assert n > 1
+
+        self.log.info("Move remaining n blocks until the next Superblock")
+        for _ in range(n - 1):
+            self.bump_mocktime(156)
+            self.nodes[0].generate(1)
+            self.sync_blocks(self.nodes[0:5])
+
+        # Confirm all is good
+        self.wait_until(lambda: have_trigger_for_height(self.nodes[0:5], sb_block_height), timeout=5)
+
+        self.log.info("Mine superblock")
         self.bump_mocktime(156)
+        self.nodes[0].generate(1)
         self.sync_blocks(self.nodes[0:5])
         self.wait_for_chainlocked_block(self.nodes[0], self.nodes[0].getbestblockhash())
 
+        self.log.info("Mine (superblock cycle + 1) blocks on non-isolated nodes to forget about this trigger")
+        for _ in range(sb_cycle):
+            self.bump_mocktime(156)
+            self.nodes[0].generate(1)
+            self.sync_blocks(self.nodes[0:5])
+        # Should still have at least 1 trigger for the old sb cycle and 0 for the current one
+        assert len(self.nodes[0].gobject("list", "valid", "triggers")) >= 1
+        assert not have_trigger_for_height(self.nodes[0:5], sb_block_height + sb_cycle)
+        self.bump_mocktime(156)
+        self.nodes[0].generate(1)
+        self.sync_blocks(self.nodes[0:5])
+        # Trigger scheduler to mark old triggers for deletion
+        self.bump_mocktime(5 * 60)
+        # Let it do the job
+        time.sleep(1)
+        # Move forward to satisfy GOVERNANCE_DELETION_DELAY, should actually remove old triggers now
+        self.bump_mocktime(10 * 60)
+        self.wait_until(lambda: len(self.nodes[0].gobject("list", "valid", "triggers")) == 0, timeout=5)
+
         self.log.info("Reconnect isolated node and confirm the next ChainLock will let it sync")
         self.reconnect_isolated_node(5, 0)
-        # Force isolated node to be fully synced so that it would not request gov objects when reconnected
         assert_equal(self.nodes[5].mnsync("status")["IsSynced"], False)
-        force_finish_mnsync(self.nodes[5])
         self.nodes[0].generate(1)
-        self.bump_mocktime(156)
+        # NOTE: bumping mocktime too much after recent reconnect can result in "timeout downloading block"
+        self.bump_mocktime(1)
         self.sync_blocks()
diff --git a/test/functional/feature_llmq_signing.py b/test/functional/feature_llmq_signing.py
index b2547a9d1f..08edc8827f 100755
--- a/test/functional/feature_llmq_signing.py
+++ b/test/functional/feature_llmq_signing.py
@@ -157,11 +157,11 @@ class LLMQSigningTest(DashTestFramework):
         assert_sigs_nochange(True, False, True, 3)
 
         # fast forward until 0.5 days before cleanup is expected, recovered sig should still be valid
-        self.bump_mocktime(recsig_time + int(60 * 60 * 24 * 6.5) - self.mocktime)
+        self.bump_mocktime(recsig_time + int(60 * 60 * 24 * 6.5) - self.mocktime, update_schedulers=False)
         # Cleanup starts every 5 seconds
         wait_for_sigs(True, False, True, 15)
 
         # fast forward 1 day, recovered sig should not be valid anymore
-        self.bump_mocktime(int(60 * 60 * 24 * 1))
+        self.bump_mocktime(int(60 * 60 * 24 * 1), update_schedulers=False)
         # Cleanup starts every 5 seconds
         wait_for_sigs(False, False, False, 15)
diff --git a/test/functional/feature_mnehf.py b/test/functional/feature_mnehf.py
index c509f121ce..b12e9c8c12 100755
--- a/test/functional/feature_mnehf.py
+++ b/test/functional/feature_mnehf.py
@@ -243,7 +243,7 @@ class MnehfTest(DashTestFramework):
         assert ehf_tx_duplicate in node.getrawmempool() and ehf_tx_duplicate not in block['tx']
 
         self.log.info("Testing EHF signal with same bit but with newer start time")
-        self.bump_mocktime(int(60 * 60 * 24 * 14))
+        self.bump_mocktime(int(60 * 60 * 24 * 14), update_schedulers=False)
         node.generate(1)
         self.sync_blocks()
         self.restart_all_nodes(params=[self.mocktime, self.mocktime + 1000000])
diff --git a/test/functional/p2p_addr_relay.py b/test/functional/p2p_addr_relay.py
index 9437ee9621..cbc15dcbf5 100755
--- a/test/functional/p2p_addr_relay.py
+++ b/test/functional/p2p_addr_relay.py
@@ -326,6 +326,8 @@ class AddrTest(BitcoinTestFramework):
         self.restart_node(0, [])
 
         for conn_type, no_relay in [("outbound-full-relay", False), ("block-relay-only", True), ("inbound", False)]:
+            # Advance the time by 5 * 60 seconds, permitting syncing from the same peer.
+            self.bump_mocktime(5 * 60)
             self.log.info(f'Test rate limiting of addr processing for {conn_type} peers')
             if conn_type == "inbound":
                 peer = self.nodes[0].add_p2p_connection(AddrReceiver())
diff --git a/test/functional/test_framework/test_framework.py b/test/functional/test_framework/test_framework.py
index a8a7b48be1..ccbc459667 100755
--- a/test/functional/test_framework/test_framework.py
+++ b/test/functional/test_framework/test_framework.py
@@ -455,7 +455,7 @@ class BitcoinTestFramework(metaclass=BitcoinTestMetaClass):
             # must have a timestamp not too old (see IsInitialBlockDownload()).
             if not self.disable_mocktime:
                 self.log.debug('Generate a block with current mocktime')
-                self.bump_mocktime(156 * 200)
+                self.bump_mocktime(156 * 200, update_schedulers=False)
                 block_hash = self.nodes[0].generate(1)[0]
                 block = self.nodes[0].getblock(blockhash=block_hash, verbosity=0)
                 for n in self.nodes:
@@ -813,13 +813,24 @@ class BitcoinTestFramework(metaclass=BitcoinTestMetaClass):
         self.sync_blocks(nodes)
         self.sync_mempools(nodes)
 
-    def bump_mocktime(self, t, update_nodes=True, nodes=None):
+    def bump_mocktime(self, t, update_nodes=True, nodes=None, update_schedulers=True):
         if self.mocktime == 0:
             return
 
         self.mocktime += t
-        if update_nodes:
-            set_node_times(nodes or self.nodes, self.mocktime)
+
+        if not update_nodes:
+            return
+
+        nodes_to_update = nodes or self.nodes
+        set_node_times(nodes_to_update, self.mocktime)
+
+        if not update_schedulers:
+            return
+
+        for node in nodes_to_update:
+            if node.version_is_at_least(180100):
+                node.mockscheduler(t)
 
     def _initialize_mocktime(self, is_genesis):
         if is_genesis:
@@ -913,7 +924,7 @@ class BitcoinTestFramework(metaclass=BitcoinTestMetaClass):
             gen_addresses = [k.address for k in TestNode.PRIV_KEYS][:3] + [ADDRESS_BCRT1_P2SH_OP_TRUE]
            assert_equal(len(gen_addresses), 4)
             for i in range(8):
-                self.bump_mocktime((25 if i != 7 else 24) * 156)
+                self.bump_mocktime((25 if i != 7 else 24) * 156, update_schedulers=False)
                 cache_node.generatetoaddress(
                     nblocks=25 if i != 7 else 24,
                     address=gen_addresses[i % len(gen_addresses)],
@@ -1132,13 +1143,13 @@ class DashTestFramework(BitcoinTestFramework):
         # NOTE: getblockchaininfo shows softforks active at block (window * 3 - 1)
         # since it's returning whether a softwork is active for the _next_ block.
         # Hence the last block prior to the activation is (expected_activation_height - 2).
-        while expected_activation_height - height - 2 >= batch_size:
+        while expected_activation_height - height - 2 > batch_size:
             self.bump_mocktime(batch_size)
             self.nodes[0].generate(batch_size)
             height += batch_size
             self.sync_blocks()
         blocks_left = expected_activation_height - height - 2
-        assert blocks_left < batch_size
+        assert blocks_left <= batch_size
         self.bump_mocktime(blocks_left)
         self.nodes[0].generate(blocks_left)
         self.sync_blocks()
diff --git a/test/functional/wallet_create_tx.py b/test/functional/wallet_create_tx.py
index d422504766..730e3140c0 100755
--- a/test/functional/wallet_create_tx.py
+++ b/test/functional/wallet_create_tx.py
@@ -23,7 +23,7 @@ class CreateTxWalletTest(BitcoinTestFramework):
 
     def test_anti_fee_sniping(self):
         self.log.info('Check that we have some (old) blocks and that anti-fee-sniping is disabled')
-        self.bump_mocktime(8 * 60 * 60 + 1)
+        self.bump_mocktime(8 * 60 * 60 + 1, update_schedulers=False)
         assert_equal(self.nodes[0].getblockchaininfo()['blocks'], 200)
         txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1)
         tx = self.nodes[0].decoderawtransaction(self.nodes[0].gettransaction(txid)['hex'])
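
A minimal usage sketch (not part of the diff above): with this change, bump_mocktime advances mocktime on every node and, on nodes reporting version 180100 or newer, also fast-forwards their schedulers through the mockscheduler RPC, so periodic jobs (governance clean-up, recovered-signature clean-up, etc.) run as if the skipped time had really elapsed; call sites that must not trigger such maintenance early opt out with update_schedulers=False. The snippet assumes it runs inside run_test() of a DashTestFramework subclass, and the intervals are illustrative only:

    # Default: advance mocktime on all nodes and poke their schedulers, so
    # scheduled clean-up jobs behave as if five minutes had really passed.
    self.bump_mocktime(5 * 60)

    # Large jump that must not run scheduler-driven maintenance ahead of time,
    # mirroring the update_schedulers=False call sites in the tests above.
    self.bump_mocktime(60 * 60 * 24, update_schedulers=False)

    # Restrict the bump to a subset of nodes; schedulers are only advanced on
    # nodes new enough to expose the mockscheduler RPC.
    self.bump_mocktime(156, nodes=self.nodes[0:5])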