feat: bump_mocktime also bumps schedulers now

UdjinM6 2024-08-30 15:33:15 +03:00
parent 1937f503fe
commit 11ac0819da
No known key found for this signature in database
GPG Key ID: 83592BD1400D58D9
6 changed files with 30 additions and 15 deletions
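Summary (from the diff below): the test framework's bump_mocktime() helper gains an update_schedulers parameter, defaulting to True, so advancing mock time now also fast-forwards each node's scheduler via the mockscheduler RPC on nodes new enough to support it; call sites that only want the clock moved pass update_schedulers=False. The new signature, as it appears in the test_framework hunk below:

    def bump_mocktime(self, t, update_nodes=True, nodes=None, update_schedulers=True):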


@@ -88,8 +88,10 @@ class DashGovernanceTest (DashTestFramework):
         assert_equal(len(self.nodes[0].gobject("list-prepared")), 0)
         # TODO: drop these extra 80 blocks - doesn't work without them
-        self.nodes[0].generate(80)
-        self.bump_mocktime(80)
+        for _ in range(8):
+            self.bump_mocktime(10)
+            self.nodes[0].generate(10)
+            self.sync_blocks()
         self.nodes[0].generate(3)
         self.bump_mocktime(3)
@@ -280,7 +282,7 @@ class DashGovernanceTest (DashTestFramework):
         before = self.nodes[1].gobject("count")["votes"]
         # Bump mocktime to let MNs vote again
-        self.bump_mocktime(GOVERNANCE_UPDATE_MIN + 1)
+        self.bump_mocktime(GOVERNANCE_UPDATE_MIN + 1, update_schedulers=False)
         # Move another block inside the Superblock maturity window
         with self.nodes[1].assert_debug_log(["CGovernanceManager::VoteGovernanceTriggers"]):
@@ -291,7 +293,7 @@ class DashGovernanceTest (DashTestFramework):
         # Vote count should not change even though MNs are allowed to vote again
         assert_equal(before, self.nodes[1].gobject("count")["votes"])
         # Revert mocktime back to avoid issues in tests below
-        self.bump_mocktime(GOVERNANCE_UPDATE_MIN * -1)
+        self.bump_mocktime(GOVERNANCE_UPDATE_MIN * -1, update_schedulers=False)
         block_count = self.nodes[0].getblockcount()
         n = sb_cycle - block_count % sb_cycle
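Both opt-outs above keep the node schedulers where they are while the mock clock moves. One plausible reading, assuming Dash keeps upstream Bitcoin Core's bounds on the mockscheduler RPC (it accepts only deltas between 1 and 3600 seconds), is that neither bump could be mirrored on a scheduler anyway; the sketch below encodes that assumption:

    # Hedged sketch: bounds as in upstream Bitcoin Core's mockscheduler RPC;
    # Dash is assumed to match, and GOVERNANCE_UPDATE_MIN is an illustrative value.
    def scheduler_can_mirror(delta_seconds):
        return 1 <= delta_seconds <= 3600

    GOVERNANCE_UPDATE_MIN = 60 * 60
    assert not scheduler_can_mirror(GOVERNANCE_UPDATE_MIN * -1)  # the negative revert above
    assert not scheduler_can_mirror(GOVERNANCE_UPDATE_MIN + 1)   # 3601 s would exceed the assumed cap

The interleaved loop in the first hunk, by contrast, uses the new default, so schedulers advance in 10-second steps alongside the chain.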


@@ -157,11 +157,11 @@ class LLMQSigningTest(DashTestFramework):
         assert_sigs_nochange(True, False, True, 3)
         # fast forward until 0.5 days before cleanup is expected, recovered sig should still be valid
-        self.bump_mocktime(recsig_time + int(60 * 60 * 24 * 6.5) - self.mocktime)
+        self.bump_mocktime(recsig_time + int(60 * 60 * 24 * 6.5) - self.mocktime, update_schedulers=False)
         # Cleanup starts every 5 seconds
         wait_for_sigs(True, False, True, 15)
         # fast forward 1 day, recovered sig should not be valid anymore
-        self.bump_mocktime(int(60 * 60 * 24 * 1))
+        self.bump_mocktime(int(60 * 60 * 24 * 1), update_schedulers=False)
         # Cleanup starts every 5 seconds
         wait_for_sigs(False, False, False, 15)
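The first bump above passes a delta computed from an absolute target: since bump_mocktime() adds its argument to self.mocktime, subtracting the current mock time lands the clock exactly on the target. A standalone sketch with made-up numbers:

    # Illustrative numbers only; recsig_time is roughly when the recovered sig appeared in the test.
    recsig_time = 1_724_900_000
    mocktime = recsig_time + 3_600                   # pretend the mock clock is an hour past that
    target = recsig_time + int(60 * 60 * 24 * 6.5)   # 6.5 days after the recovered sig
    delta = target - mocktime                        # the expression passed to bump_mocktime(...)
    mocktime += delta                                # what bump_mocktime() does with it
    assert mocktime == target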


@@ -243,7 +243,7 @@ class MnehfTest(DashTestFramework):
         assert ehf_tx_duplicate in node.getrawmempool() and ehf_tx_duplicate not in block['tx']
         self.log.info("Testing EHF signal with same bit but with newer start time")
-        self.bump_mocktime(int(60 * 60 * 24 * 14))
+        self.bump_mocktime(int(60 * 60 * 24 * 14), update_schedulers=False)
         node.generate(1)
         self.sync_blocks()
         self.restart_all_nodes(params=[self.mocktime, self.mocktime + 1000000])


@@ -326,6 +326,8 @@ class AddrTest(BitcoinTestFramework):
         self.restart_node(0, [])
         for conn_type, no_relay in [("outbound-full-relay", False), ("block-relay-only", True), ("inbound", False)]:
+            # Advance the time by 5 * 60 seconds, permitting syncing from the same peer.
+            self.bump_mocktime(5 * 60)
             self.log.info(f'Test rate limiting of addr processing for {conn_type} peers')
             if conn_type == "inbound":
                 peer = self.nodes[0].add_p2p_connection(AddrReceiver())


@@ -455,7 +455,7 @@ class BitcoinTestFramework(metaclass=BitcoinTestMetaClass):
         # must have a timestamp not too old (see IsInitialBlockDownload()).
         if not self.disable_mocktime:
             self.log.debug('Generate a block with current mocktime')
-            self.bump_mocktime(156 * 200)
+            self.bump_mocktime(156 * 200, update_schedulers=False)
             block_hash = self.nodes[0].generate(1)[0]
             block = self.nodes[0].getblock(blockhash=block_hash, verbosity=0)
             for n in self.nodes:
@@ -813,13 +813,24 @@ class BitcoinTestFramework(metaclass=BitcoinTestMetaClass):
         self.sync_blocks(nodes)
         self.sync_mempools(nodes)

-    def bump_mocktime(self, t, update_nodes=True, nodes=None):
+    def bump_mocktime(self, t, update_nodes=True, nodes=None, update_schedulers=True):
         if self.mocktime == 0:
             return
         self.mocktime += t
-        if update_nodes:
-            set_node_times(nodes or self.nodes, self.mocktime)
+        if not update_nodes:
+            return
+        nodes_to_update = nodes or self.nodes
+        set_node_times(nodes_to_update, self.mocktime)
+        if not update_schedulers:
+            return
+        for node in nodes_to_update:
+            if node.version_is_at_least(180100):
+                node.mockscheduler(t)

     def _initialize_mocktime(self, is_genesis):
         if is_genesis:
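For reference, a self-contained sketch of the new control flow using stub nodes (not the real framework: set_node_times() is inlined as per-node setmocktime calls, and the version gate is copied from the hunk above):

    # Stub-node sketch of the new bump_mocktime() control flow; runnable on its own.
    class StubNode:
        def __init__(self, version):
            self.version = version
            self.calls = []
        def version_is_at_least(self, v):
            return self.version >= v
        def setmocktime(self, ts):
            self.calls.append(("setmocktime", ts))
        def mockscheduler(self, delta):
            self.calls.append(("mockscheduler", delta))

    def bump_mocktime(mocktime, t, nodes, update_nodes=True, update_schedulers=True):
        if mocktime == 0:
            return mocktime                        # mocktime disabled: do nothing
        mocktime += t
        if not update_nodes:
            return mocktime
        for node in nodes:
            node.setmocktime(mocktime)             # what set_node_times() does in the framework
        if not update_schedulers:
            return mocktime
        for node in nodes:
            if node.version_is_at_least(180100):   # same gate as in the diff
                node.mockscheduler(t)              # new: fast-forward the node's scheduler too
        return mocktime

    nodes = [StubNode(200000), StubNode(170000)]
    new_time = bump_mocktime(1_724_900_000, 300, nodes)
    assert nodes[0].calls == [("setmocktime", 1_724_900_300), ("mockscheduler", 300)]
    assert nodes[1].calls == [("setmocktime", 1_724_900_300)]  # too old for the scheduler bump

The real helper mutates self.mocktime and uses set_node_times(); the sketch returns the new value instead so it can run standalone.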
@@ -913,7 +924,7 @@ class BitcoinTestFramework(metaclass=BitcoinTestMetaClass):
         gen_addresses = [k.address for k in TestNode.PRIV_KEYS][:3] + [ADDRESS_BCRT1_P2SH_OP_TRUE]
         assert_equal(len(gen_addresses), 4)
         for i in range(8):
-            self.bump_mocktime((25 if i != 7 else 24) * 156)
+            self.bump_mocktime((25 if i != 7 else 24) * 156, update_schedulers=False)
             cache_node.generatetoaddress(
                 nblocks=25 if i != 7 else 24,
                 address=gen_addresses[i % len(gen_addresses)],
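The cache loop above advances the mock clock by 156 seconds per generated block; a quick standalone check of the totals implied by the loop (the per-block step is read off the bump expression):

    # Totals of the cache-generation loop above (illustrative check, not part of the test).
    blocks_per_batch = [25 if i != 7 else 24 for i in range(8)]
    assert sum(blocks_per_batch) == 199                           # 7 * 25 + 24 cached blocks
    assert sum(n * 156 for n in blocks_per_batch) == 199 * 156    # 156 s of mock time per block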
@@ -1132,13 +1143,13 @@ class DashTestFramework(BitcoinTestFramework):
         # NOTE: getblockchaininfo shows softforks active at block (window * 3 - 1)
         # since it's returning whether a softfork is active for the _next_ block.
         # Hence the last block prior to the activation is (expected_activation_height - 2).
-        while expected_activation_height - height - 2 >= batch_size:
+        while expected_activation_height - height - 2 > batch_size:
             self.bump_mocktime(batch_size)
             self.nodes[0].generate(batch_size)
             height += batch_size
             self.sync_blocks()
         blocks_left = expected_activation_height - height - 2
-        assert blocks_left < batch_size
+        assert blocks_left <= batch_size
         self.bump_mocktime(blocks_left)
         self.nodes[0].generate(blocks_left)
         self.sync_blocks()
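The relaxed comparisons above matter when the remaining distance to (expected_activation_height - 2) is exactly one batch: with the old '>=' the loop consumes it and leaves a zero-block final step, while the new '>' leaves one last full batch. A standalone boundary check with illustrative numbers:

    # Simulate the batching loop above for both comparison operators (numbers are illustrative).
    def final_blocks_left(activation_height, height, batch_size, strict_greater):
        while True:
            remaining = activation_height - height - 2
            keep_batching = remaining > batch_size if strict_greater else remaining >= batch_size
            if not keep_batching:
                return remaining
            height += batch_size

    # Distance to (activation_height - 2) is exactly one batch:
    assert final_blocks_left(1000, 758, 240, strict_greater=False) == 0    # old code: generate(0) at the end
    assert final_blocks_left(1000, 758, 240, strict_greater=True) == 240   # new code: one real final batch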


@@ -23,7 +23,7 @@ class CreateTxWalletTest(BitcoinTestFramework):
     def test_anti_fee_sniping(self):
         self.log.info('Check that we have some (old) blocks and that anti-fee-sniping is disabled')
-        self.bump_mocktime(8 * 60 * 60 + 1)
+        self.bump_mocktime(8 * 60 * 60 + 1, update_schedulers=False)
         assert_equal(self.nodes[0].getblockchaininfo()['blocks'], 200)
         txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1)
         tx = self.nodes[0].decoderawtransaction(self.nodes[0].gettransaction(txid)['hex'])