mirror of https://github.com/dashpay/dash.git (synced 2024-12-27 04:52:59 +01:00)

[qa] pruning: Use cached utxo set to run faster

This commit is contained in:
parent 9e4bb312e6
commit fa2ecc48fb
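The diff below threads a caller-owned utxo list through the qa test helpers so that mine_large_block no longer has to call listunspent on every invocation, which gets slow once the chain holds hundreds of large blocks. A minimal sketch of the resulting call pattern (the node handle and loop count are illustrative placeholders, not part of the diff):

    # Sketch only: `node` and the loop count are placeholders.
    # Assumes the helpers from test_framework.util are in scope.
    utxo_cache = []                      # owned by the test, starts empty
    for _ in range(10):
        # mine_large_block refills the cache from listunspent only when it
        # runs low, then spends cached utxos to build each large block
        mine_large_block(node, utxo_cache)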
@@ -26,7 +26,7 @@ class MempoolLimitTest(BitcoinTestFramework):

     def run_test(self):
         txids = []
-        utxos = create_confirmed_utxos(self.relayfee, self.nodes[0], 90)
+        utxos = create_confirmed_utxos(self.relayfee, self.nodes[0], 91)

         #create a mempool tx that will be evicted
         us0 = utxos.pop()
@@ -41,9 +41,9 @@ class MempoolLimitTest(BitcoinTestFramework):

         relayfee = self.nodes[0].getnetworkinfo()['relayfee']
         base_fee = relayfee*100
-        for i in range (4):
+        for i in range (3):
             txids.append([])
-            txids[i] = create_lots_of_big_transactions(self.nodes[0], self.txouts, utxos[30*i:30*i+30], (i+1)*base_fee)
+            txids[i] = create_lots_of_big_transactions(self.nodes[0], self.txouts, utxos[30*i:30*i+30], 30, (i+1)*base_fee)

         # by now, the tx should be evicted, check confirmation state
         assert(txid not in self.nodes[0].getrawmempool())
@@ -39,7 +39,7 @@ class PrioritiseTransactionTest(BitcoinTestFramework):
             txids.append([])
             start_range = i * range_size
             end_range = start_range + range_size
-            txids[i] = create_lots_of_big_transactions(self.nodes[0], self.txouts, utxos[start_range:end_range], (i+1)*base_fee)
+            txids[i] = create_lots_of_big_transactions(self.nodes[0], self.txouts, utxos[start_range:end_range], end_range - start_range, (i+1)*base_fee)

         # Make sure that the size of each group of transactions exceeds
         # MAX_BLOCK_BASE_SIZE -- otherwise the test needs to be revised to create
@@ -13,6 +13,9 @@

 from test_framework.test_framework import BitcoinTestFramework
 from test_framework.util import *
+import time
+import os
+

 def calc_usage(blockdir):
     return sum(os.path.getsize(blockdir+f) for f in os.listdir(blockdir) if os.path.isfile(blockdir+f)) / (1024. * 1024.)
@@ -24,6 +27,10 @@ class PruneTest(BitcoinTestFramework):
         self.setup_clean_chain = True
         self.num_nodes = 3

+        # Cache for utxos, as the listunspent may take a long time later in the test
+        self.utxo_cache_0 = []
+        self.utxo_cache_1 = []
+
     def setup_network(self):
         self.nodes = []
         self.is_network_split = False
@@ -48,7 +55,7 @@ class PruneTest(BitcoinTestFramework):
         self.nodes[0].generate(150)
         # Then mine enough full blocks to create more than 550MiB of data
         for i in range(645):
-            mine_large_block(self.nodes[0])
+            mine_large_block(self.nodes[0], self.utxo_cache_0)

         sync_blocks(self.nodes[0:3])

@@ -60,7 +67,7 @@ class PruneTest(BitcoinTestFramework):
         print("Mining 25 more blocks should cause the first block file to be pruned")
         # Pruning doesn't run until we're allocating another chunk, 20 full blocks past the height cutoff will ensure this
         for i in range(25):
-            mine_large_block(self.nodes[0])
+            mine_large_block(self.nodes[0], self.utxo_cache_0)

         waitstart = time.time()
         while os.path.isfile(self.prunedir+"blk00000.dat"):
@@ -87,13 +94,13 @@ class PruneTest(BitcoinTestFramework):
             # Mine 24 blocks in node 1
             for i in range(24):
                 if j == 0:
-                    mine_large_block(self.nodes[1])
+                    mine_large_block(self.nodes[1], self.utxo_cache_1)
                 else:
                     self.nodes[1].generate(1) #tx's already in mempool from previous disconnects

             # Reorg back with 25 block chain from node 0
             for i in range(25):
-                mine_large_block(self.nodes[0])
+                mine_large_block(self.nodes[0], self.utxo_cache_0)

             # Create connections in the order so both nodes can see the reorg at the same time
             connect_nodes(self.nodes[1], 0)
@@ -654,10 +654,10 @@ def create_tx(node, coinbase, to_address, amount):

 # Create a spend of each passed-in utxo, splicing in "txouts" to each raw
 # transaction to make it large.  See gen_return_txouts() above.
-def create_lots_of_big_transactions(node, txouts, utxos, fee):
+def create_lots_of_big_transactions(node, txouts, utxos, num, fee):
     addr = node.getnewaddress()
     txids = []
-    for _ in range(len(utxos)):
+    for _ in range(num):
         t = utxos.pop()
         inputs=[{ "txid" : t["txid"], "vout" : t["vout"]}]
         outputs = {}
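With the new num argument the helper no longer has to spend every utxo it is handed: callers can pass a long-lived cached list and ask for a fixed number of transactions, leaving the rest of the cache intact for later calls. A hedged example of the updated call (node, txouts, utxos and base_fee are placeholders, not values from the diff):

    # Sketch only: builds exactly 30 large transactions from the cached utxo
    # list instead of consuming the whole list that was passed in.
    txids = create_lots_of_big_transactions(node, txouts, utxos, 30, 10 * base_fee)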
@@ -672,13 +672,17 @@ def create_lots_of_big_transactions(node, txouts, utxos, fee):
         txids.append(txid)
     return txids

-def mine_large_block(node):
+def mine_large_block(node, utxos=None):
     # generate a 66k transaction,
     # and 14 of them is close to the 1MB block limit
+    num = 14
     txouts = gen_return_txouts()
-    utxos = node.listunspent()[:14]
+    utxos = utxos if utxos is not None else []
+    if len(utxos) < num:
+        utxos.clear()
+        utxos.extend(node.listunspent())
     fee = 100 * node.getnetworkinfo()["relayfee"]
-    create_lots_of_big_transactions(node, txouts, utxos, fee=fee)
+    create_lots_of_big_transactions(node, txouts, utxos, num, fee=fee)
     node.generate(1)

 def get_bip9_status(node, key):
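For readability, here is mine_large_block as it reads once the hunk above is applied; the comments beyond the original two are added here for explanation and are not part of the commit:

    def mine_large_block(node, utxos=None):
        # generate a 66k transaction,
        # and 14 of them is close to the 1MB block limit
        num = 14
        txouts = gen_return_txouts()
        # Use the caller-provided cache if there is one; otherwise start from
        # an empty list and fall through to the refill below.
        utxos = utxos if utxos is not None else []
        if len(utxos) < num:
            # Refill in place so a caller-owned list keeps its contents for
            # later calls instead of being rebuilt from listunspent each time.
            utxos.clear()
            utxos.extend(node.listunspent())
        fee = 100 * node.getnetworkinfo()["relayfee"]
        create_lots_of_big_transactions(node, txouts, utxos, num, fee=fee)
        node.generate(1)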