Mirror of https://github.com/dashpay/dash.git
Speed up integration tests with masternodes (#2642)
* Implement copy_datadir to allow easy copying of state from one node to another
* Instead of starting with a fresh datadir for MNs, reuse a copy of the faucet
* Start masternodes in parallel instead of waiting for the previous to finish
* Allow specifying of window and threshold with -bip9params
* Implement -dip3activationheight for regtests
* Implement fast DIP3 activation in DashTestFramework
* Speed up activation of DIP3 in dip3-deterministicmns.py
* Update qa/rpc-tests/test_framework/test_framework.py

Co-Authored-By: codablock <ablock84@gmail.com>

* Always assign fast_dip3_activation
parent fda16f1fea
commit 7ee31cbd65
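For orientation, a minimal sketch (not part of the commit) of how a test opts into the speed-up: the file and class name are hypothetical, but the constructor signature and the regtest arguments it implies are taken from the hunks below.

#!/usr/bin/env python3
# Hypothetical example test; mirrors what LLMQSigningTest and the InstantSend
# tests do in the hunks below.
from test_framework.test_framework import DashTestFramework


class ExampleFastDIP3Test(DashTestFramework):
    def __init__(self):
        # With fast_dip3_activation=True the framework appends
        # "-bip9params=dip0003:0:999999999999:10:5" and "-dip3activationheight=50"
        # to extra_args, so DIP3 activates after a handful of blocks instead of ~500.
        super().__init__(11, 10, [], fast_dip3_activation=True)

    def run_test(self):
        pass  # masternodes are already set up by setup_network()


if __name__ == '__main__':
    ExampleFastDIP3Test().main()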
@@ -23,7 +23,7 @@ AUTO_IX_MEM_THRESHOLD = 0.1

 class AutoIXMempoolTest(DashTestFramework):
     def __init__(self):
-        super().__init__(13, 10, ["-maxmempool=%d" % MAX_MEMPOOL_SIZE, '-limitdescendantsize=10'])
+        super().__init__(13, 10, ["-maxmempool=%d" % MAX_MEMPOOL_SIZE, '-limitdescendantsize=10'], fast_dip3_activation=True)
         # set sender, receiver
         self.receiver_idx = self.num_nodes - 2
         self.sender_idx = self.num_nodes - 3

@@ -22,8 +22,9 @@ class DIP3Test(BitcoinTestFramework):
         self.num_nodes = 1 + self.num_initial_mn + 2 # +1 for controller, +1 for mn-qt, +1 for mn created after dip3 activation
         self.setup_clean_chain = True

-        self.extra_args = ["-budgetparams=240:100:240"]
+        self.extra_args = ["-budgetparams=10:10:10"]
         self.extra_args += ["-sporkkey=cP4EKFyJsHT39LDqgdcB43Y3YXjNyjb5Fuas1GQSeAtjnZWmZEQK"]
+        self.extra_args += ["-bip9params=dip0003:0:999999999999:45:45", "-dip3activationheight=150"]

     def setup_network(self):
         disable_mocktime()

@@ -56,9 +57,9 @@ class DIP3Test(BitcoinTestFramework):
         self.nodes[0].generate(1) # generate enough for collaterals
         print("controller node has {} dash".format(self.nodes[0].getbalance()))

-        # Make sure we're below block 432 (which activates dip3)
+        # Make sure we're below block 135 (which activates dip3)
         print("testing rejection of ProTx before dip3 activation")
-        assert(self.nodes[0].getblockchaininfo()['blocks'] < 432)
+        assert(self.nodes[0].getblockchaininfo()['blocks'] < 135)

         mns = []

@@ -69,7 +70,7 @@ class DIP3Test(BitcoinTestFramework):
         mns.append(before_dip3_mn)

         # block 500 starts enforcing DIP3 MN payments
-        while self.nodes[0].getblockcount() < 498:
+        while self.nodes[0].getblockcount() < 150:
             self.nodes[0].generate(1)

         print("mining final block for DIP3 activation")

@@ -17,7 +17,7 @@ Checks LLMQs signing sessions

 class LLMQSigningTest(DashTestFramework):
     def __init__(self):
-        super().__init__(11, 10, [])
+        super().__init__(11, 10, [], fast_dip3_activation=True)

     def run_test(self):

@@ -23,7 +23,7 @@ transactions with high fee.

 class AutoInstantSendTest(DashTestFramework):
     def __init__(self):
-        super().__init__(14, 10, [])
+        super().__init__(14, 10, [], fast_dip3_activation=True)
         # set sender, receiver, isolated nodes
         self.isolated_idx = self.num_nodes - 1
         self.receiver_idx = self.num_nodes - 2

@@ -14,7 +14,7 @@ InstantSendTest -- test InstantSend functionality (prevent doublespend for uncon

 class InstantSendTest(DashTestFramework):
     def __init__(self):
-        super().__init__(14, 10, [])
+        super().__init__(14, 10, [], fast_dip3_activation=True)
         # set sender, receiver, isolated nodes
         self.isolated_idx = self.num_nodes - 1
         self.receiver_idx = self.num_nodes - 2

@@ -55,7 +55,7 @@ class InstantSendTest(DashTestFramework):
         # start last node
         self.nodes[self.isolated_idx] = start_node(self.isolated_idx,
                                                    self.options.tmpdir,
-                                                   ["-debug"])
+                                                   ["-debug"] + self.extra_args)
         # send doublespend transaction to isolated node
         self.nodes[self.isolated_idx].sendrawtransaction(dblspnd_tx['hex'])
         # generate block on isolated node with doublespend transaction

@@ -11,6 +11,7 @@ import sys
 import shutil
 import tempfile
 import traceback
+from concurrent.futures import ThreadPoolExecutor
 from time import time, sleep

 from .util import (

@@ -35,7 +36,8 @@ from .util import (
     set_node_times,
     p2p_port,
     satoshi_round,
-    wait_to_sync)
+    wait_to_sync,
+    copy_datadir)
 from .authproxy import JSONRPCException

@@ -218,7 +220,7 @@ class MasternodeInfo:


 class DashTestFramework(BitcoinTestFramework):
-    def __init__(self, num_nodes, masterodes_count, extra_args):
+    def __init__(self, num_nodes, masterodes_count, extra_args, fast_dip3_activation=False):
         super().__init__()
         self.mn_count = masterodes_count
         self.num_nodes = num_nodes

@@ -228,6 +230,12 @@ class DashTestFramework(BitcoinTestFramework):
         # additional args
         self.extra_args = extra_args

+        self.extra_args += ["-sporkkey=cP4EKFyJsHT39LDqgdcB43Y3YXjNyjb5Fuas1GQSeAtjnZWmZEQK"]
+
+        self.fast_dip3_activation = fast_dip3_activation
+        if fast_dip3_activation:
+            self.extra_args += ["-bip9params=dip0003:0:999999999999:10:5", "-dip3activationheight=50"]
+
     def create_simple_node(self):
         idx = len(self.nodes)
         args = self.extra_args

@@ -268,24 +276,64 @@ class DashTestFramework(BitcoinTestFramework):
         self.mninfo.append(MasternodeInfo(proTxHash, ownerAddr, votingAddr, bls['public'], bls['secret'], address, txid, collateral_vout))
         self.sync_all()

-    def start_masternodes(self):
+    def prepare_datadirs(self):
+        # stop faucet node so that we can copy the datadir
+        stop_node(self.nodes[0], 0)
+
+        start_idx = len(self.nodes)
+        for idx in range(0, self.mn_count):
+            copy_datadir(0, idx + start_idx, self.options.tmpdir)
+
+        # restart faucet node
+        self.nodes[0] = start_node(0, self.options.tmpdir, self.extra_args)
+
+    def start_masternodes(self):
         start_idx = len(self.nodes)
+
         for idx in range(0, self.mn_count):
+            self.nodes.append(None)
+        executor = ThreadPoolExecutor(max_workers=20)
+
+        def do_start(idx):
             args = ['-masternode=1',
                     '-masternodeblsprivkey=%s' % self.mninfo[idx].keyOperator] + self.extra_args
             node = start_node(idx + start_idx, self.options.tmpdir, args)
             self.mninfo[idx].node = node
-            self.nodes.append(node)
+            self.nodes[idx + start_idx] = node
+            wait_to_sync(node, True)
+
+        def do_connect(idx):
             for i in range(0, idx + 1):
                 connect_nodes(self.nodes[idx + start_idx], i)
-            wait_to_sync(node, True)
+
+        jobs = []
+
+        # start up nodes in parallel
+        for idx in range(0, self.mn_count):
+            jobs.append(executor.submit(do_start, idx))
+
+        # wait for all nodes to start up
+        for job in jobs:
+            job.result()
+        jobs.clear()
+
+        # connect nodes in parallel
+        for idx in range(0, self.mn_count):
+            jobs.append(executor.submit(do_connect, idx))
+
+        # wait for all nodes to connect
+        for job in jobs:
+            job.result()
+        jobs.clear()
+
         sync_masternodes(self.nodes, True)

+        executor.shutdown()
+
     def setup_network(self):
         self.nodes = []
         # create faucet node for collateral and transactions
-        args = ["-sporkkey=cP4EKFyJsHT39LDqgdcB43Y3YXjNyjb5Fuas1GQSeAtjnZWmZEQK"] + self.extra_args
-        self.nodes.append(start_node(0, self.options.tmpdir, args))
+        self.nodes.append(start_node(0, self.options.tmpdir, self.extra_args))
         required_balance = MASTERNODE_COLLATERAL * self.mn_count + 1
         while self.nodes[0].getbalance() < required_balance:
             set_mocktime(get_mocktime() + 1)

@@ -297,12 +345,14 @@ class DashTestFramework(BitcoinTestFramework):
         sync_masternodes(self.nodes, True)

         # activate DIP3
-        while self.nodes[0].getblockcount() < 500:
-            self.nodes[0].generate(10)
+        if not self.fast_dip3_activation:
+            while self.nodes[0].getblockcount() < 500:
+                self.nodes[0].generate(10)
         self.sync_all()

         # create masternodes
         self.prepare_masternodes()
+        self.prepare_datadirs()
         self.start_masternodes()

         set_mocktime(get_mocktime() + 1)

@@ -400,6 +400,21 @@ def start_nodes(num_nodes, dirname, extra_args=None, rpchost=None, timewait=None
         raise
     return rpcs


+def copy_datadir(from_node, to_node, dirname):
+    from_datadir = os.path.join(dirname, "node"+str(from_node), "regtest")
+    to_datadir = os.path.join(dirname, "node"+str(to_node), "regtest")
+
+    dirs = ["blocks", "chainstate", "evodb", "llmq"]
+    for d in dirs:
+        try:
+            src = os.path.join(from_datadir, d)
+            dst = os.path.join(to_datadir, d)
+            shutil.copytree(src, dst)
+        except:
+            pass
+
+
 def log_filename(dirname, n_node, logname):
     return os.path.join(dirname, "node"+str(n_node), "regtest", logname)

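A hedged usage sketch of the new helper (the wrapper function clone_mn_datadirs is hypothetical): the source node has to be stopped before its datadir is copied and restarted afterwards, which is the pattern prepare_datadirs follows in the test_framework hunk above.

# Illustration only; assumes the qa/rpc-tests test_framework utilities above.
from test_framework.util import copy_datadir, start_node, stop_node

def clone_mn_datadirs(nodes, mn_count, tmpdir, extra_args):
    # the faucet (node 0) must be stopped so its datadir is consistent on disk
    stop_node(nodes[0], 0)

    start_idx = len(nodes)
    for idx in range(0, mn_count):
        # copies blocks/, chainstate/, evodb/ and llmq/ into node<idx+start_idx>'s datadir
        copy_datadir(0, idx + start_idx, tmpdir)

    # bring the faucet back with the same arguments it was started with
    nodes[0] = start_node(0, tmpdir, extra_args)
    return nodes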
@@ -789,10 +789,21 @@ public:
         consensus.llmqs[Consensus::LLMQ_50_60] = llmq50_60;
     }

-    void UpdateBIP9Parameters(Consensus::DeploymentPos d, int64_t nStartTime, int64_t nTimeout)
+    void UpdateBIP9Parameters(Consensus::DeploymentPos d, int64_t nStartTime, int64_t nTimeout, int64_t nWindowSize, int64_t nThreshold)
     {
         consensus.vDeployments[d].nStartTime = nStartTime;
         consensus.vDeployments[d].nTimeout = nTimeout;
+        if (nWindowSize != -1) {
+            consensus.vDeployments[d].nWindowSize = nWindowSize;
+        }
+        if (nThreshold != -1) {
+            consensus.vDeployments[d].nThreshold = nThreshold;
+        }
     }

+    void UpdateDIP3ActivationHeight(int nHeight)
+    {
+        consensus.DIP0003Height = nHeight;
+    }
+
     void UpdateBudgetParameters(int nMasternodePaymentsStartBlock, int nBudgetPaymentsStartBlock, int nSuperblockStartBlock)

@@ -838,9 +849,14 @@ void SelectParams(const std::string& network)
     pCurrentParams = &Params(network);
 }

-void UpdateRegtestBIP9Parameters(Consensus::DeploymentPos d, int64_t nStartTime, int64_t nTimeout)
+void UpdateRegtestBIP9Parameters(Consensus::DeploymentPos d, int64_t nStartTime, int64_t nTimeout, int64_t nWindowSize, int64_t nThreshold)
 {
-    regTestParams.UpdateBIP9Parameters(d, nStartTime, nTimeout);
+    regTestParams.UpdateBIP9Parameters(d, nStartTime, nTimeout, nWindowSize, nThreshold);
 }

+void UpdateRegtestDIP3ActivationHeight(int nHeight)
+{
+    regTestParams.UpdateDIP3ActivationHeight(nHeight);
+}
+
 void UpdateRegtestBudgetParameters(int nMasternodePaymentsStartBlock, int nBudgetPaymentsStartBlock, int nSuperblockStartBlock)

@@ -145,7 +145,12 @@ void SelectParams(const std::string& chain);
 /**
  * Allows modifying the BIP9 regtest parameters.
  */
-void UpdateRegtestBIP9Parameters(Consensus::DeploymentPos d, int64_t nStartTime, int64_t nTimeout);
+void UpdateRegtestBIP9Parameters(Consensus::DeploymentPos d, int64_t nStartTime, int64_t nTimeout, int64_t nWindowSize, int64_t nThreshold);
+
+/**
+ * Allows modifying the DIP3 activation height
+ */
+void UpdateRegtestDIP3ActivationHeight(int nHeight);

 /**
  * Allows modifying the budget regtest parameters.

src/init.cpp
@@ -543,7 +543,7 @@ std::string HelpMessage(HelpMessageMode mode)
         strUsage += HelpMessageOpt("-limitancestorsize=<n>", strprintf("Do not accept transactions whose size with all in-mempool ancestors exceeds <n> kilobytes (default: %u)", DEFAULT_ANCESTOR_SIZE_LIMIT));
         strUsage += HelpMessageOpt("-limitdescendantcount=<n>", strprintf("Do not accept transactions if any ancestor would have <n> or more in-mempool descendants (default: %u)", DEFAULT_DESCENDANT_LIMIT));
         strUsage += HelpMessageOpt("-limitdescendantsize=<n>", strprintf("Do not accept transactions if any ancestor would have more than <n> kilobytes of in-mempool descendants (default: %u).", DEFAULT_DESCENDANT_SIZE_LIMIT));
-        strUsage += HelpMessageOpt("-bip9params=<deployment>:<start>:<end>", "Use given start/end times for specified BIP9 deployment (regtest-only)");
+        strUsage += HelpMessageOpt("-bip9params=<deployment>:<start>:<end>(:<window>:<threshold>)", "Use given start/end times for specified BIP9 deployment (regtest-only). Specifying window and threshold is optional.");
         strUsage += HelpMessageOpt("-watchquorums=<n>", strprintf("Watch and validate quorum communication (default: %u)", llmq::DEFAULT_WATCH_QUORUMS));
     }
     std::string debugCategories = "addrman, alert, bench, cmpctblock, coindb, db, http, leveldb, libevent, lock, mempool, mempoolrej, net, proxy, prune, rand, reindex, rpc, selectcoins, tor, zmq, "

@@ -1290,23 +1290,31 @@ bool AppInitParameterInteraction()
         for (auto i : deployments) {
             std::vector<std::string> vDeploymentParams;
             boost::split(vDeploymentParams, i, boost::is_any_of(":"));
-            if (vDeploymentParams.size() != 3) {
-                return InitError("BIP9 parameters malformed, expecting deployment:start:end");
+            if (vDeploymentParams.size() != 3 && vDeploymentParams.size() != 5) {
+                return InitError("BIP9 parameters malformed, expecting deployment:start:end or deployment:start:end:window:threshold");
             }
-            int64_t nStartTime, nTimeout;
+            int64_t nStartTime, nTimeout, nWindowSize = -1, nThreshold = -1;
             if (!ParseInt64(vDeploymentParams[1], &nStartTime)) {
                 return InitError(strprintf("Invalid nStartTime (%s)", vDeploymentParams[1]));
             }
             if (!ParseInt64(vDeploymentParams[2], &nTimeout)) {
                 return InitError(strprintf("Invalid nTimeout (%s)", vDeploymentParams[2]));
             }
+            if (vDeploymentParams.size() == 5) {
+                if (!ParseInt64(vDeploymentParams[3], &nWindowSize)) {
+                    return InitError(strprintf("Invalid nWindowSize (%s)", vDeploymentParams[3]));
+                }
+                if (!ParseInt64(vDeploymentParams[4], &nThreshold)) {
+                    return InitError(strprintf("Invalid nThreshold (%s)", vDeploymentParams[4]));
+                }
+            }
             bool found = false;
             for (int j=0; j<(int)Consensus::MAX_VERSION_BITS_DEPLOYMENTS; ++j)
             {
                 if (vDeploymentParams[0].compare(VersionBitsDeploymentInfo[j].name) == 0) {
-                    UpdateRegtestBIP9Parameters(Consensus::DeploymentPos(j), nStartTime, nTimeout);
+                    UpdateRegtestBIP9Parameters(Consensus::DeploymentPos(j), nStartTime, nTimeout, nWindowSize, nThreshold);
                     found = true;
-                    LogPrintf("Setting BIP9 activation parameters for %s to start=%ld, timeout=%ld\n", vDeploymentParams[0], nStartTime, nTimeout);
+                    LogPrintf("Setting BIP9 activation parameters for %s to start=%ld, timeout=%ld, window=%ld, threshold=%ld\n", vDeploymentParams[0], nStartTime, nTimeout, nWindowSize, nThreshold);
                     break;
                 }
             }

@@ -1316,6 +1324,10 @@ bool AppInitParameterInteraction()
         }
     }

+    if (IsArgSet("-dip3activationheight")) {
+        UpdateRegtestDIP3ActivationHeight(GetArg("-dip3activationheight", 0));
+    }
+
     if (IsArgSet("-budgetparams")) {
         // Allow overriding budget parameters for testing
         if (!chainparams.MineBlocksOnDemand()) {

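To tie the two new regtest options together, a small illustrative sketch (not from the commit) of the argument format the parsing above accepts; the dip0003 values are the ones DashTestFramework passes when fast_dip3_activation is enabled, and the helper function is hypothetical.

# Illustration only: mirrors the C++ parsing above, outside of dashd.
# A value of -1 for window/threshold means "keep the chain's defaults".
def parse_bip9params(value):
    parts = value.split(":")
    if len(parts) not in (3, 5):
        raise ValueError("expecting deployment:start:end or deployment:start:end:window:threshold")
    deployment = parts[0]
    start_time, timeout = int(parts[1]), int(parts[2])
    window = int(parts[3]) if len(parts) == 5 else -1
    threshold = int(parts[4]) if len(parts) == 5 else -1
    return deployment, start_time, timeout, window, threshold

# the argument the test framework uses for fast DIP3 activation:
print(parse_bip9params("dip0003:0:999999999999:10:5"))  # ('dip0003', 0, 999999999999, 10, 5)
# -dip3activationheight=50 then simply sets consensus.DIP0003Height on regtest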