Merge pull request #3227 from codablock/pr_v14_backports

[v0.14.0.x] Backport pending PRs
UdjinM6 2019-12-07 13:53:00 +03:00 committed by GitHub
commit 3998c3191f
23 changed files with 378 additions and 60 deletions

View File

@@ -48,6 +48,7 @@ BASE_SCRIPTS= [
     'llmq-chainlocks.py', # NOTE: needs dash_hash to pass
     'llmq-simplepose.py', # NOTE: needs dash_hash to pass
     'llmq-is-cl-conflicts.py', # NOTE: needs dash_hash to pass
+    'llmq-is-retroactive.py', # NOTE: needs dash_hash to pass
     'llmq-dkgerrors.py', # NOTE: needs dash_hash to pass
     'dip4-coinbasemerkleroots.py', # NOTE: needs dash_hash to pass
     # vv Tests less than 60s vv

View File

@@ -23,7 +23,7 @@ AUTO_IX_MEM_THRESHOLD = 0.1
 class AutoISMempoolTest(DashTestFramework):
     def __init__(self):
-        super().__init__(8, 5, ["-maxmempool=%d" % MAX_MEMPOOL_SIZE, '-limitdescendantsize=10'], fast_dip3_enforcement=True)
+        super().__init__(8, 5, [["-maxmempool=%d" % MAX_MEMPOOL_SIZE, '-limitdescendantsize=10']] * 8, fast_dip3_enforcement=True)
         # set sender, receiver
         self.receiver_idx = 1
         self.sender_idx = 2

View File

@@ -39,7 +39,7 @@ class TestNode(SingleNodeConnCB):
 class LLMQCoinbaseCommitmentsTest(DashTestFramework):
     def __init__(self):
-        super().__init__(6, 5, [], fast_dip3_enforcement=True)
+        super().__init__(6, 5, fast_dip3_enforcement=True)

     def run_test(self):
         self.test_node = TestNode()

View File

@@ -17,7 +17,7 @@ Checks LLMQs based ChainLocks
 class LLMQChainLocksTest(DashTestFramework):
     def __init__(self):
-        super().__init__(6, 5, [], fast_dip3_enforcement=True)
+        super().__init__(6, 5, fast_dip3_enforcement=True)

     def run_test(self):
@@ -72,7 +72,7 @@ class LLMQChainLocksTest(DashTestFramework):
         good_tip = self.nodes[0].getbestblockhash()
         # Restart it so that it forgets all the chainlocks from the past
         stop_node(self.nodes[0], 0)
-        self.nodes[0] = start_node(0, self.options.tmpdir, self.extra_args)
+        self.nodes[0] = start_node(0, self.options.tmpdir, self.extra_args[0])
         connect_nodes(self.nodes[0], 1)
         self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
         # Now try to reorg the chain

View File

@@ -15,7 +15,7 @@ Simulate and check DKG errors
 class LLMQDKGErrors(DashTestFramework):
     def __init__(self):
-        super().__init__(6, 5, [], fast_dip3_enforcement=True)
+        super().__init__(6, 5, fast_dip3_enforcement=True)

     def run_test(self):

View File

@@ -45,7 +45,7 @@ class TestNode(SingleNodeConnCB):
 class LLMQ_IS_CL_Conflicts(DashTestFramework):
     def __init__(self):
-        super().__init__(6, 5, [], fast_dip3_enforcement=True)
+        super().__init__(6, 5, fast_dip3_enforcement=True)
         #disable_mocktime()

     def run_test(self):

View File

@@ -0,0 +1,179 @@
+#!/usr/bin/env python3
+# Copyright (c) 2015-2018 The Dash Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+from test_framework.mininode import *
+from test_framework.test_framework import DashTestFramework
+from test_framework.util import sync_blocks, set_node_times, \
+    isolate_node, reconnect_isolated_node, set_mocktime, get_mocktime
+
+'''
+llmq-is-retroactive.py
+
+Tests retroactive signing
+
+We have 6 nodes where node 0 is the control node, nodes 1-5 are masternodes.
+Mempool inconsistencies are simulated via disconnecting/reconnecting node 3
+and by having a higher relay fee on nodes 4 and 5.
+'''
+
+class LLMQ_IS_RetroactiveSigning(DashTestFramework):
+    def __init__(self):
+        # -whitelist is needed to avoid the trickling logic on node0
+        super().__init__(6, 5, [["-whitelist=127.0.0.1"], [], [], [], ["-minrelaytxfee=0.001"], ["-minrelaytxfee=0.001"]], fast_dip3_enforcement=True)
+
+    def run_test(self):
+        while self.nodes[0].getblockchaininfo()["bip9_softforks"]["dip0008"]["status"] != "active":
+            self.nodes[0].generate(10)
+        sync_blocks(self.nodes, timeout=60*5)
+
+        self.nodes[0].spork("SPORK_17_QUORUM_DKG_ENABLED", 0)
+        self.nodes[0].spork("SPORK_19_CHAINLOCKS_ENABLED", 0)
+        self.nodes[0].spork("SPORK_2_INSTANTSEND_ENABLED", 0)
+        self.nodes[0].spork("SPORK_3_INSTANTSEND_BLOCK_FILTERING", 0)
+        self.nodes[0].spork("SPORK_20_INSTANTSEND_LLMQ_BASED", 0)
+        self.wait_for_sporks_same()
+
+        self.mine_quorum()
+        self.mine_quorum()
+
+        # Make sure that all nodes are chainlocked at the same height before starting actual tests
+        self.wait_for_chainlocked_block_all_nodes(self.nodes[0].getbestblockhash())
+
+        self.log.info("trying normal IS lock")
+        txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1)
+        # 3 nodes should be enough to create an IS lock even if nodes 4 and 5 (which don't have the TX)
+        # are the only "neighbours" in intra-quorum connections for one of them.
+        self.wait_for_instantlock(txid, self.nodes[0], do_assert=True)
+        set_mocktime(get_mocktime() + 1)
+        set_node_times(self.nodes, get_mocktime())
+        block = self.nodes[0].generate(1)[0]
+        self.wait_for_chainlocked_block_all_nodes(block)
+
+        self.log.info("testing normal signing with partially known TX")
+        isolate_node(self.nodes[3])
+        txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1)
+        # Make sure nodes 1 and 2 received the TX before we continue,
+        # otherwise it might announce the TX to node 3 when reconnecting
+        self.wait_for_tx(txid, self.nodes[1])
+        self.wait_for_tx(txid, self.nodes[2])
+        reconnect_isolated_node(self.nodes[3], 0)
+        self.wait_for_mnauth(self.nodes[3], 2)
+        # node 3 fully reconnected but the TX wasn't relayed to it, so there should be no IS lock
+        self.wait_for_instantlock(txid, self.nodes[0], False, 5, do_assert=True)
+        # push the tx directly via rpc
+        self.nodes[3].sendrawtransaction(self.nodes[0].getrawtransaction(txid))
+        # node 3 should vote on a tx now since it became aware of it via sendrawtransaction
+        # and this should be enough to complete an IS lock
+        self.wait_for_instantlock(txid, self.nodes[0], do_assert=True)
+
+        self.log.info("testing retroactive signing with unknown TX")
+        isolate_node(self.nodes[3])
+        rawtx = self.nodes[0].createrawtransaction([], {self.nodes[0].getnewaddress(): 1})
+        rawtx = self.nodes[0].fundrawtransaction(rawtx)['hex']
+        rawtx = self.nodes[0].signrawtransaction(rawtx)['hex']
+        txid = self.nodes[3].sendrawtransaction(rawtx)
+        # Make node 3 consider the TX as safe
+        set_mocktime(get_mocktime() + 10 * 60 + 1)
+        set_node_times(self.nodes, get_mocktime())
+        block = self.nodes[3].generatetoaddress(1, self.nodes[0].getnewaddress())[0]
+        reconnect_isolated_node(self.nodes[3], 0)
+        self.wait_for_chainlocked_block_all_nodes(block)
+        self.nodes[0].setmocktime(get_mocktime())
+
+        self.log.info("testing retroactive signing with partially known TX")
+        isolate_node(self.nodes[3])
+        txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1)
+        # Make sure nodes 1 and 2 received the TX before we continue,
+        # otherwise it might announce the TX to node 3 when reconnecting
+        self.wait_for_tx(txid, self.nodes[1])
+        self.wait_for_tx(txid, self.nodes[2])
+        reconnect_isolated_node(self.nodes[3], 0)
+        self.wait_for_mnauth(self.nodes[3], 2)
+        # node 3 fully reconnected but the TX wasn't relayed to it, so there should be no IS lock
+        self.wait_for_instantlock(txid, self.nodes[0], False, 5, do_assert=True)
+        # Make node0 consider the TX as safe
+        set_mocktime(get_mocktime() + 10 * 60 + 1)
+        set_node_times(self.nodes, get_mocktime())
+        block = self.nodes[0].generate(1)[0]
+        self.wait_for_chainlocked_block_all_nodes(block)
+
+        self.log.info("testing retroactive signing with partially known TX and all nodes session timeout")
+        self.test_all_nodes_session_timeout(False)
+        self.log.info("repeating test, but with cycled LLMQs")
+        self.test_all_nodes_session_timeout(True)
+
+        self.log.info("testing retroactive signing with partially known TX and single node session timeout")
+        self.test_single_node_session_timeout(False)
+        self.log.info("repeating test, but with cycled LLMQs")
+        self.test_single_node_session_timeout(True)
+
+    def cycle_llmqs(self):
+        self.mine_quorum()
+        self.mine_quorum()
+        self.wait_for_chainlocked_block_all_nodes(self.nodes[0].getbestblockhash())
+
+    def test_all_nodes_session_timeout(self, do_cycle_llmqs):
+        set_node_times(self.nodes, get_mocktime())
+        isolate_node(self.nodes[3])
+        rawtx = self.nodes[0].createrawtransaction([], {self.nodes[0].getnewaddress(): 1})
+        rawtx = self.nodes[0].fundrawtransaction(rawtx)['hex']
+        rawtx = self.nodes[0].signrawtransaction(rawtx)['hex']
+        txid = self.nodes[0].sendrawtransaction(rawtx)
+        txid = self.nodes[3].sendrawtransaction(rawtx)
+        # Make sure nodes 1 and 2 received the TX before we continue
+        self.wait_for_tx(txid, self.nodes[1])
+        self.wait_for_tx(txid, self.nodes[2])
+        # Make sure signing is done on nodes 1 and 2 (it's async)
+        time.sleep(5)
+        # Make the signing session for the IS lock time out on nodes 1-3
+        set_mocktime(get_mocktime() + 61)
+        set_node_times(self.nodes, get_mocktime())
+        time.sleep(2) # make sure Cleanup() is called
+        reconnect_isolated_node(self.nodes[3], 0)
+        self.wait_for_mnauth(self.nodes[3], 2)
+        # node 3 fully reconnected but the signing session is already timed out on all nodes, so no IS lock
+        self.wait_for_instantlock(txid, self.nodes[0], False, 5, do_assert=True)
+        if do_cycle_llmqs:
+            self.cycle_llmqs()
+            self.wait_for_instantlock(txid, self.nodes[0], False, 5, do_assert=True)
+        # Make node 0 consider the TX as safe
+        set_mocktime(get_mocktime() + 10 * 60 + 1)
+        self.nodes[0].setmocktime(get_mocktime())
+        block = self.nodes[0].generate(1)[0]
+        self.wait_for_chainlocked_block_all_nodes(block)
+
+    def test_single_node_session_timeout(self, do_cycle_llmqs):
+        set_node_times(self.nodes, get_mocktime())
+        isolate_node(self.nodes[3])
+        rawtx = self.nodes[0].createrawtransaction([], {self.nodes[0].getnewaddress(): 1})
+        rawtx = self.nodes[0].fundrawtransaction(rawtx)['hex']
+        rawtx = self.nodes[0].signrawtransaction(rawtx)['hex']
+        txid = self.nodes[3].sendrawtransaction(rawtx)
+        time.sleep(2) # make sure signing is done on node 2 (it's async)
+        # Make the signing session for the IS lock time out on node 3
+        set_mocktime(get_mocktime() + 61)
+        set_node_times(self.nodes, get_mocktime())
+        time.sleep(2) # make sure Cleanup() is called
+        reconnect_isolated_node(self.nodes[3], 0)
+        self.wait_for_mnauth(self.nodes[3], 2)
+        self.nodes[0].sendrawtransaction(rawtx)
+        # Make sure nodes 1 and 2 received the TX
+        self.wait_for_tx(txid, self.nodes[1])
+        self.wait_for_tx(txid, self.nodes[2])
+        # Make sure signing is done on nodes 1 and 2 (it's async)
+        time.sleep(5)
+        # node 3 fully reconnected but the signing session is already timed out on it, so no IS lock
+        self.wait_for_instantlock(txid, self.nodes[0], False, 1, do_assert=True)
+        if do_cycle_llmqs:
+            self.cycle_llmqs()
+            self.wait_for_instantlock(txid, self.nodes[0], False, 5, do_assert=True)
+        # Make node 0 consider the TX as safe
+        set_mocktime(get_mocktime() + 10 * 60 + 1)
+        self.nodes[0].setmocktime(get_mocktime())
+        block = self.nodes[0].generate(1)[0]
+        self.wait_for_chainlocked_block_all_nodes(block)
+
+if __name__ == '__main__':
+    LLMQ_IS_RetroactiveSigning().main()

View File

@@ -17,7 +17,7 @@ Checks LLMQs signing sessions
 class LLMQSigningTest(DashTestFramework):
     def __init__(self):
-        super().__init__(6, 5, [], fast_dip3_enforcement=True)
+        super().__init__(6, 5, fast_dip3_enforcement=True)

     def run_test(self):

View File

@@ -16,7 +16,7 @@ Checks simple PoSe system based on LLMQ commitments
 class LLMQSimplePoSeTest(DashTestFramework):
     def __init__(self):
-        super().__init__(6, 5, [], fast_dip3_enforcement=True)
+        super().__init__(6, 5, fast_dip3_enforcement=True)

     def run_test(self):

View File

@@ -23,7 +23,7 @@ transactions with high fee.
 class AutoInstantSendTest(DashTestFramework):
     def __init__(self):
-        super().__init__(8, 5, [], fast_dip3_enforcement=True)
+        super().__init__(8, 5, fast_dip3_enforcement=True)
         # set sender, receiver, isolated nodes
         self.receiver_idx = 1
         self.sender_idx = 2

View File

@@ -14,7 +14,7 @@ InstantSendTest -- test InstantSend functionality (prevent doublespend for uncon
 class InstantSendTest(DashTestFramework):
     def __init__(self):
-        super().__init__(9, 5, [], fast_dip3_enforcement=True)
+        super().__init__(9, 5, fast_dip3_enforcement=True)
         # set sender, receiver, isolated nodes
         self.isolated_idx = 1
         self.receiver_idx = 2

View File

@@ -14,6 +14,7 @@ import traceback
 from concurrent.futures import ThreadPoolExecutor
 from time import time, sleep
+from test_framework.mininode import wait_until
 from .util import (
     assert_equal,
     initialize_chain,
@@ -252,7 +253,7 @@ class MasternodeInfo:
 class DashTestFramework(BitcoinTestFramework):
-    def __init__(self, num_nodes, masterodes_count, extra_args, fast_dip3_enforcement=False):
+    def __init__(self, num_nodes, masterodes_count, extra_args=None, fast_dip3_enforcement=False):
         super().__init__()
         self.mn_count = masterodes_count
         self.num_nodes = num_nodes
@@ -260,17 +261,20 @@ class DashTestFramework(BitcoinTestFramework):
         self.setup_clean_chain = True
         self.is_network_split = False
         # additional args
+        if extra_args is None:
+            extra_args = [[]] * num_nodes
+        assert_equal(len(extra_args), num_nodes)
         self.extra_args = extra_args
-        self.extra_args += ["-sporkkey=cP4EKFyJsHT39LDqgdcB43Y3YXjNyjb5Fuas1GQSeAtjnZWmZEQK"]
+        self.extra_args[0] += ["-sporkkey=cP4EKFyJsHT39LDqgdcB43Y3YXjNyjb5Fuas1GQSeAtjnZWmZEQK"]
         self.fast_dip3_enforcement = fast_dip3_enforcement
         if fast_dip3_enforcement:
-            self.extra_args += ["-dip3params=30:50"]
+            for i in range(0, num_nodes):
+                self.extra_args[i] += ["-dip3params=30:50"]

     def create_simple_node(self):
         idx = len(self.nodes)
-        args = self.extra_args
+        args = self.extra_args[idx]
         self.nodes.append(start_node(idx, self.options.tmpdir, args))
         for i in range(0, idx):
             connect_nodes(self.nodes[i], idx)
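With this change, DashTestFramework subclasses pass one argument list per node instead of a single flat list shared by all nodes; the sporkkey now goes only to node 0 and -dip3params is appended to every per-node list. A minimal sketch of the new calling convention (the subclass and its options are hypothetical, for illustration only):

    from test_framework.test_framework import DashTestFramework

    class ExampleTest(DashTestFramework):
        def __init__(self):
            # Old convention: super().__init__(6, 5, ["-debug"], ...)
            # New convention: one list per node; len(extra_args) must equal
            # num_nodes, or omit extra_args entirely to get [[]] * num_nodes.
            super().__init__(6, 5, [["-debug"], [], [], [], [], []], fast_dip3_enforcement=True)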
@@ -329,7 +333,7 @@ class DashTestFramework(BitcoinTestFramework):
             copy_datadir(0, idx + start_idx, self.options.tmpdir)

         # restart faucet node
-        self.nodes[0] = start_node(0, self.options.tmpdir, self.extra_args)
+        self.nodes[0] = start_node(0, self.options.tmpdir, self.extra_args[0])

     def start_masternodes(self):
         start_idx = len(self.nodes)
@@ -340,7 +344,7 @@ class DashTestFramework(BitcoinTestFramework):
         def do_start(idx):
             args = ['-masternode=1',
-                    '-masternodeblsprivkey=%s' % self.mninfo[idx].keyOperator] + self.extra_args
+                    '-masternodeblsprivkey=%s' % self.mninfo[idx].keyOperator] + self.extra_args[idx + start_idx]
             node = start_node(idx + start_idx, self.options.tmpdir, args)
             self.mninfo[idx].nodeIdx = idx + start_idx
             self.mninfo[idx].node = node
@@ -348,8 +352,8 @@ class DashTestFramework(BitcoinTestFramework):
             wait_to_sync(node, True)

         def do_connect(idx):
-            for i in range(0, idx + 1):
-                connect_nodes(self.nodes[idx + start_idx], i)
+            # Connect to the control node only, masternodes should take care of intra-quorum connections themselves
+            connect_nodes(self.mninfo[idx].node, 0)

         jobs = []
@@ -378,7 +382,7 @@ class DashTestFramework(BitcoinTestFramework):
     def setup_network(self):
         self.nodes = []
         # create faucet node for collateral and transactions
-        self.nodes.append(start_node(0, self.options.tmpdir, self.extra_args))
+        self.nodes.append(start_node(0, self.options.tmpdir, self.extra_args[0]))
         required_balance = MASTERNODE_COLLATERAL * self.mn_count + 1
         while self.nodes[0].getbalance() < required_balance:
             set_mocktime(get_mocktime() + 1)
@@ -400,6 +404,12 @@ class DashTestFramework(BitcoinTestFramework):
         self.prepare_datadirs()
         self.start_masternodes()

+        # non-masternodes were disconnected from the control node during prepare_datadirs,
+        # let's reconnect them back to make sure they receive updates
+        num_simple_nodes = self.num_nodes - self.mn_count - 1
+        for i in range(0, num_simple_nodes):
+            connect_nodes(self.nodes[i+1], 0)
+
         set_mocktime(get_mocktime() + 1)
         set_node_times(self.nodes, get_mocktime())
         self.nodes[0].generate(1)
@@ -545,23 +555,53 @@ class DashTestFramework(BitcoinTestFramework):
         self.sync_all()
         return self.wait_for_instantlock(txid, sender)

-    def wait_for_instantlock(self, txid, node):
-        # wait for instantsend locks
-        start = time()
-        locked = False
-        while True:
+    def wait_for_tx(self, txid, node, expected=True, timeout=15):
+        def check_tx():
             try:
-                is_tx = node.getrawtransaction(txid, True)
-                if is_tx['instantlock']:
-                    locked = True
-                    break
+                return node.getrawtransaction(txid)
             except:
-                # TX not received yet?
-                pass
-            if time() > start + 10:
-                break
-            sleep(0.5)
-        return locked
+                return False
+        w = wait_until(check_tx, timeout=timeout, sleep=0.5)
+        if not w and expected:
+            raise AssertionError("wait_for_tx failed")
+        elif w and not expected:
+            raise AssertionError("waiting unexpectedly succeeded")
+
+    def wait_for_instantlock(self, txid, node, expected=True, timeout=15, do_assert=False):
+        def check_instantlock():
+            try:
+                return node.getrawtransaction(txid, True)["instantlock"]
+            except:
+                return False
+        w = wait_until(check_instantlock, timeout=timeout, sleep=0.1)
+        if not w and expected:
+            if do_assert:
+                raise AssertionError("wait_for_instantlock failed")
+            else:
+                return False
+        elif w and not expected:
+            if do_assert:
+                raise AssertionError("waiting unexpectedly succeeded")
+            else:
+                return False
+        return True
+
+    def wait_for_chainlocked_block(self, node, block_hash, expected=True, timeout=15):
+        def check_chainlocked_block():
+            try:
+                block = node.getblock(block_hash)
+                return block["confirmations"] > 0 and block["chainlock"]
+            except:
+                return False
+        w = wait_until(check_chainlocked_block, timeout=timeout, sleep=0.1)
+        if not w and expected:
+            raise AssertionError("wait_for_chainlocked_block failed")
+        elif w and not expected:
+            raise AssertionError("waiting unexpectedly succeeded")
+
+    def wait_for_chainlocked_block_all_nodes(self, block_hash, timeout=15):
+        for node in self.nodes:
+            self.wait_for_chainlocked_block(node, block_hash, timeout=timeout)

     def wait_for_sporks_same(self, timeout=30):
         st = time()
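These helpers delegate their polling loops to wait_until from mininode. Passing expected=False turns the check around, asserting that the lock does not appear within the window, and do_assert controls whether wait_for_instantlock raises or merely returns False. Typical usage inside a test, mirroring llmq-is-retroactive.py above:

    txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1)
    # Wait until node 1 has seen the TX (raises AssertionError on timeout).
    self.wait_for_tx(txid, self.nodes[1])
    # Assert an IS lock appears within the default 15 second window.
    self.wait_for_instantlock(txid, self.nodes[0], do_assert=True)
    # Assert NO IS lock appears within 5 seconds (expected=False).
    self.wait_for_instantlock(txid, self.nodes[0], False, 5, do_assert=True)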
@@ -695,6 +735,16 @@ class DashTestFramework(BitcoinTestFramework):
         return new_quorum

+    def wait_for_mnauth(self, node, count, timeout=10):
+        def test():
+            pi = node.getpeerinfo()
+            c = 0
+            for p in pi:
+                if "verified_proregtx_hash" in p and p["verified_proregtx_hash"] != "":
+                    c += 1
+            return c >= count
+        assert wait_until(test, timeout=timeout)
+
 # Test framework for doing p2p comparison testing, which sets up some bitcoind
 # binaries:
 #   1 binary: test binary

View File

@ -44,7 +44,9 @@ CQuorum::~CQuorum()
{
// most likely the thread is already done
stopCachePopulatorThread = true;
if (cachePopulatorThread.joinable()) {
// watch out to not join the thread when we're called from inside the thread, which might happen on shutdown. This
// is because on shutdown the thread is the last owner of the shared CQuorum instance and thus the destroyer of it.
if (cachePopulatorThread.joinable() && cachePopulatorThread.get_id() != std::this_thread::get_id()) {
cachePopulatorThread.join();
}
}
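The extra condition avoids a self-join: on shutdown, the cache populator thread can hold the last shared_ptr to its own CQuorum, so the destructor may run on that very thread, and joining a thread from itself is invalid. The same guard pattern, sketched in Python purely for illustration (this class is not part of the codebase; Python raises RuntimeError on a self-join rather than leaving it undefined):

    import threading

    class CachePopulator:
        def __init__(self):
            self.thread = threading.Thread(target=self.populate)
            self.thread.start()

        def populate(self):
            pass  # fill the cache; may end up holding the last reference to self

        def stop(self):
            # Only join when we are NOT running on the worker thread itself.
            if self.thread.is_alive() and self.thread is not threading.current_thread():
                self.thread.join()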

View File

@@ -374,7 +374,7 @@ void CInstantSendManager::InterruptWorkerThread()
     workInterrupt();
 }

-bool CInstantSendManager::ProcessTx(const CTransaction& tx, const Consensus::Params& params)
+bool CInstantSendManager::ProcessTx(const CTransaction& tx, bool allowReSigning, const Consensus::Params& params)
 {
     if (!IsNewInstantSendEnabled()) {
         return true;
@@ -405,11 +405,17 @@ bool CInstantSendManager::ProcessTx(const CTransaction& tx, const Consensus::Par
         g_connman->RelayInvFiltered(inv, tx, LLMQS_PROTO_VERSION);
     }

-    if (IsConflicted(tx)) {
+    auto conflictingLock = GetConflictingLock(tx);
+    if (conflictingLock) {
+        auto islockHash = ::SerializeHash(*conflictingLock);
+        LogPrintf("CInstantSendManager::%s -- txid=%s: conflicts with islock %s, txid=%s\n", __func__,
+                  tx.GetHash().ToString(), islockHash.ToString(), conflictingLock->txid.ToString());
         return false;
     }
     if (!CheckCanLock(tx, true, params)) {
+        LogPrint("instantsend", "CInstantSendManager::%s -- txid=%s: CheckCanLock returned false\n", __func__,
+                 tx.GetHash().ToString());
         return false;
     }
@@ -424,7 +430,7 @@ bool CInstantSendManager::ProcessTx(const CTransaction& tx, const Consensus::Par
         uint256 otherTxHash;
         if (quorumSigningManager->GetVoteForId(llmqType, id, otherTxHash)) {
             if (otherTxHash != tx.GetHash()) {
-                LogPrintf("CInstantSendManager::%s -- txid=%s: input %s is conflicting with islock %s\n", __func__,
+                LogPrintf("CInstantSendManager::%s -- txid=%s: input %s is conflicting with previous vote for tx %s\n", __func__,
                           tx.GetHash().ToString(), in.prevout.ToStringShort(), otherTxHash.ToString());
                 return false;
             }
@@ -433,19 +439,28 @@ bool CInstantSendManager::ProcessTx(const CTransaction& tx, const Consensus::Par
         // don't even try the actual signing if any input is conflicting
         if (quorumSigningManager->IsConflicting(llmqType, id, tx.GetHash())) {
+            LogPrintf("CInstantSendManager::%s -- txid=%s: quorumSigningManager->IsConflicting returned true. id=%s\n", __func__,
+                      tx.GetHash().ToString(), id.ToString());
             return false;
         }
     }
-    if (alreadyVotedCount == ids.size()) {
+    if (!allowReSigning && alreadyVotedCount == ids.size()) {
+        LogPrint("instantsend", "CInstantSendManager::%s -- txid=%s: already voted on all inputs, bailing out\n", __func__,
+                 tx.GetHash().ToString());
         return true;
     }
+
+    LogPrint("instantsend", "CInstantSendManager::%s -- txid=%s: trying to vote on %d inputs\n", __func__,
+             tx.GetHash().ToString(), tx.vin.size());
     for (size_t i = 0; i < tx.vin.size(); i++) {
         auto& in = tx.vin[i];
         auto& id = ids[i];
         inputRequestIds.emplace(id);
-        if (quorumSigningManager->AsyncSignIfMember(llmqType, id, tx.GetHash())) {
-            LogPrintf("CInstantSendManager::%s -- txid=%s: voted on input %s with id %s\n", __func__,
+        LogPrint("instantsend", "CInstantSendManager::%s -- txid=%s: trying to vote on input %s with id %s. allowReSigning=%d\n", __func__,
+                 tx.GetHash().ToString(), in.prevout.ToStringShort(), id.ToString(), allowReSigning);
+        if (quorumSigningManager->AsyncSignIfMember(llmqType, id, tx.GetHash(), allowReSigning)) {
+            LogPrint("instantsend", "CInstantSendManager::%s -- txid=%s: voted on input %s with id %s\n", __func__,
                      tx.GetHash().ToString(), in.prevout.ToStringShort(), id.ToString());
         }
     }
@@ -1000,6 +1015,10 @@ void CInstantSendManager::SyncTransaction(const CTransaction& tx, const CBlockIn
         return;
     }

+    // This is different on develop as allowReSigning is passed in from the caller. In 0.14.0.x, we have to figure this out
+    // here to mimic develop.
+    bool allowReSigning = !inMempool && !isDisconnect;
+
     uint256 islockHash;
     {
         LOCK(cs);
@@ -1022,7 +1041,7 @@ void CInstantSendManager::SyncTransaction(const CTransaction& tx, const CBlockIn
     bool chainlocked = pindex && chainLocksHandler->HasChainLock(pindex->nHeight, pindex->GetBlockHash());
     if (islockHash.IsNull() && !chainlocked) {
-        ProcessTx(tx, Params().GetConsensus());
+        ProcessTx(tx, allowReSigning, Params().GetConsensus());
     }

     LOCK(cs);
@@ -1054,6 +1073,9 @@ void CInstantSendManager::AddNonLockedTx(const CTransactionRef& tx)
             nonLockedTxsByInputs.emplace(in.prevout.hash, std::make_pair(in.prevout.n, tx->GetHash()));
         }
     }
+
+    LogPrint("instantsend", "CInstantSendManager::%s -- txid=%s\n", __func__,
+             tx->GetHash().ToString());
 }

 void CInstantSendManager::RemoveNonLockedTx(const uint256& txid, bool retryChildren)
@@ -1066,10 +1088,12 @@ void CInstantSendManager::RemoveNonLockedTx(const uint256& txid, bool retryChild
     }
     auto& info = it->second;

+    size_t retryChildrenCount = 0;
     if (retryChildren) {
         // TX got locked, so we can retry locking children
         for (auto& childTxid : info.children) {
             pendingRetryTxs.emplace(childTxid);
+            retryChildrenCount++;
         }
     }
@@ -1096,6 +1120,9 @@ void CInstantSendManager::RemoveNonLockedTx(const uint256& txid, bool retryChild
     }

     nonLockedTxs.erase(it);
+
+    LogPrint("instantsend", "CInstantSendManager::%s -- txid=%s, retryChildren=%d, retryChildrenCount=%d\n", __func__,
+             txid.ToString(), retryChildren, retryChildrenCount);
 }

 void CInstantSendManager::RemoveConflictedTx(const CTransaction& tx)
@@ -1398,7 +1425,7 @@ bool CInstantSendManager::ProcessPendingRetryLockTxs()
                 tx->GetHash().ToString());
         }

-        ProcessTx(*tx, Params().GetConsensus());
+        ProcessTx(*tx, false, Params().GetConsensus());
         retryCount++;
     }

View File

@@ -120,7 +120,7 @@ public:
     void InterruptWorkerThread();

 public:
-    bool ProcessTx(const CTransaction& tx, const Consensus::Params& params);
+    bool ProcessTx(const CTransaction& tx, bool allowReSigning, const Consensus::Params& params);
     bool CheckCanLock(const CTransaction& tx, bool printDebug, const Consensus::Params& params);
     bool CheckCanLock(const COutPoint& outpoint, bool printDebug, const uint256& txHash, CAmount* retValue, const Consensus::Params& params);
     bool IsLocked(const uint256& txHash);

View File

@@ -473,7 +473,8 @@ void CSigningManager::ProcessMessageRecoveredSig(CNode* pfrom, const CRecoveredS
         return;
     }

-    LogPrint("llmq", "CSigningManager::%s -- signHash=%s, node=%d\n", __func__, CLLMQUtils::BuildSignHash(recoveredSig).ToString(), pfrom->id);
+    LogPrint("llmq", "CSigningManager::%s -- signHash=%s, id=%s, msgHash=%s, node=%d\n", __func__,
+             CLLMQUtils::BuildSignHash(recoveredSig).ToString(), recoveredSig.id.ToString(), recoveredSig.msgHash.ToString(), pfrom->GetId());

     LOCK(cs);
     pendingRecoveredSigs[pfrom->id].emplace_back(recoveredSig);
@@ -742,7 +743,7 @@ void CSigningManager::UnregisterRecoveredSigsListener(CRecoveredSigsListener* l)
     recoveredSigsListeners.erase(itRem, recoveredSigsListeners.end());
 }

-bool CSigningManager::AsyncSignIfMember(Consensus::LLMQType llmqType, const uint256& id, const uint256& msgHash)
+bool CSigningManager::AsyncSignIfMember(Consensus::LLMQType llmqType, const uint256& id, const uint256& msgHash, bool allowReSign)
 {
     auto& params = Params().GetConsensus().llmqs.at(llmqType);
@@ -753,24 +754,31 @@ bool CSigningManager::AsyncSignIfMember(Consensus::LLMQType llmqType, const uint
     {
         LOCK(cs);

-        if (db.HasVotedOnId(llmqType, id)) {
+        bool hasVoted = db.HasVotedOnId(llmqType, id);
+        if (hasVoted) {
             uint256 prevMsgHash;
             db.GetVoteForId(llmqType, id, prevMsgHash);
             if (msgHash != prevMsgHash) {
                 LogPrintf("CSigningManager::%s -- already voted for id=%s and msgHash=%s. Not voting on conflicting msgHash=%s\n", __func__,
                           id.ToString(), prevMsgHash.ToString(), msgHash.ToString());
+                return false;
+            } else if (allowReSign) {
+                LogPrint("llmq", "CSigningManager::%s -- already voted for id=%s and msgHash=%s. Resigning!\n", __func__,
+                         id.ToString(), prevMsgHash.ToString());
+            } else {
+                LogPrint("llmq", "CSigningManager::%s -- already voted for id=%s and msgHash=%s. Not voting again.\n", __func__,
+                         id.ToString(), prevMsgHash.ToString());
+                return false;
             }
-            return false;
         }

         if (db.HasRecoveredSigForId(llmqType, id)) {
             // no need to sign it if we already have a recovered sig
             return true;
         }
-        db.WriteVoteForId(llmqType, id, msgHash);
+        if (!hasVoted) {
+            db.WriteVoteForId(llmqType, id, msgHash);
+        }
     }
int tipHeight;
@@ -795,6 +803,10 @@ bool CSigningManager::AsyncSignIfMember(Consensus::LLMQType llmqType, const uint
         return false;
     }

+    if (allowReSign) {
+        // make us re-announce all known shares (other nodes might have run into a timeout)
+        quorumSigSharesManager->ForceReAnnouncement(quorum, llmqType, id, msgHash);
+    }
+
     quorumSigSharesManager->AsyncSign(quorum, id, msgHash);

     return true;
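The voting branch now distinguishes three cases when a vote for this id already exists: a different msgHash is a conflict and is never signed, the same msgHash with allowReSign falls through and signs again, and the same msgHash without allowReSign bails out; the vote itself is only written once. The control flow in Python-flavoured pseudocode (names are illustrative, not the real API):

    def async_sign_if_member(db, llmq_type, sig_id, msg_hash, allow_resign=False):
        has_voted = db.has_voted_on_id(llmq_type, sig_id)
        if has_voted:
            prev = db.get_vote_for_id(llmq_type, sig_id)
            if msg_hash != prev:
                return False   # conflicting vote: never sign
            if not allow_resign:
                return False   # same vote, no re-sign requested
            # same vote + allow_resign: fall through and sign again
        if db.has_recovered_sig_for_id(llmq_type, sig_id):
            return True        # a recovered sig already exists
        if not has_voted:
            db.write_vote_for_id(llmq_type, sig_id, msg_hash)  # record once
        # then force re-announcement if allow_resign, and start async signing
        return True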

View File

@@ -167,7 +167,7 @@ public:
     void RegisterRecoveredSigsListener(CRecoveredSigsListener* l);
     void UnregisterRecoveredSigsListener(CRecoveredSigsListener* l);

-    bool AsyncSignIfMember(Consensus::LLMQType llmqType, const uint256& id, const uint256& msgHash);
+    bool AsyncSignIfMember(Consensus::LLMQType llmqType, const uint256& id, const uint256& msgHash, bool allowReSign = false);
     bool HasRecoveredSig(Consensus::LLMQType llmqType, const uint256& id, const uint256& msgHash);
     bool HasRecoveredSigForId(Consensus::LLMQType llmqType, const uint256& id);
     bool HasRecoveredSigForSession(const uint256& signHash);

View File

@@ -82,6 +82,13 @@ void CSigSharesInv::Set(uint16_t quorumMember, bool v)
     inv[quorumMember] = v;
 }

+void CSigSharesInv::SetAll(bool v)
+{
+    for (size_t i = 0; i < inv.size(); i++) {
+        inv[i] = v;
+    }
+}
+
 std::string CBatchedSigShares::ToInvString() const
 {
     CSigSharesInv inv;
@@ -679,7 +686,7 @@ void CSigSharesManager::ProcessSigShare(NodeId nodeId, const CSigShare& sigShare
     sigSharesToAnnounce.Add(sigShare.GetKey(), true);

     // Update the time we've seen the last sigShare
-    timeSeenForSessions[sigShare.GetSignHash()] = GetTimeMillis();
+    timeSeenForSessions[sigShare.GetSignHash()] = GetAdjustedTime();

     if (!quorumNodes.empty()) {
         // don't announce and wait for other nodes to request this share and directly send it to them
@@ -778,7 +785,7 @@ void CSigSharesManager::CollectSigSharesToRequest(std::unordered_map<NodeId, std
 {
     AssertLockHeld(cs);

-    int64_t now = GetTimeMillis();
+    int64_t now = GetAdjustedTime();
     const size_t maxRequestsForNode = 32;

     // avoid requesting from same nodes all the time
@@ -1144,8 +1151,8 @@ CSigShare CSigSharesManager::RebuildSigShare(const CSigSharesNodeState::SessionI
 void CSigSharesManager::Cleanup()
 {
-    int64_t now = GetTimeMillis();
-    if (now - lastCleanupTime < 5000) {
+    int64_t now = GetAdjustedTime();
+    if (now - lastCleanupTime < 5) {
         return;
     }
@@ -1266,7 +1273,7 @@ void CSigSharesManager::Cleanup()
         nodeStates.erase(nodeId);
     }

-    lastCleanupTime = GetTimeMillis();
+    lastCleanupTime = GetAdjustedTime();
 }

 void CSigSharesManager::RemoveSigSharesForSession(const uint256& signHash)
void CSigSharesManager::RemoveSigSharesForSession(const uint256& signHash)
@@ -1427,6 +1434,31 @@ void CSigSharesManager::Sign(const CQuorumCPtr& quorum, const uint256& id, const
     ProcessSigShare(-1, sigShare, *g_connman, quorum);
 }

+// causes all known sigShares to be re-announced
+void CSigSharesManager::ForceReAnnouncement(const CQuorumCPtr& quorum, Consensus::LLMQType llmqType, const uint256& id, const uint256& msgHash)
+{
+    LOCK(cs);
+    auto signHash = CLLMQUtils::BuildSignHash(llmqType, quorum->qc.quorumHash, id, msgHash);
+    auto sigs = sigShares.GetAllForSignHash(signHash);
+    if (sigs) {
+        for (auto& p : *sigs) {
+            // re-announce every sigshare to every node
+            sigSharesToAnnounce.Add(std::make_pair(signHash, p.first), true);
+        }
+    }
+    for (auto& p : nodeStates) {
+        CSigSharesNodeState& nodeState = p.second;
+        auto session = nodeState.GetSessionBySignHash(signHash);
+        if (!session) {
+            continue;
+        }
+        // pretend that the other node doesn't know about any shares so that we re-announce everything
+        session->knows.SetAll(false);
+        // we need to use a new session id as we don't know if the other node has run into a timeout already
+        session->sendSessionId = (uint32_t)-1;
+    }
+}
+
 void CSigSharesManager::HandleNewRecoveredSig(const llmq::CRecoveredSig& recoveredSig)
 {
     LOCK(cs);
View File

@@ -104,6 +104,7 @@ public:
     void Init(size_t size);
     bool IsSet(uint16_t quorumMember) const;
     void Set(uint16_t quorumMember, bool v);
+    void SetAll(bool v);
     void Merge(const CSigSharesInv& inv2);

     size_t CountSet() const;
@@ -329,8 +330,8 @@ public:
 class CSigSharesManager : public CRecoveredSigsListener
 {
-    static const int64_t SESSION_NEW_SHARES_TIMEOUT = 60 * 1000;
-    static const int64_t SIG_SHARE_REQUEST_TIMEOUT = 5 * 1000;
+    static const int64_t SESSION_NEW_SHARES_TIMEOUT = 60;
+    static const int64_t SIG_SHARE_REQUEST_TIMEOUT = 5;

     // we try to keep total message size below 10k
     const size_t MAX_MSGS_CNT_QSIGSESANN = 100;
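These constants were milliseconds compared against GetTimeMillis(); they are now seconds compared against GetAdjustedTime(), so tests can expire signing sessions by advancing mocktime instead of waiting in real time. The test-side idiom, as used by llmq-is-retroactive.py above:

    # Advance network time past SESSION_NEW_SHARES_TIMEOUT (60 seconds)...
    set_mocktime(get_mocktime() + 61)
    set_node_times(self.nodes, get_mocktime())
    # ...then give the maintenance loop a moment of real time to run Cleanup().
    time.sleep(2)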
@@ -377,6 +378,7 @@ public:
     void AsyncSign(const CQuorumCPtr& quorum, const uint256& id, const uint256& msgHash);
     void Sign(const CQuorumCPtr& quorum, const uint256& id, const uint256& msgHash);
+    void ForceReAnnouncement(const CQuorumCPtr& quorum, Consensus::LLMQType llmqType, const uint256& id, const uint256& msgHash);
     void HandleNewRecoveredSig(const CRecoveredSig& recoveredSig);

View File

@@ -691,6 +691,11 @@ void CNode::copyStats(CNodeStats &stats)
     // Leave string empty if addrLocal invalid (not filled in yet)
     CService addrLocalUnlocked = GetAddrLocal();
     stats.addrLocal = addrLocalUnlocked.IsValid() ? addrLocalUnlocked.ToString() : "";
+
+    {
+        LOCK(cs_mnauth);
+        X(verifiedProRegTxHash);
+    }
 }
 #undef X

View File

@@ -660,6 +660,8 @@ public:
     double dMinPing;
     std::string addrLocal;
     CAddress addr;
+    // In case this is a verified MN, this value is the proTx of the MN
+    uint256 verifiedProRegTxHash;
 };

View File

@@ -1382,10 +1382,10 @@ void static ProcessGetData(CNode* pfrom, const Consensus::Params& consensusParam
         }
     } // release cs_main

-    if (it != pfrom->vRecvGetData.end()) {
+    if (it != pfrom->vRecvGetData.end() && !pfrom->fPauseSend) {
         const CInv &inv = *it;
-        it++;
         if (inv.type == MSG_BLOCK || inv.type == MSG_FILTERED_BLOCK || inv.type == MSG_CMPCT_BLOCK) {
+            it++;
             ProcessGetBlockData(pfrom, consensusParams, inv, connman, interruptMsgProc);
         }
     }

View File

@@ -79,6 +79,9 @@ UniValue getpeerinfo(const JSONRPCRequest& request)
             "    \"addr\":\"host:port\", (string) The ip address and port of the peer\n"
             "    \"addrlocal\":\"ip:port\", (string) local address\n"
             "    \"services\":\"xxxxxxxxxxxxxxxx\", (string) The services offered\n"
+            "    \"verified_proregtx_hash\": h, (hex) Only present when the peer is a masternode and successfully\n"
+            "                                   authenticated via MNAUTH. In this case, this field contains the\n"
+            "                                   protx hash of the masternode\n"
             "    \"relaytxes\":true|false, (boolean) Whether peer has asked us to relay transactions to it\n"
             "    \"lastsend\": ttt, (numeric) The time in seconds since epoch (Jan 1 1970 GMT) of the last send\n"
             "    \"lastrecv\": ttt, (numeric) The time in seconds since epoch (Jan 1 1970 GMT) of the last receive\n"
@@ -135,6 +138,9 @@ UniValue getpeerinfo(const JSONRPCRequest& request)
     if (!(stats.addrLocal.empty()))
         obj.push_back(Pair("addrlocal", stats.addrLocal));
     obj.push_back(Pair("services", strprintf("%016x", stats.nServices)));
+    if (!stats.verifiedProRegTxHash.IsNull()) {
+        obj.push_back(Pair("verified_proregtx_hash", stats.verifiedProRegTxHash.ToString()));
+    }
     obj.push_back(Pair("relaytxes", stats.fRelayTxes));
     obj.push_back(Pair("lastsend", stats.nLastSend));
     obj.push_back(Pair("lastrecv", stats.nLastRecv));