Mirror of https://github.com/dashpay/dash.git (synced 2024-12-25 20:12:57 +01:00)

Merge pull request #2764 from codablock/pr_activatebestchainrace

Backport PRs from Bitcoin required to make ActivateBestChain cs_main free on entry

Commit 8955eb82ef
@@ -94,6 +94,7 @@ BASE_SCRIPTS= [
    'p2p-leaktests.py',
    'p2p-compactblocks.py',
    'sporks.py',
    'p2p-fingerprint.py',
]

ZMQ_SCRIPTS = [
qa/rpc-tests/p2p-fingerprint.py (new executable file, 189 lines)
@@ -0,0 +1,189 @@
#!/usr/bin/env python3
# Copyright (c) 2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test various fingerprinting protections.

If a stale block more than a month old or its header is requested by a peer,
the node should pretend that it does not have it to avoid fingerprinting.
"""
import threading

import time

from test_framework.blocktools import (create_block, create_coinbase)
from test_framework.mininode import (
    CInv,
    NetworkThread,
    NodeConn,
    SingleNodeConnCB,
    msg_headers,
    msg_block,
    msg_getdata,
    msg_getheaders,
    wait_until,
)
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
    assert_equal,
    p2p_port,
    start_nodes)

class P2PFingerprintTest(BitcoinTestFramework):
    def __init__(self):
        BitcoinTestFramework.__init__(self)
        # TODO When this asserts, you have probably backported bitcoin#11121, so you'll have to remove this constructor
        assert(not callable(getattr(BitcoinTestFramework(), "set_test_params", None)))
        self.set_test_params()

    def set_test_params(self):
        self.setup_clean_chain = True
        self.num_nodes = 1

    # TODO also remove this when bitcoin#11121 is backported
    def setup_network(self, split=False):
        self.nodes = start_nodes(self.num_nodes, self.options.tmpdir)
        self.is_network_split = False

    # Build a chain of blocks on top of given one
    def build_chain(self, nblocks, prev_hash, prev_height, prev_median_time):
        blocks = []
        for _ in range(nblocks):
            coinbase = create_coinbase(prev_height + 1)
            block_time = prev_median_time + 1
            block = create_block(int(prev_hash, 16), coinbase, block_time)
            block.solve()

            blocks.append(block)
            prev_hash = block.hash
            prev_height += 1
            prev_median_time = block_time
        return blocks

    # Send a getdata request for a given block hash
    def send_block_request(self, block_hash, node):
        msg = msg_getdata()
        msg.inv.append(CInv(2, block_hash))  # 2 == "Block"
        node.send_message(msg)

    # Send a getheaders request for a given single block hash
    def send_header_request(self, block_hash, node):
        msg = msg_getheaders()
        msg.hashstop = block_hash
        node.send_message(msg)

    # Check whether last block received from node has a given hash
    def last_block_equals(self, expected_hash, node):
        block_msg = node.last_message.get("block")
        return block_msg and block_msg.block.rehash() == expected_hash

    # Check whether last block header received from node has a given hash
    def last_header_equals(self, expected_hash, node):
        headers_msg = node.last_message.get("headers")
        return (headers_msg and
                headers_msg.headers and
                headers_msg.headers[0].rehash() == expected_hash)

    # Checks that stale blocks timestamped more than a month ago are not served
    # by the node while recent stale blocks and old active chain blocks are.
    # This does not currently test that stale blocks timestamped within the
    # last month but that have over a month's worth of work are also withheld.
    def run_test(self):
        # TODO remove this when mininode is up-to-date with Bitcoin
        class MyNodeConnCB(SingleNodeConnCB):
            def __init__(self):
                SingleNodeConnCB.__init__(self)
                self.cond = threading.Condition()
                self.last_message = {}

            def deliver(self, conn, message):
                SingleNodeConnCB.deliver(self, conn, message)
                command = message.command.decode('ascii')
                self.last_message[command] = message
                with self.cond:
                    self.cond.notify_all()

            def wait_for_getdata(self):
                with self.cond:
                    assert(self.cond.wait_for(lambda: "getdata" in self.last_message, timeout=15))

        node0 = MyNodeConnCB()

        connections = []
        connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node0))
        node0.add_connection(connections[0])

        NetworkThread().start()
        node0.wait_for_verack()

        # Set node time to 60 days ago
        self.nodes[0].setmocktime(int(time.time()) - 60 * 24 * 60 * 60)

        # Generating a chain of 10 blocks
        block_hashes = self.nodes[0].generate(nblocks=10)

        # Create longer chain starting 2 blocks before current tip
        height = len(block_hashes) - 2
        block_hash = block_hashes[height - 1]
        block_time = self.nodes[0].getblockheader(block_hash)["mediantime"] + 1
        new_blocks = self.build_chain(5, block_hash, height, block_time)

        # Force reorg to a longer chain
        node0.send_message(msg_headers(new_blocks))
        node0.wait_for_getdata()
        for block in new_blocks:
            node0.send_and_ping(msg_block(block))

        # Check that reorg succeeded
        assert_equal(self.nodes[0].getblockcount(), 13)

        stale_hash = int(block_hashes[-1], 16)

        # Check that getdata request for stale block succeeds
        self.send_block_request(stale_hash, node0)
        test_function = lambda: self.last_block_equals(stale_hash, node0)
        wait_until(test_function, timeout=3)

        # Check that getheader request for stale block header succeeds
        self.send_header_request(stale_hash, node0)
        test_function = lambda: self.last_header_equals(stale_hash, node0)
        wait_until(test_function, timeout=3)

        # Longest chain is extended so stale is much older than chain tip
        self.nodes[0].setmocktime(0)
        tip = self.nodes[0].generate(nblocks=1)[0]
        assert_equal(self.nodes[0].getblockcount(), 14)

        # Send getdata & getheaders to refresh last received getheader message
        block_hash = int(tip, 16)
        self.send_block_request(block_hash, node0)
        self.send_header_request(block_hash, node0)
        node0.sync_with_ping()

        # Request for very old stale block should now fail
        self.send_block_request(stale_hash, node0)
        time.sleep(3)
        assert not self.last_block_equals(stale_hash, node0)

        # Request for very old stale block header should now fail
        self.send_header_request(stale_hash, node0)
        time.sleep(3)
        assert not self.last_header_equals(stale_hash, node0)

        # Verify we can fetch very old blocks and headers on the active chain
        block_hash = int(block_hashes[2], 16)
        self.send_block_request(block_hash, node0)
        self.send_header_request(block_hash, node0)
        node0.sync_with_ping()

        self.send_block_request(block_hash, node0)
        test_function = lambda: self.last_block_equals(block_hash, node0)
        wait_until(test_function, timeout=3)

        self.send_header_request(block_hash, node0)
        test_function = lambda: self.last_header_equals(block_hash, node0)
        wait_until(test_function, timeout=3)

if __name__ == '__main__':
    P2PFingerprintTest().main()
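For orientation, the relay rule this test exercises is the one implemented by BlockRequestAllowed() in the net_processing.cpp hunks further down. Below is a minimal Python sketch of that rule; it is not part of the diff, and the function name and simplified inputs are illustrative only.

# Minimal sketch of the stale-block serving rule (illustrative, not part of the diff).
STALE_RELAY_AGE_LIMIT = 30 * 24 * 60 * 60  # one month, in seconds

def block_request_allowed(on_active_chain, fully_validated,
                          best_header_time, block_time, work_equivalent_age):
    # Blocks on the active chain are always served.
    if on_active_chain:
        return True
    # A stale block is only served if it was fully validated and is no more than
    # a month behind the best known header, both by timestamp and by equivalent
    # proof-of-work age.
    return (fully_validated
            and best_header_time - block_time < STALE_RELAY_AGE_LIMIT
            and work_equivalent_age < STALE_RELAY_AGE_LIMIT)

# As in the test above: a roughly 60-day-old stale block must not be served.
assert not block_request_allowed(False, True, best_header_time=60 * 24 * 60 * 60,
                                 block_time=0, work_equivalent_age=0)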
@@ -10,6 +10,17 @@ Setup:
receive inv's (omitted from testing description below, this is our control).
Second node is used for creating reorgs.

test_null_locators
==================

Sends two getheaders requests with null locator values. The first request's hashstop
value refers to a validated block, while the second request's hashstop value refers to
a block which hasn't been validated. Verifies that only the first request returns
headers.

test_nonnull_locators
=====================

Part 1: No headers announcements before "sendheaders"
a. node mines a block [expect: inv]
   send getdata for the block [expect: block]
@@ -279,6 +290,29 @@ class SendHeadersTest(BitcoinTestFramework):
        inv_node.wait_for_verack()
        test_node.wait_for_verack()

        self.test_null_locators(test_node)
        self.test_nonnull_locators(test_node, inv_node)

    def test_null_locators(self, test_node):
        tip = self.nodes[0].getblockheader(self.nodes[0].generate(1)[0])
        tip_hash = int(tip["hash"], 16)

        self.log.info("Verify getheaders with null locator and valid hashstop returns headers.")
        test_node.clear_last_announcement()
        test_node.get_headers(locator=[], hashstop=tip_hash)
        assert_equal(test_node.check_last_announcement(headers=[tip_hash]), True)

        self.log.info("Verify getheaders with null locator and invalid hashstop does not return headers.")
        block = create_block(int(tip["hash"], 16), create_coinbase(tip["height"] + 1), tip["mediantime"] + 1)
        block.solve()
        test_node.send_header_for_blocks([block])
        test_node.clear_last_announcement()
        test_node.get_headers(locator=[], hashstop=int(block.hash, 16))
        test_node.sync_with_ping()
        assert_equal(test_node.block_announced, False)
        test_node.send_message(msg_block(block))

    def test_nonnull_locators(self, test_node, inv_node):
        tip = int(self.nodes[0].getbestblockhash(), 16)

        # PART 1
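The get_headers(locator=[], hashstop=...) helper used above boils down to sending a getheaders message with an empty locator. A small sketch using the mininode primitives these tests already import; the field names are assumed from the test framework rather than quoted from this diff.

from test_framework.mininode import msg_getheaders

def null_locator_getheaders(hashstop_int):
    # An empty locator plus a hashstop asks the node for exactly that header,
    # which it should only return if the block has been validated.
    msg = msg_getheaders()
    msg.locator.vHave = []       # null locator
    msg.hashstop = hashstop_int  # integer block hash, as used throughout these tests
    return msg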
@@ -1177,8 +1177,8 @@ class msg_getheaders(object):
class msg_headers(object):
    command = b"headers"

    def __init__(self):
        self.headers = []
    def __init__(self, headers=None):
        self.headers = headers if headers is not None else []

    def deserialize(self, f):
        # comment in dashd indicates these should be deserialized as blocks
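With the optional headers argument, a headers message can now be built in one step, which is what the fingerprint test above relies on (msg_headers(new_blocks)). A small illustrative comparison, assuming the test framework is on the path; the blocks list here is a stand-in for the one built by build_chain().

from test_framework.mininode import msg_headers

blocks = []  # in the tests this is a list of CBlock objects

# Old style: construct, then assign the list.
msg_old = msg_headers()
msg_old.headers = blocks

# New style enabled by this change: pass the headers directly.
msg_new = msg_headers(blocks)
assert msg_old.headers == msg_new.headers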
@@ -77,15 +77,24 @@ struct COrphanTx {
    NodeId fromPeer;
    int64_t nTimeExpire;
};
std::map<uint256, COrphanTx> mapOrphanTransactions GUARDED_BY(cs_main);
std::map<COutPoint, std::set<std::map<uint256, COrphanTx>::iterator, IteratorComparator>> mapOrphanTransactionsByPrev GUARDED_BY(cs_main);
void EraseOrphansFor(NodeId peer) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
static CCriticalSection g_cs_orphans;
std::map<uint256, COrphanTx> mapOrphanTransactions GUARDED_BY(g_cs_orphans);
std::map<COutPoint, std::set<std::map<uint256, COrphanTx>::iterator, IteratorComparator>> mapOrphanTransactionsByPrev GUARDED_BY(g_cs_orphans);
void EraseOrphansFor(NodeId peer);

static size_t vExtraTxnForCompactIt = 0;
static std::vector<std::pair<uint256, CTransactionRef>> vExtraTxnForCompact GUARDED_BY(cs_main);
static size_t vExtraTxnForCompactIt GUARDED_BY(g_cs_orphans) = 0;
static std::vector<std::pair<uint256, CTransactionRef>> vExtraTxnForCompact GUARDED_BY(g_cs_orphans);

static const uint64_t RANDOMIZER_ID_ADDRESS_RELAY = 0x3cac0035b5866b90ULL; // SHA256("main address relay")[0:8]

/// Age after which a stale block will no longer be served if requested as
/// protection against fingerprinting. Set to one month, denominated in seconds.
static const int STALE_RELAY_AGE_LIMIT = 30 * 24 * 60 * 60;

/// Age after which a block is considered historical for purposes of rate
/// limiting block relay. Set to one week, denominated in seconds.
static const int HISTORICAL_BLOCK_AGE = 7 * 24 * 60 * 60;

// Internal stuff
namespace {
/** Number of nodes with fSyncStarted. */
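HISTORICAL_BLOCK_AGE feeds the outbound rate-limiting check in ProcessGetBlockData()/ProcessGetData() below (the OutboundTargetReached branch). A hedged Python sketch of that check follows; the inputs are simplified and illustrative, not the real node state.

# Illustrative sketch only; the real check lives in ProcessGetBlockData() below.
HISTORICAL_BLOCK_AGE = 7 * 24 * 60 * 60  # one week, in seconds

def refuse_historical_block_request(outbound_target_reached, best_header_time,
                                    block_time, is_filtered_block, whitelisted):
    # Whitelisted peers are never cut off, and the limit only applies once the
    # outbound traffic target has been reached.
    if whitelisted or not outbound_target_reached:
        return False
    # Beyond the target, week-old blocks (and any filtered-block request) are
    # refused and the requesting peer is disconnected.
    return is_filtered_block or (best_header_time - block_time > HISTORICAL_BLOCK_AGE)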
@@ -141,6 +150,13 @@ namespace {
/** Number of peers from which we're downloading blocks. */
int nPeersWithValidatedDownloads = 0;

/* This comment is here to force a merge conflict when bitcoin#11560 is backported
 * It introduces this member as int64_t while bitcoin#11824 changes it to atomic<int64_t>.
 * bitcoin#11824 is partially backported already, which means you'll have to take the atomic
 * version when you encounter this merge conflict!
std::atomic<int64_t> g_last_tip_update(0);
 */

/** Relay map, protected by cs_main. */
typedef std::map<uint256, CTransactionRef> MapRelay;
MapRelay mapRelay;

@@ -605,7 +621,7 @@ void UnregisterNodeSignals(CNodeSignals& nodeSignals)
// mapOrphanTransactions
//

void AddToCompactExtraTransactions(const CTransactionRef& tx)
void AddToCompactExtraTransactions(const CTransactionRef& tx) EXCLUSIVE_LOCKS_REQUIRED(g_cs_orphans)
{
    size_t max_extra_txn = GetArg("-blockreconstructionextratxn", DEFAULT_BLOCK_RECONSTRUCTION_EXTRA_TXN);
    if (max_extra_txn <= 0)

@@ -616,7 +632,7 @@ void AddToCompactExtraTransactions(const CTransactionRef& tx)
    vExtraTxnForCompactIt = (vExtraTxnForCompactIt + 1) % max_extra_txn;
}

bool AddOrphanTx(const CTransactionRef& tx, NodeId peer) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
bool AddOrphanTx(const CTransactionRef& tx, NodeId peer) EXCLUSIVE_LOCKS_REQUIRED(g_cs_orphans)
{
    const uint256& hash = tx->GetHash();
    if (mapOrphanTransactions.count(hash))

@@ -649,7 +665,7 @@ bool AddOrphanTx(const CTransactionRef& tx, NodeId peer) EXCLUSIVE_LOCKS_REQUIRE
    return true;
}

int static EraseOrphanTx(uint256 hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
int static EraseOrphanTx(uint256 hash) EXCLUSIVE_LOCKS_REQUIRED(g_cs_orphans)
{
    std::map<uint256, COrphanTx>::iterator it = mapOrphanTransactions.find(hash);
    if (it == mapOrphanTransactions.end())

@@ -669,6 +685,7 @@ int static EraseOrphanTx(uint256 hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main)

void EraseOrphansFor(NodeId peer)
{
    LOCK(g_cs_orphans);
    int nErased = 0;
    std::map<uint256, COrphanTx>::iterator iter = mapOrphanTransactions.begin();
    while (iter != mapOrphanTransactions.end())

@@ -683,8 +700,10 @@ void EraseOrphansFor(NodeId peer)
}

unsigned int LimitOrphanTxSize(unsigned int nMaxOrphans) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
unsigned int LimitOrphanTxSize(unsigned int nMaxOrphans)
{
    LOCK(g_cs_orphans);

    unsigned int nEvicted = 0;
    static int64_t nNextSweep;
    int64_t nNow = GetTime();

@@ -761,6 +780,19 @@ bool IsBanned(NodeId pnode)
// blockchain -> download logic notification
//

// To prevent fingerprinting attacks, only send blocks/headers outside of the
// active chain if they are no more than a month older (both in time, and in
// best equivalent proof of work) than the best header chain we know about and
// we fully-validated them at some point.
static bool BlockRequestAllowed(const CBlockIndex* pindex, const Consensus::Params& consensusParams)
{
    AssertLockHeld(cs_main);
    if (chainActive.Contains(pindex)) return true;
    return pindex->IsValid(BLOCK_VALID_SCRIPTS) && (pindexBestHeader != nullptr) &&
        (pindexBestHeader->GetBlockTime() - pindex->GetBlockTime() < STALE_RELAY_AGE_LIMIT) &&
        (GetBlockProofEquivalentTime(*pindexBestHeader, *pindex, *pindexBestHeader, consensusParams) < STALE_RELAY_AGE_LIMIT);
}

PeerLogicValidation::PeerLogicValidation(CConnman* connmanIn) : connman(connmanIn) {
    // Initialize global variables that cannot be constructed at startup.
    recentRejects.reset(new CRollingBloomFilter(120000, 0.000001));

@@ -770,7 +802,7 @@ void PeerLogicValidation::SyncTransaction(const CTransaction& tx, const CBlockIn
    if (nPosInBlock == CMainSignals::SYNC_TRANSACTION_NOT_IN_BLOCK)
        return;

    LOCK(cs_main);
    LOCK(g_cs_orphans);

    std::vector<uint256> vOrphanErase;
    // Which orphan pool entries must we evict?

@@ -794,6 +826,7 @@ void PeerLogicValidation::SyncTransaction(const CTransaction& tx, const CBlockIn
    }
}

// All of the following cache a recent block, and are protected by cs_most_recent_block
static CCriticalSection cs_most_recent_block;
static std::shared_ptr<const CBlock> most_recent_block;
static std::shared_ptr<const CBlockHeaderAndShortTxIDs> most_recent_compact_block;

@@ -925,9 +958,13 @@ bool static AlreadyHave(const CInv& inv) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
    recentRejects->reset();
}

{
    LOCK(g_cs_orphans);
    if (mapOrphanTransactions.count(inv.hash)) return true;
}

return recentRejects->contains(inv.hash) ||
    mempool.exists(inv.hash) ||
    mapOrphanTransactions.count(inv.hash) ||
    pcoinsTip->HaveCoinInCache(COutPoint(inv.hash, 0)) || // Best effort: only try output 0 and 1
    pcoinsTip->HaveCoinInCache(COutPoint(inv.hash, 1));
}
@@ -1021,301 +1058,324 @@ static void RelayAddress(const CAddress& addr, bool fReachable, CConnman& connma
connman.ForEachNodeThen(std::move(sortfunc), std::move(pushfunc));
}

void static ProcessGetBlockData(CNode* pfrom, const Consensus::Params& consensusParams, const CInv& inv, CConnman& connman, const std::atomic<bool>& interruptMsgProc)
{
bool send = false;
std::shared_ptr<const CBlock> a_recent_block;
std::shared_ptr<const CBlockHeaderAndShortTxIDs> a_recent_compact_block;
{
LOCK(cs_most_recent_block);
a_recent_block = most_recent_block;
a_recent_compact_block = most_recent_compact_block;
}

bool need_activate_chain = false;
{
LOCK(cs_main);
BlockMap::iterator mi = mapBlockIndex.find(inv.hash);
if (mi != mapBlockIndex.end())
{
if (mi->second->nChainTx && !mi->second->IsValid(BLOCK_VALID_SCRIPTS) &&
mi->second->IsValid(BLOCK_VALID_TREE)) {
// If we have the block and all of its parents, but have not yet validated it,
// we might be in the middle of connecting it (ie in the unlock of cs_main
// before ActivateBestChain but after AcceptBlock).
// In this case, we need to run ActivateBestChain prior to checking the relay
// conditions below.
need_activate_chain = true;
}
}
} // release cs_main before calling ActivateBestChain
if (need_activate_chain) {
CValidationState dummy;
ActivateBestChain(dummy, Params(), a_recent_block);
}

LOCK(cs_main);
BlockMap::iterator mi = mapBlockIndex.find(inv.hash);
if (mi != mapBlockIndex.end()) {
send = BlockRequestAllowed(mi->second, consensusParams);
if (!send) {
LogPrintf("%s: ignoring request from peer=%i for old block that isn't in the main chain\n", __func__, pfrom->GetId());
}
}
const CNetMsgMaker msgMaker(pfrom->GetSendVersion());
// disconnect node in case we have reached the outbound limit for serving historical blocks
// never disconnect whitelisted nodes
if (send && connman.OutboundTargetReached(true) && ( ((pindexBestHeader != NULL) && (pindexBestHeader->GetBlockTime() - mi->second->GetBlockTime() > HISTORICAL_BLOCK_AGE)) || inv.type == MSG_FILTERED_BLOCK) && !pfrom->fWhitelisted)
{
LogPrint("net", "historical block serving limit reached, disconnect peer=%d\n", pfrom->GetId());

//disconnect node
pfrom->fDisconnect = true;
send = false;
}
// Pruned nodes may have deleted the block, so check whether
// it's available before trying to send.
if (send && (mi->second->nStatus & BLOCK_HAVE_DATA))
{
std::shared_ptr<const CBlock> pblock;
if (a_recent_block && a_recent_block->GetHash() == (*mi).second->GetBlockHash()) {
pblock = a_recent_block;
} else {
// Send block from disk
std::shared_ptr<CBlock> pblockRead = std::make_shared<CBlock>();
if (!ReadBlockFromDisk(*pblockRead, (*mi).second, consensusParams))
assert(!"cannot load block from disk");
pblock = pblockRead;
}
if (inv.type == MSG_BLOCK)
connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::BLOCK, *pblock));
else if (inv.type == MSG_FILTERED_BLOCK)
{
bool sendMerkleBlock = false;
CMerkleBlock merkleBlock;
{
LOCK(pfrom->cs_filter);
if (pfrom->pfilter) {
sendMerkleBlock = true;
merkleBlock = CMerkleBlock(*pblock, *pfrom->pfilter);
}
}
if (sendMerkleBlock) {
connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::MERKLEBLOCK, merkleBlock));
// CMerkleBlock just contains hashes, so also push any transactions in the block the client did not see
// This avoids hurting performance by pointlessly requiring a round-trip
// Note that there is currently no way for a node to request any single transactions we didn't send here -
// they must either disconnect and retry or request the full block.
// Thus, the protocol spec specified allows for us to provide duplicate txn here,
// however we MUST always provide at least what the remote peer needs
typedef std::pair<unsigned int, uint256> PairType;
BOOST_FOREACH(PairType& pair, merkleBlock.vMatchedTxn)
connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::TX, *pblock->vtx[pair.first]));
}
// else
// no response
}
else if (inv.type == MSG_CMPCT_BLOCK)
{
// If a peer is asking for old blocks, we're almost guaranteed
// they won't have a useful mempool to match against a compact block,
// and we don't feel like constructing the object for them, so
// instead we respond with the full, non-compact block.
if (CanDirectFetch(consensusParams) && mi->second->nHeight >= chainActive.Height() - MAX_CMPCTBLOCK_DEPTH) {
if (a_recent_compact_block && a_recent_compact_block->header.GetHash() == mi->second->GetBlockHash()) {
connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::CMPCTBLOCK, *a_recent_compact_block));
} else {
CBlockHeaderAndShortTxIDs cmpctblock(*pblock);
connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::CMPCTBLOCK, cmpctblock));
}
} else {
connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::BLOCK, *pblock));
}
}

// Trigger the peer node to send a getblocks request for the next batch of inventory
if (inv.hash == pfrom->hashContinue)
{
// Bypass PushInventory, this must send even if redundant,
// and we want it right after the last block so they don't
// wait for other stuff first.
std::vector<CInv> vInv;
vInv.push_back(CInv(MSG_BLOCK, chainActive.Tip()->GetBlockHash()));
connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::INV, vInv));
pfrom->hashContinue.SetNull();
}
}
}

void static ProcessGetData(CNode* pfrom, const Consensus::Params& consensusParams, CConnman& connman, const std::atomic<bool>& interruptMsgProc)
{
AssertLockNotHeld(cs_main);

std::deque<CInv>::iterator it = pfrom->vRecvGetData.begin();
std::vector<CInv> vNotFound;
const CNetMsgMaker msgMaker(pfrom->GetSendVersion());
LOCK(cs_main);
{
LOCK(cs_main);

while (it != pfrom->vRecvGetData.end()) {
// Don't bother if send buffer is too full to respond anyway
if (pfrom->fPauseSend)
break;

const CInv &inv = *it;
LogPrint("net", "ProcessGetData -- inv = %s\n", inv.ToString());
{
while (it != pfrom->vRecvGetData.end() && it->IsKnownType()) {
if (interruptMsgProc)
return;
// Don't bother if send buffer is too full to respond anyway
if (pfrom->fPauseSend)
break;

const CInv &inv = *it;
if (inv.type == MSG_BLOCK || inv.type == MSG_FILTERED_BLOCK || inv.type == MSG_CMPCT_BLOCK) {
break;
}
it++;

if (inv.type == MSG_BLOCK || inv.type == MSG_FILTERED_BLOCK || inv.type == MSG_CMPCT_BLOCK)
{
bool send = false;
BlockMap::iterator mi = mapBlockIndex.find(inv.hash);
if (mi != mapBlockIndex.end())
{
if (mi->second->nChainTx && !mi->second->IsValid(BLOCK_VALID_SCRIPTS) &&
mi->second->IsValid(BLOCK_VALID_TREE)) {
// If we have the block and all of its parents, but have not yet validated it,
// we might be in the middle of connecting it (ie in the unlock of cs_main
// before ActivateBestChain but after AcceptBlock).
// In this case, we need to run ActivateBestChain prior to checking the relay
// conditions below.
std::shared_ptr<const CBlock> a_recent_block;
{
LOCK(cs_most_recent_block);
a_recent_block = most_recent_block;
}
CValidationState dummy;
ActivateBestChain(dummy, Params(), a_recent_block);
}
if (chainActive.Contains(mi->second)) {
send = true;
} else {
static const int nOneMonth = 30 * 24 * 60 * 60;
// To prevent fingerprinting attacks, only send blocks outside of the active
// chain if they are valid, and no more than a month older (both in time, and in
// best equivalent proof of work) than the best header chain we know about.
send = mi->second->IsValid(BLOCK_VALID_SCRIPTS) && (pindexBestHeader != NULL) &&
(pindexBestHeader->GetBlockTime() - mi->second->GetBlockTime() < nOneMonth) &&
(GetBlockProofEquivalentTime(*pindexBestHeader, *mi->second, *pindexBestHeader, consensusParams) < nOneMonth);
if (!send) {
LogPrintf("%s: ignoring request from peer=%i for old block that isn't in the main chain\n", __func__, pfrom->GetId());
}
}
}
// disconnect node in case we have reached the outbound limit for serving historical blocks
// never disconnect whitelisted nodes
static const int nOneWeek = 7 * 24 * 60 * 60; // assume > 1 week = historical
if (send && connman.OutboundTargetReached(true) && ( ((pindexBestHeader != NULL) && (pindexBestHeader->GetBlockTime() - mi->second->GetBlockTime() > nOneWeek)) || inv.type == MSG_FILTERED_BLOCK) && !pfrom->fWhitelisted)
{
LogPrint("net", "historical block serving limit reached, disconnect peer=%d\n", pfrom->GetId());

//disconnect node
pfrom->fDisconnect = true;
send = false;
}
// Pruned nodes may have deleted the block, so check whether
// it's available before trying to send.
if (send && (mi->second->nStatus & BLOCK_HAVE_DATA)) {
// Send block from disk
CBlock block;
if (!ReadBlockFromDisk(block, (*mi).second, consensusParams))
assert(!"cannot load block from disk");
if (inv.type == MSG_BLOCK)
connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::BLOCK, block));
else if (inv.type == MSG_FILTERED_BLOCK)
{
bool sendMerkleBlock = false;
CMerkleBlock merkleBlock;
{
LOCK(pfrom->cs_filter);
if (pfrom->pfilter) {
sendMerkleBlock = true;
merkleBlock = CMerkleBlock(block, *pfrom->pfilter);
}
}
if (sendMerkleBlock) {
connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::MERKLEBLOCK, merkleBlock));
// CMerkleBlock just contains hashes, so also push any transactions in the block the client did not see
// This avoids hurting performance by pointlessly requiring a round-trip
// Note that there is currently no way for a node to request any single transactions we didn't send here -
// they must either disconnect and retry or request the full block.
// Thus, the protocol spec specified allows for us to provide duplicate txn here,
// however we MUST always provide at least what the remote peer needs
typedef std::pair<unsigned int, uint256> PairType;
BOOST_FOREACH(PairType& pair, merkleBlock.vMatchedTxn)
connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::TX, *block.vtx[pair.first]));
}
// else
// no response
}
else if (inv.type == MSG_CMPCT_BLOCK)
{
// If a peer is asking for old blocks, we're almost guaranteed
// they won't have a useful mempool to match against a compact block,
// and we don't feel like constructing the object for them, so
// instead we respond with the full, non-compact block.
if (CanDirectFetch(consensusParams) && mi->second->nHeight >= chainActive.Height() - MAX_CMPCTBLOCK_DEPTH) {
CBlockHeaderAndShortTxIDs cmpctblock(block);
connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::CMPCTBLOCK, cmpctblock));
} else
connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::BLOCK, block));
}

// Trigger the peer node to send a getblocks request for the next batch of inventory
if (inv.hash == pfrom->hashContinue)
{
// Bypass PushInventory, this must send even if redundant,
// and we want it right after the last block so they don't
// wait for other stuff first.
std::vector<CInv> vInv;
vInv.push_back(CInv(MSG_BLOCK, chainActive.Tip()->GetBlockHash()));
connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::INV, vInv));
pfrom->hashContinue.SetNull();
// Send stream from relay memory
bool push = false;
// Only serve MSG_TX from mapRelay.
// Otherwise we may send out a normal TX instead of a IX
if (inv.type == MSG_TX) {
auto mi = mapRelay.find(inv.hash);
if (mi != mapRelay.end()) {
connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::TX, *mi->second));
push = true;
} else if (pfrom->timeLastMempoolReq) {
auto txinfo = mempool.info(inv.hash);
// To protect privacy, do not answer getdata using the mempool when
// that TX couldn't have been INVed in reply to a MEMPOOL request.
if (txinfo.tx && txinfo.nTime <= pfrom->timeLastMempoolReq) {
connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::TX, *txinfo.tx));
push = true;
}
}
}
else if (inv.IsKnownType())
{
// Send stream from relay memory
bool push = false;
// Only serve MSG_TX from mapRelay.
// Otherwise we may send out a normal TX instead of a IX
if (inv.type == MSG_TX) {
auto mi = mapRelay.find(inv.hash);
if (mi != mapRelay.end()) {
connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::TX, *mi->second));
push = true;
} else if (pfrom->timeLastMempoolReq) {
auto txinfo = mempool.info(inv.hash);
// To protect privacy, do not answer getdata using the mempool when
// that TX couldn't have been INVed in reply to a MEMPOOL request.
if (txinfo.tx && txinfo.nTime <= pfrom->timeLastMempoolReq) {
connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::TX, *txinfo.tx));
push = true;
}
}
}

if (!push && inv.type == MSG_TXLOCK_REQUEST) {
CTxLockRequest txLockRequest;
if(instantsend.GetTxLockRequest(inv.hash, txLockRequest)) {
connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::TXLOCKREQUEST, txLockRequest));
push = true;
}
if (!push && inv.type == MSG_TXLOCK_REQUEST) {
CTxLockRequest txLockRequest;
if(instantsend.GetTxLockRequest(inv.hash, txLockRequest)) {
connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::TXLOCKREQUEST, txLockRequest));
push = true;
}

if (!push && inv.type == MSG_TXLOCK_VOTE) {
CTxLockVote vote;
if(instantsend.GetTxLockVote(inv.hash, vote)) {
connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::TXLOCKVOTE, vote));
push = true;
}
}

if (!push && inv.type == MSG_SPORK) {
CSporkMessage spork;
if(sporkManager.GetSporkByHash(inv.hash, spork)) {
connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::SPORK, spork));
push = true;
}
}

if (!push && inv.type == MSG_DSTX) {
CPrivateSendBroadcastTx dstx = CPrivateSend::GetDSTX(inv.hash);
if(dstx) {
connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::DSTX, dstx));
push = true;
}
}

if (!push && inv.type == MSG_GOVERNANCE_OBJECT) {
LogPrint("net", "ProcessGetData -- MSG_GOVERNANCE_OBJECT: inv = %s\n", inv.ToString());
CDataStream ss(SER_NETWORK, pfrom->GetSendVersion());
bool topush = false;
{
if(governance.HaveObjectForHash(inv.hash)) {
ss.reserve(1000);
if(governance.SerializeObjectForHash(inv.hash, ss)) {
topush = true;
}
}
}
LogPrint("net", "ProcessGetData -- MSG_GOVERNANCE_OBJECT: topush = %d, inv = %s\n", topush, inv.ToString());
if(topush) {
connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::MNGOVERNANCEOBJECT, ss));
push = true;
}
}

if (!push && inv.type == MSG_GOVERNANCE_OBJECT_VOTE) {
CDataStream ss(SER_NETWORK, pfrom->GetSendVersion());
bool topush = false;
{
if(governance.HaveVoteForHash(inv.hash)) {
ss.reserve(1000);
if(governance.SerializeVoteForHash(inv.hash, ss)) {
topush = true;
}
}
}
if(topush) {
LogPrint("net", "ProcessGetData -- pushing: inv = %s\n", inv.ToString());
connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::MNGOVERNANCEOBJECTVOTE, ss));
push = true;
}
}

if (!push && (inv.type == MSG_QUORUM_FINAL_COMMITMENT)) {
llmq::CFinalCommitment o;
if (llmq::quorumBlockProcessor->GetMinableCommitmentByHash(inv.hash, o)) {
connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::QFCOMMITMENT, o));
push = true;
}
}

if (!push && (inv.type == MSG_QUORUM_CONTRIB)) {
llmq::CDKGContribution o;
if (llmq::quorumDKGSessionManager->GetContribution(inv.hash, o)) {
connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::QCONTRIB, o));
push = true;
}
}
if (!push && (inv.type == MSG_QUORUM_COMPLAINT)) {
llmq::CDKGComplaint o;
if (llmq::quorumDKGSessionManager->GetComplaint(inv.hash, o)) {
connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::QCOMPLAINT, o));
push = true;
}
}
if (!push && (inv.type == MSG_QUORUM_JUSTIFICATION)) {
llmq::CDKGJustification o;
if (llmq::quorumDKGSessionManager->GetJustification(inv.hash, o)) {
connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::QJUSTIFICATION, o));
push = true;
}
}
if (!push && (inv.type == MSG_QUORUM_PREMATURE_COMMITMENT)) {
llmq::CDKGPrematureCommitment o;
if (llmq::quorumDKGSessionManager->GetPrematureCommitment(inv.hash, o)) {
connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::QPCOMMITMENT, o));
push = true;
}
}
if (!push && (inv.type == MSG_QUORUM_DEBUG_STATUS)) {
llmq::CDKGDebugStatus o;
if (llmq::quorumDKGDebugManager->GetDebugStatus(inv.hash, o)) {
connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::QDEBUGSTATUS, o));
push = true;
}
}
if (!push && (inv.type == MSG_QUORUM_RECOVERED_SIG)) {
llmq::CRecoveredSig o;
if (llmq::quorumSigningManager->GetRecoveredSigForGetData(inv.hash, o)) {
connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::QSIGREC, o));
push = true;
}
}

if (!push && (inv.type == MSG_CLSIG)) {
llmq::CChainLockSig o;
if (llmq::chainLocksHandler->GetChainLockByHash(inv.hash, o)) {
connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::CLSIG, o));
push = true;
}
}

if (!push && (inv.type == MSG_ISLOCK)) {
llmq::CInstantSendLock o;
if (llmq::quorumInstantSendManager->GetInstantSendLockByHash(inv.hash, o)) {
connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::ISLOCK, o));
push = true;
}
}

if (!push)
vNotFound.push_back(inv);
}

if (!push && inv.type == MSG_TXLOCK_VOTE) {
CTxLockVote vote;
if(instantsend.GetTxLockVote(inv.hash, vote)) {
connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::TXLOCKVOTE, vote));
push = true;
}
}

if (!push && inv.type == MSG_SPORK) {
CSporkMessage spork;
if(sporkManager.GetSporkByHash(inv.hash, spork)) {
connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::SPORK, spork));
push = true;
}
}

if (!push && inv.type == MSG_DSTX) {
CPrivateSendBroadcastTx dstx = CPrivateSend::GetDSTX(inv.hash);
if(dstx) {
connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::DSTX, dstx));
push = true;
}
}

if (!push && inv.type == MSG_GOVERNANCE_OBJECT) {
LogPrint("net", "ProcessGetData -- MSG_GOVERNANCE_OBJECT: inv = %s\n", inv.ToString());
CDataStream ss(SER_NETWORK, pfrom->GetSendVersion());
bool topush = false;
{
if(governance.HaveObjectForHash(inv.hash)) {
ss.reserve(1000);
if(governance.SerializeObjectForHash(inv.hash, ss)) {
topush = true;
}
}
}
LogPrint("net", "ProcessGetData -- MSG_GOVERNANCE_OBJECT: topush = %d, inv = %s\n", topush, inv.ToString());
if(topush) {
connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::MNGOVERNANCEOBJECT, ss));
push = true;
}
}

if (!push && inv.type == MSG_GOVERNANCE_OBJECT_VOTE) {
CDataStream ss(SER_NETWORK, pfrom->GetSendVersion());
bool topush = false;
{
if(governance.HaveVoteForHash(inv.hash)) {
ss.reserve(1000);
if(governance.SerializeVoteForHash(inv.hash, ss)) {
topush = true;
}
}
}
if(topush) {
LogPrint("net", "ProcessGetData -- pushing: inv = %s\n", inv.ToString());
connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::MNGOVERNANCEOBJECTVOTE, ss));
push = true;
}
}

if (!push && (inv.type == MSG_QUORUM_FINAL_COMMITMENT)) {
llmq::CFinalCommitment o;
if (llmq::quorumBlockProcessor->GetMinableCommitmentByHash(inv.hash, o)) {
connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::QFCOMMITMENT, o));
push = true;
}
}

if (!push && (inv.type == MSG_QUORUM_CONTRIB)) {
llmq::CDKGContribution o;
if (llmq::quorumDKGSessionManager->GetContribution(inv.hash, o)) {
connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::QCONTRIB, o));
push = true;
}
}
if (!push && (inv.type == MSG_QUORUM_COMPLAINT)) {
llmq::CDKGComplaint o;
if (llmq::quorumDKGSessionManager->GetComplaint(inv.hash, o)) {
connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::QCOMPLAINT, o));
push = true;
}
}
if (!push && (inv.type == MSG_QUORUM_JUSTIFICATION)) {
llmq::CDKGJustification o;
if (llmq::quorumDKGSessionManager->GetJustification(inv.hash, o)) {
connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::QJUSTIFICATION, o));
push = true;
}
}
if (!push && (inv.type == MSG_QUORUM_PREMATURE_COMMITMENT)) {
llmq::CDKGPrematureCommitment o;
if (llmq::quorumDKGSessionManager->GetPrematureCommitment(inv.hash, o)) {
connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::QPCOMMITMENT, o));
push = true;
}
}
if (!push && (inv.type == MSG_QUORUM_DEBUG_STATUS)) {
llmq::CDKGDebugStatus o;
if (llmq::quorumDKGDebugManager->GetDebugStatus(inv.hash, o)) {
connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::QDEBUGSTATUS, o));
push = true;
}
}
if (!push && (inv.type == MSG_QUORUM_RECOVERED_SIG)) {
llmq::CRecoveredSig o;
if (llmq::quorumSigningManager->GetRecoveredSigForGetData(inv.hash, o)) {
connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::QSIGREC, o));
push = true;
}
}

if (!push && (inv.type == MSG_CLSIG)) {
llmq::CChainLockSig o;
if (llmq::chainLocksHandler->GetChainLockByHash(inv.hash, o)) {
connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::CLSIG, o));
push = true;
}
}

if (!push && (inv.type == MSG_ISLOCK)) {
llmq::CInstantSendLock o;
if (llmq::quorumInstantSendManager->GetInstantSendLockByHash(inv.hash, o)) {
connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::ISLOCK, o));
push = true;
}
}

if (!push)
vNotFound.push_back(inv);

// Track requests for our stuff.
GetMainSignals().Inventory(inv.hash);
}
} // release cs_main

if (inv.type == MSG_BLOCK || inv.type == MSG_FILTERED_BLOCK || inv.type == MSG_CMPCT_BLOCK)
break;
if (it != pfrom->vRecvGetData.end()) {
const CInv &inv = *it;
it++;
if (inv.type == MSG_BLOCK || inv.type == MSG_FILTERED_BLOCK || inv.type == MSG_CMPCT_BLOCK) {
ProcessGetBlockData(pfrom, consensusParams, inv, connman, interruptMsgProc);
}
}
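The MSG_TX branch above only answers from the mempool when the transaction could already have been announced in reply to the peer's last mempool request. A hedged Python sketch of that privacy rule, with simplified, illustrative inputs:

# Illustrative sketch of the mempool-privacy check quoted in the comments above.
def may_answer_tx_getdata_from_mempool(in_relay_map, tx_in_mempool,
                                       tx_entry_time, time_last_mempool_req):
    # Transactions already in the relay map are always fair game.
    if in_relay_map:
        return True
    # Otherwise only reveal a mempool transaction if it could have been INVed
    # in reply to the peer's last "mempool" request.
    return bool(tx_in_mempool and time_last_mempool_req
                and tx_entry_time <= time_last_mempool_req)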
@@ -1924,7 +1984,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
inv.type = MSG_BLOCK;
inv.hash = req.blockhash;
pfrom->vRecvGetData.push_back(inv);
ProcessGetData(pfrom, chainparams.GetConsensus(), connman, interruptMsgProc);
// The message processing loop will go around again (without pausing) and we'll respond then (without cs_main)
return true;
}

@@ -1957,6 +2017,11 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
if (mi == mapBlockIndex.end())
return true;
pindex = (*mi).second;

if (!BlockRequestAllowed(pindex, chainparams.GetConsensus())) {
LogPrintf("%s: ignoring request from peer=%i for old block header that isn't in the main chain\n", __func__, pfrom->GetId());
return true;
}
}
else
{

@@ -2081,7 +2146,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
mmetaman.DisallowMixing(dmn->proTxHash);
}

LOCK(cs_main);
LOCK2(cs_main, g_cs_orphans);

bool fMissingInputs = false;
CValidationState state;

@@ -2308,7 +2373,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
bool fBlockReconstructed = false;

{
LOCK(cs_main);
LOCK2(cs_main, g_cs_orphans);
// If AcceptBlockHeader returned true, it set pindex
assert(pindex);
UpdateBlockAvailability(pfrom->GetId(), pindex->GetBlockHash());

@@ -3348,8 +3413,7 @@ bool SendMessages(CNode* pto, CConnman& connman, const std::atomic<bool>& interr
{
LOCK(cs_most_recent_block);
if (most_recent_block_hash == pBestIndex->GetBlockHash()) {
CBlockHeaderAndShortTxIDs cmpctblock(*most_recent_block);
connman.PushMessage(pto, msgMaker.Make(NetMsgType::CMPCTBLOCK, cmpctblock));
connman.PushMessage(pto, msgMaker.Make(NetMsgType::CMPCTBLOCK, *most_recent_compact_block));
fGotBlockFromCache = true;
}
}
src/sync.cpp (10 lines changed)
@@ -162,6 +162,16 @@ void AssertLockHeldInternal(const char* pszName, const char* pszFile, int nLine,
    abort();
}

void AssertLockNotHeldInternal(const char* pszName, const char* pszFile, int nLine, void* cs)
{
    for (const std::pair<void*, CLockLocation>& i : *lockstack) {
        if (i.first == cs) {
            fprintf(stderr, "Assertion failed: lock %s held in %s:%i; locks held:\n%s", pszName, pszFile, nLine, LocksHeld().c_str());
            abort();
        }
    }
}

void DeleteLock(void* cs)
{
    if (!lockdata.available) {
@@ -76,14 +76,17 @@ void EnterCritical(const char* pszName, const char* pszFile, int nLine, void* cs
void LeaveCritical();
std::string LocksHeld();
void AssertLockHeldInternal(const char* pszName, const char* pszFile, int nLine, void* cs);
void AssertLockNotHeldInternal(const char* pszName, const char* pszFile, int nLine, void* cs);
void DeleteLock(void* cs);
#else
void static inline EnterCritical(const char* pszName, const char* pszFile, int nLine, void* cs, bool fTry = false) {}
void static inline LeaveCritical() {}
void static inline AssertLockHeldInternal(const char* pszName, const char* pszFile, int nLine, void* cs) {}
void static inline AssertLockNotHeldInternal(const char* pszName, const char* pszFile, int nLine, void* cs) {}
void static inline DeleteLock(void* cs) {}
#endif
#define AssertLockHeld(cs) AssertLockHeldInternal(#cs, __FILE__, __LINE__, &cs)
#define AssertLockNotHeld(cs) AssertLockNotHeldInternal(#cs, __FILE__, __LINE__, &cs)

/**
 * Wrapped boost mutex: supports recursive locking, but no waiting
@@ -210,7 +210,6 @@ BOOST_AUTO_TEST_CASE(CreateNewBlock_validity)
entry.dPriority = 111.0;
entry.nHeight = 11;

LOCK(cs_main);
fCheckpointsEnabled = false;

// Simple block creation, nothing special yet:

@@ -224,28 +223,30 @@ BOOST_AUTO_TEST_CASE(CreateNewBlock_validity)
auto createAndProcessEmptyBlock = [&]() {
int i = chainActive.Height();
CBlock *pblock = &pemptyblocktemplate->block; // pointer for convenience
pblock->nVersion = 2;
pblock->nTime = chainActive.Tip()->GetMedianTimePast()+1;
CMutableTransaction txCoinbase(*pblock->vtx[0]);
txCoinbase.nVersion = 1;
txCoinbase.vin[0].scriptSig = CScript() << (chainActive.Height() + 1);
txCoinbase.vin[0].scriptSig.push_back(blockinfo[i].extranonce);
txCoinbase.vin[0].scriptSig.push_back(chainActive.Height());
txCoinbase.vout[0].scriptPubKey = CScript();
pblock->vtx[0] = MakeTransactionRef(std::move(txCoinbase));
if (txFirst.size() == 0)
baseheight = chainActive.Height();
if (txFirst.size() < 4)
txFirst.push_back(pblock->vtx[0]);
pblock->hashMerkleRoot = BlockMerkleRoot(*pblock);
pblock->nNonce = blockinfo[i].nonce;
{
LOCK(cs_main);
pblock->nVersion = 2;
pblock->nTime = chainActive.Tip()->GetMedianTimePast()+1;
CMutableTransaction txCoinbase(*pblock->vtx[0]);
txCoinbase.nVersion = 1;
txCoinbase.vin[0].scriptSig = CScript() << (chainActive.Height() + 1);
txCoinbase.vin[0].scriptSig.push_back(blockinfo[i].extranonce);
txCoinbase.vin[0].scriptSig.push_back(chainActive.Height());
txCoinbase.vout[0].scriptPubKey = CScript();
pblock->vtx[0] = MakeTransactionRef(std::move(txCoinbase));
if (txFirst.size() == 0)
baseheight = chainActive.Height();
if (txFirst.size() < 4)
txFirst.push_back(pblock->vtx[0]);
pblock->hashMerkleRoot = BlockMerkleRoot(*pblock);
pblock->nNonce = blockinfo[i].nonce;

// This will usually succeed in the first round as we take the nonce from blockinfo
// It's however useful when adding new blocks with unknown nonces (you should add the found block to blockinfo)
while (!CheckProofOfWork(pblock->GetHash(), pblock->nBits, chainparams.GetConsensus())) {
pblock->nNonce++;
// This will usually succeed in the first round as we take the nonce from blockinfo
// It's however useful when adding new blocks with unknown nonces (you should add the found block to blockinfo)
while (!CheckProofOfWork(pblock->GetHash(), pblock->nBits, chainparams.GetConsensus())) {
pblock->nNonce++;
}
}

std::shared_ptr<const CBlock> shared_pblock = std::make_shared<const CBlock>(*pblock);
BOOST_CHECK(ProcessNewBlock(chainparams, shared_pblock, true, NULL));
pblock->hashPrevBlock = pblock->GetHash();

@@ -256,6 +257,9 @@ BOOST_AUTO_TEST_CASE(CreateNewBlock_validity)
createAndProcessEmptyBlock();
}

{
LOCK(cs_main);

// Just to make sure we can still make simple blocks
BOOST_CHECK(pblocktemplate = AssemblerForTest(chainparams).CreateNewBlock(scriptPubKey));

@@ -504,9 +508,13 @@ BOOST_AUTO_TEST_CASE(CreateNewBlock_validity)
for (int i = 0; i < CBlockIndex::nMedianTimeSpan; i++)
chainActive.Tip()->GetAncestor(chainActive.Tip()->nHeight - i)->nTime += 512; //Trick the MedianTimePast

} // unlock cs_main while calling createAndProcessEmptyBlock

// Mine an empty block
createAndProcessEmptyBlock();

LOCK(cs_main);

SetMockTime(chainActive.Tip()->GetMedianTimePast() + 1);

BOOST_CHECK(pblocktemplate = AssemblerForTest(chainparams).CreateNewBlock(scriptPubKey));
@@ -2886,6 +2886,7 @@ bool ActivateBestChain(CValidationState &state, const CChainParams& chainparams,
// far from a guarantee. Things in the P2P/RPC will often end up calling
// us in the middle of ProcessNewBlock - do not assume pblock is set
// sanely for performance or correctness!
AssertLockNotHeld(cs_main);

CBlockIndex *pindexMostWork = NULL;
CBlockIndex *pindexNewTip = NULL;

@@ -3599,6 +3600,8 @@ static bool AcceptBlock(const std::shared_ptr<const CBlock>& pblock, CValidation

bool ProcessNewBlock(const CChainParams& chainparams, const std::shared_ptr<const CBlock> pblock, bool fForceProcessing, bool *fNewBlock)
{
AssertLockNotHeld(cs_main);

{
CBlockIndex *pindex = NULL;
if (fNewBlock) *fNewBlock = false;
@@ -363,14 +363,14 @@ BOOST_AUTO_TEST_CASE(ApproximateBestSubset)

BOOST_FIXTURE_TEST_CASE(rescan, TestChain100Setup)
{
LOCK(cs_main);

// Cap last block file size, and mine new block in a new block file.
CBlockIndex* oldTip = chainActive.Tip();
GetBlockFileInfo(oldTip->GetBlockPos().nFile)->nSize = MAX_BLOCKFILE_SIZE;
CreateAndProcessBlock({}, GetScriptForRawPubKey(coinbaseKey.GetPubKey()));
CBlockIndex* newTip = chainActive.Tip();

LOCK(cs_main);

// Verify ScanForWalletTransactions picks up transactions in both the old
// and new block files.
{

@@ -435,8 +435,6 @@ BOOST_FIXTURE_TEST_CASE(rescan, TestChain100Setup)
BOOST_FIXTURE_TEST_CASE(importwallet_rescan, TestChain100Setup)
{
CWallet *pwalletMainBackup = ::pwalletMain;
LOCK(cs_main);

// Create two blocks with same timestamp to verify that importwallet rescan
// will pick up both blocks, not just the first.
const int64_t BLOCK_TIME = chainActive.Tip()->GetBlockTimeMax() + 5;

@@ -450,6 +448,8 @@ BOOST_FIXTURE_TEST_CASE(importwallet_rescan, TestChain100Setup)
SetMockTime(KEY_TIME);
coinbaseTxns.emplace_back(*CreateAndProcessBlock({}, GetScriptForRawPubKey(coinbaseKey.GetPubKey())).vtx[0]);

LOCK(cs_main);

// Import key into wallet and call dumpwallet to create backup file.
{
CWallet wallet;