Merge pull request #1911 from codablock/pr_backports_from_cmptblk

Backports from compact-block-related Bitcoin PRs
commit 6825b347f2 by UdjinM6, 2018-02-09 13:08:03 +03:00 (committed by GitHub)
15 changed files with 200 additions and 182 deletions


@ -399,7 +399,7 @@ class FullBlockTest(ComparisonTestFramework):
# Extend the b26 chain to make sure bitcoind isn't accepting b26
b27 = block(27, spend=out[7])
yield rejected(RejectResult(16, b'bad-prevblk'))
yield rejected(RejectResult(0, b'bad-prevblk'))
# Now try a too-large-coinbase script
tip(15)
@ -411,7 +411,7 @@ class FullBlockTest(ComparisonTestFramework):
# Extend the b28 chain to make sure bitcoind isn't accepting b28
b29 = block(29, spend=out[7])
yield rejected(RejectResult(16, b'bad-prevblk'))
yield rejected(RejectResult(0, b'bad-prevblk'))
# b30 has a max-sized coinbase scriptSig.
tip(23)


@ -102,7 +102,7 @@ class PreciousTest(BitcoinTestFramework):
assert_equal(self.nodes[2].getblockcount(), 6)
hashL = self.nodes[2].getbestblockhash()
print("Connect nodes and check no reorg occurs")
node_sync_via_rpc(self.nodes[0:3])
node_sync_via_rpc(self.nodes[1:3])
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,0,2)
assert_equal(self.nodes[0].getbestblockhash(), hashH)


@ -80,20 +80,19 @@ e. Announce one more that doesn't connect.
Expect: disconnect.
'''
class BaseNode(NodeConnCB):
direct_fetch_response_time = 0.05
class BaseNode(SingleNodeConnCB):
def __init__(self):
NodeConnCB.__init__(self)
self.connection = None
SingleNodeConnCB.__init__(self)
self.last_inv = None
self.last_headers = None
self.last_block = None
self.ping_counter = 1
self.last_pong = msg_pong(0)
self.last_getdata = None
self.sleep_time = 0.05
self.block_announced = False
self.last_getheaders = None
self.disconnected = False
self.last_blockhash_announced = None
def clear_last_announcement(self):
with mininode_lock:
@ -101,9 +100,6 @@ class BaseNode(NodeConnCB):
self.last_inv = None
self.last_headers = None
def add_connection(self, conn):
self.connection = conn
# Request data for a list of block hashes
def get_data(self, block_hashes):
msg = msg_getdata()
@ -122,17 +118,17 @@ class BaseNode(NodeConnCB):
msg.inv = [CInv(2, blockhash)]
self.connection.send_message(msg)
# Wrapper for the NodeConn's send_message function
def send_message(self, message):
self.connection.send_message(message)
def on_inv(self, conn, message):
self.last_inv = message
self.block_announced = True
self.last_blockhash_announced = message.inv[-1].hash
def on_headers(self, conn, message):
self.last_headers = message
self.block_announced = True
if len(message.headers):
self.block_announced = True
message.headers[-1].calc_sha256()
self.last_blockhash_announced = message.headers[-1].sha256
def on_block(self, conn, message):
self.last_block = message.block
@ -141,9 +137,6 @@ class BaseNode(NodeConnCB):
def on_getdata(self, conn, message):
self.last_getdata = message
def on_pong(self, conn, message):
self.last_pong = message
def on_getheaders(self, conn, message):
self.last_getheaders = message
@ -157,7 +150,7 @@ class BaseNode(NodeConnCB):
expect_headers = headers if headers != None else []
expect_inv = inv if inv != None else []
test_function = lambda: self.block_announced
self.sync(test_function)
assert(wait_until(test_function, timeout=60))
with mininode_lock:
self.block_announced = False
@ -180,30 +173,14 @@ class BaseNode(NodeConnCB):
return success
# Syncing helpers
def sync(self, test_function, timeout=60):
while timeout > 0:
with mininode_lock:
if test_function():
return
time.sleep(self.sleep_time)
timeout -= self.sleep_time
raise AssertionError("Sync failed to complete")
def sync_with_ping(self, timeout=60):
self.send_message(msg_ping(nonce=self.ping_counter))
test_function = lambda: self.last_pong.nonce == self.ping_counter
self.sync(test_function, timeout)
self.ping_counter += 1
return
def wait_for_block(self, blockhash, timeout=60):
test_function = lambda: self.last_block != None and self.last_block.sha256 == blockhash
self.sync(test_function, timeout)
assert(wait_until(test_function, timeout=timeout))
return
def wait_for_getheaders(self, timeout=60):
test_function = lambda: self.last_getheaders != None
self.sync(test_function, timeout)
assert(wait_until(test_function, timeout=timeout))
return
def wait_for_getdata(self, hash_list, timeout=60):
@ -211,12 +188,17 @@ class BaseNode(NodeConnCB):
return
test_function = lambda: self.last_getdata != None and [x.hash for x in self.last_getdata.inv] == hash_list
self.sync(test_function, timeout)
assert(wait_until(test_function, timeout=timeout))
return
def wait_for_disconnect(self, timeout=60):
test_function = lambda: self.disconnected
self.sync(test_function, timeout)
assert(wait_until(test_function, timeout=timeout))
return
def wait_for_block_announcement(self, block_hash, timeout=60):
test_function = lambda: self.last_blockhash_announced == block_hash
assert(wait_until(test_function, timeout=timeout))
return
def send_header_for_blocks(self, new_blocks):
@ -266,7 +248,9 @@ class SendHeadersTest(BitcoinTestFramework):
def mine_reorg(self, length):
self.nodes[0].generate(length) # make sure all invalidated blocks are node0's
sync_blocks(self.nodes, wait=0.1)
[x.clear_last_announcement() for x in self.p2p_connections]
for x in self.p2p_connections:
x.wait_for_block_announcement(int(self.nodes[0].getbestblockhash(), 16))
x.clear_last_announcement()
tip_height = self.nodes[1].getblockcount()
hash_to_invalidate = self.nodes[1].getblockhash(tip_height-(length-1))
@ -494,7 +478,7 @@ class SendHeadersTest(BitcoinTestFramework):
test_node.send_header_for_blocks(blocks)
test_node.sync_with_ping()
test_node.wait_for_getdata([x.sha256 for x in blocks], timeout=test_node.sleep_time)
test_node.wait_for_getdata([x.sha256 for x in blocks], timeout=direct_fetch_response_time)
[ test_node.send_message(msg_block(x)) for x in blocks ]
@ -525,13 +509,13 @@ class SendHeadersTest(BitcoinTestFramework):
# both blocks (same work as tip)
test_node.send_header_for_blocks(blocks[1:2])
test_node.sync_with_ping()
test_node.wait_for_getdata([x.sha256 for x in blocks[0:2]], timeout=test_node.sleep_time)
test_node.wait_for_getdata([x.sha256 for x in blocks[0:2]], timeout=direct_fetch_response_time)
# Announcing 16 more headers should trigger direct fetch for 14 more
# blocks
test_node.send_header_for_blocks(blocks[2:18])
test_node.sync_with_ping()
test_node.wait_for_getdata([x.sha256 for x in blocks[2:16]], timeout=test_node.sleep_time)
test_node.wait_for_getdata([x.sha256 for x in blocks[2:16]], timeout=direct_fetch_response_time)
# Announcing 1 more header should not trigger any response
test_node.last_getdata = None
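
Note on the wait_until conversions above: the test drops BaseNode's hand-rolled sync() polling loop and standardizes on the shared wait_until helper, asserting on its boolean result. A minimal sketch of such a polling helper follows; this is only an assumption about its shape for illustration, and the framework's real wait_until (in the test util module) may take extra parameters such as an attempt budget.

import time

def wait_until(predicate, timeout=60, poll_interval=0.05):
    # Poll the predicate until it returns True or the timeout expires.
    # Returns True on success and False on timeout, so callers can wrap
    # it in assert(...) exactly as the updated tests do.
    deadline = time.time() + timeout
    while time.time() < deadline:
        if predicate():
            return True
        time.sleep(poll_interval)
    return False

# Usage mirroring the diff, e.g.:
# assert(wait_until(lambda: self.last_getheaders != None, timeout=60))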


@ -77,7 +77,19 @@ def hash256(s):
def dashhash(s):
return dash_hash.getPoWHash(s)
def deser_string(f):
def ser_compact_size(l):
r = b""
if l < 253:
r = struct.pack("B", l)
elif l < 0x10000:
r = struct.pack("<BH", 253, l)
elif l < 0x100000000:
r = struct.pack("<BI", 254, l)
else:
r = struct.pack("<BQ", 255, l)
return r
def deser_compact_size(f):
nit = struct.unpack("<B", f.read(1))[0]
if nit == 253:
nit = struct.unpack("<H", f.read(2))[0]
@ -85,16 +97,14 @@ def deser_string(f):
nit = struct.unpack("<I", f.read(4))[0]
elif nit == 255:
nit = struct.unpack("<Q", f.read(8))[0]
return nit
def deser_string(f):
nit = deser_compact_size(f)
return f.read(nit)
def ser_string(s):
if len(s) < 253:
return struct.pack("B", len(s)) + s
elif len(s) < 0x10000:
return struct.pack("<BH", 253, len(s)) + s
elif len(s) < 0x100000000:
return struct.pack("<BI", 254, len(s)) + s
return struct.pack("<BQ", 255, len(s)) + s
return ser_compact_size(len(s)) + s
def deser_uint256(f):
r = 0
@ -127,13 +137,7 @@ def uint256_from_compact(c):
def deser_vector(f, c):
nit = struct.unpack("<B", f.read(1))[0]
if nit == 253:
nit = struct.unpack("<H", f.read(2))[0]
elif nit == 254:
nit = struct.unpack("<I", f.read(4))[0]
elif nit == 255:
nit = struct.unpack("<Q", f.read(8))[0]
nit = deser_compact_size(f)
r = []
for i in range(nit):
t = c()
@ -143,28 +147,14 @@ def deser_vector(f, c):
def ser_vector(l):
r = b""
if len(l) < 253:
r = struct.pack("B", len(l))
elif len(l) < 0x10000:
r = struct.pack("<BH", 253, len(l))
elif len(l) < 0x100000000:
r = struct.pack("<BI", 254, len(l))
else:
r = struct.pack("<BQ", 255, len(l))
r = ser_compact_size(len(l))
for i in l:
r += i.serialize()
return r
def deser_uint256_vector(f):
nit = struct.unpack("<B", f.read(1))[0]
if nit == 253:
nit = struct.unpack("<H", f.read(2))[0]
elif nit == 254:
nit = struct.unpack("<I", f.read(4))[0]
elif nit == 255:
nit = struct.unpack("<Q", f.read(8))[0]
nit = deser_compact_size(f)
r = []
for i in range(nit):
t = deser_uint256(f)
@ -173,28 +163,14 @@ def deser_uint256_vector(f):
def ser_uint256_vector(l):
r = b""
if len(l) < 253:
r = struct.pack("B", len(l))
elif len(l) < 0x10000:
r = struct.pack("<BH", 253, len(l))
elif len(l) < 0x100000000:
r = struct.pack("<BI", 254, len(l))
else:
r = struct.pack("<BQ", 255, len(l))
r = ser_compact_size(len(l))
for i in l:
r += ser_uint256(i)
return r
def deser_string_vector(f):
nit = struct.unpack("<B", f.read(1))[0]
if nit == 253:
nit = struct.unpack("<H", f.read(2))[0]
elif nit == 254:
nit = struct.unpack("<I", f.read(4))[0]
elif nit == 255:
nit = struct.unpack("<Q", f.read(8))[0]
nit = deser_compact_size(f)
r = []
for i in range(nit):
t = deser_string(f)
@ -203,28 +179,14 @@ def deser_string_vector(f):
def ser_string_vector(l):
r = b""
if len(l) < 253:
r = struct.pack("B", len(l))
elif len(l) < 0x10000:
r = struct.pack("<BH", 253, len(l))
elif len(l) < 0x100000000:
r = struct.pack("<BI", 254, len(l))
else:
r = struct.pack("<BQ", 255, len(l))
r = ser_compact_size(len(l))
for sv in l:
r += ser_string(sv)
return r
def deser_int_vector(f):
nit = struct.unpack("<B", f.read(1))[0]
if nit == 253:
nit = struct.unpack("<H", f.read(2))[0]
elif nit == 254:
nit = struct.unpack("<I", f.read(4))[0]
elif nit == 255:
nit = struct.unpack("<Q", f.read(8))[0]
nit = deser_compact_size(f)
r = []
for i in range(nit):
t = struct.unpack("<i", f.read(4))[0]
@ -233,15 +195,7 @@ def deser_int_vector(f):
def ser_int_vector(l):
r = b""
if len(l) < 253:
r = struct.pack("B", len(l))
elif len(l) < 0x10000:
r = struct.pack("<BH", 253, len(l))
elif len(l) < 0x100000000:
r = struct.pack("<BI", 254, len(l))
else:
r = struct.pack("<BQ", 255, len(l))
r = ser_compact_size(len(l))
for i in l:
r += struct.pack("<i", i)
return r
@ -1191,7 +1145,7 @@ class SingleNodeConnCB(NodeConnCB):
def received_pong():
return (self.last_pong.nonce == self.ping_counter)
self.send_message(msg_ping(nonce=self.ping_counter))
success = wait_until(received_pong, timeout)
success = wait_until(received_pong, timeout=timeout)
self.ping_counter += 1
return success
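
The mininode.py refactor above centralizes Bitcoin's CompactSize (variable-length integer) encoding in ser_compact_size/deser_compact_size and has the string and vector (de)serializers reuse them. A small self-contained round-trip check of that encoding, built from the same helpers shown in the diff:

import struct
from io import BytesIO

def ser_compact_size(l):
    # CompactSize: one byte for values < 253, otherwise a marker byte
    # (253, 254 or 255) followed by a 2-, 4- or 8-byte little-endian integer.
    if l < 253:
        return struct.pack("B", l)
    elif l < 0x10000:
        return struct.pack("<BH", 253, l)
    elif l < 0x100000000:
        return struct.pack("<BI", 254, l)
    return struct.pack("<BQ", 255, l)

def deser_compact_size(f):
    nit = struct.unpack("<B", f.read(1))[0]
    if nit == 253:
        nit = struct.unpack("<H", f.read(2))[0]
    elif nit == 254:
        nit = struct.unpack("<I", f.read(4))[0]
    elif nit == 255:
        nit = struct.unpack("<Q", f.read(8))[0]
    return nit

for n in (0, 252, 253, 0xFFFF, 0x10000, 0xFFFFFFFF, 0x100000000):
    assert deser_compact_size(BytesIO(ser_compact_size(n))) == n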


@ -624,6 +624,12 @@ public:
// Regtest Dash BIP44 coin type is '1' (All coin's testnet default)
nExtCoinType = 1;
}
void UpdateBIP9Parameters(Consensus::DeploymentPos d, int64_t nStartTime, int64_t nTimeout)
{
consensus.vDeployments[d].nStartTime = nStartTime;
consensus.vDeployments[d].nTimeout = nTimeout;
}
};
static CRegTestParams regTestParams;
@ -658,3 +664,8 @@ void SelectParams(const std::string& network)
SelectBaseParams(network);
pCurrentParams = &Params(network);
}
void UpdateRegtestBIP9Parameters(Consensus::DeploymentPos d, int64_t nStartTime, int64_t nTimeout)
{
regTestParams.UpdateBIP9Parameters(d, nStartTime, nTimeout);
}


@ -133,4 +133,9 @@ CChainParams& Params(const std::string& chain);
*/
void SelectParams(const std::string& chain);
/**
* Allows modifying the BIP9 regtest parameters.
*/
void UpdateRegtestBIP9Parameters(Consensus::DeploymentPos d, int64_t nStartTime, int64_t nTimeout);
#endif // BITCOIN_CHAINPARAMS_H


@ -505,6 +505,7 @@ std::string HelpMessage(HelpMessageMode mode)
strUsage += HelpMessageOpt("-limitancestorsize=<n>", strprintf("Do not accept transactions whose size with all in-mempool ancestors exceeds <n> kilobytes (default: %u)", DEFAULT_ANCESTOR_SIZE_LIMIT));
strUsage += HelpMessageOpt("-limitdescendantcount=<n>", strprintf("Do not accept transactions if any ancestor would have <n> or more in-mempool descendants (default: %u)", DEFAULT_DESCENDANT_LIMIT));
strUsage += HelpMessageOpt("-limitdescendantsize=<n>", strprintf("Do not accept transactions if any ancestor would have more than <n> kilobytes of in-mempool descendants (default: %u).", DEFAULT_DESCENDANT_SIZE_LIMIT));
strUsage += HelpMessageOpt("-bip9params=deployment:start:end", "Use given start/end times for specified BIP9 deployment (regtest-only)");
}
std::string debugCategories = "addrman, alert, bench, coindb, db, http, leveldb, libevent, lock, mempool, mempoolrej, net, proxy, prune, rand, reindex, rpc, selectcoins, tor, zmq, "
"dash (or specifically: gobject, instantsend, keepass, masternode, mnpayments, mnsync, privatesend, spork)"; // Don't translate these and qt below
@ -1220,6 +1221,41 @@ bool AppInitParameterInteraction()
fEnableReplacement = (std::find(vstrReplacementModes.begin(), vstrReplacementModes.end(), "fee") != vstrReplacementModes.end());
}
if (mapMultiArgs.count("-bip9params")) {
// Allow overriding BIP9 parameters for testing
if (!chainparams.MineBlocksOnDemand()) {
return InitError("BIP9 parameters may only be overridden on regtest.");
}
const std::vector<std::string>& deployments = mapMultiArgs.at("-bip9params");
for (auto i : deployments) {
std::vector<std::string> vDeploymentParams;
boost::split(vDeploymentParams, i, boost::is_any_of(":"));
if (vDeploymentParams.size() != 3) {
return InitError("BIP9 parameters malformed, expecting deployment:start:end");
}
int64_t nStartTime, nTimeout;
if (!ParseInt64(vDeploymentParams[1], &nStartTime)) {
return InitError(strprintf("Invalid nStartTime (%s)", vDeploymentParams[1]));
}
if (!ParseInt64(vDeploymentParams[2], &nTimeout)) {
return InitError(strprintf("Invalid nTimeout (%s)", vDeploymentParams[2]));
}
bool found = false;
for (int j=0; j<(int)Consensus::MAX_VERSION_BITS_DEPLOYMENTS; ++j)
{
if (vDeploymentParams[0].compare(VersionBitsDeploymentInfo[j].name) == 0) {
UpdateRegtestBIP9Parameters(Consensus::DeploymentPos(j), nStartTime, nTimeout);
found = true;
LogPrintf("Setting BIP9 activation parameters for %s to start=%ld, timeout=%ld\n", vDeploymentParams[0], nStartTime, nTimeout);
break;
}
}
if (!found) {
return InitError(strprintf("Invalid deployment (%s)", vDeploymentParams[0]));
}
}
}
return true;
}
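
The -bip9params handling above only accepts the option on regtest and expects the deployment:start:end form, looking the deployment name up in VersionBitsDeploymentInfo. A tiny illustrative helper for building such an override string (the "csv" deployment name in the example is an assumption; any name not present in VersionBitsDeploymentInfo is rejected with "Invalid deployment"):

def bip9_override(deployment, start_time, timeout):
    # Mirrors the "deployment:start:end" format parsed in
    # AppInitParameterInteraction above.
    return "-bip9params=%s:%d:%d" % (deployment, start_time, timeout)

# e.g. make a deployment effectively always active on regtest:
print(bip9_override("csv", 0, 999999999999))  # -bip9params=csv:0:999999999999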


@ -307,7 +307,7 @@ void CMasternodePayments::ProcessMessage(CNode* pfrom, const std::string& strCom
netfulfilledman.AddFulfilledRequest(pfrom->addr, NetMsgType::MASTERNODEPAYMENTSYNC);
Sync(pfrom, connman);
LogPrintf("MASTERNODEPAYMENTSYNC -- Sent Masternode payment votes to peer %d\n", pfrom->id);
LogPrintf("MASTERNODEPAYMENTSYNC -- Sent Masternode payment votes to peer=%d\n", pfrom->id);
} else if (strCommand == NetMsgType::MASTERNODEPAYMENTVOTE) { // Masternode Payments Vote for the Winner
@ -888,7 +888,7 @@ void CMasternodePayments::Sync(CNode* pnode, CConnman& connman)
}
}
LogPrintf("CMasternodePayments::Sync -- Sent %d votes to peer %d\n", nInvCount, pnode->id);
LogPrintf("CMasternodePayments::Sync -- Sent %d votes to peer=%d\n", nInvCount, pnode->id);
CNetMsgMaker msgMaker(pnode->GetSendVersion());
connman.PushMessage(pnode, msgMaker.Make(NetMsgType::SYNCSTATUSCOUNT, MASTERNODE_SYNC_MNW, nInvCount));
}
@ -912,7 +912,7 @@ void CMasternodePayments::RequestLowDataPaymentBlocks(CNode* pnode, CConnman& co
vToFetch.push_back(CInv(MSG_MASTERNODE_PAYMENT_BLOCK, pindex->GetBlockHash()));
// We should not violate GETDATA rules
if(vToFetch.size() == MAX_INV_SZ) {
LogPrintf("CMasternodePayments::SyncLowDataPaymentBlocks -- asking peer %d for %d blocks\n", pnode->id, MAX_INV_SZ);
LogPrintf("CMasternodePayments::SyncLowDataPaymentBlocks -- asking peer=%d for %d blocks\n", pnode->id, MAX_INV_SZ);
connman.PushMessage(pnode, msgMaker.Make(NetMsgType::GETDATA, vToFetch));
// Start filling new batch
vToFetch.clear();
@ -960,7 +960,7 @@ void CMasternodePayments::RequestLowDataPaymentBlocks(CNode* pnode, CConnman& co
}
// We should not violate GETDATA rules
if(vToFetch.size() == MAX_INV_SZ) {
LogPrintf("CMasternodePayments::SyncLowDataPaymentBlocks -- asking peer %d for %d payment blocks\n", pnode->id, MAX_INV_SZ);
LogPrintf("CMasternodePayments::SyncLowDataPaymentBlocks -- asking peer=%d for %d payment blocks\n", pnode->id, MAX_INV_SZ);
connman.PushMessage(pnode, msgMaker.Make(NetMsgType::GETDATA, vToFetch));
// Start filling new batch
vToFetch.clear();
@ -969,7 +969,7 @@ void CMasternodePayments::RequestLowDataPaymentBlocks(CNode* pnode, CConnman& co
}
// Ask for the rest of it
if(!vToFetch.empty()) {
LogPrintf("CMasternodePayments::SyncLowDataPaymentBlocks -- asking peer %d for %d payment blocks\n", pnode->id, vToFetch.size());
LogPrintf("CMasternodePayments::SyncLowDataPaymentBlocks -- asking peer=%d for %d payment blocks\n", pnode->id, vToFetch.size());
connman.PushMessage(pnode, msgMaker.Make(NetMsgType::GETDATA, vToFetch));
}
}


@ -227,7 +227,7 @@ void CMasternodeSync::ProcessTick(CConnman& connman)
// We already fully synced from this node recently,
// disconnect to free this connection slot for another peer.
pnode->fDisconnect = true;
LogPrintf("CMasternodeSync::ProcessTick -- disconnecting from recently synced peer %d\n", pnode->id);
LogPrintf("CMasternodeSync::ProcessTick -- disconnecting from recently synced peer=%d\n", pnode->id);
continue;
}
@ -238,7 +238,7 @@ void CMasternodeSync::ProcessTick(CConnman& connman)
netfulfilledman.AddFulfilledRequest(pnode->addr, "spork-sync");
// get current network sporks
connman.PushMessage(pnode, msgMaker.Make(NetMsgType::GETSPORKS));
LogPrintf("CMasternodeSync::ProcessTick -- nTick %d nRequestedMasternodeAssets %d -- requesting sporks from peer %d\n", nTick, nRequestedMasternodeAssets, pnode->id);
LogPrintf("CMasternodeSync::ProcessTick -- nTick %d nRequestedMasternodeAssets %d -- requesting sporks from peer=%d\n", nTick, nRequestedMasternodeAssets, pnode->id);
}
// INITIAL TIMEOUT


@ -909,18 +909,18 @@ void CMasternodeMan::ProcessMessage(CNode* pfrom, const std::string& strCommand,
mapSeenMasternodePing.insert(std::make_pair(hashMNP, mnp));
if (vin.prevout == mnpair.first) {
LogPrintf("DSEG -- Sent 1 Masternode inv to peer %d\n", pfrom->id);
LogPrintf("DSEG -- Sent 1 Masternode inv to peer=%d\n", pfrom->id);
return;
}
}
if(vin == CTxIn()) {
connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::SYNCSTATUSCOUNT, MASTERNODE_SYNC_LIST, nInvCount));
LogPrintf("DSEG -- Sent %d Masternode invs to peer %d\n", nInvCount, pfrom->id);
LogPrintf("DSEG -- Sent %d Masternode invs to peer=%d\n", nInvCount, pfrom->id);
return;
}
// smth weird happen - someone asked us for vin we have no idea about?
LogPrint("masternode", "DSEG -- No invs sent to peer %d\n", pfrom->id);
LogPrint("masternode", "DSEG -- No invs sent to peer=%d\n", pfrom->id);
} else if (strCommand == NetMsgType::MNVERIFY) { // Masternode Verify
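
The peer %d to peer=%d changes in the masternode payment, sync and manager logging above align these messages with the peer=%d convention already used by the net logging elsewhere in this diff, which keeps peer ids extractable with a single pattern. A trivial illustration with hypothetical log lines:

import re

old_style = "CMasternodePayments::Sync -- Sent 42 votes to peer 7"
new_style = "CMasternodePayments::Sync -- Sent 42 votes to peer=7"

# One pattern now covers every subsystem that logs peer=%d.
peer_re = re.compile(r"peer=(\d+)")
assert peer_re.search(new_style).group(1) == "7"
assert peer_re.search(old_style) is None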


@ -110,7 +110,7 @@ namespace {
/** Blocks that are in flight, and that are in the queue to be downloaded. Protected by cs_main. */
struct QueuedBlock {
uint256 hash;
CBlockIndex* pindex; //!< Optional.
const CBlockIndex* pindex; //!< Optional.
bool fValidatedHeaders; //!< Whether this block has validated headers at the time of request.
};
std::map<uint256, std::pair<NodeId, std::list<QueuedBlock>::iterator> > mapBlocksInFlight;
@ -161,13 +161,13 @@ struct CNodeState {
//! List of asynchronously-determined block rejections to notify this peer about.
std::vector<CBlockReject> rejects;
//! The best known block we know this peer has announced.
CBlockIndex *pindexBestKnownBlock;
const CBlockIndex *pindexBestKnownBlock;
//! The hash of the last unknown block this peer has announced.
uint256 hashLastUnknownBlock;
//! The last full block we both have.
CBlockIndex *pindexLastCommonBlock;
const CBlockIndex *pindexLastCommonBlock;
//! The best header we have sent our peer.
CBlockIndex *pindexBestHeaderSent;
const CBlockIndex *pindexBestHeaderSent;
//! Length of current-streak of unconnecting headers announcements
int nUnconnectingHeaders;
//! Whether we've started headers synchronization with this peer.
@ -314,7 +314,7 @@ bool MarkBlockAsReceived(const uint256& hash) {
}
// Requires cs_main.
void MarkBlockAsInFlight(NodeId nodeid, const uint256& hash, const Consensus::Params& consensusParams, CBlockIndex *pindex = NULL) {
void MarkBlockAsInFlight(NodeId nodeid, const uint256& hash, const Consensus::Params& consensusParams, const CBlockIndex *pindex = NULL) {
CNodeState *state = State(nodeid);
assert(state != NULL);
@ -375,7 +375,7 @@ bool CanDirectFetch(const Consensus::Params &consensusParams)
}
// Requires cs_main
bool PeerHasHeader(CNodeState *state, CBlockIndex *pindex)
bool PeerHasHeader(CNodeState *state, const CBlockIndex *pindex)
{
if (state->pindexBestKnownBlock && pindex == state->pindexBestKnownBlock->GetAncestor(pindex->nHeight))
return true;
@ -386,7 +386,7 @@ bool PeerHasHeader(CNodeState *state, CBlockIndex *pindex)
/** Find the last common ancestor two blocks have.
* Both pa and pb must be non-NULL. */
CBlockIndex* LastCommonAncestor(CBlockIndex* pa, CBlockIndex* pb) {
const CBlockIndex* LastCommonAncestor(const CBlockIndex* pa, const CBlockIndex* pb) {
if (pa->nHeight > pb->nHeight) {
pa = pa->GetAncestor(pb->nHeight);
} else if (pb->nHeight > pa->nHeight) {
@ -405,7 +405,7 @@ CBlockIndex* LastCommonAncestor(CBlockIndex* pa, CBlockIndex* pb) {
/** Update pindexLastCommonBlock and add not-in-flight missing successors to vBlocks, until it has
* at most count entries. */
void FindNextBlocksToDownload(NodeId nodeid, unsigned int count, std::vector<CBlockIndex*>& vBlocks, NodeId& nodeStaller, const Consensus::Params& consensusParams) {
void FindNextBlocksToDownload(NodeId nodeid, unsigned int count, std::vector<const CBlockIndex*>& vBlocks, NodeId& nodeStaller, const Consensus::Params& consensusParams) {
if (count == 0)
return;
@ -433,8 +433,8 @@ void FindNextBlocksToDownload(NodeId nodeid, unsigned int count, std::vector<CBl
if (state->pindexLastCommonBlock == state->pindexBestKnownBlock)
return;
std::vector<CBlockIndex*> vToFetch;
CBlockIndex *pindexWalk = state->pindexLastCommonBlock;
std::vector<const CBlockIndex*> vToFetch;
const CBlockIndex *pindexWalk = state->pindexLastCommonBlock;
// Never fetch further than the best block we know the peer has, or more than BLOCK_DOWNLOAD_WINDOW + 1 beyond the last
// linked block we have in common with this peer. The +1 is so we can detect stalling, namely if we would be able to
// download that next block if the window were 1 larger.
@ -457,7 +457,7 @@ void FindNextBlocksToDownload(NodeId nodeid, unsigned int count, std::vector<CBl
// are not yet downloaded and not in flight to vBlocks. In the mean time, update
// pindexLastCommonBlock as long as all ancestors are already downloaded, or if it's
// already part of our chain (and therefore don't need it even if pruned).
BOOST_FOREACH(CBlockIndex* pindex, vToFetch) {
BOOST_FOREACH(const CBlockIndex* pindex, vToFetch) {
if (!pindex->IsValid(BLOCK_VALID_TREE)) {
// We consider the chain that this peer is on invalid.
return;
@ -1550,7 +1550,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
LOCK(cs_main);
// Find the last block the caller has in the main chain
CBlockIndex* pindex = FindForkInGlobalIndex(chainActive, locator);
const CBlockIndex* pindex = FindForkInGlobalIndex(chainActive, locator);
// Send the rest of the chain
if (pindex)
@ -1598,7 +1598,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
}
CNodeState *nodestate = State(pfrom->GetId());
CBlockIndex* pindex = NULL;
const CBlockIndex* pindex = NULL;
if (locator.IsNull())
{
// If locator is null, return the hashStop block
@ -1892,7 +1892,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
ReadCompactSize(vRecv); // ignore tx count; assume it is 0.
}
CBlockIndex *pindexLast = NULL;
const CBlockIndex *pindexLast = NULL;
{
LOCK(cs_main);
CNodeState *nodestate = State(pfrom->GetId());
@ -1983,8 +1983,8 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
// If this set of headers is valid and ends in a block with at least as
// much work as our tip, download as much as possible.
if (fCanDirectFetch && pindexLast->IsValid(BLOCK_VALID_TREE) && chainActive.Tip()->nChainWork <= pindexLast->nChainWork) {
std::vector<CBlockIndex *> vToFetch;
CBlockIndex *pindexWalk = pindexLast;
std::vector<const CBlockIndex *> vToFetch;
const CBlockIndex *pindexWalk = pindexLast;
// Calculate all the blocks we'd need to switch to pindexLast, up to a limit.
while (pindexWalk && !chainActive.Contains(pindexWalk) && vToFetch.size() <= MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
if (!(pindexWalk->nStatus & BLOCK_HAVE_DATA) &&
@ -2005,7 +2005,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
} else {
std::vector<CInv> vGetData;
// Download as much as possible, from earliest to latest.
BOOST_REVERSE_FOREACH(CBlockIndex *pindex, vToFetch) {
BOOST_REVERSE_FOREACH(const CBlockIndex *pindex, vToFetch) {
if (nodestate->nBlocksInFlight >= MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
// Can't download any more from this peer
break;
@ -2633,7 +2633,7 @@ bool SendMessages(CNode* pto, CConnman& connman, const std::atomic<bool>& interr
LOCK(pto->cs_inventory);
std::vector<CBlock> vHeaders;
bool fRevertToInv = (!state.fPreferHeaders || pto->vBlockHashesToAnnounce.size() > MAX_BLOCKS_TO_ANNOUNCE);
CBlockIndex *pBestIndex = NULL; // last header queued for delivery
const CBlockIndex *pBestIndex = NULL; // last header queued for delivery
ProcessBlockAvailability(pto->id); // ensure pindexBestKnownBlock is up-to-date
if (!fRevertToInv) {
@ -2644,7 +2644,7 @@ bool SendMessages(CNode* pto, CConnman& connman, const std::atomic<bool>& interr
BOOST_FOREACH(const uint256 &hash, pto->vBlockHashesToAnnounce) {
BlockMap::iterator mi = mapBlockIndex.find(hash);
assert(mi != mapBlockIndex.end());
CBlockIndex *pindex = mi->second;
const CBlockIndex *pindex = mi->second;
if (chainActive[pindex->nHeight] != pindex) {
// Bail out if we reorged away from this block
fRevertToInv = true;
@ -2692,7 +2692,7 @@ bool SendMessages(CNode* pto, CConnman& connman, const std::atomic<bool>& interr
const uint256 &hashToAnnounce = pto->vBlockHashesToAnnounce.back();
BlockMap::iterator mi = mapBlockIndex.find(hashToAnnounce);
assert(mi != mapBlockIndex.end());
CBlockIndex *pindex = mi->second;
const CBlockIndex *pindex = mi->second;
// Warn if we're announcing a block that is not on the main chain.
// This should be very rare and could be optimized out.
@ -2935,10 +2935,10 @@ bool SendMessages(CNode* pto, CConnman& connman, const std::atomic<bool>& interr
//
std::vector<CInv> vGetData;
if (!pto->fClient && (fFetch || !IsInitialBlockDownload()) && state.nBlocksInFlight < MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
std::vector<CBlockIndex*> vToDownload;
std::vector<const CBlockIndex*> vToDownload;
NodeId staller = -1;
FindNextBlocksToDownload(pto->GetId(), MAX_BLOCKS_IN_TRANSIT_PER_PEER - state.nBlocksInFlight, vToDownload, staller, consensusParams);
BOOST_FOREACH(CBlockIndex *pindex, vToDownload) {
BOOST_FOREACH(const CBlockIndex *pindex, vToDownload) {
vGetData.push_back(CInv(MSG_BLOCK, pindex->GetBlockHash()));
MarkBlockAsInFlight(pto->GetId(), pindex->GetBlockHash(), consensusParams, pindex);
LogPrint("net", "Requesting block %s (%d) peer=%d\n", pindex->GetBlockHash().ToString(),


@ -2023,7 +2023,7 @@ static bool ConnectBlock(const CBlock& block, CValidationState& state, CBlockInd
int64_t nTimeStart = GetTimeMicros();
// Check it again in case a previous version let a bad block in
if (!CheckBlock(block, state, chainparams.GetConsensus(), GetAdjustedTime(), !fJustCheck, !fJustCheck))
if (!CheckBlock(block, state, chainparams.GetConsensus(), !fJustCheck, !fJustCheck))
return error("%s: Consensus::CheckBlock: %s", __func__, FormatStateMessage(state));
// verify that the view's current state corresponds to the previous block
@ -3259,16 +3259,12 @@ bool FindUndoPos(CValidationState &state, int nFile, CDiskBlockPos &pos, unsigne
return true;
}
bool CheckBlockHeader(const CBlockHeader& block, CValidationState& state, const Consensus::Params& consensusParams, int64_t nAdjustedTime, bool fCheckPOW)
bool CheckBlockHeader(const CBlockHeader& block, CValidationState& state, const Consensus::Params& consensusParams, bool fCheckPOW)
{
// Check proof of work matches claimed amount
if (fCheckPOW && !CheckProofOfWork(block.GetHash(), block.nBits, consensusParams))
return state.DoS(50, false, REJECT_INVALID, "high-hash", false, "proof of work failed");
// Check timestamp
if (block.GetBlockTime() > nAdjustedTime + 2 * 60 * 60)
return state.Invalid(false, REJECT_INVALID, "time-too-new", "block timestamp too far in the future");
// Check DevNet
if (!consensusParams.hashDevnetGenesisBlock.IsNull() &&
block.hashPrevBlock == consensusParams.hashGenesisBlock &&
@ -3280,7 +3276,7 @@ bool CheckBlockHeader(const CBlockHeader& block, CValidationState& state, const
return true;
}
bool CheckBlock(const CBlock& block, CValidationState& state, const Consensus::Params& consensusParams, int64_t nAdjustedTime, bool fCheckPOW, bool fCheckMerkleRoot)
bool CheckBlock(const CBlock& block, CValidationState& state, const Consensus::Params& consensusParams, bool fCheckPOW, bool fCheckMerkleRoot)
{
// These are checks that are independent of context.
@ -3289,7 +3285,7 @@ bool CheckBlock(const CBlock& block, CValidationState& state, const Consensus::P
// Check that the header is valid (particularly PoW). This is mostly
// redundant with the call in AcceptBlockHeader.
if (!CheckBlockHeader(block, state, consensusParams, nAdjustedTime, fCheckPOW))
if (!CheckBlockHeader(block, state, consensusParams, fCheckPOW))
return false;
// Check the merkle root.
@ -3385,7 +3381,7 @@ static bool CheckIndexAgainstCheckpoint(const CBlockIndex* pindexPrev, CValidati
return true;
}
bool ContextualCheckBlockHeader(const CBlockHeader& block, CValidationState& state, const Consensus::Params& consensusParams, const CBlockIndex* pindexPrev)
bool ContextualCheckBlockHeader(const CBlockHeader& block, CValidationState& state, const Consensus::Params& consensusParams, const CBlockIndex* pindexPrev, int64_t nAdjustedTime)
{
const int nHeight = pindexPrev == NULL ? 0 : pindexPrev->nHeight + 1;
// Check proof of work
@ -3407,6 +3403,10 @@ bool ContextualCheckBlockHeader(const CBlockHeader& block, CValidationState& sta
if (block.GetBlockTime() <= pindexPrev->GetMedianTimePast())
return state.Invalid(false, REJECT_INVALID, "time-too-old", "block's timestamp is too early");
// Check timestamp
if (block.GetBlockTime() > nAdjustedTime + 2 * 60 * 60)
return state.Invalid(false, REJECT_INVALID, "time-too-new", "block timestamp too far in the future");
// check for version 2, 3 and 4 upgrades
if((block.nVersion < 2 && nHeight >= consensusParams.BIP34Height) ||
(block.nVersion < 3 && nHeight >= consensusParams.BIP66Height) ||
@ -3489,7 +3489,7 @@ static bool AcceptBlockHeader(const CBlockHeader& block, CValidationState& state
return true;
}
if (!CheckBlockHeader(block, state, chainparams.GetConsensus(), GetAdjustedTime()))
if (!CheckBlockHeader(block, state, chainparams.GetConsensus()))
return error("%s: Consensus::CheckBlockHeader: %s, %s", __func__, hash.ToString(), FormatStateMessage(state));
// Get prev block index
@ -3505,7 +3505,7 @@ static bool AcceptBlockHeader(const CBlockHeader& block, CValidationState& state
if (fCheckpointsEnabled && !CheckIndexAgainstCheckpoint(pindexPrev, state, chainparams, hash))
return error("%s: CheckIndexAgainstCheckpoint(): %s", __func__, state.GetRejectReason().c_str());
if (!ContextualCheckBlockHeader(block, state, chainparams.GetConsensus(), pindexPrev))
if (!ContextualCheckBlockHeader(block, state, chainparams.GetConsensus(), pindexPrev, GetAdjustedTime()))
return error("%s: Consensus::ContextualCheckBlockHeader: %s, %s", __func__, hash.ToString(), FormatStateMessage(state));
}
if (pindex == NULL)
@ -3523,14 +3523,18 @@ static bool AcceptBlockHeader(const CBlockHeader& block, CValidationState& state
}
// Exposed wrapper for AcceptBlockHeader
bool ProcessNewBlockHeaders(const std::vector<CBlockHeader>& headers, CValidationState& state, const CChainParams& chainparams, CBlockIndex** ppindex)
bool ProcessNewBlockHeaders(const std::vector<CBlockHeader>& headers, CValidationState& state, const CChainParams& chainparams, const CBlockIndex** ppindex)
{
{
LOCK(cs_main);
for (const CBlockHeader& header : headers) {
if (!AcceptBlockHeader(header, state, chainparams, ppindex)) {
CBlockIndex *pindex = NULL; // Use a temp pindex instead of ppindex to avoid a const_cast
if (!AcceptBlockHeader(header, state, chainparams, &pindex)) {
return false;
}
if (ppindex) {
*ppindex = pindex;
}
}
}
NotifyHeaderTip();
@ -3538,8 +3542,10 @@ bool ProcessNewBlockHeaders(const std::vector<CBlockHeader>& headers, CValidatio
}
/** Store block on disk. If dbp is non-NULL, the file is known to already reside on disk */
static bool AcceptBlock(const CBlock& block, CValidationState& state, const CChainParams& chainparams, CBlockIndex** ppindex, bool fRequested, const CDiskBlockPos* dbp, bool* fNewBlock)
static bool AcceptBlock(const std::shared_ptr<const CBlock>& pblock, CValidationState& state, const CChainParams& chainparams, CBlockIndex** ppindex, bool fRequested, const CDiskBlockPos* dbp, bool* fNewBlock)
{
const CBlock& block = *pblock;
if (fNewBlock) *fNewBlock = false;
AssertLockHeld(cs_main);
@ -3576,7 +3582,7 @@ static bool AcceptBlock(const CBlock& block, CValidationState& state, const CCha
}
if (fNewBlock) *fNewBlock = true;
if (!CheckBlock(block, state, chainparams.GetConsensus(), GetAdjustedTime()) ||
if (!CheckBlock(block, state, chainparams.GetConsensus()) ||
!ContextualCheckBlock(block, state, chainparams.GetConsensus(), pindex->pprev)) {
if (state.IsInvalid() && !state.CorruptionPossible()) {
pindex->nStatus |= BLOCK_FAILED_VALID;
@ -3585,6 +3591,11 @@ static bool AcceptBlock(const CBlock& block, CValidationState& state, const CCha
return error("%s: %s", __func__, FormatStateMessage(state));
}
// Header is valid/has work, merkle tree is good...RELAY NOW
// (but if it does not build on our best tip, let the SendMessages loop relay it)
if (!IsInitialBlockDownload() && chainActive.Tip() == pindex->pprev)
GetMainSignals().NewPoWValidBlock(pindex, pblock);
int nHeight = pindex->nHeight;
// Write block to history file
@ -3613,13 +3624,19 @@ static bool AcceptBlock(const CBlock& block, CValidationState& state, const CCha
bool ProcessNewBlock(const CChainParams& chainparams, const std::shared_ptr<const CBlock> pblock, bool fForceProcessing, bool *fNewBlock)
{
{
LOCK(cs_main);
// Store to disk
CBlockIndex *pindex = NULL;
if (fNewBlock) *fNewBlock = false;
CValidationState state;
bool ret = AcceptBlock(*pblock, state, chainparams, &pindex, fForceProcessing, NULL, fNewBlock);
// Ensure that CheckBlock() passes before calling AcceptBlock, as
// belt-and-suspenders.
bool ret = CheckBlock(*pblock, state, chainparams.GetConsensus());
LOCK(cs_main);
if (ret) {
// Store to disk
ret = AcceptBlock(pblock, state, chainparams, &pindex, fForceProcessing, NULL, fNewBlock);
}
CheckBlockIndex(chainparams.GetConsensus());
if (!ret) {
GetMainSignals().BlockChecked(*pblock, state);
@ -3650,9 +3667,9 @@ bool TestBlockValidity(CValidationState& state, const CChainParams& chainparams,
indexDummy.nHeight = pindexPrev->nHeight + 1;
// NOTE: CheckBlockHeader is called by CheckBlock
if (!ContextualCheckBlockHeader(block, state, chainparams.GetConsensus(), pindexPrev))
if (!ContextualCheckBlockHeader(block, state, chainparams.GetConsensus(), pindexPrev, GetAdjustedTime()))
return error("%s: Consensus::ContextualCheckBlockHeader: %s", __func__, FormatStateMessage(state));
if (!CheckBlock(block, state, chainparams.GetConsensus(), GetAdjustedTime(), fCheckPOW, fCheckMerkleRoot))
if (!CheckBlock(block, state, chainparams.GetConsensus(), fCheckPOW, fCheckMerkleRoot))
return error("%s: Consensus::CheckBlock: %s", __func__, FormatStateMessage(state));
if (!ContextualCheckBlock(block, state, chainparams.GetConsensus(), pindexPrev))
return error("%s: Consensus::ContextualCheckBlock: %s", __func__, FormatStateMessage(state));
@ -4038,7 +4055,7 @@ bool CVerifyDB::VerifyDB(const CChainParams& chainparams, CCoinsView *coinsview,
if (!ReadBlockFromDisk(block, pindex, chainparams.GetConsensus()))
return error("VerifyDB(): *** ReadBlockFromDisk failed at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString());
// check level 1: verify block validity
if (nCheckLevel >= 1 && !CheckBlock(block, state, chainparams.GetConsensus(), GetAdjustedTime()))
if (nCheckLevel >= 1 && !CheckBlock(block, state, chainparams.GetConsensus()))
return error("%s: *** found bad block at %d, hash=%s (%s)\n", __func__,
pindex->nHeight, pindex->GetBlockHash().ToString(), FormatStateMessage(state));
// check level 2: verify undo validity
@ -4233,7 +4250,8 @@ bool LoadExternalBlockFile(const CChainParams& chainparams, FILE* fileIn, CDiskB
dbp->nPos = nBlockPos;
blkdat.SetLimit(nBlockPos + nSize);
blkdat.SetPos(nBlockPos);
CBlock block;
std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
CBlock& block = *pblock;
blkdat >> block;
nRewind = blkdat.GetPos();
@ -4251,7 +4269,7 @@ bool LoadExternalBlockFile(const CChainParams& chainparams, FILE* fileIn, CDiskB
if (mapBlockIndex.count(hash) == 0 || (mapBlockIndex[hash]->nStatus & BLOCK_HAVE_DATA) == 0) {
LOCK(cs_main);
CValidationState state;
if (AcceptBlock(block, state, chainparams, NULL, true, dbp, NULL))
if (AcceptBlock(pblock, state, chainparams, NULL, true, dbp, NULL))
nLoaded++;
if (state.IsError())
break;
@ -4278,16 +4296,17 @@ bool LoadExternalBlockFile(const CChainParams& chainparams, FILE* fileIn, CDiskB
std::pair<std::multimap<uint256, CDiskBlockPos>::iterator, std::multimap<uint256, CDiskBlockPos>::iterator> range = mapBlocksUnknownParent.equal_range(head);
while (range.first != range.second) {
std::multimap<uint256, CDiskBlockPos>::iterator it = range.first;
if (ReadBlockFromDisk(block, it->second, chainparams.GetConsensus()))
std::shared_ptr<CBlock> pblockrecursive = std::make_shared<CBlock>();
if (ReadBlockFromDisk(*pblockrecursive, it->second, chainparams.GetConsensus()))
{
LogPrint("reindex", "%s: Processing out of order child %s of %s\n", __func__, block.GetHash().ToString(),
LogPrint("reindex", "%s: Processing out of order child %s of %s\n", __func__, pblockrecursive->GetHash().ToString(),
head.ToString());
LOCK(cs_main);
CValidationState dummy;
if (AcceptBlock(block, dummy, chainparams, NULL, true, &it->second, NULL))
if (AcceptBlock(pblockrecursive, dummy, chainparams, NULL, true, &it->second, NULL))
{
nLoaded++;
queue.push_back(block.GetHash());
queue.push_back(pblockrecursive->GetHash());
}
}
range.first++;


@ -255,7 +255,7 @@ bool ProcessNewBlock(const CChainParams& chainparams, const std::shared_ptr<cons
* @param[in] chainparams The params for the chain we want to connect to
* @param[out] ppindex If set, the pointer will be set to point to the last new block index object for the given headers
*/
bool ProcessNewBlockHeaders(const std::vector<CBlockHeader>& block, CValidationState& state, const CChainParams& chainparams, CBlockIndex** ppindex=NULL);
bool ProcessNewBlockHeaders(const std::vector<CBlockHeader>& block, CValidationState& state, const CChainParams& chainparams, const CBlockIndex** ppindex=NULL);
/** Check whether enough disk space is available for an incoming block */
bool CheckDiskSpace(uint64_t nAdditionalBytes = 0);
@ -491,13 +491,13 @@ bool DisconnectBlocks(int blocks);
void ReprocessBlocks(int nBlocks);
/** Context-independent validity checks */
bool CheckBlockHeader(const CBlockHeader& block, CValidationState& state, const Consensus::Params& consensusParams, int64_t nAdjustedTime, bool fCheckPOW = true);
bool CheckBlock(const CBlock& block, CValidationState& state, const Consensus::Params& consensusParams, int64_t nAdjustedTime, bool fCheckPOW = true, bool fCheckMerkleRoot = true);
bool CheckBlockHeader(const CBlockHeader& block, CValidationState& state, const Consensus::Params& consensusParams, bool fCheckPOW = true);
bool CheckBlock(const CBlock& block, CValidationState& state, const Consensus::Params& consensusParams, bool fCheckPOW = true, bool fCheckMerkleRoot = true);
/** Context-dependent validity checks.
* By "context", we mean only the previous block headers, but not the UTXO
* set; UTXO-related validity checks are done in ConnectBlock(). */
bool ContextualCheckBlockHeader(const CBlockHeader& block, CValidationState& state, const Consensus::Params& consensusParams, const CBlockIndex* pindexPrev);
bool ContextualCheckBlockHeader(const CBlockHeader& block, CValidationState& state, const Consensus::Params& consensusParams, const CBlockIndex* pindexPrev, int64_t nAdjustedTime);
bool ContextualCheckBlock(const CBlock& block, CValidationState& state, const Consensus::Params& consensusParams, const CBlockIndex *pindexPrev);
/** Check a block is completely valid from start to finish (only works on top of our current best block, with cs_main held) */


@ -25,6 +25,7 @@ void RegisterValidationInterface(CValidationInterface* pwalletIn) {
g_signals.BlockChecked.connect(boost::bind(&CValidationInterface::BlockChecked, pwalletIn, _1, _2));
g_signals.ScriptForMining.connect(boost::bind(&CValidationInterface::GetScriptForMining, pwalletIn, _1));
g_signals.BlockFound.connect(boost::bind(&CValidationInterface::ResetRequestCount, pwalletIn, _1));
g_signals.NewPoWValidBlock.connect(boost::bind(&CValidationInterface::NewPoWValidBlock, pwalletIn, _1, _2));
}
void UnregisterValidationInterface(CValidationInterface* pwalletIn) {
@ -38,6 +39,7 @@ void UnregisterValidationInterface(CValidationInterface* pwalletIn) {
g_signals.NotifyTransactionLock.disconnect(boost::bind(&CValidationInterface::NotifyTransactionLock, pwalletIn, _1));
g_signals.SyncTransaction.disconnect(boost::bind(&CValidationInterface::SyncTransaction, pwalletIn, _1, _2, _3));
g_signals.UpdatedBlockTip.disconnect(boost::bind(&CValidationInterface::UpdatedBlockTip, pwalletIn, _1, _2, _3));
g_signals.NewPoWValidBlock.disconnect(boost::bind(&CValidationInterface::NewPoWValidBlock, pwalletIn, _1, _2));
g_signals.NotifyHeaderTip.disconnect(boost::bind(&CValidationInterface::NotifyHeaderTip, pwalletIn, _1, _2));
g_signals.AcceptedBlockHeader.disconnect(boost::bind(&CValidationInterface::AcceptedBlockHeader, pwalletIn, _1));
}
@ -53,6 +55,7 @@ void UnregisterAllValidationInterfaces() {
g_signals.NotifyTransactionLock.disconnect_all_slots();
g_signals.SyncTransaction.disconnect_all_slots();
g_signals.UpdatedBlockTip.disconnect_all_slots();
g_signals.NewPoWValidBlock.disconnect_all_slots();
g_signals.NotifyHeaderTip.disconnect_all_slots();
g_signals.AcceptedBlockHeader.disconnect_all_slots();
}


@ -8,6 +8,7 @@
#include <boost/signals2/signal.hpp>
#include <boost/shared_ptr.hpp>
#include <memory>
class CBlock;
class CBlockIndex;
@ -43,6 +44,7 @@ protected:
virtual void BlockChecked(const CBlock&, const CValidationState&) {}
virtual void GetScriptForMining(boost::shared_ptr<CReserveScript>&) {};
virtual void ResetRequestCount(const uint256 &hash) {};
virtual void NewPoWValidBlock(const CBlockIndex *pindex, const std::shared_ptr<const CBlock>& block) {};
friend void ::RegisterValidationInterface(CValidationInterface*);
friend void ::UnregisterValidationInterface(CValidationInterface*);
friend void ::UnregisterAllValidationInterfaces();
@ -82,6 +84,10 @@ struct CMainSignals {
boost::signals2::signal<void (boost::shared_ptr<CReserveScript>&)> ScriptForMining;
/** Notifies listeners that a block has been successfully mined */
boost::signals2::signal<void (const uint256 &)> BlockFound;
/**
* Notifies listeners that a block which builds directly on our current tip
* has been received and connected to the headers tree, though not validated yet */
boost::signals2::signal<void (const CBlockIndex *, const std::shared_ptr<const CBlock>&)> NewPoWValidBlock;
};
CMainSignals& GetMainSignals();