Merge pull request #4375 from PastaPastaPasta/pr_4330

backport 0.18 PR #14522 #14672 #14812 #14494 #14690 #14700 #14705 #14478 #14247
UdjinM6 2021-08-29 23:50:29 +03:00 committed by GitHub
commit f38aa44bfd
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
11 changed files with 275 additions and 19 deletions

View File

@@ -19,6 +19,7 @@ test/lint/check-rpc-mappings.py .
test/lint/lint-all.sh
if [ "$TRAVIS_REPO_SLUG" = "bitcoin/bitcoin" -a "$TRAVIS_EVENT_TYPE" = "cron" ]; then
git log --merges --before="2 days ago" -1 --format='%H' > ./contrib/verify-commits/trusted-sha512-root-commit
while read -r LINE; do travis_retry gpg --keyserver hkp://subset.pool.sks-keyservers.net --recv-keys $LINE; done < contrib/verify-commits/trusted-keys &&
- travis_wait 50 contrib/verify-commits/verify-commits.py;
+ travis_wait 50 contrib/verify-commits/verify-commits.py --clean-merge=2;
fi

View File

@@ -183,6 +183,7 @@ void AskPassphraseDialog::accept()
} break;
case UnlockMixing:
case Unlock:
try {
if(!model->setWalletLocked(false, oldpass, mode == UnlockMixing))
{
QMessageBox::critical(this, tr("Wallet unlock failed"),
@@ -192,6 +193,9 @@ void AskPassphraseDialog::accept()
{
QDialog::accept(); // Success
}
} catch (const std::runtime_error& e) {
QMessageBox::critical(this, tr("Wallet unlock failed"), e.what());
}
break;
case Decrypt:
if(!model->setWalletEncrypted(false, oldpass))

View File

@@ -180,6 +180,9 @@ template<typename Stream>
void SerializeHDKeypaths(Stream& s, const std::map<CPubKey, KeyOriginInfo>& hd_keypaths, uint8_t type)
{
for (auto keypath_pair : hd_keypaths) {
if (!keypath_pair.first.IsValid()) {
throw std::ios_base::failure("Invalid CPubKey being serialized");
}
SerializeToVector(s, type, MakeSpan(keypath_pair.first));
WriteCompactSize(s, (keypath_pair.second.path.size() + 1) * sizeof(uint32_t));
s << keypath_pair.second.fingerprint;
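For illustration, a rough Python sketch of the keypath value layout that the loop above writes (per BIP 174: a 4-byte master key fingerprint followed by each derivation index as a little-endian uint32, hence the (path length + 1) * 4 compact size). The fingerprint and path below are made-up example values, not anything from this diff:

import struct

def ser_hd_keypath_value(fingerprint, path):
    # 4-byte fingerprint followed by each derivation index as a little-endian uint32
    assert len(fingerprint) == 4
    return fingerprint + b''.join(struct.pack('<I', index) for index in path)

# Hypothetical example: fingerprint d90c6a4f, path m/44'/5'/0' (hardened = high bit set)
value = ser_hd_keypath_value(bytes.fromhex('d90c6a4f'),
                             [0x80000000 | 44, 0x80000000 | 5, 0x80000000 | 0])
assert len(value) == (3 + 1) * 4  # matches the WriteCompactSize() argument above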

View File

@@ -885,8 +885,10 @@ static bool GetConfigOptions(std::istream& stream, std::string& error, std::vect
std::string::size_type pos;
int linenr = 1;
while (std::getline(stream, str)) {
bool used_hash = false;
if ((pos = str.find('#')) != std::string::npos) {
str = str.substr(0, pos);
used_hash = true;
}
const static std::string pattern = " \t\r\n";
str = TrimString(str, pattern);
@@ -899,6 +901,10 @@
} else if ((pos = str.find('=')) != std::string::npos) {
std::string name = prefix + TrimString(str.substr(0, pos), pattern);
std::string value = TrimString(str.substr(pos + 1), pattern);
if (used_hash && name == "rpcpassword") {
error = strprintf("parse error on line %i, using # in rpcpassword can be ambiguous and should be avoided", linenr);
return false;
}
options.emplace_back(name, value);
} else {
error = strprintf("parse error on line %i: %s", linenr, str);
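A simplified Python sketch of the ambiguity being guarded against (illustrative only, not the actual parser): anything after '#' is treated as a comment, so a literal '#' inside rpcpassword would be silently truncated, and the new check rejects the line instead of guessing:

def parse_config_line(line):
    used_hash = '#' in line
    line = line.split('#', 1)[0].strip(" \t\r\n")   # drop the comment, then trim
    if '=' not in line:
        return None
    name, _, value = line.partition('=')
    name, value = name.strip(" \t\r\n"), value.strip(" \t\r\n")
    if used_hash and name == 'rpcpassword':
        raise ValueError('using # in rpcpassword can be ambiguous and should be avoided')
    return name, value

assert parse_config_line('rpcuser=alice  # a comment') == ('rpcuser', 'alice')
# parse_config_line('rpcpassword=some#pass') now raises instead of silently storing 'some'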

View File

@@ -269,7 +269,7 @@ bool CCryptoKeyStore::Unlock(const CKeyingMaterial& vMasterKeyIn, bool fForMixin
if (keyPass && keyFail)
{
LogPrintf("The wallet is probably corrupted: Some keys decrypt but not all.\n");
- assert(false);
+ throw std::runtime_error("Error unlocking wallet: some keys decrypt but not all. Your wallet file may be corrupt.");
}
if (keyFail || (!keyPass && cryptedHDChain.IsNull()))
return false;

View File

@@ -39,6 +39,10 @@ class ConfArgsTest(BitcoinTestFramework):
conf.write('nono\n')
self.nodes[0].assert_start_raises_init_error(expected_msg='Error reading configuration file: parse error on line 1: nono, if you intended to specify a negated option, use nono=1 instead')
with open(inc_conf_file_path, 'w', encoding='utf-8') as conf:
conf.write('server=1\nrpcuser=someuser\nrpcpassword=some#pass')
self.nodes[0].assert_start_raises_init_error(expected_msg='Error reading configuration file: parse error on line 3, using # in rpcpassword can be ambiguous and should be avoided')
with open(inc_conf_file_path, 'w', encoding='utf-8') as conf:
conf.write('') # clear

View File

@@ -78,9 +78,19 @@ class InvalidBlockRequestTest(BitcoinTestFramework):
block2.vtx.append(tx2)
assert_equal(block2.hashMerkleRoot, block2.calc_merkle_root())
assert_equal(orig_hash, block2.rehash())
- assert(block2_orig.vtx != block2.vtx)
+ assert block2_orig.vtx != block2.vtx
- node.p2p.send_blocks_and_test([block2], node, success=False, request_block=False, reject_code=16, reject_reason=b'bad-txns-duplicate')
+ node.p2p.send_blocks_and_test([block2], node, success=False, reject_code=16, reject_reason=b'bad-txns-duplicate')
# Check transactions for duplicate inputs
self.log.info("Test duplicate input block.")
block2_orig.vtx[2].vin.append(block2_orig.vtx[2].vin[0])
block2_orig.vtx[2].rehash()
block2_orig.hashMerkleRoot = block2_orig.calc_merkle_root()
block2_orig.rehash()
block2_orig.solve()
node.p2p.send_blocks_and_test([block2_orig], node, success=False, reject_code=16, reject_reason=b'bad-txns-inputs-duplicate')
self.log.info("Test very broken block.")
@@ -93,7 +103,7 @@ class InvalidBlockRequestTest(BitcoinTestFramework):
block3.rehash()
block3.solve()
- node.p2p.send_blocks_and_test([block3], node, success=False, request_block=False, reject_code=16, reject_reason=b'bad-cb-amount')
+ node.p2p.send_blocks_and_test([block3], node, success=False, reject_code=16, reject_reason=b'bad-cb-amount')
if __name__ == '__main__':
InvalidBlockRequestTest().main()
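The new duplicate-input test above exercises the consensus check that rejects a transaction spending the same prevout twice (bad-txns-inputs-duplicate). A simplified sketch of that rule, not the actual CheckTransaction() code:

def has_duplicate_inputs(tx):
    # Invalid if any (txid, vout) outpoint is spent more than once within the transaction.
    seen = set()
    for txin in tx.vin:
        outpoint = (txin.prevout.hash, txin.prevout.n)
        if outpoint in seen:
            return True
        seen.add(outpoint)
    return False

Appending vin[0] to block2_orig.vtx[2] above creates exactly this condition, so the node rejects the block.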

View File

@@ -0,0 +1,175 @@
#!/usr/bin/env python3
# Copyright (c) 2015-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test node responses to invalid network messages."""
import struct
from test_framework import messages
from test_framework.mininode import P2PDataStore
from test_framework.test_framework import BitcoinTestFramework
class msg_unrecognized:
"""Nonsensical message. Modeled after similar types in test_framework.messages."""
command = b'badmsg'
def __init__(self, str_data):
self.str_data = str_data.encode() if not isinstance(str_data, bytes) else str_data
def serialize(self):
return messages.ser_string(self.str_data)
def __repr__(self):
return "{}(data={})".format(self.command, self.str_data)
class msg_nametoolong(msg_unrecognized):
command = b'thisnameiswayyyyyyyyytoolong'
class InvalidMessagesTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.setup_clean_chain = True
def run_test(self):
"""
0. Send a bunch of large (3MB) messages of an unrecognized type. Check to see
that it isn't an effective DoS against the node.
1. Send an oversized (3MB+) message and check that we're disconnected.
2. Send a few messages with an incorrect data size in the header, ensure the
messages are ignored.
3. Send an unrecognized message with a command name longer than 12 characters.
"""
node = self.nodes[0]
self.node = node
node.add_p2p_connection(P2PDataStore())
conn2 = node.add_p2p_connection(P2PDataStore())
msg_limit = 3 * 1024 * 1024 # 3MB, per MAX_PROTOCOL_MESSAGE_LENGTH
valid_data_limit = msg_limit - 5 # Account for the 4-byte length prefix
#
# 0.
#
# Send as large a message as is valid, ensure we aren't disconnected but
# also can't exhaust resources.
#
msg_at_size = msg_unrecognized("b" * valid_data_limit)
assert len(msg_at_size.serialize()) == msg_limit
with node.assert_memory_usage_stable(increase_allowed=0.5):
self.log.info(
"Sending a bunch of large, junk messages to test "
"memory exhaustion. May take a bit...")
# Run a bunch of times to test for memory exhaustion.
for _ in range(80):
node.p2p.send_message(msg_at_size)
# Check that, even though the node is being hammered by nonsense from one
# connection, it can still service other peers in a timely way.
for _ in range(20):
conn2.sync_with_ping(timeout=2)
# Peer 1, despite serving up a bunch of nonsense, should still be connected.
self.log.info("Waiting for node to drop junk messages.")
node.p2p.sync_with_ping(timeout=30)
assert node.p2p.is_connected
#
# 1.
#
# Send an oversized message, ensure we're disconnected.
#
msg_over_size = msg_unrecognized("b" * (valid_data_limit + 1))
assert len(msg_over_size.serialize()) == (msg_limit + 1)
with node.assert_debug_log(["Oversized message from peer=0, disconnecting"]):
# An unknown message type (or *any* message type) over
# MAX_PROTOCOL_MESSAGE_LENGTH should result in a disconnect.
node.p2p.send_message(msg_over_size)
node.p2p.wait_for_disconnect(timeout=4)
node.disconnect_p2ps()
conn = node.add_p2p_connection(P2PDataStore())
conn.wait_for_verack()
#
# 2.
#
# Send messages with an incorrect data size in the header.
#
actual_size = 100
msg = msg_unrecognized("b" * actual_size)
# TODO: handle larger-than cases. I haven't been able to pin down what behavior to expect.
for wrong_size in (2, 77, 78, 79):
self.log.info("Sending a message with incorrect size of {}".format(wrong_size))
# Unmodified message should submit okay.
node.p2p.send_and_ping(msg)
# A message lying about its data size results in a disconnect when the incorrect
# data size is less than the actual size.
#
# TODO: why does behavior change at 78 bytes?
#
node.p2p.send_raw_message(self._tweak_msg_data_size(msg, wrong_size))
# For some reason unknown to me, we sometimes have to push additional data to the
# peer in order for it to realize a disconnect.
try:
node.p2p.send_message(messages.msg_ping(nonce=123123))
except IOError:
pass
node.p2p.wait_for_disconnect(timeout=10)
node.disconnect_p2ps()
node.add_p2p_connection(P2PDataStore())
#
# 3.
#
# Send a message with a too-long command name.
#
node.p2p.send_message(msg_nametoolong("foobar"))
node.p2p.wait_for_disconnect(timeout=4)
# Node is still up.
conn = node.add_p2p_connection(P2PDataStore())
conn.sync_with_ping()
def _tweak_msg_data_size(self, message, wrong_size):
"""
Return a raw message based on another message but with an incorrect data size in
the message header.
"""
raw_msg = self.node.p2p.build_message(message)
bad_size_bytes = struct.pack("<I", wrong_size)
num_header_bytes_before_size = 4 + 12
# Replace the correct data size in the message with an incorrect one.
raw_msg_with_wrong_size = (
raw_msg[:num_header_bytes_before_size] +
bad_size_bytes +
raw_msg[(num_header_bytes_before_size + len(bad_size_bytes)):]
)
assert len(raw_msg) == len(raw_msg_with_wrong_size)
return raw_msg_with_wrong_size
if __name__ == '__main__':
InvalidMessagesTest().main()
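For reference, a rough sketch of the message framing that _tweak_msg_data_size() tampers with, with field widths as used by the framework's build_message(); the magic and checksum bytes below are placeholders, not real network values. The header is 4 bytes of network magic, a 12-byte zero-padded command, a 4-byte little-endian payload size, and a 4-byte checksum, which is why the size field starts at offset 4 + 12 = 16:

import struct

def build_header(magic, command, payload, checksum):
    # magic(4) + command padded to 12 bytes + payload length as little-endian uint32 + checksum(4)
    return magic + command.ljust(12, b'\x00') + struct.pack('<I', len(payload)) + checksum

header = build_header(b'\x00' * 4, b'badmsg', b'x' * 100, b'\x00' * 4)
assert len(header) == 4 + 12 + 4 + 4
assert header[16:20] == struct.pack('<I', 100)  # the bytes _tweak_msg_data_size() overwrites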

View File

@@ -249,10 +249,13 @@ class P2PConnection(asyncio.Protocol):
This method takes a P2P payload, builds the P2P header and adds
the message to the send buffer to be sent over the socket."""
+ tmsg = self.build_message(message)
+ self._log_message("send", message)
+ return self.send_raw_message(tmsg)
+ def send_raw_message(self, raw_message_bytes):
if not self.is_connected:
raise IOError('Not connected')
- self._log_message("send", message)
- tmsg = self._build_message(message)
def maybe_write():
if not self._transport:
@@ -262,12 +265,12 @@
# Python 3.4 versions.
if hasattr(self._transport, 'is_closing') and self._transport.is_closing():
return
- self._transport.write(tmsg)
+ self._transport.write(raw_message_bytes)
NetworkThread.network_event_loop.call_soon_threadsafe(maybe_write)
# Class utility methods
- def _build_message(self, message):
+ def build_message(self, message):
"""Build a serialized P2P message"""
command = message.command
data = message.serialize()
@@ -495,9 +498,9 @@ class P2PInterface(P2PConnection):
# Message sending helper functions
- def send_and_ping(self, message):
+ def send_and_ping(self, message, timeout=60):
self.send_message(message)
- self.sync_with_ping()
+ self.sync_with_ping(timeout=timeout)
# Sync up with the node
def sync_with_ping(self, timeout=60):
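The refactor above splits sending into build_message() plus send_raw_message(), so tests can frame a message once, mutate the raw bytes, and push them unchanged. A hypothetical usage, following how p2p_invalid_messages.py drives it (assumes node is a running TestNode):

import struct
from test_framework import messages
from test_framework.mininode import P2PDataStore

conn = node.add_p2p_connection(P2PDataStore())
raw = conn.build_message(messages.msg_ping(nonce=1))   # header + payload, framed once
conn.send_raw_message(raw)                             # roughly what send_message() now does internally
conn.send_raw_message(raw[:16] + struct.pack('<I', 2) + raw[20:])  # lie about the payload size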

View File

@@ -134,6 +134,28 @@ class TestNode():
]
return PRIV_KEYS[self.index]
def get_mem_rss_kilobytes(self):
"""Get the memory usage (RSS) per `ps`.
If process is stopped or `ps` is unavailable, return None.
"""
if not (self.running and self.process):
self.log.warning("Couldn't get memory usage; process isn't running.")
return None
try:
return int(subprocess.check_output(
"ps h -o rss {}".format(self.process.pid),
shell=True, stderr=subprocess.DEVNULL).strip())
# Catching `Exception` broadly to avoid failing on platforms where ps
# isn't installed or doesn't work as expected, e.g. OpenBSD.
#
# We could later use something like `psutils` to work across platforms.
except Exception:
self.log.exception("Unable to get memory usage")
return None
def _node_msg(self, msg: str) -> str:
"""Return a modified msg that identifies this node by its index as a debugging aid."""
return "[node %d] %s" % (self.index, msg)
@@ -298,6 +320,33 @@ class TestNode():
if re.search(re.escape(expected_msg), log, flags=re.MULTILINE) is None:
self._raise_assertion_error('Expected message "{}" does not partially match log:\n\n{}\n\n'.format(expected_msg, print_log))
@contextlib.contextmanager
def assert_memory_usage_stable(self, *, increase_allowed=0.03):
"""Context manager that allows the user to assert that a node's memory usage (RSS)
hasn't increased beyond some threshold percentage.
Args:
increase_allowed (float): the fractional increase in memory allowed until failure;
e.g. `0.12` for up to 12% increase allowed.
"""
before_memory_usage = self.get_mem_rss_kilobytes()
yield
after_memory_usage = self.get_mem_rss_kilobytes()
if not (before_memory_usage and after_memory_usage):
self.log.warning("Unable to detect memory usage (RSS) - skipping memory check.")
return
perc_increase_memory_usage = 1 - (float(before_memory_usage) / after_memory_usage)
if perc_increase_memory_usage > increase_allowed:
self._raise_assertion_error(
"Memory usage increased over threshold of {:.3f}% from {} to {} ({:.3f}%)".format(
increase_allowed * 100, before_memory_usage, after_memory_usage,
perc_increase_memory_usage * 100))
@contextlib.contextmanager
def profile_with_perf(self, profile_name):
"""

View File

@@ -140,6 +140,7 @@ BASE_SCRIPTS = [
'mining_prioritisetransaction.py',
'p2p_invalid_locator.py',
'p2p_invalid_block.py',
'p2p_invalid_messages.py',
'p2p_invalid_tx.py',
'feature_assumevalid.py',
'example_test.py',