Merge pull request #3531 from PastaPastaPasta/backports-0.17-pr6

Backports 0.17 pr6
Authored by UdjinM6 on 2020-06-27 14:49:35 +03:00; committed by GitHub.
commit 908c04f1e3
25 changed files with 191 additions and 173 deletions


@@ -7,12 +7,26 @@
 # Check for new lines in diff that introduce trailing whitespace.
 # We can't run this check unless we know the commit range for the PR.
+while getopts "?" opt; do
+  case $opt in
+    ?)
+      echo "Usage: .lint-whitespace.sh [N]"
+      echo "COMMIT_RANGE='<commit range>' .lint-whitespace.sh"
+      echo ".lint-whitespace.sh -?"
+      echo "Checks unstaged changes, the previous N commits, or a commit range."
+      echo "COMMIT_RANGE='47ba2c3...ee50c9e' .lint-whitespace.sh"
+      exit 0
+    ;;
+  esac
+done
 if [ -z "${COMMIT_RANGE}" ]; then
-  echo "Cannot run lint-whitespace.sh without commit range. To run locally, use:"
-  echo "COMMIT_RANGE='<commit range>' .lint-whitespace.sh"
-  echo "For example:"
-  echo "COMMIT_RANGE='47ba2c3...ee50c9e' .lint-whitespace.sh"
-  exit 1
+  if [ "$1" ]; then
+    COMMIT_RANGE="HEAD~$1...HEAD"
+  else
+    COMMIT_RANGE="HEAD"
+  fi
 fi
 showdiff() {

@@ -37,21 +51,26 @@ if showdiff | grep -E -q '^\+.*\s+$'; then
   echo "The following changes were suspected:"
   FILENAME=""
   SEEN=0
+  SEENLN=0
   while read -r line; do
     if [[ "$line" =~ ^diff ]]; then
       FILENAME="$line"
       SEEN=0
     elif [[ "$line" =~ ^@@ ]]; then
       LINENUMBER="$line"
+      SEENLN=0
     else
       if [ "$SEEN" -eq 0 ]; then
        # The first time a file is seen with trailing whitespace, we print the
        # filename (preceded by a newline).
        echo
        echo "$FILENAME"
-       echo "$LINENUMBER"
        SEEN=1
      fi
+      if [ "$SEENLN" -eq 0 ]; then
+        echo "$LINENUMBER"
+        SEENLN=1
+      fi
       echo "$line"
     fi
   done < <(showdiff | grep -E '^(diff --git |@@|\+.*\s+$)')

@@ -59,29 +78,34 @@
 fi
 # Check if tab characters were found in the diff.
-if showcodediff | grep -P -q '^\+.*\t'; then
+if showcodediff | perl -nle '$MATCH++ if m{^\+.*\t}; END{exit 1 unless $MATCH>0}' > /dev/null; then
   echo "This diff appears to have added new lines with tab characters instead of spaces."
   echo "The following changes were suspected:"
   FILENAME=""
   SEEN=0
+  SEENLN=0
   while read -r line; do
     if [[ "$line" =~ ^diff ]]; then
       FILENAME="$line"
       SEEN=0
     elif [[ "$line" =~ ^@@ ]]; then
       LINENUMBER="$line"
+      SEENLN=0
     else
       if [ "$SEEN" -eq 0 ]; then
        # The first time a file is seen with a tab character, we print the
        # filename (preceded by a newline).
        echo
        echo "$FILENAME"
-       echo "$LINENUMBER"
        SEEN=1
      fi
+      if [ "$SEENLN" -eq 0 ]; then
+        echo "$LINENUMBER"
+        SEENLN=1
+      fi
       echo "$line"
     fi
   done < <(showcodediff | perl -nle 'print if m{^(diff --git |@@|\+.*\t)}')
   RET=1
 fi
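With the new getopts handling, the linter can be run locally in the three ways its own usage text describes; a quick sketch (invocation path is illustrative, run it from wherever the script lives in the tree):

    # Check the previous 2 commits
    ./lint-whitespace.sh 2

    # Check an explicit commit range
    COMMIT_RANGE='47ba2c3...ee50c9e' ./lint-whitespace.sh

    # Check unstaged changes only (COMMIT_RANGE defaults to HEAD)
    ./lint-whitespace.sh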


@@ -1,14 +1,9 @@
 package=native_biplist
-$(package)_version=0.9
+$(package)_version=1.0.3
-$(package)_download_path=https://pypi.python.org/packages/source/b/biplist
+$(package)_download_path=https://bitbucket.org/wooster/biplist/downloads
 $(package)_file_name=biplist-$($(package)_version).tar.gz
-$(package)_sha256_hash=b57cadfd26e4754efdf89e9e37de87885f9b5c847b2615688ca04adfaf6ca604
+$(package)_sha256_hash=4c0549764c5fe50b28042ec21aa2e14fe1a2224e239a1dae77d9e7f3932aa4c6
 $(package)_install_libdir=$(build_prefix)/lib/python/dist-packages
-$(package)_patches=sorted_list.patch
-
-define $(package)_preprocess_cmds
-  patch -p1 < $($(package)_patch_dir)/sorted_list.patch
-endef

 define $(package)_build_cmds
   python3 setup.py build
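A quick way to sanity-check the bumped source tarball against the hash recorded in the .mk file above (URL and filename follow the updated variables; wget/sha256sum assumed available):

    wget https://bitbucket.org/wooster/biplist/downloads/biplist-1.0.3.tar.gz
    sha256sum biplist-1.0.3.tar.gz
    # expected: 4c0549764c5fe50b28042ec21aa2e14fe1a2224e239a1dae77d9e7f3932aa4c6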


@@ -1,29 +0,0 @@
--- a/biplist/__init__.py 2014-10-26 19:03:11.000000000 +0000
+++ b/biplist/__init__.py 2016-07-19 19:30:17.663521999 +0000
@@ -541,7 +541,7 @@
return HashableWrapper(n)
elif isinstance(root, dict):
n = {}
- for key, value in iteritems(root):
+ for key, value in sorted(iteritems(root)):
n[self.wrapRoot(key)] = self.wrapRoot(value)
return HashableWrapper(n)
elif isinstance(root, list):
@@ -616,7 +616,7 @@
elif isinstance(obj, dict):
size = proc_size(len(obj))
self.incrementByteCount('dictBytes', incr=1+size)
- for key, value in iteritems(obj):
+ for key, value in sorted(iteritems(obj)):
check_key(key)
self.computeOffsets(key, asReference=True)
self.computeOffsets(value, asReference=True)
@@ -714,7 +714,7 @@
keys = []
values = []
objectsToWrite = []
- for key, value in iteritems(obj):
+ for key, value in sorted(iteritems(obj)):
keys.append(key)
values.append(value)
for key in keys:


@@ -28,4 +28,4 @@ BIPs that are implemented by Bitcoin Core (up-to-date up to **v0.13.0**):
 * [`BIP 130`](https://github.com/bitcoin/bips/blob/master/bip-0130.mediawiki): direct headers announcement is negotiated with peer versions `>=70012` as of **v0.12.0** ([PR 6494](https://github.com/bitcoin/bitcoin/pull/6494)).
 * [`BIP 147`](https://github.com/bitcoin/bips/blob/master/bip-0147.mediawiki): NULLDUMMY softfork as of **v0.13.1** ([PR 8636](https://github.com/bitcoin/bitcoin/pull/8636) and [PR 8937](https://github.com/bitcoin/bitcoin/pull/8937)).
 * [`BIP 152`](https://github.com/bitcoin/bips/blob/master/bip-0152.mediawiki): Compact block transfer and related optimizations are used as of **v0.13.0** ([PR 8068](https://github.com/bitcoin/bitcoin/pull/8068)).
-* [`BIP 159`](https://github.com/bitcoin/bips/blob/master/bip-0159.mediawiki): NODE_NETWORK_LIMITED service bit [signaling only] is supported as of **v0.16.0** ([PR 10740](https://github.com/bitcoin/bitcoin/pull/10740)).
+* [`BIP 159`](https://github.com/bitcoin/bips/blob/master/bip-0159.mediawiki): NODE_NETWORK_LIMITED service bit [signaling only] is supported as of **v0.16.0** ([PR 11740](https://github.com/bitcoin/bitcoin/pull/11740)).


@@ -89,7 +89,9 @@ Create the OS X SDK tarball, see the [OS X readme](README_osx.md) for details, a
 ### Optional: Seed the Gitian sources cache and offline git repositories

-By default, Gitian will fetch source files as needed. To cache them ahead of time:
+NOTE: Gitian is sometimes unable to download files. If you have errors, try the step below.
+
+By default, Gitian will fetch source files as needed. To cache them ahead of time, make sure you have checked out the tag you want to build in dash, then:

     pushd ./gitian-builder
     make -C ../dash/depends download SOURCES_PATH=`pwd`/cache/common
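A sketch of the full seeding sequence the new wording implies, with the tag checkout done first (the tag name is only an example, substitute the release you intend to build):

    pushd ./dash
    git checkout v0.17.0.0   # example tag; check out the tag you want to build
    popd
    pushd ./gitian-builder
    make -C ../dash/depends download SOURCES_PATH=`pwd`/cache/common
    popd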


@@ -26,6 +26,10 @@ benchmark::BenchRunner::RunAll(benchmark::duration elapsedTimeForOne)
     if (std::ratio_less_equal<benchmark::clock::period, std::micro>::value) {
         std::cerr << "WARNING: Clock precision is worse than microsecond - benchmarks may be less accurate!\n";
     }
+#ifdef DEBUG
+    std::cerr << "WARNING: This is a debug build - may result in slower benchmarks.\n";
+#endif
     std::cout << "#Benchmark" << "," << "count" << "," << "min(ns)" << "," << "max(ns)" << "," << "average(ns)" << ","
               << "min_cycles" << "," << "max_cycles" << "," << "average_cycles" << "\n";


@@ -219,14 +219,10 @@ void HandleError(const leveldb::Status& status)
 {
     if (status.ok())
         return;
-    LogPrintf("%s\n", status.ToString());
-    if (status.IsCorruption())
-        throw dbwrapper_error("Database corrupted");
-    if (status.IsIOError())
-        throw dbwrapper_error("Database I/O error");
-    if (status.IsNotFound())
-        throw dbwrapper_error("Database entry missing");
-    throw dbwrapper_error("Unknown database error");
+    const std::string errmsg = "Fatal LevelDB error: " + status.ToString();
+    LogPrintf("%s\n", errmsg);
+    LogPrintf("You can use -debug=leveldb to get more complete diagnostic messages\n");
+    throw dbwrapper_error(errmsg);
 }

 const std::vector<unsigned char>& GetObfuscateKey(const CDBWrapper &w)
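The replacement error path points users at more verbose LevelDB logging; enabling it is a one-liner, as the added log message itself suggests:

    dashd -debug=leveldb   # then inspect debug.log in the data directory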


@@ -1017,7 +1017,13 @@ void InitLogging()
     fLogIPs = gArgs.GetBoolArg("-logips", DEFAULT_LOGIPS);

     LogPrintf("\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n");
-    LogPrintf("Dash Core version %s\n", FormatFullVersion());
+    std::string version_string = FormatFullVersion();
+#ifdef DEBUG
+    version_string += " (debug build)";
+#else
+    version_string += " (release build)";
+#endif
+    LogPrintf(PACKAGE_NAME " version %s\n", version_string);
 }

 namespace { // Variables internal to initialization process only


@@ -170,7 +170,7 @@ CPrivKey CKey::GetPrivKey() const {
     size_t privkeylen;
     privkey.resize(PRIVATE_KEY_SIZE);
     privkeylen = PRIVATE_KEY_SIZE;
-    ret = ec_privkey_export_der(secp256k1_context_sign, (unsigned char*) privkey.data(), &privkeylen, begin(), fCompressed ? SECP256K1_EC_COMPRESSED : SECP256K1_EC_UNCOMPRESSED);
+    ret = ec_privkey_export_der(secp256k1_context_sign, privkey.data(), &privkeylen, begin(), fCompressed ? SECP256K1_EC_COMPRESSED : SECP256K1_EC_UNCOMPRESSED);
     assert(ret);
     privkey.resize(privkeylen);
     return privkey;

@@ -199,7 +199,7 @@ bool CKey::Sign(const uint256 &hash, std::vector<unsigned char>& vchSig, uint32_
     secp256k1_ecdsa_signature sig;
     int ret = secp256k1_ecdsa_sign(secp256k1_context_sign, &sig, hash.begin(), begin(), secp256k1_nonce_function_rfc6979, test_case ? extra_entropy : nullptr);
     assert(ret);
-    secp256k1_ecdsa_signature_serialize_der(secp256k1_context_sign, (unsigned char*)vchSig.data(), &nSigLen, &sig);
+    secp256k1_ecdsa_signature_serialize_der(secp256k1_context_sign, vchSig.data(), &nSigLen, &sig);
     vchSig.resize(nSigLen);
     return true;
 }

@@ -226,7 +226,7 @@ bool CKey::SignCompact(const uint256 &hash, std::vector<unsigned char>& vchSig)
     secp256k1_ecdsa_recoverable_signature sig;
     int ret = secp256k1_ecdsa_sign_recoverable(secp256k1_context_sign, &sig, hash.begin(), begin(), secp256k1_nonce_function_rfc6979, nullptr);
     assert(ret);
-    secp256k1_ecdsa_recoverable_signature_serialize_compact(secp256k1_context_sign, (unsigned char*)&vchSig[1], &rec, &sig);
+    secp256k1_ecdsa_recoverable_signature_serialize_compact(secp256k1_context_sign, &vchSig[1], &rec, &sig);
     assert(ret);
     assert(rec != -1);
     vchSig[0] = 27 + rec + (fCompressed ? 4 : 0);


@@ -255,7 +255,7 @@ QString TransactionDesc::toHTML(CWallet *wallet, CWalletTx &wtx, TransactionReco
     if (wtx.mapValue.count("comment") && !wtx.mapValue["comment"].empty())
         strHTML += "<br><b>" + tr("Comment") + ":</b><br>" + GUIUtil::HtmlEscape(wtx.mapValue["comment"], true) + "<br>";

-    strHTML += "<b>" + tr("Transaction ID") + ":</b> " + rec->getTxID() + "<br>";
+    strHTML += "<b>" + tr("Transaction ID") + ":</b> " + rec->getTxHash() + "<br>";
     strHTML += "<b>" + tr("Output index") + ":</b> " + QString::number(rec->getOutputIndex()) + "<br>";
     strHTML += "<b>" + tr("Transaction total size") + ":</b> " + QString::number(wtx.tx->GetTotalSize()) + " bytes<br>";


@@ -17,10 +17,9 @@ TransactionDescDialog::TransactionDescDialog(const QModelIndex &idx, QWidget *pa
     ui(new Ui::TransactionDescDialog)
 {
     ui->setupUi(this);
-    setWindowTitle(tr("Details for %1").arg(idx.data(TransactionTableModel::TxIDRole).toString()));
+    setWindowTitle(tr("Details for %1").arg(idx.data(TransactionTableModel::TxHashRole).toString()));

     /* Open CSS when configured */
     this->setStyleSheet(GUIUtil::loadStyleSheet());
     QString desc = idx.data(TransactionTableModel::LongDescriptionRole).toString();
     ui->detailText->setHtml(desc);
 }


@@ -32,36 +32,39 @@ bool TransactionFilterProxy::filterAcceptsRow(int sourceRow, const QModelIndex &
 {
     QModelIndex index = sourceModel()->index(sourceRow, 0, sourceParent);

-    int type = index.data(TransactionTableModel::TypeRole).toInt();
-    qint64 datetime = index.data(TransactionTableModel::DateRoleInt).toLongLong();
-    bool involvesWatchAddress = index.data(TransactionTableModel::WatchonlyRole).toBool();
-    bool lockedByInstantSend = index.data(TransactionTableModel::InstantSendRole).toBool();
-    QString address = index.data(TransactionTableModel::AddressRole).toString();
-    QString label = index.data(TransactionTableModel::LabelRole).toString();
-    QString txid = index.data(TransactionTableModel::TxIDRole).toString();
-    qint64 amount = llabs(index.data(TransactionTableModel::AmountRole).toLongLong());
     int status = index.data(TransactionTableModel::StatusRole).toInt();
+    if (!showInactive && status == TransactionStatus::Conflicted)
+        return false;

-    if(!showInactive && status == TransactionStatus::Conflicted)
-        return false;
-    if(!(TYPE(type) & typeFilter))
+    int type = index.data(TransactionTableModel::TypeRole).toInt();
+    if (!(TYPE(type) & typeFilter))
         return false;

+    bool involvesWatchAddress = index.data(TransactionTableModel::WatchonlyRole).toBool();
     if (involvesWatchAddress && watchOnlyFilter == WatchOnlyFilter_No)
         return false;
     if (!involvesWatchAddress && watchOnlyFilter == WatchOnlyFilter_Yes)
         return false;

+    bool lockedByInstantSend = index.data(TransactionTableModel::InstantSendRole).toBool();
     if (lockedByInstantSend && instantsendFilter == InstantSendFilter_No)
         return false;
     if (!lockedByInstantSend && instantsendFilter == InstantSendFilter_Yes)
         return false;

-    if(datetime < dateFrom || datetime > dateTo)
+    qint64 datetime = index.data(TransactionTableModel::DateRoleInt).toLongLong();
+    if (datetime < dateFrom || datetime > dateTo)
         return false;

+    QString address = index.data(TransactionTableModel::AddressRole).toString();
+    QString label = index.data(TransactionTableModel::LabelRole).toString();
+    QString txid = index.data(TransactionTableModel::TxHashRole).toString();
     if (!address.contains(m_search_string, Qt::CaseInsensitive) &&
         ! label.contains(m_search_string, Qt::CaseInsensitive) &&
         ! txid.contains(m_search_string, Qt::CaseInsensitive)) {
         return false;
     }

-    if(amount < minAmount)
+    qint64 amount = llabs(index.data(TransactionTableModel::AmountRole).toLongLong());
+    if (amount < minAmount)
         return false;

     return true;


@@ -380,7 +380,7 @@ bool TransactionRecord::statusUpdateNeeded(int chainLockHeight) const
         || (!status.lockedByChainLocks && status.cachedChainLockHeight != chainLockHeight);
 }

-QString TransactionRecord::getTxID() const
+QString TransactionRecord::getTxHash() const
 {
     return QString::fromStdString(hash.ToString());
 }


@@ -151,7 +151,7 @@ public:
     bool involvesWatchAddress;

     /** Return the unique identifier for this transaction (part) */
-    QString getTxID() const;
+    QString getTxHash() const;

     /** Return the output index of the subtransaction */
     int getOutputIndex() const;


@@ -689,10 +689,8 @@ QVariant TransactionTableModel::data(const QModelIndex &index, int role) const
         return rec->status.label;
     case AmountRole:
         return qint64(rec->credit + rec->debit);
-    case TxIDRole:
-        return rec->getTxID();
     case TxHashRole:
-        return QString::fromStdString(rec->hash.ToString());
+        return rec->getTxHash();
     case TxHexRole:
         return priv->getTxHex(rec);
     case TxPlainTextRole:


@@ -63,8 +63,6 @@ public:
         LabelRole,
         /** Net amount of transaction */
         AmountRole,
-        /** Unique identifier */
-        TxIDRole,
         /** Transaction hash */
         TxHashRole,
         /** Transaction data, hex-encoded */


@@ -423,7 +423,7 @@ void TransactionView::exportClicked()
     writer.addColumn(tr("Label"), 0, TransactionTableModel::LabelRole);
     writer.addColumn(tr("Address"), 0, TransactionTableModel::AddressRole);
     writer.addColumn(BitcoinUnits::getAmountColumnTitle(model->getOptionsModel()->getDisplayUnit()), 0, TransactionTableModel::FormattedAmountRole);
-    writer.addColumn(tr("ID"), 0, TransactionTableModel::TxIDRole);
+    writer.addColumn(tr("ID"), 0, TransactionTableModel::TxHashRole);

     if(!writer.write()) {
         Q_EMIT message(tr("Exporting Failed"), tr("There was an error trying to save the transaction history to %1.").arg(filename),

@@ -490,7 +490,7 @@ void TransactionView::copyAmount()

 void TransactionView::copyTxID()
 {
-    GUIUtil::copyEntryData(transactionView, 0, TransactionTableModel::TxIDRole);
+    GUIUtil::copyEntryData(transactionView, 0, TransactionTableModel::TxHashRole);
 }

 void TransactionView::copyTxHex()


@@ -27,6 +27,7 @@
 #include <util.h>
 #include <utilstrencodings.h>
 #include <hash.h>
+#include <validationinterface.h>
 #include <warnings.h>

 #include <evo/specialtx.h>

@@ -381,6 +382,21 @@ UniValue waitforblockheight(const JSONRPCRequest& request)
     return ret;
 }

+UniValue syncwithvalidationinterfacequeue(const JSONRPCRequest& request)
+{
+    if (request.fHelp || request.params.size() > 0) {
+        throw std::runtime_error(
+            "syncwithvalidationinterfacequeue\n"
+            "\nWaits for the validation interface queue to catch up on everything that was there when we entered this function.\n"
+            "\nExamples:\n"
+            + HelpExampleCli("syncwithvalidationinterfacequeue","")
+            + HelpExampleRpc("syncwithvalidationinterfacequeue","")
+        );
+    }
+    SyncWithValidationInterfaceQueue();
+    return NullUniValue;
+}
+
 UniValue getdifficulty(const JSONRPCRequest& request)
 {
     if (request.fHelp || request.params.size() != 0)

@@ -2296,6 +2312,7 @@ static const CRPCCommand commands[] =
     { "hidden", "waitfornewblock", &waitfornewblock, {"timeout"} },
     { "hidden", "waitforblock", &waitforblock, {"blockhash","timeout"} },
     { "hidden", "waitforblockheight", &waitforblockheight, {"height","timeout"} },
+    { "hidden", "syncwithvalidationinterfacequeue", &syncwithvalidationinterfacequeue, {} },
 };

 void RegisterBlockchainRPCCommands(CRPCTable &t)
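The new hidden RPC can be exercised exactly as its help text suggests; a minimal sketch using the CLI:

    dash-cli syncwithvalidationinterfacequeue
    # blocks until the validation interface queue has drained, then returns null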


@@ -829,7 +829,13 @@ UniValue dumpwallet(const JSONRPCRequest& request)
             "Note that if your wallet contains keys which are not derived from your HD seed (e.g. imported keys), these are not covered by\n"
             "only backing up the seed itself, and must be backed up too (e.g. ensure you back up the whole dumpfile).\n"
             "\nArguments:\n"
-            "1. \"filename\" (string, required) The filename\n"
+            "1. \"filename\" (string, required) The filename with path (either absolute or relative to dashd)\n"
+            "\nResult:\n"
+            "{ (json object)\n"
+            "  \"keys\" : { (int) The number of keys contained in the wallet dump\n"
+            "  \"filename\" : { (string) The filename with full absolute path\n"
+            "  \"warning\" : { (string) A warning about not sharing the wallet dump with anyone\n"
+            "}\n"
             "\nExamples:\n"
             + HelpExampleCli("dumpwallet", "\"test\"")
             + HelpExampleRpc("dumpwallet", "\"test\"")

@@ -969,7 +975,7 @@ UniValue dumpwallet(const JSONRPCRequest& request)
     std::string strWarning = strprintf(_("%s file contains all private keys from this wallet. Do not share it with anyone!"), request.params[0].get_str().c_str());
     obj.pushKV("keys", int(vKeyBirth.size()));
-    obj.pushKV("file", request.params[0].get_str().c_str());
+    obj.pushKV("filename", filepath.string());
     obj.pushKV("warning", strWarning);

     return obj;
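A hedged example of the updated call and the new result shape it documents (the values below are illustrative, not actual output):

    dash-cli dumpwallet "test"
    # {
    #   "keys": 1000,                              (illustrative count)
    #   "filename": "/home/user/.dashcore/test",   (absolute path is now returned; illustrative path)
    #   "warning": "test file contains all private keys from this wallet. Do not share it with anyone!"
    # }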


@@ -66,16 +66,19 @@ class MempoolPersistTest(BitcoinTestFramework):
         self.log.debug("Stop-start the nodes. Verify that node0 has the transactions in its mempool and node1 does not. Verify that node2 calculates its balance correctly after loading wallet transactions.")
         self.stop_nodes()
+        # Give this node a head-start, so we can be "extra-sure" that it didn't load anything later
+        # Also don't store the mempool, to keep the datadir clean
+        self.start_node(1, extra_args=["-persistmempool=0"])
         self.start_node(0)
-        self.start_node(1)
         self.start_node(2)
         # Give dashd a second to reload the mempool
-        time.sleep(1)
-        wait_until(lambda: len(self.nodes[0].getrawmempool()) == 5)
-        wait_until(lambda: len(self.nodes[2].getrawmempool()) == 5)
+        wait_until(lambda: len(self.nodes[0].getrawmempool()) == 5, timeout=1)
+        wait_until(lambda: len(self.nodes[2].getrawmempool()) == 5, timeout=1)
+        # The others have loaded their mempool. If node_1 loaded anything, we'd probably notice by now:
         assert_equal(len(self.nodes[1].getrawmempool()), 0)
         # Verify accounting of mempool transactions after restart is correct
+        self.nodes[2].syncwithvalidationinterfacequeue() # Flush mempool to wallet
         assert_equal(node2_balance, self.nodes[2].getbalance())

         self.log.debug("Stop-start node0 with -persistmempool=0. Verify that it doesn't load its mempool.dat file.")


@@ -7,25 +7,24 @@
 Generate chains with block versions that appear to be signalling unknown
 soft-forks, and test that warning alerts are generated.
 """
-import os
-from test_framework.mininode import *
-from test_framework.test_framework import BitcoinTestFramework
-from test_framework.util import *
 import re
-from test_framework.blocktools import create_block, create_coinbase

-VB_PERIOD = 144 # versionbits period length for regtest
-VB_THRESHOLD = 108 # versionbits activation threshold for regtest
+from test_framework.blocktools import create_block, create_coinbase
+from test_framework.messages import msg_block
+from test_framework.mininode import P2PInterface, network_thread_start, mininode_lock
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.util import wait_until
+
+VB_PERIOD = 144 # versionbits period length for regtest
+VB_THRESHOLD = 108 # versionbits activation threshold for regtest
 VB_TOP_BITS = 0x20000000
 VB_UNKNOWN_BIT = 27 # Choose a bit unassigned to any deployment
+VB_UNKNOWN_VERSION = VB_TOP_BITS | (1 << VB_UNKNOWN_BIT)

 WARN_UNKNOWN_RULES_MINED = "Unknown block versions being mined! It's possible unknown rules are in effect"
 WARN_UNKNOWN_RULES_ACTIVE = "unknown new rules activated (versionbit {})".format(VB_UNKNOWN_BIT)
-VB_PATTERN = re.compile("^Warning.*versionbit")
+VB_PATTERN = re.compile("Warning: unknown new rules activated.*versionbit")

-class TestNode(P2PInterface):
-    def on_inv(self, message):
-        pass

 class VersionBitsWarningTest(BitcoinTestFramework):
     def set_test_params(self):
@@ -35,21 +34,21 @@ class VersionBitsWarningTest(BitcoinTestFramework):
     def setup_network(self):
         self.alert_filename = os.path.join(self.options.tmpdir, "alert.txt")
         # Open and close to create zero-length file
-        with open(self.alert_filename, 'w', encoding='utf8') as _:
+        with open(self.alert_filename, 'w', encoding='utf8'):
             pass
         self.extra_args = [["-alertnotify=echo %s >> \"" + self.alert_filename + "\""]]
         self.setup_nodes()

-    # Send numblocks blocks via peer with nVersionToUse set.
-    def send_blocks_with_version(self, peer, numblocks, nVersionToUse):
+    def send_blocks_with_version(self, peer, numblocks, version):
+        """Send numblocks blocks to peer with version set"""
         tip = self.nodes[0].getbestblockhash()
         height = self.nodes[0].getblockcount()
-        block_time = self.nodes[0].getblockheader(tip)["time"]+1
+        block_time = self.nodes[0].getblockheader(tip)["time"] + 1
         tip = int(tip, 16)
         for _ in range(numblocks):
-            block = create_block(tip, create_coinbase(height+1), block_time)
-            block.nVersion = nVersionToUse
+            block = create_block(tip, create_coinbase(height + 1), block_time)
+            block.nVersion = version
             block.solve()
             peer.send_message(msg_block(block))
             block_time += 1
@@ -57,70 +56,57 @@ class VersionBitsWarningTest(BitcoinTestFramework):
             tip = block.sha256
         peer.sync_with_ping()

-    def test_versionbits_in_alert_file(self):
-        with open(self.alert_filename, 'r', encoding='utf8') as f:
-            alert_text = f.read()
-        assert(VB_PATTERN.match(alert_text))
+    def versionbits_in_alert_file(self):
+        """Test that the versionbits warning has been written to the alert file."""
+        alert_text = open(self.alert_filename, 'r', encoding='utf8').read()
+        return VB_PATTERN.search(alert_text) is not None

     def run_test(self):
-        # Setup the p2p connection and start up the network thread.
-        self.nodes[0].add_p2p_connection(TestNode())
+        # Handy alias
+        node = self.nodes[0]
+        node.add_p2p_connection(P2PInterface())
         network_thread_start()
+        node.p2p.wait_for_verack()

-        # Test logic begins here
-        self.nodes[0].p2p.wait_for_verack()
-
-        # 1. Have the node mine one period worth of blocks
-        self.nodes[0].generate(VB_PERIOD)
+        # Mine one period worth of blocks
+        node.generate(VB_PERIOD)

-        # 2. Now build one period of blocks on the tip, with < VB_THRESHOLD
-        # blocks signaling some unknown bit.
-        nVersion = VB_TOP_BITS | (1<<VB_UNKNOWN_BIT)
-        self.send_blocks_with_version(self.nodes[0].p2p, VB_THRESHOLD-1, nVersion)
-        # Fill rest of period with regular version blocks
-        self.nodes[0].generate(VB_PERIOD - VB_THRESHOLD + 1)
-        # Check that we're not getting any versionbit-related errors in
-        # get*info()
-        assert(not VB_PATTERN.match(self.nodes[0].getmininginfo()["warnings"]))
-        assert(not VB_PATTERN.match(self.nodes[0].getnetworkinfo()["warnings"]))
+        self.log.info("Check that there is no warning if previous VB_BLOCKS have <VB_THRESHOLD blocks with unknown versionbits version.")
+        # Build one period of blocks with < VB_THRESHOLD blocks signaling some unknown bit
+        self.send_blocks_with_version(node.p2p, VB_THRESHOLD - 1, VB_UNKNOWN_VERSION)
+        node.generate(VB_PERIOD - VB_THRESHOLD + 1)
+        # Check that we're not getting any versionbit-related errors in get*info()
+        assert(not VB_PATTERN.match(node.getmininginfo()["warnings"]))
+        assert(not VB_PATTERN.match(node.getnetworkinfo()["warnings"]))

-        # 3. Now build one period of blocks with >= VB_THRESHOLD blocks signaling
-        # some unknown bit
-        self.send_blocks_with_version(self.nodes[0].p2p, VB_THRESHOLD, nVersion)
-        self.nodes[0].generate(VB_PERIOD - VB_THRESHOLD)
-        # Might not get a versionbits-related alert yet, as we should
-        # have gotten a different alert due to more than 51/100 blocks
-        # being of unexpected version.
-        # Check that get*info() shows some kind of error.
-        assert(WARN_UNKNOWN_RULES_MINED in self.nodes[0].getmininginfo()["warnings"])
-        assert(WARN_UNKNOWN_RULES_MINED in self.nodes[0].getnetworkinfo()["warnings"])
+        self.log.info("Check that there is a warning if >50 blocks in the last 100 were an unknown version")
+        # Build one period of blocks with VB_THRESHOLD blocks signaling some unknown bit
+        self.send_blocks_with_version(node.p2p, VB_THRESHOLD, VB_UNKNOWN_VERSION)
+        node.generate(VB_PERIOD - VB_THRESHOLD)
+        # Check that get*info() shows the 51/100 unknown block version error.
+        assert(WARN_UNKNOWN_RULES_MINED in node.getmininginfo()["warnings"])
+        assert(WARN_UNKNOWN_RULES_MINED in node.getnetworkinfo()["warnings"])

+        self.log.info("Check that there is a warning if previous VB_BLOCKS have >=VB_THRESHOLD blocks with unknown versionbits version.")
         # Mine a period worth of expected blocks so the generic block-version warning
-        # is cleared, and restart the node. This should move the versionbit state
-        # to ACTIVE.
-        self.nodes[0].generate(VB_PERIOD)
-        self.stop_nodes()
-        # Empty out the alert file
-        with open(self.alert_filename, 'w', encoding='utf8') as _:
-            pass
-        self.start_nodes()
+        # is cleared. This will move the versionbit state to ACTIVE.
+        node.generate(VB_PERIOD)

-        # TODO this is a workaround. We have to wait for IBD to finish before we generate a block, as otherwise there
-        # won't be any warning generated. This workaround must be removed when we backport https://github.com/bitcoin/bitcoin/pull/12264
-        self.nodes[0].generate(1)
-        time.sleep(5)
+        # Stop-start the node. This is required because dashd will only warn once about unknown versions or unknown rules activating.
+        self.restart_node(0)

-        # Connecting one block should be enough to generate an error.
-        self.nodes[0].generate(1)
-        assert(WARN_UNKNOWN_RULES_ACTIVE in self.nodes[0].getmininginfo()["warnings"])
-        assert(WARN_UNKNOWN_RULES_ACTIVE in self.nodes[0].getnetworkinfo()["warnings"])
-        self.stop_nodes()
-        self.test_versionbits_in_alert_file()
-
-        # Test framework expects the node to still be running...
-        self.start_nodes()
+        # Generating one block guarantees that we'll get out of IBD
+        node.generate(1)
+        wait_until(lambda: not node.getblockchaininfo()['initialblockdownload'], timeout=10, lock=mininode_lock)
+        # Generating one more block will be enough to generate an error.
+        node.generate(1)
+        # Check that get*info() shows the versionbits unknown rules warning
+        assert(WARN_UNKNOWN_RULES_ACTIVE in node.getmininginfo()["warnings"])
+        assert(WARN_UNKNOWN_RULES_ACTIVE in node.getnetworkinfo()["warnings"])
+        # Check that the alert file shows the versionbits unknown rules warning
+        wait_until(lambda: self.versionbits_in_alert_file(), timeout=60)

 if __name__ == '__main__':
     VersionBitsWarningTest().main()


@@ -9,7 +9,6 @@ Roughly based on http://voorloopnul.com/blog/a-python-netstat-in-less-than-100-l
 import sys
 import socket
-import fcntl
 import struct
 import array
 import os

@@ -90,6 +89,8 @@ def all_interfaces():
     '''
     Return all interfaces that are up
     '''
+    import fcntl # Linux only, so only import when required
+
     is_64bits = sys.maxsize > 2**32
     struct_size = 40 if is_64bits else 32
     s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)


@@ -437,7 +437,7 @@ def sync_chain(rpc_connections, *, wait=1, timeout=60):
         timeout -= wait
     raise AssertionError("Chain sync failed: Best block hashes don't match")

-def sync_mempools(rpc_connections, *, wait=1, timeout=60, wait_func=None):
+def sync_mempools(rpc_connections, *, wait=1, timeout=60, flush_scheduler=True, wait_func=None):
     """
     Wait until everybody has the same transactions in their memory
     pools

@@ -449,6 +449,9 @@ def sync_mempools(rpc_connections, *, wait=1, timeout=60, wait_func=None):
             if set(rpc_connections[i].getrawmempool()) == pool:
                 num_match = num_match + 1
         if num_match == len(rpc_connections):
+            if flush_scheduler:
+                for r in rpc_connections:
+                    r.syncwithvalidationinterfacequeue()
             return
         if wait_func is not None:
             wait_func()


@@ -7,7 +7,10 @@ import os
 import sys

 from test_framework.test_framework import BitcoinTestFramework
-from test_framework.util import (assert_equal, assert_raises_rpc_error)
+from test_framework.util import (
+    assert_equal,
+    assert_raises_rpc_error,
+)


 def read_dump(file_name, addrs, script_addrs, hd_master_addr_old):

@@ -87,7 +90,8 @@ class WalletDumpTest(BitcoinTestFramework):
         self.start_nodes()

     def run_test (self):
-        tmpdir = self.options.tmpdir
+        wallet_unenc_dump = os.path.join(self.nodes[0].datadir, "wallet.unencrypted.dump")
+        wallet_enc_dump = os.path.join(self.nodes[0].datadir, "wallet.encrypted.dump")

         # generate 20 addresses to compare against the dump
         test_addr_count = 20

@@ -104,10 +108,11 @@ class WalletDumpTest(BitcoinTestFramework):
         script_addrs = [multisig_addr]

         # dump unencrypted wallet
-        self.nodes[0].dumpwallet(tmpdir + "/node0/wallet.unencrypted.dump")
+        result = self.nodes[0].dumpwallet(wallet_unenc_dump)
+        assert_equal(result['filename'], wallet_unenc_dump)

         found_addr, found_script_addr, found_addr_chg, found_addr_rsv, hd_master_addr_unenc = \
-            read_dump(tmpdir + "/node0/wallet.unencrypted.dump", addrs, script_addrs, None)
+            read_dump(wallet_unenc_dump, addrs, script_addrs, None)
         assert_equal(found_addr, test_addr_count) # all keys must be in the dump
         # This is 1, not 2 because we aren't testing for witness scripts
         assert_equal(found_script_addr, 1) # all scripts must be in the dump

@@ -120,10 +125,10 @@ class WalletDumpTest(BitcoinTestFramework):
         self.nodes[0].walletpassphrase('test', 30)
         # Should be a no-op:
         self.nodes[0].keypoolrefill()
-        self.nodes[0].dumpwallet(tmpdir + "/node0/wallet.encrypted.dump")
+        self.nodes[0].dumpwallet(wallet_enc_dump)

         found_addr, found_script_addr, found_addr_chg, found_addr_rsv, _ = \
-            read_dump(tmpdir + "/node0/wallet.encrypted.dump", addrs, script_addrs, hd_master_addr_unenc)
+            read_dump(wallet_enc_dump, addrs, script_addrs, hd_master_addr_unenc)
         assert_equal(found_addr, test_addr_count)
         # This is 1, not 2 because we aren't testing for witness scripts
         assert_equal(found_script_addr, 1)

@@ -132,7 +137,7 @@ class WalletDumpTest(BitcoinTestFramework):
         assert_equal(found_addr_rsv, 180) # keypool size

         # Overwriting should fail
-        assert_raises_rpc_error(-8, "already exists", self.nodes[0].dumpwallet, tmpdir + "/node0/wallet.unencrypted.dump")
+        assert_raises_rpc_error(-8, "already exists", lambda: self.nodes[0].dumpwallet(wallet_enc_dump))

         # Restart node with new wallet, and test importwallet
         self.stop_node(0)

@@ -142,11 +147,11 @@ class WalletDumpTest(BitcoinTestFramework):
         result = self.nodes[0].validateaddress(multisig_addr)
         assert(result['ismine'] == False)

-        self.nodes[0].importwallet(os.path.abspath(tmpdir + "/node0/wallet.unencrypted.dump"))
+        self.nodes[0].importwallet(wallet_unenc_dump)

         # Now check IsMine is true
         result = self.nodes[0].validateaddress(multisig_addr)
         assert(result['ismine'] == True)

 if __name__ == '__main__':
-    WalletDumpTest().main ()
+    WalletDumpTest().main()


@@ -59,6 +59,7 @@ class ZapWalletTXesTest (BitcoinTestFramework):
         self.start_node(0, ["-persistmempool=1", "-zapwallettxes=2"])

         wait_until(lambda: self.nodes[0].getmempoolinfo()['size'] == 1, timeout=3)
+        self.nodes[0].syncwithvalidationinterfacequeue() # Flush mempool to wallet

         assert_equal(self.nodes[0].gettransaction(txid1)['txid'], txid1)
         assert_equal(self.nodes[0].gettransaction(txid2)['txid'], txid2)