Mirror of https://github.com/dashpay/dash.git (synced 2024-12-25 12:02:48 +01:00)

Merge pull request #3877 from PastaPastaPasta/backports-0.17-pr24

Backports 0.17 pr24

Commit: 39ded9f7c1
@@ -46,7 +46,6 @@ for folder in folders:
file_path = os.path.join(absFolder, file)
fileMetaMap = {'file' : file, 'osize': os.path.getsize(file_path), 'sha256Old' : file_hash(file_path)}
fileMetaMap['contentHashPre'] = content_hash(file_path)

try:
subprocess.call([pngcrush, "-brute", "-ow", "-rem", "gAMA", "-rem", "cHRM", "-rem", "iCCP", "-rem", "sRGB", "-rem", "alla", "-rem", "text", file_path],
stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)

@@ -731,7 +731,6 @@ if config.dmg is not None:
hdiutil_args.append(str(value))

return run(hdiutil_args, universal_newlines=True)

if verbose >= 2:
if fancy is None:
print("+ Creating .dmg disk image +")

@@ -74,7 +74,6 @@ def gen_invalid_vector(template, corrupt_prefix, randomize_payload_size, corrupt
prefix = os.urandom(1)
else:
prefix = bytearray(template[0])

if randomize_payload_size:
payload = os.urandom(max(int(random.expovariate(0.5)), 50))
else:

@@ -60,6 +60,11 @@ See detailed [set of changes](https://github.com/dashpay/dash/compare/v0.16.1.0.

- [`ccef3b4836`](https://github.com/dashpay/dash/commit/ccef3b48363d8bff4b919d9119355182e3902ef3) qt: Fix wallet encryption dialog (#3816)

Python Support
--------------

Support for Python 2 has been discontinued for all test files and tools.

Credits
=======

@@ -221,7 +221,7 @@ private:
// discriminate entries based on port. Should be false on mainnet/testnet and can be true on devnet/regtest
bool discriminatePorts;

//! Holds addrs inserted into tried table that collide with existing entries. Test-before-evict discpline used to resolve these collisions.
//! Holds addrs inserted into tried table that collide with existing entries. Test-before-evict discipline used to resolve these collisions.
std::set<int> m_tried_collisions;

protected:

@@ -14,6 +14,24 @@

/** All alphanumeric characters except for "0", "I", "O", and "l" */
static const char* pszBase58 = "123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz";
static const int8_t mapBase58[256] = {
-1,-1,-1,-1,-1,-1,-1,-1, -1,-1,-1,-1,-1,-1,-1,-1,
-1,-1,-1,-1,-1,-1,-1,-1, -1,-1,-1,-1,-1,-1,-1,-1,
-1,-1,-1,-1,-1,-1,-1,-1, -1,-1,-1,-1,-1,-1,-1,-1,
-1, 0, 1, 2, 3, 4, 5, 6, 7, 8,-1,-1,-1,-1,-1,-1,
-1, 9,10,11,12,13,14,15, 16,-1,17,18,19,20,21,-1,
22,23,24,25,26,27,28,29, 30,31,32,-1,-1,-1,-1,-1,
-1,33,34,35,36,37,38,39, 40,41,42,43,-1,44,45,46,
47,48,49,50,51,52,53,54, 55,56,57,-1,-1,-1,-1,-1,
-1,-1,-1,-1,-1,-1,-1,-1, -1,-1,-1,-1,-1,-1,-1,-1,
-1,-1,-1,-1,-1,-1,-1,-1, -1,-1,-1,-1,-1,-1,-1,-1,
-1,-1,-1,-1,-1,-1,-1,-1, -1,-1,-1,-1,-1,-1,-1,-1,
-1,-1,-1,-1,-1,-1,-1,-1, -1,-1,-1,-1,-1,-1,-1,-1,
-1,-1,-1,-1,-1,-1,-1,-1, -1,-1,-1,-1,-1,-1,-1,-1,
-1,-1,-1,-1,-1,-1,-1,-1, -1,-1,-1,-1,-1,-1,-1,-1,
-1,-1,-1,-1,-1,-1,-1,-1, -1,-1,-1,-1,-1,-1,-1,-1,
-1,-1,-1,-1,-1,-1,-1,-1, -1,-1,-1,-1,-1,-1,-1,-1,
};

bool DecodeBase58(const char* psz, std::vector<unsigned char>& vch)
{

@@ -31,13 +49,12 @@ bool DecodeBase58(const char* psz, std::vector<unsigned char>& vch)
int size = strlen(psz) * 733 /1000 + 1; // log(58) / log(256), rounded up.
std::vector<unsigned char> b256(size);
// Process the characters.
static_assert(sizeof(mapBase58)/sizeof(mapBase58[0]) == 256, "mapBase58.size() should be 256"); // guarantee not out of range
while (*psz && !isspace(*psz)) {
// Decode base58 character
const char* ch = strchr(pszBase58, *psz);
if (ch == nullptr)
int carry = mapBase58[(uint8_t)*psz];
if (carry == -1) // Invalid b58 character
return false;
// Apply "b256 = b256 * 58 + ch".
int carry = ch - pszBase58;
int i = 0;
for (std::vector<unsigned char>::reverse_iterator it = b256.rbegin(); (carry != 0 || i < length) && (it != b256.rend()); ++it, ++i) {
carry += 58 * (*it);

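The hunk above replaces the strchr() scan of the alphabet with a direct lookup into the 256-entry mapBase58 table. Below is a minimal Python sketch of the same table-driven idea, for illustration only (it is not Dash Core's decoder); the expected values come from the base58_encode_decode.json vectors further down.

```python
# Illustrative table-driven base58 decode; mirrors the C++ lookup-table approach above.
B58_ALPHABET = "123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz"
B58_MAP = [-1] * 256
for i, c in enumerate(B58_ALPHABET):
    B58_MAP[ord(c)] = i

def decode_base58(s: str) -> bytes:
    num = 0
    for ch in s:
        digit = B58_MAP[ord(ch)]
        if digit == -1:  # invalid base58 character, like the carry == -1 check above
            raise ValueError("invalid base58 character: %r" % ch)
        num = num * 58 + digit  # the "b256 = b256 * 58 + ch" step, done on a Python int
    # Preserve leading '1's as leading zero bytes, as the C++ decoder does.
    n_leading = len(s) - len(s.lstrip("1"))
    body = num.to_bytes((num.bit_length() + 7) // 8, "big") if num else b""
    return b"\x00" * n_leading + body

assert decode_base58("1111111111") == b"\x00" * 10              # matches the JSON test vector
assert decode_base58("Rt5zm") == bytes.fromhex("10c8511e")       # matches the JSON test vector
```
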
@@ -106,7 +106,7 @@ public:
bool ParameterInteraction() const override {return true;}
void RegisterRPC(CRPCTable &) const override {}
bool Verify() const override {return true;}
bool Open() const override {return true;}
bool Open() const override {LogPrintf("No wallet support compiled in!\n"); return true;}
void Start(CScheduler& scheduler) const override {}
void Flush() const override {}
void Stop() const override {}

@@ -778,6 +778,7 @@ void ThreadImport(std::vector<fs::path> vImportFiles)
{
const CChainParams& chainparams = Params();
RenameThread("dash-loadblk");
ScheduleBatchPriority();

{
CImportingNow imp;

@@ -1679,7 +1680,7 @@ bool AppInitMain()
}

if (!fLogTimestamps)
LogPrintf("Startup time: %s\n", DateTimeStrFormat("%Y-%m-%d %H:%M:%S", GetTime()));
LogPrintf("Startup time: %s\n", FormatISO8601DateTime(GetTime()));
LogPrintf("Default data directory %s\n", GetDefaultDataDir().string());
LogPrintf("Using data directory %s\n", GetDataDir().string());
LogPrintf("Using config file %s\n", GetConfigFile(gArgs.GetArg("-conf", BITCOIN_CONF_FILENAME)).string());

@@ -238,12 +238,14 @@ static std::string LogTimestampStr(const std::string &str, std::atomic_bool *fSt

if (*fStartedNewLine) {
int64_t nTimeMicros = GetTimeMicros();
strStamped = DateTimeStrFormat("%Y-%m-%d %H:%M:%S", nTimeMicros/1000000);
if (fLogTimeMicros)
strStamped += strprintf(".%06d", nTimeMicros%1000000);
strStamped = FormatISO8601DateTime(nTimeMicros/1000000);
if (fLogTimeMicros) {
strStamped.pop_back();
strStamped += strprintf(".%06dZ", nTimeMicros%1000000);
}
int64_t mocktime = GetMockTime();
if (mocktime) {
strStamped += " (mocktime: " + DateTimeStrFormat("%Y-%m-%d %H:%M:%S", mocktime) + ")";
strStamped += " (mocktime: " + FormatISO8601DateTime(mocktime) + ")";
}
strStamped += ' ' + str;
} else

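The LogTimestampStr change above switches log prefixes to ISO 8601 UTC and, when microsecond logging is enabled, splices the microseconds in before the trailing 'Z'. A small illustrative Python sketch of that formatting (the function name here is made up):

```python
# Sketch of the new log-timestamp layout: ISO 8601 UTC with optional microseconds
# inserted before the trailing 'Z'.
import time

def log_timestamp(time_micros: int, with_micros: bool = True) -> str:
    stamp = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime(time_micros // 1000000))
    if with_micros:
        # Drop the 'Z', append ".######", then re-append the 'Z', as pop_back() + strprintf do above.
        stamp = stamp[:-1] + ".%06dZ" % (time_micros % 1000000)
    return stamp

print(log_timestamp(1317425777123456))  # 2011-09-30T23:36:17.123456Z
```
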
@@ -211,6 +211,7 @@ public Q_SLOTS:
void shutdownResult();
/// Handle runaway exceptions. Shows a message box with the problem and quits the program.
void handleRunawayException(const QString &message);
void addWallet(WalletModel* walletModel);

Q_SIGNALS:
void requestedInitialize();

@@ -229,6 +230,7 @@ private:
#ifdef ENABLE_WALLET
PaymentServer* paymentServer;
std::vector<WalletModel*> m_wallet_models;
std::unique_ptr<interfaces::Handler> m_handler_load_wallet;
#endif
int returnValue;
std::unique_ptr<QWidget> shutdownWindow;

@@ -441,6 +443,22 @@ void BitcoinApplication::requestShutdown()
Q_EMIT requestedShutdown();
}

void BitcoinApplication::addWallet(WalletModel* walletModel)
{
#ifdef ENABLE_WALLET
window->addWallet(walletModel);

if (m_wallet_models.empty()) {
window->setCurrentWallet(walletModel->getWalletName());
}

connect(walletModel, SIGNAL(coinsSent(WalletModel*, SendCoinsRecipient, QByteArray)),
paymentServer, SLOT(fetchPaymentACK(WalletModel*, const SendCoinsRecipient&, QByteArray)));

m_wallet_models.push_back(walletModel);
#endif
}

void BitcoinApplication::initializeResult(bool success)
{
qDebug() << __func__ << ": Initialization result: " << success;

@@ -459,21 +477,13 @@ void BitcoinApplication::initializeResult(bool success)
window->setClientModel(clientModel);

#ifdef ENABLE_WALLET
bool fFirstWallet = true;
auto wallets = m_node.getWallets();
for (auto& wallet : wallets) {
WalletModel * const walletModel = new WalletModel(std::move(wallet), m_node, optionsModel);
m_handler_load_wallet = m_node.handleLoadWallet([this](std::unique_ptr<interfaces::Wallet> wallet) {
QMetaObject::invokeMethod(this, "addWallet", Qt::QueuedConnection,
Q_ARG(WalletModel*, new WalletModel(std::move(wallet), m_node, optionsModel)));
});

window->addWallet(walletModel);
if (fFirstWallet) {
window->setCurrentWallet(walletModel->getWalletName());
fFirstWallet = false;
}

connect(walletModel, SIGNAL(coinsSent(WalletModel*,SendCoinsRecipient,QByteArray)),
paymentServer, SLOT(fetchPaymentACK(WalletModel*,const SendCoinsRecipient&,QByteArray)));

m_wallet_models.push_back(walletModel);
for (auto& wallet : m_node.getWallets()) {
addWallet(new WalletModel(std::move(wallet), m_node, optionsModel));
}
#endif

@@ -565,6 +575,9 @@ int main(int argc, char *argv[])
// IMPORTANT if it is no longer a typedef use the normal variant above
qRegisterMetaType< CAmount >("CAmount");
qRegisterMetaType< std::function<void(void)> >("std::function<void(void)>");
#ifdef ENABLE_WALLET
qRegisterMetaType<WalletModel*>("WalletModel*");
#endif

/// 3. Application identification
// must be set before OptionsModel is initialized or translations are loaded,

@@ -290,15 +290,6 @@ void setupAddressWidget(QValidatedLineEdit *widget, QWidget *parent, bool fAllow
widget->setCheckValidator(new BitcoinAddressCheckValidator(parent));
}

void setupAmountWidget(QLineEdit *widget, QWidget *parent)
{
QDoubleValidator *amountValidator = new QDoubleValidator(parent);
amountValidator->setDecimals(8);
amountValidator->setBottom(0.0);
widget->setValidator(amountValidator);
widget->setAlignment(Qt::AlignRight|Qt::AlignVCenter);
}

void setupAppearance(QWidget* parent, OptionsModel* model)
{
if (!QSettings().value("fAppearanceSetupDone", false).toBool()) {

@@ -423,14 +414,6 @@ bool parseBitcoinURI(const QUrl &uri, SendCoinsRecipient *out)

bool parseBitcoinURI(QString uri, SendCoinsRecipient *out)
{
// Convert dash:// to dash:
//
// Cannot handle this later, because dash:// will cause Qt to see the part after // as host,
// which will lower-case it (and thus invalidate the address).
if(uri.startsWith("dash://", Qt::CaseInsensitive))
{
uri.replace(0, 7, "dash:");
}
QUrl uriInstance(uri);
return parseBitcoinURI(uriInstance, out);
}

@@ -108,9 +108,8 @@ namespace GUIUtil
QString dateTimeStr(const QDateTime &datetime);
QString dateTimeStr(qint64 nTime);

// Set up widgets for address and amounts
// Set up widget for address
void setupAddressWidget(QValidatedLineEdit *widget, QWidget *parent, bool fAllowURI = false);
void setupAmountWidget(QLineEdit *widget, QWidget *parent);

// Setup appearance settings if not done yet
void setupAppearance(QWidget* parent, OptionsModel* model);

@@ -396,7 +396,12 @@ void PaymentServer::handleURIOrFile(const QString& s)
return;
}

if (s.startsWith(BITCOIN_IPC_PREFIX, Qt::CaseInsensitive)) // dash: URI
if (s.startsWith("dash://", Qt::CaseInsensitive))
{
Q_EMIT message(tr("URI handling"), tr("'dash://' is not a valid URI. Use 'dash:' instead."),
CClientUIInterface::MSG_ERROR);
}
else if (s.startsWith(BITCOIN_IPC_PREFIX, Qt::CaseInsensitive)) // dash: URI
{
QUrlQuery uri((QUrl(s)));
if (uri.hasQueryItem("r")) // payment request URI

@@ -755,7 +760,7 @@ bool PaymentServer::verifyExpired(const payments::PaymentDetails& requestDetails
{
bool fVerified = (requestDetails.has_expires() && (int64_t)requestDetails.expires() < GetTime());
if (fVerified) {
const QString requestExpires = QString::fromStdString(DateTimeStrFormat("%Y-%m-%d %H:%M:%S", (int64_t)requestDetails.expires()));
const QString requestExpires = QString::fromStdString(FormatISO8601DateTime((int64_t)requestDetails.expires()));
qWarning() << QString("PaymentServer::%1: Payment request expired \"%2\".")
.arg(__func__)
.arg(requestExpires);

@@ -309,7 +309,7 @@ bool RPCConsole::RPCParseCommandLine(interfaces::Node* node, std::string &strRes
std::string method = stack.back()[0];
std::string uri;
#ifdef ENABLE_WALLET
if (walletID && !walletID->empty()) {
if (walletID) {
QByteArray encodedName = QUrl::toPercentEncoding(QString::fromStdString(*walletID));
uri = "/wallet/"+std::string(encodedName.constData(), encodedName.length());
}

@@ -422,7 +422,7 @@ void RPCExecutor::request(const QString &command, const QString &walletID)
return;
}
std::string wallet_id = walletID.toStdString();
if(!RPCConsole::RPCExecuteCommandLine(m_node, result, executableCommand, nullptr, &wallet_id))
if (!RPCConsole::RPCExecuteCommandLine(m_node, result, executableCommand, nullptr, walletID.isNull() ? nullptr : &wallet_id))
{
Q_EMIT reply(RPCConsole::CMD_ERROR, QString("Parse error: unbalanced ' or \""));
return;

@@ -1036,7 +1036,7 @@ void RPCConsole::on_lineEdit_returnPressed()
}

if (m_last_wallet_id != walletID) {
if (walletID.isEmpty()) {
if (walletID.isNull()) {
message(CMD_REQUEST, tr("Executing command without any wallet"));
} else {
message(CMD_REQUEST, tr("Executing command using \"%1\" wallet").arg(walletID));

@@ -51,7 +51,7 @@ void URITests::uriTests()
QVERIFY(rv.address == QString("XwnLY9Tf7Zsef8gMGL2fhWA9ZmMjt4KPwg"));
QVERIFY(rv.label == QString());

QVERIFY(GUIUtil::parseBitcoinURI("dash://XwnLY9Tf7Zsef8gMGL2fhWA9ZmMjt4KPwg?message=Some Example Address", &rv));
QVERIFY(GUIUtil::parseBitcoinURI("dash:XwnLY9Tf7Zsef8gMGL2fhWA9ZmMjt4KPwg?message=Some Example Address", &rv));
QVERIFY(rv.address == QString("XwnLY9Tf7Zsef8gMGL2fhWA9ZmMjt4KPwg"));
QVERIFY(rv.label == QString());

@@ -56,8 +56,13 @@ bool WalletFrame::addWallet(WalletModel *walletModel)
walletView->setWalletModel(walletModel);
walletView->showOutOfSyncWarning(bOutOfSync);

/* TODO we should goto the currently selected page once dynamically adding wallets is supported */
walletView->gotoOverviewPage();
WalletView* current_wallet_view = currentWalletView();
if (current_wallet_view) {
walletView->setCurrentIndex(current_wallet_view->currentIndex());
} else {
walletView->gotoOverviewPage();
}

walletStack->addWidget(walletView);
mapWalletViews[name] = walletView;

@@ -410,16 +410,22 @@ UniValue getdifficulty(const JSONRPCRequest& request)
std::string EntryDescriptionString()
{
return " \"size\" : n, (numeric) transaction size in bytes\n"
" \"fee\" : n, (numeric) transaction fee in " + CURRENCY_UNIT + "\n"
" \"modifiedfee\" : n, (numeric) transaction fee with fee deltas used for mining priority\n"
" \"fee\" : n, (numeric) transaction fee in " + CURRENCY_UNIT + " (DEPRECATED)\n"
" \"modifiedfee\" : n, (numeric) transaction fee with fee deltas used for mining priority (DEPRECATED)\n"
" \"time\" : n, (numeric) local time transaction entered pool in seconds since 1 Jan 1970 GMT\n"
" \"height\" : n, (numeric) block height when transaction entered pool\n"
" \"descendantcount\" : n, (numeric) number of in-mempool descendant transactions (including this one)\n"
" \"descendantsize\" : n, (numeric) size of in-mempool descendants (including this one)\n"
" \"descendantfees\" : n, (numeric) modified fees (see above) of in-mempool descendants (including this one)\n"
" \"descendantfees\" : n, (numeric) modified fees (see above) of in-mempool descendants (including this one) (DEPRECATED)\n"
" \"ancestorcount\" : n, (numeric) number of in-mempool ancestor transactions (including this one)\n"
" \"ancestorsize\" : n, (numeric) size of in-mempool ancestors (including this one)\n"
" \"ancestorfees\" : n, (numeric) modified fees (see above) of in-mempool ancestors (including this one)\n"
" \"ancestorfees\" : n, (numeric) modified fees (see above) of in-mempool ancestors (including this one) (DEPRECATED)\n"
" \"fees\" : {\n"
" \"base\" : n, (numeric) transaction fee in " + CURRENCY_UNIT + "\n"
" \"modified\" : n, (numeric) transaction fee with fee deltas used for mining priority in " + CURRENCY_UNIT + "\n"
" \"ancestor\" : n, (numeric) modified fees (see above) of in-mempool ancestors (including this one) in " + CURRENCY_UNIT + "\n"
" \"descendent\" : n, (numeric) number of in-mempool ancestor transactions (including this one) in " + CURRENCY_UNIT + "\n"
" }\n"
" \"depends\" : [ (array) unconfirmed transactions used as inputs for this transaction\n"
" \"transactionid\", (string) parent transaction id\n"
" ... ],\n"

@@ -433,6 +439,13 @@ void entryToJSON(UniValue &info, const CTxMemPoolEntry &e) EXCLUSIVE_LOCKS_REQUI
{
AssertLockHeld(mempool.cs);

UniValue fees(UniValue::VOBJ);
fees.pushKV("base", ValueFromAmount(e.GetFee()));
fees.pushKV("modified", ValueFromAmount(e.GetModifiedFee()));
fees.pushKV("ancestor", ValueFromAmount(e.GetModFeesWithAncestors()));
fees.pushKV("descendant", ValueFromAmount(e.GetModFeesWithDescendants()));
info.pushKV("fees", fees);

info.pushKV("size", (int)e.GetTxSize());
info.pushKV("fee", ValueFromAmount(e.GetFee()));
info.pushKV("modifiedfee", ValueFromAmount(e.GetModifiedFee()));

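For reference, the new "fees" sub-object that entryToJSON adds sits alongside the now-deprecated flat fields. A hypothetical mempool entry, with made-up amounts, would look roughly like this, together with the consistency relations the functional tests further down check:

```python
# Shape of the new getmempoolentry / getrawmempool(verbose) output; values are illustrative.
entry = {
    "fee": 0.00001,            # DEPRECATED, kept for compatibility
    "modifiedfee": 0.00001,    # DEPRECATED
    "fees": {
        "base": 0.00001,       # same value as "fee"
        "modified": 0.00001,   # fee with prioritisetransaction deltas applied
        "ancestor": 0.00003,   # modified fees of this tx plus in-mempool ancestors
        "descendant": 0.00002, # modified fees of this tx plus in-mempool descendants
    },
}
# The mempool_packages test below asserts along these lines:
assert entry["fees"]["base"] == entry["fee"]
assert entry["fees"]["modified"] == entry["modifiedfee"]
```
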
@@ -1047,18 +1047,18 @@ UniValue signrawtransaction(const JSONRPCRequest& request)
new_request.params.push_back(request.params[1]);
new_request.params.push_back(request.params[3]);
return signrawtransactionwithkey(new_request);
}
// Otherwise sign with the wallet which does not take a privkeys parameter
} else {
#ifdef ENABLE_WALLET
else {
// Otherwise sign with the wallet which does not take a privkeys parameter
new_request.params.push_back(request.params[0]);
new_request.params.push_back(request.params[1]);
new_request.params.push_back(request.params[3]);
return signrawtransactionwithwallet(new_request);
}
#else
// If we have made it this far, then wallet is disabled and no private keys were given, so fail here.
throw JSONRPCError(RPC_INVALID_PARAMETER, "No private keys available.");
#endif
// If we have made it this far, then wallet is disabled and no private keys were given, so fail here.
throw JSONRPCError(RPC_INVALID_PARAMETER, "No private keys available.");
}
}

UniValue sendrawtransaction(const JSONRPCRequest& request)

@@ -10,5 +10,7 @@
["572e4794", "3EFU7m"],
["ecac89cad93923c02321", "EJDM8drfXA6uyA"],
["10c8511e", "Rt5zm"],
["00000000000000000000", "1111111111"]
["00000000000000000000", "1111111111"],
["000111d38e5fc9071ffcd20b4a763cc9ae4f252bb4e48fd66a835e252ada93ff480d6dd43dc62a641155a5", "123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz"],
["000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f202122232425262728292a2b2c2d2e2f303132333435363738393a3b3c3d3e3f404142434445464748494a4b4c4d4e4f505152535455565758595a5b5c5d5e5f606162636465666768696a6b6c6d6e6f707172737475767778797a7b7c7d7e7f808182838485868788898a8b8c8d8e8f909192939495969798999a9b9c9d9e9fa0a1a2a3a4a5a6a7a8a9aaabacadaeafb0b1b2b3b4b5b6b7b8b9babbbcbdbebfc0c1c2c3c4c5c6c7c8c9cacbcccdcecfd0d1d2d3d4d5d6d7d8d9dadbdcdddedfe0e1e2e3e4e5e6e7e8e9eaebecedeeeff0f1f2f3f4f5f6f7f8f9fafbfcfdfeff", "1cWB5HCBdLjAuqGGReWE3R3CguuwSjw6RHn39s2yuDRTS5NsBgNiFpWgAnEx6VQi8csexkgYw3mdYrMHr8x9i7aEwP8kZ7vccXWqKDvGv3u1GxFKPuAkn8JCPPGDMf3vMMnbzm6Nh9zh1gcNsMvH3ZNLmP5fSG6DGbbi2tuwMWPthr4boWwCxf7ewSgNQeacyozhKDDQQ1qL5fQFUW52QKUZDZ5fw3KXNQJMcNTcaB723LchjeKun7MuGW5qyCBZYzA1KjofN1gYBV3NqyhQJ3Ns746GNuf9N2pQPmHz4xpnSrrfCvy6TVVz5d4PdrjeshsWQwpZsZGzvbdAdN8MKV5QsBDY"]
]

@@ -164,10 +164,27 @@ BOOST_AUTO_TEST_CASE(util_DateTimeStrFormat)
BOOST_CHECK_EQUAL(DateTimeStrFormat("%Y-%m-%d %H:%M:%S", 0), "1970-01-01 00:00:00");
BOOST_CHECK_EQUAL(DateTimeStrFormat("%Y-%m-%d %H:%M:%S", 0x7FFFFFFF), "2038-01-19 03:14:07");
BOOST_CHECK_EQUAL(DateTimeStrFormat("%Y-%m-%d %H:%M:%S", 1317425777), "2011-09-30 23:36:17");
BOOST_CHECK_EQUAL(DateTimeStrFormat("%Y-%m-%dT%H:%M:%SZ", 1317425777), "2011-09-30T23:36:17Z");
BOOST_CHECK_EQUAL(DateTimeStrFormat("%H:%M:%SZ", 1317425777), "23:36:17Z");
BOOST_CHECK_EQUAL(DateTimeStrFormat("%Y-%m-%d %H:%M", 1317425777), "2011-09-30 23:36");
BOOST_CHECK_EQUAL(DateTimeStrFormat("%a, %d %b %Y %H:%M:%S +0000", 1317425777), "Fri, 30 Sep 2011 23:36:17 +0000");
}

BOOST_AUTO_TEST_CASE(util_FormatISO8601DateTime)
{
BOOST_CHECK_EQUAL(FormatISO8601DateTime(1317425777), "2011-09-30T23:36:17Z");
}

BOOST_AUTO_TEST_CASE(util_FormatISO8601Date)
{
BOOST_CHECK_EQUAL(FormatISO8601Date(1317425777), "2011-09-30");
}

BOOST_AUTO_TEST_CASE(util_FormatISO8601Time)
{
BOOST_CHECK_EQUAL(FormatISO8601Time(1317425777), "23:36:17Z");
}

struct TestArgsManager : public ArgsManager
{
TestArgsManager() { m_network_only_args.clear(); }

src/util.cpp

@@ -35,6 +35,7 @@

#include <algorithm>
#include <fcntl.h>
#include <sched.h>
#include <sys/resource.h>
#include <sys/stat.h>

@@ -1104,3 +1105,17 @@ fs::path AbsPathForConfigVal(const fs::path& path, bool net_specific)
{
return fs::absolute(path, GetDataDir(net_specific));
}

int ScheduleBatchPriority(void)
{
#ifdef SCHED_BATCH
const static sched_param param{0};
if (int ret = pthread_setschedparam(pthread_self(), SCHED_BATCH, &param)) {
LogPrintf("Failed to pthread_setschedparam: %s\n", strerror(errno));
return ret;
}
return 0;
#else
return 1;
#endif
}

@@ -319,4 +319,13 @@ template <typename Callable> void TraceThread(const std::string name, Callable

std::string CopyrightHolders(const std::string& strPrefix, unsigned int nStartYear, unsigned int nEndYear);

/**
* On platforms that support it, tell the kernel the calling thread is
* CPU-intensive and non-interactive. See SCHED_BATCH in sched(7) for details.
*
* @return The return value of sched_setschedule(), or 1 on systems without
* sched_setchedule().
*/
int ScheduleBatchPriority(void);

#endif // BITCOIN_UTIL_H

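A rough Python analogue of ScheduleBatchPriority(), shown only to illustrate the intent of the helper declared above. It uses os.sched_setscheduler, which is available on Linux, and mirrors the "return 1 on unsupported systems" fallback; this is a sketch, not the project's code.

```python
# Ask the kernel to treat the calling process/thread as CPU-intensive and
# non-interactive (SCHED_BATCH). Linux-only; other platforms fall through.
import os

def schedule_batch_priority() -> int:
    if hasattr(os, "SCHED_BATCH"):
        try:
            os.sched_setscheduler(0, os.SCHED_BATCH, os.sched_param(0))  # 0 = calling process
            return 0
        except OSError as exc:
            print("Failed to set SCHED_BATCH:", exc)
            return exc.errno
    return 1  # mirrors the C++ fallback when SCHED_BATCH is unavailable
```
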
@@ -98,3 +98,15 @@ std::string DateTimeStrFormat(const char* pszFormat, int64_t nTime)
ss << boost::posix_time::from_time_t(nTime);
return ss.str();
}

std::string FormatISO8601DateTime(int64_t nTime) {
return DateTimeStrFormat("%Y-%m-%dT%H:%M:%SZ", nTime);
}

std::string FormatISO8601Date(int64_t nTime) {
return DateTimeStrFormat("%Y-%m-%d", nTime);
}

std::string FormatISO8601Time(int64_t nTime) {
return DateTimeStrFormat("%H:%M:%SZ", nTime);
}

@@ -34,6 +34,14 @@ void MilliSleep(int64_t n);
template <typename T>
T GetTime();

/**
* ISO 8601 formatting is preferred. Use the FormatISO8601{DateTime,Date,Time}
* helper functions if possible.
*/
std::string DateTimeStrFormat(const char* pszFormat, int64_t nTime);

std::string FormatISO8601DateTime(int64_t nTime);
std::string FormatISO8601Date(int64_t nTime);
std::string FormatISO8601Time(int64_t nTime);

#endif // BITCOIN_UTILTIME_H

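The three helpers declared above all format in UTC. Illustrative Python equivalents, checked against the same fixed timestamp the unit tests use:

```python
# Python counterparts of FormatISO8601{DateTime,Date,Time}; illustration only.
import time

def format_iso8601_datetime(t): return time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime(t))
def format_iso8601_date(t):     return time.strftime("%Y-%m-%d", time.gmtime(t))
def format_iso8601_time(t):     return time.strftime("%H:%M:%SZ", time.gmtime(t))

assert format_iso8601_datetime(1317425777) == "2011-09-30T23:36:17Z"
assert format_iso8601_date(1317425777) == "2011-09-30"
assert format_iso8601_time(1317425777) == "23:36:17Z"
```
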
@@ -1376,13 +1376,12 @@ void static InvalidChainFound(CBlockIndex* pindexNew)

LogPrintf("%s: invalid block=%s height=%d log2_work=%.8f date=%s\n", __func__,
pindexNew->GetBlockHash().ToString(), pindexNew->nHeight,
log(pindexNew->nChainWork.getdouble())/log(2.0), DateTimeStrFormat("%Y-%m-%d %H:%M:%S",
pindexNew->GetBlockTime()));
log(pindexNew->nChainWork.getdouble())/log(2.0), FormatISO8601DateTime(pindexNew->GetBlockTime()));
CBlockIndex *tip = chainActive.Tip();
assert (tip);
LogPrintf("%s: current best=%s height=%d log2_work=%.8f date=%s\n", __func__,
tip->GetBlockHash().ToString(), chainActive.Height(), log(tip->nChainWork.getdouble())/log(2.0),
DateTimeStrFormat("%Y-%m-%d %H:%M:%S", tip->GetBlockTime()));
FormatISO8601DateTime(tip->GetBlockTime()));
CheckForkWarningConditions();
}

@@ -2675,7 +2674,7 @@ void static UpdateTip(const CBlockIndex *pindexNew, const CChainParams& chainPar
std::string strMessage = strprintf("%s: new best=%s height=%d version=0x%08x log2_work=%.8f tx=%lu date='%s' progress=%f cache=%.1fMiB(%utxo)", __func__,
pindexNew->GetBlockHash().ToString(), pindexNew->nHeight, pindexNew->nVersion,
log(pindexNew->nChainWork.getdouble())/log(2.0), (unsigned long)pindexNew->nChainTx,
DateTimeStrFormat("%Y-%m-%d %H:%M:%S", pindexNew->GetBlockTime()),
FormatISO8601DateTime(pindexNew->GetBlockTime()),
GuessVerificationProgress(chainParams.TxData(), pindexNew), pcoinsTip->DynamicMemoryUsage() * (1.0 / (1<<20)), pcoinsTip->GetCacheSize());
strMessage += strprintf(" evodb_cache=%.1fMiB", evoDb->GetMemoryUsage() * (1.0 / (1<<20)));
if (!warningMessages.empty())

@@ -4371,7 +4370,7 @@ bool LoadChainTip(const CChainParams& chainparams)

LogPrintf("Loaded best chain: hashBestChain=%s height=%d date=%s progress=%f\n",
chainActive.Tip()->GetBlockHash().ToString(), chainActive.Height(),
DateTimeStrFormat("%Y-%m-%d %H:%M:%S", chainActive.Tip()->GetBlockTime()),
FormatISO8601DateTime(chainActive.Tip()->GetBlockTime()),
GuessVerificationProgress(chainparams.TxData(), chainActive.Tip()));
return true;
}

@@ -5016,7 +5015,7 @@ void CChainState::CheckBlockIndex(const Consensus::Params& consensusParams)
}

std::string CBlockFileInfo::ToString() const {
return strprintf("CBlockFileInfo(blocks=%u, size=%u, heights=%u...%u, time=%s...%s)", nBlocks, nSize, nHeightFirst, nHeightLast, DateTimeStrFormat("%Y-%m-%d", nTimeFirst), DateTimeStrFormat("%Y-%m-%d", nTimeLast));
return strprintf("CBlockFileInfo(blocks=%u, size=%u, heights=%u...%u, time=%s...%s)", nBlocks, nSize, nHeightFirst, nHeightLast, FormatISO8601Date(nTimeFirst), FormatISO8601Date(nTimeLast));
}

CBlockFileInfo* GetBlockFileInfo(size_t n)

@@ -27,10 +27,6 @@
#include <univalue.h>

std::string static EncodeDumpTime(int64_t nTime) {
return DateTimeStrFormat("%Y-%m-%dT%H:%M:%SZ", nTime);
}

int64_t static DecodeDumpTime(const std::string &str) {
static const boost::posix_time::ptime epoch = boost::posix_time::from_time_t(0);
static const std::locale loc(std::locale::classic(),

@@ -908,16 +904,16 @@ UniValue dumpwallet(const JSONRPCRequest& request)

// produce output
file << strprintf("# Wallet dump created by Dash Core %s\n", CLIENT_BUILD);
file << strprintf("# * Created on %s\n", EncodeDumpTime(GetTime()));
file << strprintf("# * Created on %s\n", FormatISO8601DateTime(GetTime()));
file << strprintf("# * Best block at time of backup was %i (%s),\n", chainActive.Height(), chainActive.Tip()->GetBlockHash().ToString());
file << strprintf("# mined on %s\n", EncodeDumpTime(chainActive.Tip()->GetBlockTime()));
file << strprintf("# mined on %s\n", FormatISO8601DateTime(chainActive.Tip()->GetBlockTime()));
file << "\n";

UniValue obj(UniValue::VOBJ);
obj.pushKV("dashcoreversion", CLIENT_BUILD);
obj.pushKV("lastblockheight", chainActive.Height());
obj.pushKV("lastblockhash", chainActive.Tip()->GetBlockHash().ToString());
obj.pushKV("lastblocktime", EncodeDumpTime(chainActive.Tip()->GetBlockTime()));
obj.pushKV("lastblocktime", FormatISO8601DateTime(chainActive.Tip()->GetBlockTime()));

// add the base58check encoded extended master if the wallet uses HD
CHDChain hdChainCurrent;

@@ -966,7 +962,7 @@ UniValue dumpwallet(const JSONRPCRequest& request)

for (std::vector<std::pair<int64_t, CKeyID> >::const_iterator it = vKeyBirth.begin(); it != vKeyBirth.end(); it++) {
const CKeyID &keyid = it->second;
std::string strTime = EncodeDumpTime(it->first);
std::string strTime = FormatISO8601DateTime(it->first);
std::string strAddr = EncodeDestination(keyid);
CKey key;
if (pwallet->GetKey(keyid, key)) {

@@ -989,7 +985,7 @@ UniValue dumpwallet(const JSONRPCRequest& request)
// get birth times for scripts with metadata
auto it = pwallet->m_script_metadata.find(scriptid);
if (it != pwallet->m_script_metadata.end()) {
create_time = EncodeDumpTime(it->second.nCreateTime);
create_time = FormatISO8601DateTime(it->second.nCreateTime);
}
if(pwallet->GetCScript(scriptid, script)) {
file << strprintf("%s %s script=1", HexStr(script.begin(), script.end()), create_time);

@@ -2962,6 +2962,53 @@ UniValue loadwallet(const JSONRPCRequest& request)
return obj;
}

UniValue createwallet(const JSONRPCRequest& request)
{
if (request.fHelp || request.params.size() != 1) {
throw std::runtime_error(
"createwallet \"wallet_name\"\n"
"\nCreates and loads a new wallet.\n"
"\nArguments:\n"
"1. \"wallet_name\" (string, required) The name for the new wallet. If this is a path, the wallet will be created at the path location.\n"
"\nResult:\n"
"{\n"
" \"name\" : <wallet_name>, (string) The wallet name if created successfully. If the wallet was created using a full path, the wallet_name will be the full path.\n"
" \"warning\" : <warning>, (string) Warning message if wallet was not loaded cleanly.\n"
"}\n"
"\nExamples:\n"
+ HelpExampleCli("createwallet", "\"testwallet\"")
+ HelpExampleRpc("createwallet", "\"testwallet\"")
);
}
std::string wallet_name = request.params[0].get_str();
std::string error;
std::string warning;

fs::path wallet_path = fs::absolute(wallet_name, GetWalletDir());
if (fs::symlink_status(wallet_path).type() != fs::file_not_found) {
throw JSONRPCError(RPC_WALLET_ERROR, "Wallet " + wallet_name + " already exists.");
}

// Wallet::Verify will check if we're trying to create a wallet with a duplication name.
if (!CWallet::Verify(wallet_name, false, error, warning)) {
throw JSONRPCError(RPC_WALLET_ERROR, "Wallet file verification failed: " + error);
}

CWallet* const wallet = CWallet::CreateWalletFromFile(wallet_name, fs::absolute(wallet_name, GetWalletDir()));
if (!wallet) {
throw JSONRPCError(RPC_WALLET_ERROR, "Wallet creation failed.");
}
AddWallet(wallet);

wallet->postInitProcess();

UniValue obj(UniValue::VOBJ);
obj.pushKV("name", wallet->GetName());
obj.pushKV("warning", warning);

return obj;
}

UniValue resendwallettransactions(const JSONRPCRequest& request)
{
CWallet * const pwallet = GetWalletForJSONRPCRequest(request);

@@ -3593,6 +3640,7 @@ static const CRPCCommand commands[] =
{ "wallet", "abortrescan", &abortrescan, {} },
{ "wallet", "addmultisigaddress", &addmultisigaddress, {"nrequired","keys","account"} },
{ "wallet", "backupwallet", &backupwallet, {"destination"} },
{ "wallet", "createwallet", &createwallet, {"wallet_name"} },
{ "wallet", "dumpprivkey", &dumpprivkey, {"address"} },
{ "wallet", "dumpwallet", &dumpwallet, {"filename"} },
{ "wallet", "encryptwallet", &encryptwallet, {"passphrase"} },

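Besides dash-cli and the HelpExampleRpc form shown in the help text, the new createwallet RPC can be exercised with any JSON-RPC client. A hedged sketch using the Python requests library follows; the endpoint URL, port, and credentials are placeholders that must match your node's rpcport/rpcuser/rpcpassword (or rpcauth) settings.

```python
# Hypothetical JSON-RPC call to createwallet against a local dashd; values are placeholders.
import requests

payload = {"jsonrpc": "1.0", "id": "example", "method": "createwallet", "params": ["testwallet"]}
resp = requests.post("http://127.0.0.1:9998/",          # assumed RPC endpoint
                     json=payload,
                     auth=("rpcuser", "rpcpassword"))    # assumed credentials
result = resp.json()["result"]
print(result["name"], result["warning"])  # fields documented in the help text above
```
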
@@ -5290,7 +5290,6 @@ CWallet* CWallet::CreateWalletFromFile(const std::string& name, const fs::path&
if (!reserver.reserve()) {
return error(_("Failed to rescan the wallet during initialization"));
}
uiInterface.LoadWallet(walletInstance); // TODO: move it up when backporting 13063
walletInstance->ScanForWalletTransactions(pindexRescan, nullptr, reserver, true);
}
LogPrintf(" rescan %15dms\n", GetTimeMillis() - nStart);

@@ -5323,6 +5322,8 @@ CWallet* CWallet::CreateWalletFromFile(const std::string& name, const fs::path&
}
}

uiInterface.LoadWallet(walletInstance);

// Register with the validation interface. It's ok to do this after rescan since we're still holding cs_main.
RegisterValidationInterface(temp_wallet.release());

@@ -13,7 +13,7 @@ import re
import sys

# Matches on the date format at the start of the log event
TIMESTAMP_PATTERN = re.compile(r"^\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}\.\d{6}")
TIMESTAMP_PATTERN = re.compile(r"^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{6}Z")

LogEvent = namedtuple('LogEvent', ['timestamp', 'source', 'event'])

@@ -69,7 +69,7 @@ class AssumeValidTest(BitcoinTestFramework):
def send_blocks_until_disconnected(self, p2p_conn):
"""Keep sending blocks to the node until we're disconnected."""
for i in range(len(self.blocks)):
if p2p_conn.state != "connected":
if not p2p_conn.is_connected:
break
try:
p2p_conn.send_message(msg_block(self.blocks[i]))

@@ -78,7 +78,7 @@ class AssumeValidTest(BitcoinTestFramework):
# backported
#except IOError as e:
except:
#assert str(e) == 'Not connected, no pushbuf'
#assert not p2p_conn.is_connected
break

def assert_blockchain_height(self, node, height):

@@ -8,7 +8,7 @@ import os
import re

from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import get_datadir_path

class ConfArgsTest(BitcoinTestFramework):
def set_test_params(self):

@@ -20,7 +20,7 @@ class ConfArgsTest(BitcoinTestFramework):
# Remove the -datadir argument so it doesn't override the config file
self.nodes[0].args = [arg for arg in self.nodes[0].args if not arg.startswith("-datadir")]

default_data_dir = get_datadir_path(self.options.tmpdir, 0)
default_data_dir = self.nodes[0].datadir
new_data_dir = os.path.join(default_data_dir, 'newdatadir')
new_data_dir_2 = os.path.join(default_data_dir, 'newdatadir2')

@@ -97,7 +97,7 @@ def split_inputs(from_node, txins, txouts, initial_split=False):
txouts.append({"txid": txid, "vout": 0, "amount": half_change})
txouts.append({"txid": txid, "vout": 1, "amount": rem_change})

def check_estimates(node, fees_seen, max_invalid):
def check_estimates(node, fees_seen):
"""Call estimatesmartfee and verify that the estimates meet certain invariants."""

delta = 1.0e-6 # account for rounding error

@@ -217,13 +217,13 @@ class EstimateFeeTest(BitcoinTestFramework):
self.log.info("Creating transactions and mining them with a block size that can't keep up")
# Create transactions and mine 10 small blocks with node 2, but create txs faster than we can mine
self.transact_and_mine(10, self.nodes[2])
check_estimates(self.nodes[1], self.fees_per_kb, 14)
check_estimates(self.nodes[1], self.fees_per_kb)

self.log.info("Creating transactions and mining them at a block size that is just big enough")
# Generate transactions while mining 10 more blocks, this time with node1
# which mines blocks with capacity just above the rate that transactions are being created
self.transact_and_mine(10, self.nodes[1])
check_estimates(self.nodes[1], self.fees_per_kb, 2)
check_estimates(self.nodes[1], self.fees_per_kb)

# Finish by mining a normal-sized block:
while len(self.nodes[1].getrawmempool()) > 0:

@@ -231,7 +231,7 @@ class EstimateFeeTest(BitcoinTestFramework):

self.sync_blocks(self.nodes[0:3], wait=.1)
self.log.info("Final estimates after emptying mempools")
check_estimates(self.nodes[1], self.fees_per_kb, 2)
check_estimates(self.nodes[1], self.fees_per_kb)

if __name__ == '__main__':
EstimateFeeTest().main()

@@ -45,7 +45,7 @@ class PruneTest(BitcoinTestFramework):
def setup_network(self):
self.setup_nodes()

self.prunedir = self.options.tmpdir + "/node2/regtest/blocks/"
self.prunedir = os.path.join(self.nodes[2].datadir, 'regtest', 'blocks', '')

connect_nodes(self.nodes[0], 1)
connect_nodes(self.nodes[1], 2)

@@ -70,7 +70,10 @@ class MempoolPackagesTest(BitcoinTestFramework):
assert_equal(mempool[x]['descendantcount'], descendant_count)
descendant_fees += mempool[x]['fee']
assert_equal(mempool[x]['modifiedfee'], mempool[x]['fee'])
assert_equal(mempool[x]['fees']['base'], mempool[x]['fee'])
assert_equal(mempool[x]['fees']['modified'], mempool[x]['modifiedfee'])
assert_equal(mempool[x]['descendantfees'], descendant_fees * COIN)
assert_equal(mempool[x]['fees']['descendant'], descendant_fees)
descendant_size += mempool[x]['size']
assert_equal(mempool[x]['descendantsize'], descendant_size)
descendant_count += 1

@@ -132,6 +135,7 @@ class MempoolPackagesTest(BitcoinTestFramework):
ancestor_fees = 0
for x in chain:
ancestor_fees += mempool[x]['fee']
assert_equal(mempool[x]['fees']['ancestor'], ancestor_fees + Decimal('0.00001'))
assert_equal(mempool[x]['ancestorfees'], ancestor_fees * COIN + 1000)

# Undo the prioritisetransaction for later tests

@@ -145,6 +149,7 @@ class MempoolPackagesTest(BitcoinTestFramework):
descendant_fees = 0
for x in reversed(chain):
descendant_fees += mempool[x]['fee']
assert_equal(mempool[x]['fees']['descendant'], descendant_fees + Decimal('0.00001'))
assert_equal(mempool[x]['descendantfees'], descendant_fees * COIN + 1000)

# Adding one more transaction on to the chain should fail.

@@ -170,7 +175,9 @@ class MempoolPackagesTest(BitcoinTestFramework):
descendant_fees += mempool[x]['fee']
if (x == chain[-1]):
assert_equal(mempool[x]['modifiedfee'], mempool[x]['fee']+satoshi_round(0.00002))
assert_equal(mempool[x]['fees']['modified'], mempool[x]['fee']+satoshi_round(0.00002))
assert_equal(mempool[x]['descendantfees'], descendant_fees * COIN + 2000)
assert_equal(mempool[x]['fees']['descendant'], descendant_fees+satoshi_round(0.00002))

# TODO: check that node1's mempool is as expected

@@ -93,8 +93,8 @@ class MempoolPersistTest(BitcoinTestFramework):
self.start_node(0)
wait_until(lambda: len(self.nodes[0].getrawmempool()) == 5)

mempooldat0 = os.path.join(self.options.tmpdir, 'node0', 'regtest', 'mempool.dat')
mempooldat1 = os.path.join(self.options.tmpdir, 'node1', 'regtest', 'mempool.dat')
mempooldat0 = os.path.join(self.nodes[0].datadir, 'regtest', 'mempool.dat')
mempooldat1 = os.path.join(self.nodes[1].datadir, 'regtest', 'mempool.dat')
self.log.debug("Remove the mempool.dat file. Verify that savemempool to disk via RPC re-creates it")
os.remove(mempooldat0)
self.nodes[0].savemempool()

@@ -83,7 +83,7 @@ class TestP2PConn(P2PInterface):
This is used when we want to send a message into the node that we expect
will get us disconnected, eg an invalid block."""
self.send_message(message)
wait_until(lambda: self.state != "connected", timeout=timeout, lock=mininode_lock)
wait_until(lambda: not self.is_connected, timeout=timeout, lock=mininode_lock)

class CompactBlocksTest(BitcoinTestFramework):
def set_test_params(self):

@@ -109,7 +109,7 @@ class P2PLeakTest(BitcoinTestFramework):
time.sleep(5)

#This node should have been banned
assert no_version_bannode.state != "connected"
assert not no_version_bannode.is_connected

self.nodes[0].disconnect_p2ps()

@@ -423,21 +423,18 @@ class SendHeadersTest(BitcoinTestFramework):
inv_node.check_last_inv_announcement(inv=[tip])
test_node.check_last_inv_announcement(inv=[tip])
if i == 0:
# Just get the data -- shouldn't cause headers announcements to resume
self.log.debug("Just get the data -- shouldn't cause headers announcements to resume")
test_node.send_get_data([tip])
test_node.wait_for_block(tip)
elif i == 1:
# Send a getheaders message that shouldn't trigger headers announcements
# to resume (best header sent will be too old)
self.log.debug("Send a getheaders message that shouldn't trigger headers announcements to resume (best header sent will be too old)")
test_node.send_get_headers(locator=[fork_point], hashstop=new_block_hashes[1])
test_node.send_get_data([tip])
test_node.wait_for_block(tip)
elif i == 2:
# This time, try sending either a getheaders to trigger resumption
# of headers announcements, or mine a new block and inv it, also
# triggering resumption of headers announcements.
test_node.send_get_data([tip])
test_node.wait_for_block(tip)
self.log.debug("This time, try sending either a getheaders to trigger resumption of headers announcements, or mine a new block and inv it, also triggering resumption of headers announcements.")
if j == 0:
test_node.send_get_headers(locator=[tip], hashstop=0)
test_node.sync_with_ping()

@@ -49,9 +49,9 @@ class TimeoutsTest(BitcoinTestFramework):

sleep(1)

assert no_verack_node.connected
assert no_version_node.connected
assert no_send_node.connected
assert no_verack_node.is_connected
assert no_version_node.is_connected
assert no_send_node.is_connected

no_verack_node.send_message(msg_ping())
no_version_node.send_message(msg_ping())

@@ -60,9 +60,9 @@ class TimeoutsTest(BitcoinTestFramework):

assert "version" in no_verack_node.last_message

assert no_verack_node.connected
assert no_version_node.connected
assert no_send_node.connected
assert no_verack_node.is_connected
assert no_version_node.is_connected
assert no_send_node.is_connected

no_verack_node.send_message(msg_ping())
no_version_node.send_message(msg_ping())

@@ -75,9 +75,9 @@ class TimeoutsTest(BitcoinTestFramework):

with self.nodes[0].assert_debug_log(expected_msgs=expected_timeout_logs):
sleep(3 + 1) # Sleep one second more than peertimeout
assert not no_verack_node.connected
assert not no_version_node.connected
assert not no_send_node.connected
assert not no_verack_node.is_connected
assert not no_version_node.is_connected
assert not no_send_node.is_connected

if __name__ == '__main__':
TimeoutsTest().main()

@@ -55,7 +55,7 @@ class RPCBindTest(BitcoinTestFramework):
self.nodes[0].rpchost = None
self.start_nodes([node_args])
# connect to node through non-loopback interface
node = get_rpc_proxy(rpc_url(get_datadir_path(self.options.tmpdir, 0), 0, "%s:%d" % (rpchost, rpcport)), 0, coveragedir=self.options.coveragedir)
node = get_rpc_proxy(rpc_url(self.nodes[0].datadir, 0, "%s:%d" % (rpchost, rpcport)), 0, coveragedir=self.options.coveragedir)
node.getnetworkinfo()
self.stop_nodes()

@@ -5,7 +5,11 @@
"""Test multiple RPC users."""

from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import str_to_b64str, assert_equal
from test_framework.util import (
assert_equal,
get_datadir_path,
str_to_b64str,
)

import os
import http.client

@@ -15,7 +19,8 @@ from random import SystemRandom
import string
import configparser

class HTTPBasicsTest (BitcoinTestFramework):

class HTTPBasicsTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 2

@@ -36,11 +41,11 @@ class HTTPBasicsTest (BitcoinTestFramework):
rpcauth3 = lines[1]
self.password = lines[3]

with open(os.path.join(self.options.tmpdir+"/node0", "dash.conf"), 'a', encoding='utf8') as f:
with open(os.path.join(get_datadir_path(self.options.tmpdir, 0), "dash.conf"), 'a', encoding='utf8') as f:
f.write(rpcauth+"\n")
f.write(rpcauth2+"\n")
f.write(rpcauth3+"\n")
with open(os.path.join(self.options.tmpdir+"/node1", "dash.conf"), 'a', encoding='utf8') as f:
with open(os.path.join(get_datadir_path(self.options.tmpdir, 1), "dash.conf"), 'a', encoding='utf8') as f:
f.write(rpcuser+"\n")
f.write(rpcpassword+"\n")

@@ -90,6 +90,12 @@ class P2PConnection(asyncore.dispatcher):
def __init__(self):
super().__init__(map=mininode_socket_map)

self._conn_open = False

@property
def is_connected(self):
return self._conn_open

def peer_connect(self, dstaddr, dstport, net="regtest", devnet_name=None):
self.dstaddr = dstaddr
self.dstport = dstport

@@ -97,7 +103,7 @@ class P2PConnection(asyncore.dispatcher):
self.socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
self.sendbuf = b""
self.recvbuf = b""
self.state = "connecting"
self._asyncore_pre_connection = True
self.network = net
self.devnet_name = devnet_name
self.disconnect = False

@@ -111,22 +117,23 @@ class P2PConnection(asyncore.dispatcher):

def peer_disconnect(self):
# Connection could have already been closed by other end.
if self.state == "connected":
self.disconnect_node()
if self.is_connected:
self.disconnect = True # Signal asyncore to disconnect

# Connection and disconnection methods

def handle_connect(self):
"""asyncore callback when a connection is opened."""
if self.state != "connected":
if not self.is_connected:
logger.debug("Connected & Listening: %s:%d" % (self.dstaddr, self.dstport))
self.state = "connected"
self._conn_open = True
self._asyncore_pre_connection = False
self.on_open()

def handle_close(self):
"""asyncore callback when a connection is closed."""
logger.debug("Closing connection to: %s:%d" % (self.dstaddr, self.dstport))
self.state = "closed"
self._conn_open = False
self.recvbuf = b""
self.sendbuf = b""
try:

@@ -135,13 +142,6 @@ class P2PConnection(asyncore.dispatcher):
pass
self.on_close()

def disconnect_node(self):
"""Disconnect the p2p connection.

Called by the test logic thread. Causes the p2p connection
to be disconnected on the next iteration of the asyncore loop."""
self.disconnect = True

# Socket read methods

def handle_read(self):

@@ -199,9 +199,8 @@ class P2PConnection(asyncore.dispatcher):
def writable(self):
"""asyncore method to determine whether the handle_write() callback should be called on the next loop."""
with mininode_lock:
pre_connection = self.state == "connecting"
length = len(self.sendbuf)
return (length > 0 or pre_connection)
return length > 0 or self._asyncore_pre_connection

def handle_write(self):
"""asyncore callback when data should be written to the socket."""

@@ -209,7 +208,7 @@ class P2PConnection(asyncore.dispatcher):
# asyncore does not expose socket connection, only the first read/write
# event, thus we must check connection manually here to know when we
# actually connect
if self.state == "connecting":
if self._asyncore_pre_connection:
self.handle_connect()
if not self.writable():
return

@@ -221,14 +220,29 @@ class P2PConnection(asyncore.dispatcher):
return
self.sendbuf = self.sendbuf[sent:]

def send_message(self, message, pushbuf=False):
def send_message(self, message):
"""Send a P2P message over the socket.

This method takes a P2P payload, builds the P2P header and adds
the message to the send buffer to be sent over the socket."""
if self.state != "connected" and not pushbuf:
raise IOError('Not connected, no pushbuf')
if not self.is_connected:
raise IOError('Not connected')
self._log_message("send", message)
tmsg = self._build_message(message)
with mininode_lock:
if len(self.sendbuf) == 0:
try:
sent = self.send(tmsg)
self.sendbuf = tmsg[sent:]
except BlockingIOError:
self.sendbuf = tmsg
else:
self.sendbuf += tmsg

# Class utility methods

def _build_message(self, message):
"""Build a serialized P2P message"""
command = message.command
data = message.serialize()
tmsg = MAGIC_BYTES[self.network]

@@ -239,17 +253,7 @@ class P2PConnection(asyncore.dispatcher):
h = sha256(th)
tmsg += h[:4]
tmsg += data
with mininode_lock:
if (len(self.sendbuf) == 0 and not pushbuf):
try:
sent = self.send(tmsg)
self.sendbuf = tmsg[sent:]
except BlockingIOError:
self.sendbuf = tmsg
else:
self.sendbuf += tmsg

# Class utility methods
return tmsg

def _log_message(self, direction, msg):
"""Logs a message being sent or received over the connection."""

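send_message() now delegates framing to _build_message(), which prepends the network magic, the 12-byte command, the payload length, and a 4-byte double-SHA256 checksum before the payload. The following is a self-contained sketch of that framing, not the test framework's own helper; the magic value shown is illustrative.

```python
# Standalone sketch of P2P message framing: magic | command (12 bytes, zero-padded)
# | payload length (LE uint32) | first 4 bytes of double-SHA256(payload) | payload.
import hashlib
import struct

def build_p2p_message(magic: bytes, command: bytes, payload: bytes) -> bytes:
    checksum = hashlib.sha256(hashlib.sha256(payload).digest()).digest()[:4]
    return (magic
            + command + b"\x00" * (12 - len(command))  # zero-padded command field
            + struct.pack("<I", len(payload))           # little-endian payload length
            + checksum
            + payload)

# Example: a "ping" message with an 8-byte nonce; the magic bytes are placeholders.
msg = build_p2p_message(b"\xfc\xc1\xb7\xdc", b"ping", struct.pack("<Q", 1))
print(msg.hex())
```
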
@@ -299,7 +303,7 @@ class P2PInterface(P2PConnection):
vt.addrFrom.port = 0
if self.network == "devnet" and self.devnet_name is not None:
vt.strSubVer = MY_SUBVERSION_DEVNET % self.devnet_name.encode()
self.send_message(vt, True)
self.sendbuf = self._build_message(vt) # Will be sent right after handle_connect

# Message receiving methods

@@ -371,7 +375,7 @@ class P2PInterface(P2PConnection):
# Connection helper methods

def wait_for_disconnect(self, timeout=60):
test_function = lambda: self.state != "connected"
test_function = lambda: not self.is_connected
wait_until(test_function, timeout=timeout, lock=mininode_lock)
# This is a hack. The related issues should be fixed by bitcoin 14119 and 14457.
time.sleep(1)

@@ -279,7 +279,7 @@ class BitcoinTestFramework():
assert_equal(len(binary), num_nodes)
old_num_nodes = len(self.nodes)
for i in range(num_nodes):
self.nodes.append(TestNode(old_num_nodes + i, self.options.tmpdir, self.extra_args_from_options, rpchost=rpchost, timewait=timewait, bitcoind=binary[i], bitcoin_cli=self.options.bitcoincli, stderr=stderr, mocktime=self.mocktime, coverage_dir=self.options.coveragedir, extra_conf=extra_confs[i], extra_args=extra_args[i], use_cli=self.options.usecli))
self.nodes.append(TestNode(old_num_nodes + i, get_datadir_path(self.options.tmpdir, old_num_nodes + i), self.extra_args_from_options, rpchost=rpchost, timewait=timewait, bitcoind=binary[i], bitcoin_cli=self.options.bitcoincli, stderr=stderr, mocktime=self.mocktime, coverage_dir=self.options.coveragedir, extra_conf=extra_confs[i], extra_args=extra_args[i], use_cli=self.options.usecli))

def start_node(self, i, *args, **kwargs):
"""Start a dashd"""

@@ -405,7 +405,7 @@ class BitcoinTestFramework():
ll = int(self.options.loglevel) if self.options.loglevel.isdigit() else self.options.loglevel.upper()
ch.setLevel(ll)
# Format logs the same as dashd's debug.log with microprecision (so log files can be concatenated and sorted)
formatter = logging.Formatter(fmt='%(asctime)s.%(msecs)03d000 %(name)s (%(levelname)s): %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
formatter = logging.Formatter(fmt='%(asctime)s.%(msecs)03d000Z %(name)s (%(levelname)s): %(message)s', datefmt='%Y-%m-%dT%H:%M:%S')
formatter.converter = time.gmtime
fh.setFormatter(formatter)
ch.setFormatter(formatter)

@@ -450,7 +450,7 @@ class BitcoinTestFramework():
args.append("-connect=127.0.0.1:" + str(p2p_port(0)))
if extra_args is not None:
args.extend(extra_args)
self.nodes.append(TestNode(i, self.options.cachedir, extra_conf=["bind=127.0.0.1"], extra_args=[],extra_args_from_options=self.extra_args_from_options, rpchost=None, timewait=None, bitcoind=self.options.bitcoind, bitcoin_cli=self.options.bitcoincli, stderr=stderr, mocktime=self.mocktime, coverage_dir=None))
self.nodes.append(TestNode(i, get_datadir_path(self.options.cachedir, i), extra_conf=["bind=127.0.0.1"], extra_args=[],extra_args_from_options=self.extra_args_from_options, rpchost=None, timewait=None, bitcoind=self.options.bitcoind, bitcoin_cli=self.options.bitcoincli, stderr=stderr, mocktime=self.mocktime, coverage_dir=None))
self.nodes[i].args = args
self.start_node(i)

@@ -51,9 +51,9 @@ class TestNode():
To make things easier for the test writer, any unrecognised messages will
be dispatched to the RPC connection."""

def __init__(self, i, dirname, extra_args_from_options, rpchost, timewait, bitcoind, bitcoin_cli, stderr, mocktime, coverage_dir, extra_conf=None, extra_args=None, use_cli=False):
def __init__(self, i, datadir, extra_args_from_options, rpchost, timewait, bitcoind, bitcoin_cli, stderr, mocktime, coverage_dir, extra_conf=None, extra_args=None, use_cli=False):
self.index = i
self.datadir = os.path.join(dirname, "node" + str(i))
self.datadir = datadir
self.rpchost = rpchost
if timewait:
self.rpc_timeout = timewait

@@ -65,7 +65,7 @@ class TestNode():
self.coverage_dir = coverage_dir
self.mocktime = mocktime
if extra_conf != None:
append_config(dirname, i, extra_conf)
append_config(datadir, extra_conf)
# Most callers will just need to add extra args to the standard list below.
# For those callers that need more flexibity, they can just set the args property directly.
# Note that common args are set in the config file (see initialize_datadir)

@ -308,7 +308,7 @@ def rpc_url(datadir, i, rpchost=None):
################

def initialize_datadir(dirname, n):
    datadir = os.path.join(dirname, "node" + str(n))
    datadir = get_datadir_path(dirname, n)
    if not os.path.isdir(datadir):
        os.makedirs(datadir)
    with open(os.path.join(datadir, "dash.conf"), 'w', encoding='utf8') as f:
@ -325,8 +325,7 @@ def initialize_datadir(dirname, n):
def get_datadir_path(dirname, n):
    return os.path.join(dirname, "node" + str(n))

def append_config(dirname, n, options):
    datadir = get_datadir_path(dirname, n)
def append_config(datadir, options):
    with open(os.path.join(datadir, "dash.conf"), 'a', encoding='utf8') as f:
        for option in options:
            f.write(option + "\n")
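
The refactor above narrows append_config() to take the datadir it should touch rather than recomputing it from (dirname, n). A minimal, self-contained sketch of the new helper pair and how a caller would use it (the tmpdir path and the extra options are illustrative only):

import os

def get_datadir_path(dirname, n):
    return os.path.join(dirname, "node" + str(n))

def append_config(datadir, options):
    # Append extra settings to the dash.conf inside the given datadir.
    with open(os.path.join(datadir, "dash.conf"), 'a', encoding='utf8') as f:
        for option in options:
            f.write(option + "\n")

datadir = get_datadir_path("/tmp/test_example", 0)  # illustrative tmpdir
os.makedirs(datadir, exist_ok=True)
append_config(datadir, ["bind=127.0.0.1", "debug=1"])
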
@ -549,21 +549,15 @@ class TestResult():


def check_script_prefixes():
    """Check that at most a handful of the
    test scripts don't start with one of the allowed name prefixes."""

    # LEEWAY is provided as a transition measure, so that pull-requests
    # that introduce new tests that don't conform with the naming
    # convention don't immediately cause the tests to fail.
    LEEWAY = 10
    """Check that test scripts start with one of the allowed name prefixes."""

    good_prefixes_re = re.compile("(example|feature|interface|mempool|mining|p2p|rpc|wallet)_")
    bad_script_names = [script for script in ALL_SCRIPTS if good_prefixes_re.match(script) is None]

    if len(bad_script_names) > 0:
        print("INFO: %d tests not meeting naming conventions:" % (len(bad_script_names)))
    if bad_script_names:
        print("%sERROR:%s %d tests not meeting naming conventions:" % (BOLD[1], BOLD[0], len(bad_script_names)))
        print(" %s" % ("\n ".join(sorted(bad_script_names))))
        assert len(bad_script_names) <= LEEWAY, "Too many tests not following naming convention! (%d found, maximum: %d)" % (len(bad_script_names), LEEWAY)
        raise AssertionError("Some tests are not following naming convention!")


def check_script_list(*, src_dir, fail_on_warn):
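
The new check above turns unconventional test names from a soft warning with a LEEWAY allowance into a hard failure. A small hedged sketch of the same check against a hypothetical script list (ALL_SCRIPTS and BOLD here are stand-ins for the runner's real globals):

import re

BOLD = ("\033[0m", "\033[1m")  # stand-in pair: (reset, bold) terminal escapes
ALL_SCRIPTS = ["wallet_backup.py", "example_test.py", "my_new_test.py"]  # hypothetical list

def check_script_prefixes():
    """Check that test scripts start with one of the allowed name prefixes."""
    good_prefixes_re = re.compile("(example|feature|interface|mempool|mining|p2p|rpc|wallet)_")
    bad_script_names = [script for script in ALL_SCRIPTS if good_prefixes_re.match(script) is None]
    if bad_script_names:
        print("%sERROR:%s %d tests not meeting naming conventions:" % (BOLD[1], BOLD[0], len(bad_script_names)))
        print(" %s" % ("\n ".join(sorted(bad_script_names))))
        raise AssertionError("Some tests are not following naming convention!")

try:
    check_script_prefixes()
except AssertionError as e:
    print("caught:", e)  # "my_new_test.py" has no allowed prefix
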
@ -90,9 +90,9 @@ class WalletBackupTest(BitcoinTestFramework):
        self.stop_node(2)

    def erase_three(self):
        os.remove(self.options.tmpdir + "/node0/regtest/wallets/wallet.dat")
        os.remove(self.options.tmpdir + "/node1/regtest/wallets/wallet.dat")
        os.remove(self.options.tmpdir + "/node2/regtest/wallets/wallet.dat")
        os.remove(os.path.join(self.nodes[0].datadir, 'regtest', 'wallets', 'wallet.dat'))
        os.remove(os.path.join(self.nodes[1].datadir, 'regtest', 'wallets', 'wallet.dat'))
        os.remove(os.path.join(self.nodes[2].datadir, 'regtest', 'wallets', 'wallet.dat'))

    def run_test(self):
        self.log.info("Generating initial blockchain")
@ -116,13 +116,13 @@ class WalletBackupTest(BitcoinTestFramework):
            self.do_one_round()

        self.log.info("Backing up")
        tmpdir = self.options.tmpdir
        self.nodes[0].backupwallet(tmpdir + "/node0/wallet.bak")
        self.nodes[0].dumpwallet(tmpdir + "/node0/wallet.dump")
        self.nodes[1].backupwallet(tmpdir + "/node1/wallet.bak")
        self.nodes[1].dumpwallet(tmpdir + "/node1/wallet.dump")
        self.nodes[2].backupwallet(tmpdir + "/node2/wallet.bak")
        self.nodes[2].dumpwallet(tmpdir + "/node2/wallet.dump")

        self.nodes[0].backupwallet(os.path.join(self.nodes[0].datadir, 'wallet.bak'))
        self.nodes[0].dumpwallet(os.path.join(self.nodes[0].datadir, 'wallet.dump'))
        self.nodes[1].backupwallet(os.path.join(self.nodes[1].datadir, 'wallet.bak'))
        self.nodes[1].dumpwallet(os.path.join(self.nodes[1].datadir, 'wallet.dump'))
        self.nodes[2].backupwallet(os.path.join(self.nodes[2].datadir, 'wallet.bak'))
        self.nodes[2].dumpwallet(os.path.join(self.nodes[2].datadir, 'wallet.dump'))

        self.log.info("More transactions")
        for i in range(5):
@ -150,15 +150,15 @@ class WalletBackupTest(BitcoinTestFramework):
        self.erase_three()

        # Start node2 with no chain
        shutil.rmtree(self.options.tmpdir + "/node2/regtest/blocks")
        shutil.rmtree(self.options.tmpdir + "/node2/regtest/chainstate")
        shutil.rmtree(self.options.tmpdir + "/node2/regtest/evodb")
        shutil.rmtree(self.options.tmpdir + "/node2/regtest/llmq")
        shutil.rmtree(os.path.join(self.nodes[2].datadir, 'regtest', 'blocks'))
        shutil.rmtree(os.path.join(self.nodes[2].datadir, 'regtest', 'chainstate'))
        shutil.rmtree(os.path.join(self.nodes[2].datadir, 'regtest', 'evodb'))
        shutil.rmtree(os.path.join(self.nodes[2].datadir, 'regtest', 'llmq'))

        # Restore wallets from backup
        shutil.copyfile(tmpdir + "/node0/wallet.bak", tmpdir + "/node0/regtest/wallets/wallet.dat")
        shutil.copyfile(tmpdir + "/node1/wallet.bak", tmpdir + "/node1/regtest/wallets/wallet.dat")
        shutil.copyfile(tmpdir + "/node2/wallet.bak", tmpdir + "/node2/regtest/wallets/wallet.dat")
        shutil.copyfile(os.path.join(self.nodes[0].datadir, 'wallet.bak'), os.path.join(self.nodes[0].datadir, 'regtest', 'wallets', 'wallet.dat'))
        shutil.copyfile(os.path.join(self.nodes[1].datadir, 'wallet.bak'), os.path.join(self.nodes[1].datadir, 'regtest', 'wallets', 'wallet.dat'))
        shutil.copyfile(os.path.join(self.nodes[2].datadir, 'wallet.bak'), os.path.join(self.nodes[2].datadir, 'regtest', 'wallets', 'wallet.dat'))

        self.log.info("Re-starting nodes")
        self.start_three()
@ -173,10 +173,10 @@ class WalletBackupTest(BitcoinTestFramework):
        self.erase_three()

        #start node2 with no chain
        shutil.rmtree(self.options.tmpdir + "/node2/regtest/blocks")
        shutil.rmtree(self.options.tmpdir + "/node2/regtest/chainstate")
        shutil.rmtree(self.options.tmpdir + "/node2/regtest/evodb")
        shutil.rmtree(self.options.tmpdir + "/node2/regtest/llmq")
        shutil.rmtree(os.path.join(self.nodes[2].datadir, 'regtest', 'blocks'))
        shutil.rmtree(os.path.join(self.nodes[2].datadir, 'regtest', 'chainstate'))
        shutil.rmtree(os.path.join(self.nodes[2].datadir, 'regtest', 'evodb'))
        shutil.rmtree(os.path.join(self.nodes[2].datadir, 'regtest', 'llmq'))

        self.start_three()

@ -184,9 +184,9 @@ class WalletBackupTest(BitcoinTestFramework):
        assert_equal(self.nodes[1].getbalance(), 0)
        assert_equal(self.nodes[2].getbalance(), 0)

        self.nodes[0].importwallet(tmpdir + "/node0/wallet.dump")
        self.nodes[1].importwallet(tmpdir + "/node1/wallet.dump")
        self.nodes[2].importwallet(tmpdir + "/node2/wallet.dump")
        self.nodes[0].importwallet(os.path.join(self.nodes[0].datadir, 'wallet.dump'))
        self.nodes[1].importwallet(os.path.join(self.nodes[1].datadir, 'wallet.dump'))
        self.nodes[2].importwallet(os.path.join(self.nodes[2].datadir, 'wallet.dump'))

        self.sync_blocks()

@ -196,10 +196,10 @@ class WalletBackupTest(BitcoinTestFramework):

        # Backup to source wallet file must fail
        sourcePaths = [
            tmpdir + "/node0/regtest/wallets/wallet.dat",
            tmpdir + "/node0/./regtest/wallets/wallet.dat",
            tmpdir + "/node0/regtest/wallets/",
            tmpdir + "/node0/regtest/wallets"]
            os.path.join(self.nodes[0].datadir, 'regtest', 'wallets', 'wallet.dat'),
            os.path.join(self.nodes[0].datadir, 'regtest', '.', 'wallets', 'wallet.dat'),
            os.path.join(self.nodes[0].datadir, 'regtest', 'wallets', ''),
            os.path.join(self.nodes[0].datadir, 'regtest', 'wallets')]

        for sourcePath in sourcePaths:
            assert_raises_rpc_error(-4, "backup failed", self.nodes[0].backupwallet, sourcePath)
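
The pattern running through these hunks is the same: paths that used to be built by concatenating self.options.tmpdir with hard-coded "/nodeN/..." strings are now derived from each node's own datadir with os.path.join, which keeps the test correct if the datadir layout ever changes and avoids separator issues. A small hedged sketch of the difference (class and paths are illustrative, assuming a POSIX layout):

import os

class Node:
    """Illustrative stand-in exposing just the datadir attribute the tests rely on."""
    def __init__(self, datadir):
        self.datadir = datadir

tmpdir = "/tmp/test_example"                      # old style: parent tmpdir plus string concatenation
node0 = Node(os.path.join(tmpdir, "node0"))

old_path = tmpdir + "/node0/regtest/wallets/wallet.dat"
new_path = os.path.join(node0.datadir, "regtest", "wallets", "wallet.dat")
assert old_path == new_path  # same location, but new_path follows the node's datadir wherever it moves
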
@ -9,7 +9,10 @@ import shutil
import os

from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from test_framework.util import (
    assert_equal,
    connect_nodes_bi,
)

class WalletHDTest(BitcoinTestFramework):
    def set_test_params(self):
@ -21,9 +24,7 @@ class WalletHDTest(BitcoinTestFramework):
        self.add_nodes(self.num_nodes, self.extra_args, stderr=sys.stdout)
        self.start_nodes()

    def run_test (self):
        tmpdir = self.options.tmpdir

    def run_test(self):
        # Make sure can't switch off usehd after wallet creation
        self.stop_node(1)
        self.nodes[1].assert_start_raises_init_error(['-usehd=0'], "Error: Error loading : You can't disable HD on an already existing HD wallet")
@ -44,8 +45,8 @@ class WalletHDTest(BitcoinTestFramework):
        self.nodes[1].importprivkey(self.nodes[0].dumpprivkey(non_hd_add))

        # This should be enough to keep the master key and the non-HD key
        self.nodes[1].backupwallet(tmpdir + "/hd.bak")
        #self.nodes[1].dumpwallet(tmpdir + "/hd.dump")
        self.nodes[1].backupwallet(os.path.join(self.nodes[1].datadir, "hd.bak"))
        #self.nodes[1].dumpwallet(os.path.join(self.nodes[1].datadir, "hd.dump"))

        # Derive some HD addresses and remember the last
        # Also send funds to each add
@ -74,11 +75,11 @@ class WalletHDTest(BitcoinTestFramework):
        self.stop_node(1)
        # we need to delete the complete regtest directory
        # otherwise node1 would auto-recover all funds and flag the keypool keys as used
        shutil.rmtree(os.path.join(tmpdir, "node1/regtest/blocks"))
        shutil.rmtree(os.path.join(tmpdir, "node1/regtest/chainstate"))
        shutil.rmtree(os.path.join(tmpdir, "node1/regtest/evodb"))
        shutil.rmtree(os.path.join(tmpdir, "node1/regtest/llmq"))
        shutil.copyfile(os.path.join(tmpdir, "hd.bak"), os.path.join(tmpdir, "node1/regtest/wallets/wallet.dat"))
        shutil.rmtree(os.path.join(self.nodes[1].datadir, "regtest", "blocks"))
        shutil.rmtree(os.path.join(self.nodes[1].datadir, "regtest", "chainstate"))
        shutil.rmtree(os.path.join(self.nodes[1].datadir, "regtest", "evodb"))
        shutil.rmtree(os.path.join(self.nodes[1].datadir, "regtest", "llmq"))
        shutil.copyfile(os.path.join(self.nodes[1].datadir, "hd.bak"), os.path.join(self.nodes[1].datadir, "regtest", "wallets", "wallet.dat"))
        self.start_node(1)

        # Assert that derivation is deterministic
@ -99,11 +100,11 @@ class WalletHDTest(BitcoinTestFramework):

        # Try an RPC based rescan
        self.stop_node(1)
        shutil.rmtree(os.path.join(tmpdir, "node1/regtest/blocks"))
        shutil.rmtree(os.path.join(tmpdir, "node1/regtest/chainstate"))
        shutil.rmtree(os.path.join(tmpdir, "node1/regtest/evodb"))
        shutil.rmtree(os.path.join(tmpdir, "node1/regtest/llmq"))
        shutil.copyfile(os.path.join(tmpdir, "hd.bak"), os.path.join(tmpdir, "node1/regtest/wallets/wallet.dat"))
        shutil.rmtree(os.path.join(self.nodes[1].datadir, "regtest", "blocks"))
        shutil.rmtree(os.path.join(self.nodes[1].datadir, "regtest", "chainstate"))
        shutil.rmtree(os.path.join(self.nodes[1].datadir, "regtest", "evodb"))
        shutil.rmtree(os.path.join(self.nodes[1].datadir, "regtest", "llmq"))
        shutil.copyfile(os.path.join(self.nodes[1].datadir, "hd.bak"), os.path.join(self.nodes[1].datadir, "regtest", "wallets", "wallet.dat"))
        self.start_node(1, extra_args=self.extra_args[1])
        connect_nodes_bi(self.nodes, 0, 1)
        self.sync_all()
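
Both blocks above exercise the same recovery recipe: stop the node, wipe the chain state under its datadir (blocks, chainstate, and the Dash-specific evodb and llmq directories), put the backed-up wallet.dat back into regtest/wallets, then restart so the wallet rescans and re-derives its HD keys. A hedged, framework-free sketch of that recipe using plain filesystem calls (the datadir path and file contents are illustrative):

import os
import shutil

# Build a throwaway datadir that mimics the regtest layout shown in the diff.
datadir = "/tmp/test_example_node1"  # illustrative node datadir
for subdir in ("blocks", "chainstate", "evodb", "llmq", "wallets"):
    os.makedirs(os.path.join(datadir, "regtest", subdir), exist_ok=True)
backup = os.path.join(datadir, "hd.bak")
with open(backup, "wb") as f:
    f.write(b"fake wallet backup")  # placeholder contents

# 1. Wipe chain state, keeping the wallets directory itself.
for subdir in ("blocks", "chainstate", "evodb", "llmq"):
    shutil.rmtree(os.path.join(datadir, "regtest", subdir))

# 2. Restore the backed-up wallet file.
shutil.copyfile(backup, os.path.join(datadir, "regtest", "wallets", "wallet.dat"))

# 3. Restarting dashd on this datadir would now rescan and re-derive the HD keys.
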
@ -10,6 +10,7 @@ Two nodes. Node1 is under test. Node0 is providing transactions and generating b
- Generate 110 keys (enough to drain the keypool). Store key 90 (in the initial keypool) and key 110 (beyond the initial keypool). Send funds to key 90 and key 110.
- Stop node1, clear the datadir, move wallet file back into the datadir and restart node1.
- connect node1 to node0. Verify that they sync and node1 receives its funds."""
import os
import shutil
import sys

@ -19,6 +20,7 @@ from test_framework.util import (
    connect_nodes_bi,
)


class KeypoolRestoreTest(BitcoinTestFramework):
    def set_test_params(self):
        self.setup_clean_chain = True
@ -27,26 +29,23 @@ class KeypoolRestoreTest(BitcoinTestFramework):
        self.stderr = sys.stdout

    def run_test(self):
        self.tmpdir = self.options.tmpdir
        wallet_path = os.path.join(self.nodes[1].datadir, "regtest", "wallets", "wallet.dat")
        wallet_backup_path = os.path.join(self.nodes[1].datadir, "wallet.bak")
        self.nodes[0].generate(101)

        self.log.info("Make backup of wallet")

        self.stop_node(1)

        shutil.copyfile(self.tmpdir + "/node1/regtest/wallets/wallet.dat", self.tmpdir + "/wallet.bak")
        shutil.copyfile(wallet_path, wallet_backup_path)
        self.start_node(1, self.extra_args[1])
        connect_nodes_bi(self.nodes, 0, 1)

        self.log.info("Generate keys for wallet")

        for _ in range(90):
            addr_oldpool = self.nodes[1].getnewaddress()
        for _ in range(20):
            addr_extpool = self.nodes[1].getnewaddress()

        self.log.info("Send funds to wallet")

        self.nodes[0].sendtoaddress(addr_oldpool, 10)
        self.nodes[0].generate(1)
        self.nodes[0].sendtoaddress(addr_extpool, 5)
@ -54,22 +53,18 @@ class KeypoolRestoreTest(BitcoinTestFramework):
        self.sync_blocks()

        self.log.info("Restart node with wallet backup")

        self.stop_node(1)

        shutil.copyfile(self.tmpdir + "/wallet.bak", self.tmpdir + "/node1/regtest/wallets/wallet.dat")

        self.log.info("Verify keypool is restored and balance is correct")

        shutil.copyfile(wallet_backup_path, wallet_path)
        self.start_node(1, self.extra_args[1])
        connect_nodes_bi(self.nodes, 0, 1)
        self.sync_all()

        self.log.info("Verify keypool is restored and balance is correct")
        assert_equal(self.nodes[1].getbalance(), 15)
        assert_equal(self.nodes[1].listtransactions()[0]['category'], "receive")

        # Check that we have marked all keys up to the used keypool key as used
        assert_equal(self.nodes[1].validateaddress(self.nodes[1].getnewaddress())['hdkeypath'], "m/44'/1'/0'/0/110")


if __name__ == '__main__':
    KeypoolRestoreTest().main()
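
The final assertion checks the BIP44-style derivation path of the next fresh key: m/44'/1'/0'/0/110 means purpose 44', coin type 1' (test networks), account 0', external chain 0, key index 110, i.e. one past the 90 + 20 keys handed out before the wallet backup was restored. A small hedged sketch of parsing such an hdkeypath (the helper name is illustrative):

def parse_bip44_path(path):
    """Split an hdkeypath like m/44'/1'/0'/0/110 into named components."""
    parts = path.split("/")
    assert parts[0] == "m"
    purpose, coin_type, account, change, index = parts[1:]
    return {
        "purpose": purpose,        # 44' -> BIP44 wallet layout
        "coin_type": coin_type,    # 1'  -> test networks
        "account": account,        # 0'
        "change": change,          # 0   -> external (receive) chain
        "index": int(index),       # 110 -> one past the 110 keys already issued
    }

assert parse_bip44_path("m/44'/1'/0'/0/110")["index"] == 110
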
@ -11,7 +11,11 @@ import re
import shutil

from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal, assert_raises_rpc_error
from test_framework.util import (
    assert_equal,
    assert_raises_rpc_error,
)


class MultiWalletTest(BitcoinTestFramework):
    def set_test_params(self):
@ -201,6 +205,29 @@ class MultiWalletTest(BitcoinTestFramework):
        # Fail to load if wallet file is a symlink
        assert_raises_rpc_error(-4, "Wallet file verification failed: Invalid -wallet path 'w8_symlink'", self.nodes[0].loadwallet, 'w8_symlink')

        self.log.info("Test dynamic wallet creation.")

        # Fail to create a wallet if it already exists.
        assert_raises_rpc_error(-4, "Wallet w2 already exists.", self.nodes[0].createwallet, 'w2')

        # Successfully create a wallet with a new name
        loadwallet_name = self.nodes[0].createwallet('w9')
        assert_equal(loadwallet_name['name'], 'w9')
        w9 = node.get_wallet_rpc('w9')
        assert_equal(w9.getwalletinfo()['walletname'], 'w9')

        assert 'w9' in self.nodes[0].listwallets()

        # Successfully create a wallet using a full path
        new_wallet_dir = os.path.join(self.options.tmpdir, 'new_walletdir')
        new_wallet_name = os.path.join(new_wallet_dir, 'w10')
        loadwallet_name = self.nodes[0].createwallet(new_wallet_name)
        assert_equal(loadwallet_name['name'], new_wallet_name)
        w10 = node.get_wallet_rpc(new_wallet_name)
        assert_equal(w10.getwalletinfo()['walletname'], new_wallet_name)

        assert new_wallet_name in self.nodes[0].listwallets()

        # Fail to load if a directory is specified that doesn't contain a wallet
        os.mkdir(wallet_dir('empty_wallet_dir'))
        assert_raises_rpc_error(-18, "Directory empty_wallet_dir does not contain a wallet.dat file", self.nodes[0].loadwallet, 'empty_wallet_dir')
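
The new block above leans on assert_raises_rpc_error to pin both the JSON-RPC error code and the message when createwallet is misused, e.g. when the name already exists. A self-contained, hedged sketch of that checking pattern with a toy wallet registry standing in for the node (the exception class and helper here are simplified stand-ins for the framework's real JSONRPCException and assert_raises_rpc_error):

class RPCError(Exception):
    """Minimal stand-in for the framework's JSONRPCException."""
    def __init__(self, code, message):
        super().__init__(message)
        self.code = code
        self.message = message

def assert_raises_rpc_error(code, message, fun, *args):
    """Simplified version of the helper used in the hunk above."""
    try:
        fun(*args)
    except RPCError as e:
        assert e.code == code, "unexpected error code %d" % e.code
        assert message in e.message, "unexpected error message: %s" % e.message
    else:
        raise AssertionError("no RPCError was raised")

wallets = {'w2'}  # toy registry standing in for the node's wallet set
def createwallet(name):
    if name in wallets:
        raise RPCError(-4, "Wallet %s already exists." % name)
    wallets.add(name)
    return {'name': name}

assert_raises_rpc_error(-4, "Wallet w2 already exists.", createwallet, 'w2')
assert createwallet('w9')['name'] == 'w9'
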
@ -164,12 +164,12 @@
  { "exec": "./dash-tx",
    "args": ["02000000000100000000000000000000000000"],
    "output_cmp": "txcreate2.hex",
    "description": "Parses a transation with no inputs and a single output script"
    "description": "Parses a transaction with no inputs and a single output script"
  },
  { "exec": "./dash-tx",
    "args": ["-json", "02000000000100000000000000000000000000"],
    "output_cmp": "txcreate2.json",
    "description": "Parses a transation with no inputs and a single output script (output in json)"
    "description": "Parses a transaction with no inputs and a single output script (output in json)"
  },
  { "exec": "./dash-tx",
    "args":