Merge pull request #3617 from PastaPastaPasta/backports-0.17-pr15

Backports 0.17 pr15
UdjinM6 2020-07-19 19:27:24 +03:00 committed by GitHub
commit f8706009ae
12 changed files with 87 additions and 20 deletions


@@ -45,7 +45,7 @@ Write release notes. git shortlog helps a lot, for example:
Generate list of authors:
git log --format='%aN' "$*" | sort -ui | sed -e 's/^/- /'
git log --format='- %aN' v(current version, e.g. 0.16.0)..v(new version, e.g. 0.16.1) | sort -fiu
Tag version (or release candidate) in git


@@ -69,7 +69,7 @@ username = USERNAME
Please see [http://docs.transifex.com/developer/client/setup#windows](http://docs.transifex.com/developer/client/setup#windows) for details on installation.
The Transifex Dash project config file is included as part of the repo. It can be found at `.tx/config`, however you shouldn't need change anything.
The Transifex Dash project config file is included as part of the repo. It can be found at `.tx/config`, however you shouldn't need to change anything.
### Synchronising translations
To assist in updating translations, we have created a script to help.


@@ -81,7 +81,7 @@ Section -Main SEC0000
File @abs_top_srcdir@/release/@BITCOIN_DAEMON_NAME@@EXEEXT@
File @abs_top_srcdir@/release/@BITCOIN_CLI_NAME@@EXEEXT@
SetOutPath $INSTDIR\doc
File /r @abs_top_srcdir@/doc\*.*
File /r /x Makefile* @abs_top_srcdir@/doc\*.*
SetOutPath $INSTDIR
WriteRegStr HKCU "${REGKEY}\Components" Main 1
SectionEnd


@@ -471,7 +471,7 @@ std::string HelpMessage(HelpMessageMode mode)
strUsage += HelpMessageOpt("-dbbatchsize", strprintf("Maximum database write batch size in bytes (default: %u)", nDefaultDbBatchSize));
}
strUsage += HelpMessageOpt("-dbcache=<n>", strprintf(_("Set database cache size in megabytes (%d to %d, default: %d)"), nMinDbCache, nMaxDbCache, nDefaultDbCache));
strUsage += HelpMessageOpt("-debuglogfile=<file>", strprintf(_("Specify location of debug log file. Relative paths will be prefixed by a net-specific datadir location. (default: %s)"), DEFAULT_DEBUGLOGFILE));
strUsage += HelpMessageOpt("-debuglogfile=<file>", strprintf(_("Specify location of debug log file. Relative paths will be prefixed by a net-specific datadir location. (0 to disable; default: %s)"), DEFAULT_DEBUGLOGFILE));
strUsage += HelpMessageOpt("-loadblock=<file>", _("Imports blocks from external blk000??.dat file on startup"));
strUsage += HelpMessageOpt("-maxorphantxsize=<n>", strprintf(_("Maximum total size of all orphan transactions in megabytes (default: %u)"), DEFAULT_MAX_ORPHAN_TRANSACTIONS_SIZE));
strUsage += HelpMessageOpt("-maxmempool=<n>", strprintf(_("Keep the transaction memory pool below <n> megabytes (default: %u)"), DEFAULT_MAX_MEMPOOL_SIZE));
@@ -489,7 +489,7 @@ std::string HelpMessage(HelpMessageMode mode)
#endif
strUsage += HelpMessageOpt("-prune=<n>", strprintf(_("Reduce storage requirements by enabling pruning (deleting) of old blocks. This allows the pruneblockchain RPC to be called to delete specific blocks, and enables automatic pruning of old blocks if a target size in MiB is provided. This mode is incompatible with -txindex, -rescan and -disablegovernance=false. "
"Warning: Reverting this setting requires re-downloading the entire blockchain. "
"(default: 0 = disable pruning blocks, 1 = allow manual pruning via RPC, >%u = automatically prune block files to stay under the specified target size in MiB)"), MIN_DISK_SPACE_FOR_BLOCK_FILES / 1024 / 1024));
"(default: 0 = disable pruning blocks, 1 = allow manual pruning via RPC, >=%u = automatically prune block files to stay under the specified target size in MiB)"), MIN_DISK_SPACE_FOR_BLOCK_FILES / 1024 / 1024));
strUsage += HelpMessageOpt("-reindex-chainstate", _("Rebuild chain state from the currently indexed blocks"));
strUsage += HelpMessageOpt("-reindex", _("Rebuild chain state and block index from the blk*.dat files on disk"));
#ifndef WIN32
@@ -598,7 +598,7 @@ std::string HelpMessage(HelpMessageMode mode)
}
strUsage += HelpMessageOpt("-maxtxfee=<amt>", strprintf(_("Maximum total fees (in %s) to use in a single wallet transaction or raw transaction; setting this too low may abort large transactions (default: %s)"),
CURRENCY_UNIT, FormatMoney(DEFAULT_TRANSACTION_MAXFEE)));
strUsage += HelpMessageOpt("-printtoconsole", _("Send trace/debug info to console instead of debug.log file"));
strUsage += HelpMessageOpt("-printtoconsole", _("Send trace/debug info to console (default: 1 when no -daemon. To disable logging to file, set debuglogfile=0)"));
strUsage += HelpMessageOpt("-printtodebuglog", strprintf(_("Send trace/debug info to debug.log file (default: %u)"), 1));
if (showDebug)
{
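For illustration only, a minimal sketch of the logging convention these help strings describe; the helper names are hypothetical and this is not the actual init.cpp logic:

#include <string>

// Hypothetical helpers, illustration only.
// Per the help text above, -debuglogfile=0 means "do not write a debug log file at all";
// any other value is treated as a file path.
bool ShouldWriteDebugLogFile(const std::string& debuglogfile_arg)
{
    return debuglogfile_arg != "0";
}

// Per the -printtoconsole help text, console logging defaults to on when -daemon is off.
bool DefaultPrintToConsole(bool daemonized)
{
    return !daemonized;
}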


@@ -2275,7 +2275,6 @@ void CConnman::ThreadOpenConnections(const std::vector<std::string> connect)
CAddress addrConnect;
// Only connect out to one peer per network group (/16 for IPv4).
// Do this here so we don't have to critsect vNodes inside mapAddresses critsect.
// This is only done for mainnet and testnet
int nOutbound = 0;
std::set<std::vector<unsigned char> > setConnected;
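As a hedged aside on the netgroup comment above: for IPv4, the /16 group is simply the first two octets of the address. The real implementation is CNetAddr::GetGroup(); the stand-alone helper below is only an illustrative sketch.

#include <array>
#include <cstdint>
#include <vector>

// Illustration only: the /16 netgroup key of an IPv4 address is its first two octets,
// so 203.0.113.7 and 203.0.250.9 land in the same group {203, 0} and would not both
// be chosen as outbound peers.
std::vector<unsigned char> Ipv4NetGroupSketch(const std::array<uint8_t, 4>& ip)
{
    return {ip[0], ip[1]};
}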


@@ -340,7 +340,6 @@ UniValue getblocktemplate(const JSONRPCRequest& request)
" ],\n"
" \"fee\": n, (numeric) difference in value between transaction inputs and outputs (in duffs); for coinbase transactions, this is a negative Number of the total collected block fees (ie, not including the block subsidy); if key is not present, fee is unknown and clients MUST NOT assume there isn't one\n"
" \"sigops\" : n, (numeric) total number of SigOps, as counted for purposes of block limits; if key is not present, sigop count is unknown and clients MUST NOT assume there aren't any\n"
" \"required\" : true|false (boolean) if provided and true, this transaction must be in the final block\n"
" }\n"
" ,...\n"
" ],\n"


@@ -86,9 +86,13 @@ private:
/**
* Class used by CScheduler clients which may schedule multiple jobs
* which are required to be run serially. Does not require such jobs
* to be executed on the same thread, but no two jobs will be executed
* at the same time.
* which are required to be run serially. Jobs may not be run on the
* same thread, but no two jobs will be executed
* at the same time and memory will be release-acquire consistent
* (the scheduler will internally do an acquire before invoking a callback
* as well as a release at the end). In practice this means that a callback
* B() will be able to observe all of the effects of callback A() which executed
* before it.
*/
class SingleThreadedSchedulerClient {
private:
@@ -103,6 +107,13 @@ private:
public:
explicit SingleThreadedSchedulerClient(CScheduler *pschedulerIn) : m_pscheduler(pschedulerIn) {}
/**
* Add a callback to be executed. Callbacks are executed serially
* and memory is release-acquire consistent between callback executions.
* Practically, this means that callbacks can behave as if they are executed
* in order by a single thread.
*/
void AddToProcessQueue(std::function<void (void)> func);
// Processes all remaining queue members on the calling thread, blocking until queue is empty
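A minimal usage sketch of the guarantee documented above, built only from the API visible in this diff (AddToProcessQueue, CScheduler::serviceQueue, stop); it is not part of the change itself:

#include <cassert>
#include <boost/bind.hpp>
#include <boost/thread.hpp>
#include "scheduler.h"

void SchedulerClientOrderingSketch()
{
    CScheduler scheduler;
    SingleThreadedSchedulerClient client(&scheduler);

    // Several worker threads may service the scheduler concurrently...
    boost::thread_group workers;
    for (int i = 0; i < 4; ++i) {
        workers.create_thread(boost::bind(&CScheduler::serviceQueue, &scheduler));
    }

    // ...but callbacks queued on the same client never overlap and run in order,
    // so B() can observe A()'s write without any extra synchronization.
    int shared_state = 0;
    client.AddToProcessQueue([&shared_state]() { shared_state = 42; });          // A()
    client.AddToProcessQueue([&shared_state]() { assert(shared_state == 42); }); // B()

    scheduler.stop(true);
    workers.join_all();
}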


@@ -65,7 +65,7 @@ BOOST_AUTO_TEST_CASE(manythreads)
size_t nTasks = microTasks.getQueueInfo(first, last);
BOOST_CHECK(nTasks == 0);
for (int i = 0; i < 100; i++) {
for (int i = 0; i < 100; ++i) {
boost::chrono::system_clock::time_point t = now + boost::chrono::microseconds(randomMsec(rng));
boost::chrono::system_clock::time_point tReschedule = now + boost::chrono::microseconds(500 + randomMsec(rng));
int whichCounter = zeroToNine(rng);
@@ -112,4 +112,46 @@ BOOST_AUTO_TEST_CASE(manythreads)
BOOST_CHECK_EQUAL(counterSum, 200);
}
BOOST_AUTO_TEST_CASE(singlethreadedscheduler_ordered)
{
CScheduler scheduler;
// each queue should be well ordered with respect to itself but not other queues
SingleThreadedSchedulerClient queue1(&scheduler);
SingleThreadedSchedulerClient queue2(&scheduler);
// create more threads than queues
// if the queues only permit execution of one task at once then
// the extra threads should effectively be doing nothing
// if they don't we'll get out of order behaviour
boost::thread_group threads;
for (int i = 0; i < 5; ++i) {
threads.create_thread(boost::bind(&CScheduler::serviceQueue, &scheduler));
}
// these are not atomic, if SingleThreadedSchedulerClient prevents
// parallel execution at the queue level no synchronization should be required here
int counter1 = 0;
int counter2 = 0;
// just simply count up on each queue - if execution is properly ordered then
// the callbacks should run in exactly the order in which they were enqueued
for (int i = 0; i < 100; ++i) {
queue1.AddToProcessQueue([i, &counter1]() {
BOOST_CHECK_EQUAL(i, counter1++);
});
queue2.AddToProcessQueue([i, &counter2]() {
BOOST_CHECK_EQUAL(i, counter2++);
});
}
// finish up
scheduler.stop(true);
threads.join_all();
BOOST_CHECK_EQUAL(counter1, 100);
BOOST_CHECK_EQUAL(counter2, 100);
}
BOOST_AUTO_TEST_SUITE_END()


@@ -61,6 +61,21 @@ void CallFunctionInValidationInterfaceQueue(std::function<void ()> func);
*/
void SyncWithValidationInterfaceQueue();
/**
* Implement this to subscribe to events generated in validation
*
* Each CValidationInterface() subscriber will receive event callbacks
* in the order in which the events were generated by validation.
* Furthermore, each ValidationInterface() subscriber may assume that
* callbacks effectively run in a single thread with single-threaded
* memory consistency. That is, for a given ValidationInterface()
* instantiation, each callback will complete before the next one is
* invoked. This means, for example when a block is connected that the
* UpdatedBlockTip() callback may depend on an operation performed in
* the BlockConnected() callback without worrying about explicit
* synchronization. No ordering should be assumed across
* ValidationInterface() subscribers.
*/
class CValidationInterface {
protected:
virtual void AcceptedBlockHeader(const CBlockIndex *pindexNew) {}
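As a hedged illustration of the guarantee above, a subscriber can populate state in BlockConnected() and rely on it in UpdatedBlockTip() without its own locking. The callback signatures below follow the Bitcoin Core lineage this header comes from and may differ slightly in this tree; the class itself is hypothetical:

#include <memory>
#include <vector>
#include "primitives/block.h"
#include "primitives/transaction.h"
#include "uint256.h"
#include "validationinterface.h"

// Sketch only: callbacks for a single subscriber never overlap and are
// release-acquire consistent, so UpdatedBlockTip() may read what
// BlockConnected() wrote without taking a lock.
class TipTrackerSketch : public CValidationInterface {
    uint256 m_last_connected_hash;
    bool m_tip_follows_connect{false};

protected:
    void BlockConnected(const std::shared_ptr<const CBlock>& block,
                        const CBlockIndex* pindex,
                        const std::vector<CTransactionRef>& vtxConflicted) override
    {
        m_last_connected_hash = block->GetHash();
    }

    void UpdatedBlockTip(const CBlockIndex* pindexNew, const CBlockIndex* pindexFork,
                         bool fInitialDownload) override
    {
        // Guaranteed to run after the BlockConnected() that preceded it for this subscriber.
        m_tip_follows_connect = !m_last_connected_hash.IsNull();
    }
};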


@@ -85,7 +85,7 @@ UniValue importprivkey(const JSONRPCRequest& request)
"1. \"privkey\" (string, required) The private key (see dumpprivkey)\n"
"2. \"label\" (string, optional, default=\"\") An optional label\n"
"3. rescan (boolean, optional, default=true) Rescan the wallet for transactions\n"
"\nNote: This call can take minutes to complete if rescan is true, during that time, other rpc calls\n"
"\nNote: This call can take over an hour to complete if rescan is true, during that time, other rpc calls\n"
"may report that the imported key exists but related transactions are still missing, leading to temporarily incorrect/bogus balances and unspent outputs until rescan completes.\n"
"\nExamples:\n"
"\nDump a private key\n"
@@ -235,7 +235,7 @@ UniValue importaddress(const JSONRPCRequest& request)
"2. \"label\" (string, optional, default=\"\") An optional label\n"
"3. rescan (boolean, optional, default=true) Rescan the wallet for transactions\n"
"4. p2sh (boolean, optional, default=false) Add the P2SH version of the script as well\n"
"\nNote: This call can take minutes to complete if rescan is true, during that time, other rpc calls\n"
"\nNote: This call can take over an hour to complete if rescan is true, during that time, other rpc calls\n"
"may report that the imported address exists but related transactions are still missing, leading to temporarily incorrect/bogus balances and unspent outputs until rescan completes.\n"
"If you have the full public key, you should call importpubkey instead of this.\n"
"\nNote: If you import a non-standard raw script in hex form, outputs sending to it will be treated\n"
@@ -411,7 +411,7 @@ UniValue importpubkey(const JSONRPCRequest& request)
"1. \"pubkey\" (string, required) The hex-encoded public key\n"
"2. \"label\" (string, optional, default=\"\") An optional label\n"
"3. rescan (boolean, optional, default=true) Rescan the wallet for transactions\n"
"\nNote: This call can take minutes to complete if rescan is true, during that time, other rpc calls\n"
"\nNote: This call can take over an hour to complete if rescan is true, during that time, other rpc calls\n"
"may report that the imported pubkey exists but related transactions are still missing, leading to temporarily incorrect/bogus balances and unspent outputs until rescan completes.\n"
"\nExamples:\n"
"\nImport a public key with rescan\n"
@@ -1343,7 +1343,7 @@ UniValue importmulti(const JSONRPCRequest& mainRequest)
" {\n"
" \"rescan\": <false>, (boolean, optional, default: true) Stating if should rescan the blockchain after all imports\n"
" }\n"
"\nNote: This call can take minutes to complete if rescan is true, during that time, other rpc calls\n"
"\nNote: This call can take over an hour to complete if rescan is true, during that time, other rpc calls\n"
"may report that the imported keys, addresses or scripts exists but related transactions are still missing.\n"
"\nExamples:\n" +
HelpExampleCli("importmulti", "'[{ \"scriptPubKey\": { \"address\": \"<my address>\" }, \"timestamp\":1455191478 }, "


@@ -14,10 +14,10 @@ void SetfLargeWorkForkFound(bool flag);
bool GetfLargeWorkForkFound();
void SetfLargeWorkInvalidChainFound(bool flag);
/** Format a string that describes several potential problems detected by the core.
* strFor can have three values:
* - "statusbar": get all warnings
* - "gui": get all warnings, translated (where possible) for GUI
* This function only returns the highest priority warning of the set selected by strFor.
* @param[in] strFor can have the following values:
* - "statusbar": get the most important warning
* - "gui": get all warnings, translated (where possible) for GUI, separated by <hr />
* @returns the warning string selected by strFor
*/
std::string GetWarnings(const std::string& strFor);
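A minimal consumer-side sketch of the contract documented above (illustration only, not part of this PR):

#include <string>
#include "warnings.h"

void WarningsUsageSketch()
{
    // "statusbar" yields only the single most important warning.
    std::string status_bar = GetWarnings("statusbar");

    // "gui" yields all warnings, translated where possible, separated by "<hr />".
    std::string gui_text = GetWarnings("gui");
    (void)gui_text; // unused in this sketch

    if (!status_bar.empty()) {
        // surface status_bar to the user or write it to the log
    }
}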


@@ -26,6 +26,7 @@ don't have test cases for.
- When subclassing the BitcoinTestFramework, place overrides for the
`set_test_params()`, `add_options()` and `setup_xxxx()` methods at the top of
the subclass, then locally-defined helper methods, then the `run_test()` method.
- Use `'{}'.format(x)` for string formatting, not `'%s' % x`.
#### Naming guidelines