Mirror of https://github.com/dashpay/dash.git (synced 2024-12-25 03:52:49 +01:00)

Merge pull request #2911 from PastaPastaPasta/backports-0.15-pr10

Backports 0.15 pr10

This commit is contained in commit 92e5a38314
@@ -37,8 +37,8 @@ fixes or code moves with actual code changes.
Commit messages should be verbose by default consisting of a short subject line
(50 chars max), a blank line and detailed explanatory text as separate
paragraph(s); unless the title alone is self-explanatory (like "Corrected typo
in init.cpp") then a single title line is sufficient. Commit messages should be
paragraph(s), unless the title alone is self-explanatory (like "Corrected typo
in init.cpp") in which case a single title line is sufficient. Commit messages should be
helpful to people reading your code in the future, so explain the reasoning for
your decisions. Further explanation [here](http://chris.beams.io/posts/git-commit/).
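As an illustration only (not part of the diff), a commit message following this guidance might look like:

```
doc: fix typo in developer notes

Explain in the body why the change is needed and how it was verified,
as separate paragraphs; the subject line stays under 50 characters.
```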
@@ -225,6 +225,36 @@ discussed extensively on the mailing list and IRC, be accompanied by a widely
discussed BIP and have a generally widely perceived technical consensus of being
a worthwhile change based on the judgement of the maintainers.

### Finding Reviewers

The review process is normally fairly responsive on the Dash Core repository, however
this might not always be the case. If you find that you've been waiting
for a pull request to be given attention for several months, there may be a number
of reasons for this, some of which you can do something about:

- It may be because of a feature freeze due to an upcoming release. During this time,
only bug fixes are taken into consideration. If your pull request is a new feature,
it will not be prioritized until the release is over. Wait for release.
- It may be because the changes you are suggesting do not appeal to people. Rather than
nits and critique, which require effort and means they care enough to spend time on your
contribution, thundering silence is a good sign of widespread (mild) dislike of a given change
(because people don't assume *others* won't actually like the proposal). Don't take
that personally, though! Instead, take another critical look at what you are suggesting
and see if it: changes too much, is too broad, doesn't adhere to the
[developer notes](doc/developer-notes.md), is dangerous or insecure, is messily written, etc.
Identify and address any of the issues you find. Then ask e.g. on the forum or on a community
discord if someone could give their opinion on the concept itself.
- It may be because your code is too complex for all but a few people. And those people
may not have realized your pull request even exists. A great way to find people who
are qualified and care about the code you are touching is the
[Git Blame feature](https://help.github.com/articles/tracing-changes-in-a-file/). Simply
find the person touching the code you are touching before you and see if you can find
them and give them a nudge. Don't be incessant about the nudging though.
- Finally, if all else fails, ask on discord or elsewhere for someone to give your pull request
a look. If you think you've been waiting an unreasonably long amount of time (month+) for
no particular reason (few lines changed, etc), this is totally fine. Try to return the favor
when someone else is asking for feedback on their code, and universe balances out.

Release Policy
--------------
@@ -271,7 +271,6 @@ if test "x$CXXFLAGS_overridden" = "xno"; then
AX_CHECK_COMPILE_FLAG([-Wformat],[CXXFLAGS="$CXXFLAGS -Wformat"],,[[$CXXFLAG_WERROR]])
AX_CHECK_COMPILE_FLAG([-Wvla],[CXXFLAGS="$CXXFLAGS -Wvla"],,[[$CXXFLAG_WERROR]])
AX_CHECK_COMPILE_FLAG([-Wformat-security],[CXXFLAGS="$CXXFLAGS -Wformat-security"],,[[$CXXFLAG_WERROR]])
AX_CHECK_COMPILE_FLAG([-Wshadow],[CXXFLAGS="$CXXFLAGS -Wshadow"],,[[$CXXFLAG_WERROR]])

## Some compilers (gcc) ignore unknown -Wno-* options, but warn about all
## unknown options if any other warning is produced. Test the -Wfoo case, and
@@ -1,5 +1,5 @@
#!/usr/bin/env python3
# Copyright (c) 2016 The Bitcoin Core developers
# Copyright (c) 2016-2017 Bitcoin Core Developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.

@@ -15,7 +15,7 @@
# In case of a clean merge that is accepted by the user, the local branch with
# name $BRANCH is overwritten with the merged result, and optionally pushed.
from __future__ import division,print_function,unicode_literals
import os
import os,sys
from sys import stdin,stdout,stderr
import argparse
import hashlib

@@ -127,6 +127,9 @@ def tree_sha512sum(commit='HEAD'):
raise IOError('Non-zero return value executing git cat-file')
return overall.hexdigest()

def print_merge_details(pull, title, branch, base_branch, head_branch):
print('%s#%s%s %s %sinto %s%s' % (ATTR_RESET+ATTR_PR,pull,ATTR_RESET,title,ATTR_RESET+ATTR_PR,branch,ATTR_RESET))
subprocess.check_call([GIT,'log','--graph','--topo-order','--pretty=format:'+COMMIT_FORMAT,base_branch+'..'+head_branch])

def parse_arguments():
epilog = '''

@@ -171,7 +174,7 @@ def main():
info = retrieve_pr_info(repo,pull)
if info is None:
exit(1)
title = info['title']
title = info['title'].strip()
# precedence order for destination branch argument:
# - command line argument
# - githubmerge.branch setting

@@ -256,8 +259,7 @@ def main():
printf("ERROR: Cannot update message.",file=stderr)
exit(4)

print('%s#%s%s %s %sinto %s%s' % (ATTR_RESET+ATTR_PR,pull,ATTR_RESET,title,ATTR_RESET+ATTR_PR,branch,ATTR_RESET))
subprocess.check_call([GIT,'log','--graph','--topo-order','--pretty=format:'+COMMIT_FORMAT,base_branch+'..'+head_branch])
print_merge_details(pull, title, branch, base_branch, head_branch)
print()

# Run test command if configured.

@@ -276,12 +278,6 @@ def main():
print("Difference with github ignored.",file=stderr)
else:
exit(6)
reply = ask_prompt("Press 'd' to accept the diff.")
if reply.lower() == 'd':
print("Diff accepted.",file=stderr)
else:
print("ERROR: Diff rejected.",file=stderr)
exit(6)
else:
# Verify the result manually.
print("Dropping you on a shell so you can try building/testing the merged source.",file=stderr)

@@ -290,12 +286,6 @@ def main():
if os.path.isfile('/etc/debian_version'): # Show pull number on Debian default prompt
os.putenv('debian_chroot',pull)
subprocess.call([BASH,'-i'])
reply = ask_prompt("Type 'm' to accept the merge.")
if reply.lower() == 'm':
print("Merge accepted.",file=stderr)
else:
print("ERROR: Merge rejected.",file=stderr)
exit(7)

second_sha512 = tree_sha512sum()
if first_sha512 != second_sha512:

@@ -303,16 +293,18 @@ def main():
exit(8)

# Sign the merge commit.
reply = ask_prompt("Type 's' to sign off on the merge.")
if reply == 's':
try:
subprocess.check_call([GIT,'commit','-q','--gpg-sign','--amend','--no-edit'])
except subprocess.CalledProcessError as e:
print("Error signing, exiting.",file=stderr)
print_merge_details(pull, title, branch, base_branch, head_branch)
while True:
reply = ask_prompt("Type 's' to sign off on the above merge, or 'x' to reject and exit.").lower()
if reply == 's':
try:
subprocess.check_call([GIT,'commit','-q','--gpg-sign','--amend','--no-edit'])
break
except subprocess.CalledProcessError as e:
print("Error while signing, asking again.",file=stderr)
elif reply == 'x':
print("Not signing off on merge, exiting.",file=stderr)
exit(1)
else:
print("Not signing off on merge, exiting.",file=stderr)
exit(1)

# Put the result in branch.
subprocess.check_call([GIT,'checkout','-q',branch])

@@ -326,9 +318,13 @@ def main():
subprocess.call([GIT,'branch','-q','-D',local_merge_branch],stderr=devnull)

# Push the result.
reply = ask_prompt("Type 'push' to push the result to %s, branch %s." % (host_repo,branch))
if reply.lower() == 'push':
subprocess.check_call([GIT,'push',host_repo,'refs/heads/'+branch])
while True:
reply = ask_prompt("Type 'push' to push the result to %s, branch %s, or 'x' to exit without pushing." % (host_repo,branch)).lower()
if reply == 'push':
subprocess.check_call([GIT,'push',host_repo,'refs/heads/'+branch])
break
elif reply == 'x':
exit(1)

if __name__ == '__main__':
main()
@@ -11,7 +11,8 @@ https://github.com/dashpay/dash_hash
$ ./linearize-hashes.py linearize.cfg > hashlist.txt

Required configuration file settings for linearize-hashes:
* RPC: `rpcuser`, `rpcpassword`
* RPC: `datadir` (Required if `rpcuser` and `rpcpassword` are not specified)
* RPC: `rpcuser`, `rpcpassword` (Required if `datadir` is not specified)

Optional config file setting for linearize-hashes:
* RPC: `host` (Default: `127.0.0.1`)
@@ -1,6 +1,7 @@
# bitcoind RPC settings (linearize-hashes)
rpcuser=someuser
rpcpassword=somepassword
#datadir=~/.bitcoin
host=127.0.0.1
port=9998
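For illustration only (not shipped in this diff), a cookie-auth variant of this sample config would drop `rpcuser`/`rpcpassword` and set `datadir` instead, so the credentials are read from `<datadir>/.cookie` by the `get_rpc_cookie()` helper added below:

```
# Hypothetical cookie-auth variant of linearize.cfg (illustrative only)
datadir=~/.bitcoin
host=127.0.0.1
port=9998
```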
@@ -16,6 +16,8 @@ import json
import re
import base64
import sys
import os
import os.path

settings = {}

@@ -93,6 +95,14 @@ def get_block_hashes(settings, max_blocks_per_call=10000):
height += num_blocks

def get_rpc_cookie():
# Open the cookie file
with open(os.path.join(os.path.expanduser(settings['datadir']), '.cookie'), 'r') as f:
combined = f.readline()
combined_split = combined.split(":")
settings['rpcuser'] = combined_split[0]
settings['rpcpassword'] = combined_split[1]

if __name__ == '__main__':
if len(sys.argv) != 2:
print("Usage: linearize-hashes.py CONFIG-FILE")

@@ -122,8 +132,15 @@ if __name__ == '__main__':
settings['max_height'] = 313000
if 'rev_hash_bytes' not in settings:
settings['rev_hash_bytes'] = 'false'

use_userpass = True
use_datadir = False
if 'rpcuser' not in settings or 'rpcpassword' not in settings:
print("Missing username and/or password in cfg file", file=stderr)
use_userpass = False
if 'datadir' in settings and not use_userpass:
use_datadir = True
if not use_userpass and not use_datadir:
print("Missing datadir or username and/or password in cfg file", file=stderr)
sys.exit(1)

settings['port'] = int(settings['port'])

@@ -133,4 +150,8 @@ if __name__ == '__main__':
# Force hash byte format setting to be lowercase to make comparisons easier.
settings['rev_hash_bytes'] = settings['rev_hash_bytes'].lower()

# Get the rpc user and pass from the cookie if the datadir is set
if use_datadir:
get_rpc_cookie()

get_block_hashes(settings)
@@ -301,7 +301,6 @@ def copyFramework(framework, path, verbose):
if os.path.exists(fromContentsDir):
toContentsDir = os.path.join(path, framework.destinationVersionContentsDirectory)
shutil.copytree(fromContentsDir, toContentsDir, symlinks=True)
contentslinkfrom = os.path.join(path, framework.destinationContentsDirectory)
if verbose >= 3:
print("Copied Contents:", fromContentsDir)
print(" to:", toContentsDir)

@@ -674,9 +673,8 @@ else:
if verbose >= 2:
print("+ Installing qt.conf +")

f = open(os.path.join(applicationBundle.resourcesPath, "qt.conf"), "wb")
f.write(qt_conf.encode())
f.close()
with open(os.path.join(applicationBundle.resourcesPath, "qt.conf"), "wb") as f:
f.write(qt_conf.encode())

# ------------------------------------------------
doc/.gitignore (new file)

@@ -0,0 +1 @@
Doxyfile
@@ -199,6 +199,7 @@ BITCOIN_CORE_H = \
protocol.h \
random.h \
reverselock.h \
rpc/blockchain.h \
rpc/client.h \
rpc/protocol.h \
rpc/server.h \

@@ -630,6 +631,14 @@ EXTRA_DIST = $(CTAES_DIST)
EXTRA_DIST += $(IMMER_DIST)

config/dash-config.h: config/stamp-h1
@$(MAKE) -C $(top_builddir) $(subdir)/$(@)
config/stamp-h1: $(top_srcdir)/$(subdir)/config/dash-config.h.in $(top_builddir)/config.status
$(AM_V_at)$(MAKE) -C $(top_builddir) $(subdir)/$(@)
$(top_srcdir)/$(subdir)/config/dash-config.h.in: $(am__configure_deps)
$(AM_V_at)$(MAKE) -C $(top_srcdir) $(subdir)/config/dash-config.h.in

config/dash-config.h: config/stamp-h1
@$(MAKE) -C $(top_builddir) $(subdir)/$(@)
config/stamp-h1: $(top_srcdir)/$(subdir)/config/dash-config.h.in $(top_builddir)/config.status
@@ -20,7 +20,7 @@ static void DeserializeBlockTest(benchmark::State& state)
CDataStream stream((const char*)raw_bench::block813851,
(const char*)&raw_bench::block813851[sizeof(raw_bench::block813851)],
SER_NETWORK, PROTOCOL_VERSION);
char a;
char a = '\0';
stream.write(&a, 1); // Prevent compaction

while (state.KeepRunning()) {

@@ -35,7 +35,7 @@ static void DeserializeAndCheckBlockTest(benchmark::State& state)
CDataStream stream((const char*)raw_bench::block813851,
(const char*)&raw_bench::block813851[sizeof(raw_bench::block813851)],
SER_NETWORK, PROTOCOL_VERSION);
char a;
char a = '\0';
stream.write(&a, 1); // Prevent compaction

Consensus::Params params = Params(CBaseChainParams::MAIN).GetConsensus();
@@ -91,7 +91,7 @@ void AddCoins(CCoinsViewCache& cache, const CTransaction &tx, int nHeight) {
const uint256& txid = tx.GetHash();
for (size_t i = 0; i < tx.vout.size(); ++i) {
// Pass fCoinbase as the possible_overwrite flag to AddCoin, in order to correctly
// deal with the pre-BIP30 occurrances of duplicate coinbase transactions.
// deal with the pre-BIP30 occurrences of duplicate coinbase transactions.
cache.AddCoin(COutPoint(txid, i), Coin(tx.vout[i], nHeight, fCoinbase), fCoinbase);
}
}
@@ -154,7 +154,7 @@ public:
* @tparam Element should be a movable and copyable type
* @tparam Hash should be a function/callable which takes a template parameter
* hash_select and an Element and extracts a hash from it. Should return
* high-entropy hashes for `Hash h; h<0>(e) ... h<7>(e)`.
* high-entropy uint32_t hashes for `Hash h; h<0>(e) ... h<7>(e)`.
*/
template <typename Element, typename Hash>
class cache

@@ -193,12 +193,6 @@ private:
*/
uint32_t epoch_size;

/** hash_mask should be set to appropriately mask out a hash such that every
* masked hash is [0,size), eg, if floor(log2(size)) == 20, then hash_mask
* should be (1<<20)-1
*/
uint32_t hash_mask;

/** depth_limit determines how many elements insert should try to replace.
* Should be set to log2(n)*/
uint8_t depth_limit;
@@ -217,14 +211,14 @@ private:
*/
inline std::array<uint32_t, 8> compute_hashes(const Element& e) const
{
return {{hash_function.template operator()<0>(e) & hash_mask,
hash_function.template operator()<1>(e) & hash_mask,
hash_function.template operator()<2>(e) & hash_mask,
hash_function.template operator()<3>(e) & hash_mask,
hash_function.template operator()<4>(e) & hash_mask,
hash_function.template operator()<5>(e) & hash_mask,
hash_function.template operator()<6>(e) & hash_mask,
hash_function.template operator()<7>(e) & hash_mask}};
return {{(uint32_t)((hash_function.template operator()<0>(e) * (uint64_t)size) >> 32),
(uint32_t)((hash_function.template operator()<1>(e) * (uint64_t)size) >> 32),
(uint32_t)((hash_function.template operator()<2>(e) * (uint64_t)size) >> 32),
(uint32_t)((hash_function.template operator()<3>(e) * (uint64_t)size) >> 32),
(uint32_t)((hash_function.template operator()<4>(e) * (uint64_t)size) >> 32),
(uint32_t)((hash_function.template operator()<5>(e) * (uint64_t)size) >> 32),
(uint32_t)((hash_function.template operator()<6>(e) * (uint64_t)size) >> 32),
(uint32_t)((hash_function.template operator()<7>(e) * (uint64_t)size) >> 32)}};
}

/* end

@@ -305,7 +299,7 @@ public:
}

/** setup initializes the container to store no more than new_size
* elements. setup rounds down to a power of two size.
* elements.
*
* setup should only be called once.
*

@@ -316,8 +310,7 @@ public:
{
// depth_limit must be at least one otherwise errors can occur.
depth_limit = static_cast<uint8_t>(std::log2(static_cast<float>(std::max((uint32_t)2, new_size))));
size = 1 << depth_limit;
hash_mask = size-1;
size = std::max<uint32_t>(2, new_size);
table.resize(size);
collection_flags.setup(size);
epoch_flags.resize(size);
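The key change above replaces the power-of-two `hash_mask` with a fixed-point multiply-and-shift, so `setup()` no longer has to round the table size down to a power of two. A minimal Python sketch (illustrative only, not from the diff) of how `(hash * size) >> 32` maps a uniform 32-bit hash into `[0, size)`:

```python
def bucket(h32: int, size: int) -> int:
    """Range-reduce a uniform 32-bit hash to [0, size) without masking."""
    assert 0 <= h32 < 2**32 and 0 < size < 2**32
    return (h32 * size) >> 32  # equivalent to floor(h32 / 2**32 * size)

size = 1000003                     # works for non-power-of-two sizes
print(bucket(0x00000000, size))    # 0
print(bucket(0xFFFFFFFF, size))    # size - 1
```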
@@ -29,6 +29,7 @@
#include "policy/policy.h"
#include "rpc/server.h"
#include "rpc/register.h"
#include "rpc/blockchain.h"
#include "script/standard.h"
#include "script/sigcache.h"
#include "scheduler.h"
src/net.cpp

@@ -467,35 +467,31 @@ void CConnman::ClearBanned()

bool CConnman::IsBanned(CNetAddr ip)
{
bool fResult = false;
LOCK(cs_setBanned);
for (banmap_t::iterator it = setBanned.begin(); it != setBanned.end(); it++)
{
LOCK(cs_setBanned);
for (banmap_t::iterator it = setBanned.begin(); it != setBanned.end(); it++)
{
CSubNet subNet = (*it).first;
CBanEntry banEntry = (*it).second;
CSubNet subNet = (*it).first;
CBanEntry banEntry = (*it).second;

if(subNet.Match(ip) && GetTime() < banEntry.nBanUntil)
fResult = true;
if (subNet.Match(ip) && GetTime() < banEntry.nBanUntil) {
return true;
}
}
return fResult;
return false;
}

bool CConnman::IsBanned(CSubNet subnet)
{
bool fResult = false;
LOCK(cs_setBanned);
banmap_t::iterator i = setBanned.find(subnet);
if (i != setBanned.end())
{
LOCK(cs_setBanned);
banmap_t::iterator i = setBanned.find(subnet);
if (i != setBanned.end())
{
CBanEntry banEntry = (*i).second;
if (GetTime() < banEntry.nBanUntil)
fResult = true;
CBanEntry banEntry = (*i).second;
if (GetTime() < banEntry.nBanUntil) {
return true;
}
}
return fResult;
return false;
}

void CConnman::Ban(const CNetAddr& addr, const BanReason &banReason, int64_t bantimeoffset, bool sinceUnixEpoch) {
@@ -917,8 +917,8 @@ void PeerLogicValidation::BlockChecked(const CBlock& block, const CValidationSta

int nDoS = 0;
if (state.IsInvalid(nDoS)) {
if (it != mapBlockSource.end() && State(it->second.first)) {
assert (state.GetRejectCode() < REJECT_INTERNAL); // Blocks are never rejected with internal reject codes
// Don't send reject message with code 0 or an internal reject code.
if (it != mapBlockSource.end() && State(it->second.first) && state.GetRejectCode() > 0 && state.GetRejectCode() < REJECT_INTERNAL) {
CBlockReject reject = {(unsigned char)state.GetRejectCode(), state.GetRejectReason().substr(0, MAX_REJECT_MESSAGE_LENGTH), hash};
State(it->second.first)->rejects.push_back(reject);
if (nDoS > 0 && it->second.second)

@@ -2348,7 +2348,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
LogPrint("mempoolrej", "%s from peer=%d was not accepted: %s\n", tx.GetHash().ToString(),
pfrom->id,
FormatStateMessage(state));
if (state.GetRejectCode() < REJECT_INTERNAL) // Never send AcceptToMemoryPool's internal codes over P2P
if (state.GetRejectCode() > 0 && state.GetRejectCode() < REJECT_INTERNAL) // Never send AcceptToMemoryPool's internal codes over P2P
connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::REJECT, strCommand, (unsigned char)state.GetRejectCode(),
state.GetRejectReason().substr(0, MAX_REJECT_MESSAGE_LENGTH), inv.hash));
if (nDoS > 0) {
@@ -1221,7 +1221,7 @@
<bool>false</bool>
</property>
<property name="default">
<bool>true</bool>
<bool>false</bool>
</property>
</widget>
</item>
src/rest.cpp

@@ -9,6 +9,7 @@
#include "primitives/transaction.h"
#include "validation.h"
#include "httpserver.h"
#include "rpc/blockchain.h"
#include "rpc/server.h"
#include "streams.h"
#include "sync.h"

@@ -58,12 +59,9 @@ struct CCoin {
}
};

extern void TxToJSON(const CTransaction& tx, const uint256 hashBlock, UniValue& entry);
extern UniValue blockToJSON(const CBlock& block, const CBlockIndex* blockindex, bool txDetails = false);
extern UniValue mempoolInfoToJSON();
extern UniValue mempoolToJSON(bool fVerbose = false);
extern void ScriptPubKeyToJSON(const CScript& scriptPubKey, UniValue& out, bool fIncludeHex);
extern UniValue blockheaderToJSON(const CBlockIndex* blockindex);
/* Defined in rawtransaction.cpp */
void TxToJSON(const CTransaction& tx, const uint256 hashBlock, UniValue& entry);
void ScriptPubKeyToJSON(const CScript& scriptPubKey, UniValue& out, bool fIncludeHex);

static bool RESTERR(HTTPRequest* req, enum HTTPStatusCode status, std::string message)
{
@@ -4,6 +4,8 @@
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.

#include "rpc/blockchain.h"

#include "amount.h"
#include "chain.h"
#include "chainparams.h"

@@ -52,13 +54,6 @@ static CUpdatedBlock latestblock;
extern void TxToJSON(const CTransaction& tx, const uint256 hashBlock, UniValue& entry);
void ScriptPubKeyToJSON(const CScript& scriptPubKey, UniValue& out, bool fIncludeHex);

/**
* Get the difficulty of the net wrt to the given block index, or the chain tip if
* not provided.
*
* @return A floating point number that is a multiple of the main net minimum
* difficulty (4295032833 hashes).
*/
double GetDifficulty(const CBlockIndex* blockindex)
{
if (blockindex == NULL)

@@ -119,7 +114,7 @@ UniValue blockheaderToJSON(const CBlockIndex* blockindex)
return result;
}

UniValue blockToJSON(const CBlock& block, const CBlockIndex* blockindex, bool txDetails = false)
UniValue blockToJSON(const CBlock& block, const CBlockIndex* blockindex, bool txDetails)
{
UniValue result(UniValue::VOBJ);
result.push_back(Pair("hash", blockindex->GetBlockHash().GetHex()));

@@ -409,7 +404,7 @@ void entryToJSON(UniValue &info, const CTxMemPoolEntry &e)
info.push_back(Pair("instantlock", instantsend.IsLockedInstantSendTransaction(tx.GetHash()) || llmq::quorumInstantSendManager->IsLocked(tx.GetHash())));
}

UniValue mempoolToJSON(bool fVerbose = false)
UniValue mempoolToJSON(bool fVerbose)
{
if (fVerbose)
{
src/rpc/blockchain.h (new file)

@@ -0,0 +1,40 @@
// Copyright (c) 2017 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.

#ifndef BITCOIN_RPC_BLOCKCHAIN_H
#define BITCOIN_RPC_BLOCKCHAIN_H

class CBlock;
class CBlockIndex;
class CScript;
class CTransaction;
class uint256;
class UniValue;

/**
* Get the difficulty of the net wrt to the given block index, or the chain tip if
* not provided.
*
* @return A floating point number that is a multiple of the main net minimum
* difficulty (4295032833 hashes).
*/
double GetDifficulty(const CBlockIndex* blockindex = nullptr);

/** Callback for when block tip changed. */
void RPCNotifyBlockChange(bool ibd, const CBlockIndex *);

/** Block description to JSON */
UniValue blockToJSON(const CBlock& block, const CBlockIndex* blockindex, bool txDetails = false);

/** Mempool information to JSON */
UniValue mempoolInfoToJSON();

/** Mempool to JSON */
UniValue mempoolToJSON(bool fVerbose = false);

/** Block header to JSON */
UniValue blockheaderToJSON(const CBlockIndex* blockindex);

#endif
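As background (not part of the diff), the 4295032833 figure in the GetDifficulty() comment matches the expected number of hashes at the Bitcoin-inherited difficulty-1 target (nBits 0x1d00ffff); a quick check, offered as an assumption about that constant rather than something stated by this change:

```python
# Expected hashes per block at difficulty 1: 2**256 / (target + 1)
# with target = 0xFFFF * 2**208, i.e. roughly 2**48 / (2**16 - 1).
target = 0xFFFF * 2**208
print(2**256 // (target + 1))  # 4295032833
```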
@@ -17,6 +17,7 @@
#include "miner.h"
#include "net.h"
#include "pow.h"
#include "rpc/blockchain.h"
#include "rpc/server.h"
#include "spork.h"
#include "txmempool.h"
@@ -9,6 +9,7 @@
#include "init.h"
#include "net.h"
#include "netbase.h"
#include "rpc/blockchain.h"
#include "rpc/server.h"
#include "timedata.h"
#include "txmempool.h"
@@ -193,7 +193,6 @@ extern bool ParseBoolV(const UniValue& v, const std::string &strName);

extern CAmount AmountFromValue(const UniValue& value);
extern UniValue ValueFromAmount(const CAmount& amount);
extern double GetDifficulty(const CBlockIndex* blockindex = NULL);
extern std::string HelpExampleCli(const std::string& methodname, const std::string& args);
extern std::string HelpExampleRpc(const std::string& methodname, const std::string& args);

@@ -201,6 +200,5 @@ bool StartRPC();
void InterruptRPC();
void StopRPC();
std::string JSONRPCExecBatch(const UniValue& vReq);
void RPCNotifyBlockChange(bool ibd, const CBlockIndex *);

#endif // BITCOIN_RPCSERVER_H
@@ -23,7 +23,9 @@ CScheduler::~CScheduler()
#if BOOST_VERSION < 105000
static boost::system_time toPosixTime(const boost::chrono::system_clock::time_point& t)
{
return boost::posix_time::from_time_t(boost::chrono::system_clock::to_time_t(t));
// Creating the posix_time using from_time_t loses sub-second precision. So rather than exporting the time_point to time_t,
// start with a posix_time at the epoch (0) and add the milliseconds that have passed since then.
return boost::posix_time::from_time_t(0) + boost::posix_time::milliseconds(boost::chrono::duration_cast<boost::chrono::milliseconds>(t.time_since_epoch()).count());
}
#endif
@@ -352,11 +352,18 @@ I ReadVarInt(Stream& is)
I n = 0;
while(true) {
unsigned char chData = ser_readdata8(is);
if (n > (std::numeric_limits<I>::max() >> 7)) {
throw std::ios_base::failure("ReadVarInt(): size too large");
}
n = (n << 7) | (chData & 0x7F);
if (chData & 0x80)
if (chData & 0x80) {
if (n == std::numeric_limits<I>::max()) {
throw std::ios_base::failure("ReadVarInt(): size too large");
}
n++;
else
} else {
return n;
}
}
}
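The hunk above adds two overflow checks to the class-serialization VarInt decoder. A rough Python rendering of the new logic (an illustrative sketch; `read_byte` and the 64-bit bound are assumptions, since the real code is templated over the integer type `I`):

```python
def read_varint(read_byte, max_value=2**64 - 1):
    """Decode a Bitcoin-style serialized VarInt with the added bounds checks."""
    n = 0
    while True:
        ch = read_byte()                # returns one byte as an int 0..255
        if n > (max_value >> 7):        # the next 7-bit shift would overflow I
            raise IOError("ReadVarInt(): size too large")
        n = (n << 7) | (ch & 0x7F)
        if ch & 0x80:                   # continuation bit set
            if n == max_value:          # the following increment would overflow
                raise IOError("ReadVarInt(): size too large")
            n += 1
        else:
            return n
```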
@@ -181,7 +181,7 @@ BOOST_AUTO_TEST_CASE(coins_cache_simulation_test)
}

// One every 10 iterations, remove a random entry from the cache
if (insecure_rand() % 10) {
if (insecure_rand() % 10 == 0) {
COutPoint out(txids[insecure_rand() % txids.size()], 0);
int cacheid = insecure_rand() % stack.size();
stack[cacheid]->Uncache(out);

@@ -430,13 +430,13 @@ BOOST_AUTO_TEST_CASE(updatecoins_simulation_test)
}

// One every 10 iterations, remove a random entry from the cache
if (utxoset.size() > 1 && insecure_rand() % 30) {
if (utxoset.size() > 1 && insecure_rand() % 30 == 0) {
stack[insecure_rand() % stack.size()]->Uncache(FindRandomFrom(utxoset)->first);
}
if (disconnected_coins.size() > 1 && insecure_rand() % 30) {
if (disconnected_coins.size() > 1 && insecure_rand() % 30 == 0) {
stack[insecure_rand() % stack.size()]->Uncache(FindRandomFrom(disconnected_coins)->first);
}
if (duplicate_coins.size() > 1 && insecure_rand() % 30) {
if (duplicate_coins.size() > 1 && insecure_rand() % 30 == 0) {
stack[insecure_rand() % stack.size()]->Uncache(FindRandomFrom(duplicate_coins)->first);
}
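The fix above matters because `insecure_rand() % 10` (or `% 30`) is truthy for all non-multiples, so the old condition fired on roughly 9 out of 10 iterations instead of the intended 1 in 10. A tiny illustrative check (not part of the diff):

```python
import random

samples = [random.randrange(2**32) for _ in range(100_000)]
print(sum(1 for x in samples if x % 10) / len(samples))        # ~0.9  (old condition)
print(sum(1 for x in samples if x % 10 == 0) / len(samples))   # ~0.1  (new condition)
```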
@@ -60,7 +60,8 @@ BOOST_AUTO_TEST_CASE(test_cuckoocache_no_fakes)
{
insecure_rand = FastRandomContext(true);
CuckooCache::cache<uint256, uint256Hasher> cc{};
cc.setup_bytes(32 << 20);
size_t megabytes = 4;
cc.setup_bytes(megabytes << 20);
uint256 v;
for (int x = 0; x < 100000; ++x) {
insecure_GetRandHash(v);

@@ -135,7 +136,7 @@ BOOST_AUTO_TEST_CASE(cuckoocache_hit_rate_ok)
* as a lower bound on performance.
*/
double HitRateThresh = 0.98;
size_t megabytes = 32;
size_t megabytes = 4;
for (double load = 0.1; load < 2; load *= 2) {
double hits = test_cache<CuckooCache::cache<uint256, uint256Hasher>>(megabytes, load);
BOOST_CHECK(normalize_hit_rate(hits, load) > HitRateThresh);

@@ -204,7 +205,7 @@ void test_cache_erase(size_t megabytes)

BOOST_AUTO_TEST_CASE(cuckoocache_erase_ok)
{
size_t megabytes = 32;
size_t megabytes = 4;
test_cache_erase<CuckooCache::cache<uint256, uint256Hasher>>(megabytes);
}

@@ -291,7 +292,7 @@ void test_cache_erase_parallel(size_t megabytes)
}
BOOST_AUTO_TEST_CASE(cuckoocache_erase_parallel_ok)
{
size_t megabytes = 32;
size_t megabytes = 4;
test_cache_erase_parallel<CuckooCache::cache<uint256, uint256Hasher>>(megabytes);
}

@@ -342,13 +343,13 @@ void test_cache_generations()
}
};

const uint32_t BLOCK_SIZE = 10000;
const uint32_t BLOCK_SIZE = 1000;
// We expect window size 60 to perform reasonably given that each epoch
// stores 45% of the cache size (~472k).
const uint32_t WINDOW_SIZE = 60;
const uint32_t POP_AMOUNT = (BLOCK_SIZE / WINDOW_SIZE) / 2;
const double load = 10;
const size_t megabytes = 32;
const size_t megabytes = 4;
const size_t bytes = megabytes * (1 << 20);
const uint32_t n_insert = static_cast<uint32_t>(load * (bytes / sizeof(uint256)));
src/util.cpp

@@ -141,26 +141,24 @@ std::atomic<bool> fReopenDebugLog(false);
CTranslationInterface translationInterface;

/** Init OpenSSL library multithreading support */
static CCriticalSection** ppmutexOpenSSL;
static std::unique_ptr<CCriticalSection[]> ppmutexOpenSSL;
void locking_callback(int mode, int i, const char* file, int line) NO_THREAD_SAFETY_ANALYSIS
{
if (mode & CRYPTO_LOCK) {
ENTER_CRITICAL_SECTION(*ppmutexOpenSSL[i]);
ENTER_CRITICAL_SECTION(ppmutexOpenSSL[i]);
} else {
LEAVE_CRITICAL_SECTION(*ppmutexOpenSSL[i]);
LEAVE_CRITICAL_SECTION(ppmutexOpenSSL[i]);
}
}

// Init
// Singleton for wrapping OpenSSL setup/teardown.
class CInit
{
public:
CInit()
{
// Init OpenSSL library multithreading support
ppmutexOpenSSL = (CCriticalSection**)OPENSSL_malloc(CRYPTO_num_locks() * sizeof(CCriticalSection*));
for (int i = 0; i < CRYPTO_num_locks(); i++)
ppmutexOpenSSL[i] = new CCriticalSection();
ppmutexOpenSSL.reset(new CCriticalSection[CRYPTO_num_locks()]);
CRYPTO_set_locking_callback(locking_callback);

// OpenSSL can optionally load a config file which lists optional loadable modules and engines.

@@ -184,9 +182,8 @@ public:
RAND_cleanup();
// Shutdown OpenSSL library multithreading support
CRYPTO_set_locking_callback(NULL);
for (int i = 0; i < CRYPTO_num_locks(); i++)
delete ppmutexOpenSSL[i];
OPENSSL_free(ppmutexOpenSSL);
// Clear the set of locks now to maintain symmetry with the constructor.
ppmutexOpenSSL.reset();
}
}
instance_of_cinit;
@@ -2185,7 +2185,7 @@ static bool ConnectBlock(const CBlock& block, CValidationState& state, CBlockInd
LogPrint("bench", "      - Connect %u transactions: %.2fms (%.3fms/tx, %.3fms/txin) [%.2fs]\n", (unsigned)block.vtx.size(), 0.001 * (nTime3 - nTime2), 0.001 * (nTime3 - nTime2) / block.vtx.size(), nInputs <= 1 ? 0 : 0.001 * (nTime3 - nTime2) / (nInputs-1), nTimeConnect * 0.000001);

if (!control.Wait())
return state.DoS(100, false);
return state.DoS(100, error("%s: CheckQueue failed", __func__), REJECT_INVALID, "block-validation-failed");
int64_t nTime4 = GetTimeMicros(); nTimeVerify += nTime4 - nTime2;
LogPrint("bench", "    - Verify %u txins: %.2fms (%.3fms/txin) [%.2fs]\n", nInputs - 1, 0.001 * (nTime4 - nTime2), nInputs <= 1 ? 0 : 0.001 * (nTime4 - nTime2) / (nInputs-1), nTimeVerify * 0.000001);

@@ -3338,10 +3338,12 @@ static bool CheckIndexAgainstCheckpoint(const CBlockIndex* pindexPrev, CValidati
return true;

int nHeight = pindexPrev->nHeight+1;
// Don't accept any forks from the main chain prior to last checkpoint
// Don't accept any forks from the main chain prior to last checkpoint.
// GetLastCheckpoint finds the last checkpoint in MapCheckpoints that's in our
// MapBlockIndex.
CBlockIndex* pcheckpoint = Checkpoints::GetLastCheckpoint(chainparams.Checkpoints());
if (pcheckpoint && nHeight < pcheckpoint->nHeight)
return state.DoS(100, error("%s: forked chain older than last checkpoint (height %d)", __func__, nHeight));
return state.DoS(100, error("%s: forked chain older than last checkpoint (height %d)", __func__, nHeight), REJECT_CHECKPOINT, "bad-fork-prior-to-checkpoint");

return true;
}

@@ -3471,7 +3473,7 @@ static bool AcceptBlockHeader(const CBlockHeader& block, CValidationState& state
CBlockIndex* pindexPrev = NULL;
BlockMap::iterator mi = mapBlockIndex.find(block.hashPrevBlock);
if (mi == mapBlockIndex.end())
return state.DoS(10, error("%s: prev block not found", __func__), 0, "bad-prevblk");
return state.DoS(10, error("%s: prev block not found", __func__), 0, "prev-blk-not-found");
pindexPrev = (*mi).second;
assert(pindexPrev);
@@ -211,7 +211,6 @@ bool CDB::Recover(const std::string& filename, void *callbackDataIn, bool (*reco
{
CDataStream ssKey(row.first, SER_DISK, CLIENT_VERSION);
CDataStream ssValue(row.second, SER_DISK, CLIENT_VERSION);
std::string strType, strErr;
if (!(*recoverKVcallback)(callbackDataIn, ssKey, ssValue))
continue;
}
@@ -333,7 +333,7 @@ UniValue removeprunedfunds(const JSONRPCRequest& request)
if (request.fHelp || request.params.size() != 1)
throw std::runtime_error(
"removeprunedfunds \"txid\"\n"
"\nDeletes the specified transaction from the wallet. Meant for use with pruned wallets and as a companion to importprunedfunds. This will effect wallet balances.\n"
"\nDeletes the specified transaction from the wallet. Meant for use with pruned wallets and as a companion to importprunedfunds. This will affect wallet balances.\n"
"\nArguments:\n"
"1. \"txid\"           (string, required) The hex-encoded id of the transaction you are deleting\n"
"\nExamples:\n"
@@ -426,6 +426,17 @@ BOOST_FIXTURE_TEST_CASE(rescan, TestChain100Setup)
BOOST_CHECK_EQUAL(response.write(), strprintf("[{\"success\":false,\"error\":{\"code\":-1,\"message\":\"Failed to rescan before time %d, transactions may be missing.\"}},{\"success\":true}]", newTip->GetBlockTimeMax()));
::pwalletMain = backup;
}

// Verify ScanForWalletTransactions does not return null when the scan is
// elided due to the nTimeFirstKey optimization.
{
CWallet wallet;
{
LOCK(wallet.cs_wallet);
wallet.UpdateTimeFirstKey(newTip->GetBlockTime() + 7200 + 1);
}
BOOST_CHECK_EQUAL(newTip, wallet.ScanForWalletTransactions(newTip));
}
}

// Verify importwallet RPC starts rescan at earliest block with timestamp
@@ -1604,7 +1604,7 @@ bool CWallet::GetDecryptedHDChain(CHDChain& hdChainRet)
return true;
}

bool CWallet::IsHDEnabled()
bool CWallet::IsHDEnabled() const
{
CHDChain hdChainCurrent;
return GetHDChain(hdChainCurrent);

@@ -1822,16 +1822,17 @@ void CWalletTx::GetAccountAmounts(const std::string& strAccount, CAmount& nRecei
* exist in the wallet will be updated.
*
* Returns pointer to the first block in the last contiguous range that was
* successfully scanned.
*
* successfully scanned or elided (elided if pIndexStart points at a block
* before CWallet::nTimeFirstKey). Returns null if there is no such range, or
* the range doesn't include chainActive.Tip().
*/
CBlockIndex* CWallet::ScanForWalletTransactions(CBlockIndex* pindexStart, bool fUpdate)
{
CBlockIndex* ret = nullptr;
int64_t nNow = GetTime();
const CChainParams& chainParams = Params();

CBlockIndex* pindex = pindexStart;
CBlockIndex* ret = pindexStart;
{
LOCK2(cs_main, cs_wallet);

@@ -2186,10 +2187,7 @@ CAmount CWalletTx::GetChange() const
bool CWalletTx::InMempool() const
{
LOCK(mempool.cs);
if (mempool.exists(GetHash())) {
return true;
}
return false;
return mempool.exists(GetHash());
}

bool CWalletTx::IsTrusted() const
@@ -1164,7 +1164,7 @@ public:
*/

/* Returns true if HD is enabled */
bool IsHDEnabled();
bool IsHDEnabled() const;
/* Generates a new HD chain */
void GenerateNewHDChain();
/* Set the HD chain model (chain child index counters) */
@@ -200,16 +200,14 @@ class BIP9SoftForksTest(ComparisonTestFramework):
yield TestInstance([[block, False]])

# Restart all
self.test.block_store.close()
self.test.clear_all_connections()
stop_nodes(self.nodes)
shutil.rmtree(self.options.tmpdir)
shutil.rmtree(self.options.tmpdir + "/node0")
self.setup_chain()
self.setup_network()
self.test.block_store = BlockStore(self.options.tmpdir)
self.test.clear_all_connections()
self.test.add_all_connections(self.nodes)
NetworkThread().start() # Start up network handling in another thread

NetworkThread().start()
self.test.test_nodes[0].wait_for_verack()

def get_tests(self):
for test in itertools.chain(
@@ -3,6 +3,8 @@
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the -alertnotify option."""
import os
import time

from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *

@@ -41,12 +43,19 @@ class ForkNotifyTest(BitcoinTestFramework):
self.nodes[1].generate(1)
self.sync_all()

# Give bitcoind 10 seconds to write the alert notification
timeout = 10.0
while timeout > 0:
if os.path.exists(self.alert_filename) and os.path.getsize(self.alert_filename):
break
time.sleep(0.1)
timeout -= 0.1
else:
assert False, "-alertnotify did not warn of up-version blocks"

with open(self.alert_filename, 'r', encoding='utf8') as f:
alert_text = f.read()

if len(alert_text) == 0:
raise AssertionError("-alertnotify did not warn of up-version blocks")

# Mine more up-version blocks, should not get more alerts:
self.nodes[1].generate(1)
self.sync_all()
test/functional/net.py (new executable file)

@@ -0,0 +1,54 @@
#!/usr/bin/env python3
# Copyright (c) 2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test RPC calls related to net.

Tests correspond to code in rpc/net.cpp.
"""

from decimal import Decimal
import time

from test_framework.test_framework import BitcoinTestFramework
from test_framework.authproxy import JSONRPCException
from test_framework.util import (
    assert_equal,
    start_nodes,
    connect_nodes_bi,
)


class NetTest(BitcoinTestFramework):
    def __init__(self):
        super().__init__()
        self.setup_clean_chain = True
        self.num_nodes = 2

    def setup_network(self):
        self.nodes = start_nodes(self.num_nodes, self.options.tmpdir)
        connect_nodes_bi(self.nodes, 0, 1)
        self.is_network_split = False
        self.sync_all()

    def run_test(self):
        assert_equal(self.nodes[0].getnetworkinfo()['networkactive'], True)
        assert_equal(self.nodes[0].getnetworkinfo()['connections'], 2)  # bilateral connection

        self.nodes[0].setnetworkactive(False)
        assert_equal(self.nodes[0].getnetworkinfo()['networkactive'], False)
        timeout = 3
        while self.nodes[0].getnetworkinfo()['connections'] != 0:
            # Wait a bit for all sockets to close
            assert timeout > 0, 'not all connections closed in time'
            timeout -= 0.1
            time.sleep(0.1)

        self.nodes[0].setnetworkactive(True)
        connect_nodes_bi(self.nodes, 0, 1)
        assert_equal(self.nodes[0].getnetworkinfo()['networkactive'], True)
        assert_equal(self.nodes[0].getnetworkinfo()['connections'], 2)


if __name__ == '__main__':
    NetTest().main()
@@ -404,7 +404,7 @@ class FullBlockTest(ComparisonTestFramework):

# Extend the b26 chain to make sure bitcoind isn't accepting b26
b27 = block(27, spend=out[7])
yield rejected(RejectResult(0, b'bad-prevblk'))
yield rejected(False)

# Now try a too-large-coinbase script
tip(15)

@@ -416,7 +416,7 @@ class FullBlockTest(ComparisonTestFramework):

# Extend the b28 chain to make sure bitcoind isn't accepting b28
b29 = block(29, spend=out[7])
yield rejected(RejectResult(0, b'bad-prevblk'))
yield rejected(False)

# b30 has a max-sized coinbase scriptSig.
tip(23)
@@ -82,6 +82,7 @@ BASE_SCRIPTS= [
'decodescript.py',
'blockchain.py',
'disablewallet.py',
'net.py',
'keypool.py',
'keypool-hd.py',
'p2p-mempool.py',