Merge pull request #3023 from PastaPastaPasta/backports-0.15-pr19

Backports 0.15 pr19

Commit: 7eeee33144
@@ -414,17 +414,17 @@ AC_DEFUN([_BITCOIN_QT_FIND_LIBS_WITH_PKGCONFIG],[
   qt4_modules="QtCore QtGui QtNetwork"
   BITCOIN_QT_CHECK([
     if test x$bitcoin_qt_want_version = xqt5 || ( test x$bitcoin_qt_want_version = xauto && test x$auto_priority_version = xqt5 ); then
-      PKG_CHECK_MODULES([QT], [$qt5_modules], [QT_INCLUDES="$QT_CFLAGS"; have_qt=yes],[have_qt=no])
+      PKG_CHECK_MODULES([QT5], [$qt5_modules], [QT_INCLUDES="$QT5_CFLAGS"; QT_LIBS="$QT5_LIBS" have_qt=yes],[have_qt=no])
     elif test x$bitcoin_qt_want_version = xqt4 || ( test x$bitcoin_qt_want_version = xauto && test x$auto_priority_version = xqt4 ); then
-      PKG_CHECK_MODULES([QT], [$qt4_modules], [QT_INCLUDES="$QT_CFLAGS"; have_qt=yes], [have_qt=no])
+      PKG_CHECK_MODULES([QT4], [$qt4_modules], [QT_INCLUDES="$QT4_CFLAGS"; QT_LIBS="$QT4_LIBS" ; have_qt=yes], [have_qt=no])
     fi

     dnl qt version is set to 'auto' and the preferred version wasn't found. Now try the other.
     if test x$have_qt = xno && test x$bitcoin_qt_want_version = xauto; then
       if test x$auto_priority_version = xqt5; then
-        PKG_CHECK_MODULES([QT], [$qt4_modules], [QT_INCLUDES="$QT_CFLAGS"; have_qt=yes; QT_LIB_PREFIX=Qt; bitcoin_qt_got_major_vers=4], [have_qt=no])
+        PKG_CHECK_MODULES([QT4], [$qt4_modules], [QT_INCLUDES="$QT4_CFLAGS"; QT_LIBS="$QT4_LIBS" ; have_qt=yes; QT_LIB_PREFIX=Qt; bitcoin_qt_got_major_vers=4], [have_qt=no])
       else
-        PKG_CHECK_MODULES([QT], [$qt5_modules], [QT_INCLUDES="$QT_CFLAGS"; have_qt=yes; QT_LIB_PREFIX=Qt5; bitcoin_qt_got_major_vers=5], [have_qt=no])
+        PKG_CHECK_MODULES([QT5], [$qt5_modules], [QT_INCLUDES="$QT5_CFLAGS"; QT_LIBS="$QT5_LIBS" ; have_qt=yes; QT_LIB_PREFIX=Qt5; bitcoin_qt_got_major_vers=5], [have_qt=no])
       fi
     fi
     if test x$have_qt != xyes; then
@@ -280,6 +280,12 @@ if test "x$CXXFLAGS_overridden" = "xno"; then
     AX_CHECK_COMPILE_FLAG([-Wunused-local-typedef],[CXXFLAGS="$CXXFLAGS -Wno-unused-local-typedef"],,[[$CXXFLAG_WERROR]])
     AX_CHECK_COMPILE_FLAG([-Wdeprecated-register],[CXXFLAGS="$CXXFLAGS -Wno-deprecated-register"],,[[$CXXFLAG_WERROR]])
     AX_CHECK_COMPILE_FLAG([-Wimplicit-fallthrough],[CXXFLAGS="$CXXFLAGS -Wno-implicit-fallthrough"],,[[$CXXFLAG_WERROR]])
+
+# Check for optional instruction set support. Enabling these does _not_ imply that all code will
+# be compiled with them, rather that specific objects/libs may use them after checking for runtime
+# compatibility.
+AX_CHECK_COMPILE_FLAG([-msse4.2],[[enable_sse42=yes; SSE42_CXXFLAGS="-msse4.2"]],,[[$CXXFLAG_WERROR]])
+
 fi
 CPPFLAGS="$CPPFLAGS -DHAVE_BUILD_INFO -D__STDC_FORMAT_MACROS"
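Note the caveat in the added comment: `-msse4.2` only proves the *compiler* can emit SSE4.2 instructions, so any object built with `SSE42_CXXFLAGS` must still verify CPU support at runtime. A minimal sketch of such a runtime check (illustration only, not code from this PR; LevelDB's port layer performs the equivalent test before using its CRC32C intrinsics):

```cpp
#include <cstdio>

#if defined(__GNUC__) && (defined(__x86_64__) || defined(__i386__))
#include <cpuid.h>
#endif

// Returns true only when the CPU we are running on reports SSE4.2
// support (CPUID leaf 1, ECX bit 20).
static bool CpuHasSse42() {
#if defined(__GNUC__) && (defined(__x86_64__) || defined(__i386__))
    unsigned int eax = 0, ebx = 0, ecx = 0, edx = 0;
    if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx)) return false;
    return (ecx & (1u << 20)) != 0;
#else
    return false;  // non-x86 or non-GCC/Clang build: assume unavailable
#endif
}

int main() {
    std::printf("SSE4.2 %s\n", CpuHasSse42() ? "available" : "unavailable");
    return 0;
}
```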
@@ -1170,6 +1176,7 @@ AM_CONDITIONAL([USE_QRCODE], [test x$use_qr = xyes])
 AM_CONDITIONAL([USE_LCOV],[test x$use_lcov = xyes])
 AM_CONDITIONAL([GLIBC_BACK_COMPAT],[test x$use_glibc_compat = xyes])
 AM_CONDITIONAL([HARDEN],[test x$use_hardening = xyes])
+AM_CONDITIONAL([ENABLE_SSE42],[test x$enable_sse42 = xyes])

 AC_DEFINE(CLIENT_VERSION_MAJOR, _CLIENT_VERSION_MAJOR, [Major version])
 AC_DEFINE(CLIENT_VERSION_MINOR, _CLIENT_VERSION_MINOR, [Minor version])
@@ -1202,6 +1209,7 @@ AC_SUBST(HARDENED_CPPFLAGS)
 AC_SUBST(HARDENED_LDFLAGS)
 AC_SUBST(PIC_FLAGS)
 AC_SUBST(PIE_FLAGS)
+AC_SUBST(SSE42_CXXFLAGS)
 AC_SUBST(LIBTOOL_APP_LDFLAGS)
 AC_SUBST(USE_UPNP)
 AC_SUBST(USE_QRCODE)
@@ -20,20 +20,27 @@ RET=0
 PREV_BRANCH=`git name-rev --name-only HEAD`
 PREV_HEAD=`git rev-parse HEAD`
 for i in `git rev-list --reverse $1`; do
-    git rev-list -n 1 --pretty="%s" $i | grep -q "^scripted-diff:" || continue
-    git checkout --quiet $i^ || exit
-    SCRIPT="`git rev-list --format=%b -n1 $i | sed '/^-BEGIN VERIFY SCRIPT-$/,/^-END VERIFY SCRIPT-$/{//!b};d'`"
-    if test "x$SCRIPT" = "x"; then
-        echo "Error: missing script for: $i"
-        echo "Failed"
-        RET=1
-    else
-        echo "Running script for: $i"
-        echo "$SCRIPT"
-        eval "$SCRIPT"
-        git --no-pager diff --exit-code $i && echo "OK" || (echo "Failed"; false) || RET=1
+    if git rev-list -n 1 --pretty="%s" $i | grep -q "^scripted-diff:"; then
+        git checkout --quiet $i^ || exit
+        SCRIPT="`git rev-list --format=%b -n1 $i | sed '/^-BEGIN VERIFY SCRIPT-$/,/^-END VERIFY SCRIPT-$/{//!b};d'`"
+        if test "x$SCRIPT" = "x"; then
+            echo "Error: missing script for: $i"
+            echo "Failed"
+            RET=1
+        else
+            echo "Running script for: $i"
+            echo "$SCRIPT"
+            eval "$SCRIPT"
+            git --no-pager diff --exit-code $i && echo "OK" || (echo "Failed"; false) || RET=1
+        fi
+        git reset --quiet --hard HEAD
+    else
+        if git rev-list "--format=%b" -n1 $i | grep -q '^-\(BEGIN\|END\)[ a-zA-Z]*-$'; then
+            echo "Error: script block marker but no scripted-diff in title"
+            echo "Failed"
+            RET=1
+        fi
     fi
-    git reset --quiet --hard HEAD
 done
 git checkout --quiet $PREV_BRANCH 2>/dev/null || git checkout --quiet $PREV_HEAD
 exit $RET
@@ -404,6 +404,14 @@ Source code organization

   - *Rationale*: Shorter and simpler header files are easier to read, and reduce compile time

+- Every `.cpp` and `.h` file should `#include` every header file it directly uses classes, functions or other
+  definitions from, even if those headers are already included indirectly through other headers. One exception
+  is that a `.cpp` file does not need to re-include the includes already included in its corresponding `.h` file.
+
+  - *Rationale*: Excluding headers because they are already indirectly included results in compilation
+    failures when those indirect dependencies change. Furthermore, it obscures what the real code
+    dependencies are.
+
 - Don't import anything into the global namespace (`using namespace ...`). Use
   fully specified types such as `std::string`.
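A small illustration of the new include rule (hypothetical `wallet.h`/`wallet.cpp` files, not part of this diff):

```cpp
// wallet.h: includes every header it directly uses, even though
// "amount.h" might already arrive indirectly through another header.
#include <string>      // directly used: std::string
#include "amount.h"    // directly used: CAmount (Bitcoin's src/amount.h)

std::string FormatBalance(CAmount balance);

// wallet.cpp: may rely on wallet.h's includes (the one allowed
// exception), but must still include what it alone uses.
#include "wallet.h"
#include <sstream>     // directly used only here: std::ostringstream

std::string FormatBalance(CAmount balance) {
    std::ostringstream ss;
    ss << balance;     // CAmount is an int64_t typedef, so it is streamable
    return ss.str();
}
```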
@@ -49,6 +49,6 @@ SpacesInAngles: false
 SpacesInContainerLiterals: true
 SpacesInCStyleCastParentheses: false
 SpacesInParentheses: false
-Standard: Cpp03
+Standard: Cpp11
 TabWidth: 8
 UseTab: Never
@@ -536,6 +536,7 @@ dashd_LDADD = \
   $(LIBBITCOIN_CONSENSUS) \
   $(LIBBITCOIN_CRYPTO) \
   $(LIBLEVELDB) \
+  $(LIBLEVELDB_SSE42) \
   $(LIBMEMENV) \
   $(LIBSECP256K1)
@@ -43,6 +43,7 @@ bench_bench_dash_LDADD = \
   $(LIBBITCOIN_CONSENSUS) \
   $(LIBBITCOIN_CRYPTO) \
   $(LIBLEVELDB) \
+  $(LIBLEVELDB_SSE42) \
   $(LIBMEMENV) \
   $(LIBSECP256K1) \
   $(LIBUNIVALUE)
@@ -4,12 +4,15 @@

 LIBLEVELDB_INT = leveldb/libleveldb.a
 LIBMEMENV_INT = leveldb/libmemenv.a
+LIBLEVELDB_SSE42_INT = leveldb/libleveldb_sse42.a

 EXTRA_LIBRARIES += $(LIBLEVELDB_INT)
 EXTRA_LIBRARIES += $(LIBMEMENV_INT)
+EXTRA_LIBRARIES += $(LIBLEVELDB_SSE42_INT)

 LIBLEVELDB += $(LIBLEVELDB_INT)
 LIBMEMENV += $(LIBMEMENV_INT)
+LIBLEVELDB_SSE42 = $(LIBLEVELDB_SSE42_INT)

 LEVELDB_CPPFLAGS += -I$(srcdir)/leveldb/include
 LEVELDB_CPPFLAGS += -I$(srcdir)/leveldb/helpers/memenv
@@ -74,6 +77,7 @@ leveldb_libleveldb_a_SOURCES += leveldb/table/merger.h
 leveldb_libleveldb_a_SOURCES += leveldb/table/format.h
 leveldb_libleveldb_a_SOURCES += leveldb/table/iterator_wrapper.h
 leveldb_libleveldb_a_SOURCES += leveldb/util/crc32c.h
+leveldb_libleveldb_a_SOURCES += leveldb/util/env_posix_test_helper.h
 leveldb_libleveldb_a_SOURCES += leveldb/util/arena.h
 leveldb_libleveldb_a_SOURCES += leveldb/util/random.h
 leveldb_libleveldb_a_SOURCES += leveldb/util/posix_logger.h
@@ -135,3 +139,11 @@ leveldb_libmemenv_a_CPPFLAGS = $(leveldb_libleveldb_a_CPPFLAGS)
 leveldb_libmemenv_a_CXXFLAGS = $(leveldb_libleveldb_a_CXXFLAGS)
 leveldb_libmemenv_a_SOURCES = leveldb/helpers/memenv/memenv.cc
 leveldb_libmemenv_a_SOURCES += leveldb/helpers/memenv/memenv.h
+
+leveldb_libleveldb_sse42_a_CPPFLAGS = $(leveldb_libleveldb_a_CPPFLAGS)
+leveldb_libleveldb_sse42_a_CXXFLAGS = $(leveldb_libleveldb_a_CXXFLAGS)
+if ENABLE_SSE42
+leveldb_libleveldb_sse42_a_CPPFLAGS += -DLEVELDB_PLATFORM_POSIX_SSE
+leveldb_libleveldb_sse42_a_CXXFLAGS += $(SSE42_CXXFLAGS)
+endif
+leveldb_libleveldb_sse42_a_SOURCES = leveldb/port/port_posix_sse.cc
@@ -645,7 +645,7 @@ endif
 if ENABLE_ZMQ
 qt_dash_qt_LDADD += $(LIBBITCOIN_ZMQ) $(ZMQ_LIBS)
 endif
-qt_dash_qt_LDADD += $(LIBBITCOIN_CLI) $(LIBBITCOIN_COMMON) $(LIBBITCOIN_UTIL) $(LIBBITCOIN_CONSENSUS) $(LIBBITCOIN_CRYPTO) $(LIBUNIVALUE) $(LIBLEVELDB) $(LIBMEMENV) \
+qt_dash_qt_LDADD += $(LIBBITCOIN_CLI) $(LIBBITCOIN_COMMON) $(LIBBITCOIN_UTIL) $(LIBBITCOIN_CONSENSUS) $(LIBBITCOIN_CRYPTO) $(LIBUNIVALUE) $(LIBLEVELDB) $(LIBLEVELDB_SSE42) $(LIBMEMENV) \
   $(BACKTRACE_LIB) $(BOOST_LIBS) $(QT_LIBS) $(QT_DBUS_LIBS) $(QR_LIBS) $(PROTOBUF_LIBS) $(BDB_LIBS) $(SSL_LIBS) $(CRYPTO_LIBS) $(MINIUPNPC_LIBS) $(LIBSECP256K1) \
   $(EVENT_PTHREADS_LIBS) $(EVENT_LIBS) $(BLS_LIBS)
 qt_dash_qt_LDFLAGS = $(LDFLAGS_WRAP_EXCEPTIONS) $(RELDFLAGS) $(AM_LDFLAGS) $(QT_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
@@ -63,7 +63,7 @@ if ENABLE_ZMQ
 qt_test_test_dash_qt_LDADD += $(LIBBITCOIN_ZMQ) $(ZMQ_LIBS)
 endif
 qt_test_test_dash_qt_LDADD += $(LIBBITCOIN_CLI) $(LIBBITCOIN_COMMON) $(LIBBITCOIN_UTIL) $(LIBBITCOIN_CONSENSUS) $(LIBBITCOIN_CRYPTO) $(LIBUNIVALUE) $(LIBLEVELDB) \
-  $(LIBMEMENV) $(BACKTRACE_LIB) $(BOOST_LIBS) $(QT_DBUS_LIBS) $(QT_TEST_LIBS) $(QT_LIBS) \
+  $(LIBLEVELDB_SSE42) $(LIBMEMENV) $(BACKTRACE_LIB) $(BOOST_LIBS) $(QT_DBUS_LIBS) $(QT_TEST_LIBS) $(QT_LIBS) \
   $(QR_LIBS) $(PROTOBUF_LIBS) $(BDB_LIBS) $(SSL_LIBS) $(CRYPTO_LIBS) $(MINIUPNPC_LIBS) $(LIBSECP256K1) \
   $(EVENT_PTHREADS_LIBS) $(EVENT_LIBS) $(BLS_LIBS)
 qt_test_test_dash_qt_LDFLAGS = $(LDFLAGS_WRAP_EXCEPTIONS) $(RELDFLAGS) $(AM_LDFLAGS) $(QT_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
@@ -109,8 +109,8 @@ endif

 test_test_dash_SOURCES = $(BITCOIN_TESTS) $(JSON_TEST_FILES) $(RAW_TEST_FILES)
 test_test_dash_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) -I$(builddir)/test/ $(TESTDEFS) $(EVENT_CFLAGS)
-test_test_dash_LDADD = $(LIBBITCOIN_SERVER) $(LIBBITCOIN_CLI) $(LIBBITCOIN_COMMON) $(LIBBITCOIN_UTIL) $(LIBBITCOIN_CONSENSUS) $(LIBBITCOIN_CRYPTO) $(LIBUNIVALUE) $(LIBLEVELDB) $(LIBMEMENV) \
-  $(BACKTRACE_LIB) $(BOOST_LIBS) $(BOOST_UNIT_TEST_FRAMEWORK_LIB) $(LIBSECP256K1) $(EVENT_LIBS) $(EVENT_PTHREADS_LIBS)
+test_test_dash_LDADD = $(LIBBITCOIN_SERVER) $(LIBBITCOIN_CLI) $(LIBBITCOIN_COMMON) $(LIBBITCOIN_UTIL) $(LIBBITCOIN_CONSENSUS) $(LIBBITCOIN_CRYPTO) $(LIBUNIVALUE) \
+  $(LIBLEVELDB) $(LIBLEVELDB_SSE42) $(LIBMEMENV) $(BACKTRACE_LIB) $(BOOST_LIBS) $(BOOST_UNIT_TEST_FRAMEWORK_LIB) $(LIBSECP256K1) $(EVENT_LIBS) $(EVENT_PTHREADS_LIBS)
 test_test_dash_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
 if ENABLE_WALLET
 test_test_dash_LDADD += $(LIBBITCOIN_WALLET)
@@ -39,7 +39,7 @@ static void CoinSelection(benchmark::State& state)

     while (state.KeepRunning()) {
         // Empty wallet.
-        BOOST_FOREACH (COutput output, vCoins)
+        for (COutput output : vCoins)
             delete output.tx;
         vCoins.clear();
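This hunk and most of the ones below are the same mechanical change: the Boost iteration macro gives way to the C++11 range-based for loop, which the `.clang-format` switch to `Standard: Cpp11` above now permits. A self-contained sketch of the before/after pattern (illustrative values, not code from the PR):

```cpp
#include <vector>

int main() {
    std::vector<int> values{1, 2, 3};

    // Pre-C++11 Bitcoin/Dash code iterated with the Boost macro:
    //   BOOST_FOREACH(int v, values) { sum += v; }
    // The range-based for loop replaces it one-for-one, preserving the
    // by-value vs. by-reference choice in the loop variable:
    int sum = 0;
    for (const int& v : values) {
        sum += v;
    }
    return sum == 6 ? 0 : 1;
}
```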
@@ -172,7 +172,7 @@ ReadStatus PartiallyDownloadedBlock::InitData(const CBlockHeaderAndShortTxIDs& c
 bool PartiallyDownloadedBlock::IsTxAvailable(size_t index) const {
     assert(!header.IsNull());
     assert(index < txn_available.size());
-    return txn_available[index] ? true : false;
+    return txn_available[index] != nullptr;
 }

 ReadStatus PartiallyDownloadedBlock::FillBlock(CBlock& block, const std::vector<CTransactionRef>& vtx_missing) {
@@ -272,7 +272,7 @@ bool CBloomFilter::IsRelevantAndUpdate(const CTransaction& tx)
     if (fFound)
         return true;

-    BOOST_FOREACH(const CTxIn& txin, tx.vin)
+    for (const CTxIn& txin : tx.vin)
     {
         // Match if the filter contains an outpoint tx spends
         if (contains(txin.prevout))
@@ -120,7 +120,7 @@ private:
                 fOk = fAllOk;
             }
             // execute work
-            BOOST_FOREACH (T& check, vChecks)
+            for (T& check : vChecks)
                 if (fOk)
                     fOk = check();
             vChecks.clear();
@@ -150,7 +150,7 @@ public:
     void Add(std::vector<T>& vChecks)
     {
         boost::unique_lock<boost::mutex> lock(mutex);
-        BOOST_FOREACH (T& check, vChecks) {
+        for (T& check : vChecks) {
             queue.push_back(T());
             check.swap(queue.back());
         }
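The loop body that survives the conversion uses a push_back-then-swap idiom: it appends a default-constructed `T` and swaps the real check into it, transferring the payload without a copy (this code predates reliance on `std::move`). A standalone sketch of the same idiom with a hypothetical `Check` type (illustration only):

```cpp
#include <vector>

// Hypothetical work item that is cheap to default-construct and to swap.
struct Check {
    std::vector<unsigned char> payload;
    void swap(Check& other) { payload.swap(other.payload); }
};

int main() {
    std::vector<Check> incoming(3), queue;
    for (Check& check : incoming) {
        queue.push_back(Check());   // append an empty placeholder...
        check.swap(queue.back());   // ...then swap the real data into it
    }
    // incoming now holds empty shells; queue owns the payloads.
    return queue.size() == 3 ? 0 : 1;
}
```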
@@ -146,7 +146,7 @@ void ScriptPubKeyToUniv(const CScript& scriptPubKey,
     out.pushKV("type", GetTxnOutputType(type));

     UniValue a(UniValue::VARR);
-    BOOST_FOREACH(const CTxDestination& addr, addresses)
+    for (const CTxDestination& addr : addresses)
         a.push_back(CBitcoinAddress(addr).ToString());
     out.pushKV("addresses", a);
 }
@@ -160,7 +160,7 @@ void TxToUniv(const CTransaction& tx, const uint256& hashBlock, UniValue& entry,
     entry.pushKV("locktime", (int64_t)tx.nLockTime);

     UniValue vin(UniValue::VARR);
-    BOOST_FOREACH(const CTxIn& txin, tx.vin) {
+    for (const CTxIn& txin : tx.vin) {
         UniValue in(UniValue::VOBJ);
         if (tx.IsCoinBase())
             in.pushKV("coinbase", HexStr(txin.scriptSig.begin(), txin.scriptSig.end()));
@@ -585,7 +585,7 @@ static void MutateTxSign(CMutableTransaction& tx, const std::string& flagStr)
         SignSignature(keystore, prevPubKey, mergedTx, i, nHashType);

         // ... and merge in other signatures:
-        BOOST_FOREACH(const CTransaction& txv, txVariants) {
+        for (const CTransaction& txv : txVariants) {
             txin.scriptSig = CombineSignatures(prevPubKey, mergedTx, i, txin.scriptSig, txv.vin[i].scriptSig);
         }
         if (!VerifyScript(txin.scriptSig, prevPubKey, STANDARD_SCRIPT_VERIFY_FLAGS, MutableTransactionSignatureChecker(&mergedTx, i)))
@@ -108,7 +108,7 @@ CDBWrapper::CDBWrapper(const fs::path& path, size_t nCacheSize, bool fMemory, bo
             leveldb::Status result = leveldb::DestroyDB(path.string(), options);
             dbwrapper_private::HandleError(result);
         }
-        TryCreateDirectory(path);
+        TryCreateDirectories(path);
         LogPrintf("Opening LevelDB in %s\n", path.string());
     }
     leveldb::Status status = leveldb::DB::Open(options, path.string(), &pdb);
@@ -692,10 +692,10 @@ bool CDeterministicMNManager::BuildNewListFromBlock(const CBlock& block, const C
         }

         if (newList.HasUniqueProperty(proTx.addr)) {
-            return _state.DoS(100, false, REJECT_CONFLICT, "bad-protx-dup-addr");
+            return _state.DoS(100, false, REJECT_DUPLICATE, "bad-protx-dup-addr");
         }
         if (newList.HasUniqueProperty(proTx.keyIDOwner) || newList.HasUniqueProperty(proTx.pubKeyOperator)) {
-            return _state.DoS(100, false, REJECT_CONFLICT, "bad-protx-dup-key");
+            return _state.DoS(100, false, REJECT_DUPLICATE, "bad-protx-dup-key");
         }

         dmn->nOperatorReward = proTx.nOperatorReward;
@@ -724,7 +724,7 @@ bool CDeterministicMNManager::BuildNewListFromBlock(const CBlock& block, const C
         }

         if (newList.HasUniqueProperty(proTx.addr) && newList.GetUniquePropertyMN(proTx.addr)->proTxHash != proTx.proTxHash) {
-            return _state.DoS(100, false, REJECT_CONFLICT, "bad-protx-dup-addr");
+            return _state.DoS(100, false, REJECT_DUPLICATE, "bad-protx-dup-addr");
         }

         CDeterministicMNCPtr dmn = newList.GetMN(proTx.proTxHash);
@@ -18,7 +18,7 @@
 #include <stdio.h>

 #include <boost/algorithm/string.hpp> // boost::trim
-#include <boost/foreach.hpp> //BOOST_FOREACH
+#include <boost/foreach.hpp>

 /** WWW-Authenticate to present with 401 Unauthorized response */
 static const char* WWW_AUTH_HEADER_DATA = "Basic realm=\"jsonrpc\"";
@@ -94,7 +94,7 @@ static bool multiUserAuthorized(std::string strUserPass)

     if (gArgs.IsArgSet("-rpcauth")) {
         //Search for multi-user login/pass "rpcauth" from config
-        BOOST_FOREACH(std::string strRPCAuth, gArgs.GetArgs("-rpcauth"))
+        for (std::string strRPCAuth : gArgs.GetArgs("-rpcauth"))
         {
             std::vector<std::string> vFields;
             boost::split(vFields, strRPCAuth, boost::is_any_of(":$"));
@@ -8,8 +8,6 @@
 #include <string>
 #include <map>

 class HTTPRequest;

 /** Start HTTP RPC subsystem.
  * Precondition; HTTP and RPC has been started.
  */
src/init.cpp (21 changed lines)
@@ -750,7 +750,7 @@ void CleanupBlockRevFiles()
     // keeping a separate counter. Once we hit a gap (or if 0 doesn't exist)
     // start removing block files.
     int nContigCounter = 0;
-    BOOST_FOREACH(const PAIRTYPE(std::string, fs::path)& item, mapBlockFiles) {
+    for (const std::pair<std::string, fs::path>& item : mapBlockFiles) {
         if (atoi(item.first) == nContigCounter) {
             nContigCounter++;
             continue;
@@ -803,7 +803,7 @@ void ThreadImport(std::vector<fs::path> vImportFiles)
     }

     // -loadblock=
-    BOOST_FOREACH(const fs::path& path, vImportFiles) {
+    for (const fs::path& path : vImportFiles) {
         FILE *file = fsbridge::fopen(path, "rb");
         if (file) {
             LogPrintf("Importing blocks file %s...\n", path.string());
@@ -1467,6 +1467,7 @@ bool AppInitSanityChecks()
     // ********************************************************* Step 4: sanity checks

     // Initialize elliptic curve code
+    RandomInit();
     ECC_Start();
     globalVerifyHandle.reset(new ECCVerifyHandle());

@@ -1591,7 +1592,7 @@ bool AppInitMain(boost::thread_group& threadGroup, CScheduler& scheduler)
     }

     if (gArgs.IsArgSet("-uacomment")) {
-        BOOST_FOREACH(std::string cmt, gArgs.GetArgs("-uacomment"))
+        for (std::string cmt : gArgs.GetArgs("-uacomment"))
         {
             if (cmt != SanitizeString(cmt, SAFE_CHARS_UA_COMMENT))
                 return InitError(strprintf(_("User Agent comment (%s) contains unsafe characters."), cmt));
@@ -1606,7 +1607,7 @@ bool AppInitMain(boost::thread_group& threadGroup, CScheduler& scheduler)

     if (gArgs.IsArgSet("-onlynet")) {
         std::set<enum Network> nets;
-        BOOST_FOREACH(const std::string& snet, gArgs.GetArgs("-onlynet")) {
+        for (const std::string& snet : gArgs.GetArgs("-onlynet")) {
             enum Network net = ParseNetwork(snet);
             if (net == NET_UNROUTABLE)
                 return InitError(strprintf(_("Unknown network specified in -onlynet: '%s'"), snet));
@@ -1620,7 +1621,7 @@ bool AppInitMain(boost::thread_group& threadGroup, CScheduler& scheduler)
     }

     if (gArgs.IsArgSet("-whitelist")) {
-        BOOST_FOREACH(const std::string& net, gArgs.GetArgs("-whitelist")) {
+        for (const std::string& net : gArgs.GetArgs("-whitelist")) {
             CSubNet subnet;
             LookupSubNet(net.c_str(), subnet);
             if (!subnet.IsValid())
@@ -1682,7 +1683,7 @@ bool AppInitMain(boost::thread_group& threadGroup, CScheduler& scheduler)
     if (fListen) {
         bool fBound = false;
         if (gArgs.IsArgSet("-bind")) {
-            BOOST_FOREACH(const std::string& strBind, gArgs.GetArgs("-bind")) {
+            for (const std::string& strBind : gArgs.GetArgs("-bind")) {
                 CService addrBind;
                 if (!Lookup(strBind.c_str(), addrBind, GetListenPort(), false))
                     return InitError(ResolveErrMsg("bind", strBind));
@@ -1690,7 +1691,7 @@ bool AppInitMain(boost::thread_group& threadGroup, CScheduler& scheduler)
             }
         }
         if (gArgs.IsArgSet("-whitebind")) {
-            BOOST_FOREACH(const std::string& strBind, gArgs.GetArgs("-whitebind")) {
+            for (const std::string& strBind : gArgs.GetArgs("-whitebind")) {
                 CService addrBind;
                 if (!Lookup(strBind.c_str(), addrBind, 0, false))
                     return InitError(ResolveErrMsg("whitebind", strBind));
@@ -1710,7 +1711,7 @@ bool AppInitMain(boost::thread_group& threadGroup, CScheduler& scheduler)
     }

     if (gArgs.IsArgSet("-externalip")) {
-        BOOST_FOREACH(const std::string& strAddr, gArgs.GetArgs("-externalip")) {
+        for (const std::string& strAddr : gArgs.GetArgs("-externalip")) {
             CService addrLocal;
             if (Lookup(strAddr.c_str(), addrLocal, GetListenPort(), fNameLookup) && addrLocal.IsValid())
                 AddLocal(addrLocal, LOCAL_MANUAL);
@@ -1765,8 +1766,6 @@ bool AppInitMain(boost::thread_group& threadGroup, CScheduler& scheduler)
     fReindex = gArgs.GetBoolArg("-reindex", false);
     bool fReindexChainState = gArgs.GetBoolArg("-reindex-chainstate", false);

-    fs::create_directories(GetDataDir() / "blocks");
-
     // cache size calculations
     int64_t nTotalCache = (gArgs.GetArg("-dbcache", nDefaultDbCache) << 20);
     nTotalCache = std::max(nTotalCache, nMinDbCache << 20); // total cache cannot be less than nMinDbCache
@@ -2098,7 +2097,7 @@ bool AppInitMain(boost::thread_group& threadGroup, CScheduler& scheduler)
     std::vector<fs::path> vImportFiles;
     if (gArgs.IsArgSet("-loadblock"))
     {
-        BOOST_FOREACH(const std::string& strFile, gArgs.GetArgs("-loadblock"))
+        for (const std::string& strFile : gArgs.GetArgs("-loadblock"))
             vImportFiles.push_back(strFile);
     }
@@ -44,6 +44,7 @@ TESTS = \
	util/cache_test \
	util/coding_test \
	util/crc32c_test \
+	util/env_posix_test \
	util/env_test \
	util/hash_test

@@ -121,7 +122,7 @@ SHARED_MEMENVLIB = $(SHARED_OUTDIR)/libmemenv.a
 else
 # Update db.h if you change these.
 SHARED_VERSION_MAJOR = 1
-SHARED_VERSION_MINOR = 19
+SHARED_VERSION_MINOR = 20
 SHARED_LIB1 = libleveldb.$(PLATFORM_SHARED_EXT)
 SHARED_LIB2 = $(SHARED_LIB1).$(SHARED_VERSION_MAJOR)
 SHARED_LIB3 = $(SHARED_LIB1).$(SHARED_VERSION_MAJOR).$(SHARED_VERSION_MINOR)
@@ -337,6 +338,9 @@ $(STATIC_OUTDIR)/db_test:db/db_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS)
 $(STATIC_OUTDIR)/dbformat_test:db/dbformat_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS)
	$(CXX) $(LDFLAGS) $(CXXFLAGS) db/dbformat_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)

+$(STATIC_OUTDIR)/env_posix_test:util/env_posix_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS)
+	$(CXX) $(LDFLAGS) $(CXXFLAGS) util/env_posix_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
+
 $(STATIC_OUTDIR)/env_test:util/env_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS)
	$(CXX) $(LDFLAGS) $(CXXFLAGS) util/env_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)

@@ -412,3 +416,9 @@ $(SHARED_OUTDIR)/%.o: %.cc

 $(SHARED_OUTDIR)/%.o: %.c
	$(CC) $(CFLAGS) $(PLATFORM_SHARED_CFLAGS) -c $< -o $@
+
+$(STATIC_OUTDIR)/port/port_posix_sse.o: port/port_posix_sse.cc
+	$(CXX) $(CXXFLAGS) $(PLATFORM_SSEFLAGS) -c $< -o $@
+
+$(SHARED_OUTDIR)/port/port_posix_sse.o: port/port_posix_sse.cc
+	$(CXX) $(CXXFLAGS) $(PLATFORM_SHARED_CFLAGS) $(PLATFORM_SSEFLAGS) -c $< -o $@
@@ -16,7 +16,7 @@ Authors: Sanjay Ghemawat (sanjay@google.com) and Jeff Dean (jeff@google.com)
  * External activity (file system operations etc.) is relayed through a virtual interface so users can customize the operating system interactions.

 # Documentation
-  [LevelDB library documentation](https://rawgit.com/google/leveldb/master/doc/index.html) is online and bundled with the source code.
+  [LevelDB library documentation](https://github.com/google/leveldb/blob/master/doc/index.md) is online and bundled with the source code.

 # Limitations
@@ -113,29 +113,30 @@ by the one or two disk seeks needed to fetch the data from disk.
 Write performance will be mostly unaffected by whether or not the
 working set fits in memory.

-   readrandom  : 16.677 micros/op;  (approximately 60,000 reads per second)
-   readseq     :  0.476 micros/op;  232.3 MB/s
-   readreverse :  0.724 micros/op;  152.9 MB/s
+    readrandom  : 16.677 micros/op;  (approximately 60,000 reads per second)
+    readseq     :  0.476 micros/op;  232.3 MB/s
+    readreverse :  0.724 micros/op;  152.9 MB/s

 LevelDB compacts its underlying storage data in the background to
 improve read performance.  The results listed above were done
 immediately after a lot of random writes.  The results after
 compactions (which are usually triggered automatically) are better.

-   readrandom  : 11.602 micros/op;  (approximately 85,000 reads per second)
-   readseq     :  0.423 micros/op;  261.8 MB/s
-   readreverse :  0.663 micros/op;  166.9 MB/s
+    readrandom  : 11.602 micros/op;  (approximately 85,000 reads per second)
+    readseq     :  0.423 micros/op;  261.8 MB/s
+    readreverse :  0.663 micros/op;  166.9 MB/s

 Some of the high cost of reads comes from repeated decompression of blocks
 read from disk.  If we supply enough cache to the leveldb so it can hold the
 uncompressed blocks in memory, the read performance improves again:

-   readrandom  : 9.775 micros/op;  (approximately 100,000 reads per second before compaction)
-   readrandom  : 5.215 micros/op;  (approximately 190,000 reads per second after compaction)
+    readrandom  : 9.775 micros/op;  (approximately 100,000 reads per second before compaction)
+    readrandom  : 5.215 micros/op;  (approximately 190,000 reads per second after compaction)

 ## Repository contents

-See doc/index.html for more explanation. See doc/impl.html for a brief overview of the implementation.
+See [doc/index.md](doc/index.md) for more explanation. See
+[doc/impl.md](doc/impl.md) for a brief overview of the implementation.

 The public interface is in include/*.h.  Callers should not include or
 rely on the details of any other header files in this package.  Those
@@ -148,7 +149,7 @@ Guide to header files:
 * **include/options.h**: Control over the behavior of an entire database,
 and also control over the behavior of individual reads and writes.

-* **include/comparator.h**: Abstraction for user-specified comparison function.
+* **include/comparator.h**: Abstraction for user-specified comparison function.
 If you want just bytewise comparison of keys, you can use the default
 comparator, but clients can write their own comparator implementations if they
 want custom ordering (e.g. to handle different character encodings, etc.)
@@ -165,7 +166,7 @@ length into some other byte array.
 * **include/status.h**: Status is returned from many of the public interfaces
 and is used to report success and various kinds of errors.

-* **include/env.h**:
+* **include/env.h**:
 Abstraction of the OS environment.  A posix implementation of this interface is
 in util/env_posix.cc
@@ -63,6 +63,7 @@ PLATFORM_SHARED_EXT="so"
 PLATFORM_SHARED_LDFLAGS="-shared -Wl,-soname -Wl,"
 PLATFORM_SHARED_CFLAGS="-fPIC"
 PLATFORM_SHARED_VERSIONED=true
+PLATFORM_SSEFLAGS=

 MEMCMP_FLAG=
 if [ "$CXX" = "g++" ]; then
@@ -77,6 +78,7 @@ case "$TARGET_OS" in
         COMMON_FLAGS="$MEMCMP_FLAG -lpthread -DOS_LINUX -DCYGWIN"
         PLATFORM_LDFLAGS="-lpthread"
         PORT_FILE=port/port_posix.cc
+        PORT_SSE_FILE=port/port_posix_sse.cc
         ;;
     Darwin)
         PLATFORM=OS_MACOSX
@@ -85,24 +87,28 @@ case "$TARGET_OS" in
         [ -z "$INSTALL_PATH" ] && INSTALL_PATH=`pwd`
         PLATFORM_SHARED_LDFLAGS="-dynamiclib -install_name $INSTALL_PATH/"
         PORT_FILE=port/port_posix.cc
+        PORT_SSE_FILE=port/port_posix_sse.cc
         ;;
     Linux)
         PLATFORM=OS_LINUX
         COMMON_FLAGS="$MEMCMP_FLAG -pthread -DOS_LINUX"
         PLATFORM_LDFLAGS="-pthread"
         PORT_FILE=port/port_posix.cc
+        PORT_SSE_FILE=port/port_posix_sse.cc
         ;;
     SunOS)
         PLATFORM=OS_SOLARIS
         COMMON_FLAGS="$MEMCMP_FLAG -D_REENTRANT -DOS_SOLARIS"
         PLATFORM_LIBS="-lpthread -lrt"
         PORT_FILE=port/port_posix.cc
+        PORT_SSE_FILE=port/port_posix_sse.cc
         ;;
     FreeBSD)
         PLATFORM=OS_FREEBSD
         COMMON_FLAGS="$MEMCMP_FLAG -D_REENTRANT -DOS_FREEBSD"
         PLATFORM_LIBS="-lpthread"
         PORT_FILE=port/port_posix.cc
+        PORT_SSE_FILE=port/port_posix_sse.cc
         ;;
     GNU/kFreeBSD)
         PLATFORM=OS_KFREEBSD
@@ -115,24 +121,28 @@ case "$TARGET_OS" in
         COMMON_FLAGS="$MEMCMP_FLAG -D_REENTRANT -DOS_NETBSD"
         PLATFORM_LIBS="-lpthread -lgcc_s"
         PORT_FILE=port/port_posix.cc
+        PORT_SSE_FILE=port/port_posix_sse.cc
         ;;
     OpenBSD)
         PLATFORM=OS_OPENBSD
         COMMON_FLAGS="$MEMCMP_FLAG -D_REENTRANT -DOS_OPENBSD"
         PLATFORM_LDFLAGS="-pthread"
         PORT_FILE=port/port_posix.cc
+        PORT_SSE_FILE=port/port_posix_sse.cc
         ;;
     DragonFly)
         PLATFORM=OS_DRAGONFLYBSD
         COMMON_FLAGS="$MEMCMP_FLAG -D_REENTRANT -DOS_DRAGONFLYBSD"
         PLATFORM_LIBS="-lpthread"
         PORT_FILE=port/port_posix.cc
+        PORT_SSE_FILE=port/port_posix_sse.cc
         ;;
     OS_ANDROID_CROSSCOMPILE)
         PLATFORM=OS_ANDROID
         COMMON_FLAGS="$MEMCMP_FLAG -D_REENTRANT -DOS_ANDROID -DLEVELDB_PLATFORM_POSIX"
         PLATFORM_LDFLAGS=""  # All pthread features are in the Android C library
         PORT_FILE=port/port_posix.cc
+        PORT_SSE_FILE=port/port_posix_sse.cc
         CROSS_COMPILE=true
         ;;
     HP-UX)
@@ -140,6 +150,7 @@ case "$TARGET_OS" in
         COMMON_FLAGS="$MEMCMP_FLAG -D_REENTRANT -DOS_HPUX"
         PLATFORM_LDFLAGS="-pthread"
         PORT_FILE=port/port_posix.cc
+        PORT_SSE_FILE=port/port_posix_sse.cc
         # man ld: +h internal_name
         PLATFORM_SHARED_LDFLAGS="-shared -Wl,+h -Wl,"
         ;;
@@ -148,6 +159,7 @@ case "$TARGET_OS" in
         COMMON_FLAGS="$MEMCMP_FLAG -DOS_MACOSX"
         [ -z "$INSTALL_PATH" ] && INSTALL_PATH=`pwd`
         PORT_FILE=port/port_posix.cc
+        PORT_SSE_FILE=port/port_posix_sse.cc
         PLATFORM_SHARED_EXT=
         PLATFORM_SHARED_LDFLAGS=
         PLATFORM_SHARED_CFLAGS=
@@ -182,7 +194,7 @@ set +f # re-enable globbing

     # The sources consist of the portable files, plus the platform-specific port
     # file.
-    echo "SOURCES=$PORTABLE_FILES $PORT_FILE" >> $OUTPUT
+    echo "SOURCES=$PORTABLE_FILES $PORT_FILE $PORT_SSE_FILE" >> $OUTPUT
     echo "MEMENV_SOURCES=helpers/memenv/memenv.cc" >> $OUTPUT

 if [ "$CROSS_COMPILE" = "true" ]; then
@@ -213,6 +225,21 @@ EOF
     fi

     rm -f $CXXOUTPUT 2>/dev/null
+
+    # Test if gcc SSE 4.2 is supported
+    $CXX $CXXFLAGS -x c++ - -o $CXXOUTPUT -msse4.2 2>/dev/null <<EOF
+      int main() {}
+EOF
+    if [ "$?" = 0 ]; then
+        PLATFORM_SSEFLAGS="-msse4.2"
+    fi
+
+    rm -f $CXXOUTPUT 2>/dev/null
 fi

+# Use the SSE 4.2 CRC32C intrinsics iff runtime checks indicate compiler supports them.
+if [ -n "$PLATFORM_SSEFLAGS" ]; then
+    PLATFORM_SSEFLAGS="$PLATFORM_SSEFLAGS -DLEVELDB_PLATFORM_POSIX_SSE"
+fi
+
 PLATFORM_CCFLAGS="$PLATFORM_CCFLAGS $COMMON_FLAGS"
@@ -225,6 +252,7 @@ echo "PLATFORM_LDFLAGS=$PLATFORM_LDFLAGS" >> $OUTPUT
 echo "PLATFORM_LIBS=$PLATFORM_LIBS" >> $OUTPUT
 echo "PLATFORM_CCFLAGS=$PLATFORM_CCFLAGS" >> $OUTPUT
 echo "PLATFORM_CXXFLAGS=$PLATFORM_CXXFLAGS" >> $OUTPUT
+echo "PLATFORM_SSEFLAGS=$PLATFORM_SSEFLAGS" >> $OUTPUT
 echo "PLATFORM_SHARED_CFLAGS=$PLATFORM_SHARED_CFLAGS" >> $OUTPUT
 echo "PLATFORM_SHARED_EXT=$PLATFORM_SHARED_EXT" >> $OUTPUT
 echo "PLATFORM_SHARED_LDFLAGS=$PLATFORM_SHARED_LDFLAGS" >> $OUTPUT
@@ -84,6 +84,14 @@ static bool FLAGS_histogram = false;
 // (initialized to default value by "main")
 static int FLAGS_write_buffer_size = 0;

+// Number of bytes written to each file.
+// (initialized to default value by "main")
+static int FLAGS_max_file_size = 0;
+
+// Approximate size of user data packed per block (before compression.
+// (initialized to default value by "main")
+static int FLAGS_block_size = 0;
+
 // Number of bytes to use as a cache of uncompressed data.
 // Negative means use default settings.
 static int FLAGS_cache_size = -1;
@@ -109,6 +117,7 @@ static const char* FLAGS_db = NULL;
 namespace leveldb {

 namespace {
+leveldb::Env* g_env = NULL;

 // Helper for quickly generating random data.
 class RandomGenerator {
@@ -186,7 +195,7 @@ class Stats {
     done_ = 0;
     bytes_ = 0;
     seconds_ = 0;
-    start_ = Env::Default()->NowMicros();
+    start_ = g_env->NowMicros();
     finish_ = start_;
     message_.clear();
   }
@@ -204,7 +213,7 @@ class Stats {
   }

   void Stop() {
-    finish_ = Env::Default()->NowMicros();
+    finish_ = g_env->NowMicros();
     seconds_ = (finish_ - start_) * 1e-6;
   }

@@ -214,7 +223,7 @@ class Stats {

   void FinishedSingleOp() {
     if (FLAGS_histogram) {
-      double now = Env::Default()->NowMicros();
+      double now = g_env->NowMicros();
       double micros = now - last_op_finish_;
       hist_.Add(micros);
       if (micros > 20000) {
@@ -404,10 +413,10 @@ class Benchmark {
     reads_(FLAGS_reads < 0 ? FLAGS_num : FLAGS_reads),
     heap_counter_(0) {
     std::vector<std::string> files;
-    Env::Default()->GetChildren(FLAGS_db, &files);
+    g_env->GetChildren(FLAGS_db, &files);
     for (size_t i = 0; i < files.size(); i++) {
       if (Slice(files[i]).starts_with("heap-")) {
-        Env::Default()->DeleteFile(std::string(FLAGS_db) + "/" + files[i]);
+        g_env->DeleteFile(std::string(FLAGS_db) + "/" + files[i]);
       }
     }
     if (!FLAGS_use_existing_db) {
@@ -589,7 +598,7 @@ class Benchmark {
       arg[i].shared = &shared;
       arg[i].thread = new ThreadState(i);
       arg[i].thread->shared = &shared;
-      Env::Default()->StartThread(ThreadBody, &arg[i]);
+      g_env->StartThread(ThreadBody, &arg[i]);
     }

     shared.mu.Lock();
@@ -700,9 +709,12 @@ class Benchmark {
   void Open() {
     assert(db_ == NULL);
     Options options;
+    options.env = g_env;
     options.create_if_missing = !FLAGS_use_existing_db;
     options.block_cache = cache_;
     options.write_buffer_size = FLAGS_write_buffer_size;
+    options.max_file_size = FLAGS_max_file_size;
+    options.block_size = FLAGS_block_size;
     options.max_open_files = FLAGS_open_files;
     options.filter_policy = filter_policy_;
     options.reuse_logs = FLAGS_reuse_logs;
@@ -925,7 +937,7 @@ class Benchmark {
     char fname[100];
     snprintf(fname, sizeof(fname), "%s/heap-%04d", FLAGS_db, ++heap_counter_);
     WritableFile* file;
-    Status s = Env::Default()->NewWritableFile(fname, &file);
+    Status s = g_env->NewWritableFile(fname, &file);
     if (!s.ok()) {
       fprintf(stderr, "%s\n", s.ToString().c_str());
       return;
@@ -934,7 +946,7 @@ class Benchmark {
     delete file;
     if (!ok) {
       fprintf(stderr, "heap profiling not supported\n");
-      Env::Default()->DeleteFile(fname);
+      g_env->DeleteFile(fname);
     }
   }
 };
@@ -943,6 +955,8 @@ class Benchmark {

 int main(int argc, char** argv) {
   FLAGS_write_buffer_size = leveldb::Options().write_buffer_size;
+  FLAGS_max_file_size = leveldb::Options().max_file_size;
+  FLAGS_block_size = leveldb::Options().block_size;
   FLAGS_open_files = leveldb::Options().max_open_files;
   std::string default_db_path;

@@ -973,6 +987,10 @@ int main(int argc, char** argv) {
       FLAGS_value_size = n;
     } else if (sscanf(argv[i], "--write_buffer_size=%d%c", &n, &junk) == 1) {
       FLAGS_write_buffer_size = n;
+    } else if (sscanf(argv[i], "--max_file_size=%d%c", &n, &junk) == 1) {
+      FLAGS_max_file_size = n;
+    } else if (sscanf(argv[i], "--block_size=%d%c", &n, &junk) == 1) {
+      FLAGS_block_size = n;
     } else if (sscanf(argv[i], "--cache_size=%d%c", &n, &junk) == 1) {
       FLAGS_cache_size = n;
     } else if (sscanf(argv[i], "--bloom_bits=%d%c", &n, &junk) == 1) {
@@ -987,9 +1005,11 @@ int main(int argc, char** argv) {
     }
   }

+  leveldb::g_env = leveldb::Env::Default();
+
   // Choose a location for the test database if none given with --db=<path>
   if (FLAGS_db == NULL) {
-    leveldb::Env::Default()->GetTestDirectory(&default_db_path);
+    leveldb::g_env->GetTestDirectory(&default_db_path);
     default_db_path += "/dbbench";
     FLAGS_db = default_db_path.c_str();
   }
@@ -96,6 +96,7 @@ Options SanitizeOptions(const std::string& dbname,
   result.filter_policy = (src.filter_policy != NULL) ? ipolicy : NULL;
   ClipToRange(&result.max_open_files,    64 + kNumNonTableCacheFiles, 50000);
   ClipToRange(&result.write_buffer_size, 64<<10,                      1<<30);
+  ClipToRange(&result.max_file_size,     1<<20,                       1<<30);
   ClipToRange(&result.block_size,        1<<10,                       4<<20);
   if (result.info_log == NULL) {
     // Open a log file in the same directory as the db
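Together with the version_set.cc changes below, this makes the old hard-coded 2MB target file size a user-settable knob, `Options::max_file_size`, clamped here to the 1MiB-1GiB range; `Options::env` is the hook db_bench now exercises through `g_env`. A minimal sketch of setting both through the public API (illustrative path and value):

```cpp
#include <cassert>
#include "leveldb/db.h"
#include "leveldb/env.h"
#include "leveldb/options.h"

int main() {
    leveldb::Options options;
    options.create_if_missing = true;
    options.env = leveldb::Env::Default();  // same Env db_bench routes through g_env
    options.max_file_size = 4 << 20;        // roll tables at ~4 MiB instead of the 2 MiB default

    leveldb::DB* db = nullptr;
    leveldb::Status status = leveldb::DB::Open(options, "/tmp/testdb", &db);
    assert(status.ok());
    delete db;  // closes the database
    return 0;
}
```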
@@ -3,7 +3,7 @@
 // found in the LICENSE file. See the AUTHORS file for names of contributors.
 //
 // Log format information shared by reader and writer.
-// See ../doc/log_format.txt for more detail.
+// See ../doc/log_format.md for more detail.

 #ifndef STORAGE_LEVELDB_DB_LOG_FORMAT_H_
 #define STORAGE_LEVELDB_DB_LOG_FORMAT_H_
@@ -20,21 +20,29 @@

 namespace leveldb {

-static const int kTargetFileSize = 2 * 1048576;
+static int TargetFileSize(const Options* options) {
+  return options->max_file_size;
+}

 // Maximum bytes of overlaps in grandparent (i.e., level+2) before we
 // stop building a single file in a level->level+1 compaction.
-static const int64_t kMaxGrandParentOverlapBytes = 10 * kTargetFileSize;
+static int64_t MaxGrandParentOverlapBytes(const Options* options) {
+  return 10 * TargetFileSize(options);
+}

 // Maximum number of bytes in all compacted files.  We avoid expanding
 // the lower level file set of a compaction if it would make the
 // total compaction cover more than this many bytes.
-static const int64_t kExpandedCompactionByteSizeLimit = 25 * kTargetFileSize;
+static int64_t ExpandedCompactionByteSizeLimit(const Options* options) {
+  return 25 * TargetFileSize(options);
+}

-static double MaxBytesForLevel(int level) {
+static double MaxBytesForLevel(const Options* options, int level) {
   // Note: the result for level zero is not really used since we set
   // the level-0 compaction threshold based on number of files.
-  double result = 10 * 1048576.0;  // Result for both level-0 and level-1
+
+  // Result for both level-0 and level-1
+  double result = 10. * 1048576.0;
   while (level > 1) {
     result *= 10;
     level--;
@@ -42,8 +50,9 @@ static double MaxBytesForLevel(int level) {
   return result;
 }

-static uint64_t MaxFileSizeForLevel(int level) {
-  return kTargetFileSize;  // We could vary per level to reduce number of files?
+static uint64_t MaxFileSizeForLevel(const Options* options, int level) {
+  // We could vary per level to reduce number of files?
+  return TargetFileSize(options);
 }

 static int64_t TotalFileSize(const std::vector<FileMetaData*>& files) {
@@ -508,7 +517,7 @@ int Version::PickLevelForMemTableOutput(
       // Check that file does not overlap too many grandparent bytes.
       GetOverlappingInputs(level + 2, &start, &limit, &overlaps);
       const int64_t sum = TotalFileSize(overlaps);
-      if (sum > kMaxGrandParentOverlapBytes) {
+      if (sum > MaxGrandParentOverlapBytes(vset_->options_)) {
         break;
       }
     }
@@ -1027,7 +1036,7 @@ bool VersionSet::ReuseManifest(const std::string& dscname,
       manifest_type != kDescriptorFile ||
       !env_->GetFileSize(dscname, &manifest_size).ok() ||
       // Make new compacted MANIFEST if old one is too big
-      manifest_size >= kTargetFileSize) {
+      manifest_size >= TargetFileSize(options_)) {
     return false;
   }

@@ -1076,7 +1085,8 @@ void VersionSet::Finalize(Version* v) {
     } else {
       // Compute the ratio of current size to size limit.
       const uint64_t level_bytes = TotalFileSize(v->files_[level]);
-      score = static_cast<double>(level_bytes) / MaxBytesForLevel(level);
+      score =
+          static_cast<double>(level_bytes) / MaxBytesForLevel(options_, level);
     }

     if (score > best_score) {
@@ -1290,7 +1300,7 @@ Compaction* VersionSet::PickCompaction() {
     level = current_->compaction_level_;
     assert(level >= 0);
     assert(level+1 < config::kNumLevels);
-    c = new Compaction(level);
+    c = new Compaction(options_, level);

     // Pick the first file that comes after compact_pointer_[level]
     for (size_t i = 0; i < current_->files_[level].size(); i++) {
@@ -1307,7 +1317,7 @@ Compaction* VersionSet::PickCompaction() {
     }
   } else if (seek_compaction) {
     level = current_->file_to_compact_level_;
-    c = new Compaction(level);
+    c = new Compaction(options_, level);
     c->inputs_[0].push_back(current_->file_to_compact_);
   } else {
     return NULL;
@@ -1352,7 +1362,8 @@ void VersionSet::SetupOtherInputs(Compaction* c) {
     const int64_t inputs1_size = TotalFileSize(c->inputs_[1]);
     const int64_t expanded0_size = TotalFileSize(expanded0);
     if (expanded0.size() > c->inputs_[0].size() &&
-        inputs1_size + expanded0_size < kExpandedCompactionByteSizeLimit) {
+        inputs1_size + expanded0_size <
+            ExpandedCompactionByteSizeLimit(options_)) {
       InternalKey new_start, new_limit;
       GetRange(expanded0, &new_start, &new_limit);
       std::vector<FileMetaData*> expanded1;
@@ -1414,7 +1425,7 @@ Compaction* VersionSet::CompactRange(
   // and we must not pick one file and drop another older file if the
   // two files overlap.
   if (level > 0) {
-    const uint64_t limit = MaxFileSizeForLevel(level);
+    const uint64_t limit = MaxFileSizeForLevel(options_, level);
     uint64_t total = 0;
     for (size_t i = 0; i < inputs.size(); i++) {
       uint64_t s = inputs[i]->file_size;
@@ -1426,7 +1437,7 @@ Compaction* VersionSet::CompactRange(
     }
   }

-  Compaction* c = new Compaction(level);
+  Compaction* c = new Compaction(options_, level);
   c->input_version_ = current_;
   c->input_version_->Ref();
   c->inputs_[0] = inputs;
@@ -1434,9 +1445,9 @@ Compaction* VersionSet::CompactRange(
   return c;
 }

-Compaction::Compaction(int level)
+Compaction::Compaction(const Options* options, int level)
     : level_(level),
-      max_output_file_size_(MaxFileSizeForLevel(level)),
+      max_output_file_size_(MaxFileSizeForLevel(options, level)),
       input_version_(NULL),
       grandparent_index_(0),
       seen_key_(false),
@@ -1453,12 +1464,13 @@ Compaction::~Compaction() {
 }

 bool Compaction::IsTrivialMove() const {
+  const VersionSet* vset = input_version_->vset_;
   // Avoid a move if there is lots of overlapping grandparent data.
   // Otherwise, the move could create a parent file that will require
   // a very expensive merge later on.
-  return (num_input_files(0) == 1 &&
-          num_input_files(1) == 0 &&
-          TotalFileSize(grandparents_) <= kMaxGrandParentOverlapBytes);
+  return (num_input_files(0) == 1 && num_input_files(1) == 0 &&
+          TotalFileSize(grandparents_) <=
+              MaxGrandParentOverlapBytes(vset->options_));
 }

 void Compaction::AddInputDeletions(VersionEdit* edit) {
@@ -1491,8 +1503,9 @@ bool Compaction::IsBaseLevelForKey(const Slice& user_key) {
 }

 bool Compaction::ShouldStopBefore(const Slice& internal_key) {
+  const VersionSet* vset = input_version_->vset_;
   // Scan to find earliest grandparent file that contains key.
-  const InternalKeyComparator* icmp = &input_version_->vset_->icmp_;
+  const InternalKeyComparator* icmp = &vset->icmp_;
   while (grandparent_index_ < grandparents_.size() &&
       icmp->Compare(internal_key,
                     grandparents_[grandparent_index_]->largest.Encode()) > 0) {
@@ -1503,7 +1516,7 @@ bool Compaction::ShouldStopBefore(const Slice& internal_key) {
   }
   seen_key_ = true;

-  if (overlapped_bytes_ > kMaxGrandParentOverlapBytes) {
+  if (overlapped_bytes_ > MaxGrandParentOverlapBytes(vset->options_)) {
     // Too much overlap for current output; start new output
     overlapped_bytes_ = 0;
     return true;
@@ -366,7 +366,7 @@ class Compaction {
   friend class Version;
   friend class VersionSet;

-  explicit Compaction(int level);
+  Compaction(const Options* options, int level);

   int level_;
   uint64_t max_output_file_size_;
@@ -1,89 +0,0 @@
body {
  margin-left: 0.5in;
  margin-right: 0.5in;
  background: white;
  color: black;
}

h1 {
  margin-left: -0.2in;
  font-size: 14pt;
}
h2 {
  margin-left: -0in;
  font-size: 12pt;
}
h3 {
  margin-left: -0in;
}
h4 {
  margin-left: -0in;
}
hr {
  margin-left: -0in;
}

/* Definition lists: definition term bold */
dt {
  font-weight: bold;
}

address {
  text-align: center;
}
code,samp,var {
  color: blue;
}
kbd {
  color: #600000;
}
div.note p {
  float: right;
  width: 3in;
  margin-right: 0%;
  padding: 1px;
  border: 2px solid #6060a0;
  background-color: #fffff0;
}

ul {
  margin-top: -0em;
  margin-bottom: -0em;
}

ol {
  margin-top: -0em;
  margin-bottom: -0em;
}

UL.nobullets {
  list-style-type: none;
  list-style-image: none;
  margin-left: -1em;
}

p {
  margin: 1em 0 1em 0;
  padding: 0 0 0 0;
}

pre {
  line-height: 1.3em;
  padding: 0.4em 0 0.8em 0;
  margin: 0 0 0 0;
  border: 0 0 0 0;
  color: blue;
}

.datatable {
  margin-left: auto;
  margin-right: auto;
  margin-top: 2em;
  margin-bottom: 2em;
  border: 1px solid;
}

.datatable td,th {
  padding: 0 0.5em 0 0.5em;
  text-align: right;
}
@@ -1,213 +0,0 @@
<!DOCTYPE html>
<html>
<head>
<link rel="stylesheet" type="text/css" href="doc.css" />
<title>Leveldb file layout and compactions</title>
</head>

<body>

<h1>Files</h1>

The implementation of leveldb is similar in spirit to the
representation of a single
<a href="http://research.google.com/archive/bigtable.html">
Bigtable tablet (section 5.3)</a>.
However the organization of the files that make up the representation
is somewhat different and is explained below.

<p>
Each database is represented by a set of files stored in a directory.
There are several different types of files as documented below:
<p>
<h2>Log files</h2>
<p>
A log file (*.log) stores a sequence of recent updates.  Each update
is appended to the current log file.  When the log file reaches a
pre-determined size (approximately 4MB by default), it is converted
to a sorted table (see below) and a new log file is created for future
updates.
<p>
A copy of the current log file is kept in an in-memory structure (the
<code>memtable</code>).  This copy is consulted on every read so that read
operations reflect all logged updates.
<p>
<h2>Sorted tables</h2>
<p>
A sorted table (*.sst) stores a sequence of entries sorted by key.
Each entry is either a value for the key, or a deletion marker for the
key.  (Deletion markers are kept around to hide obsolete values
present in older sorted tables).
<p>
The set of sorted tables are organized into a sequence of levels.  The
sorted table generated from a log file is placed in a special <code>young</code>
level (also called level-0).  When the number of young files exceeds a
certain threshold (currently four), all of the young files are merged
together with all of the overlapping level-1 files to produce a
sequence of new level-1 files (we create a new level-1 file for every
2MB of data.)
<p>
Files in the young level may contain overlapping keys.  However files
in other levels have distinct non-overlapping key ranges.  Consider
level number L where L >= 1.  When the combined size of files in
level-L exceeds (10^L) MB (i.e., 10MB for level-1, 100MB for level-2,
...), one file in level-L, and all of the overlapping files in
level-(L+1) are merged to form a set of new files for level-(L+1).
These merges have the effect of gradually migrating new updates from
the young level to the largest level using only bulk reads and writes
(i.e., minimizing expensive seeks).

<h2>Manifest</h2>
<p>
A MANIFEST file lists the set of sorted tables that make up each
level, the corresponding key ranges, and other important metadata.
A new MANIFEST file (with a new number embedded in the file name)
is created whenever the database is reopened.  The MANIFEST file is
formatted as a log, and changes made to the serving state (as files
are added or removed) are appended to this log.
<p>
<h2>Current</h2>
<p>
CURRENT is a simple text file that contains the name of the latest
MANIFEST file.
<p>
<h2>Info logs</h2>
<p>
Informational messages are printed to files named LOG and LOG.old.
<p>
<h2>Others</h2>
<p>
Other files used for miscellaneous purposes may also be present
(LOCK, *.dbtmp).

<h1>Level 0</h1>
When the log file grows above a certain size (1MB by default):
<ul>
<li>Create a brand new memtable and log file and direct future updates here
<li>In the background:
<ul>
<li>Write the contents of the previous memtable to an sstable
<li>Discard the memtable
<li>Delete the old log file and the old memtable
<li>Add the new sstable to the young (level-0) level.
</ul>
</ul>

<h1>Compactions</h1>

<p>
When the size of level L exceeds its limit, we compact it in a
background thread.  The compaction picks a file from level L and all
overlapping files from the next level L+1.  Note that if a level-L
file overlaps only part of a level-(L+1) file, the entire file at
level-(L+1) is used as an input to the compaction and will be
discarded after the compaction.  Aside: because level-0 is special
(files in it may overlap each other), we treat compactions from
level-0 to level-1 specially: a level-0 compaction may pick more than
one level-0 file in case some of these files overlap each other.

<p>
A compaction merges the contents of the picked files to produce a
sequence of level-(L+1) files.  We switch to producing a new
level-(L+1) file after the current output file has reached the target
file size (2MB).  We also switch to a new output file when the key
range of the current output file has grown enough to overlap more than
ten level-(L+2) files.  This last rule ensures that a later compaction
of a level-(L+1) file will not pick up too much data from level-(L+2).

<p>
The old files are discarded and the new files are added to the serving
state.

<p>
Compactions for a particular level rotate through the key space.  In
more detail, for each level L, we remember the ending key of the last
compaction at level L.  The next compaction for level L will pick the
first file that starts after this key (wrapping around to the
beginning of the key space if there is no such file).

<p>
Compactions drop overwritten values.  They also drop deletion markers
if there are no higher numbered levels that contain a file whose range
overlaps the current key.

<h2>Timing</h2>

Level-0 compactions will read up to four 1MB files from level-0, and
at worst all the level-1 files (10MB).  I.e., we will read 14MB and
write 14MB.

<p>
Other than the special level-0 compactions, we will pick one 2MB file
from level L.  In the worst case, this will overlap ~ 12 files from
level L+1 (10 because level-(L+1) is ten times the size of level-L,
and another two at the boundaries since the file ranges at level-L
will usually not be aligned with the file ranges at level-L+1).  The
compaction will therefore read 26MB and write 26MB.  Assuming a disk
IO rate of 100MB/s (ballpark range for modern drives), the worst
compaction cost will be approximately 0.5 second.

<p>
If we throttle the background writing to something small, say 10% of
the full 100MB/s speed, a compaction may take up to 5 seconds.  If the
user is writing at 10MB/s, we might build up lots of level-0 files
(~50 to hold the 5*10MB).  This may significantly increase the cost of
reads due to the overhead of merging more files together on every
read.

<p>
Solution 1: To reduce this problem, we might want to increase the log
switching threshold when the number of level-0 files is large.  Though
the downside is that the larger this threshold, the more memory we will
need to hold the corresponding memtable.

<p>
Solution 2: We might want to decrease write rate artificially when the
number of level-0 files goes up.

<p>
Solution 3: We work on reducing the cost of very wide merges.
Perhaps most of the level-0 files will have their blocks sitting
uncompressed in the cache and we will only need to worry about the
O(N) complexity in the merging iterator.

<h2>Number of files</h2>

Instead of always making 2MB files, we could make larger files for
larger levels to reduce the total file count, though at the expense of
more bursty compactions.  Alternatively, we could shard the set of
files into multiple directories.

<p>
An experiment on an <code>ext3</code> filesystem on Feb 04, 2011 shows
the following timings to do 100K file opens in directories with
varying number of files:
<table class="datatable">
<tr><th>Files in directory</th><th>Microseconds to open a file</th></tr>
<tr><td>1000</td><td>9</td>
<tr><td>10000</td><td>10</td>
<tr><td>100000</td><td>16</td>
</table>
So maybe even the sharding is not necessary on modern filesystems?

<h1>Recovery</h1>

<ul>
<li> Read CURRENT to find name of the latest committed MANIFEST
<li> Read the named MANIFEST file
<li> Clean up stale files
<li> We could open all sstables here, but it is probably better to be lazy...
<li> Convert log chunk to a new level-0 sstable
<li> Start directing new writes to a new log file with recovered sequence#
</ul>

<h1>Garbage collection of files</h1>

<code>DeleteObsoleteFiles()</code> is called at the end of every
compaction and at the end of recovery.  It finds the names of all
files in the database.  It deletes all log files that are not the
current log file.  It deletes all table files that are not referenced
from some level and are not the output of an active compaction.

</body>
</html>
170 src/leveldb/doc/impl.md Normal file
@ -0,0 +1,170 @@
## Files

The implementation of leveldb is similar in spirit to the representation of a
single [Bigtable tablet (section 5.3)](http://research.google.com/archive/bigtable.html).
However the organization of the files that make up the representation is
somewhat different and is explained below.

Each database is represented by a set of files stored in a directory. There are
several different types of files as documented below:

### Log files

A log file (*.log) stores a sequence of recent updates. Each update is appended
to the current log file. When the log file reaches a pre-determined size
(approximately 4MB by default), it is converted to a sorted table (see below)
and a new log file is created for future updates.

A copy of the current log file is kept in an in-memory structure (the
`memtable`). This copy is consulted on every read so that read operations
reflect all logged updates.

### Sorted tables

A sorted table (*.ldb) stores a sequence of entries sorted by key. Each entry is
either a value for the key, or a deletion marker for the key. (Deletion markers
are kept around to hide obsolete values present in older sorted tables).

The set of sorted tables is organized into a sequence of levels. The sorted
table generated from a log file is placed in a special **young** level (also
called level-0). When the number of young files exceeds a certain threshold
(currently four), all of the young files are merged together with all of the
overlapping level-1 files to produce a sequence of new level-1 files (we create
a new level-1 file for every 2MB of data.)

Files in the young level may contain overlapping keys. However files in other
levels have distinct non-overlapping key ranges. Consider level number L where
L >= 1. When the combined size of files in level-L exceeds (10^L) MB (i.e., 10MB
for level-1, 100MB for level-2, ...), one file in level-L, and all of the
overlapping files in level-(L+1) are merged to form a set of new files for
level-(L+1). These merges have the effect of gradually migrating new updates
from the young level to the largest level using only bulk reads and writes
(i.e., minimizing expensive seeks).
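
For concreteness, a minimal sketch of the size threshold just described
(level-L may hold roughly 10^L MB for L >= 1); the real code lives in leveldb's
version handling and may differ in detail:

```c++
#include <cstdint>

// Size limit for level L as described above: 10MB for level-1,
// 100MB for level-2, and so on (a sketch, not the actual leveldb code).
uint64_t MaxBytesForLevel(int level) {
  uint64_t result = 10 * 1048576ULL;  // level-1 limit: 10MB
  for (int l = 1; l < level; l++) {
    result *= 10;  // each deeper level is ten times larger
  }
  return result;
}
```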

### Manifest

A MANIFEST file lists the set of sorted tables that make up each level, the
corresponding key ranges, and other important metadata. A new MANIFEST file
(with a new number embedded in the file name) is created whenever the database
is reopened. The MANIFEST file is formatted as a log, and changes made to the
serving state (as files are added or removed) are appended to this log.

### Current

CURRENT is a simple text file that contains the name of the latest MANIFEST
file.

### Info logs

Informational messages are printed to files named LOG and LOG.old.

### Others

Other files used for miscellaneous purposes may also be present (LOCK, *.dbtmp).

## Level 0

When the log file grows above a certain size (1MB by default):
create a brand new memtable and log file and direct future updates here.

In the background:

1. Write the contents of the previous memtable to an sstable.
2. Discard the memtable.
3. Delete the old log file and the old memtable.
4. Add the new sstable to the young (level-0) level.

## Compactions

When the size of level L exceeds its limit, we compact it in a background
thread. The compaction picks a file from level L and all overlapping files from
the next level L+1. Note that if a level-L file overlaps only part of a
level-(L+1) file, the entire file at level-(L+1) is used as an input to the
compaction and will be discarded after the compaction. Aside: because level-0
is special (files in it may overlap each other), we treat compactions from
level-0 to level-1 specially: a level-0 compaction may pick more than one
level-0 file in case some of these files overlap each other.

A compaction merges the contents of the picked files to produce a sequence of
level-(L+1) files. We switch to producing a new level-(L+1) file after the
current output file has reached the target file size (2MB). We also switch to a
new output file when the key range of the current output file has grown enough
to overlap more than ten level-(L+2) files. This last rule ensures that a later
compaction of a level-(L+1) file will not pick up too much data from
level-(L+2).

The old files are discarded and the new files are added to the serving state.

Compactions for a particular level rotate through the key space. In more detail,
for each level L, we remember the ending key of the last compaction at level L.
The next compaction for level L will pick the first file that starts after this
key (wrapping around to the beginning of the key space if there is no such
file).
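
A minimal sketch of that rotation rule, using a stand-in for leveldb's internal
file metadata (illustrative only, not the actual compaction-picking code):

```c++
#include <string>
#include <vector>

// Stand-in for leveldb's per-file metadata.
struct FileMetaData {
  std::string smallest;  // smallest key in the file
  std::string largest;   // largest key in the file
};

// Pick the first file that starts after the ending key of the last
// compaction at this level, wrapping around to the beginning of the
// key space if there is no such file.
const FileMetaData* PickCompactionFile(
    const std::vector<FileMetaData>& files,  // sorted by smallest key
    const std::string& last_compaction_end_key) {
  for (const FileMetaData& f : files) {
    if (f.smallest > last_compaction_end_key) return &f;
  }
  return files.empty() ? nullptr : &files[0];  // wrap around
}
```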

Compactions drop overwritten values. They also drop deletion markers if there
are no higher numbered levels that contain a file whose range overlaps the
current key.

### Timing

Level-0 compactions will read up to four 1MB files from level-0, and at worst
all the level-1 files (10MB). I.e., we will read 14MB and write 14MB.

Other than the special level-0 compactions, we will pick one 2MB file from level
L. In the worst case, this will overlap ~ 12 files from level L+1 (10 because
level-(L+1) is ten times the size of level-L, and another two at the boundaries
since the file ranges at level-L will usually not be aligned with the file
ranges at level-L+1). The compaction will therefore read 26MB and write 26MB.
Assuming a disk IO rate of 100MB/s (ballpark range for modern drives), the worst
compaction cost will be approximately 0.5 second.

If we throttle the background writing to something small, say 10% of the full
100MB/s speed, a compaction may take up to 5 seconds. If the user is writing at
10MB/s, we might build up lots of level-0 files (~50 to hold the 5*10MB). This
may significantly increase the cost of reads due to the overhead of merging more
files together on every read.

Solution 1: To reduce this problem, we might want to increase the log switching
threshold when the number of level-0 files is large. Though the downside is that
the larger this threshold, the more memory we will need to hold the
corresponding memtable.

Solution 2: We might want to decrease write rate artificially when the number of
level-0 files goes up.

Solution 3: We work on reducing the cost of very wide merges. Perhaps most of
the level-0 files will have their blocks sitting uncompressed in the cache and
we will only need to worry about the O(N) complexity in the merging iterator.

### Number of files

Instead of always making 2MB files, we could make larger files for larger levels
to reduce the total file count, though at the expense of more bursty
compactions. Alternatively, we could shard the set of files into multiple
directories.

An experiment on an ext3 filesystem on Feb 04, 2011 shows the following timings
to do 100K file opens in directories with varying number of files:

| Files in directory | Microseconds to open a file |
|-------------------:|----------------------------:|
|               1000 |                           9 |
|              10000 |                          10 |
|             100000 |                          16 |

So maybe even the sharding is not necessary on modern filesystems?

## Recovery

* Read CURRENT to find name of the latest committed MANIFEST (a minimal sketch
  of this step follows the list)
* Read the named MANIFEST file
* Clean up stale files
* We could open all sstables here, but it is probably better to be lazy...
* Convert log chunk to a new level-0 sstable
* Start directing new writes to a new log file with recovered sequence#
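
A minimal sketch of the first step, reading CURRENT to locate the live
MANIFEST (illustrative only; leveldb's actual recovery goes through its Env
abstraction rather than reading files directly):

```c++
#include <fstream>
#include <string>

// Returns the path of the MANIFEST named by dbname/CURRENT,
// e.g. "/tmp/testdb/MANIFEST-000005".
std::string ReadCurrentFile(const std::string& dbname) {
  std::ifstream in(dbname + "/CURRENT");
  std::string manifest;
  std::getline(in, manifest);  // CURRENT holds a single file name
  return dbname + "/" + manifest;
}
```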

## Garbage collection of files

`DeleteObsoleteFiles()` is called at the end of every compaction and at the end
of recovery. It finds the names of all files in the database. It deletes all log
files that are not the current log file. It deletes all table files that are not
referenced from some level and are not the output of an active compaction.
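
The deletion rule above amounts to a predicate like the following sketch
(stand-in types; not the actual `DeleteObsoleteFiles()` implementation):

```c++
#include <set>
#include <string>

// True if `file` can be deleted: a log file other than the current one,
// or a table file that no level references and that no running
// compaction is producing.
bool IsObsolete(const std::string& file,
                const std::string& current_log,
                const std::set<std::string>& live_tables,
                const std::set<std::string>& compaction_outputs) {
  const bool is_log =
      file.size() >= 4 && file.compare(file.size() - 4, 4, ".log") == 0;
  if (is_log) return file != current_log;
  return live_tables.count(file) == 0 && compaction_outputs.count(file) == 0;
}
```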
@ -1,549 +0,0 @@
<!DOCTYPE html>
<html>
<head>
<link rel="stylesheet" type="text/css" href="doc.css" />
<title>Leveldb</title>
</head>

<body>
<h1>Leveldb</h1>
<address>Jeff Dean, Sanjay Ghemawat</address>
<p>
The <code>leveldb</code> library provides a persistent key value store. Keys and
values are arbitrary byte arrays. The keys are ordered within the key
value store according to a user-specified comparator function.

<p>
<h1>Opening A Database</h1>
<p>
A <code>leveldb</code> database has a name which corresponds to a file system
directory. All of the contents of the database are stored in this
directory. The following example shows how to open a database,
creating it if necessary:
<p>
<pre>
  #include <cassert>
  #include "leveldb/db.h"

  leveldb::DB* db;
  leveldb::Options options;
  options.create_if_missing = true;
  leveldb::Status status = leveldb::DB::Open(options, "/tmp/testdb", &db);
  assert(status.ok());
  ...
</pre>
If you want to raise an error if the database already exists, add
the following line before the <code>leveldb::DB::Open</code> call:
<pre>
  options.error_if_exists = true;
</pre>
<h1>Status</h1>
<p>
You may have noticed the <code>leveldb::Status</code> type above. Values of this
type are returned by most functions in <code>leveldb</code> that may encounter an
error. You can check if such a result is ok, and also print an
associated error message:
<p>
<pre>
  leveldb::Status s = ...;
  if (!s.ok()) cerr << s.ToString() << endl;
</pre>
<h1>Closing A Database</h1>
<p>
When you are done with a database, just delete the database object.
Example:
<p>
<pre>
  ... open the db as described above ...
  ... do something with db ...
  delete db;
</pre>
<h1>Reads And Writes</h1>
<p>
The database provides <code>Put</code>, <code>Delete</code>, and <code>Get</code> methods to
modify/query the database. For example, the following code
moves the value stored under key1 to key2.
<pre>
  std::string value;
  leveldb::Status s = db->Get(leveldb::ReadOptions(), key1, &value);
  if (s.ok()) s = db->Put(leveldb::WriteOptions(), key2, value);
  if (s.ok()) s = db->Delete(leveldb::WriteOptions(), key1);
</pre>

<h1>Atomic Updates</h1>
<p>
Note that if the process dies after the Put of key2 but before the
delete of key1, the same value may be left stored under multiple keys.
Such problems can be avoided by using the <code>WriteBatch</code> class to
atomically apply a set of updates:
<p>
<pre>
  #include "leveldb/write_batch.h"
  ...
  std::string value;
  leveldb::Status s = db->Get(leveldb::ReadOptions(), key1, &value);
  if (s.ok()) {
    leveldb::WriteBatch batch;
    batch.Delete(key1);
    batch.Put(key2, value);
    s = db->Write(leveldb::WriteOptions(), &batch);
  }
</pre>
The <code>WriteBatch</code> holds a sequence of edits to be made to the database,
and these edits within the batch are applied in order. Note that we
called <code>Delete</code> before <code>Put</code> so that if <code>key1</code> is identical to <code>key2</code>,
we do not end up erroneously dropping the value entirely.
<p>
Apart from its atomicity benefits, <code>WriteBatch</code> may also be used to
speed up bulk updates by placing lots of individual mutations into the
same batch.

<h1>Synchronous Writes</h1>
By default, each write to <code>leveldb</code> is asynchronous: it
returns after pushing the write from the process into the operating
system. The transfer from operating system memory to the underlying
persistent storage happens asynchronously. The <code>sync</code> flag
can be turned on for a particular write to make the write operation
not return until the data being written has been pushed all the way to
persistent storage. (On Posix systems, this is implemented by calling
either <code>fsync(...)</code> or <code>fdatasync(...)</code> or
<code>msync(..., MS_SYNC)</code> before the write operation returns.)
<pre>
  leveldb::WriteOptions write_options;
  write_options.sync = true;
  db->Put(write_options, ...);
</pre>
Asynchronous writes are often more than a thousand times as fast as
synchronous writes. The downside of asynchronous writes is that a
crash of the machine may cause the last few updates to be lost. Note
that a crash of just the writing process (i.e., not a reboot) will not
cause any loss since even when <code>sync</code> is false, an update
is pushed from the process memory into the operating system before it
is considered done.

<p>
Asynchronous writes can often be used safely. For example, when
loading a large amount of data into the database you can handle lost
updates by restarting the bulk load after a crash. A hybrid scheme is
also possible where every Nth write is synchronous, and in the event
of a crash, the bulk load is restarted just after the last synchronous
write finished by the previous run. (The synchronous write can update
a marker that describes where to restart on a crash.)

<p>
<code>WriteBatch</code> provides an alternative to asynchronous writes.
Multiple updates may be placed in the same <code>WriteBatch</code> and
applied together using a synchronous write (i.e.,
<code>write_options.sync</code> is set to true). The extra cost of
the synchronous write will be amortized across all of the writes in
the batch.

<p>
<h1>Concurrency</h1>
<p>
A database may only be opened by one process at a time.
The <code>leveldb</code> implementation acquires a lock from the
operating system to prevent misuse. Within a single process, the
same <code>leveldb::DB</code> object may be safely shared by multiple
concurrent threads. I.e., different threads may write into or fetch
iterators or call <code>Get</code> on the same database without any
external synchronization (the leveldb implementation will
automatically do the required synchronization). However other objects
(like Iterator and WriteBatch) may require external synchronization.
If two threads share such an object, they must protect access to it
using their own locking protocol. More details are available in
the public header files.
<p>
<h1>Iteration</h1>
<p>
The following example demonstrates how to print all key,value pairs
in a database.
<p>
<pre>
  leveldb::Iterator* it = db->NewIterator(leveldb::ReadOptions());
  for (it->SeekToFirst(); it->Valid(); it->Next()) {
    cout << it->key().ToString() << ": " << it->value().ToString() << endl;
  }
  assert(it->status().ok());  // Check for any errors found during the scan
  delete it;
</pre>
The following variation shows how to process just the keys in the
range <code>[start,limit)</code>:
<p>
<pre>
  for (it->Seek(start);
       it->Valid() && it->key().ToString() < limit;
       it->Next()) {
    ...
  }
</pre>
You can also process entries in reverse order. (Caveat: reverse
iteration may be somewhat slower than forward iteration.)
<p>
<pre>
  for (it->SeekToLast(); it->Valid(); it->Prev()) {
    ...
  }
</pre>
<h1>Snapshots</h1>
<p>
Snapshots provide consistent read-only views over the entire state of
the key-value store. <code>ReadOptions::snapshot</code> may be non-NULL to indicate
that a read should operate on a particular version of the DB state.
If <code>ReadOptions::snapshot</code> is NULL, the read will operate on an
implicit snapshot of the current state.
<p>
Snapshots are created by the DB::GetSnapshot() method:
<p>
<pre>
  leveldb::ReadOptions options;
  options.snapshot = db->GetSnapshot();
  ... apply some updates to db ...
  leveldb::Iterator* iter = db->NewIterator(options);
  ... read using iter to view the state when the snapshot was created ...
  delete iter;
  db->ReleaseSnapshot(options.snapshot);
</pre>
Note that when a snapshot is no longer needed, it should be released
using the DB::ReleaseSnapshot interface. This allows the
implementation to get rid of state that was being maintained just to
support reading as of that snapshot.
<h1>Slice</h1>
<p>
The return value of the <code>it->key()</code> and <code>it->value()</code> calls above
are instances of the <code>leveldb::Slice</code> type. <code>Slice</code> is a simple
structure that contains a length and a pointer to an external byte
array. Returning a <code>Slice</code> is a cheaper alternative to returning a
<code>std::string</code> since we do not need to copy potentially large keys and
values. In addition, <code>leveldb</code> methods do not return null-terminated
C-style strings since <code>leveldb</code> keys and values are allowed to
contain '\0' bytes.
<p>
C++ strings and null-terminated C-style strings can be easily converted
to a Slice:
<p>
<pre>
  leveldb::Slice s1 = "hello";

  std::string str("world");
  leveldb::Slice s2 = str;
</pre>
A Slice can be easily converted back to a C++ string:
<pre>
  std::string str = s1.ToString();
  assert(str == std::string("hello"));
</pre>
Be careful when using Slices since it is up to the caller to ensure that
the external byte array into which the Slice points remains live while
the Slice is in use. For example, the following is buggy:
<p>
<pre>
  leveldb::Slice slice;
  if (...) {
    std::string str = ...;
    slice = str;
  }
  Use(slice);
</pre>
When the <code>if</code> statement goes out of scope, <code>str</code> will be destroyed and the
backing storage for <code>slice</code> will disappear.
<p>
<h1>Comparators</h1>
<p>
The preceding examples used the default ordering function for keys,
which orders bytes lexicographically. You can however supply a custom
comparator when opening a database. For example, suppose each
database key consists of two numbers and we should sort by the first
number, breaking ties by the second number. First, define a proper
subclass of <code>leveldb::Comparator</code> that expresses these rules:
<p>
<pre>
  class TwoPartComparator : public leveldb::Comparator {
   public:
    // Three-way comparison function:
    //   if a < b: negative result
    //   if a > b: positive result
    //   else: zero result
    int Compare(const leveldb::Slice& a, const leveldb::Slice& b) const {
      int a1, a2, b1, b2;
      ParseKey(a, &a1, &a2);
      ParseKey(b, &b1, &b2);
      if (a1 < b1) return -1;
      if (a1 > b1) return +1;
      if (a2 < b2) return -1;
      if (a2 > b2) return +1;
      return 0;
    }

    // Ignore the following methods for now:
    const char* Name() const { return "TwoPartComparator"; }
    void FindShortestSeparator(std::string*, const leveldb::Slice&) const { }
    void FindShortSuccessor(std::string*) const { }
  };
</pre>
Now create a database using this custom comparator:
<p>
<pre>
  TwoPartComparator cmp;
  leveldb::DB* db;
  leveldb::Options options;
  options.create_if_missing = true;
  options.comparator = &cmp;
  leveldb::Status status = leveldb::DB::Open(options, "/tmp/testdb", &db);
  ...
</pre>
<h2>Backwards compatibility</h2>
<p>
The result of the comparator's <code>Name</code> method is attached to the
database when it is created, and is checked on every subsequent
database open. If the name changes, the <code>leveldb::DB::Open</code> call will
fail. Therefore, change the name if and only if the new key format
and comparison function are incompatible with existing databases, and
it is ok to discard the contents of all existing databases.
<p>
You can however still gradually evolve your key format over time with
a little bit of pre-planning. For example, you could store a version
number at the end of each key (one byte should suffice for most uses).
When you wish to switch to a new key format (e.g., adding an optional
third part to the keys processed by <code>TwoPartComparator</code>),
(a) keep the same comparator name, (b) increment the version number
for new keys, and (c) change the comparator function so it uses the
version numbers found in the keys to decide how to interpret them.
<p>
<h1>Performance</h1>
<p>
Performance can be tuned by changing the default values of the
types defined in <code>include/leveldb/options.h</code>.

<p>
<h2>Block size</h2>
<p>
<code>leveldb</code> groups adjacent keys together into the same block and such a
block is the unit of transfer to and from persistent storage. The
default block size is approximately 4096 uncompressed bytes.
Applications that mostly do bulk scans over the contents of the
database may wish to increase this size. Applications that do a lot
of point reads of small values may wish to switch to a smaller block
size if performance measurements indicate an improvement. There isn't
much benefit in using blocks smaller than one kilobyte, or larger than
a few megabytes. Also note that compression will be more effective
with larger block sizes.
<p>
<h2>Compression</h2>
<p>
Each block is individually compressed before being written to
persistent storage. Compression is on by default since the default
compression method is very fast, and is automatically disabled for
uncompressible data. In rare cases, applications may want to disable
compression entirely, but should only do so if benchmarks show a
performance improvement:
<p>
<pre>
  leveldb::Options options;
  options.compression = leveldb::kNoCompression;
  ... leveldb::DB::Open(options, name, ...) ....
</pre>
<h2>Cache</h2>
<p>
The contents of the database are stored in a set of files in the
filesystem and each file stores a sequence of compressed blocks. If
<code>options.cache</code> is non-NULL, it is used to cache frequently used
uncompressed block contents.
<p>
<pre>
  #include "leveldb/cache.h"

  leveldb::Options options;
  options.cache = leveldb::NewLRUCache(100 * 1048576);  // 100MB cache
  leveldb::DB* db;
  leveldb::DB::Open(options, name, &db);
  ... use the db ...
  delete db;
  delete options.cache;
</pre>
Note that the cache holds uncompressed data, and therefore it should
be sized according to application level data sizes, without any
reduction from compression. (Caching of compressed blocks is left to
the operating system buffer cache, or any custom <code>Env</code>
implementation provided by the client.)
<p>
When performing a bulk read, the application may wish to disable
caching so that the data processed by the bulk read does not end up
displacing most of the cached contents. A per-iterator option can be
used to achieve this:
<p>
<pre>
  leveldb::ReadOptions options;
  options.fill_cache = false;
  leveldb::Iterator* it = db->NewIterator(options);
  for (it->SeekToFirst(); it->Valid(); it->Next()) {
    ...
  }
</pre>
<h2>Key Layout</h2>
<p>
Note that the unit of disk transfer and caching is a block. Adjacent
keys (according to the database sort order) will usually be placed in
the same block. Therefore the application can improve its performance
by placing keys that are accessed together near each other and placing
infrequently used keys in a separate region of the key space.
<p>
For example, suppose we are implementing a simple file system on top
of <code>leveldb</code>. The types of entries we might wish to store are:
<p>
<pre>
  filename -> permission-bits, length, list of file_block_ids
  file_block_id -> data
</pre>
We might want to prefix <code>filename</code> keys with one letter (say '/') and the
<code>file_block_id</code> keys with a different letter (say '0') so that scans
over just the metadata do not force us to fetch and cache bulky file
contents.
<p>
<h2>Filters</h2>
<p>
Because of the way <code>leveldb</code> data is organized on disk,
a single <code>Get()</code> call may involve multiple reads from disk.
The optional <code>FilterPolicy</code> mechanism can be used to reduce
the number of disk reads substantially.
<pre>
  leveldb::Options options;
  options.filter_policy = NewBloomFilterPolicy(10);
  leveldb::DB* db;
  leveldb::DB::Open(options, "/tmp/testdb", &db);
  ... use the database ...
  delete db;
  delete options.filter_policy;
</pre>
The preceding code associates a
<a href="http://en.wikipedia.org/wiki/Bloom_filter">Bloom filter</a>
based filtering policy with the database. Bloom filter based
filtering relies on keeping some number of bits of data in memory per
key (in this case 10 bits per key since that is the argument we passed
to NewBloomFilterPolicy). This filter will reduce the number of unnecessary
disk reads needed for <code>Get()</code> calls by a factor of
approximately 100. Increasing the bits per key will lead to a
larger reduction at the cost of more memory usage. We recommend that
applications whose working set does not fit in memory and that do a
lot of random reads set a filter policy.
<p>
If you are using a custom comparator, you should ensure that the filter
policy you are using is compatible with your comparator. For example,
consider a comparator that ignores trailing spaces when comparing keys.
<code>NewBloomFilterPolicy</code> must not be used with such a comparator.
Instead, the application should provide a custom filter policy that
also ignores trailing spaces. For example:
<pre>
  class CustomFilterPolicy : public leveldb::FilterPolicy {
   private:
    FilterPolicy* builtin_policy_;
   public:
    CustomFilterPolicy() : builtin_policy_(NewBloomFilterPolicy(10)) { }
    ~CustomFilterPolicy() { delete builtin_policy_; }

    const char* Name() const { return "IgnoreTrailingSpacesFilter"; }

    void CreateFilter(const Slice* keys, int n, std::string* dst) const {
      // Use builtin bloom filter code after removing trailing spaces
      std::vector<Slice> trimmed(n);
      for (int i = 0; i < n; i++) {
        trimmed[i] = RemoveTrailingSpaces(keys[i]);
      }
      builtin_policy_->CreateFilter(&trimmed[0], n, dst);
    }

    bool KeyMayMatch(const Slice& key, const Slice& filter) const {
      // Use builtin bloom filter code after removing trailing spaces
      return builtin_policy_->KeyMayMatch(RemoveTrailingSpaces(key), filter);
    }
  };
</pre>
<p>
Advanced applications may provide a filter policy that does not use
a bloom filter but uses some other mechanism for summarizing a set
of keys. See <code>leveldb/filter_policy.h</code> for detail.
<p>
<h1>Checksums</h1>
<p>
<code>leveldb</code> associates checksums with all data it stores in the file system.
There are two separate controls provided over how aggressively these
checksums are verified:
<p>
<ul>
<li> <code>ReadOptions::verify_checksums</code> may be set to true to force
checksum verification of all data that is read from the file system on
behalf of a particular read. By default, no such verification is
done.
<p>
<li> <code>Options::paranoid_checks</code> may be set to true before opening a
database to make the database implementation raise an error as soon as
it detects an internal corruption. Depending on which portion of the
database has been corrupted, the error may be raised when the database
is opened, or later by another database operation. By default,
paranoid checking is off so that the database can be used even if
parts of its persistent storage have been corrupted.
<p>
If a database is corrupted (perhaps it cannot be opened when
paranoid checking is turned on), the <code>leveldb::RepairDB</code> function
may be used to recover as much of the data as possible.
<p>
</ul>
<h1>Approximate Sizes</h1>
<p>
The <code>GetApproximateSizes</code> method can be used to get the approximate
number of bytes of file system space used by one or more key ranges.
<p>
<pre>
  leveldb::Range ranges[2];
  ranges[0] = leveldb::Range("a", "c");
  ranges[1] = leveldb::Range("x", "z");
  uint64_t sizes[2];
  db->GetApproximateSizes(ranges, 2, sizes);
</pre>
The preceding call will set <code>sizes[0]</code> to the approximate number of
bytes of file system space used by the key range <code>[a..c)</code> and
<code>sizes[1]</code> to the approximate number of bytes used by the key range
<code>[x..z)</code>.
<p>
<h1>Environment</h1>
<p>
All file operations (and other operating system calls) issued by the
<code>leveldb</code> implementation are routed through a <code>leveldb::Env</code> object.
Sophisticated clients may wish to provide their own <code>Env</code>
implementation to get better control. For example, an application may
introduce artificial delays in the file IO paths to limit the impact
of <code>leveldb</code> on other activities in the system.
<p>
<pre>
  class SlowEnv : public leveldb::Env {
    ... implementation of the Env interface ...
  };

  SlowEnv env;
  leveldb::Options options;
  options.env = &env;
  Status s = leveldb::DB::Open(options, ...);
</pre>
<h1>Porting</h1>
<p>
<code>leveldb</code> may be ported to a new platform by providing platform
specific implementations of the types/methods/functions exported by
<code>leveldb/port/port.h</code>. See <code>leveldb/port/port_example.h</code> for more
details.
<p>
In addition, the new platform may need a new default <code>leveldb::Env</code>
implementation. See <code>leveldb/util/env_posix.h</code> for an example.

<h1>Other Information</h1>

<p>
Details about the <code>leveldb</code> implementation may be found in
the following documents:
<ul>
<li> <a href="impl.html">Implementation notes</a>
<li> <a href="table_format.txt">Format of an immutable Table file</a>
<li> <a href="log_format.txt">Format of a log file</a>
</ul>

</body>
</html>
523 src/leveldb/doc/index.md Normal file
@ -0,0 +1,523 @@
leveldb
=======

_Jeff Dean, Sanjay Ghemawat_

The leveldb library provides a persistent key value store. Keys and values are
arbitrary byte arrays. The keys are ordered within the key value store
according to a user-specified comparator function.

## Opening A Database

A leveldb database has a name which corresponds to a file system directory. All
of the contents of the database are stored in this directory. The following
example shows how to open a database, creating it if necessary:

```c++
#include <cassert>
#include "leveldb/db.h"

leveldb::DB* db;
leveldb::Options options;
options.create_if_missing = true;
leveldb::Status status = leveldb::DB::Open(options, "/tmp/testdb", &db);
assert(status.ok());
...
```

If you want to raise an error if the database already exists, add the following
line before the `leveldb::DB::Open` call:

```c++
options.error_if_exists = true;
```

## Status

You may have noticed the `leveldb::Status` type above. Values of this type are
returned by most functions in leveldb that may encounter an error. You can check
if such a result is ok, and also print an associated error message:

```c++
leveldb::Status s = ...;
if (!s.ok()) cerr << s.ToString() << endl;
```

## Closing A Database

When you are done with a database, just delete the database object. Example:

```c++
... open the db as described above ...
... do something with db ...
delete db;
```

## Reads And Writes

The database provides Put, Delete, and Get methods to modify/query the database.
For example, the following code moves the value stored under key1 to key2.

```c++
std::string value;
leveldb::Status s = db->Get(leveldb::ReadOptions(), key1, &value);
if (s.ok()) s = db->Put(leveldb::WriteOptions(), key2, value);
if (s.ok()) s = db->Delete(leveldb::WriteOptions(), key1);
```

## Atomic Updates

Note that if the process dies after the Put of key2 but before the delete of
key1, the same value may be left stored under multiple keys. Such problems can
be avoided by using the `WriteBatch` class to atomically apply a set of updates:

```c++
#include "leveldb/write_batch.h"
...
std::string value;
leveldb::Status s = db->Get(leveldb::ReadOptions(), key1, &value);
if (s.ok()) {
  leveldb::WriteBatch batch;
  batch.Delete(key1);
  batch.Put(key2, value);
  s = db->Write(leveldb::WriteOptions(), &batch);
}
```

The `WriteBatch` holds a sequence of edits to be made to the database, and these
edits within the batch are applied in order. Note that we called Delete before
Put so that if key1 is identical to key2, we do not end up erroneously dropping
the value entirely.

Apart from its atomicity benefits, `WriteBatch` may also be used to speed up
bulk updates by placing lots of individual mutations into the same batch.

## Synchronous Writes

By default, each write to leveldb is asynchronous: it returns after pushing the
write from the process into the operating system. The transfer from operating
system memory to the underlying persistent storage happens asynchronously. The
sync flag can be turned on for a particular write to make the write operation
not return until the data being written has been pushed all the way to
persistent storage. (On Posix systems, this is implemented by calling either
`fsync(...)` or `fdatasync(...)` or `msync(..., MS_SYNC)` before the write
operation returns.)

```c++
leveldb::WriteOptions write_options;
write_options.sync = true;
db->Put(write_options, ...);
```

Asynchronous writes are often more than a thousand times as fast as synchronous
writes. The downside of asynchronous writes is that a crash of the machine may
cause the last few updates to be lost. Note that a crash of just the writing
process (i.e., not a reboot) will not cause any loss since even when sync is
false, an update is pushed from the process memory into the operating system
before it is considered done.

Asynchronous writes can often be used safely. For example, when loading a large
amount of data into the database you can handle lost updates by restarting the
bulk load after a crash. A hybrid scheme is also possible where every Nth write
is synchronous, and in the event of a crash, the bulk load is restarted just
after the last synchronous write finished by the previous run. (The synchronous
write can update a marker that describes where to restart on a crash.)

`WriteBatch` provides an alternative to asynchronous writes. Multiple updates
may be placed in the same WriteBatch and applied together using a synchronous
write (i.e., `write_options.sync` is set to true). The extra cost of the
synchronous write will be amortized across all of the writes in the batch.

## Concurrency

A database may only be opened by one process at a time. The leveldb
implementation acquires a lock from the operating system to prevent misuse.
Within a single process, the same `leveldb::DB` object may be safely shared by
multiple concurrent threads. I.e., different threads may write into or fetch
iterators or call Get on the same database without any external synchronization
(the leveldb implementation will automatically do the required synchronization).
However other objects (like Iterator and `WriteBatch`) may require external
synchronization. If two threads share such an object, they must protect access
to it using their own locking protocol. More details are available in the public
header files.

## Iteration

The following example demonstrates how to print all key,value pairs in a
database.

```c++
leveldb::Iterator* it = db->NewIterator(leveldb::ReadOptions());
for (it->SeekToFirst(); it->Valid(); it->Next()) {
  cout << it->key().ToString() << ": " << it->value().ToString() << endl;
}
assert(it->status().ok());  // Check for any errors found during the scan
delete it;
```

The following variation shows how to process just the keys in the range
[start,limit):

```c++
for (it->Seek(start);
     it->Valid() && it->key().ToString() < limit;
     it->Next()) {
  ...
}
```

You can also process entries in reverse order. (Caveat: reverse iteration may be
somewhat slower than forward iteration.)

```c++
for (it->SeekToLast(); it->Valid(); it->Prev()) {
  ...
}
```

## Snapshots

Snapshots provide consistent read-only views over the entire state of the
key-value store. `ReadOptions::snapshot` may be non-NULL to indicate that a
read should operate on a particular version of the DB state. If
`ReadOptions::snapshot` is NULL, the read will operate on an implicit snapshot
of the current state.

Snapshots are created by the `DB::GetSnapshot()` method:

```c++
leveldb::ReadOptions options;
options.snapshot = db->GetSnapshot();
... apply some updates to db ...
leveldb::Iterator* iter = db->NewIterator(options);
... read using iter to view the state when the snapshot was created ...
delete iter;
db->ReleaseSnapshot(options.snapshot);
```

Note that when a snapshot is no longer needed, it should be released using the
`DB::ReleaseSnapshot` interface. This allows the implementation to get rid of
state that was being maintained just to support reading as of that snapshot.

## Slice

The return value of the `it->key()` and `it->value()` calls above are instances
of the `leveldb::Slice` type. Slice is a simple structure that contains a length
and a pointer to an external byte array. Returning a Slice is a cheaper
alternative to returning a `std::string` since we do not need to copy
potentially large keys and values. In addition, leveldb methods do not return
null-terminated C-style strings since leveldb keys and values are allowed to
contain `'\0'` bytes.

C++ strings and null-terminated C-style strings can be easily converted to a
Slice:

```c++
leveldb::Slice s1 = "hello";

std::string str("world");
leveldb::Slice s2 = str;
```

A Slice can be easily converted back to a C++ string:

```c++
std::string str = s1.ToString();
assert(str == std::string("hello"));
```

Be careful when using Slices since it is up to the caller to ensure that the
external byte array into which the Slice points remains live while the Slice is
in use. For example, the following is buggy:

```c++
leveldb::Slice slice;
if (...) {
  std::string str = ...;
  slice = str;
}
Use(slice);
```

When the if statement goes out of scope, str will be destroyed and the backing
storage for slice will disappear.

## Comparators

The preceding examples used the default ordering function for keys, which orders
bytes lexicographically. You can however supply a custom comparator when opening
a database. For example, suppose each database key consists of two numbers and
we should sort by the first number, breaking ties by the second number. First,
define a proper subclass of `leveldb::Comparator` that expresses these rules:

```c++
class TwoPartComparator : public leveldb::Comparator {
 public:
  // Three-way comparison function:
  //   if a < b: negative result
  //   if a > b: positive result
  //   else: zero result
  int Compare(const leveldb::Slice& a, const leveldb::Slice& b) const {
    int a1, a2, b1, b2;
    ParseKey(a, &a1, &a2);
    ParseKey(b, &b1, &b2);
    if (a1 < b1) return -1;
    if (a1 > b1) return +1;
    if (a2 < b2) return -1;
    if (a2 > b2) return +1;
    return 0;
  }

  // Ignore the following methods for now:
  const char* Name() const { return "TwoPartComparator"; }
  void FindShortestSeparator(std::string*, const leveldb::Slice&) const {}
  void FindShortSuccessor(std::string*) const {}
};
```

Now create a database using this custom comparator:

```c++
TwoPartComparator cmp;
leveldb::DB* db;
leveldb::Options options;
options.create_if_missing = true;
options.comparator = &cmp;
leveldb::Status status = leveldb::DB::Open(options, "/tmp/testdb", &db);
...
```

### Backwards compatibility

The result of the comparator's Name method is attached to the database when it
is created, and is checked on every subsequent database open. If the name
changes, the `leveldb::DB::Open` call will fail. Therefore, change the name if
and only if the new key format and comparison function are incompatible with
existing databases, and it is ok to discard the contents of all existing
databases.

You can however still gradually evolve your key format over time with a little
bit of pre-planning. For example, you could store a version number at the end of
each key (one byte should suffice for most uses). When you wish to switch to a
new key format (e.g., adding an optional third part to the keys processed by
`TwoPartComparator`), (a) keep the same comparator name, (b) increment the
version number for new keys, and (c) change the comparator function so it uses
the version numbers found in the keys to decide how to interpret them.
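
A minimal sketch of such an evolution, assuming the one-byte trailing version
number described above (`ParseKey2`/`ParseKey3` are illustrative stand-ins like
`ParseKey` earlier, not leveldb APIs):

```c++
class VersionedComparator : public leveldb::Comparator {
 public:
  int Compare(const leveldb::Slice& a, const leveldb::Slice& b) const {
    // Each key stores its format version in its final byte.
    const unsigned char va = static_cast<unsigned char>(a.data()[a.size() - 1]);
    const unsigned char vb = static_cast<unsigned char>(b.data()[b.size() - 1]);
    int a1, a2, a3 = 0, b1, b2, b3 = 0;
    if (va == 1) ParseKey2(a, &a1, &a2); else ParseKey3(a, &a1, &a2, &a3);
    if (vb == 1) ParseKey2(b, &b1, &b2); else ParseKey3(b, &b1, &b2, &b3);
    if (a1 != b1) return a1 < b1 ? -1 : +1;
    if (a2 != b2) return a2 < b2 ? -1 : +1;
    if (a3 != b3) return a3 < b3 ? -1 : +1;  // version-1 keys compare as a3 == 0
    return 0;
  }

  // Unchanged name, so databases written with the old format still open.
  const char* Name() const { return "TwoPartComparator"; }
  void FindShortestSeparator(std::string*, const leveldb::Slice&) const {}
  void FindShortSuccessor(std::string*) const {}
};
```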

## Performance

Performance can be tuned by changing the default values of the types defined in
`include/leveldb/options.h`.

### Block size

leveldb groups adjacent keys together into the same block and such a block is
the unit of transfer to and from persistent storage. The default block size is
approximately 4096 uncompressed bytes. Applications that mostly do bulk scans
over the contents of the database may wish to increase this size. Applications
that do a lot of point reads of small values may wish to switch to a smaller
block size if performance measurements indicate an improvement. There isn't much
benefit in using blocks smaller than one kilobyte, or larger than a few
megabytes. Also note that compression will be more effective with larger block
sizes.

### Compression

Each block is individually compressed before being written to persistent
storage. Compression is on by default since the default compression method is
very fast, and is automatically disabled for uncompressible data. In rare cases,
applications may want to disable compression entirely, but should only do so if
benchmarks show a performance improvement:

```c++
leveldb::Options options;
options.compression = leveldb::kNoCompression;
... leveldb::DB::Open(options, name, ...) ....
```

### Cache

The contents of the database are stored in a set of files in the filesystem and
each file stores a sequence of compressed blocks. If options.cache is non-NULL,
it is used to cache frequently used uncompressed block contents.

```c++
#include "leveldb/cache.h"

leveldb::Options options;
options.cache = leveldb::NewLRUCache(100 * 1048576);  // 100MB cache
leveldb::DB* db;
leveldb::DB::Open(options, name, &db);
... use the db ...
delete db;
delete options.cache;
```

Note that the cache holds uncompressed data, and therefore it should be sized
according to application level data sizes, without any reduction from
compression. (Caching of compressed blocks is left to the operating system
buffer cache, or any custom Env implementation provided by the client.)

When performing a bulk read, the application may wish to disable caching so that
the data processed by the bulk read does not end up displacing most of the
cached contents. A per-iterator option can be used to achieve this:

```c++
leveldb::ReadOptions options;
options.fill_cache = false;
leveldb::Iterator* it = db->NewIterator(options);
for (it->SeekToFirst(); it->Valid(); it->Next()) {
  ...
}
```

### Key Layout

Note that the unit of disk transfer and caching is a block. Adjacent keys
(according to the database sort order) will usually be placed in the same block.
Therefore the application can improve its performance by placing keys that are
accessed together near each other and placing infrequently used keys in a
separate region of the key space.

For example, suppose we are implementing a simple file system on top of leveldb.
The types of entries we might wish to store are:

    filename -> permission-bits, length, list of file_block_ids
    file_block_id -> data

We might want to prefix filename keys with one letter (say '/') and the
`file_block_id` keys with a different letter (say '0') so that scans over just
the metadata do not force us to fetch and cache bulky file contents.
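
A minimal sketch of that prefixing (the helper names are illustrative; a real
layout would use a fixed-width encoding for the ids so they sort correctly):

```c++
#include <cstdint>
#include <string>

// '/'-prefixed keys hold metadata; '0'-prefixed keys hold block data,
// so a scan over metadata never drags file contents into the cache.
std::string MetadataKey(const std::string& filename) {
  return "/" + filename;
}

std::string BlockKey(uint64_t file_block_id) {
  return "0" + std::to_string(file_block_id);
}
```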

### Filters

Because of the way leveldb data is organized on disk, a single `Get()` call may
involve multiple reads from disk. The optional FilterPolicy mechanism can be
used to reduce the number of disk reads substantially.

```c++
leveldb::Options options;
options.filter_policy = NewBloomFilterPolicy(10);
leveldb::DB* db;
leveldb::DB::Open(options, "/tmp/testdb", &db);
... use the database ...
delete db;
delete options.filter_policy;
```

The preceding code associates a Bloom filter based filtering policy with the
database. Bloom filter based filtering relies on keeping some number of bits of
data in memory per key (in this case 10 bits per key since that is the argument
we passed to `NewBloomFilterPolicy`). This filter will reduce the number of
unnecessary disk reads needed for Get() calls by a factor of approximately 100.
Increasing the bits per key will lead to a larger reduction at the cost of more
memory usage. We recommend that applications whose working set does not fit in
memory and that do a lot of random reads set a filter policy.

If you are using a custom comparator, you should ensure that the filter policy
you are using is compatible with your comparator. For example, consider a
comparator that ignores trailing spaces when comparing keys.
`NewBloomFilterPolicy` must not be used with such a comparator. Instead, the
application should provide a custom filter policy that also ignores trailing
spaces. For example:

```c++
class CustomFilterPolicy : public leveldb::FilterPolicy {
 private:
  FilterPolicy* builtin_policy_;

 public:
  CustomFilterPolicy() : builtin_policy_(NewBloomFilterPolicy(10)) {}
  ~CustomFilterPolicy() { delete builtin_policy_; }

  const char* Name() const { return "IgnoreTrailingSpacesFilter"; }

  void CreateFilter(const Slice* keys, int n, std::string* dst) const {
    // Use builtin bloom filter code after removing trailing spaces
    std::vector<Slice> trimmed(n);
    for (int i = 0; i < n; i++) {
      trimmed[i] = RemoveTrailingSpaces(keys[i]);
    }
    builtin_policy_->CreateFilter(&trimmed[0], n, dst);
  }

  bool KeyMayMatch(const Slice& key, const Slice& filter) const {
    // Use builtin bloom filter code after removing trailing spaces
    return builtin_policy_->KeyMayMatch(RemoveTrailingSpaces(key), filter);
  }
};
```

Advanced applications may provide a filter policy that does not use a bloom
filter but uses some other mechanism for summarizing a set of keys. See
`leveldb/filter_policy.h` for detail.

## Checksums

leveldb associates checksums with all data it stores in the file system. There
are two separate controls provided over how aggressively these checksums are
verified:

`ReadOptions::verify_checksums` may be set to true to force checksum
verification of all data that is read from the file system on behalf of a
particular read. By default, no such verification is done.

`Options::paranoid_checks` may be set to true before opening a database to make
the database implementation raise an error as soon as it detects an internal
corruption. Depending on which portion of the database has been corrupted, the
error may be raised when the database is opened, or later by another database
operation. By default, paranoid checking is off so that the database can be used
even if parts of its persistent storage have been corrupted.
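
For example, both controls can be turned on like this (the key variable is a
placeholder, as in the earlier examples):

```c++
leveldb::Options options;
options.paranoid_checks = true;  // raise an error as soon as corruption is detected
leveldb::DB* db;
leveldb::DB::Open(options, "/tmp/testdb", &db);

leveldb::ReadOptions read_options;
read_options.verify_checksums = true;  // verify everything this read touches
std::string value;
leveldb::Status s = db->Get(read_options, key, &value);
```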

If a database is corrupted (perhaps it cannot be opened when paranoid checking
is turned on), the `leveldb::RepairDB` function may be used to recover as much
of the data as possible.
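
For example (RepairDB takes the database name and an Options object):

```c++
#include "leveldb/db.h"

leveldb::Options options;
leveldb::Status s = leveldb::RepairDB("/tmp/testdb", options);
if (!s.ok()) cerr << s.ToString() << endl;
```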

## Approximate Sizes

The `GetApproximateSizes` method can be used to get the approximate number of
bytes of file system space used by one or more key ranges.

```c++
leveldb::Range ranges[2];
ranges[0] = leveldb::Range("a", "c");
ranges[1] = leveldb::Range("x", "z");
uint64_t sizes[2];
db->GetApproximateSizes(ranges, 2, sizes);
```

The preceding call will set `sizes[0]` to the approximate number of bytes of
file system space used by the key range `[a..c)` and `sizes[1]` to the
approximate number of bytes used by the key range `[x..z)`.

## Environment

All file operations (and other operating system calls) issued by the leveldb
implementation are routed through a `leveldb::Env` object. Sophisticated clients
may wish to provide their own Env implementation to get better control.
For example, an application may introduce artificial delays in the file IO
paths to limit the impact of leveldb on other activities in the system.

```c++
class SlowEnv : public leveldb::Env {
  ... implementation of the Env interface ...
};

SlowEnv env;
leveldb::Options options;
options.env = &env;
Status s = leveldb::DB::Open(options, ...);
```

## Porting

leveldb may be ported to a new platform by providing platform specific
implementations of the types/methods/functions exported by
`leveldb/port/port.h`. See `leveldb/port/port_example.h` for more details.

In addition, the new platform may need a new default `leveldb::Env`
implementation. See `leveldb/util/env_posix.h` for an example.

## Other Information

Details about the leveldb implementation may be found in the following
documents:

1. [Implementation notes](impl.md)
2. [Format of an immutable Table file](table_format.md)
3. [Format of a log file](log_format.md)
75 src/leveldb/doc/log_format.md Normal file
@ -0,0 +1,75 @@
leveldb Log format
==================

The log file contents are a sequence of 32KB blocks. The only exception is that
the tail of the file may contain a partial block.

Each block consists of a sequence of records:

    block := record* trailer?
    record :=
      checksum: uint32  // crc32c of type and data[] ; little-endian
      length: uint16    // little-endian
      type: uint8       // One of FULL, FIRST, MIDDLE, LAST
      data: uint8[length]

A record never starts within the last six bytes of a block (since it won't fit).
Any leftover bytes here form the trailer, which must consist entirely of zero
bytes and must be skipped by readers.
|
||||
|
||||
Aside: if exactly seven bytes are left in the current block, and a new non-zero
|
||||
length record is added, the writer must emit a FIRST record (which contains zero
|
||||
bytes of user data) to fill up the trailing seven bytes of the block and then
|
||||
emit all of the user data in subsequent blocks.
|
||||
|
||||
More types may be added in the future. Some Readers may skip record types they
|
||||
do not understand, others may report that some data was skipped.
|
||||
|
||||
FULL == 1
|
||||
FIRST == 2
|
||||
MIDDLE == 3
|
||||
LAST == 4
|
||||
|
||||
The FULL record contains the contents of an entire user record.
|
||||
|
||||
FIRST, MIDDLE, LAST are types used for user records that have been split into
|
||||
multiple fragments (typically because of block boundaries). FIRST is the type
|
||||
of the first fragment of a user record, LAST is the type of the last fragment of
|
||||
a user record, and MIDDLE is the type of all interior fragments of a user
|
||||
record.
|
||||
|
||||
Example: consider a sequence of user records:
|
||||
|
||||
A: length 1000
|
||||
B: length 97270
|
||||
C: length 8000
|
||||
|
||||
**A** will be stored as a FULL record in the first block.
|
||||
|
||||
**B** will be split into three fragments: first fragment occupies the rest of
|
||||
the first block, second fragment occupies the entirety of the second block, and
|
||||
the third fragment occupies a prefix of the third block. This will leave six
|
||||
bytes free in the third block, which will be left empty as the trailer.
|
||||
|
||||
**C** will be stored as a FULL record in the fourth block.
|
||||
|
||||
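To make the fragmentation rule concrete, here is a small standalone sketch (not
leveldb source) that counts the fragments a record occupies under the
32KB-block, 7-byte-header rules above; it reproduces the A/B/C example:

```c++
#include <cstdint>
#include <cstdio>

constexpr uint32_t kBlockSize = 32768;
constexpr uint32_t kHeaderSize = 4 + 2 + 1;  // checksum + length + type

// Returns the number of fragments (a FULL record counts as one) needed to
// store a record of `length` bytes when `left` bytes remain in the block.
int CountFragments(uint64_t length, uint32_t left) {
  int fragments = 0;
  do {
    if (left < kHeaderSize) {  // too small for a header: becomes the trailer
      left = kBlockSize;
    }
    uint32_t avail = left - kHeaderSize;  // exactly 7 left => zero-data FIRST
    uint64_t take = length < avail ? length : avail;
    length -= take;
    left -= kHeaderSize + take;
    ++fragments;
  } while (length > 0);
  return fragments;
}

int main() {
  // Records A, B, C from the example above.
  printf("A: %d fragment(s)\n", CountFragments(1000, kBlockSize));          // 1
  printf("B: %d fragment(s)\n",
         CountFragments(97270, kBlockSize - kHeaderSize - 1000));           // 3
  printf("C: %d fragment(s)\n", CountFragments(8000, kBlockSize));          // 1
  return 0;
}
```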
----

## Some benefits over the recordio format:

1. We do not need any heuristics for resyncing - just go to next block boundary
   and scan. If there is a corruption, skip to the next block. As a
   side-benefit, we do not get confused when part of the contents of one log
   file are embedded as a record inside another log file.

2. Splitting at approximate boundaries (e.g., for mapreduce) is simple: find the
   next block boundary and skip records until we hit a FULL or FIRST record.

3. We do not need extra buffering for large records.

## Some downsides compared to recordio format:

1. No packing of tiny records. This could be fixed by adding a new record type,
   so it is a shortcoming of the current implementation, not necessarily the
   format.

2. No compression. Again, this could be fixed by adding new record types.
@ -1,75 +0,0 @@
The log file contents are a sequence of 32KB blocks. The only
exception is that the tail of the file may contain a partial block.

Each block consists of a sequence of records:
   block := record* trailer?
   record :=
      checksum: uint32     // crc32c of type and data[] ; little-endian
      length: uint16       // little-endian
      type: uint8          // One of FULL, FIRST, MIDDLE, LAST
      data: uint8[length]

A record never starts within the last six bytes of a block (since it
won't fit). Any leftover bytes here form the trailer, which must
consist entirely of zero bytes and must be skipped by readers.

Aside: if exactly seven bytes are left in the current block, and a new
non-zero length record is added, the writer must emit a FIRST record
(which contains zero bytes of user data) to fill up the trailing seven
bytes of the block and then emit all of the user data in subsequent
blocks.

More types may be added in the future. Some Readers may skip record
types they do not understand, others may report that some data was
skipped.

FULL == 1
FIRST == 2
MIDDLE == 3
LAST == 4

The FULL record contains the contents of an entire user record.

FIRST, MIDDLE, LAST are types used for user records that have been
split into multiple fragments (typically because of block boundaries).
FIRST is the type of the first fragment of a user record, LAST is the
type of the last fragment of a user record, and MIDDLE is the type of
all interior fragments of a user record.

Example: consider a sequence of user records:
   A: length 1000
   B: length 97270
   C: length 8000
A will be stored as a FULL record in the first block.

B will be split into three fragments: first fragment occupies the rest
of the first block, second fragment occupies the entirety of the
second block, and the third fragment occupies a prefix of the third
block. This will leave six bytes free in the third block, which will
be left empty as the trailer.

C will be stored as a FULL record in the fourth block.

===================

Some benefits over the recordio format:

(1) We do not need any heuristics for resyncing - just go to next
block boundary and scan. If there is a corruption, skip to the next
block. As a side-benefit, we do not get confused when part of the
contents of one log file are embedded as a record inside another log
file.

(2) Splitting at approximate boundaries (e.g., for mapreduce) is
simple: find the next block boundary and skip records until we
hit a FULL or FIRST record.

(3) We do not need extra buffering for large records.

Some downsides compared to recordio format:

(1) No packing of tiny records. This could be fixed by adding a new
record type, so it is a shortcoming of the current implementation,
not necessarily the format.

(2) No compression. Again, this could be fixed by adding new record types.
src/leveldb/doc/table_format.md (new file, 107 lines)
@ -0,0 +1,107 @@
leveldb File format
===================

    <beginning_of_file>
    [data block 1]
    [data block 2]
    ...
    [data block N]
    [meta block 1]
    ...
    [meta block K]
    [metaindex block]
    [index block]
    [Footer]        (fixed size; starts at file_size - sizeof(Footer))
    <end_of_file>

The file contains internal pointers. Each such pointer is called
a BlockHandle and contains the following information:

    offset:   varint64
    size:     varint64

See [varints](https://developers.google.com/protocol-buffers/docs/encoding#varints)
for an explanation of varint64 format.

1. The sequence of key/value pairs in the file are stored in sorted
order and partitioned into a sequence of data blocks. These blocks
come one after another at the beginning of the file. Each data block
is formatted according to the code in `block_builder.cc`, and then
optionally compressed.

2. After the data blocks we store a bunch of meta blocks. The
supported meta block types are described below. More meta block types
may be added in the future. Each meta block is again formatted using
`block_builder.cc` and then optionally compressed.

3. A "metaindex" block. It contains one entry for every other meta
block where the key is the name of the meta block and the value is a
BlockHandle pointing to that meta block.

4. An "index" block. This block contains one entry per data block,
where the key is a string >= last key in that data block and before
the first key in the successive data block. The value is the
BlockHandle for the data block.

5. At the very end of the file is a fixed length footer that contains
the BlockHandle of the metaindex and index blocks as well as a magic number.

        metaindex_handle: char[p];     // Block handle for metaindex
        index_handle:     char[q];     // Block handle for index
        padding:          char[40-p-q];// zeroed bytes to make fixed length
                                       // (40==2*BlockHandle::kMaxEncodedLength)
        magic:            fixed64;     // == 0xdb4775248b80fb57 (little-endian)

## "filter" Meta Block

If a `FilterPolicy` was specified when the database was opened, a
filter block is stored in each table. The "metaindex" block contains
an entry that maps from `filter.<N>` to the BlockHandle for the filter
block where `<N>` is the string returned by the filter policy's
`Name()` method.

The filter block stores a sequence of filters, where filter i contains
the output of `FilterPolicy::CreateFilter()` on all keys that are stored
in a block whose file offset falls within the range

    [ i*base ... (i+1)*base-1 ]

Currently, "base" is 2KB. So for example, if blocks X and Y start in
the range `[ 0KB .. 2KB-1 ]`, all of the keys in X and Y will be
converted to a filter by calling `FilterPolicy::CreateFilter()`, and the
resulting filter will be stored as the first filter in the filter
block.

The filter block is formatted as follows:

    [filter 0]
    [filter 1]
    [filter 2]
    ...
    [filter N-1]

    [offset of filter 0]                  : 4 bytes
    [offset of filter 1]                  : 4 bytes
    [offset of filter 2]                  : 4 bytes
    ...
    [offset of filter N-1]                : 4 bytes

    [offset of beginning of offset array] : 4 bytes
    lg(base)                              : 1 byte

The offset array at the end of the filter block allows efficient
mapping from a data block offset to the corresponding filter.
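As a hedged aside (not taken from the leveldb sources), the mapping is a single
shift: filter `i` covers data blocks whose offsets fall in
`[i*base, (i+1)*base)`, so:

```c++
#include <cstdint>

// Sketch: with base == 2KB, lg_base is 11, mirroring kFilterBaseLg.
uint64_t FilterIndexForBlock(uint64_t block_offset, uint8_t lg_base) {
  return block_offset >> lg_base;  // i such that offset is in [i*base, (i+1)*base)
}
```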
## "stats" Meta Block
|
||||
|
||||
This meta block contains a bunch of stats. The key is the name
|
||||
of the statistic. The value contains the statistic.
|
||||
|
||||
TODO(postrelease): record following stats.
|
||||
|
||||
data size
|
||||
index size
|
||||
key size (uncompressed)
|
||||
value size (uncompressed)
|
||||
number of entries
|
||||
number of data blocks
|
@ -1,104 +0,0 @@
File format
===========

  <beginning_of_file>
  [data block 1]
  [data block 2]
  ...
  [data block N]
  [meta block 1]
  ...
  [meta block K]
  [metaindex block]
  [index block]
  [Footer]        (fixed size; starts at file_size - sizeof(Footer))
  <end_of_file>

The file contains internal pointers. Each such pointer is called
a BlockHandle and contains the following information:
  offset:       varint64
  size:         varint64
See https://developers.google.com/protocol-buffers/docs/encoding#varints
for an explanation of varint64 format.

(1) The sequence of key/value pairs in the file are stored in sorted
order and partitioned into a sequence of data blocks. These blocks
come one after another at the beginning of the file. Each data block
is formatted according to the code in block_builder.cc, and then
optionally compressed.

(2) After the data blocks we store a bunch of meta blocks. The
supported meta block types are described below. More meta block types
may be added in the future. Each meta block is again formatted using
block_builder.cc and then optionally compressed.

(3) A "metaindex" block. It contains one entry for every other meta
block where the key is the name of the meta block and the value is a
BlockHandle pointing to that meta block.

(4) An "index" block. This block contains one entry per data block,
where the key is a string >= last key in that data block and before
the first key in the successive data block. The value is the
BlockHandle for the data block.

(6) At the very end of the file is a fixed length footer that contains
the BlockHandle of the metaindex and index blocks as well as a magic number.
       metaindex_handle: char[p];      // Block handle for metaindex
       index_handle:     char[q];      // Block handle for index
       padding:          char[40-p-q]; // zeroed bytes to make fixed length
                                       // (40==2*BlockHandle::kMaxEncodedLength)
       magic:            fixed64;      // == 0xdb4775248b80fb57 (little-endian)

"filter" Meta Block
-------------------

If a "FilterPolicy" was specified when the database was opened, a
filter block is stored in each table. The "metaindex" block contains
an entry that maps from "filter.<N>" to the BlockHandle for the filter
block where "<N>" is the string returned by the filter policy's
"Name()" method.

The filter block stores a sequence of filters, where filter i contains
the output of FilterPolicy::CreateFilter() on all keys that are stored
in a block whose file offset falls within the range

    [ i*base ... (i+1)*base-1 ]

Currently, "base" is 2KB. So for example, if blocks X and Y start in
the range [ 0KB .. 2KB-1 ], all of the keys in X and Y will be
converted to a filter by calling FilterPolicy::CreateFilter(), and the
resulting filter will be stored as the first filter in the filter
block.

The filter block is formatted as follows:

     [filter 0]
     [filter 1]
     [filter 2]
     ...
     [filter N-1]

     [offset of filter 0]                  : 4 bytes
     [offset of filter 1]                  : 4 bytes
     [offset of filter 2]                  : 4 bytes
     ...
     [offset of filter N-1]                : 4 bytes

     [offset of beginning of offset array] : 4 bytes
     lg(base)                              : 1 byte

The offset array at the end of the filter block allows efficient
mapping from a data block offset to the corresponding filter.

"stats" Meta Block
------------------

This meta block contains a bunch of stats. The key is the name
of the statistic. The value contains the statistic.
TODO(postrelease): record following stats.
  data size
  index size
  key size (uncompressed)
  value size (uncompressed)
  number of entries
  number of data blocks
@ -14,7 +14,7 @@ namespace leveldb {

// Update Makefile if you change these
static const int kMajorVersion = 1;
static const int kMinorVersion = 19;
static const int kMinorVersion = 20;

struct Options;
struct ReadOptions;
@ -112,6 +112,18 @@ struct Options {
  // Default: 16
  int block_restart_interval;

  // Leveldb will write up to this amount of bytes to a file before
  // switching to a new one.
  // Most clients should leave this parameter alone. However if your
  // filesystem is more efficient with larger files, you could
  // consider increasing the value. The downside will be longer
  // compactions and hence longer latency/performance hiccups.
  // Another reason to increase this parameter might be when you are
  // initially populating a large database.
  //
  // Default: 2MB
  size_t max_file_size;

  // Compress blocks using the specified compression algorithm. This
  // parameter can be changed dynamically.
  //
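For context, a client opts into the new knob roughly as follows (a sketch, not
part of this diff; the 4MB figure is an arbitrary example):

```c++
#include <string>
#include "leveldb/db.h"

// Sketch: open a database that writes larger table files than the 2MB default.
leveldb::DB* OpenWithBigFiles(const std::string& path) {
  leveldb::Options options;
  options.create_if_missing = true;
  options.max_file_size = 4 * 1024 * 1024;  // default is 2MB (2<<20)
  leveldb::DB* db = NULL;
  leveldb::DB::Open(options, path, &db);  // error handling elided
  return db;
}
```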
@ -129,6 +129,12 @@ extern bool Snappy_Uncompress(const char* input_data, size_t input_length,
// The concatenation of all "data[0,n-1]" fragments is the heap profile.
extern bool GetHeapProfile(void (*func)(void*, const char*, int), void* arg);

// Extend the CRC to include the first n bytes of buf.
//
// Returns zero if the CRC cannot be extended using acceleration, else returns
// the newly extended CRC value (which may also be zero).
uint32_t AcceleratedCRC32C(uint32_t crc, const char* buf, size_t size);

}  // namespace port
}  // namespace leveldb
@ -152,6 +152,8 @@ inline bool GetHeapProfile(void (*func)(void*, const char*, int), void* arg) {
  return false;
}

uint32_t AcceleratedCRC32C(uint32_t crc, const char* buf, size_t size);

}  // namespace port
}  // namespace leveldb
src/leveldb/port/port_posix_sse.cc (new file, 129 lines)
@ -0,0 +1,129 @@
// Copyright 2016 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
//
// A portable implementation of crc32c, optimized to handle
// four bytes at a time.
//
// In a separate source file to allow this accelerated CRC32C function to be
// compiled with the appropriate compiler flags to enable x86 SSE 4.2
// instructions.

#include <stdint.h>
#include <string.h>
#include "port/port.h"

#if defined(LEVELDB_PLATFORM_POSIX_SSE)

#if defined(_MSC_VER)
#include <intrin.h>
#elif defined(__GNUC__) && defined(__SSE4_2__)
#include <nmmintrin.h>
#include <cpuid.h>
#endif

#endif  // defined(LEVELDB_PLATFORM_POSIX_SSE)

namespace leveldb {
namespace port {

#if defined(LEVELDB_PLATFORM_POSIX_SSE)

// Used to fetch a naturally-aligned 32-bit word in little endian byte-order
static inline uint32_t LE_LOAD32(const uint8_t *p) {
  // SSE is x86 only, so ensured that |p| is always little-endian.
  uint32_t word;
  memcpy(&word, p, sizeof(word));
  return word;
}

#if defined(_M_X64) || defined(__x86_64__)  // LE_LOAD64 is only used on x64.

// Used to fetch a naturally-aligned 64-bit word in little endian byte-order
static inline uint64_t LE_LOAD64(const uint8_t *p) {
  uint64_t dword;
  memcpy(&dword, p, sizeof(dword));
  return dword;
}

#endif  // defined(_M_X64) || defined(__x86_64__)

static inline bool HaveSSE42() {
#if defined(_MSC_VER)
  int cpu_info[4];
  __cpuid(cpu_info, 1);
  return (cpu_info[2] & (1 << 20)) != 0;
#elif defined(__GNUC__)
  unsigned int eax, ebx, ecx, edx;
  __get_cpuid(1, &eax, &ebx, &ecx, &edx);
  return (ecx & (1 << 20)) != 0;
#else
  return false;
#endif
}

#endif  // defined(LEVELDB_PLATFORM_POSIX_SSE)

// For further improvements see Intel publication at:
// http://download.intel.com/design/intarch/papers/323405.pdf
uint32_t AcceleratedCRC32C(uint32_t crc, const char* buf, size_t size) {
#if !defined(LEVELDB_PLATFORM_POSIX_SSE)
  return 0;
#else
  static bool have = HaveSSE42();
  if (!have) {
    return 0;
  }

  const uint8_t *p = reinterpret_cast<const uint8_t *>(buf);
  const uint8_t *e = p + size;
  uint32_t l = crc ^ 0xffffffffu;

#define STEP1 do {                              \
    l = _mm_crc32_u8(l, *p++);                  \
} while (0)
#define STEP4 do {                              \
    l = _mm_crc32_u32(l, LE_LOAD32(p));         \
    p += 4;                                     \
} while (0)
#define STEP8 do {                              \
    l = _mm_crc32_u64(l, LE_LOAD64(p));         \
    p += 8;                                     \
} while (0)

  if (size > 16) {
    // Process unaligned bytes
    for (unsigned int i = reinterpret_cast<uintptr_t>(p) % 8; i; --i) {
      STEP1;
    }

    // _mm_crc32_u64 is only available on x64.
#if defined(_M_X64) || defined(__x86_64__)
    // Process 8 bytes at a time
    while ((e-p) >= 8) {
      STEP8;
    }
    // Process 4 bytes at a time
    if ((e-p) >= 4) {
      STEP4;
    }
#else  // !(defined(_M_X64) || defined(__x86_64__))
    // Process 4 bytes at a time
    while ((e-p) >= 4) {
      STEP4;
    }
#endif  // defined(_M_X64) || defined(__x86_64__)
  }
  // Process the last few bytes
  while (p != e) {
    STEP1;
  }
#undef STEP8
#undef STEP4
#undef STEP1
  return l ^ 0xffffffffu;
#endif  // defined(LEVELDB_PLATFORM_POSIX_SSE)
}

}  // namespace port
}  // namespace leveldb
@ -168,6 +168,8 @@ inline bool GetHeapProfile(void (*func)(void*, const char*, int), void* arg) {
  return false;
}

uint32_t AcceleratedCRC32C(uint32_t crc, const char* buf, size_t size);

}
}
@ -9,7 +9,7 @@

namespace leveldb {

// See doc/table_format.txt for an explanation of the filter block format.
// See doc/table_format.md for an explanation of the filter block format.

// Generate new filter every 2KB of data
static const size_t kFilterBaseLg = 11;
@ -8,6 +8,8 @@
#include "util/crc32c.h"

#include <stdint.h>

#include "port/port.h"
#include "util/coding.h"

namespace leveldb {
@ -283,7 +285,23 @@ static inline uint32_t LE_LOAD32(const uint8_t *p) {
  return DecodeFixed32(reinterpret_cast<const char*>(p));
}

// Determine if the CPU running this program can accelerate the CRC32C
// calculation.
static bool CanAccelerateCRC32C() {
  // port::AcceleratedCRC32C returns zero when unable to accelerate.
  static const char kTestCRCBuffer[] = "TestCRCBuffer";
  static const char kBufSize = sizeof(kTestCRCBuffer) - 1;
  static const uint32_t kTestCRCValue = 0xdcbc59fa;

  return port::AcceleratedCRC32C(0, kTestCRCBuffer, kBufSize) == kTestCRCValue;
}

uint32_t Extend(uint32_t crc, const char* buf, size_t size) {
  static bool accelerate = CanAccelerateCRC32C();
  if (accelerate) {
    return port::AcceleratedCRC32C(crc, buf, size);
  }

  const uint8_t *p = reinterpret_cast<const uint8_t *>(buf);
  const uint8_t *e = p + size;
  uint32_t l = crc ^ 0xffffffffu;
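For context, callers reach this accelerated path through the ordinary `crc32c`
entry points and do not change; a sketch (not part of the diff):

```c++
#include <stddef.h>
#include <stdint.h>
#include "util/crc32c.h"

// Sketch: Value() is defined as Extend(0, data, n); the SSE4.2 path is
// selected automatically on first use via the known-answer test above.
uint32_t ChecksumOf(const char* data, size_t n) {
  return leveldb::crc32c::Value(data, n);
}
```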
@ -11,12 +11,14 @@
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/resource.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <sys/types.h>
#include <time.h>
#include <unistd.h>
#include <deque>
#include <limits>
#include <set>
#include "leveldb/env.h"
#include "leveldb/slice.h"
@ -24,15 +26,70 @@
#include "util/logging.h"
#include "util/mutexlock.h"
#include "util/posix_logger.h"
#include "util/env_posix_test_helper.h"

namespace leveldb {

namespace {

static int open_read_only_file_limit = -1;
static int mmap_limit = -1;

static Status IOError(const std::string& context, int err_number) {
  return Status::IOError(context, strerror(err_number));
}

// Helper class to limit resource usage to avoid exhaustion.
// Currently used to limit read-only file descriptors and mmap file usage
// so that we do not end up running out of file descriptors, virtual memory,
// or running into kernel performance problems for very large databases.
class Limiter {
 public:
  // Limit maximum number of resources to |n|.
  Limiter(intptr_t n) {
    SetAllowed(n);
  }

  // If another resource is available, acquire it and return true.
  // Else return false.
  bool Acquire() {
    if (GetAllowed() <= 0) {
      return false;
    }
    MutexLock l(&mu_);
    intptr_t x = GetAllowed();
    if (x <= 0) {
      return false;
    } else {
      SetAllowed(x - 1);
      return true;
    }
  }

  // Release a resource acquired by a previous call to Acquire() that returned
  // true.
  void Release() {
    MutexLock l(&mu_);
    SetAllowed(GetAllowed() + 1);
  }

 private:
  port::Mutex mu_;
  port::AtomicPointer allowed_;

  intptr_t GetAllowed() const {
    return reinterpret_cast<intptr_t>(allowed_.Acquire_Load());
  }

  // REQUIRES: mu_ must be held
  void SetAllowed(intptr_t v) {
    allowed_.Release_Store(reinterpret_cast<void*>(v));
  }

  Limiter(const Limiter&);
  void operator=(const Limiter&);
};

class PosixSequentialFile: public SequentialFile {
 private:
  std::string filename_;
@ -70,87 +127,65 @@ class PosixSequentialFile: public SequentialFile {
class PosixRandomAccessFile: public RandomAccessFile {
 private:
  std::string filename_;
  bool temporary_fd_;  // If true, fd_ is -1 and we open on every read.
  int fd_;
  Limiter* limiter_;

 public:
  PosixRandomAccessFile(const std::string& fname, int fd)
      : filename_(fname), fd_(fd) { }
  virtual ~PosixRandomAccessFile() { close(fd_); }
  PosixRandomAccessFile(const std::string& fname, int fd, Limiter* limiter)
      : filename_(fname), fd_(fd), limiter_(limiter) {
    temporary_fd_ = !limiter->Acquire();
    if (temporary_fd_) {
      // Open file on every access.
      close(fd_);
      fd_ = -1;
    }
  }

  virtual ~PosixRandomAccessFile() {
    if (!temporary_fd_) {
      close(fd_);
      limiter_->Release();
    }
  }

  virtual Status Read(uint64_t offset, size_t n, Slice* result,
                      char* scratch) const {
    int fd = fd_;
    if (temporary_fd_) {
      fd = open(filename_.c_str(), O_RDONLY);
      if (fd < 0) {
        return IOError(filename_, errno);
      }
    }

    Status s;
    ssize_t r = pread(fd_, scratch, n, static_cast<off_t>(offset));
    ssize_t r = pread(fd, scratch, n, static_cast<off_t>(offset));
    *result = Slice(scratch, (r < 0) ? 0 : r);
    if (r < 0) {
      // An error: return a non-ok status
      s = IOError(filename_, errno);
    }
    if (temporary_fd_) {
      // Close the temporary file descriptor opened earlier.
      close(fd);
    }
    return s;
  }
};

// Helper class to limit mmap file usage so that we do not end up
// running out virtual memory or running into kernel performance
// problems for very large databases.
class MmapLimiter {
 public:
  // Up to 1000 mmaps for 64-bit binaries; none for smaller pointer sizes.
  MmapLimiter() {
    SetAllowed(sizeof(void*) >= 8 ? 1000 : 0);
  }

  // If another mmap slot is available, acquire it and return true.
  // Else return false.
  bool Acquire() {
    if (GetAllowed() <= 0) {
      return false;
    }
    MutexLock l(&mu_);
    intptr_t x = GetAllowed();
    if (x <= 0) {
      return false;
    } else {
      SetAllowed(x - 1);
      return true;
    }
  }

  // Release a slot acquired by a previous call to Acquire() that returned true.
  void Release() {
    MutexLock l(&mu_);
    SetAllowed(GetAllowed() + 1);
  }

 private:
  port::Mutex mu_;
  port::AtomicPointer allowed_;

  intptr_t GetAllowed() const {
    return reinterpret_cast<intptr_t>(allowed_.Acquire_Load());
  }

  // REQUIRES: mu_ must be held
  void SetAllowed(intptr_t v) {
    allowed_.Release_Store(reinterpret_cast<void*>(v));
  }

  MmapLimiter(const MmapLimiter&);
  void operator=(const MmapLimiter&);
};

// mmap() based random-access
class PosixMmapReadableFile: public RandomAccessFile {
 private:
  std::string filename_;
  void* mmapped_region_;
  size_t length_;
  MmapLimiter* limiter_;
  Limiter* limiter_;

 public:
  // base[0,length-1] contains the mmapped contents of the file.
  PosixMmapReadableFile(const std::string& fname, void* base, size_t length,
                        MmapLimiter* limiter)
                        Limiter* limiter)
      : filename_(fname), mmapped_region_(base), length_(length),
        limiter_(limiter) {
  }
@ -231,7 +266,7 @@ class PosixWritableFile : public WritableFile {
    if (fd < 0) {
      s = IOError(dir, errno);
    } else {
      if (fsync(fd) < 0) {
      if (fsync(fd) < 0 && errno != EINVAL) {
        s = IOError(dir, errno);
      }
      close(fd);
@ -333,7 +368,7 @@ class PosixEnv : public Env {
        mmap_limit_.Release();
      }
    } else {
      *result = new PosixRandomAccessFile(fname, fd);
      *result = new PosixRandomAccessFile(fname, fd, &fd_limit_);
    }
    return s;
  }
@ -533,10 +568,42 @@ class PosixEnv : public Env {
  BGQueue queue_;

  PosixLockTable locks_;
  MmapLimiter mmap_limit_;
  Limiter mmap_limit_;
  Limiter fd_limit_;
};

PosixEnv::PosixEnv() : started_bgthread_(false) {
// Return the maximum number of concurrent mmaps.
static int MaxMmaps() {
  if (mmap_limit >= 0) {
    return mmap_limit;
  }
  // Up to 1000 mmaps for 64-bit binaries; none for smaller pointer sizes.
  mmap_limit = sizeof(void*) >= 8 ? 1000 : 0;
  return mmap_limit;
}

// Return the maximum number of read-only files to keep open.
static intptr_t MaxOpenFiles() {
  if (open_read_only_file_limit >= 0) {
    return open_read_only_file_limit;
  }
  struct rlimit rlim;
  if (getrlimit(RLIMIT_NOFILE, &rlim)) {
    // getrlimit failed, fallback to hard-coded default.
    open_read_only_file_limit = 50;
  } else if (rlim.rlim_cur == RLIM_INFINITY) {
    open_read_only_file_limit = std::numeric_limits<int>::max();
  } else {
    // Allow use of 20% of available file descriptors for read-only files.
    open_read_only_file_limit = rlim.rlim_cur / 5;
  }
  return open_read_only_file_limit;
}

PosixEnv::PosixEnv()
    : started_bgthread_(false),
      mmap_limit_(MaxMmaps()),
      fd_limit_(MaxOpenFiles()) {
  PthreadCall("mutex_init", pthread_mutex_init(&mu_, NULL));
  PthreadCall("cvar_init", pthread_cond_init(&bgsignal_, NULL));
}
@ -611,6 +678,16 @@ static pthread_once_t once = PTHREAD_ONCE_INIT;
static Env* default_env;
static void InitDefaultEnv() { default_env = new PosixEnv; }

void EnvPosixTestHelper::SetReadOnlyFDLimit(int limit) {
  assert(default_env == NULL);
  open_read_only_file_limit = limit;
}

void EnvPosixTestHelper::SetReadOnlyMMapLimit(int limit) {
  assert(default_env == NULL);
  mmap_limit = limit;
}

Env* Env::Default() {
  pthread_once(&once, InitDefaultEnv);
  return default_env;
src/leveldb/util/env_posix_test.cc (new file, 66 lines)
@ -0,0 +1,66 @@
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#include "leveldb/env.h"

#include "port/port.h"
#include "util/testharness.h"
#include "util/env_posix_test_helper.h"

namespace leveldb {

static const int kDelayMicros = 100000;
static const int kReadOnlyFileLimit = 4;
static const int kMMapLimit = 4;

class EnvPosixTest {
 public:
  Env* env_;
  EnvPosixTest() : env_(Env::Default()) { }

  static void SetFileLimits(int read_only_file_limit, int mmap_limit) {
    EnvPosixTestHelper::SetReadOnlyFDLimit(read_only_file_limit);
    EnvPosixTestHelper::SetReadOnlyMMapLimit(mmap_limit);
  }
};

TEST(EnvPosixTest, TestOpenOnRead) {
  // Write some test data to a single file that will be opened |n| times.
  std::string test_dir;
  ASSERT_OK(env_->GetTestDirectory(&test_dir));
  std::string test_file = test_dir + "/open_on_read.txt";

  FILE* f = fopen(test_file.c_str(), "w");
  ASSERT_TRUE(f != NULL);
  const char kFileData[] = "abcdefghijklmnopqrstuvwxyz";
  fputs(kFileData, f);
  fclose(f);

  // Open test file some number above the sum of the two limits to force
  // open-on-read behavior of POSIX Env leveldb::RandomAccessFile.
  const int kNumFiles = kReadOnlyFileLimit + kMMapLimit + 5;
  leveldb::RandomAccessFile* files[kNumFiles] = {0};
  for (int i = 0; i < kNumFiles; i++) {
    ASSERT_OK(env_->NewRandomAccessFile(test_file, &files[i]));
  }
  char scratch;
  Slice read_result;
  for (int i = 0; i < kNumFiles; i++) {
    ASSERT_OK(files[i]->Read(i, 1, &read_result, &scratch));
    ASSERT_EQ(kFileData[i], read_result[0]);
  }
  for (int i = 0; i < kNumFiles; i++) {
    delete files[i];
  }
  ASSERT_OK(env_->DeleteFile(test_file));
}

}  // namespace leveldb

int main(int argc, char** argv) {
  // All tests currently run with the same read-only file limits.
  leveldb::EnvPosixTest::SetFileLimits(leveldb::kReadOnlyFileLimit,
                                       leveldb::kMMapLimit);
  return leveldb::test::RunAllTests();
}
src/leveldb/util/env_posix_test_helper.h (new file, 28 lines)
@ -0,0 +1,28 @@
// Copyright 2017 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#ifndef STORAGE_LEVELDB_UTIL_ENV_POSIX_TEST_HELPER_H_
#define STORAGE_LEVELDB_UTIL_ENV_POSIX_TEST_HELPER_H_

namespace leveldb {

class EnvPosixTest;

// A helper for the POSIX Env to facilitate testing.
class EnvPosixTestHelper {
 private:
  friend class EnvPosixTest;

  // Set the maximum number of read-only files that will be opened.
  // Must be called before creating an Env.
  static void SetReadOnlyFDLimit(int limit);

  // Set the maximum number of read-only files that will be mapped via mmap.
  // Must be called before creating an Env.
  static void SetReadOnlyMMapLimit(int limit);
};

}  // namespace leveldb

#endif  // STORAGE_LEVELDB_UTIL_ENV_POSIX_TEST_HELPER_H_
@ -10,29 +10,31 @@
namespace leveldb {

static const int kDelayMicros = 100000;
static const int kReadOnlyFileLimit = 4;
static const int kMMapLimit = 4;

class EnvPosixTest {
class EnvTest {
 private:
  port::Mutex mu_;
  std::string events_;

 public:
  Env* env_;
  EnvPosixTest() : env_(Env::Default()) { }
  EnvTest() : env_(Env::Default()) { }
};

static void SetBool(void* ptr) {
  reinterpret_cast<port::AtomicPointer*>(ptr)->NoBarrier_Store(ptr);
}

TEST(EnvPosixTest, RunImmediately) {
TEST(EnvTest, RunImmediately) {
  port::AtomicPointer called (NULL);
  env_->Schedule(&SetBool, &called);
  Env::Default()->SleepForMicroseconds(kDelayMicros);
  env_->SleepForMicroseconds(kDelayMicros);
  ASSERT_TRUE(called.NoBarrier_Load() != NULL);
}

TEST(EnvPosixTest, RunMany) {
TEST(EnvTest, RunMany) {
  port::AtomicPointer last_id (NULL);

  struct CB {
@ -59,7 +61,7 @@ TEST(EnvPosixTest, RunMany) {
  env_->Schedule(&CB::Run, &cb3);
  env_->Schedule(&CB::Run, &cb4);

  Env::Default()->SleepForMicroseconds(kDelayMicros);
  env_->SleepForMicroseconds(kDelayMicros);
  void* cur = last_id.Acquire_Load();
  ASSERT_EQ(4, reinterpret_cast<uintptr_t>(cur));
}
@ -78,7 +80,7 @@ static void ThreadBody(void* arg) {
  s->mu.Unlock();
}

TEST(EnvPosixTest, StartThread) {
TEST(EnvTest, StartThread) {
  State state;
  state.val = 0;
  state.num_running = 3;
@ -92,7 +94,7 @@ TEST(EnvPosixTest, StartThread) {
    if (num == 0) {
      break;
    }
    Env::Default()->SleepForMicroseconds(kDelayMicros);
    env_->SleepForMicroseconds(kDelayMicros);
  }
  ASSERT_EQ(state.val, 3);
}
@ -1,7 +1,7 @@
// This file contains source that originates from:
// http://code.google.com/p/leveldbwin/source/browse/trunk/win32_impl_src/env_win32.h
// http://code.google.com/p/leveldbwin/source/browse/trunk/win32_impl_src/port_win32.cc
// Those files dont' have any explict license headers but the
// Those files don't have any explicit license headers but the
// project (http://code.google.com/p/leveldbwin/) lists the 'New BSD License'
// as the license.
#if defined(LEVELDB_PLATFORM_WINDOWS)
@ -355,11 +355,13 @@ BOOL Win32SequentialFile::_Init()
    ToWidePath(_filename, path);
    _hFile = CreateFileW(path.c_str(),
                         GENERIC_READ,
                         FILE_SHARE_READ,
                         FILE_SHARE_READ | FILE_SHARE_WRITE,
                         NULL,
                         OPEN_EXISTING,
                         FILE_ATTRIBUTE_NORMAL,
                         FILE_ATTRIBUTE_NORMAL | FILE_FLAG_SEQUENTIAL_SCAN,
                         NULL);
    if (_hFile == INVALID_HANDLE_VALUE)
        _hFile = NULL;
    return _hFile ? TRUE : FALSE;
}
@ -403,7 +405,7 @@ BOOL Win32RandomAccessFile::_Init( LPCWSTR path )
{
    BOOL bRet = FALSE;
    if(!_hFile)
        _hFile = ::CreateFileW(path,GENERIC_READ,FILE_SHARE_READ,NULL,OPEN_EXISTING,
        _hFile = ::CreateFileW(path,GENERIC_READ,FILE_SHARE_READ|FILE_SHARE_WRITE,NULL,OPEN_EXISTING,
                               FILE_ATTRIBUTE_NORMAL | FILE_FLAG_RANDOM_ACCESS,NULL);
    if(!_hFile || _hFile == INVALID_HANDLE_VALUE )
        _hFile = NULL;
@ -669,7 +671,7 @@ Status Win32Env::GetFileSize( const std::string& fname, uint64_t* file_size )
    ToWidePath(ModifyPath(path), wpath);

    HANDLE file = ::CreateFileW(wpath.c_str(),
        GENERIC_READ,FILE_SHARE_READ,NULL,OPEN_EXISTING,FILE_ATTRIBUTE_NORMAL,NULL);
        GENERIC_READ,FILE_SHARE_READ|FILE_SHARE_WRITE,NULL,OPEN_EXISTING,FILE_ATTRIBUTE_NORMAL,NULL);
    LARGE_INTEGER li;
    if(::GetFileSizeEx(file,&li)){
        *file_size = (uint64_t)li.QuadPart;
@ -21,6 +21,7 @@ Options::Options()
      block_cache(NULL),
      block_size(4096),
      block_restart_interval(16),
      max_file_size(2<<20),
      compression(kSnappyCompression),
      reuse_logs(false),
      filter_policy(NULL) {
@ -275,7 +275,7 @@ bool BlockAssembler::TestPackage(uint64_t packageSize, unsigned int packageSigOp
// - safe TXs in regard to ChainLocks
bool BlockAssembler::TestPackageTransactions(const CTxMemPool::setEntries& package)
{
    BOOST_FOREACH (const CTxMemPool::txiter it, package) {
    for (const CTxMemPool::txiter it : package) {
        if (!IsFinalTx(it->GetTx(), nHeight, nLockTimeCutoff))
            return false;
        if (!llmq::chainLocksHandler->IsTxSafeForMining(it->GetTx().GetHash())) {
@ -308,11 +308,11 @@ int BlockAssembler::UpdatePackagesForAdded(const CTxMemPool::setEntries& already
        indexed_modified_transaction_set &mapModifiedTx)
{
    int nDescendantsUpdated = 0;
    BOOST_FOREACH(const CTxMemPool::txiter it, alreadyAdded) {
    for (const CTxMemPool::txiter it : alreadyAdded) {
        CTxMemPool::setEntries descendants;
        mempool.CalculateDescendants(it, descendants);
        // Insert all descendants (not yet in block) into the modified set
        BOOST_FOREACH(CTxMemPool::txiter desc, descendants) {
        for (CTxMemPool::txiter desc : descendants) {
            if (alreadyAdded.count(desc))
                continue;
            ++nDescendantsUpdated;
@ -17,9 +17,7 @@
class CBlockIndex;
class CChainParams;
class CConnman;
class CReserveKey;
class CScript;
class CWallet;

namespace Consensus { struct Params; };
src/net.cpp (62 lines changed)
@ -305,7 +305,7 @@ bool IsReachable(const CNetAddr& addr)
CNode* CConnman::FindNode(const CNetAddr& ip)
{
    LOCK(cs_vNodes);
    BOOST_FOREACH(CNode* pnode, vNodes)
    for (CNode* pnode : vNodes)
        if ((CNetAddr)pnode->addr == ip)
            return (pnode);
    return NULL;
@ -314,7 +314,7 @@ CNode* CConnman::FindNode(const CNetAddr& ip)
CNode* CConnman::FindNode(const CSubNet& subNet)
{
    LOCK(cs_vNodes);
    BOOST_FOREACH(CNode* pnode, vNodes)
    for (CNode* pnode : vNodes)
        if (subNet.Match((CNetAddr)pnode->addr))
            return (pnode);
    return NULL;
@ -323,7 +323,7 @@ CNode* CConnman::FindNode(const CSubNet& subNet)
CNode* CConnman::FindNode(const std::string& addrName)
{
    LOCK(cs_vNodes);
    BOOST_FOREACH(CNode* pnode, vNodes) {
    for (CNode* pnode : vNodes) {
        if (pnode->GetAddrName() == addrName) {
            return (pnode);
        }
@ -334,7 +334,7 @@ CNode* CConnman::FindNode(const std::string& addrName)
CNode* CConnman::FindNode(const CService& addr)
{
    LOCK(cs_vNodes);
    BOOST_FOREACH(CNode* pnode, vNodes)
    for (CNode* pnode : vNodes)
        if((CService)pnode->addr == addr)
            return (pnode);
    return NULL;
@ -343,7 +343,7 @@ CNode* CConnman::FindNode(const CService& addr)
bool CConnman::CheckIncomingNonce(uint64_t nonce)
{
    LOCK(cs_vNodes);
    BOOST_FOREACH(CNode* pnode, vNodes) {
    for (CNode* pnode : vNodes) {
        if (!pnode->fSuccessfullyConnected && !pnode->fInbound && pnode->GetLocalNonce() == nonce)
            return false;
    }
@ -537,7 +537,7 @@ void CConnman::Ban(const CSubNet& subNet, const BanReason &banReason, int64_t ba
        clientInterface->BannedListChanged();
    {
        LOCK(cs_vNodes);
        BOOST_FOREACH(CNode* pnode, vNodes) {
        for (CNode* pnode : vNodes) {
            if (subNet.Match((CNetAddr)pnode->addr))
                pnode->fDisconnect = true;
        }
@ -615,7 +615,7 @@ void CConnman::SetBannedSetDirty(bool dirty)

bool CConnman::IsWhitelistedRange(const CNetAddr &addr) {
    LOCK(cs_vWhitelistedRange);
    BOOST_FOREACH(const CSubNet& subnet, vWhitelistedRange) {
    for (const CSubNet& subnet : vWhitelistedRange) {
        if (subnet.Match(addr))
            return true;
    }
@ -986,7 +986,7 @@ bool CConnman::AttemptToEvictConnection()
    {
        LOCK(cs_vNodes);

        BOOST_FOREACH(CNode *node, vNodes) {
        for (CNode *node : vNodes) {
            if (node->fWhitelisted)
                continue;
            if (!node->fInbound)
@ -1066,7 +1066,7 @@ bool CConnman::AttemptToEvictConnection()
    unsigned int nMostConnections = 0;
    int64_t nMostConnectionsTime = 0;
    std::map<uint64_t, std::vector<NodeEvictionCandidate> > mapNetGroupNodes;
    BOOST_FOREACH(const NodeEvictionCandidate &node, vEvictionCandidates) {
    for (const NodeEvictionCandidate &node : vEvictionCandidates) {
        mapNetGroupNodes[node.nKeyedNetGroup].push_back(node);
        int64_t grouptime = mapNetGroupNodes[node.nKeyedNetGroup][0].nTimeConnected;
        size_t groupsize = mapNetGroupNodes[node.nKeyedNetGroup].size();
@ -1111,7 +1111,7 @@ void CConnman::AcceptConnection(const ListenSocket& hListenSocket) {
    bool whitelisted = hListenSocket.whitelisted || IsWhitelistedRange(addr);
    {
        LOCK(cs_vNodes);
        BOOST_FOREACH(CNode* pnode, vNodes) {
        for (CNode* pnode : vNodes) {
            if (pnode->fInbound) {
                nInbound++;
                if (!pnode->verifiedProRegTxHash.IsNull()) {
@ -1206,7 +1206,7 @@ void CConnman::ThreadSocketHandler()
            LOCK(cs_vNodes);
            // Disconnect unused nodes
            std::vector<CNode*> vNodesCopy = vNodes;
            BOOST_FOREACH(CNode* pnode, vNodesCopy)
            for (CNode* pnode : vNodesCopy)
            {
                if (pnode->fDisconnect)
                {
@ -1232,7 +1232,7 @@ void CConnman::ThreadSocketHandler()
        {
            // Delete disconnected nodes
            std::list<CNode*> vNodesDisconnectedCopy = vNodesDisconnected;
            BOOST_FOREACH(CNode* pnode, vNodesDisconnectedCopy)
            for (CNode* pnode : vNodesDisconnectedCopy)
            {
                // wait until threads are done using it
                if (pnode->GetRefCount() <= 0) {
@ -1292,7 +1292,7 @@ void CConnman::ThreadSocketHandler()
        have_fds = true;
#endif

        BOOST_FOREACH(const ListenSocket& hListenSocket, vhListenSocket) {
        for (const ListenSocket& hListenSocket : vhListenSocket) {
            FD_SET(hListenSocket.socket, &fdsetRecv);
            hSocketMax = std::max(hSocketMax, hListenSocket.socket);
            have_fds = true;
@ -1300,7 +1300,7 @@ void CConnman::ThreadSocketHandler()

        {
            LOCK(cs_vNodes);
            BOOST_FOREACH(CNode* pnode, vNodes)
            for (CNode* pnode : vNodes)
            {
                // Implement the following logic:
                // * If there is data to send, select() for sending data. As this only
@ -1377,7 +1377,7 @@ void CConnman::ThreadSocketHandler()
        //
        // Accept new connections
        //
        BOOST_FOREACH(const ListenSocket& hListenSocket, vhListenSocket)
        for (const ListenSocket& hListenSocket : vhListenSocket)
        {
            if (hListenSocket.socket != INVALID_SOCKET && FD_ISSET(hListenSocket.socket, &fdsetRecv))
            {
@ -1389,7 +1389,7 @@ void CConnman::ThreadSocketHandler()
        // Service each socket
        //
        std::vector<CNode*> vNodesCopy = CopyNodeVector();
        BOOST_FOREACH(CNode* pnode, vNodesCopy)
        for (CNode* pnode : vNodesCopy)
        {
            if (interruptNet)
                return;
@ -1703,7 +1703,7 @@ void CConnman::ThreadDNSAddressSeed()

    LogPrintf("Loading addresses from DNS seeds (could take a while)\n");

    BOOST_FOREACH(const CDNSSeedData &seed, vSeeds) {
    for (const CDNSSeedData &seed : vSeeds) {
        if (interruptNet) {
            return;
        }
@ -1715,7 +1715,7 @@ void CConnman::ThreadDNSAddressSeed()
            ServiceFlags requiredServiceBits = nRelevantServices;
            if (LookupHost(GetDNSHost(seed, &requiredServiceBits).c_str(), vIPs, 0, true))
            {
                BOOST_FOREACH(const CNetAddr& ip, vIPs)
                for (const CNetAddr& ip : vIPs)
                {
                    int nOneDay = 24*3600;
                    CAddress addr = CAddress(CService(ip, Params().GetDefaultPort()), requiredServiceBits);
@ -1796,7 +1796,7 @@ void CConnman::ThreadOpenConnections()
        for (int64_t nLoop = 0;; nLoop++)
        {
            ProcessOneShot();
            BOOST_FOREACH(const std::string& strAddr, gArgs.GetArgs("-connect"))
            for (const std::string& strAddr : gArgs.GetArgs("-connect"))
            {
                CAddress addr(CService(), NODE_NONE);
                OpenNetworkConnection(addr, false, NULL, strAddr.c_str());
@ -1852,7 +1852,7 @@ void CConnman::ThreadOpenConnections()
        std::set<std::vector<unsigned char> > setConnected;
        if (!Params().AllowMultipleAddressesFromGroup()) {
            LOCK(cs_vNodes);
            BOOST_FOREACH(CNode* pnode, vNodes) {
            for (CNode* pnode : vNodes) {
                if (!pnode->fInbound && !pnode->fAddnode && !pnode->fMasternode) {

                    // Count the peers that have all relevant services
@ -1978,7 +1978,7 @@ std::vector<AddedNodeInfo> CConnman::GetAddedNodeInfo()
    {
        LOCK(cs_vAddedNodes);
        ret.reserve(vAddedNodes.size());
        BOOST_FOREACH(const std::string& strAddNode, vAddedNodes)
        for (const std::string& strAddNode : vAddedNodes)
            lAddresses.push_back(strAddNode);
    }

@ -1999,7 +1999,7 @@ std::vector<AddedNodeInfo> CConnman::GetAddedNodeInfo()
        }
    }

    BOOST_FOREACH(const std::string& strAddNode, lAddresses) {
    for (const std::string& strAddNode : lAddresses) {
        CService service(LookupNumeric(strAddNode.c_str(), Params().GetDefaultPort()));
        if (service.IsValid()) {
            // strAddNode is an IP:port
@ -2203,7 +2203,7 @@ void CConnman::ThreadMessageHandler()

        bool fMoreWork = false;

        BOOST_FOREACH(CNode* pnode, vNodesCopy)
        for (CNode* pnode : vNodesCopy)
        {
            if (pnode->fDisconnect)
                continue;
@ -2349,7 +2349,7 @@ void Discover(boost::thread_group& threadGroup)
        std::vector<CNetAddr> vaddr;
        if (LookupHost(pszHostName, vaddr, 0, true))
        {
            BOOST_FOREACH (const CNetAddr &addr, vaddr)
            for (const CNetAddr &addr : vaddr)
            {
                if (AddLocal(addr, LOCAL_IF))
                    LogPrintf("%s: %s - %s\n", __func__, pszHostName, addr.ToString());
@ -2396,7 +2396,7 @@ void CConnman::SetNetworkActive(bool active)

        LOCK(cs_vNodes);
        // Close sockets to all nodes
        BOOST_FOREACH(CNode* pnode, vNodes) {
        for (CNode* pnode : vNodes) {
            pnode->CloseSocketDisconnect();
        }
    } else {
@ -2642,18 +2642,18 @@ void CConnman::Stop()
    }

    // Close sockets
    BOOST_FOREACH(CNode* pnode, vNodes)
    for (CNode* pnode : vNodes)
        pnode->CloseSocketDisconnect();
    BOOST_FOREACH(ListenSocket& hListenSocket, vhListenSocket)
    for (ListenSocket& hListenSocket : vhListenSocket)
        if (hListenSocket.socket != INVALID_SOCKET)
            if (!CloseSocket(hListenSocket.socket))
                LogPrintf("CloseSocket(hListenSocket) failed with error %s\n", NetworkErrorString(WSAGetLastError()));

    // clean up some globals (to help leak detection)
    BOOST_FOREACH(CNode *pnode, vNodes) {
    for (CNode *pnode : vNodes) {
        DeleteNode(pnode);
    }
    BOOST_FOREACH(CNode *pnode, vNodesDisconnected) {
    for (CNode *pnode : vNodesDisconnected) {
        DeleteNode(pnode);
    }
    vNodes.clear();
@ -2894,7 +2894,7 @@ void CConnman::RelayTransaction(const CTransaction& tx)
    }
    CInv inv(nInv, hash);
    LOCK(cs_vNodes);
    BOOST_FOREACH(CNode* pnode, vNodes)
    for (CNode* pnode : vNodes)
    {
        pnode->PushInventory(inv);
    }
@ -3135,7 +3135,7 @@ CNode::CNode(NodeId idIn, ServiceFlags nLocalServicesIn, int nMyStartingHeightIn
    fPauseSend = false;
    nProcessQueueSize = 0;

    BOOST_FOREACH(const std::string &msg, getAllNetMessageTypes())
    for (const std::string &msg : getAllNetMessageTypes())
        mapRecvBytesPerMsgCmd[msg] = 0;
    mapRecvBytesPerMsgCmd[NET_MESSAGE_COMMAND_OTHER] = 0;

@ -125,7 +125,6 @@ struct AddedNodeInfo
    bool fInbound;
};

class CTransaction;
class CNodeStats;
class CClientUIInterface;
@ -323,7 +323,7 @@ void FinalizeNode(NodeId nodeid, bool& fUpdateConnectionTime) {
        fUpdateConnectionTime = true;
    }

    BOOST_FOREACH(const QueuedBlock& entry, state->vBlocksInFlight) {
    for (const QueuedBlock& entry : state->vBlocksInFlight) {
        mapBlocksInFlight.erase(entry.hash);
    }
    EraseOrphansFor(nodeid);
@ -377,7 +377,9 @@ bool MarkBlockAsInFlight(NodeId nodeid, const uint256& hash, const CBlockIndex *
    // Short-circuit most stuff in case it's from the same node
    std::map<uint256, std::pair<NodeId, std::list<QueuedBlock>::iterator> >::iterator itInFlight = mapBlocksInFlight.find(hash);
    if (itInFlight != mapBlocksInFlight.end() && itInFlight->second.first == nodeid) {
        *pit = &itInFlight->second.second;
        if (pit) {
            *pit = &itInFlight->second.second;
        }
        return false;
    }

@ -558,7 +560,7 @@ void FindNextBlocksToDownload(NodeId nodeid, unsigned int count, std::vector<con
    // are not yet downloaded and not in flight to vBlocks. In the mean time, update
    // pindexLastCommonBlock as long as all ancestors are already downloaded, or if it's
    // already part of our chain (and therefore don't need it even if pruned).
    BOOST_FOREACH(const CBlockIndex* pindex, vToFetch) {
    for (const CBlockIndex* pindex : vToFetch) {
        if (!pindex->IsValid(BLOCK_VALID_TREE)) {
            // We consider the chain that this peer is on invalid.
            return;
@ -598,7 +600,7 @@ bool GetNodeStateStats(NodeId nodeid, CNodeStateStats &stats) {
    stats.nMisbehavior = state->nMisbehavior;
    stats.nSyncHeight = state->pindexBestKnownBlock ? state->pindexBestKnownBlock->nHeight : -1;
    stats.nCommonHeight = state->pindexLastCommonBlock ? state->pindexLastCommonBlock->nHeight : -1;
    BOOST_FOREACH(const QueuedBlock& queue, state->vBlocksInFlight) {
    for (const QueuedBlock& queue : state->vBlocksInFlight) {
        if (queue.pindex)
            stats.vHeightInFlight.push_back(queue.pindex->nHeight);
    }
@ -659,7 +661,7 @@ bool AddOrphanTx(const CTransactionRef& tx, NodeId peer) EXCLUSIVE_LOCKS_REQUIRE

    auto ret = mapOrphanTransactions.emplace(hash, COrphanTx{tx, peer, GetTime() + ORPHAN_TX_EXPIRE_TIME});
    assert(ret.second);
    BOOST_FOREACH(const CTxIn& txin, tx->vin) {
    for (const CTxIn& txin : tx->vin) {
        mapOrphanTransactionsByPrev[txin.prevout].insert(ret.first);
    }

@ -675,7 +677,7 @@ int static EraseOrphanTx(uint256 hash) EXCLUSIVE_LOCKS_REQUIRED(g_cs_orphans)
    std::map<uint256, COrphanTx>::iterator it = mapOrphanTransactions.find(hash);
    if (it == mapOrphanTransactions.end())
        return 0;
    BOOST_FOREACH(const CTxIn& txin, it->second.tx->vin)
    for (const CTxIn& txin : it->second.tx->vin)
    {
        auto itPrev = mapOrphanTransactionsByPrev.find(txin.prevout);
        if (itPrev == mapOrphanTransactionsByPrev.end())
@ -826,7 +828,7 @@ void PeerLogicValidation::BlockConnected(const std::shared_ptr<const CBlock>& pb
    // Erase orphan transactions included or precluded by this block
    if (vOrphanErase.size()) {
        int nErased = 0;
        BOOST_FOREACH(uint256 &orphanHash, vOrphanErase) {
        for (uint256 &orphanHash : vOrphanErase) {
            nErased += EraseOrphanTx(orphanHash);
        }
        LogPrint(BCLog::MEMPOOL, "Erased %d orphan tx included or conflicted by block\n", nErased);
@ -1147,7 +1149,7 @@ void static ProcessGetBlockData(CNode* pfrom, const Consensus::Params& consensus
            // Thus, the protocol spec specified allows for us to provide duplicate txn here,
            // however we MUST always provide at least what the remote peer needs
            typedef std::pair<unsigned int, uint256> PairType;
            BOOST_FOREACH(PairType& pair, merkleBlock.vMatchedTxn)
            for (PairType& pair : merkleBlock.vMatchedTxn)
                connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::TX, *pblock->vtx[pair.first]));
        }
        // else
@ -1708,7 +1710,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
        std::vector<CAddress> vAddrOk;
        int64_t nNow = GetAdjustedTime();
        int64_t nSince = nNow - 10 * 60;
        BOOST_FOREACH(CAddress& addr, vAddr)
        for (CAddress& addr : vAddr)
        {
            if (interruptMsgProc)
                return true;
@ -2216,20 +2218,20 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
                }
            }

            BOOST_FOREACH(uint256 hash, vEraseQueue)
            for (uint256 hash : vEraseQueue)
                EraseOrphanTx(hash);
        }
        else if (fMissingInputs)
        {
            bool fRejectedParents = false; // It may be the case that the orphan's parents have all been rejected
            BOOST_FOREACH(const CTxIn& txin, tx.vin) {
            for (const CTxIn& txin : tx.vin) {
                if (recentRejects->contains(txin.prevout.hash)) {
                    fRejectedParents = true;
                    break;
                }
            }
            if (!fRejectedParents) {
                BOOST_FOREACH(const CTxIn& txin, tx.vin) {
                for (const CTxIn& txin : tx.vin) {
                    CInv _inv(MSG_TX, txin.prevout.hash);
                    pfrom->AddInventoryKnown(_inv);
                    if (!AlreadyHave(_inv)) pfrom->AskFor(_inv);
@ -2748,7 +2750,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
        pfrom->vAddrToSend.clear();
        std::vector<CAddress> vAddr = connman.GetAddresses();
        FastRandomContext insecure_rand;
        BOOST_FOREACH(const CAddress &addr, vAddr)
        for (const CAddress &addr : vAddr)
            pfrom->PushAddress(addr, insecure_rand);
    }

@ -2944,7 +2946,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
    else {
        bool found = false;
        const std::vector<std::string> &allMessages = getAllNetMessageTypes();
        BOOST_FOREACH(const std::string msg, allMessages) {
        for (const std::string msg : allMessages) {
            if(msg == strCommand) {
                found = true;
                break;
@ -2984,7 +2986,7 @@ static bool SendRejectsAndCheckIfBanned(CNode* pnode, CConnman& connman)
    AssertLockHeld(cs_main);
    CNodeState &state = *State(pnode->GetId());

    BOOST_FOREACH(const CBlockReject& reject, state.rejects) {
    for (const CBlockReject& reject : state.rejects) {
        connman.PushMessage(pnode, CNetMsgMaker(INIT_PROTO_VERSION).Make(NetMsgType::REJECT, (std::string)NetMsgType::BLOCK, reject.chRejectCode, reject.strRejectReason, reject.hashBlock));
    }
    state.rejects.clear();
@ -3205,7 +3207,7 @@ bool SendMessages(CNode* pto, CConnman& connman, const std::atomic<bool>& interr
        pto->nNextAddrSend = PoissonNextSend(nNow, AVG_ADDRESS_BROADCAST_INTERVAL);
        std::vector<CAddress> vAddr;
        vAddr.reserve(pto->vAddrToSend.size());
        BOOST_FOREACH(const CAddress& addr, pto->vAddrToSend)
        for (const CAddress& addr : pto->vAddrToSend)
        {
            if (!pto->addrKnown.contains(addr.GetKey()))
            {
@ -3284,7 +3286,7 @@ bool SendMessages(CNode* pto, CConnman& connman, const std::atomic<bool>& interr
        // Try to find first header that our peer doesn't have, and
        // then send all headers past that one. If we come across any
        // headers that aren't on chainActive, give up.
        BOOST_FOREACH(const uint256 &hash, pto->vBlockHashesToAnnounce) {
        for (const uint256 &hash : pto->vBlockHashesToAnnounce) {
            BlockMap::iterator mi = mapBlockIndex.find(hash);
            assert(mi != mapBlockIndex.end());
            const CBlockIndex *pindex = mi->second;
@ -3412,7 +3414,7 @@ bool SendMessages(CNode* pto, CConnman& connman, const std::atomic<bool>& interr
        vInv.reserve(std::max<size_t>(pto->vInventoryBlockToSend.size(), INVENTORY_BROADCAST_MAX_PER_1MB_BLOCK * MaxBlockSize(true) / 1000000));

        // Add blocks
        BOOST_FOREACH(const uint256& hash, pto->vInventoryBlockToSend) {
        for (const uint256& hash : pto->vInventoryBlockToSend) {
            vInv.push_back(CInv(MSG_BLOCK, hash));
            if (vInv.size() == MAX_INV_SZ) {
                connman.PushMessage(pto, msgMaker.Make(NetMsgType::INV, vInv));
@ -3606,7 +3608,7 @@
        std::vector<const CBlockIndex*> vToDownload;
|
||||
NodeId staller = -1;
|
||||
FindNextBlocksToDownload(pto->GetId(), MAX_BLOCKS_IN_TRANSIT_PER_PEER - state.nBlocksInFlight, vToDownload, staller, consensusParams);
|
||||
BOOST_FOREACH(const CBlockIndex *pindex, vToDownload) {
|
||||
for (const CBlockIndex *pindex : vToDownload) {
|
||||
vGetData.push_back(CInv(MSG_BLOCK, pindex->GetBlockHash()));
|
||||
MarkBlockAsInFlight(pto->GetId(), pindex->GetBlockHash(), pindex);
|
||||
LogPrint(BCLog::NET, "Requesting block %s (%d) peer=%d\n", pindex->GetBlockHash().ToString(),
|
||||
|
@ -92,7 +92,7 @@ bool IsStandardTx(const CTransaction& tx, std::string& reason)
|
||||
return false;
|
||||
}
|
||||
|
||||
BOOST_FOREACH(const CTxIn& txin, tx.vin)
|
||||
for (const CTxIn& txin : tx.vin)
|
||||
{
|
||||
// Biggest 'standard' txin is a 15-of-15 P2SH multisig with compressed
|
||||
// keys (remember the 520 byte limit on redeemScript size). That works
|
||||
@ -113,7 +113,7 @@ bool IsStandardTx(const CTransaction& tx, std::string& reason)
|
||||
|
||||
unsigned int nDataOut = 0;
|
||||
txnouttype whichType;
|
||||
BOOST_FOREACH(const CTxOut& txout, tx.vout) {
|
||||
for (const CTxOut& txout : tx.vout) {
|
||||
if (!::IsStandard(txout.scriptPubKey, whichType)) {
|
||||
reason = "scriptpubkey";
|
||||
return false;
|
||||
|
@ -387,6 +387,12 @@ public:
|
||||
}
|
||||
|
||||
iterator erase(iterator first, iterator last) {
|
||||
// Erase is not allowed to the change the object's capacity. That means
|
||||
// that when starting with an indirectly allocated prevector with
|
||||
// size and capacity > N, the result may be a still indirectly allocated
|
||||
// prevector with size <= N and capacity > N. A shrink_to_fit() call is
|
||||
// necessary to switch to the (more efficient) directly allocated
|
||||
// representation (with capacity N and size <= N).
|
||||
iterator p = first;
|
||||
char* endp = (char*)&(*end());
|
||||
if (!std::is_trivially_destructible<T>::value) {
|
||||
|
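The comment added to prevector::erase above documents a subtle point: erasing never changes capacity, so an indirectly allocated prevector stays heap-backed until shrink_to_fit() is called. A hedged sketch of that behaviour (assumes this revision's prevector.h, where N is the inline capacity):

```cpp
#include "prevector.h"

void prevector_erase_example()
{
    prevector<8, int> v;              // direct (inline) storage for up to N = 8 items
    for (int i = 0; i < 32; ++i)
        v.push_back(i);               // grows past N: switches to indirect (heap) storage
    v.erase(v.begin() + 4, v.end());  // size is now 4, but capacity remains 32
    v.shrink_to_fit();                // per the comment: switches back to direct storage
}
```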
@ -280,7 +280,7 @@ void AddressBookPage::done(int retval)
// Figure out which address was selected, and return it
QModelIndexList indexes = table->selectionModel()->selectedRows(AddressTableModel::Address);

Q_FOREACH (const QModelIndex& index, indexes) {
for (const QModelIndex& index : indexes) {
QVariant address = table->model()->data(index);
returnValue = address.toString();
}
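The Qt files get the same treatment with Q_FOREACH. One nuance: Q_FOREACH iterates over a cheap copy of the implicitly shared container, whereas a range-for on a non-const Qt container can detach it; looping over a const container or reference, as the converted code above does, keeps the copy-free behaviour. A minimal sketch (illustrative, assumes Qt 5):

```cpp
#include <QDebug>
#include <QList>
#include <QString>

void printAll(const QList<QString>& items)
{
    // const container: the const begin()/end() overloads are chosen, so the
    // shared data is not detached -- comparable to Q_FOREACH iterating over
    // its internal shallow copy.
    for (const QString& s : items)
        qDebug() << s;
}
```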
@ -8,7 +8,6 @@
#include <QDialog>

class AddressTableModel;
class OptionsModel;
class PlatformStyle;

namespace Ui {
@ -20,7 +19,6 @@ class QItemSelection;
class QMenu;
class QModelIndex;
class QSortFilterProxyModel;
class QTableView;
QT_END_NAMESPACE

/** Widget that shows a list of sending or receiving addresses.

@ -82,7 +82,7 @@ public:
cachedAddressTable.clear();
{
LOCK(wallet->cs_wallet);
BOOST_FOREACH(const PAIRTYPE(CTxDestination, CAddressBookData)& item, wallet->mapAddressBook)
for (const std::pair<CTxDestination, CAddressBookData>& item : wallet->mapAddressBook)
{
const CBitcoinAddress& address = item.first;
bool fMine = IsMine(*wallet, address.Get());
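Hunks like the one above also retire the PAIRTYPE helper. The macro existed only because BOOST_FOREACH is itself a macro: a bare std::pair<A, B> in its argument list is split at the comma by the preprocessor. Range-for is real syntax, so the workaround goes away. A sketch of the before and after (PAIRTYPE as defined in util.h):

```cpp
#include <map>
#include <string>

#define PAIRTYPE(t1, t2) std::pair<t1, t2> // the util.h definition

void iterate(const std::map<std::string, int>& m)
{
    // Old: BOOST_FOREACH(const PAIRTYPE(std::string, int)& item, m) { ... }
    // New: the comma in the template argument list needs no macro shield.
    for (const std::pair<std::string, int>& item : m)
        (void)item; // placeholder body
}
```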
@ -1254,7 +1254,7 @@ void BitcoinGUI::dropEvent(QDropEvent *event)
{
if(event->mimeData()->hasUrls())
{
Q_FOREACH(const QUrl &uri, event->mimeData()->urls())
for (const QUrl &uri : event->mimeData()->urls())
{
Q_EMIT receivedURI(uri.toString());
}
@ -1476,7 +1476,7 @@ UnitDisplayStatusBarControl::UnitDisplayStatusBarControl(const PlatformStyle *pl
QList<BitcoinUnits::Unit> units = BitcoinUnits::availableUnits();
int max_width = 0;
const QFontMetrics fm(font());
Q_FOREACH (const BitcoinUnits::Unit unit, units)
for (const BitcoinUnits::Unit unit : units)
{
max_width = qMax(max_width, fm.width(BitcoinUnits::name(unit)));
}
@ -1495,7 +1495,7 @@ void UnitDisplayStatusBarControl::mousePressEvent(QMouseEvent *event)
void UnitDisplayStatusBarControl::createContextMenu()
{
menu = new QMenu(this);
Q_FOREACH(BitcoinUnits::Unit u, BitcoinUnits::availableUnits())
for (BitcoinUnits::Unit u : BitcoinUnits::availableUnits())
{
QAction *menuAction = new QAction(QString(BitcoinUnits::name(u)), this);
menuAction->setData(QVariant(u));

@ -32,8 +32,6 @@ class WalletModel;
class HelpMessageDialog;
class ModalOverlay;

class CWallet;

QT_BEGIN_NAMESPACE
class QAction;
class QProgressBar;

@ -14,13 +14,10 @@

#include <atomic>

class AddressTableModel;
class BanTableModel;
class OptionsModel;
class PeerTableModel;
class TransactionTableModel;

class CWallet;
class CBlockIndex;

QT_BEGIN_NAMESPACE

@ -478,7 +478,7 @@ void CoinControlDialog::updateLabels(WalletModel *model, QDialog* dialog)
CAmount nPayAmount = 0;
bool fDust = false;
CMutableTransaction txDummy;
Q_FOREACH(const CAmount &amount, CoinControlDialog::payAmounts)
for (const CAmount &amount : CoinControlDialog::payAmounts)
{
nPayAmount += amount;

@ -504,7 +504,7 @@ void CoinControlDialog::updateLabels(WalletModel *model, QDialog* dialog)
coinControl->ListSelected(vCoinControl);
model->getOutputs(vCoinControl, vOutputs);

BOOST_FOREACH(const COutput& out, vOutputs) {
for (const COutput& out : vOutputs) {
// unselect already spent, very unlikely scenario, this could happen
// when selected are spent elsewhere, like rpc or another computer
uint256 txhash = out.tx->GetHash();
@ -549,7 +549,7 @@ void CoinControlDialog::updateLabels(WalletModel *model, QDialog* dialog)
nBytes -= 34;

// Fee
nPayFee = CWallet::GetMinimumFee(nBytes, nTxConfirmTarget, ::mempool, ::feeEstimator);
nPayFee = CWallet::GetMinimumFee(nBytes, coinControl->nConfirmTarget, ::mempool, ::feeEstimator);

if (nPayAmount > 0)
{
@ -633,7 +633,7 @@ void CoinControlDialog::updateLabels(WalletModel *model, QDialog* dialog)
if (payTxFee.GetFeePerK() > 0)
dFeeVary = (double)std::max(CWallet::GetRequiredFee(1000), payTxFee.GetFeePerK()) / 1000;
else {
dFeeVary = (double)std::max(CWallet::GetRequiredFee(1000), ::feeEstimator.estimateSmartFee(nTxConfirmTarget, NULL, ::mempool).GetFeePerK()) / 1000;
dFeeVary = (double)std::max(CWallet::GetRequiredFee(1000), ::feeEstimator.estimateSmartFee(coinControl->nConfirmTarget, NULL, ::mempool).GetFeePerK()) / 1000;
}
QString toolTip4 = tr("Can vary +/- %1 duff(s) per input.").arg(dFeeVary);

@ -672,7 +672,7 @@ void CoinControlDialog::updateView()
std::map<QString, std::vector<COutput> > mapCoins;
model->listCoins(mapCoins);

BOOST_FOREACH(const PAIRTYPE(QString, std::vector<COutput>)& coins, mapCoins) {
for (const std::pair<QString, std::vector<COutput>>& coins : mapCoins) {
CCoinControlWidgetItem *itemWalletAddress = new CCoinControlWidgetItem();
itemWalletAddress->setCheckState(COLUMN_CHECKBOX, Qt::Unchecked);
QString sWalletAddress = coins.first;
@ -699,7 +699,7 @@ void CoinControlDialog::updateView()

CAmount nSum = 0;
int nChildren = 0;
BOOST_FOREACH(const COutput& out, coins.second) {
for (const COutput& out : coins.second) {
nSum += out.tx->tx->vout[out.i].nValue;
nChildren++;

@ -20,7 +20,6 @@ class PlatformStyle;
class WalletModel;

class CCoinControl;
class CTxMemPool;

namespace Ui {
class CoinControlDialog;

@ -217,7 +217,7 @@ bool Intro::pickDataDirectory()
}
dataDir = intro.getDataDirectory();
try {
TryCreateDirectory(GUIUtil::qstringToBoostPath(dataDir));
TryCreateDirectories(GUIUtil::qstringToBoostPath(dataDir));
break;
} catch (const fs::filesystem_error&) {
QMessageBox::critical(0, tr(PACKAGE_NAME),

@ -107,7 +107,7 @@ OptionsDialog::OptionsDialog(QWidget *parent, bool enableWallet) :

ui->lang->setToolTip(ui->lang->toolTip().arg(tr(PACKAGE_NAME)));
ui->lang->addItem(QString("(") + tr("default") + QString(")"), QVariant(""));
Q_FOREACH(const QString &langStr, translations.entryList())
for (const QString &langStr : translations.entryList())
{
QLocale locale(langStr);

@ -145,7 +145,7 @@ void PaymentServer::LoadRootCAs(X509_STORE* _store)
int nRootCerts = 0;
const QDateTime currentTime = QDateTime::currentDateTime();

Q_FOREACH (const QSslCertificate& cert, certList) {
for (const QSslCertificate& cert : certList) {
// Don't log NULL certificates
if (cert.isNull())
continue;
@ -268,7 +268,7 @@ void PaymentServer::ipcParseCommandLine(int argc, char* argv[])
bool PaymentServer::ipcSendCommandLine()
{
bool fResult = false;
Q_FOREACH (const QString& r, savedPaymentRequests)
for (const QString& r : savedPaymentRequests)
{
QLocalSocket* socket = new QLocalSocket();
socket->connectToServer(ipcServerName(), QIODevice::WriteOnly);
@ -393,7 +393,7 @@ void PaymentServer::uiReady()
initNetManager();

saveURIs = false;
Q_FOREACH (const QString& s, savedPaymentRequests)
for (const QString& s : savedPaymentRequests)
{
handleURIOrFile(s);
}
@ -556,7 +556,7 @@ bool PaymentServer::processPaymentRequest(const PaymentRequestPlus& request, Sen
QList<std::pair<CScript, CAmount> > sendingTos = request.getPayTo();
QStringList addresses;

Q_FOREACH(const PAIRTYPE(CScript, CAmount)& sendingTo, sendingTos) {
for (const std::pair<CScript, CAmount>& sendingTo : sendingTos) {
// Extract and check destination addresses
CTxDestination dest;
if (ExtractDestination(sendingTo.first, dest)) {
@ -743,7 +743,7 @@ void PaymentServer::reportSslErrors(QNetworkReply* reply, const QList<QSslError>
Q_UNUSED(reply);

QString errString;
Q_FOREACH (const QSslError& err, errs) {
for (const QSslError& err : errs) {
qWarning() << "PaymentServer::reportSslErrors: " << err;
errString += err.errorString() + "\n";
}

@ -62,7 +62,7 @@ public:
#if QT_VERSION >= 0x040700
cachedNodeStats.reserve(vstats.size());
#endif
Q_FOREACH (const CNodeStats& nodestats, vstats)
for (const CNodeStats& nodestats : vstats)
{
CNodeCombinedStats stats;
stats.nodeStateStats.nMisbehavior = 0;
@ -79,7 +79,7 @@ public:
TRY_LOCK(cs_main, lockMain);
if (lockMain)
{
BOOST_FOREACH(CNodeCombinedStats &stats, cachedNodeStats)
for (CNodeCombinedStats &stats : cachedNodeStats)
stats.fNodeStateStatsAvailable = GetNodeStateStats(stats.nodeStats.nodeid, stats.nodeStateStats);
}
}
@ -91,7 +91,7 @@ public:
// build index map
mapNodeRows.clear();
int row = 0;
Q_FOREACH (const CNodeCombinedStats& stats, cachedNodeStats)
for (const CNodeCombinedStats& stats : cachedNodeStats)
mapNodeRows.insert(std::pair<NodeId, int>(stats.nodeStats.nodeid, row++));
}

@ -48,8 +48,7 @@ void MakeSingleColorImage(QImage& img, const QColor& colorbase)
QIcon ColorizeIcon(const QIcon& ico, const QColor& colorbase)
{
QIcon new_ico;
QSize sz;
Q_FOREACH(sz, ico.availableSizes())
for (const QSize sz : ico.availableSizes())
{
QImage img(ico.pixmap(sz).toImage());
MakeSingleColorImage(img, colorbase);

@ -192,7 +192,7 @@ void ReceiveCoinsDialog::on_showRequestButton_clicked()
return;
QModelIndexList selection = ui->recentRequestsView->selectionModel()->selectedRows();

Q_FOREACH (const QModelIndex& index, selection) {
for (const QModelIndex& index : selection) {
on_recentRequestsView_doubleClicked(index);
}
}

@ -15,7 +15,6 @@
#include <QPoint>
#include <QVariant>

class OptionsModel;
class PlatformStyle;
class WalletModel;

@ -22,7 +22,7 @@ RecentRequestsTableModel::RecentRequestsTableModel(CWallet *wallet, WalletModel
// Load entries from wallet
std::vector<std::string> vReceiveRequests;
parent->loadReceiveRequests(vReceiveRequests);
BOOST_FOREACH(const std::string& request, vReceiveRequests)
for (const std::string& request : vReceiveRequests)
addNewRequest(request);

/* These columns must match the indices in the ColumnIndex enumeration */

@ -329,7 +329,7 @@ void SendCoinsDialog::send(QList<SendCoinsRecipient> recipients, QString strFee,

// Format confirmation message
QStringList formatted;
Q_FOREACH(const SendCoinsRecipient &rcp, currentTransaction.getRecipients())
for (const SendCoinsRecipient &rcp : currentTransaction.getRecipients())
{
// generate bold amount string
QString amount = "<b>" + BitcoinUnits::formatHtmlWithUnit(model->getOptionsModel()->getDisplayUnit(), rcp.amount);
@ -386,7 +386,7 @@ void SendCoinsDialog::send(QList<SendCoinsRecipient> recipients, QString strFee,
questionString.append("<hr />");
CAmount totalAmount = currentTransaction.getTotalTransactionAmount() + txFee;
QStringList alternativeUnits;
Q_FOREACH(BitcoinUnits::Unit u, BitcoinUnits::availableUnits())
for (BitcoinUnits::Unit u : BitcoinUnits::availableUnits())
{
if(u != model->getOptionsModel()->getDisplayUnit())
alternativeUnits.append(BitcoinUnits::formatHtmlWithUnit(u, totalAmount));
@ -912,6 +912,12 @@ void SendCoinsDialog::coinControlUpdateLabels()
// set pay amounts
CoinControlDialog::payAmounts.clear();
CoinControlDialog::fSubtractFeeFromAmount = false;
if (ui->radioSmartFee->isChecked()) {
CoinControlDialog::coinControl->nConfirmTarget = ui->sliderSmartFee->maximum() - ui->sliderSmartFee->value() + 2;
} else {
CoinControlDialog::coinControl->nConfirmTarget = model->getDefaultConfirmTarget();
}

for(int i = 0; i < ui->entries->count(); ++i)
{
SendCoinsEntry *entry = qobject_cast<SendCoinsEntry*>(ui->entries->itemAt(i)->widget());
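The new block in coinControlUpdateLabels threads the smart-fee slider through to coin control. The slider is inverted: its rightmost position maps to the smallest (fastest) confirmation target. The arithmetic from the hunk above, in isolation:

```cpp
// value == maximum  -> target of 2 blocks (fastest, highest fee)
// value == 0        -> target of maximum + 2 blocks (slowest, lowest fee)
int ConfirmTargetFromSlider(int value, int maximum)
{
    return maximum - value + 2;
}
```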
@ -15,7 +15,6 @@
static const int MAX_SEND_POPUP_ENTRIES = 10;

class ClientModel;
class OptionsModel;
class PlatformStyle;
class SendCoinsEntry;
class SendCoinsRecipient;

@ -192,7 +192,7 @@ void SplashScreen::unsubscribeFromCoreSignals()
uiInterface.InitMessage.disconnect(boost::bind(InitMessage, this, _1));
uiInterface.ShowProgress.disconnect(boost::bind(ShowProgress, this, _1, _2));
#ifdef ENABLE_WALLET
Q_FOREACH(CWallet* const & pwallet, connectedWallets) {
for (CWallet* const & pwallet : connectedWallets) {
pwallet->ShowProgress.disconnect(boost::bind(ShowProgress, this, _1, _2));
}
#endif

@ -196,7 +196,7 @@ void PaymentServerTests::paymentServerTests()
QVERIFY(r.paymentRequest.IsInitialized());
// Extract address and amount from the request
QList<std::pair<CScript, CAmount> > sendingTos = r.paymentRequest.getPayTo();
Q_FOREACH (const PAIRTYPE(CScript, CAmount)& sendingTo, sendingTos) {
for (const std::pair<CScript, CAmount>& sendingTo : sendingTos) {
CTxDestination dest;
if (ExtractDestination(sendingTo.first, dest))
QCOMPARE(PaymentServer::verifyAmount(sendingTo.second), false);

@ -155,7 +155,7 @@ void TrafficGraphWidget::updateRates()

if (updated){
float tmax = DEFAULT_SAMPLE_HEIGHT;
Q_FOREACH(const TrafficSample& sample, trafficGraphData.getCurrentRangeQueueWithAverageBandwidth()) {
for (const TrafficSample& sample : trafficGraphData.getCurrentRangeQueueWithAverageBandwidth()) {
if(sample.in > tmax) tmax = sample.in;
if(sample.out > tmax) tmax = sample.out;
}

@ -149,7 +149,7 @@ QString TransactionDesc::toHTML(CWallet *wallet, CWalletTx &wtx, TransactionReco
// Coinbase
//
CAmount nUnmatured = 0;
BOOST_FOREACH(const CTxOut& txout, wtx.tx->vout)
for (const CTxOut& txout : wtx.tx->vout)
nUnmatured += wallet->GetCredit(txout, ISMINE_ALL);
strHTML += "<b>" + tr("Credit") + ":</b> ";
if (wtx.IsInMainChain())
@ -168,14 +168,14 @@ QString TransactionDesc::toHTML(CWallet *wallet, CWalletTx &wtx, TransactionReco
else
{
isminetype fAllFromMe = ISMINE_SPENDABLE;
BOOST_FOREACH(const CTxIn& txin, wtx.tx->vin)
for (const CTxIn& txin : wtx.tx->vin)
{
isminetype mine = wallet->IsMine(txin);
if(fAllFromMe > mine) fAllFromMe = mine;
}

isminetype fAllToMe = ISMINE_SPENDABLE;
BOOST_FOREACH(const CTxOut& txout, wtx.tx->vout)
for (const CTxOut& txout : wtx.tx->vout)
{
isminetype mine = wallet->IsMine(txout);
if(fAllToMe > mine) fAllToMe = mine;
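These paired loops compute a minimum over the isminetype enum: a transaction is "all from me" (or "all to me") only if every input (or output) is at the spendable level. Spelled out as a self-contained sketch (enum values as in ismine.h):

```cpp
#include <algorithm>
#include <vector>

enum isminetype { ISMINE_NO = 0, ISMINE_WATCH_ONLY = 1, ISMINE_SPENDABLE = 2 };

// Returns ISMINE_SPENDABLE only when every element is spendable-mine;
// one watch-only or foreign element lowers the overall verdict.
isminetype AllMine(const std::vector<isminetype>& parts)
{
    isminetype all = ISMINE_SPENDABLE;
    for (isminetype mine : parts)
        all = std::min(all, mine);
    return all;
}
```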
@ -189,7 +189,7 @@ QString TransactionDesc::toHTML(CWallet *wallet, CWalletTx &wtx, TransactionReco
//
// Debit
//
BOOST_FOREACH(const CTxOut& txout, wtx.tx->vout)
for (const CTxOut& txout : wtx.tx->vout)
{
// Ignore change
isminetype toSelf = wallet->IsMine(txout);
@ -237,10 +237,10 @@ QString TransactionDesc::toHTML(CWallet *wallet, CWalletTx &wtx, TransactionReco
//
// Mixed debit transaction
//
BOOST_FOREACH(const CTxIn& txin, wtx.tx->vin)
for (const CTxIn& txin : wtx.tx->vin)
if (wallet->IsMine(txin))
strHTML += "<b>" + tr("Debit") + ":</b> " + BitcoinUnits::formatHtmlWithUnit(unit, -wallet->GetDebit(txin, ISMINE_ALL)) + "<br>";
BOOST_FOREACH(const CTxOut& txout, wtx.tx->vout)
for (const CTxOut& txout : wtx.tx->vout)
if (wallet->IsMine(txout))
strHTML += "<b>" + tr("Credit") + ":</b> " + BitcoinUnits::formatHtmlWithUnit(unit, wallet->GetCredit(txout, ISMINE_ALL)) + "<br>";
}
@ -261,14 +261,14 @@ QString TransactionDesc::toHTML(CWallet *wallet, CWalletTx &wtx, TransactionReco
strHTML += "<b>" + tr("Transaction total size") + ":</b> " + QString::number(wtx.tx->GetTotalSize()) + " bytes<br>";

// Message from normal dash:URI (dash:XyZ...?message=example)
Q_FOREACH (const PAIRTYPE(std::string, std::string)& r, wtx.vOrderForm)
for (const std::pair<std::string, std::string>& r : wtx.vOrderForm)
if (r.first == "Message")
strHTML += "<br><b>" + tr("Message") + ":</b><br>" + GUIUtil::HtmlEscape(r.second, true) + "<br>";

//
// PaymentRequest info:
//
Q_FOREACH (const PAIRTYPE(std::string, std::string)& r, wtx.vOrderForm)
for (const std::pair<std::string, std::string>& r : wtx.vOrderForm)
{
if (r.first == "PaymentRequest")
{
@ -292,10 +292,10 @@ QString TransactionDesc::toHTML(CWallet *wallet, CWalletTx &wtx, TransactionReco
if (logCategories != BCLog::NONE)
{
strHTML += "<hr><br>" + tr("Debug information") + "<br><br>";
BOOST_FOREACH(const CTxIn& txin, wtx.tx->vin)
for (const CTxIn& txin : wtx.tx->vin)
if(wallet->IsMine(txin))
strHTML += "<b>" + tr("Debit") + ":</b> " + BitcoinUnits::formatHtmlWithUnit(unit, -wallet->GetDebit(txin, ISMINE_ALL)) + "<br>";
BOOST_FOREACH(const CTxOut& txout, wtx.tx->vout)
for (const CTxOut& txout : wtx.tx->vout)
if(wallet->IsMine(txout))
strHTML += "<b>" + tr("Credit") + ":</b> " + BitcoinUnits::formatHtmlWithUnit(unit, wallet->GetCredit(txout, ISMINE_ALL)) + "<br>";

@ -305,7 +305,7 @@ QString TransactionDesc::toHTML(CWallet *wallet, CWalletTx &wtx, TransactionReco
strHTML += "<br><b>" + tr("Inputs") + ":</b>";
strHTML += "<ul>";

BOOST_FOREACH(const CTxIn& txin, wtx.tx->vin)
for (const CTxIn& txin : wtx.tx->vin)
{
COutPoint prevout = txin.prevout;

@ -83,7 +83,7 @@ QList<TransactionRecord> TransactionRecord::decomposeTransaction(const CWallet *
int nFromMe = 0;
bool involvesWatchAddress = false;
isminetype fAllFromMe = ISMINE_SPENDABLE;
BOOST_FOREACH(const CTxIn& txin, wtx.tx->vin)
for (const CTxIn& txin : wtx.tx->vin)
{
if(wallet->IsMine(txin)) {
fAllFromMeDenom = fAllFromMeDenom && wallet->IsDenominated(txin.prevout);
@ -97,7 +97,7 @@ QList<TransactionRecord> TransactionRecord::decomposeTransaction(const CWallet *
isminetype fAllToMe = ISMINE_SPENDABLE;
bool fAllToMeDenom = true;
int nToMe = 0;
BOOST_FOREACH(const CTxOut& txout, wtx.tx->vout) {
for (const CTxOut& txout : wtx.tx->vout) {
if(wallet->IsMine(txout)) {
fAllToMeDenom = fAllToMeDenom && CPrivateSend::IsDenominatedAmount(txout.nValue);
nToMe++;

@ -144,7 +144,7 @@ public:
{
parent->beginInsertRows(QModelIndex(), lowerIndex, lowerIndex+toInsert.size()-1);
int insert_idx = lowerIndex;
Q_FOREACH(const TransactionRecord &rec, toInsert)
for (const TransactionRecord &rec : toInsert)
{
cachedWallet.insert(insert_idx, rec);
insert_idx += 1;

@ -574,7 +574,7 @@ void TransactionView::computeSum()
return;
QModelIndexList selection = transactionView->selectionModel()->selectedRows();

Q_FOREACH (QModelIndex index, selection){
for (QModelIndex index : selection){
amount += index.data(TransactionTableModel::AmountRole).toLongLong();
}
QString strAmount(BitcoinUnits::formatWithUnit(nDisplayUnit, amount, true, BitcoinUnits::separatorAlways));

@ -108,7 +108,7 @@ HelpMessageDialog::HelpMessageDialog(QWidget *parent, HelpMode helpMode) :
QTextCharFormat bold;
bold.setFontWeight(QFont::Bold);

Q_FOREACH (const QString &line, coreOptions.split("\n")) {
for (const QString &line : coreOptions.split("\n")) {
if (line.startsWith(" -"))
{
cursor.currentTable()->appendRows(1);

@ -9,7 +9,6 @@
#include <QObject>

class BitcoinGUI;
class ClientModel;

namespace Ui {
class HelpMessageDialog;

@ -79,7 +79,7 @@ CAmount WalletModel::getBalance(const CCoinControl *coinControl) const
CAmount nBalance = 0;
std::vector<COutput> vCoins;
wallet->AvailableCoins(vCoins, true, coinControl);
BOOST_FOREACH(const COutput& out, vCoins)
for (const COutput& out : vCoins)
if(out.fSpendable)
nBalance += out.tx->tx->vout[out.i].nValue;

@ -256,7 +256,7 @@ WalletModel::SendCoinsReturn WalletModel::prepareTransaction(WalletModelTransact
int nAddresses = 0;

// Pre-check input data for validity
Q_FOREACH(const SendCoinsRecipient &rcp, recipients)
for (const SendCoinsRecipient &rcp : recipients)
{
if (rcp.fSubtractFeeFromAmount)
fSubtractFeeFromAmount = true;
@ -367,7 +367,7 @@ WalletModel::SendCoinsReturn WalletModel::sendCoins(WalletModelTransaction &tran
CWalletTx *newTx = transaction.getTransaction();
QList<SendCoinsRecipient> recipients = transaction.getRecipients();

Q_FOREACH(const SendCoinsRecipient &rcp, recipients)
for (const SendCoinsRecipient &rcp : recipients)
{
if (rcp.paymentRequest.IsInitialized())
{
@ -400,7 +400,7 @@ WalletModel::SendCoinsReturn WalletModel::sendCoins(WalletModelTransaction &tran

// Add addresses / update labels that we've sent to to the address book,
// and emit coinsSent signal for each recipient
Q_FOREACH(const SendCoinsRecipient &rcp, transaction.getRecipients())
for (const SendCoinsRecipient &rcp : transaction.getRecipients())
{
// Don't touch the address book when we have a payment request
if (!rcp.paymentRequest.IsInitialized())
@ -681,7 +681,7 @@ bool WalletModel::getPrivKey(const CKeyID &address, CKey& vchPrivKeyOut) const
void WalletModel::getOutputs(const std::vector<COutPoint>& vOutpoints, std::vector<COutput>& vOutputs)
{
LOCK2(cs_main, wallet->cs_wallet);
BOOST_FOREACH(const COutPoint& outpoint, vOutpoints)
for (const COutPoint& outpoint : vOutpoints)
{
if (!wallet->mapWallet.count(outpoint.hash)) continue;
int nDepth = wallet->mapWallet[outpoint.hash].GetDepthInMainChain();
@ -708,7 +708,7 @@ void WalletModel::listCoins(std::map<QString, std::vector<COutput> >& mapCoins)
wallet->ListLockedCoins(vLockedCoins);

// add locked coins
BOOST_FOREACH(const COutPoint& outpoint, vLockedCoins)
for (const COutPoint& outpoint : vLockedCoins)
{
if (!wallet->mapWallet.count(outpoint.hash)) continue;
int nDepth = wallet->mapWallet[outpoint.hash].GetDepthInMainChain();
@ -718,7 +718,7 @@ void WalletModel::listCoins(std::map<QString, std::vector<COutput> >& mapCoins)
vCoins.push_back(out);
}

BOOST_FOREACH(const COutput& out, vCoins)
for (const COutput& out : vCoins)
{
COutput cout = out;

@ -768,8 +768,8 @@ void WalletModel::listProTxCoins(std::vector<COutPoint>& vOutpts)
void WalletModel::loadReceiveRequests(std::vector<std::string>& vReceiveRequests)
{
LOCK(wallet->cs_wallet);
BOOST_FOREACH(const PAIRTYPE(CTxDestination, CAddressBookData)& item, wallet->mapAddressBook)
BOOST_FOREACH(const PAIRTYPE(std::string, std::string)& item2, item.second.destdata)
for (const std::pair<CTxDestination, CAddressBookData>& item : wallet->mapAddressBook)
for (const std::pair<std::string, std::string>& item2 : item.second.destdata)
if (item2.first.size() > 2 && item2.first.substr(0,2) == "rr") // receive request
vReceiveRequests.push_back(item2.second);
}

@ -88,7 +88,7 @@ void WalletModelTransaction::reassignAmounts()
CAmount WalletModelTransaction::getTotalTransactionAmount()
{
CAmount totalTransactionAmount = 0;
Q_FOREACH(const SendCoinsRecipient &rcp, recipients)
for (const SendCoinsRecipient &rcp : recipients)
{
totalTransactionAmount += rcp.amount;
}

@ -65,6 +65,70 @@ static inline int64_t GetPerformanceCounter()
#endif
}

#if defined(__x86_64__) || defined(__amd64__) || defined(__i386__)
static std::atomic<bool> hwrand_initialized{false};
static bool rdrand_supported = false;
static constexpr uint32_t CPUID_F1_ECX_RDRAND = 0x40000000;
static void RDRandInit()
{
uint32_t eax, ecx, edx;
#if defined(__i386__) && ( defined(__PIC__) || defined(__PIE__))
// Avoid clobbering ebx, as that is used for PIC on x86.
uint32_t tmp;
__asm__ ("mov %%ebx, %1; cpuid; mov %1, %%ebx": "=a"(eax), "=g"(tmp), "=c"(ecx), "=d"(edx) : "a"(1));
#else
uint32_t ebx;
__asm__ ("cpuid": "=a"(eax), "=b"(ebx), "=c"(ecx), "=d"(edx) : "a"(1));
#endif
//! When calling cpuid function #1, ecx register will have this set if RDRAND is available.
if (ecx & CPUID_F1_ECX_RDRAND) {
LogPrintf("Using RdRand as entropy source\n");
rdrand_supported = true;
}
hwrand_initialized.store(true);
}
#else
static void RDRandInit() {}
#endif

static bool GetHWRand(unsigned char* ent32) {
#if defined(__x86_64__) || defined(__amd64__) || defined(__i386__)
assert(hwrand_initialized.load(std::memory_order_relaxed));
if (rdrand_supported) {
uint8_t ok;
// Not all assemblers support the rdrand instruction, write it in hex.
#ifdef __i386__
for (int iter = 0; iter < 4; ++iter) {
uint32_t r1, r2;
__asm__ volatile (".byte 0x0f, 0xc7, 0xf0;" // rdrand %eax
".byte 0x0f, 0xc7, 0xf2;" // rdrand %edx
"setc %2" :
"=a"(r1), "=d"(r2), "=q"(ok) :: "cc");
if (!ok) return false;
WriteLE32(ent32 + 8 * iter, r1);
WriteLE32(ent32 + 8 * iter + 4, r2);
}
#else
uint64_t r1, r2, r3, r4;
__asm__ volatile (".byte 0x48, 0x0f, 0xc7, 0xf0, " // rdrand %rax
"0x48, 0x0f, 0xc7, 0xf3, " // rdrand %rbx
"0x48, 0x0f, 0xc7, 0xf1, " // rdrand %rcx
"0x48, 0x0f, 0xc7, 0xf2; " // rdrand %rdx
"setc %4" :
"=a"(r1), "=b"(r2), "=c"(r3), "=d"(r4), "=q"(ok) :: "cc");
if (!ok) return false;
WriteLE64(ent32, r1);
WriteLE64(ent32 + 8, r2);
WriteLE64(ent32 + 16, r3);
WriteLE64(ent32 + 24, r4);
#endif
return true;
}
#endif
return false;
}
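RDRandInit above issues CPUID leaf 1 with raw inline assembly so that it can avoid clobbering ebx, which PIC reserves on 32-bit x86. For reference, a hedged sketch of the same RDRAND probe using GCC/Clang's <cpuid.h> helper (illustrative only; not how the patch does it):

```cpp
#include <cpuid.h>
#include <cstdio>

static bool RdRandAvailable()
{
    unsigned int eax, ebx, ecx, edx;
    // Leaf 1 returns the feature flags; ECX bit 30 (0x40000000) reports RDRAND.
    if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
        return false;
    return (ecx & (1u << 30)) != 0;
}

int main()
{
    std::printf("rdrand: %s\n", RdRandAvailable() ? "yes" : "no");
    return 0;
}
```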
void RandAddSeed()
{
// Seed with CPU performance counter
@ -255,6 +319,11 @@ void GetStrongRandBytes(unsigned char* out, int num)
GetOSRand(buf);
hasher.Write(buf, 32);

// Third source: HW RNG, if available.
if (GetHWRand(buf)) {
hasher.Write(buf, 32);
}

// Combine with and update state
{
std::unique_lock<std::mutex> lock(cs_rng_state);
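GetStrongRandBytes now mixes a third, optional entropy source into the same hasher as the first two. The pattern in isolation, as a hedged sketch (CSHA512 as in crypto/sha512.h; the RNG state handling around it is omitted):

```cpp
#include "crypto/sha512.h"

// Concatenate the available entropy sources into one 64-byte digest; a weak
// or unavailable source can only add to, never weaken, the combined output.
void MixEntropy(const unsigned char* rng32, const unsigned char* os32,
                const unsigned char* hw32, unsigned char out[64])
{
    CSHA512 hasher;
    hasher.Write(rng32, 32);          // first source: internal RNG
    hasher.Write(os32, 32);           // second source: OS entropy
    if (hw32) hasher.Write(hw32, 32); // third source: HW RNG, if available
    hasher.Finalize(out);
}
```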
@ -392,3 +461,8 @@ FastRandomContext::FastRandomContext(bool fDeterministic) : requires_seed(!fDete
uint256 seed;
rng.SetKey(seed.begin(), 32);
}

void RandomInit()
{
RDRandInit();
}

@ -150,4 +150,7 @@ void GetOSRand(unsigned char *ent32);
*/
bool Random_SanityCheck();

/** Initialize the RNG. */
void RandomInit();

#endif // BITCOIN_RANDOM_H
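The new RandomInit() entry point exists so the one-time hardware probe runs before any strong random bytes are drawn; upstream wires it into early startup. A minimal usage sketch (the function names are the patch's own; the call site is illustrative):

```cpp
#include "random.h"

int main()
{
    RandomInit(); // one-time CPU feature detection (RDRandInit on x86)

    unsigned char key[32];
    GetStrongRandBytes(key, sizeof(key)); // may now mix in the HW source
    return 0;
}
```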
@ -158,7 +158,7 @@ static bool rest_headers(HTTPRequest* req,
}

CDataStream ssHeader(SER_NETWORK, PROTOCOL_VERSION);
BOOST_FOREACH(const CBlockIndex *pindex, headers) {
for (const CBlockIndex *pindex : headers) {
ssHeader << pindex->GetBlockHeader();
}

@ -178,7 +178,7 @@ static bool rest_headers(HTTPRequest* req,
}
case RF_JSON: {
UniValue jsonHeaders(UniValue::VARR);
BOOST_FOREACH(const CBlockIndex *pindex, headers) {
for (const CBlockIndex *pindex : headers) {
jsonHeaders.push_back(blockheaderToJSON(pindex));
}
std::string strJSON = jsonHeaders.write() + "\n";
@ -558,7 +558,7 @@ static bool rest_getutxos(HTTPRequest* req, const std::string& strURIPart)
objGetUTXOResponse.push_back(Pair("bitmap", bitmapStringRepresentation));

UniValue utxos(UniValue::VARR);
BOOST_FOREACH (const CCoin& coin, outs) {
for (const CCoin& coin : outs) {
UniValue utxo(UniValue::VOBJ);
utxo.push_back(Pair("height", (int32_t)coin.nHeight));
utxo.push_back(Pair("value", ValueFromAmount(coin.out.nValue)));

@ -387,14 +387,14 @@ void entryToJSON(UniValue &info, const CTxMemPoolEntry &e)
info.push_back(Pair("ancestorfees", e.GetModFeesWithAncestors()));
const CTransaction& tx = e.GetTx();
std::set<std::string> setDepends;
BOOST_FOREACH(const CTxIn& txin, tx.vin)
for (const CTxIn& txin : tx.vin)
{
if (mempool.exists(txin.prevout.hash))
setDepends.insert(txin.prevout.hash.ToString());
}

UniValue depends(UniValue::VARR);
BOOST_FOREACH(const std::string& dep, setDepends)
for (const std::string& dep : setDepends)
{
depends.push_back(dep);
}
@ -409,7 +409,7 @@ UniValue mempoolToJSON(bool fVerbose)
{
LOCK(mempool.cs);
UniValue o(UniValue::VOBJ);
BOOST_FOREACH(const CTxMemPoolEntry& e, mempool.mapTx)
for (const CTxMemPoolEntry& e : mempool.mapTx)
{
const uint256& hash = e.GetTx().GetHash();
UniValue info(UniValue::VOBJ);
@ -424,7 +424,7 @@ UniValue mempoolToJSON(bool fVerbose)
mempool.queryHashes(vtxid);

UniValue a(UniValue::VARR);
BOOST_FOREACH(const uint256& hash, vtxid)
for (const uint256& hash : vtxid)
a.push_back(hash.ToString());

return a;
@ -509,14 +509,14 @@ UniValue getmempoolancestors(const JSONRPCRequest& request)

if (!fVerbose) {
UniValue o(UniValue::VARR);
BOOST_FOREACH(CTxMemPool::txiter ancestorIt, setAncestors) {
for (CTxMemPool::txiter ancestorIt : setAncestors) {
o.push_back(ancestorIt->GetTx().GetHash().ToString());
}

return o;
} else {
UniValue o(UniValue::VOBJ);
BOOST_FOREACH(CTxMemPool::txiter ancestorIt, setAncestors) {
for (CTxMemPool::txiter ancestorIt : setAncestors) {
const CTxMemPoolEntry &e = *ancestorIt;
const uint256& _hash = e.GetTx().GetHash();
UniValue info(UniValue::VOBJ);
@ -573,14 +573,14 @@ UniValue getmempooldescendants(const JSONRPCRequest& request)

if (!fVerbose) {
UniValue o(UniValue::VARR);
BOOST_FOREACH(CTxMemPool::txiter descendantIt, setDescendants) {
for (CTxMemPool::txiter descendantIt : setDescendants) {
o.push_back(descendantIt->GetTx().GetHash().ToString());
}

return o;
} else {
UniValue o(UniValue::VOBJ);
BOOST_FOREACH(CTxMemPool::txiter descendantIt, setDescendants) {
for (CTxMemPool::txiter descendantIt : setDescendants) {
const CTxMemPoolEntry &e = *descendantIt;
const uint256& _hash = e.GetTx().GetHash();
UniValue info(UniValue::VOBJ);
@ -1512,7 +1512,7 @@ UniValue getchaintips(const JSONRPCRequest& request)
std::set<const CBlockIndex*> setOrphans;
std::set<const CBlockIndex*> setPrevs;

BOOST_FOREACH(const PAIRTYPE(const uint256, CBlockIndex*)& item, mapBlockIndex)
for (const std::pair<const uint256, CBlockIndex*>& item : mapBlockIndex)
{
if (!chainActive.Contains(item.second)) {
setOrphans.insert(item.second);
@ -1541,7 +1541,7 @@ UniValue getchaintips(const JSONRPCRequest& request)

/* Construct the output array. */
UniValue res(UniValue::VARR);
BOOST_FOREACH(const CBlockIndex* block, setTips)
for (const CBlockIndex* block : setTips)
{
const CBlockIndex* pindexFork = chainActive.FindFork(block);
const int branchLen = block->nHeight - pindexFork->nHeight;

@ -7,9 +7,6 @@

class CBlock;
class CBlockIndex;
class CScript;
class CTransaction;
class uint256;
class UniValue;

/**

@ -601,7 +601,7 @@ UniValue getblocktemplate(const JSONRPCRequest& request)
entry.push_back(Pair("hash", txHash.GetHex()));

UniValue deps(UniValue::VARR);
BOOST_FOREACH (const CTxIn &in, tx.vin)
for (const CTxIn &in : tx.vin)
{
if (setTxIndex.count(in.prevout.hash))
deps.push_back(setTxIndex[in.prevout.hash]);

@ -232,7 +232,7 @@ public:
obj.push_back(Pair("script", GetTxnOutputType(whichType)));
obj.push_back(Pair("hex", HexStr(subscript.begin(), subscript.end())));
UniValue a(UniValue::VARR);
BOOST_FOREACH(const CTxDestination& addr, addresses)
for (const CTxDestination& addr : addresses)
a.push_back(CBitcoinAddress(addr).ToString());
obj.push_back(Pair("addresses", a));
if (whichType == TX_MULTISIG)
@ -371,8 +371,8 @@ UniValue validateaddress(const JSONRPCRequest& request)

#ifdef ENABLE_WALLET
isminetype mine = pwallet ? IsMine(*pwallet, dest) : ISMINE_NO;
ret.push_back(Pair("ismine", (mine & ISMINE_SPENDABLE) ? true : false));
ret.push_back(Pair("iswatchonly", (mine & ISMINE_WATCH_ONLY) ? true: false));
ret.push_back(Pair("ismine", bool(mine & ISMINE_SPENDABLE)));
ret.push_back(Pair("iswatchonly", bool(mine & ISMINE_WATCH_ONLY)));
UniValue detail = boost::apply_visitor(DescribeAddressVisitor(pwallet), dest);
ret.pushKVs(detail);
if (pwallet && pwallet->mapAddressBook.count(dest)) {

@ -127,7 +127,7 @@ UniValue getpeerinfo(const JSONRPCRequest& request)

UniValue ret(UniValue::VARR);

BOOST_FOREACH(const CNodeStats& stats, vstats) {
for (const CNodeStats& stats : vstats) {
UniValue obj(UniValue::VOBJ);
CNodeStateStats statestats;
bool fStateStats = GetNodeStateStats(stats.nodeid, statestats);
@ -164,7 +164,7 @@ UniValue getpeerinfo(const JSONRPCRequest& request)
obj.push_back(Pair("synced_headers", statestats.nSyncHeight));
obj.push_back(Pair("synced_blocks", statestats.nCommonHeight));
UniValue heights(UniValue::VARR);
BOOST_FOREACH(int height, statestats.vHeightInFlight) {
for (int height : statestats.vHeightInFlight) {
heights.push_back(height);
}
obj.push_back(Pair("inflight", heights));
@ -172,14 +172,14 @@ UniValue getpeerinfo(const JSONRPCRequest& request)
obj.push_back(Pair("whitelisted", stats.fWhitelisted));

UniValue sendPerMsgCmd(UniValue::VOBJ);
BOOST_FOREACH(const mapMsgCmdSize::value_type &i, stats.mapSendBytesPerMsgCmd) {
for (const mapMsgCmdSize::value_type &i : stats.mapSendBytesPerMsgCmd) {
if (i.second > 0)
sendPerMsgCmd.push_back(Pair(i.first, i.second));
}
obj.push_back(Pair("bytessent_per_msg", sendPerMsgCmd));

UniValue recvPerMsgCmd(UniValue::VOBJ);
BOOST_FOREACH(const mapMsgCmdSize::value_type &i, stats.mapRecvBytesPerMsgCmd) {
for (const mapMsgCmdSize::value_type &i : stats.mapRecvBytesPerMsgCmd) {
if (i.second > 0)
recvPerMsgCmd.push_back(Pair(i.first, i.second));
}
@ -475,7 +475,7 @@ UniValue getnetworkinfo(const JSONRPCRequest& request)
UniValue localAddresses(UniValue::VARR);
{
LOCK(cs_mapLocalHost);
BOOST_FOREACH(const PAIRTYPE(CNetAddr, LocalServiceInfo) &item, mapLocalHost)
for (const std::pair<CNetAddr, LocalServiceInfo> &item : mapLocalHost)
{
UniValue rec(UniValue::VOBJ);
rec.push_back(Pair("address", item.first.ToString()));

@ -265,9 +265,13 @@ UniValue gettxoutproof(const JSONRPCRequest& request)
throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Block not found");
pblockindex = mapBlockIndex[hashBlock];
} else {
const Coin& coin = AccessByTxid(*pcoinsTip, oneTxid);
if (!coin.IsSpent() && coin.nHeight > 0 && coin.nHeight <= chainActive.Height()) {
pblockindex = chainActive[coin.nHeight];
// Loop through txids and try to find which block they're in. Exit loop once a block is found.
for (const auto& tx : setTxids) {
const Coin& coin = AccessByTxid(*pcoinsTip, tx);
if (!coin.IsSpent()) {
pblockindex = chainActive[coin.nHeight];
break;
}
}
}
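Where the old gettxoutproof code consulted the UTXO set for a single txid only, the loop above probes each requested txid until one still-unspent coin pins down a block height. The control flow in isolation, as a hedged sketch with simplified stand-in types (the real code calls AccessByTxid on pcoinsTip):

```cpp
#include <map>
#include <set>
#include <string>

struct CoinInfo { bool spent; int height; };

// Returns the height recorded for the first requested txid whose coin is
// still unspent, or -1 if none of them can place us in a block.
int FindBlockHeight(const std::set<std::string>& txids,
                    const std::map<std::string, CoinInfo>& utxos)
{
    for (const std::string& txid : txids) {
        auto it = utxos.find(txid);
        if (it != utxos.end() && !it->second.spent)
            return it->second.height; // first hit wins, like the break above
    }
    return -1;
}
```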
@ -290,7 +294,7 @@ UniValue gettxoutproof(const JSONRPCRequest& request)
if (setTxids.count(tx->GetHash()))
ntxFound++;
if (ntxFound != setTxids.size())
throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "(Not all) transactions not found in specified block");
throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Not all transactions found in specified or retrieved block");

CDataStream ssMB(SER_NETWORK, PROTOCOL_VERSION);
CMerkleBlock mb(block, setTxids);
@ -332,7 +336,7 @@ UniValue verifytxoutproof(const JSONRPCRequest& request)
if (!mapBlockIndex.count(merkleBlock.header.GetHash()) || !chainActive.Contains(mapBlockIndex[merkleBlock.header.GetHash()]))
throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Block not found in chain");

BOOST_FOREACH(const uint256& hash, vMatch)
for (const uint256& hash : vMatch)
res.push_back(hash.GetHex());
return res;
}
@ -423,7 +427,7 @@ UniValue createrawtransaction(const JSONRPCRequest& request)

std::set<CBitcoinAddress> setAddress;
std::vector<std::string> addrList = sendTo.getKeys();
BOOST_FOREACH(const std::string& name_, addrList) {
for (const std::string& name_ : addrList) {

if (name_ == "data") {
std::vector<unsigned char> data = ParseHexV(sendTo[name_].getValStr(),"Data");
@ -682,7 +686,7 @@ UniValue signrawtransaction(const JSONRPCRequest& request)
CCoinsViewMemPool viewMempool(&viewChain, mempool);
view.SetBackend(viewMempool); // temporarily switch cache backend to db+mempool view

BOOST_FOREACH(const CTxIn& txin, mergedTx.vin) {
for (const CTxIn& txin : mergedTx.vin) {
view.AccessCoin(txin.prevout); // Load entries from viewChain into view; can fail.
}

@ -821,7 +825,7 @@ UniValue signrawtransaction(const JSONRPCRequest& request)
SignSignature(keystore, prevPubKey, mergedTx, i, nHashType);

// ... and merge in other signatures:
BOOST_FOREACH(const CMutableTransaction& txv, txVariants) {
for (const CMutableTransaction& txv : txVariants) {
if (txv.vin.size() > i) {
txin.scriptSig = CombineSignatures(prevPubKey, txConst, i, txin.scriptSig, txv.vin[i].scriptSig);
}

@ -64,7 +64,7 @@ void RPCTypeCheck(const UniValue& params,
bool fAllowNull)
{
unsigned int i = 0;
BOOST_FOREACH(UniValue::VType t, typesExpected)
for (UniValue::VType t : typesExpected)
{
if (params.size() <= i)
break;
@ -103,7 +103,7 @@ void RPCTypeCheckObj(const UniValue& o,

if (fStrict)
{
BOOST_FOREACH(const std::string& k, o.getKeys())
for (const std::string& k : o.getKeys())
{
if (typesExpected.count(k) == 0)
{
@ -233,7 +233,7 @@ std::string CRPCTable::help(const std::string& strCommand, const std::string& st
jreq.fHelp = true;
jreq.params = UniValue();

BOOST_FOREACH(const PAIRTYPE(std::string, const CRPCCommand*)& command, vCommands)
for (const std::pair<std::string, const CRPCCommand*>& command : vCommands)
{
const CRPCCommand *pcmd = command.second;
std::string strMethod = pcmd->name;

@ -26,9 +26,6 @@ namespace RPCServer
void OnPreCommand(std::function<void (const CRPCCommand&)> slot);
}

class CBlockIndex;
class CNetAddr;

/** Wrapper for UniValue::VType, which includes typeAny:
* Used to denote don't care type. Only used by RPCTypeCheckObj */
struct UniValueType {
Some files were not shown because too many files have changed in this diff.