Merge #8753: Locked memory manager
444c673 bench: Add benchmark for lockedpool allocation/deallocation (Wladimir J. van der Laan)
6567999 rpc: Add `getmemoryinfo` call (Wladimir J. van der Laan)
4536148 support: Add LockedPool (Wladimir J. van der Laan)
f4d1fc2 wallet: Get rid of LockObject and UnlockObject calls in key.h (Wladimir J. van der Laan)
999e4c9 wallet: Change CCrypter to use vectors with secure allocator (Wladimir J. van der Laan)

This commit is contained in:
parent 88f9dc2f16
commit bc3b9294e8
@@ -164,7 +164,7 @@ BITCOIN_CORE_H = \
   support/allocators/secure.h \
   support/allocators/zeroafterfree.h \
   support/cleanse.h \
-  support/pagelocker.h \
+  support/lockedpool.h \
   sync.h \
   threadsafety.h \
   threadinterrupt.h \
@@ -399,7 +399,7 @@ libbitcoin_common_a_SOURCES = \
 libbitcoin_util_a_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES)
 libbitcoin_util_a_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
 libbitcoin_util_a_SOURCES = \
-  support/pagelocker.cpp \
+  support/lockedpool.cpp \
   chainparamsbase.cpp \
   clientversion.cpp \
   compat/glibc_sanity.cpp \
@@ -17,7 +17,8 @@ bench_bench_dash_SOURCES = \
   bench/crypto_hash.cpp \
   bench/ccoins_caching.cpp \
   bench/mempool_eviction.cpp \
-  bench/base58.cpp
+  bench/base58.cpp \
+  bench/lockedpool.cpp
 
 bench_bench_dash_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) $(EVENT_CLFAGS) $(EVENT_PTHREADS_CFLAGS) -I$(builddir)/bench/
 bench_bench_dash_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
src/bench/lockedpool.cpp (new file, 47 lines)
@@ -0,0 +1,47 @@
+// Copyright (c) 2016 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#include "bench.h"
+
+#include "support/lockedpool.h"
+
+#include <iostream>
+#include <vector>
+
+#define ASIZE 2048
+#define BITER 5000
+#define MSIZE 2048
+
+static void LockedPool(benchmark::State& state)
+{
+    void *synth_base = reinterpret_cast<void*>(0x08000000);
+    const size_t synth_size = 1024*1024;
+    Arena b(synth_base, synth_size, 16);
+
+    std::vector<void*> addr;
+    for (int x=0; x<ASIZE; ++x)
+        addr.push_back(0);
+    uint32_t s = 0x12345678;
+    while (state.KeepRunning()) {
+        for (int x=0; x<BITER; ++x) {
+            int idx = s & (addr.size()-1);
+            if (s & 0x80000000) {
+                b.free(addr[idx]);
+                addr[idx] = 0;
+            } else if(!addr[idx]) {
+                addr[idx] = b.alloc((s >> 16) & (MSIZE-1));
+            }
+            bool lsb = s & 1;
+            s >>= 1;
+            if (lsb)
+                s ^= 0xf00f00f0; // LFSR period 0xf7ffffe0
+        }
+    }
+    for (void *ptr: addr)
+        b.free(ptr);
+    addr.clear();
+}
+
+BENCHMARK(LockedPool);
src/key.cpp (34 lines changed)
@@ -125,8 +125,8 @@ bool CKey::Check(const unsigned char *vch) {
 
 void CKey::MakeNewKey(bool fCompressedIn) {
     do {
-        GetStrongRandBytes(vch, sizeof(vch));
-    } while (!Check(vch));
+        GetStrongRandBytes(keydata.data(), keydata.size());
+    } while (!Check(keydata.data()));
     fValid = true;
     fCompressed = fCompressedIn;
 }
@@ -224,20 +224,18 @@ bool CKey::Load(CPrivKey &privkey, CPubKey &vchPubKey, bool fSkipCheck=false) {
 bool CKey::Derive(CKey& keyChild, ChainCode &ccChild, unsigned int nChild, const ChainCode& cc) const {
     assert(IsValid());
     assert(IsCompressed());
-    unsigned char out[64];
-    LockObject(out);
+    std::vector<unsigned char, secure_allocator<unsigned char>> vout(64);
     if ((nChild >> 31) == 0) {
         CPubKey pubkey = GetPubKey();
         assert(pubkey.begin() + 33 == pubkey.end());
-        BIP32Hash(cc, nChild, *pubkey.begin(), pubkey.begin()+1, out);
+        BIP32Hash(cc, nChild, *pubkey.begin(), pubkey.begin()+1, vout.data());
     } else {
         assert(begin() + 32 == end());
-        BIP32Hash(cc, nChild, 0, begin(), out);
+        BIP32Hash(cc, nChild, 0, begin(), vout.data());
     }
-    memcpy(ccChild.begin(), out+32, 32);
+    memcpy(ccChild.begin(), vout.data()+32, 32);
     memcpy((unsigned char*)keyChild.begin(), begin(), 32);
-    bool ret = secp256k1_ec_privkey_tweak_add(secp256k1_context_sign, (unsigned char*)keyChild.begin(), out);
-    UnlockObject(out);
+    bool ret = secp256k1_ec_privkey_tweak_add(secp256k1_context_sign, (unsigned char*)keyChild.begin(), vout.data());
    keyChild.fCompressed = true;
    keyChild.fValid = ret;
    return ret;
@@ -253,12 +251,10 @@ bool CExtKey::Derive(CExtKey &out, unsigned int _nChild) const {
 
 void CExtKey::SetMaster(const unsigned char *seed, unsigned int nSeedLen) {
     static const unsigned char hashkey[] = {'B','i','t','c','o','i','n',' ','s','e','e','d'};
-    unsigned char out[64];
-    LockObject(out);
-    CHMAC_SHA512(hashkey, sizeof(hashkey)).Write(seed, nSeedLen).Finalize(out);
-    key.Set(&out[0], &out[32], true);
-    memcpy(chaincode.begin(), &out[32], 32);
-    UnlockObject(out);
+    std::vector<unsigned char, secure_allocator<unsigned char>> vout(64);
+    CHMAC_SHA512(hashkey, sizeof(hashkey)).Write(seed, nSeedLen).Finalize(vout.data());
+    key.Set(&vout[0], &vout[32], true);
+    memcpy(chaincode.begin(), &vout[32], 32);
     nDepth = 0;
     nChild = 0;
     memset(vchFingerprint, 0, sizeof(vchFingerprint));
@@ -308,12 +304,10 @@ void ECC_Start() {
 
     {
         // Pass in a random blinding seed to the secp256k1 context.
-        unsigned char seed[32];
-        LockObject(seed);
-        GetRandBytes(seed, 32);
-        bool ret = secp256k1_context_randomize(ctx, seed);
+        std::vector<unsigned char, secure_allocator<unsigned char>> vseed(32);
+        GetRandBytes(vseed.data(), 32);
+        bool ret = secp256k1_context_randomize(ctx, vseed.data());
         assert(ret);
-        UnlockObject(seed);
     }
 
     secp256k1_context_sign = ctx;
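The pattern applied throughout key.cpp above is mechanical: a fixed stack buffer bracketed by LockObject/UnlockObject becomes a vector using secure_allocator, whose backing storage is locked for its whole lifetime. A condensed before/after sketch, not part of the commit (DoWorkOld/DoWorkNew are hypothetical stand-ins):

// Before: explicit page locking around a stack array.
void DoWorkOld()
{
    unsigned char out[64];
    LockObject(out);
    // ... use out ...
    UnlockObject(out);   // also cleanses the buffer
}

// After: the allocator locks and cleanses; no manual calls needed.
void DoWorkNew()
{
    std::vector<unsigned char, secure_allocator<unsigned char>> out(64);
    // ... use out.data() ...
}   // cleansed and returned to the locked pool automatically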
src/key.h (27 lines changed)
@@ -43,9 +43,7 @@ private:
     bool fCompressed;
 
     //! The actual byte data
-    unsigned char vch[32];
-
-    static_assert(sizeof(vch) == 32, "vch must be 32 bytes in length to not break serialization");
+    std::vector<unsigned char, secure_allocator<unsigned char> > keydata;
 
     //! Check whether the 32-byte array pointed to be vch is valid keydata.
     bool static Check(const unsigned char* vch);
@@ -54,37 +52,30 @@ public:
     //! Construct an invalid private key.
     CKey() : fValid(false), fCompressed(false)
     {
-        LockObject(vch);
-    }
-
-    //! Copy constructor. This is necessary because of memlocking.
-    CKey(const CKey& secret) : fValid(secret.fValid), fCompressed(secret.fCompressed)
-    {
-        LockObject(vch);
-        memcpy(vch, secret.vch, sizeof(vch));
+        // Important: vch must be 32 bytes in length to not break serialization
+        keydata.resize(32);
     }
 
     //! Destructor (again necessary because of memlocking).
     ~CKey()
     {
-        UnlockObject(vch);
     }
 
     friend bool operator==(const CKey& a, const CKey& b)
     {
         return a.fCompressed == b.fCompressed &&
                a.size() == b.size() &&
-               memcmp(&a.vch[0], &b.vch[0], a.size()) == 0;
+               memcmp(a.keydata.data(), b.keydata.data(), a.size()) == 0;
     }
 
     //! Initialize using begin and end iterators to byte data.
     template <typename T>
     void Set(const T pbegin, const T pend, bool fCompressedIn)
     {
-        if (pend - pbegin != sizeof(vch)) {
+        if (size_t(pend - pbegin) != keydata.size()) {
             fValid = false;
         } else if (Check(&pbegin[0])) {
-            memcpy(vch, (unsigned char*)&pbegin[0], sizeof(vch));
+            memcpy(keydata.data(), (unsigned char*)&pbegin[0], keydata.size());
             fValid = true;
             fCompressed = fCompressedIn;
         } else {
@@ -93,9 +84,9 @@ public:
     }
 
     //! Simple read-only vector-like interface.
-    unsigned int size() const { return (fValid ? sizeof(vch) : 0); }
-    const unsigned char* begin() const { return vch; }
-    const unsigned char* end() const { return vch + size(); }
+    unsigned int size() const { return (fValid ? keydata.size() : 0); }
+    const unsigned char* begin() const { return keydata.data(); }
+    const unsigned char* end() const { return keydata.data() + size(); }
 
     //! Check whether this private key is valid.
     bool IsValid() const { return fValid; }
@@ -1043,11 +1043,54 @@ UniValue getspentinfo(const JSONRPCRequest& request)
     return obj;
 }
 
+static UniValue RPCLockedMemoryInfo()
+{
+    LockedPool::Stats stats = LockedPoolManager::Instance().stats();
+    UniValue obj(UniValue::VOBJ);
+    obj.push_back(Pair("used", uint64_t(stats.used)));
+    obj.push_back(Pair("free", uint64_t(stats.free)));
+    obj.push_back(Pair("total", uint64_t(stats.total)));
+    obj.push_back(Pair("locked", uint64_t(stats.locked)));
+    obj.push_back(Pair("chunks_used", uint64_t(stats.chunks_used)));
+    obj.push_back(Pair("chunks_free", uint64_t(stats.chunks_free)));
+    return obj;
+}
+
+UniValue getmemoryinfo(const JSONRPCRequest& request)
+{
+    /* Please, avoid using the word "pool" here in the RPC interface or help,
+     * as users will undoubtedly confuse it with the other "memory pool"
+     */
+    if (request.fHelp || request.params.size() != 0)
+        throw runtime_error(
+            "getmemoryinfo\n"
+            "Returns an object containing information about memory usage.\n"
+            "\nResult:\n"
+            "{\n"
+            "  \"locked\": {               (json object) Information about locked memory manager\n"
+            "    \"used\": xxxxx,          (numeric) Number of bytes used\n"
+            "    \"free\": xxxxx,          (numeric) Number of bytes available in current arenas\n"
+            "    \"total\": xxxxxxx,       (numeric) Total number of bytes managed\n"
+            "    \"locked\": xxxxxx,       (numeric) Amount of bytes that succeeded locking. If this number is smaller than total, locking pages failed at some point and key data could be swapped to disk.\n"
+            "    \"chunks_used\": xxxxx,   (numeric) Number allocated chunks\n"
+            "    \"chunks_free\": xxxxx,   (numeric) Number unused chunks\n"
+            "  }\n"
+            "}\n"
+            "\nExamples:\n"
+            + HelpExampleCli("getmemoryinfo", "")
+            + HelpExampleRpc("getmemoryinfo", "")
+        );
+    UniValue obj(UniValue::VOBJ);
+    obj.push_back(Pair("locked", RPCLockedMemoryInfo()));
+    return obj;
+}
+
 static const CRPCCommand commands[] =
 { //  category              name                      actor (function)         okSafeMode
   //  --------------------- ------------------------  -----------------------  ----------
     { "control",            "debug",                  &debug,                  true  },
     { "control",            "getinfo",                &getinfo,                true  }, /* uses wallet if enabled */
+    { "control",            "getmemoryinfo",          &getmemoryinfo,          true  },
     { "util",               "validateaddress",        &validateaddress,        true  }, /* uses wallet if enabled */
     { "util",               "createmultisig",         &createmultisig,         true  },
     { "util",               "verifymessage",          &verifymessage,          true  },
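For reference, `getmemoryinfo` takes no arguments and returns the locked-memory statistics under a single `locked` key. A minimal invocation sketch; the binary name (`dash-cli`) and all byte counts below are illustrative assumptions, not real output from this commit:

$ dash-cli getmemoryinfo
{
  "locked": {
    "used": 65536,
    "free": 196608,
    "total": 262144,
    "locked": 262144,
    "chunks_used": 6,
    "chunks_free": 2
  }
}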
@@ -6,7 +6,8 @@
 #ifndef BITCOIN_SUPPORT_ALLOCATORS_SECURE_H
 #define BITCOIN_SUPPORT_ALLOCATORS_SECURE_H
 
-#include "support/pagelocker.h"
+#include "support/lockedpool.h"
+#include "support/cleanse.h"
 
 #include <string>
 #include <vector>
@@ -40,20 +41,15 @@ struct secure_allocator : public std::allocator<T> {
 
     T* allocate(std::size_t n, const void* hint = 0)
     {
-        T* p;
-        p = std::allocator<T>::allocate(n, hint);
-        if (p != NULL)
-            LockedPageManager::Instance().LockRange(p, sizeof(T) * n);
-        return p;
+        return static_cast<T*>(LockedPoolManager::Instance().alloc(sizeof(T) * n));
     }
 
     void deallocate(T* p, std::size_t n)
     {
         if (p != NULL) {
             memory_cleanse(p, sizeof(T) * n);
-            LockedPageManager::Instance().UnlockRange(p, sizeof(T) * n);
         }
-        std::allocator<T>::deallocate(p, n);
+        LockedPoolManager::Instance().free(p);
     }
 };
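With this change, every allocation made through secure_allocator is served from the locked pool instead of the normal heap plus per-page locking. A minimal usage sketch under that assumption (the SecureKeyData alias and Example function are hypothetical, for illustration; key.cpp above uses the same pattern inline):

#include "support/allocators/secure.h"

#include <vector>

// Hypothetical alias: any container instantiated with secure_allocator keeps
// its contents in LockedPool-backed (non-swappable) memory.
typedef std::vector<unsigned char, secure_allocator<unsigned char> > SecureKeyData;

void Example()
{
    SecureKeyData seed(32);   // storage comes from LockedPoolManager::Instance()
    // ... fill seed with key material ...
}   // deallocate() runs memory_cleanse and returns the chunk to the locked pool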
src/support/lockedpool.cpp (new file, 383 lines)
@@ -0,0 +1,383 @@
+// Copyright (c) 2016 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#include "support/lockedpool.h"
+#include "support/cleanse.h"
+
+#if defined(HAVE_CONFIG_H)
+#include "config/dash-config.h"
+#endif
+
+#ifdef WIN32
+#ifdef _WIN32_WINNT
+#undef _WIN32_WINNT
+#endif
+#define _WIN32_WINNT 0x0501
+#define WIN32_LEAN_AND_MEAN 1
+#ifndef NOMINMAX
+#define NOMINMAX
+#endif
+#include <windows.h>
+#else
+#include <sys/mman.h> // for mmap
+#include <sys/resource.h> // for getrlimit
+#include <limits.h> // for PAGESIZE
+#include <unistd.h> // for sysconf
+#endif
+
+LockedPoolManager* LockedPoolManager::_instance = NULL;
+std::once_flag LockedPoolManager::init_flag;
+
+/*******************************************************************************/
+// Utilities
+//
+/** Align up to power of 2 */
+static inline size_t align_up(size_t x, size_t align)
+{
+    return (x + align - 1) & ~(align - 1);
+}
+
+/*******************************************************************************/
+// Implementation: Arena
+
+Arena::Arena(void *base_in, size_t size_in, size_t alignment_in):
+    base(static_cast<char*>(base_in)), end(static_cast<char*>(base_in) + size_in), alignment(alignment_in)
+{
+    // Start with one free chunk that covers the entire arena
+    chunks.emplace(base, Chunk(size_in, false));
+}
+
+Arena::~Arena()
+{
+}
+
+void* Arena::alloc(size_t size)
+{
+    // Round to next multiple of alignment
+    size = align_up(size, alignment);
+
+    // Don't handle zero-sized chunks, or those bigger than MAX_SIZE
+    if (size == 0 || size >= Chunk::MAX_SIZE) {
+        return nullptr;
+    }
+
+    for (auto& chunk: chunks) {
+        if (!chunk.second.isInUse() && size <= chunk.second.getSize()) {
+            char* base = chunk.first;
+            size_t leftover = chunk.second.getSize() - size;
+            if (leftover > 0) { // Split chunk
+                chunks.emplace(base + size, Chunk(leftover, false));
+                chunk.second.setSize(size);
+            }
+            chunk.second.setInUse(true);
+            return reinterpret_cast<void*>(base);
+        }
+    }
+    return nullptr;
+}
+
+void Arena::free(void *ptr)
+{
+    // Freeing the NULL pointer is OK.
+    if (ptr == nullptr) {
+        return;
+    }
+    auto i = chunks.find(static_cast<char*>(ptr));
+    if (i == chunks.end() || !i->second.isInUse()) {
+        throw std::runtime_error("Arena: invalid or double free");
+    }
+
+    i->second.setInUse(false);
+
+    if (i != chunks.begin()) { // Absorb into previous chunk if exists and free
+        auto prev = i;
+        --prev;
+        if (!prev->second.isInUse()) {
+            // Absorb current chunk size into previous chunk.
+            prev->second.setSize(prev->second.getSize() + i->second.getSize());
+            // Erase current chunk. Erasing does not invalidate current
+            // iterators for a map, except for that pointing to the object
+            // itself, which will be overwritten in the next statement.
+            chunks.erase(i);
+            // From here on, the previous chunk is our current chunk.
+            i = prev;
+        }
+    }
+    auto next = i;
+    ++next;
+    if (next != chunks.end()) { // Absorb next chunk if exists and free
+        if (!next->second.isInUse()) {
+            // Absorb next chunk size into current chunk
+            i->second.setSize(i->second.getSize() + next->second.getSize());
+            // Erase next chunk.
+            chunks.erase(next);
+        }
+    }
+}
+
+Arena::Stats Arena::stats() const
+{
+    Arena::Stats r;
+    r.used = r.free = r.total = r.chunks_used = r.chunks_free = 0;
+    for (const auto& chunk: chunks) {
+        if (chunk.second.isInUse()) {
+            r.used += chunk.second.getSize();
+            r.chunks_used += 1;
+        } else {
+            r.free += chunk.second.getSize();
+            r.chunks_free += 1;
+        }
+        r.total += chunk.second.getSize();
+    }
+    return r;
+}
+
+#ifdef ARENA_DEBUG
+void Arena::walk() const
+{
+    for (const auto& chunk: chunks) {
+        std::cout <<
+            "0x" << std::hex << std::setw(16) << std::setfill('0') << chunk.first <<
+            " 0x" << std::hex << std::setw(16) << std::setfill('0') << chunk.second.getSize() <<
+            " 0x" << chunk.second.isInUse() << std::endl;
+    }
+    std::cout << std::endl;
+}
+#endif
+
+/*******************************************************************************/
+// Implementation: Win32LockedPageAllocator
+
+#ifdef WIN32
+/** LockedPageAllocator specialized for Windows.
+ */
+class Win32LockedPageAllocator: public LockedPageAllocator
+{
+public:
+    Win32LockedPageAllocator();
+    void* AllocateLocked(size_t len, bool *lockingSuccess);
+    void FreeLocked(void* addr, size_t len);
+    size_t GetLimit();
+private:
+    size_t page_size;
+};
+
+Win32LockedPageAllocator::Win32LockedPageAllocator()
+{
+    // Determine system page size in bytes
+    SYSTEM_INFO sSysInfo;
+    GetSystemInfo(&sSysInfo);
+    page_size = sSysInfo.dwPageSize;
+}
+void *Win32LockedPageAllocator::AllocateLocked(size_t len, bool *lockingSuccess)
+{
+    len = align_up(len, page_size);
+    void *addr = VirtualAlloc(nullptr, len, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
+    if (addr) {
+        // VirtualLock is used to attempt to keep keying material out of swap. Note
+        // that it does not provide this as a guarantee, but, in practice, memory
+        // that has been VirtualLock'd almost never gets written to the pagefile
+        // except in rare circumstances where memory is extremely low.
+        *lockingSuccess = VirtualLock(const_cast<void*>(addr), len) != 0;
+    }
+    return addr;
+}
+void Win32LockedPageAllocator::FreeLocked(void* addr, size_t len)
+{
+    len = align_up(len, page_size);
+    memory_cleanse(addr, len);
+    VirtualUnlock(const_cast<void*>(addr), len);
+}
+
+size_t Win32LockedPageAllocator::GetLimit()
+{
+    // TODO is there a limit on windows, how to get it?
+    return std::numeric_limits<size_t>::max();
+}
+#endif
+
+/*******************************************************************************/
+// Implementation: PosixLockedPageAllocator
+
+#ifndef WIN32
+/** LockedPageAllocator specialized for OSes that don't try to be
+ * special snowflakes.
+ */
+class PosixLockedPageAllocator: public LockedPageAllocator
+{
+public:
+    PosixLockedPageAllocator();
+    void* AllocateLocked(size_t len, bool *lockingSuccess);
+    void FreeLocked(void* addr, size_t len);
+    size_t GetLimit();
+private:
+    size_t page_size;
+};
+
+PosixLockedPageAllocator::PosixLockedPageAllocator()
+{
+    // Determine system page size in bytes
+#if defined(PAGESIZE) // defined in limits.h
+    page_size = PAGESIZE;
+#else                 // assume some POSIX OS
+    page_size = sysconf(_SC_PAGESIZE);
+#endif
+}
+void *PosixLockedPageAllocator::AllocateLocked(size_t len, bool *lockingSuccess)
+{
+    void *addr;
+    len = align_up(len, page_size);
+    addr = mmap(nullptr, len, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
+    if (addr) {
+        *lockingSuccess = mlock(addr, len) == 0;
+    }
+    return addr;
+}
+void PosixLockedPageAllocator::FreeLocked(void* addr, size_t len)
+{
+    len = align_up(len, page_size);
+    memory_cleanse(addr, len);
+    munlock(addr, len);
+    munmap(addr, len);
+}
+size_t PosixLockedPageAllocator::GetLimit()
+{
+#ifdef RLIMIT_MEMLOCK
+    struct rlimit rlim;
+    if (getrlimit(RLIMIT_MEMLOCK, &rlim) == 0) {
+        if (rlim.rlim_cur != RLIM_INFINITY) {
+            return rlim.rlim_cur;
+        }
+    }
+#endif
+    return std::numeric_limits<size_t>::max();
+}
+#endif
+
+/*******************************************************************************/
+// Implementation: LockedPool
+
+LockedPool::LockedPool(std::unique_ptr<LockedPageAllocator> allocator_in, LockingFailed_Callback lf_cb_in):
+    allocator(std::move(allocator_in)), lf_cb(lf_cb_in), cumulative_bytes_locked(0)
+{
+}
+
+LockedPool::~LockedPool()
+{
+}
+void* LockedPool::alloc(size_t size)
+{
+    std::lock_guard<std::mutex> lock(mutex);
+    // Try allocating from each current arena
+    for (auto &arena: arenas) {
+        void *addr = arena.alloc(size);
+        if (addr) {
+            return addr;
+        }
+    }
+    // If that fails, create a new one
+    if (new_arena(ARENA_SIZE, ARENA_ALIGN)) {
+        return arenas.back().alloc(size);
+    }
+    return nullptr;
+}
+
+void LockedPool::free(void *ptr)
+{
+    std::lock_guard<std::mutex> lock(mutex);
+    // TODO we can do better than this linear search by keeping a map of arena
+    // extents to arena, and looking up the address.
+    for (auto &arena: arenas) {
+        if (arena.addressInArena(ptr)) {
+            arena.free(ptr);
+            return;
+        }
+    }
+    throw std::runtime_error("LockedPool: invalid address not pointing to any arena");
+}
+
+LockedPool::Stats LockedPool::stats() const
+{
+    std::lock_guard<std::mutex> lock(mutex);
+    LockedPool::Stats r;
+    r.used = r.free = r.total = r.chunks_used = r.chunks_free = 0;
+    r.locked = cumulative_bytes_locked;
+    for (const auto &arena: arenas) {
+        Arena::Stats i = arena.stats();
+        r.used += i.used;
+        r.free += i.free;
+        r.total += i.total;
+        r.chunks_used += i.chunks_used;
+        r.chunks_free += i.chunks_free;
+    }
+    return r;
+}
+
+bool LockedPool::new_arena(size_t size, size_t align)
+{
+    bool locked;
+    // If this is the first arena, handle this specially: Cap the upper size
+    // by the process limit. This makes sure that the first arena will at least
+    // be locked. An exception to this is if the process limit is 0:
+    // in this case no memory can be locked at all so we'll skip past this logic.
+    if (arenas.empty()) {
+        size_t limit = allocator->GetLimit();
+        if (limit > 0) {
+            size = std::min(size, limit);
+        }
+    }
+    void *addr = allocator->AllocateLocked(size, &locked);
+    if (!addr) {
+        return false;
+    }
+    if (locked) {
+        cumulative_bytes_locked += size;
+    } else if (lf_cb) { // Call the locking-failed callback if locking failed
+        if (!lf_cb()) { // If the callback returns false, free the memory and fail, otherwise consider the user warned and proceed.
+            allocator->FreeLocked(addr, size);
+            return false;
+        }
+    }
+    arenas.emplace_back(allocator.get(), addr, size, align);
+    return true;
+}
+
+LockedPool::LockedPageArena::LockedPageArena(LockedPageAllocator *allocator_in, void *base_in, size_t size_in, size_t align_in):
+    Arena(base_in, size_in, align_in), base(base_in), size(size_in), allocator(allocator_in)
+{
+}
+LockedPool::LockedPageArena::~LockedPageArena()
+{
+    allocator->FreeLocked(base, size);
+}
+
+/*******************************************************************************/
+// Implementation: LockedPoolManager
+//
+LockedPoolManager::LockedPoolManager(std::unique_ptr<LockedPageAllocator> allocator):
+    LockedPool(std::move(allocator), &LockedPoolManager::LockingFailed)
+{
+}
+
+bool LockedPoolManager::LockingFailed()
+{
+    // TODO: log something but how? without including util.h
+    return true;
+}
+
+void LockedPoolManager::CreateInstance()
+{
+    // Using a local static instance guarantees that the object is initialized
+    // when it's first needed and also deinitialized after all objects that use
+    // it are done with it. I can think of one unlikely scenario where we may
+    // have a static deinitialization order/problem, but the check in
+    // LockedPoolManagerBase's destructor helps us detect if that ever happens.
+#ifdef WIN32
+    std::unique_ptr<LockedPageAllocator> allocator(new Win32LockedPageAllocator());
+#else
+    std::unique_ptr<LockedPageAllocator> allocator(new PosixLockedPageAllocator());
+#endif
+    static LockedPoolManager instance(std::move(allocator));
+    LockedPoolManager::_instance = &instance;
+}
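As a sanity check of the arena arithmetic above (not part of the commit): align_up() rounds every request up to the chunk alignment, which is why the unit tests later in this diff expect a 1000-byte allocation to consume 1008 bytes when the arena alignment is 16. A standalone, illustrative copy of the helper with a few worked values:

// Illustrative only: mirrors the align_up() helper defined above.
#include <cassert>
#include <cstddef>

static inline size_t align_up(size_t x, size_t align)
{
    return (x + align - 1) & ~(align - 1);
}

int main()
{
    assert(align_up(1000, 16) == 1008); // matches the arena_tests expectation
    assert(align_up(1024, 16) == 1024); // already-aligned sizes are unchanged
    assert(align_up(1, 16) == 16);      // minimum granularity is one alignment unit
    return 0;
}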
src/support/lockedpool.h (new file, 251 lines)
@@ -0,0 +1,251 @@
+// Copyright (c) 2016 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#ifndef BITCOIN_SUPPORT_LOCKEDPOOL_H
+#define BITCOIN_SUPPORT_LOCKEDPOOL_H
+
+#include <stdint.h>
+#include <list>
+#include <map>
+#include <mutex>
+#include <memory>
+
+/**
+ * OS-dependent allocation and deallocation of locked/pinned memory pages.
+ * Abstract base class.
+ */
+class LockedPageAllocator
+{
+public:
+    virtual ~LockedPageAllocator() {}
+    /** Allocate and lock memory pages.
+     * If len is not a multiple of the system page size, it is rounded up.
+     * Returns 0 in case of allocation failure.
+     *
+     * If locking the memory pages could not be accomplished it will still
+     * return the memory, however the lockingSuccess flag will be false.
+     * lockingSuccess is undefined if the allocation fails.
+     */
+    virtual void* AllocateLocked(size_t len, bool *lockingSuccess) = 0;
+
+    /** Unlock and free memory pages.
+     * Clear the memory before unlocking.
+     */
+    virtual void FreeLocked(void* addr, size_t len) = 0;
+
+    /** Get the total limit on the amount of memory that may be locked by this
+     * process, in bytes. Return size_t max if there is no limit or the limit
+     * is unknown. Return 0 if no memory can be locked at all.
+     */
+    virtual size_t GetLimit() = 0;
+};
+
+/* An arena manages a contiguous region of memory by dividing it into
+ * chunks.
+ */
+class Arena
+{
+public:
+    Arena(void *base, size_t size, size_t alignment);
+    virtual ~Arena();
+
+    /** A chunk of memory.
+     */
+    struct Chunk
+    {
+        /** Most significant bit of size_t. This is used to mark
+         * in-usedness of chunk.
+         */
+        const static size_t SIZE_MSB = 1LLU << ((sizeof(size_t)*8)-1);
+        /** Maximum size of a chunk */
+        const static size_t MAX_SIZE = SIZE_MSB - 1;
+
+        Chunk(size_t size_in, bool used_in):
+            size(size_in | (used_in ? SIZE_MSB : 0)) {}
+
+        bool isInUse() const { return size & SIZE_MSB; }
+        void setInUse(bool used_in) { size = (size & ~SIZE_MSB) | (used_in ? SIZE_MSB : 0); }
+        size_t getSize() const { return size & ~SIZE_MSB; }
+        void setSize(size_t size_in) { size = (size & SIZE_MSB) | size_in; }
+    private:
+        size_t size;
+    };
+    /** Memory statistics. */
+    struct Stats
+    {
+        size_t used;
+        size_t free;
+        size_t total;
+        size_t chunks_used;
+        size_t chunks_free;
+    };
+
+    /** Allocate size bytes from this arena.
+     * Returns pointer on success, or 0 if memory is full or
+     * the application tried to allocate 0 bytes.
+     */
+    void* alloc(size_t size);
+
+    /** Free a previously allocated chunk of memory.
+     * Freeing the zero pointer has no effect.
+     * Raises std::runtime_error in case of error.
+     */
+    void free(void *ptr);
+
+    /** Get arena usage statistics */
+    Stats stats() const;
+
+#ifdef ARENA_DEBUG
+    void walk() const;
+#endif
+
+    /** Return whether a pointer points inside this arena.
+     * This returns base <= ptr < (base+size) so only use it for (inclusive)
+     * chunk starting addresses.
+     */
+    bool addressInArena(void *ptr) const { return ptr >= base && ptr < end; }
+private:
+    Arena(const Arena& other) = delete; // non construction-copyable
+    Arena& operator=(const Arena&) = delete; // non copyable
+
+    /** Map of chunk address to chunk information. This class makes use of the
+     * sorted order to merge previous and next chunks during deallocation.
+     */
+    std::map<char*, Chunk> chunks;
+    /** Base address of arena */
+    char* base;
+    /** End address of arena */
+    char* end;
+    /** Minimum chunk alignment */
+    size_t alignment;
+};
+
+/** Pool for locked memory chunks.
+ *
+ * To avoid sensitive key data from being swapped to disk, the memory in this pool
+ * is locked/pinned.
+ *
+ * An arena manages a contiguous region of memory. The pool starts out with one arena
+ * but can grow to multiple arenas if the need arises.
+ *
+ * Unlike a normal C heap, the administrative structures are seperate from the managed
+ * memory. This has been done as the sizes and bases of objects are not in themselves sensitive
+ * information, as to conserve precious locked memory. In some operating systems
+ * the amount of memory that can be locked is small.
+ */
+class LockedPool
+{
+public:
+    /** Size of one arena of locked memory. This is a compromise.
+     * Do not set this too low, as managing many arenas will increase
+     * allocation and deallocation overhead. Setting it too high allocates
+     * more locked memory from the OS than strictly necessary.
+     */
+    static const size_t ARENA_SIZE = 256*1024;
+    /** Chunk alignment. Another compromise. Setting this too high will waste
+     * memory, setting it too low will facilitate fragmentation.
+     */
+    static const size_t ARENA_ALIGN = 16;
+
+    /** Callback when allocation succeeds but locking fails.
+     */
+    typedef bool (*LockingFailed_Callback)();
+
+    /** Memory statistics. */
+    struct Stats
+    {
+        size_t used;
+        size_t free;
+        size_t total;
+        size_t locked;
+        size_t chunks_used;
+        size_t chunks_free;
+    };
+
+    /** Create a new LockedPool. This takes ownership of the MemoryPageLocker,
+     * you can only instantiate this with LockedPool(std::move(...)).
+     *
+     * The second argument is an optional callback when locking a newly allocated arena failed.
+     * If this callback is provided and returns false, the allocation fails (hard fail), if
+     * it returns true the allocation proceeds, but it could warn.
+     */
+    LockedPool(std::unique_ptr<LockedPageAllocator> allocator, LockingFailed_Callback lf_cb_in = 0);
+    ~LockedPool();
+
+    /** Allocate size bytes from this arena.
+     * Returns pointer on success, or 0 if memory is full or
+     * the application tried to allocate 0 bytes.
+     */
+    void* alloc(size_t size);
+
+    /** Free a previously allocated chunk of memory.
+     * Freeing the zero pointer has no effect.
+     * Raises std::runtime_error in case of error.
+     */
+    void free(void *ptr);
+
+    /** Get pool usage statistics */
+    Stats stats() const;
+private:
+    LockedPool(const LockedPool& other) = delete; // non construction-copyable
+    LockedPool& operator=(const LockedPool&) = delete; // non copyable
+
+    std::unique_ptr<LockedPageAllocator> allocator;
+
+    /** Create an arena from locked pages */
+    class LockedPageArena: public Arena
+    {
+    public:
+        LockedPageArena(LockedPageAllocator *alloc_in, void *base_in, size_t size, size_t align);
+        ~LockedPageArena();
+    private:
+        void *base;
+        size_t size;
+        LockedPageAllocator *allocator;
+    };
+
+    bool new_arena(size_t size, size_t align);
+
+    std::list<LockedPageArena> arenas;
+    LockingFailed_Callback lf_cb;
+    size_t cumulative_bytes_locked;
+    /** Mutex protects access to this pool's data structures, including arenas.
+     */
+    mutable std::mutex mutex;
+};
+
+/**
+ * Singleton class to keep track of locked (ie, non-swappable) memory, for use in
+ * std::allocator templates.
+ *
+ * Some implementations of the STL allocate memory in some constructors (i.e., see
+ * MSVC's vector<T> implementation where it allocates 1 byte of memory in the allocator.)
+ * Due to the unpredictable order of static initializers, we have to make sure the
+ * LockedPoolManager instance exists before any other STL-based objects that use
+ * secure_allocator are created. So instead of having LockedPoolManager also be
+ * static-initialized, it is created on demand.
+ */
+class LockedPoolManager : public LockedPool
+{
+public:
+    /** Return the current instance, or create it once */
+    static LockedPoolManager& Instance()
+    {
+        std::call_once(LockedPoolManager::init_flag, LockedPoolManager::CreateInstance);
+        return *LockedPoolManager::_instance;
+    }
+
+private:
+    LockedPoolManager(std::unique_ptr<LockedPageAllocator> allocator);
+
+    /** Create a new LockedPoolManager specialized to the OS */
+    static void CreateInstance();
+    /** Called when locking fails, warn the user here */
+    static bool LockingFailed();
+
+    static LockedPoolManager* _instance;
+    static std::once_flag init_flag;
+};
+
+#endif // BITCOIN_SUPPORT_LOCKEDPOOL_H
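For clarity, a minimal sketch of how the pool is used directly, under the API declared above (normally callers go through secure_allocator instead; the Sketch function is hypothetical and error handling is omitted):

#include "support/lockedpool.h"

void Sketch()
{
    LockedPoolManager &pool = LockedPoolManager::Instance();

    void *p = pool.alloc(64);          // 64 bytes of locked (non-swappable) memory
    if (p) {
        // ... store key material in p ...
        LockedPool::Stats s = pool.stats();
        (void)s.locked;                // bytes that were successfully mlock'd / VirtualLock'd
        pool.free(p);                  // returns the chunk to its arena and merges free neighbours
    }
}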
@@ -1,70 +0,0 @@
-// Copyright (c) 2009-2015 The Bitcoin Core developers
-// Distributed under the MIT software license, see the accompanying
-// file COPYING or http://www.opensource.org/licenses/mit-license.php.
-
-#include "support/pagelocker.h"
-
-#if defined(HAVE_CONFIG_H)
-#include "config/dash-config.h"
-#endif
-
-#ifdef WIN32
-#ifdef _WIN32_WINNT
-#undef _WIN32_WINNT
-#endif
-#define _WIN32_WINNT 0x0501
-#define WIN32_LEAN_AND_MEAN 1
-#ifndef NOMINMAX
-#define NOMINMAX
-#endif
-#include <windows.h>
-// This is used to attempt to keep keying material out of swap
-// Note that VirtualLock does not provide this as a guarantee on Windows,
-// but, in practice, memory that has been VirtualLock'd almost never gets written to
-// the pagefile except in rare circumstances where memory is extremely low.
-#else
-#include <sys/mman.h>
-#include <limits.h> // for PAGESIZE
-#include <unistd.h> // for sysconf
-#endif
-
-LockedPageManager* LockedPageManager::_instance = NULL;
-boost::once_flag LockedPageManager::init_flag = BOOST_ONCE_INIT;
-
-/** Determine system page size in bytes */
-static inline size_t GetSystemPageSize()
-{
-    size_t page_size;
-#if defined(WIN32)
-    SYSTEM_INFO sSysInfo;
-    GetSystemInfo(&sSysInfo);
-    page_size = sSysInfo.dwPageSize;
-#elif defined(PAGESIZE) // defined in limits.h
-    page_size = PAGESIZE;
-#else // assume some POSIX OS
-    page_size = sysconf(_SC_PAGESIZE);
-#endif
-    return page_size;
-}
-
-bool MemoryPageLocker::Lock(const void* addr, size_t len)
-{
-#ifdef WIN32
-    return VirtualLock(const_cast<void*>(addr), len) != 0;
-#else
-    return mlock(addr, len) == 0;
-#endif
-}
-
-bool MemoryPageLocker::Unlock(const void* addr, size_t len)
-{
-#ifdef WIN32
-    return VirtualUnlock(const_cast<void*>(addr), len) != 0;
-#else
-    return munlock(addr, len) == 0;
-#endif
-}
-
-LockedPageManager::LockedPageManager() : LockedPageManagerBase<MemoryPageLocker>(GetSystemPageSize())
-{
-}
@@ -1,177 +0,0 @@
-// Copyright (c) 2009-2010 Satoshi Nakamoto
-// Copyright (c) 2009-2015 The Bitcoin Core developers
-// Distributed under the MIT software license, see the accompanying
-// file COPYING or http://www.opensource.org/licenses/mit-license.php.
-
-#ifndef BITCOIN_SUPPORT_PAGELOCKER_H
-#define BITCOIN_SUPPORT_PAGELOCKER_H
-
-#include "support/cleanse.h"
-
-#include <map>
-
-#include <boost/thread/mutex.hpp>
-#include <boost/thread/once.hpp>
-
-/**
- * Thread-safe class to keep track of locked (ie, non-swappable) memory pages.
- *
- * Memory locks do not stack, that is, pages which have been locked several times by calls to mlock()
- * will be unlocked by a single call to munlock(). This can result in keying material ending up in swap when
- * those functions are used naively. This class simulates stacking memory locks by keeping a counter per page.
- *
- * @note By using a map from each page base address to lock count, this class is optimized for
- * small objects that span up to a few pages, mostly smaller than a page. To support large allocations,
- * something like an interval tree would be the preferred data structure.
- */
-template <class Locker>
-class LockedPageManagerBase
-{
-public:
-    LockedPageManagerBase(size_t _page_size) : page_size(_page_size)
-    {
-        // Determine bitmask for extracting page from address
-        assert(!(_page_size & (_page_size - 1))); // size must be power of two
-        page_mask = ~(_page_size - 1);
-    }
-
-    ~LockedPageManagerBase()
-    {
-    }
-
-    // For all pages in affected range, increase lock count
-    void LockRange(void* p, size_t size)
-    {
-        boost::mutex::scoped_lock lock(mutex);
-        if (!size)
-            return;
-        const size_t base_addr = reinterpret_cast<size_t>(p);
-        const size_t start_page = base_addr & page_mask;
-        const size_t end_page = (base_addr + size - 1) & page_mask;
-        for (size_t page = start_page; page <= end_page; page += page_size) {
-            Histogram::iterator it = histogram.find(page);
-            if (it == histogram.end()) // Newly locked page
-            {
-                locker.Lock(reinterpret_cast<void*>(page), page_size);
-                histogram.insert(std::make_pair(page, 1));
-            } else // Page was already locked; increase counter
-            {
-                it->second += 1;
-            }
-        }
-    }
-
-    // For all pages in affected range, decrease lock count
-    void UnlockRange(void* p, size_t size)
-    {
-        boost::mutex::scoped_lock lock(mutex);
-        if (!size)
-            return;
-        const size_t base_addr = reinterpret_cast<size_t>(p);
-        const size_t start_page = base_addr & page_mask;
-        const size_t end_page = (base_addr + size - 1) & page_mask;
-        for (size_t page = start_page; page <= end_page; page += page_size) {
-            Histogram::iterator it = histogram.find(page);
-            assert(it != histogram.end()); // Cannot unlock an area that was not locked
-            // Decrease counter for page, when it is zero, the page will be unlocked
-            it->second -= 1;
-            if (it->second == 0) // Nothing on the page anymore that keeps it locked
-            {
-                // Unlock page and remove the count from histogram
-                locker.Unlock(reinterpret_cast<void*>(page), page_size);
-                histogram.erase(it);
-            }
-        }
-    }
-
-    // Get number of locked pages for diagnostics
-    int GetLockedPageCount()
-    {
-        boost::mutex::scoped_lock lock(mutex);
-        return histogram.size();
-    }
-
-private:
-    Locker locker;
-    boost::mutex mutex;
-    size_t page_size, page_mask;
-    // map of page base address to lock count
-    typedef std::map<size_t, int> Histogram;
-    Histogram histogram;
-};
-
-/**
- * OS-dependent memory page locking/unlocking.
- * Defined as policy class to make stubbing for test possible.
- */
-class MemoryPageLocker
-{
-public:
-    /** Lock memory pages.
-     * addr and len must be a multiple of the system page size
-     */
-    bool Lock(const void* addr, size_t len);
-    /** Unlock memory pages.
-     * addr and len must be a multiple of the system page size
-     */
-    bool Unlock(const void* addr, size_t len);
-};
-
-/**
- * Singleton class to keep track of locked (ie, non-swappable) memory pages, for use in
- * std::allocator templates.
- *
- * Some implementations of the STL allocate memory in some constructors (i.e., see
- * MSVC's vector<T> implementation where it allocates 1 byte of memory in the allocator.)
- * Due to the unpredictable order of static initializers, we have to make sure the
- * LockedPageManager instance exists before any other STL-based objects that use
- * secure_allocator are created. So instead of having LockedPageManager also be
- * static-initialized, it is created on demand.
- */
-class LockedPageManager : public LockedPageManagerBase<MemoryPageLocker>
-{
-public:
-    static LockedPageManager& Instance()
-    {
-        boost::call_once(LockedPageManager::CreateInstance, LockedPageManager::init_flag);
-        return *LockedPageManager::_instance;
-    }
-
-private:
-    LockedPageManager();
-
-    static void CreateInstance()
-    {
-        // Using a local static instance guarantees that the object is initialized
-        // when it's first needed and also deinitialized after all objects that use
-        // it are done with it. I can think of one unlikely scenario where we may
-        // have a static deinitialization order/problem, but the check in
-        // LockedPageManagerBase's destructor helps us detect if that ever happens.
-        static LockedPageManager instance;
-        LockedPageManager::_instance = &instance;
-    }
-
-    static LockedPageManager* _instance;
-    static boost::once_flag init_flag;
-};
-
-//
-// Functions for directly locking/unlocking memory objects.
-// Intended for non-dynamically allocated structures.
-//
-template <typename T>
-void LockObject(const T& t)
-{
-    LockedPageManager::Instance().LockRange((void*)(&t), sizeof(T));
-}
-
-template <typename T>
-void UnlockObject(const T& t)
-{
-    memory_cleanse((void*)(&t), sizeof(T));
-    LockedPageManager::Instance().UnlockRange((void*)(&t), sizeof(T));
-}
-
-#endif // BITCOIN_SUPPORT_PAGELOCKER_H
@ -11,110 +11,214 @@
|
|||||||
|
|
||||||
BOOST_FIXTURE_TEST_SUITE(allocator_tests, BasicTestingSetup)
|
BOOST_FIXTURE_TEST_SUITE(allocator_tests, BasicTestingSetup)
|
||||||
|
|
||||||
// Dummy memory page locker for platform independent tests
|
BOOST_AUTO_TEST_CASE(arena_tests)
|
||||||
static const void *last_lock_addr, *last_unlock_addr;
|
{
|
||||||
static size_t last_lock_len, last_unlock_len;
|
// Fake memory base address for testing
|
||||||
class TestLocker
|
// without actually using memory.
|
||||||
|
void *synth_base = reinterpret_cast<void*>(0x08000000);
|
||||||
|
const size_t synth_size = 1024*1024;
|
||||||
|
Arena b(synth_base, synth_size, 16);
|
||||||
|
void *chunk = b.alloc(1000);
|
||||||
|
#ifdef ARENA_DEBUG
|
||||||
|
b.walk();
|
||||||
|
#endif
|
||||||
|
BOOST_CHECK(chunk != nullptr);
|
||||||
|
BOOST_CHECK(b.stats().used == 1008); // Aligned to 16
|
||||||
|
BOOST_CHECK(b.stats().total == synth_size); // Nothing has disappeared?
|
||||||
|
b.free(chunk);
|
||||||
|
#ifdef ARENA_DEBUG
|
||||||
|
b.walk();
|
||||||
|
#endif
|
||||||
|
BOOST_CHECK(b.stats().used == 0);
|
||||||
|
BOOST_CHECK(b.stats().free == synth_size);
|
||||||
|
try { // Test exception on double-free
|
||||||
|
b.free(chunk);
|
||||||
|
BOOST_CHECK(0);
|
||||||
|
} catch(std::runtime_error &)
|
||||||
|
{
|
||||||
|
}
|
||||||
|
|
||||||
|
void *a0 = b.alloc(128);
|
||||||
|
BOOST_CHECK(a0 == synth_base); // first allocation must start at beginning
|
||||||
|
void *a1 = b.alloc(256);
|
||||||
|
void *a2 = b.alloc(512);
|
||||||
|
BOOST_CHECK(b.stats().used == 896);
|
||||||
|
BOOST_CHECK(b.stats().total == synth_size);
|
||||||
|
#ifdef ARENA_DEBUG
|
||||||
|
b.walk();
|
||||||
|
#endif
|
||||||
|
b.free(a0);
|
||||||
|
#ifdef ARENA_DEBUG
|
||||||
|
b.walk();
|
||||||
|
#endif
|
||||||
|
BOOST_CHECK(b.stats().used == 768);
|
||||||
|
b.free(a1);
|
||||||
|
BOOST_CHECK(b.stats().used == 512);
|
||||||
|
void *a3 = b.alloc(128);
|
||||||
|
#ifdef ARENA_DEBUG
|
||||||
|
b.walk();
|
||||||
|
#endif
|
||||||
|
BOOST_CHECK(b.stats().used == 640);
|
||||||
|
b.free(a2);
|
||||||
|
BOOST_CHECK(b.stats().used == 128);
|
||||||
|
b.free(a3);
|
||||||
|
BOOST_CHECK(b.stats().used == 0);
|
||||||
|
BOOST_CHECK(b.stats().total == synth_size);
|
||||||
|
BOOST_CHECK(b.stats().free == synth_size);
|
||||||
|
|
||||||
|
std::vector<void*> addr;
|
||||||
|
BOOST_CHECK(b.alloc(0) == nullptr); // allocating 0 always returns nullptr
|
||||||
|
#ifdef ARENA_DEBUG
|
||||||
|
b.walk();
|
||||||
|
#endif
|
||||||
|
// Sweeping allocate all memory
|
||||||
|
for (int x=0; x<1024; ++x)
|
||||||
|
addr.push_back(b.alloc(1024));
|
||||||
|
BOOST_CHECK(addr[0] == synth_base); // first allocation must start at beginning
|
||||||
|
BOOST_CHECK(b.stats().free == 0);
|
||||||
|
BOOST_CHECK(b.alloc(1024) == nullptr); // memory is full, this must return nullptr
|
||||||
|
BOOST_CHECK(b.alloc(0) == nullptr);
|
||||||
|
for (int x=0; x<1024; ++x)
|
||||||
|
b.free(addr[x]);
|
||||||
|
addr.clear();
|
||||||
|
BOOST_CHECK(b.stats().total == synth_size);
|
||||||
|
BOOST_CHECK(b.stats().free == synth_size);
|
||||||
|
|
||||||
|
// Now in the other direction...
|
||||||
|
for (int x=0; x<1024; ++x)
|
||||||
|
addr.push_back(b.alloc(1024));
|
||||||
|
for (int x=0; x<1024; ++x)
|
||||||
|
b.free(addr[1023-x]);
|
||||||
|
addr.clear();
|
||||||
|
|
||||||
|
// Now allocate in smaller unequal chunks, then deallocate haphazardly
|
||||||
|
// Not all the chunks will succeed allocating, but freeing nullptr is
|
||||||
|
// allowed so that is no problem.
|
||||||
|
for (int x=0; x<2048; ++x)
|
||||||
|
addr.push_back(b.alloc(x+1));
|
||||||
|
for (int x=0; x<2048; ++x)
|
||||||
|
b.free(addr[((x*23)%2048)^242]);
|
||||||
|
addr.clear();
|
||||||
|
|
||||||
|
// Go entirely wild: free and alloc interleaved,
|
||||||
|
// generate targets and sizes using pseudo-randomness.
|
||||||
|
for (int x=0; x<2048; ++x)
|
||||||
|
addr.push_back(0);
|
||||||
|
uint32_t s = 0x12345678;
|
||||||
|
for (int x=0; x<5000; ++x) {
|
||||||
|
int idx = s & (addr.size()-1);
|
||||||
|
if (s & 0x80000000) {
|
||||||
|
b.free(addr[idx]);
|
||||||
|
addr[idx] = 0;
|
||||||
|
} else if(!addr[idx]) {
|
||||||
|
addr[idx] = b.alloc((s >> 16) & 2047);
|
||||||
|
}
|
||||||
|
bool lsb = s & 1;
|
||||||
|
s >>= 1;
|
||||||
|
if (lsb)
|
||||||
|
s ^= 0xf00f00f0; // LFSR period 0xf7ffffe0
|
||||||
|
}
|
||||||
|
for (void *ptr: addr)
|
||||||
|
b.free(ptr);
|
||||||
|
addr.clear();
|
||||||
|
|
||||||
|
BOOST_CHECK(b.stats().total == synth_size);
|
||||||
|
BOOST_CHECK(b.stats().free == synth_size);
|
||||||
|
}
|
-{
-public:
-    bool Lock(const void *addr, size_t len)
-    {
-        last_lock_addr = addr;
-        last_lock_len = len;
-        return true;
-    }
-    bool Unlock(const void *addr, size_t len)
-    {
-        last_unlock_addr = addr;
-        last_unlock_len = len;
-        return true;
-    }
-};
+
+/** Mock LockedPageAllocator for testing */
+class TestLockedPageAllocator: public LockedPageAllocator
+{
+public:
+    TestLockedPageAllocator(int count_in, int lockedcount_in): count(count_in), lockedcount(lockedcount_in) {}
+    void* AllocateLocked(size_t len, bool *lockingSuccess)
+    {
+        *lockingSuccess = false;
+        if (count > 0) {
+            --count;
+
+            if (lockedcount > 0) {
+                --lockedcount;
+                *lockingSuccess = true;
+            }
+
+            return reinterpret_cast<void*>(0x08000000 + (count<<24)); // Fake address, do not actually use this memory
+        }
+        return 0;
+    }
+    void FreeLocked(void* addr, size_t len)
+    {
+    }
+    size_t GetLimit()
+    {
+        return std::numeric_limits<size_t>::max();
+    }
+private:
+    int count;
+    int lockedcount;
+};
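The mock above never touches real memory; it hands back fake addresses so the pool's arena accounting can be tested deterministically. For orientation only, a POSIX-flavoured LockedPageAllocator would look roughly like the sketch below. This is illustrative and not the allocator that ships in support/lockedpool.cpp, which handles platform differences and presumably reports the real locked-memory resource limit from GetLimit().

#include "support/lockedpool.h"

#include <cstddef>
#include <limits>
#include <sys/mman.h>

// Illustrative sketch of a real allocator behind the same interface as the
// mock above; not the shipped implementation.
class ExamplePosixLockedPageAllocator: public LockedPageAllocator
{
public:
    void* AllocateLocked(size_t len, bool *lockingSuccess)
    {
        void *addr = mmap(nullptr, len, PROT_READ|PROT_WRITE,
                          MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
        if (addr == MAP_FAILED)
            return nullptr;
        // Locking is best effort: the pool keeps working if mlock() fails,
        // it just cannot promise the pages stay out of swap.
        *lockingSuccess = (mlock(addr, len) == 0);
        return addr;
    }
    void FreeLocked(void* addr, size_t len)
    {
        munlock(addr, len);
        munmap(addr, len);
    }
    size_t GetLimit()
    {
        // Kept simple for the sketch; a real allocator would report the
        // actual locked-memory limit here.
        return std::numeric_limits<size_t>::max();
    }
};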
 
-BOOST_AUTO_TEST_CASE(test_LockedPageManagerBase)
-{
-    const size_t test_page_size = 4096;
-    LockedPageManagerBase<TestLocker> lpm(test_page_size);
-    size_t addr;
-    last_lock_addr = last_unlock_addr = 0;
-    last_lock_len = last_unlock_len = 0;
-
-    /* Try large number of small objects */
-    addr = 0;
-    for(int i=0; i<1000; ++i)
-    {
-        lpm.LockRange(reinterpret_cast<void*>(addr), 33);
-        addr += 33;
-    }
-    /* Try small number of page-sized objects, straddling two pages */
-    addr = test_page_size*100 + 53;
-    for(int i=0; i<100; ++i)
-    {
-        lpm.LockRange(reinterpret_cast<void*>(addr), test_page_size);
-        addr += test_page_size;
-    }
-    /* Try small number of page-sized objects aligned to exactly one page */
-    addr = test_page_size*300;
-    for(int i=0; i<100; ++i)
-    {
-        lpm.LockRange(reinterpret_cast<void*>(addr), test_page_size);
-        addr += test_page_size;
-    }
-    /* one very large object, straddling pages */
-    lpm.LockRange(reinterpret_cast<void*>(test_page_size*600+1), test_page_size*500);
-    BOOST_CHECK(last_lock_addr == reinterpret_cast<void*>(test_page_size*(600+500)));
-    /* one very large object, page aligned */
-    lpm.LockRange(reinterpret_cast<void*>(test_page_size*1200), test_page_size*500-1);
-    BOOST_CHECK(last_lock_addr == reinterpret_cast<void*>(test_page_size*(1200+500-1)));
-
-    BOOST_CHECK(lpm.GetLockedPageCount() == (
-        (1000*33+test_page_size-1)/test_page_size + // small objects
-        101 + 100 + // page-sized objects
-        501 + 500)); // large objects
-    BOOST_CHECK((last_lock_len & (test_page_size-1)) == 0); // always lock entire pages
-    BOOST_CHECK(last_unlock_len == 0); // nothing unlocked yet
+BOOST_AUTO_TEST_CASE(lockedpool_tests_mock)
+{
+    // Test over three virtual arenas, of which one will succeed being locked
+    std::unique_ptr<LockedPageAllocator> x(new TestLockedPageAllocator(3, 1));
+    LockedPool pool(std::move(x));
+    BOOST_CHECK(pool.stats().total == 0);
+    BOOST_CHECK(pool.stats().locked == 0);
+
+    void *a0 = pool.alloc(LockedPool::ARENA_SIZE / 2);
+    BOOST_CHECK(a0);
+    BOOST_CHECK(pool.stats().locked == LockedPool::ARENA_SIZE);
+    void *a1 = pool.alloc(LockedPool::ARENA_SIZE / 2);
+    BOOST_CHECK(a1);
+    void *a2 = pool.alloc(LockedPool::ARENA_SIZE / 2);
+    BOOST_CHECK(a2);
+    void *a3 = pool.alloc(LockedPool::ARENA_SIZE / 2);
+    BOOST_CHECK(a3);
+    void *a4 = pool.alloc(LockedPool::ARENA_SIZE / 2);
+    BOOST_CHECK(a4);
+    void *a5 = pool.alloc(LockedPool::ARENA_SIZE / 2);
+    BOOST_CHECK(a5);
+    // We've passed a count of three arenas, so this allocation should fail
+    void *a6 = pool.alloc(16);
+    BOOST_CHECK(!a6);
+
+    pool.free(a0);
+    pool.free(a2);
+    pool.free(a4);
+    pool.free(a1);
+    pool.free(a3);
+    pool.free(a5);
+    BOOST_CHECK(pool.stats().total == 3*LockedPool::ARENA_SIZE);
+    BOOST_CHECK(pool.stats().locked == LockedPool::ARENA_SIZE);
+    BOOST_CHECK(pool.stats().used == 0);
+}
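The arithmetic behind the mock test: six allocations of ARENA_SIZE/2 make the pool request three arenas, and with lockedcount == 1 only the first AllocateLocked() call reports success, so stats().locked stays at one ARENA_SIZE while stats().total grows to three; the seventh allocation fails because the mock refuses a fourth arena. Code that wants to know whether the whole pool is page-locked can lean on the same counters, e.g. this hypothetical helper (not part of the patch):

#include "support/lockedpool.h"

// True when every arena the pool has obtained so far was successfully locked.
// Against the mock above this is true after the first arena and false once
// the second, unlockable arena appears.
static bool PoolFullyLocked(LockedPool& pool)
{
    LockedPool::Stats s = pool.stats();
    return s.locked == s.total;
}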
 
-    /* And unlock again */
-    addr = 0;
-    for(int i=0; i<1000; ++i)
-    {
-        lpm.UnlockRange(reinterpret_cast<void*>(addr), 33);
-        addr += 33;
-    }
-    addr = test_page_size*100 + 53;
-    for(int i=0; i<100; ++i)
-    {
-        lpm.UnlockRange(reinterpret_cast<void*>(addr), test_page_size);
-        addr += test_page_size;
-    }
-    addr = test_page_size*300;
-    for(int i=0; i<100; ++i)
-    {
-        lpm.UnlockRange(reinterpret_cast<void*>(addr), test_page_size);
-        addr += test_page_size;
-    }
-    lpm.UnlockRange(reinterpret_cast<void*>(test_page_size*600+1), test_page_size*500);
-    lpm.UnlockRange(reinterpret_cast<void*>(test_page_size*1200), test_page_size*500-1);
-
-    /* Check that everything is released */
-    BOOST_CHECK(lpm.GetLockedPageCount() == 0);
-
-    /* A few and unlocks of size zero (should have no effect) */
-    addr = 0;
-    for(int i=0; i<1000; ++i)
-    {
-        lpm.LockRange(reinterpret_cast<void*>(addr), 0);
-        addr += 1;
-    }
-    BOOST_CHECK(lpm.GetLockedPageCount() == 0);
-    addr = 0;
-    for(int i=0; i<1000; ++i)
-    {
-        lpm.UnlockRange(reinterpret_cast<void*>(addr), 0);
-        addr += 1;
-    }
-    BOOST_CHECK(lpm.GetLockedPageCount() == 0);
-    BOOST_CHECK((last_unlock_len & (test_page_size-1)) == 0); // always unlock entire pages
-}
+// These tests used the live LockedPoolManager object, this is also used
+// by other tests so the conditions are somewhat less controllable and thus the
+// tests are somewhat more error-prone.
+BOOST_AUTO_TEST_CASE(lockedpool_tests_live)
+{
+    LockedPoolManager &pool = LockedPoolManager::Instance();
+    LockedPool::Stats initial = pool.stats();
+
+    void *a0 = pool.alloc(16);
+    BOOST_CHECK(a0);
+    // Test reading and writing the allocated memory
+    *((uint32_t*)a0) = 0x1234;
+    BOOST_CHECK(*((uint32_t*)a0) == 0x1234);
+
+    pool.free(a0);
+    try { // Test exception on double-free
+        pool.free(a0);
+        BOOST_CHECK(0);
+    } catch(std::runtime_error &)
+    {
+    }
+    // If more than one new arena was allocated for the above tests, something is wrong
+    BOOST_CHECK(pool.stats().total <= (initial.total + LockedPool::ARENA_SIZE));
+    // Usage must be back to where it started
+    BOOST_CHECK(pool.stats().used == initial.used);
+}
 
 BOOST_AUTO_TEST_SUITE_END()
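The live test pins down the calling convention for the process-wide pool: allocate, use, free exactly once, with a double free raising std::runtime_error. In practice the patch reaches the pool through secure_allocator-backed containers (see the CCrypter changes below); the sketch here only makes that underlying contract explicit, as a hypothetical RAII holder that is not part of the patch:

#include "support/cleanse.h"
#include "support/lockedpool.h"

#include <cstddef>

// Hypothetical RAII holder for a buffer from the process-wide locked pool:
// cleanses the contents and returns the memory exactly once.
class SecureBuffer
{
public:
    explicit SecureBuffer(size_t size)
        : ptr(LockedPoolManager::Instance().alloc(size)), len(size) {}
    ~SecureBuffer()
    {
        if (ptr) {
            memory_cleanse(ptr, len);                // wipe the secret first
            LockedPoolManager::Instance().free(ptr); // then hand it back, once
        }
    }
    SecureBuffer(const SecureBuffer&) = delete;
    SecureBuffer& operator=(const SecureBuffer&) = delete;

    void* data() const { return ptr; }
    size_t size() const { return len; }

private:
    void* ptr;
    size_t len;
};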
@ -48,12 +48,12 @@ bool CCrypter::SetKeyFromPassphrase(const SecureString& strKeyData, const std::v
 
     int i = 0;
     if (nDerivationMethod == 0)
-        i = BytesToKeySHA512AES(chSalt, strKeyData, nRounds, chKey, chIV);
+        i = BytesToKeySHA512AES(chSalt, strKeyData, nRounds, vchKey.data(), vchIV.data());
 
     if (i != (int)WALLET_CRYPTO_KEY_SIZE)
     {
-        memory_cleanse(chKey, sizeof(chKey));
-        memory_cleanse(chIV, sizeof(chIV));
+        memory_cleanse(vchKey.data(), vchKey.size());
+        memory_cleanse(vchIV.data(), vchIV.size());
         return false;
     }
 
@ -66,8 +66,8 @@ bool CCrypter::SetKey(const CKeyingMaterial& chNewKey, const std::vector<unsigne
     if (chNewKey.size() != WALLET_CRYPTO_KEY_SIZE || chNewIV.size() != WALLET_CRYPTO_IV_SIZE)
         return false;
 
-    memcpy(&chKey[0], &chNewKey[0], sizeof chKey);
-    memcpy(&chIV[0], &chNewIV[0], sizeof chIV);
+    memcpy(vchKey.data(), chNewKey.data(), chNewKey.size());
+    memcpy(vchIV.data(), chNewIV.data(), chNewIV.size());
 
     fKeySet = true;
     return true;
@ -82,7 +82,7 @@ bool CCrypter::Encrypt(const CKeyingMaterial& vchPlaintext, std::vector<unsigned
     // n + AES_BLOCKSIZE bytes
     vchCiphertext.resize(vchPlaintext.size() + AES_BLOCKSIZE);
 
-    AES256CBCEncrypt enc(chKey, chIV, true);
+    AES256CBCEncrypt enc(vchKey.data(), vchIV.data(), true);
     size_t nLen = enc.Encrypt(&vchPlaintext[0], vchPlaintext.size(), &vchCiphertext[0]);
     if(nLen < vchPlaintext.size())
         return false;
@ -101,7 +101,7 @@ bool CCrypter::Decrypt(const std::vector<unsigned char>& vchCiphertext, CKeyingM
 
     vchPlaintext.resize(nLen);
 
-    AES256CBCDecrypt dec(chKey, chIV, true);
+    AES256CBCDecrypt dec(vchKey.data(), vchIV.data(), true);
     nLen = dec.Decrypt(&vchCiphertext[0], vchCiphertext.size(), &vchPlaintext[0]);
     if(nLen == 0)
         return false;
 
@ -77,8 +77,8 @@ class CCrypter
 {
     friend class wallet_crypto::TestCrypter; // for test access to chKey/chIV
 private:
-    unsigned char chKey[WALLET_CRYPTO_KEY_SIZE];
-    unsigned char chIV[WALLET_CRYPTO_IV_SIZE];
+    std::vector<unsigned char, secure_allocator<unsigned char>> vchKey;
+    std::vector<unsigned char, secure_allocator<unsigned char>> vchIV;
     bool fKeySet;
 
     int BytesToKeySHA512AES(const std::vector<unsigned char>& chSalt, const SecureString& strKeyData, int count, unsigned char *key,unsigned char *iv) const;
@ -91,28 +91,21 @@ public:
 
     void CleanKey()
     {
-        memory_cleanse(chKey, sizeof(chKey));
-        memory_cleanse(chIV, sizeof(chIV));
+        memory_cleanse(vchKey.data(), vchKey.size());
+        memory_cleanse(vchIV.data(), vchIV.size());
         fKeySet = false;
     }
 
     CCrypter()
     {
         fKeySet = false;
-
-        // Try to keep the key data out of swap (and be a bit over-careful to keep the IV that we don't even use out of swap)
-        // Note that this does nothing about suspend-to-disk (which will put all our key data on disk)
-        // Note as well that at no point in this program is any attempt made to prevent stealing of keys by reading the memory of the running process.
-        LockedPageManager::Instance().LockRange(&chKey[0], sizeof chKey);
-        LockedPageManager::Instance().LockRange(&chIV[0], sizeof chIV);
+        vchKey.resize(WALLET_CRYPTO_KEY_SIZE);
+        vchIV.resize(WALLET_CRYPTO_IV_SIZE);
     }
 
     ~CCrypter()
     {
         CleanKey();
-
-        LockedPageManager::Instance().UnlockRange(&chKey[0], sizeof chKey);
-        LockedPageManager::Instance().UnlockRange(&chIV[0], sizeof chIV);
     }
 };
 
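The header change above is the core of the wallet-side switch: fixed unsigned char arrays that had to be locked and unlocked by hand become vectors whose secure_allocator draws its storage from the locked pool and cleanses it on deallocation. A minimal sketch of the same pattern for other secret material (the alias, function name and literal sizes are mine, chosen only for illustration):

#include "support/allocators/secure.h"
#include "support/cleanse.h"

#include <vector>

// Hypothetical alias for byte buffers that should stay out of swap.
typedef std::vector<unsigned char, secure_allocator<unsigned char>> SecureBytes;

void ExampleUseOfSecureBytes()
{
    SecureBytes key(32); // illustrative sizes; the wallet uses WALLET_CRYPTO_KEY_SIZE
    SecureBytes iv(16);  // and WALLET_CRYPTO_IV_SIZE here

    // ... derive the key and pass key.data()/iv.data() to the AES helpers ...

    // Explicit wipe before the vectors go out of scope, mirroring CleanKey().
    memory_cleanse(key.data(), key.size());
    memory_cleanse(iv.data(), iv.size());
}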
@ -191,10 +191,10 @@ static void TestPassphraseSingle(const std::vector<unsigned char>& vchSalt, cons
 
     OldSetKeyFromPassphrase(passphrase, vchSalt, rounds, 0, chKey, chIV);
 
-    BOOST_CHECK_MESSAGE(memcmp(chKey, crypt.chKey, sizeof(chKey)) == 0, \
-        HexStr(chKey, chKey+sizeof(chKey)) + std::string(" != ") + HexStr(crypt.chKey, crypt.chKey + (sizeof crypt.chKey)));
-    BOOST_CHECK_MESSAGE(memcmp(chIV, crypt.chIV, sizeof(chIV)) == 0, \
-        HexStr(chIV, chIV+sizeof(chIV)) + std::string(" != ") + HexStr(crypt.chIV, crypt.chIV + (sizeof crypt.chIV)));
+    BOOST_CHECK_MESSAGE(memcmp(chKey, crypt.vchKey.data(), crypt.vchKey.size()) == 0, \
+        HexStr(chKey, chKey+sizeof(chKey)) + std::string(" != ") + HexStr(crypt.vchKey));
+    BOOST_CHECK_MESSAGE(memcmp(chIV, crypt.vchIV.data(), crypt.vchIV.size()) == 0, \
+        HexStr(chIV, chIV+sizeof(chIV)) + std::string(" != ") + HexStr(crypt.vchIV));
 
     if(!correctKey.empty())
         BOOST_CHECK_MESSAGE(memcmp(chKey, &correctKey[0], sizeof(chKey)) == 0, \
@ -221,7 +221,7 @@ static void TestDecrypt(const CCrypter& crypt, const std::vector<unsigned char>&
     CKeyingMaterial vchDecrypted2;
     int result1, result2;
     result1 = crypt.Decrypt(vchCiphertext, vchDecrypted1);
-    result2 = OldDecrypt(vchCiphertext, vchDecrypted2, crypt.chKey, crypt.chIV);
+    result2 = OldDecrypt(vchCiphertext, vchDecrypted2, crypt.vchKey.data(), crypt.vchIV.data());
     BOOST_CHECK(result1 == result2);
 
     // These two should be equal. However, OpenSSL 1.0.1j introduced a change
@ -246,7 +246,7 @@ static void TestEncryptSingle(const CCrypter& crypt, const CKeyingMaterial& vchP
     std::vector<unsigned char> vchCiphertext2;
     int result1 = crypt.Encrypt(vchPlaintext, vchCiphertext1);
 
-    int result2 = OldEncrypt(vchPlaintext, vchCiphertext2, crypt.chKey, crypt.chIV);
+    int result2 = OldEncrypt(vchPlaintext, vchCiphertext2, crypt.vchKey.data(), crypt.vchIV.data());
     BOOST_CHECK(result1 == result2);
     BOOST_CHECK(vchCiphertext1 == vchCiphertext2);
 