Implement mt_pooled_secure_allocator and use it for BLS secure allocation (#2375)

* Add pooled_secure_allocator and mt_pooled_secure_allocator

* Use mt_pooled_secure_allocator for BLS secure allocation

The old solution relied on thread-local storage and was thus not compatible
with libc6 2.11 (the minimum version we support).

Also, the old solution turned out to be erroneous: it would have crashed or
leaked memory when ownership of a CBLSPrivateKey was handed over to another
thread (an illustrative sketch of this hazard follows the list below).

* Add new header files to Makefile.am

* Fix copyright headers of new files

* Bail out early from secure deallocation

* Clean up global BLS keys when shutting down
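
As an illustration of the failure mode described above (a sketch with hypothetical names, not code from this commit): with one pool per thread, an object allocated from thread A's pool but destroyed on thread B hands its memory back to the wrong pool, or outlives the pool it came from.

// Hypothetical sketch of the old per-thread-pool hazard; names are illustrative only.
#include <thread>
#include <utility>
#include <boost/pool/pool.hpp>

thread_local boost::pool<> tls_pool(64); // one pool per thread, as in the old scheme

struct SecureBuf {
    void* buf = nullptr;
    SecureBuf() { buf = tls_pool.malloc(); }      // taken from the creating thread's pool
    ~SecureBuf() { if (buf) tls_pool.free(buf); } // returned to the *destroying* thread's pool
    SecureBuf(SecureBuf&& o) noexcept : buf(std::exchange(o.buf, nullptr)) {}
};

int main()
{
    SecureBuf key;                        // allocated on the main thread
    std::thread worker([key = std::move(key)]() {
        // key is destroyed on this thread: its memory goes into the wrong pool,
        // which is exactly the crash/leak the commit message describes.
    });
    worker.join();
}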
Alexander Block 2018-10-26 07:04:22 +02:00 committed by GitHub
parent 0692de1c58
commit d1debfc267
5 changed files with 173 additions and 40 deletions

src/Makefile.am

@@ -172,6 +172,8 @@ BITCOIN_CORE_H = \
   script/ismine.h \
   spork.h \
   streams.h \
+  support/allocators/mt_pooled_secure.h \
+  support/allocators/pooled_secure.h \
   support/allocators/secure.h \
   support/allocators/zeroafterfree.h \
   support/cleanse.h \

src/bls/bls.cpp

@@ -9,8 +9,7 @@
 #include "tinyformat.h"
 
 #ifndef BUILD_BITCOIN_INTERNAL
-#include "support/allocators/secure.h"
-#include <boost/pool/pool_alloc.hpp>
+#include "support/allocators/mt_pooled_secure.h"
 #endif
 
 #include <assert.h>
@@ -425,40 +424,14 @@ bool CBLSSignature::Recover(const std::vector<CBLSSignature>& sigs, const std::v
 }
 
 #ifndef BUILD_BITCOIN_INTERNAL
-struct secure_user_allocator {
-    typedef std::size_t size_type;
-    typedef std::ptrdiff_t difference_type;
-
-    static char* malloc(const size_type bytes)
-    {
-        return static_cast<char*>(LockedPoolManager::Instance().alloc(bytes));
-    }
-    static void free(char* const block)
-    {
-        LockedPoolManager::Instance().free(block);
-    }
-};
-
-// every thread has it's own pool allocator for secure data to speed things up
-// otherwise locking of mutexes slows down the system at places were you'd never expect it
-// downside is that we must make sure that all threads have destroyed their copy of the pool before the global
-// LockedPool is destroyed. This means that all worker threads must finish before static destruction begins
-// we use sizeof(bn_t) as the pool request size as this is what Chia's BLS library will request in most cases
-// In case something larger is requested, we directly call into LockedPool and accept the slowness
-thread_local static boost::pool<secure_user_allocator> securePool(sizeof(bn_t) + sizeof(size_t));
+static mt_pooled_secure_allocator<uint8_t> g_blsSecureAllocator(sizeof(bn_t) + sizeof(size_t));
 
 static void* secure_allocate(size_t n)
 {
-    void* p;
-    if (n <= securePool.get_requested_size() - sizeof(size_t)) {
-        p = securePool.ordered_malloc();
-    } else {
-        p = secure_user_allocator::malloc(n + sizeof(size_t));
-    }
-    *(size_t*)p = n;
-    p = (uint8_t*)p + sizeof(size_t);
-    return p;
+    uint8_t* ptr = g_blsSecureAllocator.allocate(n + sizeof(size_t));
+    *(size_t*)ptr = n;
+    return ptr + sizeof(size_t);
 }
 
 static void secure_free(void* p)
@@ -466,14 +439,10 @@ static void secure_free(void* p)
     if (!p) {
         return;
     }
-    p = (uint8_t*)p - sizeof(size_t);
-    size_t n = *(size_t*)p;
-    memory_cleanse(p, n + sizeof(size_t));
-    if (n <= securePool.get_requested_size() - sizeof(size_t)) {
-        securePool.ordered_free(p);
-    } else {
-        secure_user_allocator::free((char*)p);
-    }
+
+    uint8_t* ptr = (uint8_t*)p - sizeof(size_t);
+    size_t n = *(size_t*)ptr;
+    return g_blsSecureAllocator.deallocate(ptr, n);
 }
 #endif
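
For clarity (not part of the commit), here is the size-prefix scheme used by secure_allocate/secure_free in isolation, with plain malloc/free standing in for the pooled secure allocator; prefixed_allocate and prefixed_free are hypothetical names.

// Standalone sketch of the size-prefix bookkeeping above (hypothetical names, plain malloc).
#include <cstdlib>
#include <cstdint>
#include <cstring>

static void* prefixed_allocate(size_t n)
{
    // Reserve sizeof(size_t) extra bytes in front of the block handed to the caller.
    uint8_t* ptr = static_cast<uint8_t*>(std::malloc(n + sizeof(size_t)));
    *(size_t*)ptr = n;            // remember the requested size
    return ptr + sizeof(size_t);  // caller sees only the payload
}

static void prefixed_free(void* p)
{
    if (!p) {
        return;
    }
    uint8_t* ptr = static_cast<uint8_t*>(p) - sizeof(size_t);
    size_t n = *(size_t*)ptr;     // recover the size for the deallocation call
    // In the commit, wiping happens inside pooled_secure_allocator::deallocate (memory_cleanse);
    // here a plain memset stands in for it.
    std::memset(ptr, 0, n + sizeof(size_t));
    std::free(ptr);
}

int main()
{
    void* p = prefixed_allocate(48);
    std::memset(p, 0xAB, 48);
    prefixed_free(p);
}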

src/init.cpp

@@ -324,6 +324,10 @@ void PrepareShutdown()
         UnregisterValidationInterface(activeMasternodeManager);
     }
 
+    // make sure to clean up BLS keys before global destructors are called (they have allocated from the secure memory pool)
+    activeMasternodeInfo.blsKeyOperator.reset();
+    activeMasternodeInfo.blsPubKeyOperator.reset();
+
 #ifndef WIN32
     try {
         boost::filesystem::remove(GetPidFile());
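
The reasoning behind the explicit reset() calls: both activeMasternodeInfo and the secure pool the keys were allocated from are globals, and static destruction order across translation units is unspecified, so the keys must be released while the pool is known to still exist. A reduced sketch of that hazard (hypothetical names, not from this commit):

// Reduced sketch of the static-destruction-order problem the reset() calls avoid (names hypothetical).
#include <memory>

struct Pool {
    ~Pool() { /* releases the whole arena */ }
    void give_back(void*) {}
};

struct Key {
    void* mem = nullptr;
    ~Key();                     // returns mem to the pool -- the pool must still be alive
};

// In the real code these globals live in different translation units, so their
// relative destruction order at static destruction time is unspecified.
Pool g_pool;
std::unique_ptr<Key> g_key;

Key::~Key() { g_pool.give_back(mem); }

void PrepareShutdownSketch()
{
    // Destroy the key now, while g_pool is guaranteed to be alive, instead of leaving it
    // to static destruction, which may run after ~Pool().
    g_key.reset();
}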

src/support/allocators/mt_pooled_secure.h (new file)

@@ -0,0 +1,85 @@
// Copyright (c) 2014-2018 The Dash Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.

#ifndef BITCOIN_SUPPORT_ALLOCATORS_MT_POOLED_SECURE_H
#define BITCOIN_SUPPORT_ALLOCATORS_MT_POOLED_SECURE_H

#include "pooled_secure.h"

#include <thread>
#include <mutex>

//
// Manages a pool of pools to balance allocation between those when multiple threads are involved
// This allocator is fully thread safe
//
template <typename T>
struct mt_pooled_secure_allocator : public std::allocator<T> {
    // MSVC8 default copy constructor is broken
    typedef std::allocator<T> base;
    typedef typename base::size_type size_type;
    typedef typename base::difference_type difference_type;
    typedef typename base::pointer pointer;
    typedef typename base::const_pointer const_pointer;
    typedef typename base::reference reference;
    typedef typename base::const_reference const_reference;
    typedef typename base::value_type value_type;

    mt_pooled_secure_allocator(size_type nrequested_size = 32,
        size_type nnext_size = 32,
        size_type nmax_size = 0) throw()
    {
        // we add enough bytes to the requested size so that we can store the bucket as well
        nrequested_size += sizeof(size_t);

        size_t pools_count = std::thread::hardware_concurrency();
        pools.resize(pools_count);
        for (size_t i = 0; i < pools_count; i++) {
            pools[i] = std::make_unique<internal_pool>(nrequested_size, nnext_size, nmax_size);
        }
    }

    ~mt_pooled_secure_allocator() throw() {}

    T* allocate(std::size_t n, const void* hint = 0)
    {
        size_t bucket = get_bucket();
        std::lock_guard<std::mutex> lock(pools[bucket]->mutex);
        uint8_t* ptr = pools[bucket]->allocate(n * sizeof(T) + sizeof(size_t));
        *(size_t*)ptr = bucket;
        return static_cast<T*>(ptr + sizeof(size_t));
    }

    void deallocate(T* p, std::size_t n)
    {
        if (!p) {
            return;
        }
        uint8_t* ptr = (uint8_t*)p - sizeof(size_t);
        size_t bucket = *(size_t*)ptr;
        std::lock_guard<std::mutex> lock(pools[bucket]->mutex);
        pools[bucket]->deallocate(ptr, n * sizeof(T));
    }

private:
    size_t get_bucket()
    {
        auto tid = std::this_thread::get_id();
        size_t x = std::hash<std::thread::id>{}(std::this_thread::get_id());
        return x % pools.size();
    }

    struct internal_pool : pooled_secure_allocator<uint8_t> {
        internal_pool(size_type nrequested_size,
            size_type nnext_size,
            size_type nmax_size) :
            pooled_secure_allocator(nrequested_size, nnext_size, nmax_size)
        {
        }
        std::mutex mutex;
    };

private:
    std::vector<std::unique_ptr<internal_pool>> pools;
};

#endif // BITCOIN_SUPPORT_ALLOCATORS_MT_POOLED_SECURE_H
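
For context (not part of the commit), a minimal usage sketch of this allocator in the style bls.cpp uses it: one shared instance sized for the typical request, with raw allocate/deallocate calls issued from several threads. The bucket index stored in front of each allocation lets deallocate find the right per-bucket pool even if it runs on a different thread than allocate.

// Minimal usage sketch (illustrative, not from the commit).
#include "support/allocators/mt_pooled_secure.h"
#include <thread>
#include <vector>
#include <cstring>

static mt_pooled_secure_allocator<uint8_t> g_alloc(64); // typical request size in bytes

int main()
{
    std::vector<std::thread> threads;
    for (int t = 0; t < 4; t++) {
        threads.emplace_back([] {
            for (int i = 0; i < 1000; i++) {
                uint8_t* p = g_alloc.allocate(48);   // fits the 64-byte buckets
                std::memset(p, 0xAB, 48);            // use the secure buffer
                g_alloc.deallocate(p, 48);           // routed to the owning bucket via the stored index
            }
        });
    }
    for (auto& th : threads) {
        th.join();
    }
}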

src/support/allocators/pooled_secure.h (new file)

@@ -0,0 +1,73 @@
// Copyright (c) 2014-2018 The Dash Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.

#ifndef BITCOIN_SUPPORT_ALLOCATORS_POOLED_SECURE_H
#define BITCOIN_SUPPORT_ALLOCATORS_POOLED_SECURE_H

#include "support/lockedpool.h"
#include "support/cleanse.h"

#include <string>
#include <vector>

#include <boost/pool/pool_alloc.hpp>

//
// Allocator that allocates memory in chunks from a pool, which in turn allocates larger chunks from secure memory
// Memory is cleaned when freed as well. This allocator is NOT thread safe
//
template <typename T>
struct pooled_secure_allocator : public std::allocator<T> {
    // MSVC8 default copy constructor is broken
    typedef std::allocator<T> base;
    typedef typename base::size_type size_type;
    typedef typename base::difference_type difference_type;
    typedef typename base::pointer pointer;
    typedef typename base::const_pointer const_pointer;
    typedef typename base::reference reference;
    typedef typename base::const_reference const_reference;
    typedef typename base::value_type value_type;

    pooled_secure_allocator(const size_type nrequested_size = 32,
        const size_type nnext_size = 32,
        const size_type nmax_size = 0) throw() :
        pool(nrequested_size, nnext_size, nmax_size) {}

    ~pooled_secure_allocator() throw() {}

    T* allocate(std::size_t n, const void* hint = 0)
    {
        size_t chunks = (n * sizeof(T) + pool.get_requested_size() - 1) / pool.get_requested_size();
        return static_cast<T*>(pool.ordered_malloc(chunks));
    }

    void deallocate(T* p, std::size_t n)
    {
        if (!p) {
            return;
        }
        size_t chunks = (n * sizeof(T) + pool.get_requested_size() - 1) / pool.get_requested_size();
        memory_cleanse(p, chunks * pool.get_requested_size());
        pool.ordered_free(p, chunks);
    }

public:
    struct internal_secure_allocator {
        typedef std::size_t size_type;
        typedef std::ptrdiff_t difference_type;

        static char* malloc(const size_type bytes)
        {
            return static_cast<char*>(LockedPoolManager::Instance().alloc(bytes));
        }

        static void free(char* const block)
        {
            LockedPoolManager::Instance().free(block);
        }
    };

private:
    boost::pool<internal_secure_allocator> pool;
};

#endif // BITCOIN_SUPPORT_ALLOCATORS_POOLED_SECURE_H
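
A minimal single-threaded usage sketch (illustrative, not from the commit): pooled_secure_allocator carves fixed-size chunks out of the LockedPool and wipes them with memory_cleanse on deallocate, but it is not thread safe, which is what mt_pooled_secure_allocator layers on top of it.

// Illustrative single-threaded sketch: allocate a buffer for secret material,
// use it, and let deallocate() cleanse it before returning the chunk to the pool.
#include "support/allocators/pooled_secure.h"
#include <cstring>

int main()
{
    pooled_secure_allocator<uint8_t> alloc(32); // 32-byte chunks backed by LockedPool

    uint8_t* secret = alloc.allocate(32);       // exactly one chunk
    std::memcpy(secret, "not-a-real-secret", 18);

    // ... use the secret ...

    alloc.deallocate(secret, 32);               // cleansed, then returned to the pool
}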