// Copyright (c) 2009-2010 Satoshi Nakamoto
// Copyright (c) 2009-2015 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
|
2020-03-19 23:46:56 +01:00
|
|
|
#include <random.h>
|
2014-06-26 14:41:53 +02:00
|
|
|
|
2022-04-25 11:59:51 +02:00
|
|
|
#include <compat/cpuid.h>
|
2020-03-19 23:46:56 +01:00
|
|
|
#include <crypto/sha512.h>
|
|
|
|
#include <support/cleanse.h>
|
2014-06-26 14:41:53 +02:00
|
|
|
#ifdef WIN32
|
2020-03-19 23:46:56 +01:00
|
|
|
#include <compat.h> // for Windows API
|
2016-05-30 15:46:16 +02:00
|
|
|
#include <wincrypt.h>
|
2014-06-26 14:41:53 +02:00
|
|
|
#endif
|
2022-04-25 11:59:51 +02:00
|
|
|
#include <logging.h> // for LogPrintf()
|
|
|
|
#include <sync.h> // for Mutex
|
2020-03-17 06:00:31 +01:00
|
|
|
#include <util/time.h> // for GetTimeMicros()
|
2014-06-26 14:41:53 +02:00
|
|
|
|
2016-05-30 15:46:16 +02:00
|
|
|
#include <stdlib.h>
|
2017-05-09 19:10:23 +02:00
|
|
|
#include <thread>
|
2014-09-14 12:43:56 +02:00
|
|
|
|
2022-04-25 11:59:51 +02:00
|
|
|
#include <randomenv.h>
|
|
|
|
|
2021-09-11 22:52:36 +02:00
|
|
|
#include <support/allocators/secure.h>
|
|
|
|
|
2014-06-26 14:41:53 +02:00
|
|
|
#ifndef WIN32
|
2018-05-17 18:25:09 +02:00
|
|
|
#include <fcntl.h>
|
2014-06-26 14:41:53 +02:00
|
|
|
#endif
|
2014-09-14 12:43:56 +02:00
|
|
|
|
2017-03-01 12:40:06 +01:00
|
|
|
#ifdef HAVE_SYS_GETRANDOM
|
|
|
|
#include <sys/syscall.h>
|
|
|
|
#include <linux/random.h>
|
|
|
|
#endif
|
2017-08-07 17:13:49 +02:00
|
|
|
#if defined(HAVE_GETENTROPY) || (defined(HAVE_GETENTROPY_RAND) && defined(MAC_OSX))
|
2017-03-01 12:40:06 +01:00
|
|
|
#include <unistd.h>
|
|
|
|
#endif
|
2017-08-07 17:13:49 +02:00
|
|
|
#if defined(HAVE_GETENTROPY_RAND) && defined(MAC_OSX)
|
|
|
|
#include <sys/random.h>
|
|
|
|
#endif
|
2017-03-01 12:40:06 +01:00
|
|
|
#ifdef HAVE_SYSCTL_ARND
|
2021-06-27 08:33:13 +02:00
|
|
|
#include <util/strencodings.h> // for ARRAYLEN
|
2017-03-01 12:40:06 +01:00
|
|
|
#include <sys/sysctl.h>
|
|
|
|
#endif
|
|
|
|
|
2017-08-22 09:36:06 +02:00
|
|
|
/** Abort the process: without working OS randomness we cannot continue safely. */
[[noreturn]] static void RandFailure()
{
    LogPrintf("Failed to read randomness, aborting\n");
    std::abort();
}
|
|
|
|
|
2021-09-11 22:52:36 +02:00
|
|
|
/** Return a high-resolution, monotonically-varying counter value used as a
 *  cheap entropy/benchmark source. Not a wall-clock time. */
static inline int64_t GetPerformanceCounter() noexcept
{
    // Read the hardware time stamp counter when available.
    // See https://en.wikipedia.org/wiki/Time_Stamp_Counter for more information.
#if defined(_MSC_VER) && (defined(_M_IX86) || defined(_M_X64))
    return __rdtsc();
#elif !defined(_MSC_VER) && defined(__i386__)
    uint64_t r = 0;
    __asm__ volatile ("rdtsc" : "=A"(r)); // Constrain the r variable to the eax:edx pair.
    return r;
#elif !defined(_MSC_VER) && (defined(__x86_64__) || defined(__amd64__))
    uint64_t r1 = 0, r2 = 0;
    __asm__ volatile ("rdtsc" : "=a"(r1), "=d"(r2)); // Constrain r1 to rax and r2 to rdx.
    return (r2 << 32) | r1;
#else
    // Fall back to using C++11 clock (usually microsecond or nanosecond precision)
    return std::chrono::high_resolution_clock::now().time_since_epoch().count();
#endif
}
|
|
|
|
|
2022-04-25 11:59:51 +02:00
|
|
|
#ifdef HAVE_GETCPUID
// Cached results of the CPUID feature probe; set once by InitHardwareRand().
static bool g_rdrand_supported = false;
static bool g_rdseed_supported = false;
// CPUID leaf 1, ECX bit 30: RDRAND support.
static constexpr uint32_t CPUID_F1_ECX_RDRAND = 0x40000000;
// CPUID leaf 7, EBX bit 18: RDSEED support.
static constexpr uint32_t CPUID_F7_EBX_RDSEED = 0x00040000;
#ifdef bit_RDRND
static_assert(CPUID_F1_ECX_RDRAND == bit_RDRND, "Unexpected value for bit_RDRND");
#endif
#ifdef bit_RDSEED
static_assert(CPUID_F7_EBX_RDSEED == bit_RDSEED, "Unexpected value for bit_RDSEED");
#endif

/** Probe CPUID for RDRAND/RDSEED support and cache the result in the globals above. */
static void InitHardwareRand()
{
    uint32_t eax, ebx, ecx, edx;
    GetCPUID(1, 0, eax, ebx, ecx, edx);
    if (ecx & CPUID_F1_ECX_RDRAND) {
        g_rdrand_supported = true;
    }
    GetCPUID(7, 0, eax, ebx, ecx, edx);
    if (ebx & CPUID_F7_EBX_RDSEED) {
        g_rdseed_supported = true;
    }
}
|
2021-09-11 22:52:36 +02:00
|
|
|
|
|
|
|
/** Log which hardware entropy sources were detected. */
static void ReportHardwareRand()
{
    // This must be done in a separate function, as InitHardwareRand() may be indirectly called
    // from global constructors, before logging is initialized.
    if (g_rdseed_supported) {
        LogPrintf("Using RdSeed as additional entropy source\n");
    }
    if (g_rdrand_supported) {
        LogPrintf("Using RdRand as an additional entropy source\n");
    }
}
|
|
|
|
|
2019-02-18 10:55:34 +01:00
|
|
|
/** Read 64 bits of entropy using rdrand.
 *
 * Must only be called when RdRand is supported.
 */
static uint64_t GetRdRand() noexcept
{
    // RdRand may very rarely fail. Invoke it up to 10 times in a loop to reduce this risk.
#ifdef __i386__
    uint8_t ok;
    // Initialize to 0 to silence a compiler warning that r1 or r2 may be used
    // uninitialized. Even if rdrand fails (!ok) it will set the output to 0,
    // but there is no way that the compiler could know that.
    uint32_t r1 = 0, r2 = 0;
    for (int i = 0; i < 10; ++i) {
        // Raw opcode bytes are used so the code assembles even without
        // assembler support for the rdrand mnemonic.
        __asm__ volatile (".byte 0x0f, 0xc7, 0xf0; setc %1" : "=a"(r1), "=q"(ok) :: "cc"); // rdrand %eax
        if (ok) break;
    }
    for (int i = 0; i < 10; ++i) {
        __asm__ volatile (".byte 0x0f, 0xc7, 0xf0; setc %1" : "=a"(r2), "=q"(ok) :: "cc"); // rdrand %eax
        if (ok) break;
    }
    // Combine the two 32-bit draws into one 64-bit result.
    return (((uint64_t)r2) << 32) | r1;
#elif defined(__x86_64__) || defined(__amd64__)
    uint8_t ok;
    uint64_t r1 = 0; // See above why we initialize to 0.
    for (int i = 0; i < 10; ++i) {
        __asm__ volatile (".byte 0x48, 0x0f, 0xc7, 0xf0; setc %1" : "=a"(r1), "=q"(ok) :: "cc"); // rdrand %rax
        if (ok) break;
    }
    return r1;
#else
#error "RdRand is only supported on x86 and x86_64"
#endif
}
|
|
|
|
|
|
|
|
/** Read 64 bits of entropy using rdseed.
 *
 * Must only be called when RdSeed is supported.
 */
static uint64_t GetRdSeed() noexcept
{
    // RdSeed may fail when the HW RNG is overloaded. Loop indefinitely until enough entropy is gathered,
    // but pause after every failure.
#ifdef __i386__
    uint8_t ok;
    uint32_t r1, r2;
    do {
        // Raw opcode bytes for rdseed; carry flag signals success.
        __asm__ volatile (".byte 0x0f, 0xc7, 0xf8; setc %1" : "=a"(r1), "=q"(ok) :: "cc"); // rdseed %eax
        if (ok) break;
        __asm__ volatile ("pause");
    } while(true);
    do {
        __asm__ volatile (".byte 0x0f, 0xc7, 0xf8; setc %1" : "=a"(r2), "=q"(ok) :: "cc"); // rdseed %eax
        if (ok) break;
        __asm__ volatile ("pause");
    } while(true);
    // Combine the two 32-bit draws into one 64-bit result.
    return (((uint64_t)r2) << 32) | r1;
#elif defined(__x86_64__) || defined(__amd64__)
    uint8_t ok;
    uint64_t r1;
    do {
        __asm__ volatile (".byte 0x48, 0x0f, 0xc7, 0xf8; setc %1" : "=a"(r1), "=q"(ok) :: "cc"); // rdseed %rax
        if (ok) break;
        __asm__ volatile ("pause");
    } while(true);
    return r1;
#else
#error "RdSeed is only supported on x86 and x86_64"
#endif
}
|
|
|
|
|
2017-06-14 15:22:08 +02:00
|
|
|
#else
/* Access to other hardware random number generators could be added here later,
 * assuming it is sufficiently fast (in the order of a few hundred CPU cycles).
 * Slower sources should probably be invoked separately, and/or only from
 * RandAddPeriodic (which is called once a minute).
 */
// No hardware RNG available on this platform: both hooks are no-ops.
static void InitHardwareRand() {}
static void ReportHardwareRand() {}
#endif
|
|
|
|
|
2019-02-18 10:55:34 +01:00
|
|
|
/** Add 64 bits of entropy gathered from hardware to hasher. Do nothing if not supported. */
static void SeedHardwareFast(CSHA512& hasher) noexcept {
#if defined(__x86_64__) || defined(__amd64__) || defined(__i386__)
    if (g_rdrand_supported) {
        uint64_t out = GetRdRand();
        hasher.Write((const unsigned char*)&out, sizeof(out));
        return;
    }
#endif
}
|
|
|
|
|
|
|
|
/** Add 256 bits of entropy gathered from hardware to hasher. Do nothing if not supported. */
static void SeedHardwareSlow(CSHA512& hasher) noexcept {
#if defined(__x86_64__) || defined(__amd64__) || defined(__i386__)
    // When we want 256 bits of entropy, prefer RdSeed over RdRand, as it's
    // guaranteed to produce independent randomness on every call.
    if (g_rdseed_supported) {
        // 4 x 64 bits = 256 bits.
        for (int i = 0; i < 4; ++i) {
            uint64_t out = GetRdSeed();
            hasher.Write((const unsigned char*)&out, sizeof(out));
        }
        return;
    }
    // When falling back to RdRand, XOR the result of 1024 results.
    // This guarantees a reseeding occurs between each.
    if (g_rdrand_supported) {
        for (int i = 0; i < 4; ++i) {
            uint64_t out = 0;
            for (int j = 0; j < 1024; ++j) out ^= GetRdRand();
            hasher.Write((const unsigned char*)&out, sizeof(out));
        }
        return;
    }
#endif
}
|
|
|
|
|
2019-05-18 10:01:21 +02:00
|
|
|
/** Use repeated SHA512 to strengthen the randomness in seed32, and feed into hasher.
 *
 * Runs for approximately `microseconds` of wall-clock time, feeding timing
 * observations (which depend on the machine's state) into `hasher` as well.
 */
static void Strengthen(const unsigned char (&seed)[32], int microseconds, CSHA512& hasher) noexcept
{
    CSHA512 inner_hasher;
    inner_hasher.Write(seed, sizeof(seed));

    // Hash loop: repeatedly re-hash the running state until the deadline passes.
    unsigned char buffer[64];
    int64_t stop = GetTimeMicros() + microseconds;
    do {
        for (int i = 0; i < 1000; ++i) {
            inner_hasher.Finalize(buffer);
            inner_hasher.Reset();
            inner_hasher.Write(buffer, sizeof(buffer));
        }
        // Benchmark operation and feed it into outer hasher.
        int64_t perf = GetPerformanceCounter();
        hasher.Write((const unsigned char*)&perf, sizeof(perf));
    } while (GetTimeMicros() < stop);

    // Produce output from inner state and feed it to outer hasher.
    inner_hasher.Finalize(buffer);
    hasher.Write(buffer, sizeof(buffer));
    // Try to clean up.
    inner_hasher.Reset();
    memory_cleanse(buffer, sizeof(buffer));
}
|
|
|
|
|
2017-03-01 12:40:06 +01:00
|
|
|
#ifndef WIN32
/** Fallback: get 32 bytes of system entropy from /dev/urandom. The most
 * compatible way to get cryptographic randomness on UNIX-ish platforms.
 */
static void GetDevURandom(unsigned char *ent32)
{
    int f = open("/dev/urandom", O_RDONLY);
    if (f == -1) {
        RandFailure();
    }
    int have = 0;
    // Loop: read() may return fewer bytes than requested.
    do {
        ssize_t n = read(f, ent32 + have, NUM_OS_RANDOM_BYTES - have);
        if (n <= 0 || n + have > NUM_OS_RANDOM_BYTES) {
            // Close the descriptor before aborting to avoid a leak.
            close(f);
            RandFailure();
        }
        have += n;
    } while (have < NUM_OS_RANDOM_BYTES);
    close(f);
}
#endif
|
|
|
|
|
2016-05-30 15:46:16 +02:00
|
|
|
/** Get 32 bytes of system entropy. Aborts via RandFailure() on any error. */
void GetOSRand(unsigned char *ent32)
{
#if defined(WIN32)
    // Windows: use the legacy CryptoAPI provider.
    HCRYPTPROV hProvider;
    int ret = CryptAcquireContextW(&hProvider, nullptr, nullptr, PROV_RSA_FULL, CRYPT_VERIFYCONTEXT);
    if (!ret) {
        RandFailure();
    }
    ret = CryptGenRandom(hProvider, NUM_OS_RANDOM_BYTES, ent32);
    if (!ret) {
        RandFailure();
    }
    CryptReleaseContext(hProvider, 0);
#elif defined(HAVE_SYS_GETRANDOM)
    /* Linux. From the getrandom(2) man page:
     * "If the urandom source has been initialized, reads of up to 256 bytes
     * will always return as many bytes as requested and will not be
     * interrupted by signals."
     */
    int rv = syscall(SYS_getrandom, ent32, NUM_OS_RANDOM_BYTES, 0);
    if (rv != NUM_OS_RANDOM_BYTES) {
        if (rv < 0 && errno == ENOSYS) {
            /* Fallback for kernel <3.17: the return value will be -1 and errno
             * ENOSYS if the syscall is not available, in that case fall back
             * to /dev/urandom.
             */
            GetDevURandom(ent32);
        } else {
            RandFailure();
        }
    }
#elif defined(HAVE_GETENTROPY) && defined(__OpenBSD__)
    /* On OpenBSD this can return up to 256 bytes of entropy, will return an
     * error if more are requested.
     * The call cannot return less than the requested number of bytes.
     * getentropy is explicitly limited to openbsd here, as a similar (but not
     * the same) function may exist on other platforms via glibc.
     */
    if (getentropy(ent32, NUM_OS_RANDOM_BYTES) != 0) {
        RandFailure();
    }
    // Silence a compiler warning about unused function.
    (void)GetDevURandom;
#elif defined(HAVE_GETENTROPY_RAND) && defined(MAC_OSX)
    /* getentropy() is available on macOS 10.12 and later.
     */
    if (getentropy(ent32, NUM_OS_RANDOM_BYTES) != 0) {
        RandFailure();
    }
    // Silence a compiler warning about unused function.
    (void)GetDevURandom;
#elif defined(HAVE_SYSCTL_ARND)
    /* FreeBSD and similar. It is possible for the call to return less
     * bytes than requested, so need to read in a loop.
     */
    static const int name[2] = {CTL_KERN, KERN_ARND};
    int have = 0;
    do {
        size_t len = NUM_OS_RANDOM_BYTES - have;
        if (sysctl(name, ARRAYLEN(name), ent32 + have, &len, nullptr, 0) != 0) {
            RandFailure();
        }
        have += len;
    } while (have < NUM_OS_RANDOM_BYTES);
    // Silence a compiler warning about unused function.
    (void)GetDevURandom;
#else
    /* Fall back to /dev/urandom if there is no specific method implemented to
     * get system entropy for this OS.
     */
    GetDevURandom(ent32);
#endif
}
|
|
|
|
|
2021-09-11 22:52:36 +02:00
|
|
|
namespace {

/** Internal RNG state: 256 bits of carried entropy plus a counter, protected
 *  by a mutex. Accessed only through GetRNGState(). */
class RNGState {
    Mutex m_mutex;
    /* The RNG state consists of 256 bits of entropy, taken from the output of
     * one operation's SHA512 output, and fed as input to the next one.
     * Carrying 256 bits of entropy should be sufficient to guarantee
     * unpredictability as long as any entropy source was ever unpredictable
     * to an attacker. To protect against situations where an attacker might
     * observe the RNG's state, fresh entropy is always mixed when
     * GetStrongRandBytes is called.
     */
    unsigned char m_state[32] GUARDED_BY(m_mutex) = {0};
    // Monotone counter mixed into every extraction, guaranteeing distinct hasher inputs.
    uint64_t m_counter GUARDED_BY(m_mutex) = 0;
    // Set once MixExtract has been called with strong_seed = true.
    bool m_strongly_seeded GUARDED_BY(m_mutex) = false;

public:
    RNGState() noexcept
    {
        InitHardwareRand();
    }

    ~RNGState()
    {
    }

    /** Extract up to 32 bytes of entropy from the RNG state, mixing in new entropy from hasher.
     *
     * If this function has never been called with strong_seed = true, false is returned.
     */
    bool MixExtract(unsigned char* out, size_t num, CSHA512&& hasher, bool strong_seed) noexcept
    {
        assert(num <= 32);
        unsigned char buf[64];
        static_assert(sizeof(buf) == CSHA512::OUTPUT_SIZE, "Buffer needs to have hasher's output size");
        bool ret;
        {
            LOCK(m_mutex);
            ret = (m_strongly_seeded |= strong_seed);
            // Write the current state of the RNG into the hasher
            hasher.Write(m_state, 32);
            // Write a new counter number into the state
            hasher.Write((const unsigned char*)&m_counter, sizeof(m_counter));
            ++m_counter;
            // Finalize the hasher
            hasher.Finalize(buf);
            // Store the last 32 bytes of the hash output as new RNG state.
            memcpy(m_state, buf + 32, 32);
        }
        // If desired, copy (up to) the first 32 bytes of the hash output as output.
        if (num) {
            assert(out != nullptr);
            memcpy(out, buf, num);
        }
        // Best effort cleanup of internal state
        hasher.Reset();
        memory_cleanse(buf, 64);
        return ret;
    }
};

/** Return the singleton RNG state, constructing it (in secure memory) on first call. */
RNGState& GetRNGState() noexcept
{
    // This C++11 idiom relies on the guarantee that static variable are initialized
    // on first call, even when multiple parallel calls are permitted.
    static std::vector<RNGState, secure_allocator<RNGState>> g_rng(1);
    return g_rng[0];
}
}
|
|
|
|
|
|
|
|
/* A note on the use of noexcept in the seeding functions below:
|
|
|
|
*
|
|
|
|
* None of the RNG code should ever throw any exception.
|
|
|
|
*/
|
2017-05-23 17:53:00 +02:00
|
|
|
|
2021-09-11 22:52:36 +02:00
|
|
|
/** Feed the current high-resolution performance counter into hasher. */
static void SeedTimestamp(CSHA512& hasher) noexcept
{
    const int64_t now = GetPerformanceCounter();
    hasher.Write(reinterpret_cast<const unsigned char*>(&now), sizeof(now));
}
|
2017-05-23 17:53:00 +02:00
|
|
|
|
2021-09-11 22:52:36 +02:00
|
|
|
/** Cheap per-call entropy: stack address, hardware RNG (if any), timestamp. */
static void SeedFast(CSHA512& hasher) noexcept
{
    unsigned char buffer[32];

    // Stack pointer to indirectly commit to thread/callstack
    const unsigned char* ptr = buffer;
    hasher.Write((const unsigned char*)&ptr, sizeof(ptr));

    // Hardware randomness is very fast when available; use it always.
    SeedHardwareFast(hasher);

    // High-precision timestamp
    SeedTimestamp(hasher);
}
|
|
|
|
|
2021-09-11 22:52:36 +02:00
|
|
|
/** Everything SeedFast does, plus 32 bytes of OS entropy. */
static void SeedSlow(CSHA512& hasher) noexcept
{
    unsigned char buffer[32];

    // Everything that the 'fast' seeder includes
    SeedFast(hasher);

    // OS randomness
    GetOSRand(buffer);
    hasher.Write(buffer, sizeof(buffer));

    // High-precision timestamp.
    //
    // Note that we also commit to a timestamp in the Fast seeder, so we indirectly commit to a
    // benchmark of all the entropy gathering sources in this function).
    SeedTimestamp(hasher);
}
|
|
|
|
|
2019-05-18 10:01:21 +02:00
|
|
|
/** Extract entropy from rng, strengthen it, and feed it into hasher. */
static void SeedStrengthen(CSHA512& hasher, RNGState& rng, int microseconds) noexcept
{
    // Generate 32 bytes of entropy from the RNG, and a copy of the entropy already in hasher.
    unsigned char strengthen_seed[32];
    rng.MixExtract(strengthen_seed, sizeof(strengthen_seed), CSHA512(hasher), false);
    // Strengthen the seed, and feed it into hasher.
    Strengthen(strengthen_seed, microseconds, hasher);
}
|
|
|
|
|
2019-12-05 15:14:24 +01:00
|
|
|
static void SeedPeriodic(CSHA512& hasher, RNGState& rng) noexcept
|
2016-05-30 15:46:16 +02:00
|
|
|
{
|
2021-09-11 22:52:36 +02:00
|
|
|
// Everything that the 'fast' seeder includes
|
|
|
|
SeedFast(hasher);
|
2016-05-30 15:46:16 +02:00
|
|
|
|
2021-09-11 22:52:36 +02:00
|
|
|
// High-precision timestamp
|
|
|
|
SeedTimestamp(hasher);
|
2016-05-30 15:46:16 +02:00
|
|
|
|
2022-04-25 11:59:51 +02:00
|
|
|
// Dynamic environment data (performance monitoring, ...)
|
|
|
|
auto old_size = hasher.Size();
|
|
|
|
RandAddDynamicEnv(hasher);
|
|
|
|
LogPrintf("Feeding %i bytes of dynamic environment data into RNG\n", hasher.Size() - old_size);
|
2019-05-18 10:01:21 +02:00
|
|
|
|
2022-04-25 11:59:51 +02:00
|
|
|
// Strengthen for 10 ms
|
|
|
|
SeedStrengthen(hasher, rng, 10000);
|
2021-09-11 22:52:36 +02:00
|
|
|
}
|
|
|
|
|
2019-05-18 10:01:21 +02:00
|
|
|
/** One-time heavyweight seeding, performed on the first RNG use. */
static void SeedStartup(CSHA512& hasher, RNGState& rng) noexcept
{
    // Gather 256 bits of hardware randomness, if available
    SeedHardwareSlow(hasher);

    // Everything that the 'slow' seeder includes.
    SeedSlow(hasher);

    // Dynamic environment data (performance monitoring, ...)
    auto old_size = hasher.Size();
    RandAddDynamicEnv(hasher);

    // Static environment data
    RandAddStaticEnv(hasher);
    LogPrintf("Feeding %i bytes of environment data into RNG\n", hasher.Size() - old_size);

    // Strengthen for 100 ms
    SeedStrengthen(hasher, rng, 100000);
}
|
|
|
|
|
2021-09-11 22:52:36 +02:00
|
|
|
/** How much entropy gathering to perform in ProcRand(). */
enum class RNGLevel {
    FAST, //!< Automatically called by GetRandBytes
    SLOW, //!< Automatically called by GetStrongRandBytes
    PERIODIC, //!< Called by RandAddPeriodic()
};
|
2019-02-04 12:11:34 +01:00
|
|
|
|
2021-09-11 22:52:36 +02:00
|
|
|
/** Central entry point: gather entropy at the requested level, mix it into the
 *  RNG state, and write `num` (<= 32) output bytes to `out`. Performs the
 *  one-time startup seeding on first invocation. */
static void ProcRand(unsigned char* out, int num, RNGLevel level)
{
    // Make sure the RNG is initialized first (as all Seed* function possibly need hwrand to be available).
    RNGState& rng = GetRNGState();

    assert(num <= 32);

    CSHA512 hasher;
    switch (level) {
    case RNGLevel::FAST:
        SeedFast(hasher);
        break;
    case RNGLevel::SLOW:
        SeedSlow(hasher);
        break;
    case RNGLevel::PERIODIC:
        SeedPeriodic(hasher, rng);
        break;
    }

    // Combine with and update state
    if (!rng.MixExtract(out, num, std::move(hasher), false)) {
        // On the first invocation, also seed with SeedStartup().
        CSHA512 startup_hasher;
        SeedStartup(startup_hasher, rng);
        rng.MixExtract(out, num, std::move(startup_hasher), true);
    }
}
|
|
|
|
|
2020-04-08 14:27:07 +02:00
|
|
|
std::chrono::microseconds GetRandMicros(std::chrono::microseconds duration_max) noexcept
|
|
|
|
{
|
|
|
|
return std::chrono::microseconds{GetRand(duration_max.count())};
|
|
|
|
}
|
|
|
|
|
2021-09-11 22:52:36 +02:00
|
|
|
// Public wrappers over ProcRand() at the three entropy levels.
void GetRandBytes(unsigned char* buf, int num) noexcept { ProcRand(buf, num, RNGLevel::FAST); }
void GetStrongRandBytes(unsigned char* buf, int num) noexcept { ProcRand(buf, num, RNGLevel::SLOW); }
void RandAddPeriodic() noexcept { ProcRand(nullptr, 0, RNGLevel::PERIODIC); }
|
2021-09-11 22:52:36 +02:00
|
|
|
|
|
|
|
// When true, GetRand() uses a deterministic FastRandomContext (tests only).
bool g_mock_deterministic_tests{false};

/** Return a uniformly random uint64_t in [0, nMax). */
uint64_t GetRand(uint64_t nMax) noexcept
{
    return FastRandomContext(g_mock_deterministic_tests).randrange(nMax);
}
|
|
|
|
|
|
|
|
int GetRandInt(int nMax) noexcept
|
2014-06-26 14:41:53 +02:00
|
|
|
{
|
|
|
|
return GetRand(nMax);
|
|
|
|
}
|
|
|
|
|
2021-09-11 22:52:36 +02:00
|
|
|
/** Return 256 bits of fresh randomness as a uint256. */
uint256 GetRandHash() noexcept
{
    uint256 result;
    GetRandBytes((unsigned char*)&result, sizeof(result));
    return result;
}
|
|
|
|
|
2019-01-09 07:30:19 +01:00
|
|
|
bool GetRandBool(double rate)
|
|
|
|
{
|
|
|
|
if (rate == 0.0) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
const uint64_t v = 100000000;
|
|
|
|
uint64_t r = GetRand(v + 1);
|
|
|
|
return r <= v * rate;
|
|
|
|
}
|
|
|
|
|
2017-04-24 14:02:12 +02:00
|
|
|
/** (Re)seed this context with 256 bits from the global strong RNG. */
void FastRandomContext::RandomSeed()
{
    uint256 seed = GetRandHash();
    rng.SetKey(seed.begin(), 32);
    requires_seed = false;
}
|
|
|
|
|
2021-09-11 22:52:36 +02:00
|
|
|
/** Generate a random 256-bit value from the buffered keystream. */
uint256 FastRandomContext::rand256() noexcept
{
    // Refill the keystream buffer if fewer than 32 bytes remain.
    if (bytebuf_size < 32) {
        FillByteBuffer();
    }
    uint256 ret;
    // Consume 32 bytes from the end of the unread region of bytebuf.
    memcpy(ret.begin(), bytebuf + 64 - bytebuf_size, 32);
    bytebuf_size -= 32;
    return ret;
}
|
|
|
|
|
|
|
|
/** Generate `len` random bytes directly from the stream cipher. */
std::vector<unsigned char> FastRandomContext::randbytes(size_t len)
{
    if (requires_seed) RandomSeed();
    std::vector<unsigned char> ret(len);
    if (len > 0) {
        rng.Keystream(&ret[0], len);
    }
    return ret;
}
|
|
|
|
|
2021-09-11 22:52:36 +02:00
|
|
|
/** Construct a context keyed with an explicit seed (deterministic output). */
FastRandomContext::FastRandomContext(const uint256& seed) noexcept : requires_seed(false), bytebuf_size(0), bitbuf_size(0)
{
    rng.SetKey(seed.begin(), 32);
}
|
2017-03-01 12:40:06 +01:00
|
|
|
|
|
|
|
/** Sanity-check the OS entropy source and the performance counter.
 *  Returns false if either appears broken. */
bool Random_SanityCheck()
{
    uint64_t start = GetPerformanceCounter();

    /* This does not measure the quality of randomness, but it does test that
     * GetOSRand() overwrites all 32 bytes of the output given a maximum
     * number of tries.
     */
    static const ssize_t MAX_TRIES = 1024;
    uint8_t data[NUM_OS_RANDOM_BYTES];
    bool overwritten[NUM_OS_RANDOM_BYTES] = {}; /* Tracks which bytes have been overwritten at least once */
    int num_overwritten;
    int tries = 0;
    /* Loop until all bytes have been overwritten at least once, or max number tries reached */
    do {
        memset(data, 0, NUM_OS_RANDOM_BYTES);
        GetOSRand(data);
        for (int x=0; x < NUM_OS_RANDOM_BYTES; ++x) {
            overwritten[x] |= (data[x] != 0);
        }

        num_overwritten = 0;
        for (int x=0; x < NUM_OS_RANDOM_BYTES; ++x) {
            if (overwritten[x]) {
                num_overwritten += 1;
            }
        }

        tries += 1;
    } while (num_overwritten < NUM_OS_RANDOM_BYTES && tries < MAX_TRIES);
    if (num_overwritten != NUM_OS_RANDOM_BYTES) return false; /* If this failed, bailed out after too many tries */

    // Check that GetPerformanceCounter increases at least during a GetOSRand() call + 1ms sleep.
    std::this_thread::sleep_for(std::chrono::milliseconds(1));
    uint64_t stop = GetPerformanceCounter();
    if (stop == start) return false;

    // We called GetPerformanceCounter. Use it as entropy.
    CSHA512 to_add;
    to_add.Write((const unsigned char*)&start, sizeof(start));
    to_add.Write((const unsigned char*)&stop, sizeof(stop));
    GetRNGState().MixExtract(nullptr, 0, std::move(to_add), false);

    return true;
}
|
2017-04-24 14:02:12 +02:00
|
|
|
|
2021-09-11 22:52:36 +02:00
|
|
|
/** Construct a context. In deterministic mode the cipher is keyed with an
 *  all-zero seed so output is reproducible; otherwise seeding is deferred
 *  until the first draw. */
FastRandomContext::FastRandomContext(bool fDeterministic) noexcept : requires_seed(!fDeterministic), bytebuf_size(0), bitbuf_size(0)
{
    if (fDeterministic) {
        // uint256 default-constructs to all zeroes.
        uint256 seed;
        rng.SetKey(seed.begin(), 32);
    }
}
|
2017-06-14 15:22:08 +02:00
|
|
|
|
2018-12-13 13:43:12 +01:00
|
|
|
/** Move-assign: transfer the cipher state and buffers, then invalidate the
 *  source so it reseeds before producing any further output. */
FastRandomContext& FastRandomContext::operator=(FastRandomContext&& from) noexcept
{
    requires_seed = from.requires_seed;
    rng = from.rng;
    std::copy(std::begin(from.bytebuf), std::end(from.bytebuf), std::begin(bytebuf));
    bytebuf_size = from.bytebuf_size;
    bitbuf = from.bitbuf;
    bitbuf_size = from.bitbuf_size;
    // Force the moved-from context to reseed: it must not replay the same
    // keystream as this one.
    from.requires_seed = true;
    from.bytebuf_size = 0;
    from.bitbuf_size = 0;
    return *this;
}
|
|
|
|
|
2017-06-14 15:22:08 +02:00
|
|
|
/** Eagerly initialize the RNG (hardware detection + first seeding) and log
 *  which hardware entropy sources are in use. */
void RandomInit()
{
    // Invoke RNG code to trigger initialization (if not already performed)
    ProcRand(nullptr, 0, RNGLevel::FAST);

    ReportHardwareRand();
}
|