// Copyright (c) 2009-2010 Satoshi Nakamoto
// Copyright (c) 2009-2015 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
2020-03-19 23:46:56 +01:00
# include <wallet/db.h>
2013-04-13 07:13:08 +02:00
2021-06-27 08:33:13 +02:00
# include <util/strencodings.h>
2013-04-13 07:13:08 +02:00
# include <stdint.h>
2010-08-29 18:58:15 +02:00
2012-04-15 22:10:54 +02:00
# ifndef WIN32
2013-04-13 07:13:08 +02:00
# include <sys/stat.h>
2012-04-15 22:10:54 +02:00
# endif
2017-10-19 18:12:59 +02:00
namespace {
2018-10-24 17:22:25 +02:00
2017-10-19 18:12:59 +02:00
//! Make sure database has a unique fileid within the environment. If it
//! doesn't, throw an error. BDB caches do not work properly when more than one
//! open database has the same fileid (values written to one database may show
//! up in reads to other databases).
//!
//! BerkeleyDB generates unique fileids by default
//! (https://docs.oracle.com/cd/E17275_01/html/programmer_reference/program_copy.html),
//! so bitcoin should never create different databases with the same fileid, but
//! this error can be triggered if users manually copy database files.
2018-10-24 17:22:25 +02:00
void CheckUniqueFileid ( const BerkeleyEnvironment & env , const std : : string & filename , Db & db , WalletDatabaseFileId & fileid )
2017-10-19 18:12:59 +02:00
{
if ( env . IsMock ( ) ) return ;
2018-10-24 17:22:25 +02:00
int ret = db . get_mpf ( ) - > get_fileid ( fileid . value ) ;
2017-10-19 18:12:59 +02:00
if ( ret ! = 0 ) {
2020-04-15 00:13:51 +02:00
throw std : : runtime_error ( strprintf ( " BerkeleyBatch: Can't open database %s (get_fileid failed with %d) " , filename , ret ) ) ;
2017-10-19 18:12:59 +02:00
}
2018-10-24 17:22:25 +02:00
for ( const auto & item : env . m_fileids ) {
if ( fileid = = item . second & & & fileid ! = & item . second ) {
2020-04-15 00:13:51 +02:00
throw std : : runtime_error ( strprintf ( " BerkeleyBatch: Can't open database %s (duplicates fileid %s from %s) " , filename ,
2020-06-24 17:26:47 +02:00
HexStr ( item . second . value ) , item . first ) ) ;
2017-10-19 18:12:59 +02:00
}
}
}
2018-03-07 17:05:08 +01:00
CCriticalSection cs_db ;
2019-01-31 18:01:30 +01:00
std : : map < std : : string , std : : weak_ptr < BerkeleyEnvironment > > g_dbenvs GUARDED_BY ( cs_db ) ; //!< Map from directory name to db environment.
2017-10-19 18:12:59 +02:00
} // namespace
2018-10-24 17:22:25 +02:00
bool WalletDatabaseFileId : : operator = = ( const WalletDatabaseFileId & rhs ) const
{
return memcmp ( value , & rhs . value , sizeof ( value ) ) = = 0 ;
}
2018-11-20 15:15:51 +01:00
static void SplitWalletPath ( const fs : : path & wallet_path , fs : : path & env_directory , std : : string & database_filename )
2018-03-07 17:05:08 +01:00
{
if ( fs : : is_regular_file ( wallet_path ) ) {
// Special case for backwards compatibility: if wallet path points to an
// existing file, treat it as the path to a BDB data file in a parent
// directory that also contains BDB log files.
env_directory = wallet_path . parent_path ( ) ;
database_filename = wallet_path . filename ( ) . string ( ) ;
} else {
// Normal case: Interpret wallet path as a directory path containing
// data and log files.
env_directory = wallet_path ;
database_filename = " wallet.dat " ;
}
2018-11-20 15:15:51 +01:00
}
bool IsWalletLoaded ( const fs : : path & wallet_path )
{
fs : : path env_directory ;
std : : string database_filename ;
SplitWalletPath ( wallet_path , env_directory , database_filename ) ;
LOCK ( cs_db ) ;
auto env = g_dbenvs . find ( env_directory . string ( ) ) ;
if ( env = = g_dbenvs . end ( ) ) return false ;
2019-01-31 18:01:30 +01:00
auto database = env - > second . lock ( ) ;
return database & & database - > IsDatabaseLoaded ( database_filename ) ;
2018-11-20 15:15:51 +01:00
}
2019-03-18 08:34:23 +01:00
fs : : path WalletDataFilePath ( const fs : : path & wallet_path )
{
fs : : path env_directory ;
std : : string database_filename ;
SplitWalletPath ( wallet_path , env_directory , database_filename ) ;
return env_directory / database_filename ;
}
2019-01-31 18:01:30 +01:00
/**
* @ param [ in ] wallet_path Path to wallet directory . Or ( for backwards compatibility only ) a path to a berkeley btree data file inside a wallet directory .
* @ param [ out ] database_filename Filename of berkeley btree data file inside the wallet directory .
* @ return A shared pointer to the BerkeleyEnvironment object for the wallet directory , never empty because ~ BerkeleyEnvironment
* erases the weak pointer from the g_dbenvs map .
* @ post A new BerkeleyEnvironment weak pointer is inserted into g_dbenvs if the directory path key was not already in the map .
*/
std : : shared_ptr < BerkeleyEnvironment > GetWalletEnv ( const fs : : path & wallet_path , std : : string & database_filename )
2018-11-20 15:15:51 +01:00
{
fs : : path env_directory ;
SplitWalletPath ( wallet_path , env_directory , database_filename ) ;
2018-03-07 17:05:08 +01:00
LOCK ( cs_db ) ;
2019-01-31 18:01:30 +01:00
auto inserted = g_dbenvs . emplace ( env_directory . string ( ) , std : : weak_ptr < BerkeleyEnvironment > ( ) ) ;
if ( inserted . second ) {
auto env = std : : make_shared < BerkeleyEnvironment > ( env_directory . string ( ) ) ;
inserted . first - > second = env ;
return env ;
}
return inserted . first - > second . lock ( ) ;
2018-03-07 17:05:08 +01:00
}
//
// BerkeleyBatch
//
void BerkeleyEnvironment : : Close ( )
2011-11-11 03:12:46 +01:00
{
if ( ! fDbEnvInit )
return ;
fDbEnvInit = false ;
2018-03-07 17:05:08 +01:00
2018-11-20 15:15:51 +01:00
for ( auto & db : m_databases ) {
2018-03-07 17:05:08 +01:00
auto count = mapFileUseCount . find ( db . first ) ;
assert ( count = = mapFileUseCount . end ( ) | | count - > second = = 0 ) ;
2018-11-20 15:15:51 +01:00
BerkeleyDatabase & database = db . second . get ( ) ;
if ( database . m_db ) {
database . m_db - > close ( 0 ) ;
database . m_db . reset ( ) ;
2018-03-07 17:05:08 +01:00
}
}
2019-02-05 22:09:24 +01:00
FILE * error_file = nullptr ;
dbenv - > get_errfile ( & error_file ) ;
2015-03-03 16:49:12 +01:00
int ret = dbenv - > close ( 0 ) ;
2012-10-08 21:18:04 +02:00
if ( ret ! = 0 )
2018-05-14 14:54:23 +02:00
LogPrintf ( " BerkeleyEnvironment::Close: Error %d closing database environment: %s \n " , ret , DbEnv : : strerror ( ret ) ) ;
2012-05-22 21:51:13 +02:00
if ( ! fMockDb )
2016-08-31 14:42:38 +02:00
DbEnv ( ( u_int32_t ) 0 ) . remove ( strPath . c_str ( ) , 0 ) ;
2019-02-05 22:09:24 +01:00
if ( error_file ) fclose ( error_file ) ;
UnlockDirectory ( strPath , " .walletlock " ) ;
2011-11-11 03:12:46 +01:00
}
2020-04-15 00:13:51 +02:00
void BerkeleyEnvironment : : Reset ( )
2010-08-29 18:58:15 +02:00
{
2017-11-09 21:22:08 +01:00
dbenv . reset ( new DbEnv ( DB_CXX_NO_EXCEPTIONS ) ) ;
2012-11-18 11:58:32 +01:00
fDbEnvInit = false ;
fMockDb = false ;
2010-08-29 18:58:15 +02:00
}
2020-04-15 00:13:51 +02:00
BerkeleyEnvironment : : BerkeleyEnvironment ( const fs : : path & dir_path ) : strPath ( dir_path . string ( ) )
2015-03-03 16:49:12 +01:00
{
Reset ( ) ;
}
2020-04-15 00:13:51 +02:00
BerkeleyEnvironment : : ~ BerkeleyEnvironment ( )
2012-05-14 03:37:39 +02:00
{
2019-02-04 13:00:49 +01:00
LOCK ( cs_db ) ;
2019-01-31 18:01:30 +01:00
g_dbenvs . erase ( strPath ) ;
2018-03-07 17:05:08 +01:00
Close ( ) ;
2012-05-14 03:37:39 +02:00
}
2020-04-15 00:13:51 +02:00
bool BerkeleyEnvironment : : Open ( bool retry )
2012-05-14 03:37:39 +02:00
{
2019-11-08 14:45:35 +01:00
if ( fDbEnvInit ) {
2012-05-14 03:37:39 +02:00
return true ;
2019-11-08 14:45:35 +01:00
}
2012-05-14 03:37:39 +02:00
2018-03-07 17:05:08 +01:00
fs : : path pathIn = strPath ;
TryCreateDirectories ( pathIn ) ;
2018-01-16 10:54:13 +01:00
if ( ! LockDirectory ( pathIn , " .walletlock " ) ) {
LogPrintf ( " Cannot obtain a lock on wallet directory %s. Another instance of bitcoin may be using it. \n " , strPath ) ;
return false ;
}
2017-04-06 20:19:21 +02:00
fs : : path pathLogDir = pathIn / " database " ;
2017-06-14 16:00:39 +02:00
TryCreateDirectories ( pathLogDir ) ;
2017-04-06 20:19:21 +02:00
fs : : path pathErrorFile = pathIn / " db.log " ;
2020-04-15 00:13:51 +02:00
LogPrintf ( " BerkeleyEnvironment::Open: LogDir=%s ErrorFile=%s \n " , pathLogDir . string ( ) , pathErrorFile . string ( ) ) ;
2012-05-14 03:37:39 +02:00
2012-05-22 23:45:00 +02:00
unsigned int nEnvFlags = 0 ;
2019-06-24 18:44:27 +02:00
if ( gArgs . GetBoolArg ( " -privdb " , DEFAULT_WALLET_PRIVDB ) )
2012-05-22 23:45:00 +02:00
nEnvFlags | = DB_PRIVATE ;
2015-03-03 16:49:12 +01:00
dbenv - > set_lg_dir ( pathLogDir . string ( ) . c_str ( ) ) ;
dbenv - > set_cachesize ( 0 , 0x100000 , 1 ) ; // 1 MiB should be enough for just the wallet
dbenv - > set_lg_bsize ( 0x10000 ) ;
dbenv - > set_lg_max ( 1048576 ) ;
dbenv - > set_lk_max_locks ( 40000 ) ;
dbenv - > set_lk_max_objects ( 40000 ) ;
2017-04-06 20:19:21 +02:00
dbenv - > set_errfile ( fsbridge : : fopen ( pathErrorFile , " a " ) ) ; /// debug
2015-03-03 16:49:12 +01:00
dbenv - > set_flags ( DB_AUTO_COMMIT , 1 ) ;
dbenv - > set_flags ( DB_TXN_WRITE_NOSYNC , 1 ) ;
dbenv - > log_set_config ( DB_LOG_AUTO_REMOVE , 1 ) ;
2015-06-15 07:46:51 +02:00
int ret = dbenv - > open ( strPath . c_str ( ) ,
2014-09-19 19:21:46 +02:00
DB_CREATE |
DB_INIT_LOCK |
DB_INIT_LOG |
DB_INIT_MPOOL |
DB_INIT_TXN |
DB_THREAD |
DB_RECOVER |
nEnvFlags ,
S_IRUSR | S_IWUSR ) ;
2017-08-15 21:24:07 +02:00
if ( ret ! = 0 ) {
2020-04-15 00:13:51 +02:00
LogPrintf ( " BerkeleyEnvironment::Open: Error %d opening database environment: %s \n " , ret , DbEnv : : strerror ( ret ) ) ;
2018-05-14 14:54:23 +02:00
int ret2 = dbenv - > close ( 0 ) ;
if ( ret2 ! = 0 ) {
LogPrintf ( " BerkeleyEnvironment::Open: Error %d closing failed database environment: %s \n " , ret2 , DbEnv : : strerror ( ret2 ) ) ;
}
Reset ( ) ;
2018-01-16 10:54:13 +01:00
if ( retry ) {
// try moving the database env out of the way
fs : : path pathDatabaseBak = pathIn / strprintf ( " database.%d.bak " , GetTime ( ) ) ;
try {
fs : : rename ( pathLogDir , pathDatabaseBak ) ;
LogPrintf ( " Moved old %s to %s. Retrying. \n " , pathLogDir . string ( ) , pathDatabaseBak . string ( ) ) ;
} catch ( const fs : : filesystem_error & ) {
// failure is ok (well, not really, but it's not worse than what we started with)
}
// try opening it again one more time
2018-03-07 17:05:08 +01:00
if ( ! Open ( false /* retry */ ) ) {
2018-01-16 10:54:13 +01:00
// if it still fails, it probably means we can't even create the database env
return false ;
}
} else {
return false ;
}
2017-08-15 21:24:07 +02:00
}
2012-05-14 03:37:39 +02:00
fDbEnvInit = true ;
2012-05-22 21:51:13 +02:00
fMockDb = false ;
2012-05-14 03:37:39 +02:00
return true ;
}
2019-11-08 14:45:35 +01:00
//! Construct an in-memory mock Berkeley environment for testing
2019-01-31 18:01:30 +01:00
BerkeleyEnvironment : : BerkeleyEnvironment ( )
2012-05-22 21:51:13 +02:00
{
2019-01-31 18:01:30 +01:00
Reset ( ) ;
2012-05-22 21:51:13 +02:00
2019-11-10 10:50:36 +01:00
LogPrint ( BCLog : : WALLETDB , " BerkeleyEnvironment::MakeMock \n " ) ;
2012-05-22 21:51:13 +02:00
2015-03-03 16:49:12 +01:00
dbenv - > set_cachesize ( 1 , 0 , 1 ) ;
dbenv - > set_lg_bsize ( 10485760 * 4 ) ;
dbenv - > set_lg_max ( 10485760 ) ;
dbenv - > set_lk_max_locks ( 10000 ) ;
dbenv - > set_lk_max_objects ( 10000 ) ;
dbenv - > set_flags ( DB_AUTO_COMMIT , 1 ) ;
dbenv - > log_set_config ( DB_LOG_IN_MEMORY , 1 ) ;
2019-08-06 05:08:33 +02:00
int ret = dbenv - > open ( nullptr ,
2014-09-19 19:21:46 +02:00
DB_CREATE |
DB_INIT_LOCK |
DB_INIT_LOG |
DB_INIT_MPOOL |
DB_INIT_TXN |
DB_THREAD |
DB_PRIVATE ,
S_IRUSR | S_IWUSR ) ;
2019-11-08 14:45:35 +01:00
if ( ret > 0 ) {
2020-04-15 00:13:51 +02:00
throw std : : runtime_error ( strprintf ( " BerkeleyEnvironment::MakeMock: Error %d opening database environment. " , ret ) ) ;
2019-11-08 14:45:35 +01:00
}
2012-05-22 21:51:13 +02:00
fDbEnvInit = true ;
fMockDb = true ;
}
2022-03-06 08:00:20 +01:00
bool BerkeleyEnvironment : : Verify ( const std : : string & strFile )
2012-09-18 20:30:47 +02:00
{
LOCK ( cs_db ) ;
assert ( mapFileUseCount . count ( strFile ) = = 0 ) ;
2017-11-09 21:22:08 +01:00
Db db ( dbenv . get ( ) , 0 ) ;
2019-08-06 05:08:33 +02:00
int result = db . verify ( strFile . c_str ( ) , nullptr , nullptr , 0 ) ;
2022-03-06 08:00:20 +01:00
return result = = 0 ;
2012-09-18 20:30:47 +02:00
}
2019-01-16 14:55:24 +01:00
BerkeleyBatch : : SafeDbt : : SafeDbt ( )
{
m_dbt . set_flags ( DB_DBT_MALLOC ) ;
}
BerkeleyBatch : : SafeDbt : : SafeDbt ( void * data , size_t size )
: m_dbt ( data , size )
{
}
BerkeleyBatch : : SafeDbt : : ~ SafeDbt ( )
{
if ( m_dbt . get_data ( ) ! = nullptr ) {
// Clear memory, e.g. in case it was a private key
memory_cleanse ( m_dbt . get_data ( ) , m_dbt . get_size ( ) ) ;
// under DB_DBT_MALLOC, data is malloced by the Dbt, but must be
// freed by the caller.
// https://docs.oracle.com/cd/E17275_01/html/api_reference/C/dbt.html
if ( m_dbt . get_flags ( ) & DB_DBT_MALLOC ) {
free ( m_dbt . get_data ( ) ) ;
}
}
}
const void * BerkeleyBatch : : SafeDbt : : get_data ( ) const
{
return m_dbt . get_data ( ) ;
}
u_int32_t BerkeleyBatch : : SafeDbt : : get_size ( ) const
{
return m_dbt . get_size ( ) ;
}
BerkeleyBatch : : SafeDbt : : operator Dbt * ( )
{
return & m_dbt ;
}
2020-04-15 00:13:51 +02:00
bool BerkeleyBatch : : VerifyEnvironment ( const fs : : path & file_path , std : : string & errorStr )
2017-03-06 12:49:01 +01:00
{
2018-03-07 17:05:08 +01:00
std : : string walletFile ;
2019-01-31 18:01:30 +01:00
std : : shared_ptr < BerkeleyEnvironment > env = GetWalletEnv ( file_path , walletFile ) ;
2018-03-07 17:05:08 +01:00
fs : : path walletDir = env - > Directory ( ) ;
2020-03-14 07:51:27 +01:00
LogPrintf ( " Using BerkeleyDB version %s \n " , BerkeleyDatabaseVersion ( ) ) ;
2019-02-13 22:51:14 +01:00
LogPrintf ( " Using wallet %s \n " , file_path . string ( ) ) ;
2017-03-06 12:49:01 +01:00
2018-03-07 17:05:08 +01:00
if ( ! env - > Open ( true /* retry */ ) ) {
2018-01-16 10:54:13 +01:00
errorStr = strprintf ( _ ( " Error initializing wallet database environment %s! " ) , walletDir ) ;
return false ;
2017-03-06 12:49:01 +01:00
}
2018-01-16 10:54:13 +01:00
2017-03-06 12:49:01 +01:00
return true ;
}
2022-03-06 08:00:20 +01:00
bool BerkeleyBatch : : VerifyDatabaseFile ( const fs : : path & file_path , std : : string & errorStr )
2017-03-06 12:49:01 +01:00
{
2018-03-07 17:05:08 +01:00
std : : string walletFile ;
2019-01-31 18:01:30 +01:00
std : : shared_ptr < BerkeleyEnvironment > env = GetWalletEnv ( file_path , walletFile ) ;
2018-03-07 17:05:08 +01:00
fs : : path walletDir = env - > Directory ( ) ;
2017-11-18 14:32:50 +01:00
if ( fs : : exists ( walletDir / walletFile ) )
2017-03-06 12:49:01 +01:00
{
2022-03-06 08:00:20 +01:00
if ( ! env - > Verify ( walletFile ) ) {
errorStr = strprintf ( _ ( " %s corrupt. Try using the wallet tool dash-wallet to salvage or restoring a backup. " ) , walletFile ) ;
2017-03-06 12:49:01 +01:00
return false ;
}
}
// also return true if files does not exists
return true ;
}
2020-04-15 00:13:51 +02:00
void BerkeleyEnvironment : : CheckpointLSN ( const std : : string & strFile )
2012-05-14 03:37:39 +02:00
{
2015-03-03 16:49:12 +01:00
dbenv - > txn_checkpoint ( 0 , 0 , 0 ) ;
2012-05-22 21:51:13 +02:00
if ( fMockDb )
return ;
2015-03-03 16:49:12 +01:00
dbenv - > lsn_reset ( strFile . c_str ( ) , 0 ) ;
2012-05-14 03:37:39 +02:00
}
2010-08-29 18:58:15 +02:00
2020-04-15 00:13:51 +02:00
BerkeleyBatch : : BerkeleyBatch ( BerkeleyDatabase & database , const char * pszMode , bool fFlushOnCloseIn ) : pdb ( nullptr ) , activeTxn ( nullptr )
2010-08-29 18:58:15 +02:00
{
2014-08-28 15:28:57 +02:00
fReadOnly = ( ! strchr ( pszMode , ' + ' ) & & ! strchr ( pszMode , ' w ' ) ) ;
2014-08-31 05:55:27 +02:00
fFlushOnClose = fFlushOnCloseIn ;
2019-01-31 18:01:30 +01:00
env = database . env . get ( ) ;
2020-04-15 00:13:51 +02:00
if ( database . IsDummy ( ) ) {
2010-08-29 18:58:15 +02:00
return ;
2017-04-24 14:43:38 +02:00
}
2020-04-15 00:13:51 +02:00
const std : : string & strFilename = database . strFile ;
2010-08-29 18:58:15 +02:00
2019-08-06 05:08:33 +02:00
bool fCreate = strchr ( pszMode , ' c ' ) ! = nullptr ;
2010-08-29 18:58:15 +02:00
unsigned int nFlags = DB_THREAD ;
if ( fCreate )
nFlags | = DB_CREATE ;
{
2018-03-07 17:05:08 +01:00
LOCK ( cs_db ) ;
if ( ! env - > Open ( false /* retry */ ) )
2020-04-15 00:13:51 +02:00
throw std : : runtime_error ( " BerkeleyBatch: Failed to open database environment. " ) ;
2010-08-29 18:58:15 +02:00
2018-11-20 15:15:51 +01:00
pdb = database . m_db . get ( ) ;
2019-08-06 05:08:33 +02:00
if ( pdb = = nullptr ) {
2017-06-09 21:58:02 +02:00
int ret ;
2017-11-09 21:22:08 +01:00
std : : unique_ptr < Db > pdb_temp = MakeUnique < Db > ( env - > dbenv . get ( ) , 0 ) ;
2010-08-29 18:58:15 +02:00
2017-04-24 14:43:38 +02:00
bool fMockDb = env - > IsMock ( ) ;
2014-09-19 19:21:46 +02:00
if ( fMockDb ) {
2017-10-18 15:38:07 +02:00
DbMpoolFile * mpf = pdb_temp - > get_mpf ( ) ;
2012-05-22 21:51:13 +02:00
ret = mpf - > set_flags ( DB_MPOOL_NOFILE , 1 ) ;
2017-10-18 15:38:07 +02:00
if ( ret ! = 0 ) {
2020-04-15 00:13:51 +02:00
throw std : : runtime_error ( strprintf ( " BerkeleyBatch: Failed to configure for no temp file backing for database %s " , strFilename ) ) ;
2017-10-18 15:38:07 +02:00
}
2012-05-22 21:51:13 +02:00
}
2017-10-18 15:38:07 +02:00
ret = pdb_temp - > open ( nullptr , // Txn pointer
fMockDb ? nullptr : strFilename . c_str ( ) , // Filename
fMockDb ? strFilename . c_str ( ) : " main " , // Logical db name
DB_BTREE , // Database type
nFlags , // Flags
2010-08-29 18:58:15 +02:00
0 ) ;
2014-09-19 19:21:46 +02:00
if ( ret ! = 0 ) {
2020-04-15 00:13:51 +02:00
throw std : : runtime_error ( strprintf ( " BerkeleyBatch: Error %d, can't open database %s " , ret , strFilename ) ) ;
2010-08-29 18:58:15 +02:00
}
2018-03-07 17:05:08 +01:00
// Call CheckUniqueFileid on the containing BDB environment to
// avoid BDB data consistency bugs that happen when different data
// files in the same environment have the same fileid.
//
// Also call CheckUniqueFileid on all the other g_dbenvs to prevent
// bitcoin from opening the same data file through another
// environment when the file is referenced through equivalent but
// not obviously identical symlinked or hard linked or bind mounted
// paths. In the future a more relaxed check for equal inode and
// device ids could be done instead, which would allow opening
// different backup copies of a wallet at the same time. Maybe even
// more ideally, an exclusive lock for accessing the database could
// be implemented, so no equality checks are needed at all. (Newer
// versions of BDB have an set_lk_exclusive method for this
// purpose, but the older version we use does not.)
2018-09-04 15:36:09 +02:00
for ( const auto & env : g_dbenvs ) {
2019-01-31 18:01:30 +01:00
CheckUniqueFileid ( * env . second . lock ( ) . get ( ) , strFilename , * pdb_temp , this - > env - > m_fileids [ strFilename ] ) ;
2018-03-07 17:05:08 +01:00
}
2010-08-29 18:58:15 +02:00
2017-10-18 15:38:07 +02:00
pdb = pdb_temp . release ( ) ;
2018-11-20 15:15:51 +01:00
database . m_db . reset ( pdb ) ;
2017-10-18 15:38:07 +02:00
2017-03-09 08:10:09 +01:00
if ( fCreate & & ! Exists ( std : : string ( " version " ) ) ) {
2010-08-29 18:58:15 +02:00
bool fTmp = fReadOnly ;
fReadOnly = false ;
2021-12-12 14:38:12 +01:00
Write ( std : : string ( " version " ) , CLIENT_VERSION ) ;
2010-08-29 18:58:15 +02:00
fReadOnly = fTmp ;
}
}
2017-10-18 15:38:07 +02:00
+ + env - > mapFileUseCount [ strFilename ] ;
strFile = strFilename ;
2010-08-29 18:58:15 +02:00
}
}
2020-04-15 00:13:51 +02:00
void BerkeleyBatch : : Flush ( )
2010-08-29 18:58:15 +02:00
{
2012-05-14 18:39:29 +02:00
if ( activeTxn )
2012-07-06 16:33:34 +02:00
return ;
2010-08-29 18:58:15 +02:00
// Flush database activity from memory pool to disk log
unsigned int nMinutes = 0 ;
2010-12-05 10:29:30 +01:00
if ( fReadOnly )
nMinutes = 1 ;
2012-03-28 22:09:18 +02:00
2021-12-21 14:13:07 +01:00
if ( env ) { // env is nullptr for dummy databases (i.e. in tests). Don't actually flush if env is nullptr so we don't segfault
env - > dbenv - > txn_checkpoint ( nMinutes ? gArgs . GetArg ( " -dblogsize " , DEFAULT_WALLET_DBLOGSIZE ) * 1024 : 0 , nMinutes , 0 ) ;
}
2012-07-06 16:33:34 +02:00
}
2020-04-15 00:13:51 +02:00
void BerkeleyDatabase : : IncrementUpdateCounter ( )
2019-07-11 09:50:52 +02:00
{
+ + nUpdateCounter ;
}
2020-04-15 00:13:51 +02:00
void BerkeleyBatch : : Close ( )
2012-07-06 16:33:34 +02:00
{
if ( ! pdb )
return ;
if ( activeTxn )
activeTxn - > abort ( ) ;
2019-08-06 05:08:33 +02:00
activeTxn = nullptr ;
pdb = nullptr ;
2012-07-06 16:33:34 +02:00
2014-08-31 05:55:27 +02:00
if ( fFlushOnClose )
Flush ( ) ;
2010-08-29 18:58:15 +02:00
2012-04-06 18:39:12 +02:00
{
2018-03-07 17:05:08 +01:00
LOCK ( cs_db ) ;
2017-04-24 14:43:38 +02:00
- - env - > mapFileUseCount [ strFile ] ;
2012-04-06 18:39:12 +02:00
}
2018-09-14 10:28:27 +02:00
env - > m_db_in_use . notify_all ( ) ;
2010-08-29 18:58:15 +02:00
}
2020-04-15 00:13:51 +02:00
void BerkeleyEnvironment : : CloseDb ( const std : : string & strFile )
2010-08-29 18:58:15 +02:00
{
{
2012-04-06 18:39:12 +02:00
LOCK ( cs_db ) ;
2018-11-20 15:15:51 +01:00
auto it = m_databases . find ( strFile ) ;
assert ( it ! = m_databases . end ( ) ) ;
BerkeleyDatabase & database = it - > second . get ( ) ;
if ( database . m_db ) {
2010-08-29 18:58:15 +02:00
// Close the database handle
2018-11-20 15:15:51 +01:00
database . m_db - > close ( 0 ) ;
database . m_db . reset ( ) ;
2010-08-29 18:58:15 +02:00
}
}
}
2018-09-14 10:28:27 +02:00
void BerkeleyEnvironment : : ReloadDbEnv ( )
{
// Make sure that no Db's are in use
AssertLockNotHeld ( cs_db ) ;
std : : unique_lock < CCriticalSection > lock ( cs_db ) ;
m_db_in_use . wait ( lock , [ this ] ( ) {
for ( auto & count : mapFileUseCount ) {
if ( count . second > 0 ) return false ;
}
return true ;
} ) ;
std : : vector < std : : string > filenames ;
2018-11-20 15:15:51 +01:00
for ( auto it : m_databases ) {
2018-09-14 10:28:27 +02:00
filenames . push_back ( it . first ) ;
}
// Close the individual Db's
for ( const std : : string & filename : filenames ) {
CloseDb ( filename ) ;
}
// Reset the environment
Flush ( true ) ; // This will flush and close the environment
Reset ( ) ;
Open ( true ) ;
}
2020-04-15 00:13:51 +02:00
bool BerkeleyBatch : : Rewrite ( BerkeleyDatabase & database , const char * pszSkip )
2011-11-10 21:29:23 +01:00
{
2020-04-15 00:13:51 +02:00
if ( database . IsDummy ( ) ) {
2017-04-24 14:43:38 +02:00
return true ;
}
2019-01-31 18:01:30 +01:00
BerkeleyEnvironment * env = database . env . get ( ) ;
2020-04-15 00:13:51 +02:00
const std : : string & strFile = database . strFile ;
2014-09-19 19:21:46 +02:00
while ( true ) {
2011-11-10 21:29:23 +01:00
{
2018-03-07 17:05:08 +01:00
LOCK ( cs_db ) ;
2017-04-24 14:43:38 +02:00
if ( ! env - > mapFileUseCount . count ( strFile ) | | env - > mapFileUseCount [ strFile ] = = 0 ) {
2011-11-10 21:29:23 +01:00
// Flush log data to the dat file
2017-04-24 14:43:38 +02:00
env - > CloseDb ( strFile ) ;
env - > CheckpointLSN ( strFile ) ;
env - > mapFileUseCount . erase ( strFile ) ;
2011-11-10 21:29:23 +01:00
bool fSuccess = true ;
2020-04-15 00:13:51 +02:00
LogPrintf ( " BerkeleyBatch::Rewrite: Rewriting %s... \n " , strFile ) ;
2017-03-09 08:10:09 +01:00
std : : string strFileRes = strFile + " .rewrite " ;
2011-11-20 17:12:00 +01:00
{ // surround usage of db with extra {}
2020-04-15 00:13:51 +02:00
BerkeleyBatch db ( database , " r " ) ;
2017-11-09 21:22:08 +01:00
std : : unique_ptr < Db > pdbCopy = MakeUnique < Db > ( env - > dbenv . get ( ) , 0 ) ;
2012-09-18 21:07:58 +02:00
2019-08-06 05:08:33 +02:00
int ret = pdbCopy - > open ( nullptr , // Txn pointer
2014-09-19 19:21:46 +02:00
strFileRes . c_str ( ) , // Filename
" main " , // Logical db name
DB_BTREE , // Database type
DB_CREATE , // Flags
2011-11-20 17:12:00 +01:00
0 ) ;
2014-09-19 19:21:46 +02:00
if ( ret > 0 ) {
2020-04-15 00:13:51 +02:00
LogPrintf ( " BerkeleyBatch::Rewrite: Can't create database file %s \n " , strFileRes ) ;
2011-11-20 17:12:00 +01:00
fSuccess = false ;
}
2012-09-18 21:07:58 +02:00
2011-11-20 17:12:00 +01:00
Dbc * pcursor = db . GetCursor ( ) ;
if ( pcursor )
2014-09-19 19:21:46 +02:00
while ( fSuccess ) {
2012-04-16 14:56:45 +02:00
CDataStream ssKey ( SER_DISK , CLIENT_VERSION ) ;
CDataStream ssValue ( SER_DISK , CLIENT_VERSION ) ;
2016-08-31 16:20:29 +02:00
int ret1 = db . ReadAtCursor ( pcursor , ssKey , ssValue ) ;
if ( ret1 = = DB_NOTFOUND ) {
2011-11-20 17:12:00 +01:00
pcursor - > close ( ) ;
break ;
2016-08-31 16:20:29 +02:00
} else if ( ret1 ! = 0 ) {
2011-11-20 17:12:00 +01:00
pcursor - > close ( ) ;
fSuccess = false ;
break ;
}
if ( pszSkip & &
2017-01-09 17:39:08 +01:00
strncmp ( ssKey . data ( ) , pszSkip , std : : min ( ssKey . size ( ) , strlen ( pszSkip ) ) ) = = 0 )
2011-11-20 17:12:00 +01:00
continue ;
2017-01-09 17:39:08 +01:00
if ( strncmp ( ssKey . data ( ) , " \x07 version " , 8 ) = = 0 ) {
2011-11-20 17:12:00 +01:00
// Update version:
ssValue . clear ( ) ;
2011-12-16 22:26:14 +01:00
ssValue < < CLIENT_VERSION ;
2011-11-20 17:12:00 +01:00
}
2017-01-09 17:39:08 +01:00
Dbt datKey ( ssKey . data ( ) , ssKey . size ( ) ) ;
Dbt datValue ( ssValue . data ( ) , ssValue . size ( ) ) ;
2019-08-06 05:08:33 +02:00
int ret2 = pdbCopy - > put ( nullptr , & datKey , & datValue , DB_NOOVERWRITE ) ;
2011-11-20 17:12:00 +01:00
if ( ret2 > 0 )
fSuccess = false ;
2011-11-11 03:12:46 +01:00
}
2014-09-19 19:21:46 +02:00
if ( fSuccess ) {
2011-11-20 17:12:00 +01:00
db . Close ( ) ;
2017-04-24 14:43:38 +02:00
env - > CloseDb ( strFile ) ;
2011-11-20 17:12:00 +01:00
if ( pdbCopy - > close ( 0 ) )
2011-11-10 21:29:23 +01:00
fSuccess = false ;
2017-08-15 21:24:07 +02:00
} else {
pdbCopy - > close ( 0 ) ;
2011-11-10 21:29:23 +01:00
}
}
2014-09-19 19:21:46 +02:00
if ( fSuccess ) {
2017-11-09 21:22:08 +01:00
Db dbA ( env - > dbenv . get ( ) , 0 ) ;
2019-08-06 05:08:33 +02:00
if ( dbA . remove ( strFile . c_str ( ) , nullptr , 0 ) )
2011-11-10 21:29:23 +01:00
fSuccess = false ;
2017-11-09 21:22:08 +01:00
Db dbB ( env - > dbenv . get ( ) , 0 ) ;
2019-08-06 05:08:33 +02:00
if ( dbB . rename ( strFileRes . c_str ( ) , nullptr , strFile . c_str ( ) , 0 ) )
2011-11-10 21:29:23 +01:00
fSuccess = false ;
}
if ( ! fSuccess )
2020-04-15 00:13:51 +02:00
LogPrintf ( " BerkeleyBatch::Rewrite: Failed to rewrite database file %s \n " , strFileRes ) ;
2011-11-10 21:29:23 +01:00
return fSuccess ;
}
}
2021-07-13 11:31:17 +02:00
UninterruptibleSleep ( std : : chrono : : milliseconds { 100 } ) ;
2011-11-10 21:29:23 +01:00
}
}
2020-04-15 00:13:51 +02:00
void BerkeleyEnvironment : : Flush ( bool fShutdown )
2010-08-29 18:58:15 +02:00
{
2013-04-13 07:13:08 +02:00
int64_t nStart = GetTimeMillis ( ) ;
2014-01-22 14:41:24 +01:00
// Flush log data to the actual data file on all files that are not in use
2019-11-10 10:50:36 +01:00
LogPrint ( BCLog : : WALLETDB , " BerkeleyEnvironment::Flush: [%s] Flush(%s)%s \n " , strPath , fShutdown ? " true " : " false " , fDbEnvInit ? " " : " database not started " ) ;
2010-08-29 18:58:15 +02:00
if ( ! fDbEnvInit )
return ;
{
2012-04-06 18:39:12 +02:00
LOCK ( cs_db ) ;
2017-03-09 08:10:09 +01:00
std : : map < std : : string , int > : : iterator mi = mapFileUseCount . begin ( ) ;
2014-09-19 19:21:46 +02:00
while ( mi ! = mapFileUseCount . end ( ) ) {
2017-03-09 08:10:09 +01:00
std : : string strFile = ( * mi ) . first ;
2010-08-29 18:58:15 +02:00
int nRefCount = ( * mi ) . second ;
2019-11-10 10:50:36 +01:00
LogPrint ( BCLog : : WALLETDB , " BerkeleyEnvironment::Flush: Flushing %s (refcount = %d)... \n " , strFile , nRefCount ) ;
2014-09-19 19:21:46 +02:00
if ( nRefCount = = 0 ) {
2010-08-29 18:58:15 +02:00
// Move log data to the dat file
CloseDb ( strFile ) ;
2019-11-10 10:50:36 +01:00
LogPrint ( BCLog : : WALLETDB , " BerkeleyEnvironment::Flush: %s checkpoint \n " , strFile ) ;
2015-03-03 16:49:12 +01:00
dbenv - > txn_checkpoint ( 0 , 0 , 0 ) ;
2019-11-10 10:50:36 +01:00
LogPrint ( BCLog : : WALLETDB , " BerkeleyEnvironment::Flush: %s detach \n " , strFile ) ;
2012-11-04 12:48:45 +01:00
if ( ! fMockDb )
2015-03-03 16:49:12 +01:00
dbenv - > lsn_reset ( strFile . c_str ( ) , 0 ) ;
2019-11-10 10:50:36 +01:00
LogPrint ( BCLog : : WALLETDB , " BerkeleyEnvironment::Flush: %s closed \n " , strFile ) ;
2010-08-29 18:58:15 +02:00
mapFileUseCount . erase ( mi + + ) ;
2014-09-19 19:21:46 +02:00
} else
2010-08-29 18:58:15 +02:00
mi + + ;
}
2019-11-10 10:50:36 +01:00
LogPrint ( BCLog : : WALLETDB , " BerkeleyEnvironment::Flush: Flush(%s)%s took %15dms \n " , fShutdown ? " true " : " false " , fDbEnvInit ? " " : " database not started " , GetTimeMillis ( ) - nStart ) ;
2014-09-19 19:21:46 +02:00
if ( fShutdown ) {
2010-08-29 18:58:15 +02:00
char * * listp ;
2014-09-19 19:21:46 +02:00
if ( mapFileUseCount . empty ( ) ) {
2015-03-03 16:49:12 +01:00
dbenv - > log_archive ( & listp , DB_ARCH_REMOVE ) ;
2012-05-14 03:37:39 +02:00
Close ( ) ;
2018-06-21 16:24:01 +02:00
if ( ! fMockDb ) {
2017-04-06 20:19:21 +02:00
fs : : remove_all ( fs : : path ( strPath ) / " database " ) ;
2018-06-21 16:24:01 +02:00
}
2011-11-11 03:12:46 +01:00
}
2010-08-29 18:58:15 +02:00
}
}
}
2017-03-06 12:49:01 +01:00
2020-04-15 00:13:51 +02:00
bool BerkeleyBatch : : PeriodicFlush ( BerkeleyDatabase & database )
2017-03-06 12:49:01 +01:00
{
2020-04-15 00:13:51 +02:00
if ( database . IsDummy ( ) ) {
2017-04-24 14:43:38 +02:00
return true ;
}
2017-03-06 12:49:01 +01:00
bool ret = false ;
2019-01-31 18:01:30 +01:00
BerkeleyEnvironment * env = database . env . get ( ) ;
2020-04-15 00:13:51 +02:00
const std : : string & strFile = database . strFile ;
2018-03-07 17:05:08 +01:00
TRY_LOCK ( cs_db , lockDb ) ;
2017-03-06 12:49:01 +01:00
if ( lockDb )
{
// Don't do this if any databases are in use
int nRefCount = 0 ;
2017-04-24 14:43:38 +02:00
std : : map < std : : string , int > : : iterator mit = env - > mapFileUseCount . begin ( ) ;
while ( mit ! = env - > mapFileUseCount . end ( ) )
2017-03-06 12:49:01 +01:00
{
2017-03-18 11:00:00 +01:00
nRefCount + = ( * mit ) . second ;
mit + + ;
2017-03-06 12:49:01 +01:00
}
if ( nRefCount = = 0 )
{
2017-04-24 14:43:38 +02:00
std : : map < std : : string , int > : : iterator mi = env - > mapFileUseCount . find ( strFile ) ;
if ( mi ! = env - > mapFileUseCount . end ( ) )
2017-03-06 12:49:01 +01:00
{
2019-11-10 10:50:36 +01:00
LogPrint ( BCLog : : WALLETDB , " Flushing %s \n " , strFile ) ;
2017-03-06 12:49:01 +01:00
int64_t nStart = GetTimeMillis ( ) ;
// Flush wallet file so it's self contained
2017-04-24 14:43:38 +02:00
env - > CloseDb ( strFile ) ;
env - > CheckpointLSN ( strFile ) ;
2017-03-06 12:49:01 +01:00
2017-04-24 14:43:38 +02:00
env - > mapFileUseCount . erase ( mi + + ) ;
2019-11-10 10:50:36 +01:00
LogPrint ( BCLog : : WALLETDB , " Flushed %s %dms \n " , strFile , GetTimeMillis ( ) - nStart ) ;
2017-03-06 12:49:01 +01:00
ret = true ;
}
}
}
return ret ;
}
2017-04-24 14:43:38 +02:00
2020-04-15 00:13:51 +02:00
bool BerkeleyDatabase : : Rewrite ( const char * pszSkip )
2017-04-24 14:43:38 +02:00
{
2020-04-15 00:13:51 +02:00
return BerkeleyBatch : : Rewrite ( * this , pszSkip ) ;
2017-04-24 14:43:38 +02:00
}
2020-04-15 00:13:51 +02:00
bool BerkeleyDatabase : : Backup ( const std : : string & strDest )
2017-04-24 14:43:38 +02:00
{
if ( IsDummy ( ) ) {
return false ;
}
while ( true )
{
{
2018-03-07 17:05:08 +01:00
LOCK ( cs_db ) ;
2017-04-24 14:43:38 +02:00
if ( ! env - > mapFileUseCount . count ( strFile ) | | env - > mapFileUseCount [ strFile ] = = 0 )
{
// Flush log data to the dat file
env - > CloseDb ( strFile ) ;
env - > CheckpointLSN ( strFile ) ;
env - > mapFileUseCount . erase ( strFile ) ;
// Copy wallet file
2018-08-07 13:27:21 +02:00
fs : : path pathSrc = env - > Directory ( ) / strFile ;
2017-04-24 14:43:38 +02:00
fs : : path pathDest ( strDest ) ;
if ( fs : : is_directory ( pathDest ) )
pathDest / = strFile ;
try {
2017-11-01 17:26:23 +01:00
if ( fs : : equivalent ( pathSrc , pathDest ) ) {
LogPrintf ( " cannot backup to wallet source file %s \n " , pathDest . string ( ) ) ;
return false ;
}
2017-04-24 14:43:38 +02:00
fs : : copy_file ( pathSrc , pathDest , fs : : copy_option : : overwrite_if_exists ) ;
LogPrintf ( " copied %s to %s \n " , strFile , pathDest . string ( ) ) ;
return true ;
} catch ( const fs : : filesystem_error & e ) {
2018-09-10 20:08:56 +02:00
LogPrintf ( " error copying %s to %s - %s \n " , strFile , pathDest . string ( ) , fsbridge : : get_filesystem_error_message ( e ) ) ;
2017-04-24 14:43:38 +02:00
return false ;
}
}
}
2021-07-13 11:31:17 +02:00
UninterruptibleSleep ( std : : chrono : : milliseconds { 100 } ) ;
2017-04-24 14:43:38 +02:00
}
}
2020-04-15 00:13:51 +02:00
void BerkeleyDatabase : : Flush ( bool shutdown )
2017-04-24 14:43:38 +02:00
{
if ( ! IsDummy ( ) ) {
env - > Flush ( shutdown ) ;
2018-09-14 10:28:27 +02:00
if ( shutdown ) {
LOCK ( cs_db ) ;
g_dbenvs . erase ( env - > Directory ( ) . string ( ) ) ;
env = nullptr ;
2018-10-24 17:22:25 +02:00
} else {
// TODO: To avoid g_dbenvs.erase erasing the environment prematurely after the
// first database shutdown when multiple databases are open in the same
// environment, should replace raw database `env` pointers with shared or weak
// pointers, or else separate the database and environment shutdowns so
// environments can be shut down after databases.
env - > m_fileids . erase ( strFile ) ;
2018-09-14 10:28:27 +02:00
}
}
}
void BerkeleyDatabase : : ReloadDbEnv ( )
{
if ( ! IsDummy ( ) ) {
env - > ReloadDbEnv ( ) ;
2017-04-24 14:43:38 +02:00
}
}
2020-03-14 07:51:27 +01:00
std : : string BerkeleyDatabaseVersion ( )
{
return DbEnv : : version ( nullptr , nullptr , nullptr ) ;
}