2014-08-26 22:28:32 +02:00
// Copyright (c) 2009-2010 Satoshi Nakamoto
2016-11-07 12:51:36 +01:00
// Copyright (c) 2009-2015 The Bitcoin Core developers
2014-08-26 22:28:32 +02:00
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
2020-03-19 23:46:56 +01:00
# include <policy/fees.h>
# include <policy/policy.h>
2014-08-26 22:28:32 +02:00
2020-03-19 23:46:56 +01:00
# include <clientversion.h>
# include <primitives/transaction.h>
# include <streams.h>
# include <txmempool.h>
# include <util.h>
2014-08-26 22:28:32 +02:00
2017-05-17 22:02:50 +02:00
static constexpr double INF_FEERATE = 1e99 ;
2017-07-11 15:28:28 +02:00
std : : string StringForFeeEstimateHorizon ( FeeEstimateHorizon horizon ) {
static const std : : map < FeeEstimateHorizon , std : : string > horizon_strings = {
{ FeeEstimateHorizon : : SHORT_HALFLIFE , " short " } ,
{ FeeEstimateHorizon : : MED_HALFLIFE , " medium " } ,
{ FeeEstimateHorizon : : LONG_HALFLIFE , " long " } ,
} ;
auto horizon_string = horizon_strings . find ( horizon ) ;
if ( horizon_string = = horizon_strings . end ( ) ) return " unknown " ;
return horizon_string - > second ;
}
2017-06-15 13:56:16 +02:00
std : : string StringForFeeReason ( FeeReason reason ) {
static const std : : map < FeeReason , std : : string > fee_reason_strings = {
{ FeeReason : : NONE , " None " } ,
{ FeeReason : : HALF_ESTIMATE , " Half Target 60% Threshold " } ,
{ FeeReason : : FULL_ESTIMATE , " Target 85% Threshold " } ,
{ FeeReason : : DOUBLE_ESTIMATE , " Double Target 95% Threshold " } ,
{ FeeReason : : CONSERVATIVE , " Conservative Double Target longer horizon " } ,
{ FeeReason : : MEMPOOL_MIN , " Mempool Min Fee " } ,
{ FeeReason : : PAYTXFEE , " PayTxFee set " } ,
{ FeeReason : : FALLBACK , " Fallback fee " } ,
{ FeeReason : : REQUIRED , " Minimum Required Fee " } ,
{ FeeReason : : MAXTXFEE , " MaxTxFee limit " }
} ;
auto reason_string = fee_reason_strings . find ( reason ) ;
if ( reason_string = = fee_reason_strings . end ( ) ) return " Unknown " ;
return reason_string - > second ;
}
2017-07-11 11:57:24 +02:00
bool FeeModeFromString ( const std : : string & mode_string , FeeEstimateMode & fee_estimate_mode ) {
static const std : : map < std : : string , FeeEstimateMode > fee_modes = {
{ " UNSET " , FeeEstimateMode : : UNSET } ,
{ " ECONOMICAL " , FeeEstimateMode : : ECONOMICAL } ,
{ " CONSERVATIVE " , FeeEstimateMode : : CONSERVATIVE } ,
} ;
auto mode = fee_modes . find ( mode_string ) ;
if ( mode = = fee_modes . end ( ) ) return false ;
fee_estimate_mode = mode - > second ;
return true ;
}
2017-04-20 21:16:19 +02:00
/**
* We will instantiate an instance of this class to track transactions that were
* included in a block . We will lump transactions into a bucket according to their
* approximate feerate and then track how long it took for those txs to be included in a block
*
* The tracking of unconfirmed ( mempool ) transactions is completely independent of the
* historical tracking of transactions that have been confirmed in a block .
*/
class TxConfirmStats
{
private :
//Define the buckets we will group transactions into
2017-05-17 22:02:50 +02:00
const std : : vector < double > & buckets ; // The upper-bound of the range for the bucket (inclusive)
const std : : map < double , unsigned int > & bucketMap ; // Map of bucket upper-bound to index into all vectors by bucket
2017-04-20 21:16:19 +02:00
// For each bucket X:
// Count the total # of txs in each bucket
// Track the historical moving average of this total over blocks
std : : vector < double > txCtAvg ;
// Count the total # of txs confirmed within Y blocks in each bucket
// Track the historical moving average of theses totals over blocks
2017-05-17 22:02:50 +02:00
std : : vector < std : : vector < double > > confAvg ; // confAvg[Y][X]
// Track moving avg of txs which have been evicted from the mempool
// after failing to be confirmed within Y blocks
std : : vector < std : : vector < double > > failAvg ; // failAvg[Y][X]
2017-04-20 21:16:19 +02:00
// Sum the total feerate of all tx's in each bucket
// Track the historical moving average of this total over blocks
std : : vector < double > avg ;
// Combine the conf counts with tx counts to calculate the confirmation % for each Y,X
// Combine the total value with the tx counts to calculate the avg feerate per bucket
double decay ;
2017-05-17 22:02:50 +02:00
// Resolution (# of blocks) with which confirmations are tracked
unsigned int scale ;
2017-04-20 21:16:19 +02:00
// Mempool counts of outstanding transactions
// For each bucket X, track the number of transactions in the mempool
// that are unconfirmed for each possible confirmation value Y
std : : vector < std : : vector < int > > unconfTxs ; //unconfTxs[Y][X]
2017-05-17 22:02:50 +02:00
// transactions still unconfirmed after GetMaxConfirms for each bucket
2017-04-20 21:16:19 +02:00
std : : vector < int > oldUnconfTxs ;
2017-05-17 22:02:50 +02:00
void resizeInMemoryCounters ( size_t newbuckets ) ;
2017-04-20 21:16:19 +02:00
public :
/**
* Create new TxConfirmStats . This is called by BlockPolicyEstimator ' s
* constructor with default values .
* @ param defaultBuckets contains the upper limits for the bucket boundaries
2017-07-16 23:41:24 +02:00
* @ param maxPeriods max number of periods to track
2017-04-20 21:16:19 +02:00
* @ param decay how much to decay the historical moving average per block
*/
2017-05-17 22:02:50 +02:00
TxConfirmStats ( const std : : vector < double > & defaultBuckets , const std : : map < double , unsigned int > & defaultBucketMap ,
unsigned int maxPeriods , double decay , unsigned int scale ) ;
2017-04-20 21:16:19 +02:00
2017-05-17 22:02:50 +02:00
/** Roll the circular buffer for unconfirmed txs*/
2017-04-20 21:16:19 +02:00
void ClearCurrent ( unsigned int nBlockHeight ) ;
/**
* Record a new transaction data point in the current block stats
* @ param blocksToConfirm the number of blocks it took this transaction to confirm
* @ param val the feerate of the transaction
* @ warning blocksToConfirm is 1 - based and has to be > = 1
*/
void Record ( int blocksToConfirm , double val ) ;
/** Record a new transaction entering the mempool*/
unsigned int NewTx ( unsigned int nBlockHeight , double val ) ;
/** Remove a transaction from mempool tracking stats*/
void removeTx ( unsigned int entryHeight , unsigned int nBestSeenHeight ,
2017-05-17 22:02:50 +02:00
unsigned int bucketIndex , bool inBlock ) ;
2017-04-20 21:16:19 +02:00
/** Update our estimates by decaying our historical moving average and updating
with the data gathered from the current block */
void UpdateMovingAverages ( ) ;
/**
* Calculate a feerate estimate . Find the lowest value bucket ( or range of buckets
* to make sure we have enough data points ) whose transactions still have sufficient likelihood
* of being confirmed within the target number of confirmations
* @ param confTarget target number of confirmations
* @ param sufficientTxVal required average number of transactions per block in a bucket range
* @ param minSuccess the success probability we require
* @ param requireGreater return the lowest feerate such that all higher values pass minSuccess OR
* return the highest feerate such that all lower values fail minSuccess
* @ param nBlockHeight the current block height
*/
double EstimateMedianVal ( int confTarget , double sufficientTxVal ,
2017-05-17 22:02:50 +02:00
double minSuccess , bool requireGreater , unsigned int nBlockHeight ,
EstimationResult * result = nullptr ) const ;
2017-04-20 21:16:19 +02:00
/** Return the max number of confirms we're tracking */
2017-05-17 22:02:50 +02:00
unsigned int GetMaxConfirms ( ) const { return scale * confAvg . size ( ) ; }
2017-04-20 21:16:19 +02:00
/** Write state of estimation data to a file*/
void Write ( CAutoFile & fileout ) const ;
/**
* Read saved state of estimation data from a file and replace all internal data structures and
* variables with this state .
*/
2017-05-17 22:02:50 +02:00
void Read ( CAutoFile & filein , int nFileVersion , size_t numBuckets ) ;
2017-04-20 21:16:19 +02:00
} ;
TxConfirmStats::TxConfirmStats(const std::vector<double>& defaultBuckets,
                               const std::map<double, unsigned int>& defaultBucketMap,
                               unsigned int maxPeriods, double _decay, unsigned int _scale)
    : buckets(defaultBuckets), bucketMap(defaultBucketMap)
{
    decay = _decay;
    assert(_scale != 0 && "_scale must be non-zero");
    scale = _scale;
    // One row per tracked period, one column per feerate bucket, all zero-initialized.
    confAvg.assign(maxPeriods, std::vector<double>(buckets.size()));
    failAvg.assign(maxPeriods, std::vector<double>(buckets.size()));
    txCtAvg.assign(buckets.size(), 0);
    avg.assign(buckets.size(), 0);
    resizeInMemoryCounters(buckets.size());
}
2017-05-17 22:02:50 +02:00
void TxConfirmStats : : resizeInMemoryCounters ( size_t newbuckets ) {
// newbuckets must be passed in because the buckets referred to during Read have not been updated yet.
unconfTxs . resize ( GetMaxConfirms ( ) ) ;
for ( unsigned int i = 0 ; i < unconfTxs . size ( ) ; i + + ) {
unconfTxs [ i ] . resize ( newbuckets ) ;
}
oldUnconfTxs . resize ( newbuckets ) ;
}
// Roll the unconfirmed txs circular buffer
2014-08-26 22:28:32 +02:00
void TxConfirmStats : : ClearCurrent ( unsigned int nBlockHeight )
{
for ( unsigned int j = 0 ; j < buckets . size ( ) ; j + + ) {
oldUnconfTxs [ j ] + = unconfTxs [ nBlockHeight % unconfTxs . size ( ) ] [ j ] ;
unconfTxs [ nBlockHeight % unconfTxs . size ( ) ] [ j ] = 0 ;
}
}
void TxConfirmStats : : Record ( int blocksToConfirm , double val )
{
// blocksToConfirm is 1-based
if ( blocksToConfirm < 1 )
return ;
2017-05-17 22:02:50 +02:00
int periodsToConfirm = ( blocksToConfirm + scale - 1 ) / scale ;
2014-08-26 22:28:32 +02:00
unsigned int bucketindex = bucketMap . lower_bound ( val ) - > second ;
2017-05-17 22:02:50 +02:00
for ( size_t i = periodsToConfirm ; i < = confAvg . size ( ) ; i + + ) {
confAvg [ i - 1 ] [ bucketindex ] + + ;
2014-08-26 22:28:32 +02:00
}
2017-05-17 22:02:50 +02:00
txCtAvg [ bucketindex ] + + ;
avg [ bucketindex ] + = val ;
2014-08-26 22:28:32 +02:00
}
void TxConfirmStats : : UpdateMovingAverages ( )
{
for ( unsigned int j = 0 ; j < buckets . size ( ) ; j + + ) {
for ( unsigned int i = 0 ; i < confAvg . size ( ) ; i + + )
2017-05-17 22:02:50 +02:00
confAvg [ i ] [ j ] = confAvg [ i ] [ j ] * decay ;
for ( unsigned int i = 0 ; i < failAvg . size ( ) ; i + + )
failAvg [ i ] [ j ] = failAvg [ i ] [ j ] * decay ;
avg [ j ] = avg [ j ] * decay ;
txCtAvg [ j ] = txCtAvg [ j ] * decay ;
2014-08-26 22:28:32 +02:00
}
}
// returns -1 on error conditions
double TxConfirmStats : : EstimateMedianVal ( int confTarget , double sufficientTxVal ,
double successBreakPoint , bool requireGreater ,
2017-05-17 22:02:50 +02:00
unsigned int nBlockHeight , EstimationResult * result ) const
2014-08-26 22:28:32 +02:00
{
// Counters for a bucket (or range of buckets)
double nConf = 0 ; // Number of tx's confirmed within the confTarget
double totalNum = 0 ; // Total number of tx's that were ever confirmed
int extraNum = 0 ; // Number of tx's still in mempool for confTarget or longer
2017-05-17 22:02:50 +02:00
double failNum = 0 ; // Number of tx's that were never confirmed but removed from the mempool after confTarget
int periodTarget = ( confTarget + scale - 1 ) / scale ;
2014-08-26 22:28:32 +02:00
int maxbucketindex = buckets . size ( ) - 1 ;
2016-11-07 13:23:20 +01:00
// requireGreater means we are looking for the lowest feerate such that all higher
// values pass, so we start at maxbucketindex (highest feerate) and look at successively
2014-08-26 22:28:32 +02:00
// smaller buckets until we reach failure. Otherwise, we are looking for the highest
2016-11-07 13:23:20 +01:00
// feerate such that all lower values fail, and we go in the opposite direction.
2014-08-26 22:28:32 +02:00
unsigned int startbucket = requireGreater ? maxbucketindex : 0 ;
int step = requireGreater ? - 1 : 1 ;
// We'll combine buckets until we have enough samples.
// The near and far variables will define the range we've combined
// The best variables are the last range we saw which still had a high
// enough confirmation rate to count as success.
// The cur variables are the current range we're counting.
unsigned int curNearBucket = startbucket ;
unsigned int bestNearBucket = startbucket ;
unsigned int curFarBucket = startbucket ;
unsigned int bestFarBucket = startbucket ;
bool foundAnswer = false ;
unsigned int bins = unconfTxs . size ( ) ;
2017-05-17 22:02:50 +02:00
bool newBucketRange = true ;
bool passing = true ;
EstimatorBucket passBucket ;
EstimatorBucket failBucket ;
2014-08-26 22:28:32 +02:00
2016-11-07 13:23:20 +01:00
// Start counting from highest(default) or lowest feerate transactions
2014-08-26 22:28:32 +02:00
for ( int bucket = startbucket ; bucket > = 0 & & bucket < = maxbucketindex ; bucket + = step ) {
2017-05-17 22:02:50 +02:00
if ( newBucketRange ) {
curNearBucket = bucket ;
newBucketRange = false ;
}
2014-08-26 22:28:32 +02:00
curFarBucket = bucket ;
2017-05-17 22:02:50 +02:00
nConf + = confAvg [ periodTarget - 1 ] [ bucket ] ;
2014-08-26 22:28:32 +02:00
totalNum + = txCtAvg [ bucket ] ;
2017-05-17 22:02:50 +02:00
failNum + = failAvg [ periodTarget - 1 ] [ bucket ] ;
2014-08-26 22:28:32 +02:00
for ( unsigned int confct = confTarget ; confct < GetMaxConfirms ( ) ; confct + + )
extraNum + = unconfTxs [ ( nBlockHeight - confct ) % bins ] [ bucket ] ;
extraNum + = oldUnconfTxs [ bucket ] ;
// If we have enough transaction data points in this range of buckets,
// we can test for success
// (Only count the confirmed data points, so that each confirmation count
// will be looking at the same amount of data and same bucket breaks)
if ( totalNum > = sufficientTxVal / ( 1 - decay ) ) {
2017-05-17 22:02:50 +02:00
double curPct = nConf / ( totalNum + failNum + extraNum ) ;
2014-08-26 22:28:32 +02:00
// Check to see if we are no longer getting confirmed at the success rate
2017-05-17 22:02:50 +02:00
if ( ( requireGreater & & curPct < successBreakPoint ) | | ( ! requireGreater & & curPct > successBreakPoint ) ) {
if ( passing = = true ) {
// First time we hit a failure record the failed bucket
unsigned int failMinBucket = std : : min ( curNearBucket , curFarBucket ) ;
unsigned int failMaxBucket = std : : max ( curNearBucket , curFarBucket ) ;
failBucket . start = failMinBucket ? buckets [ failMinBucket - 1 ] : 0 ;
failBucket . end = buckets [ failMaxBucket ] ;
failBucket . withinTarget = nConf ;
failBucket . totalConfirmed = totalNum ;
failBucket . inMempool = extraNum ;
failBucket . leftMempool = failNum ;
passing = false ;
}
continue ;
}
2014-08-26 22:28:32 +02:00
// Otherwise update the cumulative stats, and the bucket variables
// and reset the counters
else {
2017-05-17 22:02:50 +02:00
failBucket = EstimatorBucket ( ) ; // Reset any failed bucket, currently passing
2014-08-26 22:28:32 +02:00
foundAnswer = true ;
2017-05-17 22:02:50 +02:00
passing = true ;
passBucket . withinTarget = nConf ;
2014-08-26 22:28:32 +02:00
nConf = 0 ;
2017-05-17 22:02:50 +02:00
passBucket . totalConfirmed = totalNum ;
2014-08-26 22:28:32 +02:00
totalNum = 0 ;
2017-05-17 22:02:50 +02:00
passBucket . inMempool = extraNum ;
passBucket . leftMempool = failNum ;
failNum = 0 ;
2014-08-26 22:28:32 +02:00
extraNum = 0 ;
bestNearBucket = curNearBucket ;
bestFarBucket = curFarBucket ;
2017-05-17 22:02:50 +02:00
newBucketRange = true ;
2014-08-26 22:28:32 +02:00
}
}
}
double median = - 1 ;
double txSum = 0 ;
2016-11-07 13:23:20 +01:00
// Calculate the "average" feerate of the best bucket range that met success conditions
// Find the bucket with the median transaction and then report the average feerate from that bucket
2014-08-26 22:28:32 +02:00
// This is a compromise between finding the median which we can't since we don't save all tx's
// and reporting the average which is less accurate
2017-05-17 22:02:50 +02:00
unsigned int minBucket = std : : min ( bestNearBucket , bestFarBucket ) ;
unsigned int maxBucket = std : : max ( bestNearBucket , bestFarBucket ) ;
2014-08-26 22:28:32 +02:00
for ( unsigned int j = minBucket ; j < = maxBucket ; j + + ) {
txSum + = txCtAvg [ j ] ;
}
if ( foundAnswer & & txSum ! = 0 ) {
txSum = txSum / 2 ;
for ( unsigned int j = minBucket ; j < = maxBucket ; j + + ) {
if ( txCtAvg [ j ] < txSum )
txSum - = txCtAvg [ j ] ;
else { // we're in the right bucket
median = avg [ j ] / txCtAvg [ j ] ;
break ;
}
}
2017-05-17 22:02:50 +02:00
passBucket . start = minBucket ? buckets [ minBucket - 1 ] : 0 ;
passBucket . end = buckets [ maxBucket ] ;
2014-08-26 22:28:32 +02:00
}
2017-05-17 22:02:50 +02:00
// If we were passing until we reached last few buckets with insufficient data, then report those as failed
if ( passing & & ! newBucketRange ) {
unsigned int failMinBucket = std : : min ( curNearBucket , curFarBucket ) ;
unsigned int failMaxBucket = std : : max ( curNearBucket , curFarBucket ) ;
failBucket . start = failMinBucket ? buckets [ failMinBucket - 1 ] : 0 ;
failBucket . end = buckets [ failMaxBucket ] ;
failBucket . withinTarget = nConf ;
failBucket . totalConfirmed = totalNum ;
failBucket . inMempool = extraNum ;
failBucket . leftMempool = failNum ;
}
2014-08-26 22:28:32 +02:00
2017-05-17 22:02:50 +02:00
LogPrint ( BCLog : : ESTIMATEFEE , " FeeEst: %d %s%.0f%% decay %.5f: feerate: %g from (%g - %g) %.2f%% %.1f/(%.1f %d mem %.1f out) Fail: (%g - %g) %.2f%% %.1f/(%.1f %d mem %.1f out) \n " ,
confTarget , requireGreater ? " > " : " < " , 100.0 * successBreakPoint , decay ,
median , passBucket . start , passBucket . end ,
100 * passBucket . withinTarget / ( passBucket . totalConfirmed + passBucket . inMempool + passBucket . leftMempool ) ,
passBucket . withinTarget , passBucket . totalConfirmed , passBucket . inMempool , passBucket . leftMempool ,
failBucket . start , failBucket . end ,
100 * failBucket . withinTarget / ( failBucket . totalConfirmed + failBucket . inMempool + failBucket . leftMempool ) ,
failBucket . withinTarget , failBucket . totalConfirmed , failBucket . inMempool , failBucket . leftMempool ) ;
if ( result ) {
result - > pass = passBucket ;
result - > fail = failBucket ;
result - > decay = decay ;
result - > scale = scale ;
}
2014-08-26 22:28:32 +02:00
return median ;
}
2017-04-20 21:16:19 +02:00
void TxConfirmStats : : Write ( CAutoFile & fileout ) const
2014-08-26 22:28:32 +02:00
{
fileout < < decay ;
2017-05-17 22:02:50 +02:00
fileout < < scale ;
2014-08-26 22:28:32 +02:00
fileout < < avg ;
fileout < < txCtAvg ;
fileout < < confAvg ;
2017-05-17 22:02:50 +02:00
fileout < < failAvg ;
2014-08-26 22:28:32 +02:00
}
2017-05-17 22:02:50 +02:00
void TxConfirmStats : : Read ( CAutoFile & filein , int nFileVersion , size_t numBuckets )
2014-08-26 22:28:32 +02:00
{
2017-05-17 22:02:50 +02:00
// Read data file and do some very basic sanity checking
// buckets and bucketMap are not updated yet, so don't access them
// If there is a read failure, we'll just discard this entire object anyway
size_t maxConfirms , maxPeriods ;
// The current version will store the decay with each individual TxConfirmStats and also keep a scale factor
2017-12-20 10:40:57 +01:00
filein > > decay ;
if ( decay < = 0 | | decay > = 1 ) {
throw std : : runtime_error ( " Corrupt estimates file. Decay must be between 0 and 1 (non-inclusive) " ) ;
}
filein > > scale ;
if ( scale = = 0 ) {
throw std : : runtime_error ( " Corrupt estimates file. Scale must be non-zero " ) ;
2017-05-17 22:02:50 +02:00
}
filein > > avg ;
if ( avg . size ( ) ! = numBuckets ) {
2016-11-07 13:23:20 +01:00
throw std : : runtime_error ( " Corrupt estimates file. Mismatch in feerate average bucket count " ) ;
2017-05-17 22:02:50 +02:00
}
filein > > txCtAvg ;
if ( txCtAvg . size ( ) ! = numBuckets ) {
2014-08-26 22:28:32 +02:00
throw std : : runtime_error ( " Corrupt estimates file. Mismatch in tx count bucket count " ) ;
}
2017-05-17 22:02:50 +02:00
filein > > confAvg ;
maxPeriods = confAvg . size ( ) ;
maxConfirms = scale * maxPeriods ;
2014-08-26 22:28:32 +02:00
2017-05-17 22:02:50 +02:00
if ( maxConfirms < = 0 | | maxConfirms > 6 * 24 * 7 ) { // one week
throw std : : runtime_error ( " Corrupt estimates file. Must maintain estimates for between 1 and 1008 (one week) confirms " ) ;
}
for ( unsigned int i = 0 ; i < maxPeriods ; i + + ) {
if ( confAvg [ i ] . size ( ) ! = numBuckets ) {
throw std : : runtime_error ( " Corrupt estimates file. Mismatch in feerate conf average bucket count " ) ;
}
2014-08-26 22:28:32 +02:00
}
2017-12-20 10:40:57 +01:00
filein > > failAvg ;
if ( maxPeriods ! = failAvg . size ( ) ) {
throw std : : runtime_error ( " Corrupt estimates file. Mismatch in confirms tracked for failures " ) ;
}
for ( unsigned int i = 0 ; i < maxPeriods ; i + + ) {
if ( failAvg [ i ] . size ( ) ! = numBuckets ) {
throw std : : runtime_error ( " Corrupt estimates file. Mismatch in one of failure average bucket counts " ) ;
2017-05-17 22:02:50 +02:00
}
2014-08-26 22:28:32 +02:00
}
2017-05-17 22:02:50 +02:00
// Resize the current block variables which aren't stored in the data file
// to match the number of confirms and buckets
resizeInMemoryCounters ( numBuckets ) ;
2014-08-26 22:28:32 +02:00
2019-05-22 23:51:39 +02:00
LogPrint ( BCLog : : ESTIMATEFEE , " Reading estimates: %u buckets counting confirms up to %u blocks \n " ,
2016-11-07 13:23:20 +01:00
numBuckets , maxConfirms ) ;
2014-08-26 22:28:32 +02:00
}
unsigned int TxConfirmStats : : NewTx ( unsigned int nBlockHeight , double val )
{
unsigned int bucketindex = bucketMap . lower_bound ( val ) - > second ;
unsigned int blockIndex = nBlockHeight % unconfTxs . size ( ) ;
unconfTxs [ blockIndex ] [ bucketindex ] + + ;
return bucketindex ;
}
2017-05-17 22:02:50 +02:00
void TxConfirmStats : : removeTx ( unsigned int entryHeight , unsigned int nBestSeenHeight , unsigned int bucketindex , bool inBlock )
2014-08-26 22:28:32 +02:00
{
//nBestSeenHeight is not updated yet for the new block
int blocksAgo = nBestSeenHeight - entryHeight ;
if ( nBestSeenHeight = = 0 ) // the BlockPolicyEstimator hasn't seen any blocks yet
blocksAgo = 0 ;
if ( blocksAgo < 0 ) {
2019-05-22 23:51:39 +02:00
LogPrint ( BCLog : : ESTIMATEFEE , " Blockpolicy error, blocks ago is negative for mempool tx \n " ) ;
2015-08-09 01:17:27 +02:00
return ; //This can't happen because we call this with our best seen height, no entries can have higher
2014-08-26 22:28:32 +02:00
}
if ( blocksAgo > = ( int ) unconfTxs . size ( ) ) {
2019-05-22 23:51:39 +02:00
if ( oldUnconfTxs [ bucketindex ] > 0 ) {
2014-08-26 22:28:32 +02:00
oldUnconfTxs [ bucketindex ] - - ;
2019-05-22 23:51:39 +02:00
} else {
LogPrint ( BCLog : : ESTIMATEFEE , " Blockpolicy error, mempool tx removed from >25 blocks,bucketIndex=%u already \n " ,
2014-08-26 22:28:32 +02:00
bucketindex ) ;
2019-05-22 23:51:39 +02:00
}
2014-08-26 22:28:32 +02:00
}
else {
unsigned int blockIndex = entryHeight % unconfTxs . size ( ) ;
2019-05-22 23:51:39 +02:00
if ( unconfTxs [ blockIndex ] [ bucketindex ] > 0 ) {
2014-08-26 22:28:32 +02:00
unconfTxs [ blockIndex ] [ bucketindex ] - - ;
2019-05-22 23:51:39 +02:00
} else {
LogPrint ( BCLog : : ESTIMATEFEE , " Blockpolicy error, mempool tx removed from blockIndex=%u,bucketIndex=%u already \n " ,
2014-08-26 22:28:32 +02:00
blockIndex , bucketindex ) ;
2019-05-22 23:51:39 +02:00
}
2014-08-26 22:28:32 +02:00
}
2017-05-17 22:02:50 +02:00
if ( ! inBlock & & ( unsigned int ) blocksAgo > = scale ) { // Only counts as a failure if not confirmed for entire period
2017-10-12 13:39:56 +02:00
assert ( scale ! = 0 ) ;
2017-05-17 22:02:50 +02:00
unsigned int periodsAgo = blocksAgo / scale ;
for ( size_t i = 0 ; i < periodsAgo & & i < failAvg . size ( ) ; i + + ) {
failAvg [ i ] [ bucketindex ] + + ;
}
}
2014-08-26 22:28:32 +02:00
}
2017-01-05 23:14:23 +01:00
// This function is called from CTxMemPool::removeUnchecked to ensure
// txs removed from the mempool for any reason are no longer
// tracked. Txs that were part of a block have already been removed in
// processBlockTx to ensure they are never double tracked, but it is
// of no harm to try to remove them again.
2017-05-17 22:02:50 +02:00
bool CBlockPolicyEstimator : : removeTx ( uint256 hash , bool inBlock )
2014-08-26 22:28:32 +02:00
{
2017-04-20 21:16:19 +02:00
LOCK ( cs_feeEstimator ) ;
2014-08-26 22:28:32 +02:00
std : : map < uint256 , TxStatsInfo > : : iterator pos = mapMemPoolTxs . find ( hash ) ;
2017-01-05 23:14:23 +01:00
if ( pos ! = mapMemPoolTxs . end ( ) ) {
2017-05-17 22:02:50 +02:00
feeStats - > removeTx ( pos - > second . blockHeight , nBestSeenHeight , pos - > second . bucketIndex , inBlock ) ;
shortStats - > removeTx ( pos - > second . blockHeight , nBestSeenHeight , pos - > second . bucketIndex , inBlock ) ;
longStats - > removeTx ( pos - > second . blockHeight , nBestSeenHeight , pos - > second . bucketIndex , inBlock ) ;
2017-01-05 23:14:23 +01:00
mapMemPoolTxs . erase ( hash ) ;
return true ;
} else {
return false ;
2014-08-26 22:28:32 +02:00
}
}
2017-03-07 19:38:49 +01:00
CBlockPolicyEstimator : : CBlockPolicyEstimator ( )
2017-05-17 22:02:50 +02:00
: nBestSeenHeight ( 0 ) , firstRecordedHeight ( 0 ) , historicalFirst ( 0 ) , historicalBest ( 0 ) , trackedTxs ( 0 ) , untrackedTxs ( 0 )
2014-08-26 22:28:32 +02:00
{
2017-03-07 19:38:49 +01:00
static_assert ( MIN_BUCKET_FEERATE > 0 , " Min feerate must be nonzero " ) ;
2017-05-17 22:02:50 +02:00
size_t bucketIndex = 0 ;
for ( double bucketBoundary = MIN_BUCKET_FEERATE ; bucketBoundary < = MAX_BUCKET_FEERATE ; bucketBoundary * = FEE_SPACING , bucketIndex + + ) {
buckets . push_back ( bucketBoundary ) ;
bucketMap [ bucketBoundary ] = bucketIndex ;
2014-08-26 22:28:32 +02:00
}
2017-05-17 22:02:50 +02:00
buckets . push_back ( INF_FEERATE ) ;
bucketMap [ INF_FEERATE ] = bucketIndex ;
assert ( bucketMap . size ( ) = = buckets . size ( ) ) ;
2017-11-09 21:22:08 +01:00
feeStats = std : : unique_ptr < TxConfirmStats > ( new TxConfirmStats ( buckets , bucketMap , MED_BLOCK_PERIODS , MED_DECAY , MED_SCALE ) ) ;
shortStats = std : : unique_ptr < TxConfirmStats > ( new TxConfirmStats ( buckets , bucketMap , SHORT_BLOCK_PERIODS , SHORT_DECAY , SHORT_SCALE ) ) ;
longStats = std : : unique_ptr < TxConfirmStats > ( new TxConfirmStats ( buckets , bucketMap , LONG_BLOCK_PERIODS , LONG_DECAY , LONG_SCALE ) ) ;
2017-04-20 21:16:19 +02:00
}
CBlockPolicyEstimator::~CBlockPolicyEstimator()
{
    // Intentionally empty: defined out-of-line so the unique_ptr<TxConfirmStats>
    // members are destroyed where TxConfirmStats is a complete type.
}
2017-01-05 23:14:23 +01:00
void CBlockPolicyEstimator : : processTransaction ( const CTxMemPoolEntry & entry , bool validFeeEstimate )
2014-08-26 22:28:32 +02:00
{
2017-04-20 21:16:19 +02:00
LOCK ( cs_feeEstimator ) ;
2014-08-26 22:28:32 +02:00
unsigned int txHeight = entry . GetHeight ( ) ;
uint256 hash = entry . GetTx ( ) . GetHash ( ) ;
2016-11-07 13:23:20 +01:00
if ( mapMemPoolTxs . count ( hash ) ) {
2019-05-22 23:51:39 +02:00
LogPrint ( BCLog : : ESTIMATEFEE , " Blockpolicy error mempool tx %s already being tracked \n " , hash . ToString ( ) ) ;
2016-03-04 06:58:53 +01:00
return ;
2014-08-26 22:28:32 +02:00
}
2017-01-05 23:14:23 +01:00
if ( txHeight ! = nBestSeenHeight ) {
2014-08-26 22:28:32 +02:00
// Ignore side chains and re-orgs; assuming they are random they don't
// affect the estimate. We'll potentially double count transactions in 1-block reorgs.
2017-01-05 23:14:23 +01:00
// Ignore txs if BlockPolicyEstimator is not in sync with chainActive.Tip().
// It will be synced next time a block is processed.
2014-08-26 22:28:32 +02:00
return ;
}
// Only want to be updating estimates when our blockchain is synced,
// otherwise we'll miscalculate how many blocks its taking to get included.
2017-01-05 23:14:23 +01:00
if ( ! validFeeEstimate ) {
untrackedTxs + + ;
2014-08-26 22:28:32 +02:00
return ;
}
2017-01-05 23:14:23 +01:00
trackedTxs + + ;
2014-08-26 22:28:32 +02:00
2016-11-07 13:23:20 +01:00
// Feerates are stored and reported as BTC-per-kb:
2014-08-26 22:28:32 +02:00
CFeeRate feeRate ( entry . GetFee ( ) , entry . GetTxSize ( ) ) ;
mapMemPoolTxs [ hash ] . blockHeight = txHeight ;
2017-05-17 22:02:50 +02:00
unsigned int bucketIndex = feeStats - > NewTx ( txHeight , ( double ) feeRate . GetFeePerK ( ) ) ;
mapMemPoolTxs [ hash ] . bucketIndex = bucketIndex ;
unsigned int bucketIndex2 = shortStats - > NewTx ( txHeight , ( double ) feeRate . GetFeePerK ( ) ) ;
assert ( bucketIndex = = bucketIndex2 ) ;
unsigned int bucketIndex3 = longStats - > NewTx ( txHeight , ( double ) feeRate . GetFeePerK ( ) ) ;
assert ( bucketIndex = = bucketIndex3 ) ;
2014-08-26 22:28:32 +02:00
}
2017-01-05 23:14:23 +01:00
bool CBlockPolicyEstimator : : processBlockTx ( unsigned int nBlockHeight , const CTxMemPoolEntry * entry )
2014-08-26 22:28:32 +02:00
{
2017-05-17 22:02:50 +02:00
if ( ! removeTx ( entry - > GetTx ( ) . GetHash ( ) , true ) ) {
2017-01-05 23:14:23 +01:00
// This transaction wasn't being tracked for fee estimation
return false ;
2014-08-26 22:28:32 +02:00
}
// How many blocks did it take for miners to include this transaction?
// blocksToConfirm is 1-based, so a transaction included in the earliest
// possible block has confirmation count of 1
2017-01-05 23:14:23 +01:00
int blocksToConfirm = nBlockHeight - entry - > GetHeight ( ) ;
2014-08-26 22:28:32 +02:00
if ( blocksToConfirm < = 0 ) {
// This can't happen because we don't process transactions from a block with a height
// lower than our greatest seen height
2019-05-22 23:51:39 +02:00
LogPrint ( BCLog : : ESTIMATEFEE , " Blockpolicy error Transaction had negative blocksToConfirm \n " ) ;
2017-01-05 23:14:23 +01:00
return false ;
2014-08-26 22:28:32 +02:00
}
2016-11-07 13:23:20 +01:00
// Feerates are stored and reported as BTC-per-kb:
2017-01-05 23:14:23 +01:00
CFeeRate feeRate ( entry - > GetFee ( ) , entry - > GetTxSize ( ) ) ;
2014-08-26 22:28:32 +02:00
2017-04-20 21:16:19 +02:00
feeStats - > Record ( blocksToConfirm , ( double ) feeRate . GetFeePerK ( ) ) ;
2017-05-17 22:02:50 +02:00
shortStats - > Record ( blocksToConfirm , ( double ) feeRate . GetFeePerK ( ) ) ;
longStats - > Record ( blocksToConfirm , ( double ) feeRate . GetFeePerK ( ) ) ;
2017-01-05 23:14:23 +01:00
return true ;
2014-08-26 22:28:32 +02:00
}
void CBlockPolicyEstimator : : processBlock ( unsigned int nBlockHeight ,
2017-01-05 23:14:23 +01:00
std : : vector < const CTxMemPoolEntry * > & entries )
2014-08-26 22:28:32 +02:00
{
2017-04-20 21:16:19 +02:00
LOCK ( cs_feeEstimator ) ;
2014-08-26 22:28:32 +02:00
if ( nBlockHeight < = nBestSeenHeight ) {
// Ignore side chains and re-orgs; assuming they are random
// they don't affect the estimate.
// And if an attacker can re-org the chain at will, then
// you've got much bigger problems than "attacker can influence
// transaction fees."
return ;
}
2017-01-05 23:14:23 +01:00
// Must update nBestSeenHeight in sync with ClearCurrent so that
// calls to removeTx (via processBlockTx) correctly calculate age
// of unconfirmed txs to remove from tracking.
nBestSeenHeight = nBlockHeight ;
2014-08-26 22:28:32 +02:00
2017-05-17 22:02:50 +02:00
// Update unconfirmed circular buffer
2017-04-20 21:16:19 +02:00
feeStats - > ClearCurrent ( nBlockHeight ) ;
2017-05-17 22:02:50 +02:00
shortStats - > ClearCurrent ( nBlockHeight ) ;
longStats - > ClearCurrent ( nBlockHeight ) ;
// Decay all exponential averages
feeStats - > UpdateMovingAverages ( ) ;
shortStats - > UpdateMovingAverages ( ) ;
longStats - > UpdateMovingAverages ( ) ;
2014-08-26 22:28:32 +02:00
2017-01-05 23:14:23 +01:00
unsigned int countedTxs = 0 ;
2017-05-17 22:02:50 +02:00
// Update averages with data points from current block
2017-05-23 22:58:35 +02:00
for ( const auto & entry : entries ) {
if ( processBlockTx ( nBlockHeight , entry ) )
2017-01-05 23:14:23 +01:00
countedTxs + + ;
}
2014-08-26 22:28:32 +02:00
2017-05-17 22:02:50 +02:00
if ( firstRecordedHeight = = 0 & & countedTxs > 0 ) {
firstRecordedHeight = nBestSeenHeight ;
LogPrint ( BCLog : : ESTIMATEFEE , " Blockpolicy first recorded height %u \n " , firstRecordedHeight ) ;
}
2014-08-26 22:28:32 +02:00
2017-05-17 22:02:50 +02:00
LogPrint ( BCLog : : ESTIMATEFEE , " Blockpolicy estimates updated by %u of %u block txs, since last block %u of %u tracked, mempool map size %u, max target %u from %s \n " ,
countedTxs , entries . size ( ) , trackedTxs , trackedTxs + untrackedTxs , mapMemPoolTxs . size ( ) ,
MaxUsableEstimate ( ) , HistoricalBlockSpan ( ) > BlockSpan ( ) ? " historical " : " current " ) ;
2017-01-05 23:14:23 +01:00
trackedTxs = 0 ;
untrackedTxs = 0 ;
2014-08-26 22:28:32 +02:00
}
2017-04-20 21:16:19 +02:00
CFeeRate CBlockPolicyEstimator::estimateFee(int confTarget) const
{
    // A 1-block confirmation target cannot be estimated sensibly.
    if (confTarget <= 1) return CFeeRate(0);

    // Default estimate: medium horizon at the double-success threshold.
    return estimateRawFee(confTarget, DOUBLE_SUCCESS_PCT, FeeEstimateHorizon::MED_HALFLIFE);
}
CFeeRate CBlockPolicyEstimator::estimateRawFee(int confTarget, double successThreshold, FeeEstimateHorizon horizon, EstimationResult* result) const
{
    // Select the stats tracker and data-sufficiency requirement for the
    // requested horizon.
    TxConfirmStats* stats = nullptr;
    double sufficientTxs = SUFFICIENT_FEETXS;
    switch (horizon) {
    case FeeEstimateHorizon::SHORT_HALFLIFE:
        stats = shortStats.get();
        sufficientTxs = SUFFICIENT_TXS_SHORT;
        break;
    case FeeEstimateHorizon::MED_HALFLIFE:
        stats = feeStats.get();
        break;
    case FeeEstimateHorizon::LONG_HALFLIFE:
        stats = longStats.get();
        break;
    default:
        throw std::out_of_range("CBlockPolicyEstimator::estimateRawFee unknown FeeEstimateHorizon");
    }

    LOCK(cs_feeEstimator);
    // Fail for targets outside the tracked range or impossible thresholds.
    if (confTarget <= 0 || (unsigned int)confTarget > stats->GetMaxConfirms()) return CFeeRate(0);
    if (successThreshold > 1) return CFeeRate(0);

    double median = stats->EstimateMedianVal(confTarget, sufficientTxs, successThreshold, true, nBestSeenHeight, result);
    // A negative median signals insufficient data.
    return median < 0 ? CFeeRate(0) : CFeeRate(llround(median));
}
2017-07-11 15:28:28 +02:00
unsigned int CBlockPolicyEstimator : : HighestTargetTracked ( FeeEstimateHorizon horizon ) const
{
switch ( horizon ) {
case FeeEstimateHorizon : : SHORT_HALFLIFE : {
return shortStats - > GetMaxConfirms ( ) ;
}
case FeeEstimateHorizon : : MED_HALFLIFE : {
return feeStats - > GetMaxConfirms ( ) ;
}
case FeeEstimateHorizon : : LONG_HALFLIFE : {
return longStats - > GetMaxConfirms ( ) ;
}
default : {
2017-07-15 19:58:23 +02:00
throw std : : out_of_range ( " CBlockPolicyEstimator::HighestTargetTracked unknown FeeEstimateHorizon " ) ;
2017-07-11 15:28:28 +02:00
}
}
}
2017-05-17 22:02:50 +02:00
// Number of blocks seen since the first block that contributed data
// points; 0 while nothing has been recorded yet.
unsigned int CBlockPolicyEstimator::BlockSpan() const
{
    if (firstRecordedHeight == 0) return 0;
    // firstRecordedHeight is only ever set to nBestSeenHeight, which never
    // decreases, so this invariant must hold.
    assert(nBestSeenHeight >= firstRecordedHeight);
    return nBestSeenHeight - firstRecordedHeight;
}
// Block span covered by the historical (file-loaded) estimate data;
// 0 if there is none or it is too old to be useful.
unsigned int CBlockPolicyEstimator::HistoricalBlockSpan() const
{
    if (historicalFirst == 0) return 0;
    assert(historicalBest >= historicalFirst);
    // Discard historical data once it lags the current tip by more than
    // OLDEST_ESTIMATE_HISTORY blocks.
    if (nBestSeenHeight - historicalBest > OLDEST_ESTIMATE_HISTORY) return 0;
    return historicalBest - historicalFirst;
}
unsigned int CBlockPolicyEstimator : : MaxUsableEstimate ( ) const
{
// Block spans are divided by 2 to make sure there are enough potential failing data points for the estimate
return std : : min ( longStats - > GetMaxConfirms ( ) , std : : max ( BlockSpan ( ) , HistoricalBlockSpan ( ) ) / 2 ) ;
}
/** Return a fee estimate at the required successThreshold from the shortest
* time horizon which tracks confirmations up to the desired target . If
* checkShorterHorizon is requested , also allow short time horizon estimates
* for a lower target to reduce the given answer */
2017-06-15 13:56:16 +02:00
double CBlockPolicyEstimator : : estimateCombinedFee ( unsigned int confTarget , double successThreshold , bool checkShorterHorizon , EstimationResult * result ) const
2017-05-17 22:02:50 +02:00
{
double estimate = - 1 ;
if ( confTarget > = 1 & & confTarget < = longStats - > GetMaxConfirms ( ) ) {
// Find estimate from shortest time horizon possible
if ( confTarget < = shortStats - > GetMaxConfirms ( ) ) { // short horizon
2017-06-15 13:56:16 +02:00
estimate = shortStats - > EstimateMedianVal ( confTarget , SUFFICIENT_TXS_SHORT , successThreshold , true , nBestSeenHeight , result ) ;
2017-05-17 22:02:50 +02:00
}
else if ( confTarget < = feeStats - > GetMaxConfirms ( ) ) { // medium horizon
2017-06-15 13:56:16 +02:00
estimate = feeStats - > EstimateMedianVal ( confTarget , SUFFICIENT_FEETXS , successThreshold , true , nBestSeenHeight , result ) ;
2017-05-17 22:02:50 +02:00
}
else { // long horizon
2017-06-15 13:56:16 +02:00
estimate = longStats - > EstimateMedianVal ( confTarget , SUFFICIENT_FEETXS , successThreshold , true , nBestSeenHeight , result ) ;
2017-05-17 22:02:50 +02:00
}
if ( checkShorterHorizon ) {
2017-06-15 13:56:16 +02:00
EstimationResult tempResult ;
2017-05-17 22:02:50 +02:00
// If a lower confTarget from a more recent horizon returns a lower answer use it.
if ( confTarget > feeStats - > GetMaxConfirms ( ) ) {
2017-06-15 13:56:16 +02:00
double medMax = feeStats - > EstimateMedianVal ( feeStats - > GetMaxConfirms ( ) , SUFFICIENT_FEETXS , successThreshold , true , nBestSeenHeight , & tempResult ) ;
if ( medMax > 0 & & ( estimate = = - 1 | | medMax < estimate ) ) {
2017-05-17 22:02:50 +02:00
estimate = medMax ;
2017-06-15 13:56:16 +02:00
if ( result ) * result = tempResult ;
}
2017-05-17 22:02:50 +02:00
}
if ( confTarget > shortStats - > GetMaxConfirms ( ) ) {
2017-06-15 13:56:16 +02:00
double shortMax = shortStats - > EstimateMedianVal ( shortStats - > GetMaxConfirms ( ) , SUFFICIENT_TXS_SHORT , successThreshold , true , nBestSeenHeight , & tempResult ) ;
if ( shortMax > 0 & & ( estimate = = - 1 | | shortMax < estimate ) ) {
2017-05-17 22:02:50 +02:00
estimate = shortMax ;
2017-06-15 13:56:16 +02:00
if ( result ) * result = tempResult ;
}
2017-05-17 22:02:50 +02:00
}
}
}
return estimate ;
}
/** Ensure that for a conservative estimate, the DOUBLE_SUCCESS_PCT is also met
* at 2 * target for any longer time horizons .
*/
2017-06-15 13:56:16 +02:00
double CBlockPolicyEstimator : : estimateConservativeFee ( unsigned int doubleTarget , EstimationResult * result ) const
2017-05-17 22:02:50 +02:00
{
double estimate = - 1 ;
2017-06-15 13:56:16 +02:00
EstimationResult tempResult ;
2017-05-17 22:02:50 +02:00
if ( doubleTarget < = shortStats - > GetMaxConfirms ( ) ) {
2017-06-15 13:56:16 +02:00
estimate = feeStats - > EstimateMedianVal ( doubleTarget , SUFFICIENT_FEETXS , DOUBLE_SUCCESS_PCT , true , nBestSeenHeight , result ) ;
2017-05-17 22:02:50 +02:00
}
if ( doubleTarget < = feeStats - > GetMaxConfirms ( ) ) {
2017-06-15 13:56:16 +02:00
double longEstimate = longStats - > EstimateMedianVal ( doubleTarget , SUFFICIENT_FEETXS , DOUBLE_SUCCESS_PCT , true , nBestSeenHeight , & tempResult ) ;
2017-05-17 22:02:50 +02:00
if ( longEstimate > estimate ) {
estimate = longEstimate ;
2017-06-15 13:56:16 +02:00
if ( result ) * result = tempResult ;
2017-05-17 22:02:50 +02:00
}
}
return estimate ;
}
/** estimateSmartFee returns the max of the feerates calculated with a 60%
* threshold required at target / 2 , an 85 % threshold required at target and a
* 95 % threshold required at 2 * target . Each calculation is performed at the
* shortest time horizon which tracks the required target . Conservative
* estimates , however , required the 95 % threshold at 2 * target be met for any
* longer time horizons also .
*/
2017-07-17 08:51:41 +02:00
CFeeRate CBlockPolicyEstimator : : estimateSmartFee ( int confTarget , FeeCalculation * feeCalc , bool conservative ) const
2015-11-16 21:10:22 +01:00
{
2017-07-17 08:51:41 +02:00
LOCK ( cs_feeEstimator ) ;
2017-06-15 13:56:16 +02:00
if ( feeCalc ) {
feeCalc - > desiredTarget = confTarget ;
feeCalc - > returnedTarget = confTarget ;
}
2016-12-02 08:20:44 +01:00
2015-11-16 21:10:22 +01:00
double median = - 1 ;
2017-06-15 13:56:16 +02:00
EstimationResult tempResult ;
2017-04-20 21:16:19 +02:00
2017-07-17 08:51:41 +02:00
// Return failure if trying to analyze a target we're not tracking
2017-07-18 01:33:29 +02:00
if ( confTarget < = 0 | | ( unsigned int ) confTarget > longStats - > GetMaxConfirms ( ) ) {
2017-07-20 16:55:31 +02:00
return CFeeRate ( 0 ) ; // error condition
2017-07-18 01:33:29 +02:00
}
2017-04-20 21:16:19 +02:00
2017-07-17 08:51:41 +02:00
// It's not possible to get reasonable estimates for confTarget of 1
2017-07-18 01:33:29 +02:00
if ( confTarget = = 1 ) confTarget = 2 ;
2017-04-20 21:16:19 +02:00
2017-07-17 08:51:41 +02:00
unsigned int maxUsableEstimate = MaxUsableEstimate ( ) ;
if ( ( unsigned int ) confTarget > maxUsableEstimate ) {
confTarget = maxUsableEstimate ;
}
2017-07-18 01:33:29 +02:00
if ( feeCalc ) feeCalc - > returnedTarget = confTarget ;
2017-07-20 16:55:31 +02:00
if ( confTarget < = 1 ) return CFeeRate ( 0 ) ; // error condition
2017-05-17 22:02:50 +02:00
2017-07-17 08:51:41 +02:00
assert ( confTarget > 0 ) ; //estimateCombinedFee and estimateConservativeFee take unsigned ints
/** true is passed to estimateCombined fee for target/2 and target so
* that we check the max confirms for shorter time horizons as well .
* This is necessary to preserve monotonically increasing estimates .
* For non - conservative estimates we do the same thing for 2 * target , but
* for conservative estimates we want to skip these shorter horizons
* checks for 2 * target because we are taking the max over all time
* horizons so we already have monotonically increasing estimates and
* the purpose of conservative estimates is not to let short term
* fluctuations lower our estimates by too much .
*/
double halfEst = estimateCombinedFee ( confTarget / 2 , HALF_SUCCESS_PCT , true , & tempResult ) ;
if ( feeCalc ) {
feeCalc - > est = tempResult ;
feeCalc - > reason = FeeReason : : HALF_ESTIMATE ;
}
median = halfEst ;
double actualEst = estimateCombinedFee ( confTarget , SUCCESS_PCT , true , & tempResult ) ;
if ( actualEst > median ) {
median = actualEst ;
2017-06-15 13:56:16 +02:00
if ( feeCalc ) {
feeCalc - > est = tempResult ;
2017-07-17 08:51:41 +02:00
feeCalc - > reason = FeeReason : : FULL_ESTIMATE ;
2017-06-15 13:56:16 +02:00
}
2017-07-17 08:51:41 +02:00
}
double doubleEst = estimateCombinedFee ( 2 * confTarget , DOUBLE_SUCCESS_PCT , ! conservative , & tempResult ) ;
if ( doubleEst > median ) {
median = doubleEst ;
if ( feeCalc ) {
feeCalc - > est = tempResult ;
feeCalc - > reason = FeeReason : : DOUBLE_ESTIMATE ;
2017-05-17 22:02:50 +02:00
}
2017-07-17 08:51:41 +02:00
}
if ( conservative | | median = = - 1 ) {
double consEst = estimateConservativeFee ( 2 * confTarget , & tempResult ) ;
if ( consEst > median ) {
median = consEst ;
2017-06-15 13:56:16 +02:00
if ( feeCalc ) {
feeCalc - > est = tempResult ;
2017-07-17 08:51:41 +02:00
feeCalc - > reason = FeeReason : : CONSERVATIVE ;
2017-06-15 13:56:16 +02:00
}
2017-05-17 22:02:50 +02:00
}
2017-07-17 08:51:41 +02:00
}
2015-11-16 21:10:22 +01:00
2017-07-20 16:55:31 +02:00
if ( median < 0 ) return CFeeRate ( 0 ) ; // error condition
2015-11-16 21:10:22 +01:00
2017-09-30 18:07:44 +02:00
return CFeeRate ( llround ( median ) ) ;
2015-11-16 21:10:22 +01:00
}
2017-05-17 22:02:50 +02:00
2017-04-20 21:16:19 +02:00
bool CBlockPolicyEstimator : : Write ( CAutoFile & fileout ) const
2014-08-26 22:28:32 +02:00
{
2017-04-20 21:16:19 +02:00
try {
LOCK ( cs_feeEstimator ) ;
2019-05-28 15:42:15 +02:00
fileout < < 140100 ; // version required to read: 0.14.1 or later
2017-04-20 21:16:19 +02:00
fileout < < CLIENT_VERSION ; // version that wrote the file
fileout < < nBestSeenHeight ;
2017-05-17 22:02:50 +02:00
if ( BlockSpan ( ) > HistoricalBlockSpan ( ) / 2 ) {
fileout < < firstRecordedHeight < < nBestSeenHeight ;
}
else {
fileout < < historicalFirst < < historicalBest ;
}
fileout < < buckets ;
2017-04-20 21:16:19 +02:00
feeStats - > Write ( fileout ) ;
2017-05-17 22:02:50 +02:00
shortStats - > Write ( fileout ) ;
longStats - > Write ( fileout ) ;
2017-04-20 21:16:19 +02:00
}
catch ( const std : : exception & ) {
2017-04-25 13:28:07 +02:00
LogPrintf ( " CBlockPolicyEstimator::Write(): unable to write policy estimator data (non-fatal) \n " ) ;
2017-04-20 21:16:19 +02:00
return false ;
}
return true ;
2014-08-26 22:28:32 +02:00
}
2017-04-20 21:16:19 +02:00
bool CBlockPolicyEstimator : : Read ( CAutoFile & filein )
2014-08-26 22:28:32 +02:00
{
2017-04-20 21:16:19 +02:00
try {
LOCK ( cs_feeEstimator ) ;
2017-05-17 22:02:50 +02:00
int nVersionRequired , nVersionThatWrote ;
unsigned int nFileBestSeenHeight , nFileHistoricalFirst , nFileHistoricalBest ;
2017-04-20 21:16:19 +02:00
filein > > nVersionRequired > > nVersionThatWrote ;
if ( nVersionRequired > CLIENT_VERSION )
return error ( " CBlockPolicyEstimator::Read() : up - version ( % d ) fee estimate file " , nVersionRequired) ;
2017-05-17 22:02:50 +02:00
// Read fee estimates file into temporary variables so existing data
// structures aren't corrupted if there is an exception.
2017-04-20 21:16:19 +02:00
filein > > nFileBestSeenHeight ;
2017-05-17 22:02:50 +02:00
2017-12-19 17:03:27 +01:00
if ( nVersionRequired < 140100 ) {
LogPrintf ( " %s: incompatible old fee estimation data (non-fatal). Version: %d \n " , __func__ , nVersionRequired ) ;
} else { // New format introduced in 140100
unsigned int nFileHistoricalFirst , nFileHistoricalBest ;
2017-05-17 22:02:50 +02:00
filein > > nFileHistoricalFirst > > nFileHistoricalBest ;
if ( nFileHistoricalFirst > nFileHistoricalBest | | nFileHistoricalBest > nFileBestSeenHeight ) {
throw std : : runtime_error ( " Corrupt estimates file. Historical block range for estimates is invalid " ) ;
}
std : : vector < double > fileBuckets ;
filein > > fileBuckets ;
size_t numBuckets = fileBuckets . size ( ) ;
if ( numBuckets < = 1 | | numBuckets > 1000 )
throw std : : runtime_error ( " Corrupt estimates file. Must have between 2 and 1000 feerate buckets " ) ;
std : : unique_ptr < TxConfirmStats > fileFeeStats ( new TxConfirmStats ( buckets , bucketMap , MED_BLOCK_PERIODS , MED_DECAY , MED_SCALE ) ) ;
std : : unique_ptr < TxConfirmStats > fileShortStats ( new TxConfirmStats ( buckets , bucketMap , SHORT_BLOCK_PERIODS , SHORT_DECAY , SHORT_SCALE ) ) ;
std : : unique_ptr < TxConfirmStats > fileLongStats ( new TxConfirmStats ( buckets , bucketMap , LONG_BLOCK_PERIODS , LONG_DECAY , LONG_SCALE ) ) ;
fileFeeStats - > Read ( filein , nVersionThatWrote , numBuckets ) ;
fileShortStats - > Read ( filein , nVersionThatWrote , numBuckets ) ;
fileLongStats - > Read ( filein , nVersionThatWrote , numBuckets ) ;
// Fee estimates file parsed correctly
// Copy buckets from file and refresh our bucketmap
buckets = fileBuckets ;
bucketMap . clear ( ) ;
for ( unsigned int i = 0 ; i < buckets . size ( ) ; i + + ) {
bucketMap [ buckets [ i ] ] = i ;
}
// Destroy old TxConfirmStats and point to new ones that already reference buckets and bucketMap
2017-11-09 21:22:08 +01:00
feeStats = std : : move ( fileFeeStats ) ;
shortStats = std : : move ( fileShortStats ) ;
longStats = std : : move ( fileLongStats ) ;
2017-05-17 22:02:50 +02:00
nBestSeenHeight = nFileBestSeenHeight ;
historicalFirst = nFileHistoricalFirst ;
historicalBest = nFileHistoricalBest ;
}
2017-04-20 21:16:19 +02:00
}
2017-05-17 22:02:50 +02:00
catch ( const std : : exception & e ) {
LogPrintf ( " CBlockPolicyEstimator::Read(): unable to read policy estimator data (non-fatal): %s \n " , e . what ( ) ) ;
2017-04-20 21:16:19 +02:00
return false ;
}
return true ;
2014-08-26 22:28:32 +02:00
}
2016-03-21 18:02:47 +01:00
2018-02-08 22:05:00 +01:00
void CBlockPolicyEstimator : : FlushUnconfirmed ( ) {
2017-05-17 22:02:50 +02:00
int64_t startclear = GetTimeMicros ( ) ;
LOCK ( cs_feeEstimator ) ;
2018-02-08 22:05:00 +01:00
size_t num_entries = mapMemPoolTxs . size ( ) ;
// Remove every entry in mapMemPoolTxs
while ( ! mapMemPoolTxs . empty ( ) ) {
auto mi = mapMemPoolTxs . begin ( ) ;
removeTx ( mi - > first , false ) ; // this calls erase() on mapMemPoolTxs
2017-05-17 22:02:50 +02:00
}
int64_t endclear = GetTimeMicros ( ) ;
2018-02-08 22:05:00 +01:00
LogPrint ( BCLog : : ESTIMATEFEE , " Recorded %u unconfirmed txs from mempool in %ld micros \n " , num_entries , endclear - startclear ) ;
2017-05-17 22:02:50 +02:00
}