Merge #10199: Better fee estimates

38bc1ec Make more json-like output from estimaterawfee (Alex Morcos)
2d2e170 Comments and improved documentation (Alex Morcos)
ef589f8 minor cleanup: remove unnecessary variable (Alex Morcos)
3ee76d6 Introduce a scale factor (Alex Morcos)
5f1f0c6 Historical block span (Alex Morcos)
aa19b8e Clean up fee estimate debug printing (Alex Morcos)
10f7cbd Track first recorded height (Alex Morcos)
3810e97 Rewrite estimateSmartFee (Alex Morcos)
c7447ec Track failures in fee estimation. (Alex Morcos)
4186d3f Expose estimaterawfee (Alex Morcos)
2681153 minor refactor: explicitly track start of new bucket range and don't update curNearBucket on final loop. (Alex Morcos)
1ba43cc Make EstimateMedianVal smarter about small failures. (Alex Morcos)
d3e30bc Refactor to update moving average on fly (Alex Morcos)
e5007ba Change parameters for fee estimation and estimates on all 3 time horizons. (Alex Morcos)
c0a273f Change file format for fee estimates. (Alex Morcos)

Tree-SHA512: 186e7508d86a1f351bb656edcd84ee9091f5f2706331eda9ee29da9c8eb5bf67b8c1f2abf6662835560e7f613b1377099054f20767f41ddcdbc89c4f9e78946d

This commit is contained in:
parent c7dcf79f02
commit b430366dd9
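For orientation before the raw diff: the headline change is that fee estimation now keeps three TxConfirmStats data sets (short, medium and long half-life), exposes a new estimateRawFee call that can return an EstimationResult with per-bucket detail, and adds a `conservative` flag to estimateSmartFee. A minimal sketch of how a caller might exercise the new interface, based only on the declarations visible in the policy header hunks further down; the wrapper function and its logic are illustrative, not part of this commit:

```cpp
// Illustrative use of the API added by this PR; only the estimator calls and
// their signatures come from the diff below, the rest is assumed scaffolding.
#include "policy/fees.h"
#include "txmempool.h"

CFeeRate DemoEstimate(const CBlockPolicyEstimator& estimator, const CTxMemPool& pool)
{
    // Single-horizon calculation: 95% success within 6 blocks on the
    // medium half-life data set, with bucket-level detail returned.
    EstimationResult detail;
    CFeeRate raw = estimator.estimateRawFee(6, 0.95, FeeEstimateHorizon::MED_HALFLIFE, &detail);

    // The rewritten smart estimate: max of the 60%/85%/95% thresholds at
    // target/2, target and 2*target; a conservative estimate must also
    // hold up over longer time horizons.
    int answerTarget = 0;
    CFeeRate smart = estimator.estimateSmartFee(6, &answerTarget, pool, /*conservative=*/true);

    return smart > raw ? smart : raw;
}
```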
@@ -270,6 +270,7 @@ void PrepareShutdown()

if (fFeeEstimatesInitialized)
{
::feeEstimator.FlushUnconfirmed(::mempool);
fs::path est_path = GetDataDir() / FEE_ESTIMATES_FILENAME;
CAutoFile est_fileout(fsbridge::fopen(est_path, "wb"), SER_DISK, CLIENT_VERSION);
if (!est_fileout.IsNull())
@@ -13,6 +13,8 @@
#include "txmempool.h"
#include "util.h"

static constexpr double INF_FEERATE = 1e99;

/**
* We will instantiate an instance of this class to track transactions that were
* included in a block. We will lump transactions into a bucket according to their
@@ -25,40 +27,43 @@ class TxConfirmStats
{
private:
//Define the buckets we will group transactions into
std::vector<double> buckets; // The upper-bound of the range for the bucket (inclusive)
std::map<double, unsigned int> bucketMap; // Map of bucket upper-bound to index into all vectors by bucket
const std::vector<double>& buckets; // The upper-bound of the range for the bucket (inclusive)
const std::map<double, unsigned int>& bucketMap; // Map of bucket upper-bound to index into all vectors by bucket

// For each bucket X:
// Count the total # of txs in each bucket
// Track the historical moving average of this total over blocks
std::vector<double> txCtAvg;
// and calculate the total for the current block to update the moving average
std::vector<int> curBlockTxCt;

// Count the total # of txs confirmed within Y blocks in each bucket
// Track the historical moving average of theses totals over blocks
std::vector<std::vector<double> > confAvg; // confAvg[Y][X]
// and calculate the totals for the current block to update the moving averages
std::vector<std::vector<int> > curBlockConf; // curBlockConf[Y][X]
std::vector<std::vector<double>> confAvg; // confAvg[Y][X]

// Track moving avg of txs which have been evicted from the mempool
// after failing to be confirmed within Y blocks
std::vector<std::vector<double>> failAvg; // failAvg[Y][X]

// Sum the total feerate of all tx's in each bucket
// Track the historical moving average of this total over blocks
std::vector<double> avg;
// and calculate the total for the current block to update the moving average
std::vector<double> curBlockVal;

// Combine the conf counts with tx counts to calculate the confirmation % for each Y,X
// Combine the total value with the tx counts to calculate the avg feerate per bucket

double decay;

// Resolution (# of blocks) with which confirmations are tracked
unsigned int scale;

// Mempool counts of outstanding transactions
// For each bucket X, track the number of transactions in the mempool
// that are unconfirmed for each possible confirmation value Y
std::vector<std::vector<int> > unconfTxs; //unconfTxs[Y][X]
// transactions still unconfirmed after MAX_CONFIRMS for each bucket
// transactions still unconfirmed after GetMaxConfirms for each bucket
std::vector<int> oldUnconfTxs;

void resizeInMemoryCounters(size_t newbuckets);

public:
/**
* Create new TxConfirmStats. This is called by BlockPolicyEstimator's
@@ -67,9 +72,10 @@ public:
* @param maxPeriods max number of periods to track
* @param decay how much to decay the historical moving average per block
*/
TxConfirmStats(const std::vector<double>& defaultBuckets, unsigned int maxConfirms, double decay);
TxConfirmStats(const std::vector<double>& defaultBuckets, const std::map<double, unsigned int>& defaultBucketMap,
unsigned int maxPeriods, double decay, unsigned int scale);

/** Clear the state of the curBlock variables to start counting for the new block */
/** Roll the circular buffer for unconfirmed txs*/
void ClearCurrent(unsigned int nBlockHeight);

/**
@@ -85,7 +91,7 @@ public:

/** Remove a transaction from mempool tracking stats*/
void removeTx(unsigned int entryHeight, unsigned int nBestSeenHeight,
unsigned int bucketIndex);
unsigned int bucketIndex, bool inBlock);

/** Update our estimates by decaying our historical moving average and updating
with the data gathered from the current block */
@@ -103,10 +109,11 @@ public:
* @param nBlockHeight the current block height
*/
double EstimateMedianVal(int confTarget, double sufficientTxVal,
double minSuccess, bool requireGreater, unsigned int nBlockHeight) const;
double minSuccess, bool requireGreater, unsigned int nBlockHeight,
EstimationResult *result = nullptr) const;

/** Return the max number of confirms we're tracking */
unsigned int GetMaxConfirms() const { return confAvg.size(); }
unsigned int GetMaxConfirms() const { return scale * confAvg.size(); }

/** Write state of estimation data to a file*/
void Write(CAutoFile& fileout) const;
@@ -115,44 +122,47 @@ public:
* Read saved state of estimation data from a file and replace all internal data structures and
* variables with this state.
*/
void Read(CAutoFile& filein);
void Read(CAutoFile& filein, int nFileVersion, size_t numBuckets);
};


TxConfirmStats::TxConfirmStats(const std::vector<double>& defaultBuckets,
unsigned int maxConfirms, double _decay)
const std::map<double, unsigned int>& defaultBucketMap,
unsigned int maxPeriods, double _decay, unsigned int _scale)
: buckets(defaultBuckets), bucketMap(defaultBucketMap)
{
decay = _decay;
for (unsigned int i = 0; i < defaultBuckets.size(); i++) {
buckets.push_back(defaultBuckets[i]);
bucketMap[defaultBuckets[i]] = i;
}
confAvg.resize(maxConfirms);
curBlockConf.resize(maxConfirms);
unconfTxs.resize(maxConfirms);
for (unsigned int i = 0; i < maxConfirms; i++) {
scale = _scale;
confAvg.resize(maxPeriods);
for (unsigned int i = 0; i < maxPeriods; i++) {
confAvg[i].resize(buckets.size());
curBlockConf[i].resize(buckets.size());
unconfTxs[i].resize(buckets.size());
}
failAvg.resize(maxPeriods);
for (unsigned int i = 0; i < maxPeriods; i++) {
failAvg[i].resize(buckets.size());
}

oldUnconfTxs.resize(buckets.size());
curBlockTxCt.resize(buckets.size());
txCtAvg.resize(buckets.size());
curBlockVal.resize(buckets.size());
avg.resize(buckets.size());

resizeInMemoryCounters(buckets.size());
}

// Zero out the data for the current block
void TxConfirmStats::resizeInMemoryCounters(size_t newbuckets) {
// newbuckets must be passed in because the buckets referred to during Read have not been updated yet.
unconfTxs.resize(GetMaxConfirms());
for (unsigned int i = 0; i < unconfTxs.size(); i++) {
unconfTxs[i].resize(newbuckets);
}
oldUnconfTxs.resize(newbuckets);
}

// Roll the unconfirmed txs circular buffer
void TxConfirmStats::ClearCurrent(unsigned int nBlockHeight)
{
for (unsigned int j = 0; j < buckets.size(); j++) {
oldUnconfTxs[j] += unconfTxs[nBlockHeight%unconfTxs.size()][j];
unconfTxs[nBlockHeight%unconfTxs.size()][j] = 0;
for (unsigned int i = 0; i < curBlockConf.size(); i++)
curBlockConf[i][j] = 0;
curBlockTxCt[j] = 0;
curBlockVal[j] = 0;
}
}

@@ -162,33 +172,38 @@ void TxConfirmStats::Record(int blocksToConfirm, double val)
// blocksToConfirm is 1-based
if (blocksToConfirm < 1)
return;
int periodsToConfirm = (blocksToConfirm + scale - 1)/scale;
unsigned int bucketindex = bucketMap.lower_bound(val)->second;
for (size_t i = blocksToConfirm; i <= curBlockConf.size(); i++) {
curBlockConf[i - 1][bucketindex]++;
for (size_t i = periodsToConfirm; i <= confAvg.size(); i++) {
confAvg[i - 1][bucketindex]++;
}
curBlockTxCt[bucketindex]++;
curBlockVal[bucketindex] += val;
txCtAvg[bucketindex]++;
avg[bucketindex] += val;
}

void TxConfirmStats::UpdateMovingAverages()
{
for (unsigned int j = 0; j < buckets.size(); j++) {
for (unsigned int i = 0; i < confAvg.size(); i++)
confAvg[i][j] = confAvg[i][j] * decay + curBlockConf[i][j];
avg[j] = avg[j] * decay + curBlockVal[j];
txCtAvg[j] = txCtAvg[j] * decay + curBlockTxCt[j];
confAvg[i][j] = confAvg[i][j] * decay;
for (unsigned int i = 0; i < failAvg.size(); i++)
failAvg[i][j] = failAvg[i][j] * decay;
avg[j] = avg[j] * decay;
txCtAvg[j] = txCtAvg[j] * decay;
}
}

// returns -1 on error conditions
double TxConfirmStats::EstimateMedianVal(int confTarget, double sufficientTxVal,
double successBreakPoint, bool requireGreater,
unsigned int nBlockHeight) const
unsigned int nBlockHeight, EstimationResult *result) const
{
// Counters for a bucket (or range of buckets)
double nConf = 0; // Number of tx's confirmed within the confTarget
double totalNum = 0; // Total number of tx's that were ever confirmed
int extraNum = 0; // Number of tx's still in mempool for confTarget or longer
double failNum = 0; // Number of tx's that were never confirmed but removed from the mempool after confTarget
int periodTarget = (confTarget + scale - 1)/scale;

int maxbucketindex = buckets.size() - 1;
@@ -211,12 +226,21 @@ double TxConfirmStats::EstimateMedianVal(int confTarget, double sufficientTxVal,

bool foundAnswer = false;
unsigned int bins = unconfTxs.size();
bool newBucketRange = true;
bool passing = true;
EstimatorBucket passBucket;
EstimatorBucket failBucket;

// Start counting from highest(default) or lowest feerate transactions
for (int bucket = startbucket; bucket >= 0 && bucket <= maxbucketindex; bucket += step) {
if (newBucketRange) {
curNearBucket = bucket;
newBucketRange = false;
}
curFarBucket = bucket;
nConf += confAvg[confTarget - 1][bucket];
nConf += confAvg[periodTarget - 1][bucket];
totalNum += txCtAvg[bucket];
failNum += failAvg[periodTarget - 1][bucket];
for (unsigned int confct = confTarget; confct < GetMaxConfirms(); confct++)
extraNum += unconfTxs[(nBlockHeight - confct)%bins][bucket];
extraNum += oldUnconfTxs[bucket];
@@ -225,24 +249,41 @@ double TxConfirmStats::EstimateMedianVal(int confTarget, double sufficientTxVal,
// (Only count the confirmed data points, so that each confirmation count
// will be looking at the same amount of data and same bucket breaks)
if (totalNum >= sufficientTxVal / (1 - decay)) {
double curPct = nConf / (totalNum + extraNum);
double curPct = nConf / (totalNum + failNum + extraNum);

// Check to see if we are no longer getting confirmed at the success rate
if (requireGreater && curPct < successBreakPoint)
break;
if (!requireGreater && curPct > successBreakPoint)
break;

if ((requireGreater && curPct < successBreakPoint) || (!requireGreater && curPct > successBreakPoint)) {
if (passing == true) {
// First time we hit a failure record the failed bucket
unsigned int failMinBucket = std::min(curNearBucket, curFarBucket);
unsigned int failMaxBucket = std::max(curNearBucket, curFarBucket);
failBucket.start = failMinBucket ? buckets[failMinBucket - 1] : 0;
failBucket.end = buckets[failMaxBucket];
failBucket.withinTarget = nConf;
failBucket.totalConfirmed = totalNum;
failBucket.inMempool = extraNum;
failBucket.leftMempool = failNum;
passing = false;
}
continue;
}
// Otherwise update the cumulative stats, and the bucket variables
// and reset the counters
else {
failBucket = EstimatorBucket(); // Reset any failed bucket, currently passing
foundAnswer = true;
passing = true;
passBucket.withinTarget = nConf;
nConf = 0;
passBucket.totalConfirmed = totalNum;
totalNum = 0;
passBucket.inMempool = extraNum;
passBucket.leftMempool = failNum;
failNum = 0;
extraNum = 0;
bestNearBucket = curNearBucket;
bestFarBucket = curFarBucket;
curNearBucket = bucket + step;
newBucketRange = true;
}
}
}
@@ -254,8 +295,8 @@ double TxConfirmStats::EstimateMedianVal(int confTarget, double sufficientTxVal,
// Find the bucket with the median transaction and then report the average feerate from that bucket
// This is a compromise between finding the median which we can't since we don't save all tx's
// and reporting the average which is less accurate
unsigned int minBucket = bestNearBucket < bestFarBucket ? bestNearBucket : bestFarBucket;
unsigned int maxBucket = bestNearBucket > bestFarBucket ? bestNearBucket : bestFarBucket;
unsigned int minBucket = std::min(bestNearBucket, bestFarBucket);
unsigned int maxBucket = std::max(bestNearBucket, bestFarBucket);
for (unsigned int j = minBucket; j <= maxBucket; j++) {
txSum += txCtAvg[j];
}
@@ -269,83 +310,109 @@ double TxConfirmStats::EstimateMedianVal(int confTarget, double sufficientTxVal,
break;
}
}

passBucket.start = minBucket ? buckets[minBucket-1] : 0;
passBucket.end = buckets[maxBucket];
}

LogPrint(BCLog::ESTIMATEFEE, "%3d: For conf success %s %4.2f need feerate %s: %12.5g from buckets %8g - %8g Cur Bucket stats %6.2f%% %8.1f/(%.1f+%d mempool)\n",
confTarget, requireGreater ? ">" : "<", successBreakPoint,
requireGreater ? ">" : "<", median, buckets[minBucket], buckets[maxBucket],
100 * nConf / (totalNum + extraNum), nConf, totalNum, extraNum);
// If we were passing until we reached last few buckets with insufficient data, then report those as failed
if (passing && !newBucketRange) {
unsigned int failMinBucket = std::min(curNearBucket, curFarBucket);
unsigned int failMaxBucket = std::max(curNearBucket, curFarBucket);
failBucket.start = failMinBucket ? buckets[failMinBucket - 1] : 0;
failBucket.end = buckets[failMaxBucket];
failBucket.withinTarget = nConf;
failBucket.totalConfirmed = totalNum;
failBucket.inMempool = extraNum;
failBucket.leftMempool = failNum;
}

LogPrint(BCLog::ESTIMATEFEE, "FeeEst: %d %s%.0f%% decay %.5f: feerate: %g from (%g - %g) %.2f%% %.1f/(%.1f %d mem %.1f out) Fail: (%g - %g) %.2f%% %.1f/(%.1f %d mem %.1f out)\n",
confTarget, requireGreater ? ">" : "<", 100.0 * successBreakPoint, decay,
median, passBucket.start, passBucket.end,
100 * passBucket.withinTarget / (passBucket.totalConfirmed + passBucket.inMempool + passBucket.leftMempool),
passBucket.withinTarget, passBucket.totalConfirmed, passBucket.inMempool, passBucket.leftMempool,
failBucket.start, failBucket.end,
100 * failBucket.withinTarget / (failBucket.totalConfirmed + failBucket.inMempool + failBucket.leftMempool),
failBucket.withinTarget, failBucket.totalConfirmed, failBucket.inMempool, failBucket.leftMempool);


if (result) {
result->pass = passBucket;
result->fail = failBucket;
result->decay = decay;
result->scale = scale;
}
return median;
}

void TxConfirmStats::Write(CAutoFile& fileout) const
{
fileout << decay;
fileout << buckets;
fileout << scale;
fileout << avg;
fileout << txCtAvg;
fileout << confAvg;
fileout << failAvg;
}

void TxConfirmStats::Read(CAutoFile& filein)
void TxConfirmStats::Read(CAutoFile& filein, int nFileVersion, size_t numBuckets)
{
// Read data file into temporary variables and do some very basic sanity checking
std::vector<double> fileBuckets;
std::vector<double> fileAvg;
std::vector<std::vector<double> > fileConfAvg;
std::vector<double> fileTxCtAvg;
double fileDecay;
size_t maxConfirms;
size_t numBuckets;
// Read data file and do some very basic sanity checking
// buckets and bucketMap are not updated yet, so don't access them
// If there is a read failure, we'll just discard this entire object anyway
size_t maxConfirms, maxPeriods;

filein >> fileDecay;
if (fileDecay <= 0 || fileDecay >= 1)
throw std::runtime_error("Corrupt estimates file. Decay must be between 0 and 1 (non-inclusive)");
filein >> fileBuckets;
numBuckets = fileBuckets.size();
if (numBuckets <= 1 || numBuckets > 1000)
throw std::runtime_error("Corrupt estimates file. Must have between 2 and 1000 feerate buckets");
filein >> fileAvg;
if (fileAvg.size() != numBuckets)
throw std::runtime_error("Corrupt estimates file. Mismatch in feerate average bucket count");
filein >> fileTxCtAvg;
if (fileTxCtAvg.size() != numBuckets)
throw std::runtime_error("Corrupt estimates file. Mismatch in tx count bucket count");
filein >> fileConfAvg;
maxConfirms = fileConfAvg.size();
if (maxConfirms <= 0 || maxConfirms > 6 * 24 * 7) // one week
throw std::runtime_error("Corrupt estimates file. Must maintain estimates for between 1 and 1008 (one week) confirms");
for (unsigned int i = 0; i < maxConfirms; i++) {
if (fileConfAvg[i].size() != numBuckets)
throw std::runtime_error("Corrupt estimates file. Mismatch in feerate conf average bucket count");
// The current version will store the decay with each individual TxConfirmStats and also keep a scale factor
if (nFileVersion >= 149900) {
filein >> decay;
if (decay <= 0 || decay >= 1) {
throw std::runtime_error("Corrupt estimates file. Decay must be between 0 and 1 (non-inclusive)");
}
filein >> scale;
}

filein >> avg;
if (avg.size() != numBuckets) {
throw std::runtime_error("Corrupt estimates file. Mismatch in feerate average bucket count");
}
filein >> txCtAvg;
if (txCtAvg.size() != numBuckets) {
throw std::runtime_error("Corrupt estimates file. Mismatch in tx count bucket count");
}
filein >> confAvg;
maxPeriods = confAvg.size();
maxConfirms = scale * maxPeriods;

if (maxConfirms <= 0 || maxConfirms > 6 * 24 * 7) { // one week
throw std::runtime_error("Corrupt estimates file. Must maintain estimates for between 1 and 1008 (one week) confirms");
}
for (unsigned int i = 0; i < maxPeriods; i++) {
if (confAvg[i].size() != numBuckets) {
throw std::runtime_error("Corrupt estimates file. Mismatch in feerate conf average bucket count");
}
}

if (nFileVersion >= 149900) {
filein >> failAvg;
if (maxPeriods != failAvg.size()) {
throw std::runtime_error("Corrupt estimates file. Mismatch in confirms tracked for failures");
}
for (unsigned int i = 0; i < maxPeriods; i++) {
if (failAvg[i].size() != numBuckets) {
throw std::runtime_error("Corrupt estimates file. Mismatch in one of failure average bucket counts");
}
}
} else {
failAvg.resize(confAvg.size());
for (unsigned int i = 0; i < failAvg.size(); i++) {
failAvg[i].resize(numBuckets);
}
}
// Now that we've processed the entire feerate estimate data file and not
// thrown any errors, we can copy it to our data structures
decay = fileDecay;
buckets = fileBuckets;
avg = fileAvg;
confAvg = fileConfAvg;
txCtAvg = fileTxCtAvg;
bucketMap.clear();

// Resize the current block variables which aren't stored in the data file
// to match the number of confirms and buckets
curBlockConf.resize(maxConfirms);
for (unsigned int i = 0; i < maxConfirms; i++) {
curBlockConf[i].resize(buckets.size());
}
curBlockTxCt.resize(buckets.size());
curBlockVal.resize(buckets.size());

unconfTxs.resize(maxConfirms);
for (unsigned int i = 0; i < maxConfirms; i++) {
unconfTxs[i].resize(buckets.size());
}
oldUnconfTxs.resize(buckets.size());

for (unsigned int i = 0; i < buckets.size(); i++)
bucketMap[buckets[i]] = i;
resizeInMemoryCounters(numBuckets);

LogPrint(BCLog::ESTIMATEFEE, "Reading estimates: %u buckets counting confirms up to %u blocks\n",
numBuckets, maxConfirms);
@@ -359,7 +426,7 @@ unsigned int TxConfirmStats::NewTx(unsigned int nBlockHeight, double val)
return bucketindex;
}

void TxConfirmStats::removeTx(unsigned int entryHeight, unsigned int nBestSeenHeight, unsigned int bucketindex)
void TxConfirmStats::removeTx(unsigned int entryHeight, unsigned int nBestSeenHeight, unsigned int bucketindex, bool inBlock)
{
//nBestSeenHeight is not updated yet for the new block
int blocksAgo = nBestSeenHeight - entryHeight;
@@ -387,6 +454,12 @@ void TxConfirmStats::removeTx(unsigned int entryHeight, unsigned int nBestSeenHe
blockIndex, bucketindex);
}
}
if (!inBlock && (unsigned int)blocksAgo >= scale) { // Only counts as a failure if not confirmed for entire period
unsigned int periodsAgo = blocksAgo / scale;
for (size_t i = 0; i < periodsAgo && i < failAvg.size(); i++) {
failAvg[i][bucketindex]++;
}
}
}

// This function is called from CTxMemPool::removeUnchecked to ensure
@@ -394,12 +467,14 @@ void TxConfirmStats::removeTx(unsigned int entryHeight, unsigned int nBestSeenHe
// tracked. Txs that were part of a block have already been removed in
// processBlockTx to ensure they are never double tracked, but it is
// of no harm to try to remove them again.
bool CBlockPolicyEstimator::removeTx(uint256 hash)
bool CBlockPolicyEstimator::removeTx(uint256 hash, bool inBlock)
{
LOCK(cs_feeEstimator);
std::map<uint256, TxStatsInfo>::iterator pos = mapMemPoolTxs.find(hash);
if (pos != mapMemPoolTxs.end()) {
feeStats->removeTx(pos->second.blockHeight, nBestSeenHeight, pos->second.bucketIndex);
feeStats->removeTx(pos->second.blockHeight, nBestSeenHeight, pos->second.bucketIndex, inBlock);
shortStats->removeTx(pos->second.blockHeight, nBestSeenHeight, pos->second.bucketIndex, inBlock);
longStats->removeTx(pos->second.blockHeight, nBestSeenHeight, pos->second.bucketIndex, inBlock);
mapMemPoolTxs.erase(hash);
return true;
} else {
@@ -408,21 +483,28 @@ bool CBlockPolicyEstimator::removeTx(uint256 hash)
}

CBlockPolicyEstimator::CBlockPolicyEstimator()
: nBestSeenHeight(0), trackedTxs(0), untrackedTxs(0)
: nBestSeenHeight(0), firstRecordedHeight(0), historicalFirst(0), historicalBest(0), trackedTxs(0), untrackedTxs(0)
{
static_assert(MIN_BUCKET_FEERATE > 0, "Min feerate must be nonzero");
minTrackedFee = CFeeRate(MIN_BUCKET_FEERATE);
std::vector<double> vfeelist;
for (double bucketBoundary = minTrackedFee.GetFeePerK(); bucketBoundary <= MAX_BUCKET_FEERATE; bucketBoundary *= FEE_SPACING) {
vfeelist.push_back(bucketBoundary);
size_t bucketIndex = 0;
for (double bucketBoundary = MIN_BUCKET_FEERATE; bucketBoundary <= MAX_BUCKET_FEERATE; bucketBoundary *= FEE_SPACING, bucketIndex++) {
buckets.push_back(bucketBoundary);
bucketMap[bucketBoundary] = bucketIndex;
}
vfeelist.push_back(INF_FEERATE);
feeStats = new TxConfirmStats(vfeelist, MAX_BLOCK_CONFIRMS, DEFAULT_DECAY);
buckets.push_back(INF_FEERATE);
bucketMap[INF_FEERATE] = bucketIndex;
assert(bucketMap.size() == buckets.size());

feeStats = new TxConfirmStats(buckets, bucketMap, MED_BLOCK_PERIODS, MED_DECAY, MED_SCALE);
shortStats = new TxConfirmStats(buckets, bucketMap, SHORT_BLOCK_PERIODS, SHORT_DECAY, SHORT_SCALE);
longStats = new TxConfirmStats(buckets, bucketMap, LONG_BLOCK_PERIODS, LONG_DECAY, LONG_SCALE);
}

CBlockPolicyEstimator::~CBlockPolicyEstimator()
{
delete feeStats;
delete shortStats;
delete longStats;
}

void CBlockPolicyEstimator::processTransaction(const CTxMemPoolEntry& entry, bool validFeeEstimate)
@@ -455,12 +537,17 @@ void CBlockPolicyEstimator::processTransaction(const CTxMemPoolEntry& entry, boo
CFeeRate feeRate(entry.GetFee(), entry.GetTxSize());

mapMemPoolTxs[hash].blockHeight = txHeight;
mapMemPoolTxs[hash].bucketIndex = feeStats->NewTx(txHeight, (double)feeRate.GetFeePerK());
unsigned int bucketIndex = feeStats->NewTx(txHeight, (double)feeRate.GetFeePerK());
mapMemPoolTxs[hash].bucketIndex = bucketIndex;
unsigned int bucketIndex2 = shortStats->NewTx(txHeight, (double)feeRate.GetFeePerK());
assert(bucketIndex == bucketIndex2);
unsigned int bucketIndex3 = longStats->NewTx(txHeight, (double)feeRate.GetFeePerK());
assert(bucketIndex == bucketIndex3);
}

bool CBlockPolicyEstimator::processBlockTx(unsigned int nBlockHeight, const CTxMemPoolEntry* entry)
{
if (!removeTx(entry->GetTx().GetHash())) {
if (!removeTx(entry->GetTx().GetHash(), true)) {
// This transaction wasn't being tracked for fee estimation
return false;
}
@@ -480,6 +567,8 @@ bool CBlockPolicyEstimator::processBlockTx(unsigned int nBlockHeight, const CTxM
CFeeRate feeRate(entry->GetFee(), entry->GetTxSize());

feeStats->Record(blocksToConfirm, (double)feeRate.GetFeePerK());
shortStats->Record(blocksToConfirm, (double)feeRate.GetFeePerK());
longStats->Record(blocksToConfirm, (double)feeRate.GetFeePerK());
return true;
}

@@ -501,21 +590,32 @@ void CBlockPolicyEstimator::processBlock(unsigned int nBlockHeight,
// of unconfirmed txs to remove from tracking.
nBestSeenHeight = nBlockHeight;

// Clear the current block state and update unconfirmed circular buffer
// Update unconfirmed circular buffer
feeStats->ClearCurrent(nBlockHeight);
shortStats->ClearCurrent(nBlockHeight);
longStats->ClearCurrent(nBlockHeight);

// Decay all exponential averages
feeStats->UpdateMovingAverages();
shortStats->UpdateMovingAverages();
longStats->UpdateMovingAverages();

unsigned int countedTxs = 0;
// Repopulate the current block states
// Update averages with data points from current block
for (const auto& entry : entries) {
if (processBlockTx(nBlockHeight, entry))
countedTxs++;
}

// Update all exponential averages with the current block state
feeStats->UpdateMovingAverages();
if (firstRecordedHeight == 0 && countedTxs > 0) {
firstRecordedHeight = nBestSeenHeight;
LogPrint(BCLog::ESTIMATEFEE, "Blockpolicy first recorded height %u\n", firstRecordedHeight);
}

LogPrint(BCLog::ESTIMATEFEE, "Blockpolicy after updating estimates for %u of %u txs in block, since last block %u of %u tracked, new mempool map size %u\n",
countedTxs, entries.size(), trackedTxs, trackedTxs + untrackedTxs, mapMemPoolTxs.size());

LogPrint(BCLog::ESTIMATEFEE, "Blockpolicy estimates updated by %u of %u block txs, since last block %u of %u tracked, mempool map size %u, max target %u from %s\n",
countedTxs, entries.size(), trackedTxs, trackedTxs + untrackedTxs, mapMemPoolTxs.size(),
MaxUsableEstimate(), HistoricalBlockSpan() > BlockSpan() ? "historical" : "current");

trackedTxs = 0;
untrackedTxs = 0;
@@ -523,13 +623,44 @@ void CBlockPolicyEstimator::processBlock(unsigned int nBlockHeight,

CFeeRate CBlockPolicyEstimator::estimateFee(int confTarget) const
{
LOCK(cs_feeEstimator);
// Return failure if trying to analyze a target we're not tracking
// It's not possible to get reasonable estimates for confTarget of 1
if (confTarget <= 1 || (unsigned int)confTarget > feeStats->GetMaxConfirms())
if (confTarget <= 1)
return CFeeRate(0);

double median = feeStats->EstimateMedianVal(confTarget, SUFFICIENT_FEETXS, MIN_SUCCESS_PCT, true, nBestSeenHeight);
return estimateRawFee(confTarget, DOUBLE_SUCCESS_PCT, FeeEstimateHorizon::MED_HALFLIFE);
}

CFeeRate CBlockPolicyEstimator::estimateRawFee(int confTarget, double successThreshold, FeeEstimateHorizon horizon, EstimationResult* result) const
{
TxConfirmStats* stats;
double sufficientTxs = SUFFICIENT_FEETXS;
switch (horizon) {
case FeeEstimateHorizon::SHORT_HALFLIFE: {
stats = shortStats;
sufficientTxs = SUFFICIENT_TXS_SHORT;
break;
}
case FeeEstimateHorizon::MED_HALFLIFE: {
stats = feeStats;
break;
}
case FeeEstimateHorizon::LONG_HALFLIFE: {
stats = longStats;
break;
}
default: {
return CFeeRate(0);
}
}

LOCK(cs_feeEstimator);
// Return failure if trying to analyze a target we're not tracking
if (confTarget <= 0 || (unsigned int)confTarget > stats->GetMaxConfirms())
return CFeeRate(0);
if (successThreshold > 1)
return CFeeRate(0);

double median = stats->EstimateMedianVal(confTarget, sufficientTxs, successThreshold, true, nBestSeenHeight, result);

if (median < 0)
return CFeeRate(0);
@@ -537,31 +668,148 @@ CFeeRate CBlockPolicyEstimator::estimateFee(int confTarget) const
return CFeeRate(median);
}

CFeeRate CBlockPolicyEstimator::estimateSmartFee(int confTarget, int *answerFoundAtTarget, const CTxMemPool& pool) const
unsigned int CBlockPolicyEstimator::BlockSpan() const
{
if (firstRecordedHeight == 0) return 0;
assert(nBestSeenHeight >= firstRecordedHeight);

return nBestSeenHeight - firstRecordedHeight;
}

unsigned int CBlockPolicyEstimator::HistoricalBlockSpan() const
{
if (historicalFirst == 0) return 0;
assert(historicalBest >= historicalFirst);

if (nBestSeenHeight - historicalBest > OLDEST_ESTIMATE_HISTORY) return 0;

return historicalBest - historicalFirst;
}

unsigned int CBlockPolicyEstimator::MaxUsableEstimate() const
{
// Block spans are divided by 2 to make sure there are enough potential failing data points for the estimate
return std::min(longStats->GetMaxConfirms(), std::max(BlockSpan(), HistoricalBlockSpan()) / 2);
}

/** Return a fee estimate at the required successThreshold from the shortest
* time horizon which tracks confirmations up to the desired target. If
* checkShorterHorizon is requested, also allow short time horizon estimates
* for a lower target to reduce the given answer */
double CBlockPolicyEstimator::estimateCombinedFee(unsigned int confTarget, double successThreshold, bool checkShorterHorizon) const
{
double estimate = -1;
if (confTarget >= 1 && confTarget <= longStats->GetMaxConfirms()) {
// Find estimate from shortest time horizon possible
if (confTarget <= shortStats->GetMaxConfirms()) { // short horizon
estimate = shortStats->EstimateMedianVal(confTarget, SUFFICIENT_TXS_SHORT, successThreshold, true, nBestSeenHeight);
}
else if (confTarget <= feeStats->GetMaxConfirms()) { // medium horizon
estimate = feeStats->EstimateMedianVal(confTarget, SUFFICIENT_FEETXS, successThreshold, true, nBestSeenHeight);
}
else { // long horizon
estimate = longStats->EstimateMedianVal(confTarget, SUFFICIENT_FEETXS, successThreshold, true, nBestSeenHeight);
}
if (checkShorterHorizon) {
// If a lower confTarget from a more recent horizon returns a lower answer use it.
if (confTarget > feeStats->GetMaxConfirms()) {
double medMax = feeStats->EstimateMedianVal(feeStats->GetMaxConfirms(), SUFFICIENT_FEETXS, successThreshold, true, nBestSeenHeight);
if (medMax > 0 && (estimate == -1 || medMax < estimate))
estimate = medMax;
}
if (confTarget > shortStats->GetMaxConfirms()) {
double shortMax = shortStats->EstimateMedianVal(shortStats->GetMaxConfirms(), SUFFICIENT_TXS_SHORT, successThreshold, true, nBestSeenHeight);
if (shortMax > 0 && (estimate == -1 || shortMax < estimate))
estimate = shortMax;
}
}
}
return estimate;
}

/** Ensure that for a conservative estimate, the DOUBLE_SUCCESS_PCT is also met
* at 2 * target for any longer time horizons.
*/
double CBlockPolicyEstimator::estimateConservativeFee(unsigned int doubleTarget) const
{
double estimate = -1;
if (doubleTarget <= shortStats->GetMaxConfirms()) {
estimate = feeStats->EstimateMedianVal(doubleTarget, SUFFICIENT_FEETXS, DOUBLE_SUCCESS_PCT, true, nBestSeenHeight);
}
if (doubleTarget <= feeStats->GetMaxConfirms()) {
double longEstimate = longStats->EstimateMedianVal(doubleTarget, SUFFICIENT_FEETXS, DOUBLE_SUCCESS_PCT, true, nBestSeenHeight);
if (longEstimate > estimate) {
estimate = longEstimate;
}
}
return estimate;
}

/** estimateSmartFee returns the max of the feerates calculated with a 60%
* threshold required at target / 2, an 85% threshold required at target and a
* 95% threshold required at 2 * target. Each calculation is performed at the
* shortest time horizon which tracks the required target. Conservative
* estimates, however, required the 95% threshold at 2 * target be met for any
* longer time horizons also.
*/
CFeeRate CBlockPolicyEstimator::estimateSmartFee(int confTarget, int *answerFoundAtTarget, const CTxMemPool& pool, bool conservative) const
{
if (answerFoundAtTarget)
*answerFoundAtTarget = confTarget;

double median = -1;

{
LOCK(cs_feeEstimator);

// Return failure if trying to analyze a target we're not tracking
if (confTarget <= 0 || (unsigned int)confTarget > feeStats->GetMaxConfirms())
if (confTarget <= 0 || (unsigned int)confTarget > longStats->GetMaxConfirms())
return CFeeRate(0);

// It's not possible to get reasonable estimates for confTarget of 1
if (confTarget == 1)
confTarget = 2;

while (median < 0 && (unsigned int)confTarget <= feeStats->GetMaxConfirms()) {
median = feeStats->EstimateMedianVal(confTarget++, SUFFICIENT_FEETXS, MIN_SUCCESS_PCT, true, nBestSeenHeight);
unsigned int maxUsableEstimate = MaxUsableEstimate();
if (maxUsableEstimate <= 1)
return CFeeRate(0);

if ((unsigned int)confTarget > maxUsableEstimate) {
confTarget = maxUsableEstimate;
}

assert(confTarget > 0); //estimateCombinedFee and estimateConservativeFee take unsigned ints

/** true is passed to estimateCombined fee for target/2 and target so
* that we check the max confirms for shorter time horizons as well.
* This is necessary to preserve monotonically increasing estimates.
* For non-conservative estimates we do the same thing for 2*target, but
* for conservative estimates we want to skip these shorter horizons
* checks for 2*target becuase we are taking the max over all time
* horizons so we already have monotonically increasing estimates and
* the purpose of conservative estimates is not to let short term
* fluctuations lower our estimates by too much.
*/
double halfEst = estimateCombinedFee(confTarget/2, HALF_SUCCESS_PCT, true);
double actualEst = estimateCombinedFee(confTarget, SUCCESS_PCT, true);
double doubleEst = estimateCombinedFee(2 * confTarget, DOUBLE_SUCCESS_PCT, !conservative);
median = halfEst;
if (actualEst > median) {
median = actualEst;
}
if (doubleEst > median) {
median = doubleEst;
}

if (conservative || median == -1) {
double consEst = estimateConservativeFee(2 * confTarget);
if (consEst > median) {
median = consEst;
}
}
} // Must unlock cs_feeEstimator before taking mempool locks

if (answerFoundAtTarget)
*answerFoundAtTarget = confTarget - 1;
*answerFoundAtTarget = confTarget;

// If mempool is limiting txs , return at least the min feerate from the mempool
CAmount minPoolFee = pool.GetMinFee(gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000).GetFeePerK();
@@ -574,6 +822,7 @@ CFeeRate CBlockPolicyEstimator::estimateSmartFee(int confTarget, int *answerFoun
return CFeeRate(median);
}


bool CBlockPolicyEstimator::Write(CAutoFile& fileout) const
{
try {
@@ -581,7 +830,16 @@ bool CBlockPolicyEstimator::Write(CAutoFile& fileout) const
fileout << 140100; // version required to read: 0.14.1 or later
fileout << CLIENT_VERSION; // version that wrote the file
fileout << nBestSeenHeight;
if (BlockSpan() > HistoricalBlockSpan()/2) {
fileout << firstRecordedHeight << nBestSeenHeight;
}
else {
fileout << historicalFirst << historicalBest;
}
fileout << buckets;
feeStats->Write(fileout);
shortStats->Write(fileout);
longStats->Write(fileout);
}
catch (const std::exception&) {
LogPrintf("CBlockPolicyEstimator::Write(): unable to write policy estimator data (non-fatal)\n");
@@ -594,19 +852,95 @@ bool CBlockPolicyEstimator::Read(CAutoFile& filein)
{
try {
LOCK(cs_feeEstimator);
int nVersionRequired, nVersionThatWrote, nFileBestSeenHeight;
int nVersionRequired, nVersionThatWrote;
unsigned int nFileBestSeenHeight, nFileHistoricalFirst, nFileHistoricalBest;
filein >> nVersionRequired >> nVersionThatWrote;
if (nVersionRequired > CLIENT_VERSION)
return error("CBlockPolicyEstimator::Read(): up-version (%d) fee estimate file", nVersionRequired);

// Read fee estimates file into temporary variables so existing data
// structures aren't corrupted if there is an exception.
filein >> nFileBestSeenHeight;
feeStats->Read(filein);
nBestSeenHeight = nFileBestSeenHeight;
// if nVersionThatWrote < 120300 then another TxConfirmStats (for priority) follows but can be ignored.

if (nVersionThatWrote < 149900) {
// Read the old fee estimates file for temporary use, but then discard. Will start collecting data from scratch.
// decay is stored before buckets in old versions, so pre-read decay and pass into TxConfirmStats constructor
double tempDecay;
filein >> tempDecay;
if (tempDecay <= 0 || tempDecay >= 1)
throw std::runtime_error("Corrupt estimates file. Decay must be between 0 and 1 (non-inclusive)");

std::vector<double> tempBuckets;
filein >> tempBuckets;
size_t tempNum = tempBuckets.size();
if (tempNum <= 1 || tempNum > 1000)
throw std::runtime_error("Corrupt estimates file. Must have between 2 and 1000 feerate buckets");

std::map<double, unsigned int> tempMap;

std::unique_ptr<TxConfirmStats> tempFeeStats(new TxConfirmStats(tempBuckets, tempMap, MED_BLOCK_PERIODS, tempDecay, 1));
tempFeeStats->Read(filein, nVersionThatWrote, tempNum);
// if nVersionThatWrote < 120300 then another TxConfirmStats (for priority) follows but can be ignored.

tempMap.clear();
for (unsigned int i = 0; i < tempBuckets.size(); i++) {
tempMap[tempBuckets[i]] = i;
}
}
else { // nVersionThatWrote >= 149900
filein >> nFileHistoricalFirst >> nFileHistoricalBest;
if (nFileHistoricalFirst > nFileHistoricalBest || nFileHistoricalBest > nFileBestSeenHeight) {
throw std::runtime_error("Corrupt estimates file. Historical block range for estimates is invalid");
}
std::vector<double> fileBuckets;
filein >> fileBuckets;
size_t numBuckets = fileBuckets.size();
if (numBuckets <= 1 || numBuckets > 1000)
throw std::runtime_error("Corrupt estimates file. Must have between 2 and 1000 feerate buckets");

std::unique_ptr<TxConfirmStats> fileFeeStats(new TxConfirmStats(buckets, bucketMap, MED_BLOCK_PERIODS, MED_DECAY, MED_SCALE));
std::unique_ptr<TxConfirmStats> fileShortStats(new TxConfirmStats(buckets, bucketMap, SHORT_BLOCK_PERIODS, SHORT_DECAY, SHORT_SCALE));
std::unique_ptr<TxConfirmStats> fileLongStats(new TxConfirmStats(buckets, bucketMap, LONG_BLOCK_PERIODS, LONG_DECAY, LONG_SCALE));
fileFeeStats->Read(filein, nVersionThatWrote, numBuckets);
fileShortStats->Read(filein, nVersionThatWrote, numBuckets);
fileLongStats->Read(filein, nVersionThatWrote, numBuckets);

// Fee estimates file parsed correctly
// Copy buckets from file and refresh our bucketmap
buckets = fileBuckets;
bucketMap.clear();
for (unsigned int i = 0; i < buckets.size(); i++) {
bucketMap[buckets[i]] = i;
}

// Destroy old TxConfirmStats and point to new ones that already reference buckets and bucketMap
delete feeStats;
delete shortStats;
delete longStats;
feeStats = fileFeeStats.release();
shortStats = fileShortStats.release();
longStats = fileLongStats.release();

nBestSeenHeight = nFileBestSeenHeight;
historicalFirst = nFileHistoricalFirst;
historicalBest = nFileHistoricalBest;
}
}
catch (const std::exception&) {
LogPrintf("CBlockPolicyEstimator::Read(): unable to read policy estimator data (non-fatal)\n");
catch (const std::exception& e) {
LogPrintf("CBlockPolicyEstimator::Read(): unable to read policy estimator data (non-fatal): %s\n",e.what());
return false;
}
return true;
}

void CBlockPolicyEstimator::FlushUnconfirmed(CTxMemPool& pool) {
int64_t startclear = GetTimeMicros();
std::vector<uint256> txids;
pool.queryHashes(txids);
LOCK(cs_feeEstimator);
for (auto& txid : txids) {
removeTx(txid, false);
}
int64_t endclear = GetTimeMicros();
LogPrint(BCLog::ESTIMATEFEE, "Recorded %u unconfirmed txs from mempool in %ld micros\n",txids.size(), endclear - startclear);
}
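Before the header hunks below, the key mechanical change in the TxConfirmStats implementation above is that confirmations are now bucketed into periods of `scale` blocks and recorded directly into the exponentially decayed moving averages, replacing the old per-block `curBlockConf` staging. A stripped-down sketch of just that arithmetic, with the failure tracking, mempool counters and per-bucket fee totals omitted (a simplified model, not the class from the patch):

```cpp
#include <vector>

// Simplified model of TxConfirmStats' confirmation tracking after this PR.
struct MiniConfirmStats {
    std::vector<std::vector<double>> confAvg; // confAvg[period][bucket]
    std::vector<double> txCtAvg;              // per-bucket tx count average
    unsigned int scale;                       // blocks per period
    double decay;                             // per-block decay factor

    // blocksToConfirm is converted to periods, rounding up, and every period
    // count from that point on is bumped ("confirmed within <= i periods").
    void Record(int blocksToConfirm, unsigned int bucketindex) {
        int periodsToConfirm = (blocksToConfirm + scale - 1) / scale;
        for (size_t i = periodsToConfirm; i <= confAvg.size(); i++)
            confAvg[i - 1][bucketindex]++;
        txCtAvg[bucketindex]++;
    }

    // Called once per new block, before that block's txs are recorded:
    // only decays the history, since Record adds new data points directly.
    void UpdateMovingAverages() {
        for (size_t j = 0; j < txCtAvg.size(); j++) {
            for (size_t i = 0; i < confAvg.size(); i++)
                confAvg[i][j] *= decay;
            txCtAvg[j] *= decay;
        }
    }
};
```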
@@ -42,53 +42,57 @@ class TxConfirmStats;
* within your desired 5 blocks.
*
* Here is a brief description of the implementation:
* When a transaction enters the mempool, we
* track the height of the block chain at entry. Whenever a block comes in,
* we count the number of transactions in each bucket and the total amount of feerate
* paid in each bucket. Then we calculate how many blocks Y it took each
* transaction to be mined and we track an array of counters in each bucket
* for how long it to took transactions to get confirmed from 1 to a max of 25
* and we increment all the counters from Y up to 25. This is because for any
* number Z>=Y the transaction was successfully mined within Z blocks. We
* want to save a history of this information, so at any time we have a
* counter of the total number of transactions that happened in a given feerate
* bucket and the total number that were confirmed in each number 1-25 blocks
* or less for any bucket. We save this history by keeping an exponentially
* decaying moving average of each one of these stats. Furthermore we also
* keep track of the number unmined (in mempool) transactions in each bucket
* and for how many blocks they have been outstanding and use that to increase
* the number of transactions we've seen in that feerate bucket when calculating
* an estimate for any number of confirmations below the number of blocks
* they've been outstanding.
* When a transaction enters the mempool, we track the height of the block chain
* at entry. All further calculations are conducted only on this set of "seen"
* transactions. Whenever a block comes in, we count the number of transactions
* in each bucket and the total amount of feerate paid in each bucket. Then we
* calculate how many blocks Y it took each transaction to be mined. We convert
* from a number of blocks to a number of periods Y' each encompassing "scale"
* blocks. This is is tracked in 3 different data sets each up to a maximum
* number of periods. Within each data set we have an array of counters in each
* feerate bucket and we increment all the counters from Y' up to max periods
* representing that a tx was successfullly confirmed in less than or equal to
* that many periods. We want to save a history of this information, so at any
* time we have a counter of the total number of transactions that happened in a
* given feerate bucket and the total number that were confirmed in each of the
* periods or less for any bucket. We save this history by keeping an
* exponentially decaying moving average of each one of these stats. This is
* done for a different decay in each of the 3 data sets to keep relevant data
* from different time horizons. Furthermore we also keep track of the number
* unmined (in mempool or left mempool without being included in a block)
* transactions in each bucket and for how many blocks they have been
* outstanding and use both of these numbers to increase the number of transactions
* we've seen in that feerate bucket when calculating an estimate for any number
* of confirmations below the number of blocks they've been outstanding.
*/

/** Track confirm delays up to 25 blocks, can't estimate beyond that */
static const unsigned int MAX_BLOCK_CONFIRMS = 25;
/* Identifier for each of the 3 different TxConfirmStats which will track
* history over different time horizons. */
enum FeeEstimateHorizon {
SHORT_HALFLIFE = 0,
MED_HALFLIFE = 1,
LONG_HALFLIFE = 2
};

/** Decay of .998 is a half-life of 346 blocks or about 14.4 hours */
static const double DEFAULT_DECAY = .998;
/* Used to return detailed information about a feerate bucket */
struct EstimatorBucket
{
double start = -1;
double end = -1;
double withinTarget = 0;
double totalConfirmed = 0;
double inMempool = 0;
double leftMempool = 0;
};

/** Require greater than 95% of X feerate transactions to be confirmed within Y blocks for X to be big enough */
static const double MIN_SUCCESS_PCT = .95;

/** Require an avg of 1 tx in the combined feerate bucket per block to have stat significance */
static const double SUFFICIENT_FEETXS = 1;

// Minimum and Maximum values for tracking feerates
// The MIN_BUCKET_FEERATE should just be set to the lowest reasonable feerate we
// might ever want to track. Historically this has been 1000 since it was
// inheriting DEFAULT_MIN_RELAY_TX_FEE and changing it is disruptive as it
// invalidates old estimates files. So leave it at 1000 unless it becomes
// necessary to lower it, and then lower it substantially.
static constexpr double MIN_BUCKET_FEERATE = 1000;
static const double MAX_BUCKET_FEERATE = 1e7;
static const double INF_FEERATE = MAX_MONEY;

// We have to lump transactions into buckets based on feerate, but we want to be able
// to give accurate estimates over a large range of potential feerates
// Therefore it makes sense to exponentially space the buckets
/** Spacing of FeeRate buckets */
static const double FEE_SPACING = 1.1;
/* Used to return detailed information about a fee estimate calculation */
struct EstimationResult
{
EstimatorBucket pass;
EstimatorBucket fail;
double decay;
unsigned int scale;
};

/**
* We want to be able to estimate feerates that are needed on tx's to be included in
@ -97,6 +101,55 @@ static const double FEE_SPACING = 1.1;
|
||||
*/
|
||||
class CBlockPolicyEstimator
|
||||
{
|
||||
private:
|
||||
/** Track confirm delays up to 12 blocks for short horizon */
|
||||
static constexpr unsigned int SHORT_BLOCK_PERIODS = 12;
|
||||
static constexpr unsigned int SHORT_SCALE = 1;
|
||||
/** Track confirm delays up to 48 blocks for medium horizon */
|
||||
static constexpr unsigned int MED_BLOCK_PERIODS = 24;
|
||||
static constexpr unsigned int MED_SCALE = 2;
|
||||
/** Track confirm delays up to 1008 blocks for long horizon */
|
||||
static constexpr unsigned int LONG_BLOCK_PERIODS = 42;
|
||||
static constexpr unsigned int LONG_SCALE = 24;
|
||||
/** Historical estimates that are older than this aren't valid */
|
||||
static const unsigned int OLDEST_ESTIMATE_HISTORY = 6 * 1008;
|
||||
|
||||
/** Decay of .962 is a half-life of 18 blocks or about 3 hours */
|
||||
static constexpr double SHORT_DECAY = .962;
|
||||
/** Decay of .998 is a half-life of 144 blocks or about 1 day */
|
||||
static constexpr double MED_DECAY = .9952;
|
||||
/** Decay of .9995 is a half-life of 1008 blocks or about 1 week */
|
||||
static constexpr double LONG_DECAY = .99931;
|
||||
|
||||
/** Require greater than 60% of X feerate transactions to be confirmed within Y/2 blocks*/
|
||||
static constexpr double HALF_SUCCESS_PCT = .6;
|
||||
/** Require greater than 85% of X feerate transactions to be confirmed within Y blocks*/
|
||||
static constexpr double SUCCESS_PCT = .85;
|
||||
/** Require greater than 95% of X feerate transactions to be confirmed within 2 * Y blocks*/
|
||||
static constexpr double DOUBLE_SUCCESS_PCT = .95;
|
||||
|
||||
/** Require an avg of 0.1 tx in the combined feerate bucket per block to have stat significance */
|
||||
static constexpr double SUFFICIENT_FEETXS = 0.1;
|
||||
/** Require an avg of 0.5 tx when using short decay since there are fewer blocks considered*/
|
||||
static constexpr double SUFFICIENT_TXS_SHORT = 0.5;
|
||||
|
||||
/** Minimum and Maximum values for tracking feerates
|
||||
* The MIN_BUCKET_FEERATE should just be set to the lowest reasonable feerate we
|
||||
* might ever want to track. Historically this has been 1000 since it was
|
||||
* inheriting DEFAULT_MIN_RELAY_TX_FEE and changing it is disruptive as it
|
||||
* invalidates old estimates files. So leave it at 1000 unless it becomes
|
||||
* necessary to lower it, and then lower it substantially.
|
||||
*/
|
||||
static constexpr double MIN_BUCKET_FEERATE = 1000;
|
||||
static constexpr double MAX_BUCKET_FEERATE = 1e7;
|
||||
|
||||
/** Spacing of FeeRate buckets
|
||||
* We have to lump transactions into buckets based on feerate, but we want to be able
|
||||
* to give accurate estimates over a large range of potential feerates
|
||||
* Therefore it makes sense to exponentially space the buckets
|
||||
*/
|
||||
static constexpr double FEE_SPACING = 1.05;
|
||||
|
||||
public:
|
||||
/** Create new BlockPolicyEstimator and initialize stats tracking classes with default values */
|
||||
CBlockPolicyEstimator();
|
||||
@ -110,16 +163,23 @@ public:
|
||||
void processTransaction(const CTxMemPoolEntry& entry, bool validFeeEstimate);
|
||||
|
||||
/** Remove a transaction from the mempool tracking stats*/
|
||||
bool removeTx(uint256 hash);
|
||||
bool removeTx(uint256 hash, bool inBlock);
|
||||
|
||||
/** Return a feerate estimate */
|
||||
/** DEPRECATED. Return a feerate estimate */
|
||||
CFeeRate estimateFee(int confTarget) const;
|
||||
|
||||
/** Estimate feerate needed to get be included in a block within
|
||||
* confTarget blocks. If no answer can be given at confTarget, return an
|
||||
* estimate at the lowest target where one can be given.
|
||||
/** Estimate feerate needed to get be included in a block within confTarget
|
||||
* blocks. If no answer can be given at confTarget, return an estimate at
|
||||
* the closest target where one can be given. 'conservative' estimates are
|
||||
* valid over longer time horizons also.
|
||||
*/
|
||||
CFeeRate estimateSmartFee(int confTarget, int *answerFoundAtTarget, const CTxMemPool& pool) const;
|
||||
CFeeRate estimateSmartFee(int confTarget, int *answerFoundAtTarget, const CTxMemPool& pool, bool conservative = true) const;
|
||||
|
||||
/** Return a specific fee estimate calculation with a given success
|
||||
* threshold and time horizon, and optionally return detailed data about
|
||||
* calculation
|
||||
*/
|
||||
CFeeRate estimateRawFee(int confTarget, double successThreshold, FeeEstimateHorizon horizon, EstimationResult *result = nullptr) const;
|
||||
|
||||
/** Write estimation data to a file */
|
||||
bool Write(CAutoFile& fileout) const;
|
||||
@ -127,9 +187,15 @@ public:
|
||||
/** Read estimation data from a file */
|
||||
bool Read(CAutoFile& filein);
|
||||
|
||||
/** Empty mempool transactions on shutdown to record failure to confirm for txs still in mempool */
|
||||
void FlushUnconfirmed(CTxMemPool& pool);
|
||||
|
||||
private:
|
||||
CFeeRate minTrackedFee; //!< Passed to constructor to avoid dependency on main
|
||||
unsigned int nBestSeenHeight;
|
||||
unsigned int firstRecordedHeight;
|
||||
unsigned int historicalFirst;
|
||||
unsigned int historicalBest;
|
||||
|
||||
struct TxStatsInfo
|
||||
{
|
||||
unsigned int blockHeight;
|
||||
@ -142,14 +208,30 @@ private:

/** Classes to track historical data on transaction confirmations */
TxConfirmStats* feeStats;
TxConfirmStats* shortStats;
TxConfirmStats* longStats;

unsigned int trackedTxs;
unsigned int untrackedTxs;

std::vector<double> buckets; // The upper-bound of the range for the bucket (inclusive)
std::map<double, unsigned int> bucketMap; // Map of bucket upper-bound to index into all vectors by bucket

mutable CCriticalSection cs_feeEstimator;

/** Process a transaction confirmed in a block */
bool processBlockTx(unsigned int nBlockHeight, const CTxMemPoolEntry* entry);

/** Helper for estimateSmartFee */
double estimateCombinedFee(unsigned int confTarget, double successThreshold, bool checkShorterHorizon) const;
/** Helper for estimateSmartFee */
double estimateConservativeFee(unsigned int doubleTarget) const;
/** Number of blocks of data recorded while fee estimates have been running */
unsigned int BlockSpan() const;
/** Number of blocks of recorded fee estimate data represented in saved data file */
unsigned int HistoricalBlockSpan() const;
/** Calculation of the highest target for which a reasonable estimate can be provided */
unsigned int MaxUsableEstimate() const;
};

#endif /*BITCOIN_POLICYESTIMATOR_H */
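The estimator now keeps three TxConfirmStats trackers, one per time horizon, and estimateRawFee's horizon argument (0=short, 1=medium, 2=long) selects among them. A hedged sketch of that mapping follows; the helper name and the short/long enum spellings are assumptions, since only MED_HALFLIFE appears in this diff.

// Hypothetical helper for illustration; the commit may simply inline this switch.
TxConfirmStats* CBlockPolicyEstimator::StatsForHorizon(FeeEstimateHorizon horizon) const
{
    switch (horizon) {
    case FeeEstimateHorizon::SHORT_HALFLIFE: return shortStats; // fast decay, short targets
    case FeeEstimateHorizon::MED_HALFLIFE:   return feeStats;   // default horizon
    case FeeEstimateHorizon::LONG_HALFLIFE:  return longStats;  // slow decay, long targets
    }
    return nullptr;
}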
@ -137,6 +137,10 @@ static const CRPCConvertParam vRPCConvertParams[] =
{ "getrawmempool", 0, "verbose" },
{ "estimatefee", 0, "nblocks" },
{ "estimatesmartfee", 0, "nblocks" },
{ "estimatesmartfee", 1, "conservative" },
{ "estimaterawfee", 0, "nblocks" },
{ "estimaterawfee", 1, "threshold" },
{ "estimaterawfee", 2, "horizon" },
{ "prioritisetransaction", 1, "fee_delta" },
{ "setban", 2, "bantime" },
{ "setban", 3, "absolute" },
@ -825,6 +825,7 @@ UniValue estimatefee(const JSONRPCRequest& request)
if (request.fHelp || request.params.size() != 1)
throw std::runtime_error(
"estimatefee nblocks\n"
"\nDEPRECATED. Please use estimatesmartfee for more intelligent estimates."
"\nEstimates the approximate fee per kilobyte needed for a transaction to begin\n"
"confirmation within nblocks blocks.\n"
"\nArguments:\n"
@ -855,15 +856,18 @@ UniValue estimatefee(const JSONRPCRequest& request)

UniValue estimatesmartfee(const JSONRPCRequest& request)
{
if (request.fHelp || request.params.size() != 1)
if (request.fHelp || request.params.size() < 1 || request.params.size() > 2)
throw std::runtime_error(
"estimatesmartfee nblocks\n"
"\nWARNING: This interface is unstable and may disappear or change!\n"
"estimatesmartfee nblocks (conservative)\n"
"\nEstimates the approximate fee per kilobyte needed for a transaction to begin\n"
"confirmation within nblocks blocks if possible and return the number of blocks\n"
"for which the estimate is valid.\n"
"\nArguments:\n"
"1. nblocks (numeric)\n"
"1. nblocks (numeric)\n"
"2. conservative (bool, optional, default=true) Whether to return a more conservative estimate which\n"
" also satisfies a longer history. A conservative estimate potentially returns a higher\n"
" feerate and is more likely to be sufficient for the desired target, but is not as\n"
" responsive to short term drops in the prevailing fee market\n"
"\nResult:\n"
"{\n"
" \"feerate\" : x.x, (numeric) estimate fee-per-kilobyte (in " + CURRENCY_UNIT + ")\n"
@ -880,15 +884,102 @@ UniValue estimatesmartfee(const JSONRPCRequest& request)
RPCTypeCheck(request.params, {UniValue::VNUM});

int nBlocks = request.params[0].get_int();
bool conservative = true;
if (request.params.size() > 1 && !request.params[1].isNull()) {
RPCTypeCheckArgument(request.params[1], UniValue::VBOOL);
conservative = request.params[1].get_bool();
}

UniValue result(UniValue::VOBJ);
int answerFound;
CFeeRate feeRate = ::feeEstimator.estimateSmartFee(nBlocks, &answerFound, ::mempool);
CFeeRate feeRate = ::feeEstimator.estimateSmartFee(nBlocks, &answerFound, ::mempool, conservative);
result.push_back(Pair("feerate", feeRate == CFeeRate(0) ? -1.0 : ValueFromAmount(feeRate.GetFeePerK())));
result.push_back(Pair("blocks", answerFound));
return result;
}

UniValue estimaterawfee(const JSONRPCRequest& request)
{
if (request.fHelp || request.params.size() < 1 || request.params.size() > 3)
throw std::runtime_error(
"estimaterawfee nblocks (threshold horizon)\n"
"\nWARNING: This interface is unstable and may disappear or change!\n"
"\nWARNING: This is an advanced API call that is tightly coupled to the specific\n"
" implementation of fee estimation. The parameters it can be called with\n"
" and the results it returns will change if the internal implementation changes.\n"
"\nEstimates the approximate fee per kilobyte needed for a transaction to begin\n"
"confirmation within nblocks blocks if possible. Uses virtual transaction size as defined\n"
"in BIP 141 (witness data is discounted).\n"
"\nArguments:\n"
"1. nblocks (numeric)\n"
"2. threshold (numeric, optional) The proportion of transactions in a given feerate range that must have been\n"
" confirmed within nblocks in order to consider those feerates as high enough and proceed to check\n"
" lower buckets. Default: 0.95\n"
"3. horizon (numeric, optional) How long a history of estimates to consider. 0=short, 1=medium, 2=long.\n"
" Default: 1\n"
"\nResult:\n"
"{\n"
" \"feerate\" : x.x, (numeric) estimate fee-per-kilobyte (in BTC)\n"
" \"decay\" : x.x, (numeric) exponential decay (per block) for historical moving average of confirmation data\n"
" \"scale\" : x, (numeric) The resolution of confirmation targets at this time horizon\n"
" \"pass\" : { (json object) information about the lowest range of feerates to succeed in meeting the threshold\n"
" \"startrange\" : x.x, (numeric) start of feerate range\n"
" \"endrange\" : x.x, (numeric) end of feerate range\n"
" \"withintarget\" : x.x, (numeric) number of txs over history horizon in the feerate range that were confirmed within target\n"
" \"totalconfirmed\" : x.x, (numeric) number of txs over history horizon in the feerate range that were confirmed at any point\n"
" \"inmempool\" : x.x, (numeric) current number of txs in mempool in the feerate range unconfirmed for at least target blocks\n"
" \"leftmempool\" : x.x, (numeric) number of txs over history horizon in the feerate range that left mempool unconfirmed after target\n"
" }\n"
" \"fail\" : { ... } (json object) information about the highest range of feerates to fail to meet the threshold\n"
"}\n"
"\n"
"A negative feerate is returned if no answer can be given.\n"
"\nExample:\n"
+ HelpExampleCli("estimaterawfee", "6 0.9 1")
);

RPCTypeCheck(request.params, boost::assign::list_of(UniValue::VNUM)(UniValue::VNUM)(UniValue::VNUM), true);
RPCTypeCheckArgument(request.params[0], UniValue::VNUM);
int nBlocks = request.params[0].get_int();
double threshold = 0.95;
if (!request.params[1].isNull())
threshold = request.params[1].get_real();
FeeEstimateHorizon horizon = FeeEstimateHorizon::MED_HALFLIFE;
if (!request.params[2].isNull()) {
int horizonInt = request.params[2].get_int();
if (horizonInt < 0 || horizonInt > 2) {
throw JSONRPCError(RPC_TYPE_ERROR, "Invalid horizon for fee estimates");
} else {
horizon = (FeeEstimateHorizon)horizonInt;
}
}
UniValue result(UniValue::VOBJ);
CFeeRate feeRate;
EstimationResult buckets;
feeRate = ::feeEstimator.estimateRawFee(nBlocks, threshold, horizon, &buckets);

result.push_back(Pair("feerate", feeRate == CFeeRate(0) ? -1.0 : ValueFromAmount(feeRate.GetFeePerK())));
result.push_back(Pair("decay", buckets.decay));
result.push_back(Pair("scale", (int)buckets.scale));
UniValue passbucket(UniValue::VOBJ);
passbucket.push_back(Pair("startrange", round(buckets.pass.start)));
passbucket.push_back(Pair("endrange", round(buckets.pass.end)));
passbucket.push_back(Pair("withintarget", round(buckets.pass.withinTarget * 100.0) / 100.0));
passbucket.push_back(Pair("totalconfirmed", round(buckets.pass.totalConfirmed * 100.0) / 100.0));
passbucket.push_back(Pair("inmempool", round(buckets.pass.inMempool * 100.0) / 100.0));
passbucket.push_back(Pair("leftmempool", round(buckets.pass.leftMempool * 100.0) / 100.0));
result.push_back(Pair("pass", passbucket));
UniValue failbucket(UniValue::VOBJ);
failbucket.push_back(Pair("startrange", round(buckets.fail.start)));
failbucket.push_back(Pair("endrange", round(buckets.fail.end)));
failbucket.push_back(Pair("withintarget", round(buckets.fail.withinTarget * 100.0) / 100.0));
failbucket.push_back(Pair("totalconfirmed", round(buckets.fail.totalConfirmed * 100.0) / 100.0));
failbucket.push_back(Pair("inmempool", round(buckets.fail.inMempool * 100.0) / 100.0));
failbucket.push_back(Pair("leftmempool", round(buckets.fail.leftMempool * 100.0) / 100.0));
result.push_back(Pair("fail", failbucket));
return result;
}

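The threshold parameter is easiest to read as a pass ratio over a feerate range. A hedged sketch of the check implied by the result fields above (an interpretation based on the field names, not the committed estimation code): a range passes at a target when the fraction of its transactions confirmed within the target, out of everything observed in that range (confirmed, evicted, or still stuck in the mempool past the target), meets the threshold.

// Interpretation sketch, not the committed code: pass/fail test for one
// feerate range at a given confirmation target.
bool RangeMeetsThreshold(double withinTarget,    // confirmed within target
                         double totalConfirmed,  // confirmed at any point
                         double inMempool,       // still unconfirmed past target
                         double leftMempool,     // evicted unconfirmed past target
                         double threshold)       // e.g. 0.95
{
    const double observed = totalConfirmed + inMempool + leftMempool;
    return observed > 0 && withinTarget / observed >= threshold;
}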

static const CRPCCommand commands[] =
{ //  category              name                      actor (function)         okSafeMode
  //  --------------------- ------------------------  -----------------------  ----------
@ -903,7 +994,9 @@ static const CRPCCommand commands[] =
{ "generating",         "generatetoaddress",      &generatetoaddress,      true,  {"nblocks","address","maxtries"} },
#endif // ENABLE_MINER
{ "util",               "estimatefee",            &estimatefee,            true,  {"nblocks"} },
{ "util",               "estimatesmartfee",       &estimatesmartfee,       true,  {"nblocks"} },
{ "util",               "estimatesmartfee",       &estimatesmartfee,       true,  {"nblocks", "conservative"} },

{ "hidden",             "estimaterawfee",         &estimaterawfee,         true,  {"nblocks", "threshold", "horizon"} },
};

void RegisterMiningRPCCommands(CRPCTable &t)
@ -49,8 +49,8 @@ BOOST_AUTO_TEST_CASE(BlockPolicyEstimates)
int blocknum = 0;

// Loop through 200 blocks
// At a decay .998 and 4 fee transactions per block
// This makes the tx count about 1.33 per bucket, above the 1 threshold
// At a decay .9952 and 4 fee transactions per block
// This makes the tx count about 2.5 per bucket, well above the 0.1 threshold
while (blocknum < 200) {
for (int j = 0; j < 10; j++) { // For each fee
for (int k = 0; k < 4; k++) { // add 4 fee txs
@ -74,20 +74,14 @@ BOOST_AUTO_TEST_CASE(BlockPolicyEstimates)
}
mpool.removeForBlock(block, ++blocknum);
block.clear();
if (blocknum == 30) {
// At this point we should need to combine 5 buckets to get enough data points
// So estimateFee(1,2,3) should fail and estimateFee(4) should return somewhere around
// 8*baserate. estimateFee(4) %'s are 100,100,100,100,90 = average 98%
// Check after just a few txs that combining buckets works as expected
if (blocknum == 3) {
// At this point we should need to combine 3 buckets to get enough data points
// So estimateFee(1) should fail and estimateFee(2) should return somewhere around
// 9*baserate. estimateFee(2) %'s are 100,100,90 = average 97%
BOOST_CHECK(feeEst.estimateFee(1) == CFeeRate(0));
BOOST_CHECK(feeEst.estimateFee(2) == CFeeRate(0));
BOOST_CHECK(feeEst.estimateFee(3) == CFeeRate(0));
BOOST_CHECK(feeEst.estimateFee(4).GetFeePerK() < 8*baseRate.GetFeePerK() + deltaFee);
BOOST_CHECK(feeEst.estimateFee(4).GetFeePerK() > 8*baseRate.GetFeePerK() - deltaFee);
int answerFound;
BOOST_CHECK(feeEst.estimateSmartFee(1, &answerFound, mpool) == feeEst.estimateFee(4) && answerFound == 4);
BOOST_CHECK(feeEst.estimateSmartFee(3, &answerFound, mpool) == feeEst.estimateFee(4) && answerFound == 4);
BOOST_CHECK(feeEst.estimateSmartFee(4, &answerFound, mpool) == feeEst.estimateFee(4) && answerFound == 4);
BOOST_CHECK(feeEst.estimateSmartFee(8, &answerFound, mpool) == feeEst.estimateFee(8) && answerFound == 8);
BOOST_CHECK(feeEst.estimateFee(2).GetFeePerK() < 9*baseRate.GetFeePerK() + deltaFee);
BOOST_CHECK(feeEst.estimateFee(2).GetFeePerK() > 9*baseRate.GetFeePerK() - deltaFee);
}
}

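The decay figures the test comments refer to (.998 before, .9952 now for the default horizon) control an exponential moving average over blocks. A hedged sketch of the idea (the real TxConfirmStats bookkeeping in this commit is more involved): each block, every bucket's historical counters are scaled by the decay before the block's own counts are added, so a decay closer to 1 remembers more history.

// Conceptual sketch only: exponential decay of per-bucket counts.
// avg(0) = 0; avg(n+1) = avg(n) * decay + countInBlock(n+1)
#include <vector>

void DecayAndAdd(std::vector<double>& bucketAvg,
                 const std::vector<int>& bucketCountThisBlock,
                 double decay) // e.g. 0.9952 for the default horizon
{
    for (size_t i = 0; i < bucketAvg.size(); i++) {
        bucketAvg[i] = bucketAvg[i] * decay + bucketCountThisBlock[i];
    }
}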
@ -104,13 +98,14 @@ BOOST_AUTO_TEST_CASE(BlockPolicyEstimates)
BOOST_CHECK(origFeeEst[i-1] <= origFeeEst[i-2]);
}
int mult = 11-i;
if (i > 1) {
if (i % 2 == 0) { //At scale 2, test logic is only correct for even targets
BOOST_CHECK(origFeeEst[i-1] < mult*baseRate.GetFeePerK() + deltaFee);
BOOST_CHECK(origFeeEst[i-1] > mult*baseRate.GetFeePerK() - deltaFee);
}
else {
BOOST_CHECK(origFeeEst[i-1] == CFeeRate(0).GetFeePerK());
}
}
// Fill out rest of the original estimates
for (int i = 10; i <= 48; i++) {
origFeeEst.push_back(feeEst.estimateFee(i).GetFeePerK());
}

// Mine 50 more blocks with no transactions happening, estimates shouldn't change
@ -139,10 +134,8 @@ BOOST_AUTO_TEST_CASE(BlockPolicyEstimates)
mpool.removeForBlock(block, ++blocknum);
}

int answerFound;
for (int i = 1; i < 10;i++) {
BOOST_CHECK(feeEst.estimateFee(i) == CFeeRate(0) || feeEst.estimateFee(i).GetFeePerK() > origFeeEst[i-1] - deltaFee);
BOOST_CHECK(feeEst.estimateSmartFee(i, &answerFound, mpool).GetFeePerK() > origFeeEst[answerFound-1] - deltaFee);
}

// Mine all those transactions
@ -155,16 +148,16 @@ BOOST_AUTO_TEST_CASE(BlockPolicyEstimates)
txHashes[j].pop_back();
}
}
mpool.removeForBlock(block, 265);
mpool.removeForBlock(block, 266);
block.clear();
BOOST_CHECK(feeEst.estimateFee(1) == CFeeRate(0));
for (int i = 2; i < 10;i++) {
BOOST_CHECK(feeEst.estimateFee(i).GetFeePerK() > origFeeEst[i-1] - deltaFee);
BOOST_CHECK(feeEst.estimateFee(i) == CFeeRate(0) || feeEst.estimateFee(i).GetFeePerK() > origFeeEst[i-1] - deltaFee);
}

// Mine 200 more blocks where everything is mined every block
// Mine 400 more blocks where everything is mined every block
// Estimates should be below original estimates
while (blocknum < 465) {
while (blocknum < 665) {
for (int j = 0; j < 10; j++) { // For each fee multiple
for (int k = 0; k < 4; k++) { // add 4 fee txs
tx.vin[0].prevout.n = 10000*blocknum+100*j+k;
@ -180,7 +173,7 @@ BOOST_AUTO_TEST_CASE(BlockPolicyEstimates)
block.clear();
}
BOOST_CHECK(feeEst.estimateFee(1) == CFeeRate(0));
for (int i = 2; i < 10; i++) {
for (int i = 2; i < 9; i++) { // At 9, the original estimate was already at the bottom (b/c scale = 2)
BOOST_CHECK(feeEst.estimateFee(i).GetFeePerK() < origFeeEst[i-1] - deltaFee);
}

@ -190,7 +183,7 @@ BOOST_AUTO_TEST_CASE(BlockPolicyEstimates)
mpool.TrimToSize(1);
BOOST_CHECK(mpool.GetMinFee(1).GetFeePerK() > feeV[5]);
for (int i = 1; i < 10; i++) {
BOOST_CHECK(feeEst.estimateSmartFee(i, NULL, mpool).GetFeePerK() >= feeEst.estimateFee(i).GetFeePerK());
BOOST_CHECK(feeEst.estimateSmartFee(i, NULL, mpool).GetFeePerK() >= feeEst.estimateRawFee(i, 0.85, FeeEstimateHorizon::MED_HALFLIFE).GetFeePerK());
BOOST_CHECK(feeEst.estimateSmartFee(i, NULL, mpool).GetFeePerK() >= mpool.GetMinFee(1).GetFeePerK());
}
}

@ -693,7 +693,7 @@ void CTxMemPool::removeUnchecked(txiter it, MemPoolRemovalReason reason)
mapLinks.erase(it);
mapTx.erase(it);
nTransactionsUpdated++;
if (minerPolicyEstimator) {minerPolicyEstimator->removeTx(hash);}
if (minerPolicyEstimator) {minerPolicyEstimator->removeTx(hash, false);}
removeAddressIndex(hash);
removeSpentIndex(hash);
}

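The false passed here is the new inBlock flag from removeTx(hash, inBlock) declared above. A hedged reading of the call sites in this commit (semantics inferred from the declarations, not spelled out in the diff): a transaction that leaves the mempool without being confirmed can now be counted against its feerate bucket's failure statistics, while confirmed transactions are credited through the estimator's block-processing path.

// Interpretation sketch, not the committed estimator code; includes omitted.
void OnTxRemovedFromMempool(CBlockPolicyEstimator& estimator,
                            const uint256& hash, bool confirmedInBlock)
{
    if (confirmedInBlock) {
        // Confirmation is credited by the block-processing path;
        // removeTx(hash, true) just drops the tracking entry.
        estimator.removeTx(hash, true);
    } else {
        // Evicted, expired, conflicted, or abandoned: may be recorded
        // as a failure to confirm for its feerate bucket.
        estimator.removeTx(hash, false);
    }
}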
@ -116,8 +116,8 @@ def check_estimates(node, fees_seen, max_invalid, print_estimates = True):
for i,e in enumerate(all_estimates): # estimate is for i+1
if e >= 0:
valid_estimate = True
# estimatesmartfee should return the same result
assert_equal(node.estimatesmartfee(i+1)["feerate"], e)
if i >= 13: # for n>=14 estimatesmartfee(n/2) should be at least as high as estimatefee(n)
assert(node.estimatesmartfee((i+1)//2)["feerate"] > float(e) - delta)

else:
invalid_estimates += 1