// dash/src/txmempool.cpp

// Copyright (c) 2009-2010 Satoshi Nakamoto
// Copyright (c) 2009-2015 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include "txmempool.h"
#include "clientversion.h"
#include "consensus/consensus.h"
#include "consensus/validation.h"
#include "validation.h"
#include "policy/fees.h"
#include "random.h"
#include "streams.h"
#include "timedata.h"
#include "util.h"
#include "utilmoneystr.h"
#include "utiltime.h"
#include "version.h"
using namespace std;
CTxMemPoolEntry::CTxMemPoolEntry(const CTransaction& _tx, const CAmount& _nFee,
                                 int64_t _nTime, double _entryPriority, unsigned int _entryHeight,
                                 bool poolHasNoInputsOf, CAmount _inChainInputValue,
                                 bool _spendsCoinbase, unsigned int _sigOps, LockPoints lp):
    tx(_tx), nFee(_nFee), nTime(_nTime), entryPriority(_entryPriority), entryHeight(_entryHeight),
    hadNoDependencies(poolHasNoInputsOf), inChainInputValue(_inChainInputValue),
    spendsCoinbase(_spendsCoinbase), sigOpCount(_sigOps), lockPoints(lp)
{
    nTxSize = ::GetSerializeSize(tx, SER_NETWORK, PROTOCOL_VERSION);
    nModSize = tx.CalculateModifiedSize(nTxSize);
    nUsageSize = RecursiveDynamicUsage(tx);
    nCountWithDescendants = 1;
    nSizeWithDescendants = nTxSize;
    nModFeesWithDescendants = nFee;
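    // Total input value equals total output value plus the fee, so the portion
    // of the inputs that is already confirmed in the chain can never exceed it;
    // the assert below is a cheap sanity check of that invariant.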
    CAmount nValueIn = tx.GetValueOut()+nFee;
    assert(inChainInputValue <= nValueIn);
    feeDelta = 0;
    nCountWithAncestors = 1;
    nSizeWithAncestors = nTxSize;
    nModFeesWithAncestors = nFee;
    nSigOpCountWithAncestors = sigOpCount;
}
CTxMemPoolEntry::CTxMemPoolEntry(const CTxMemPoolEntry& other)
{
    *this = other;
}
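// Priority at currentHeight is the priority at entry time plus the coin-age
// accumulated since then: (blocks elapsed * value of in-chain inputs) divided
// by the modified tx size. Illustrative example (made-up numbers): with
// inChainInputValue = 100,000,000 duffs and nModSize = 1,000 bytes, each new
// block adds 100,000 to the priority.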
double
CTxMemPoolEntry::GetPriority(unsigned int currentHeight) const
{
    double deltaPriority = ((double)(currentHeight-entryHeight)*inChainInputValue)/nModSize;
    double dResult = entryPriority + deltaPriority;
    if (dResult < 0) // This should only happen if it was called with a height below entry height
        dResult = 0;
    return dResult;
}
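// Replace the fee delta set via PrioritiseTransaction, adjusting the cached
// with-descendants and with-ancestors modified-fee totals by the difference so
// that package feerates stay consistent with the new delta.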
void CTxMemPoolEntry::UpdateFeeDelta(int64_t newFeeDelta)
{
    nModFeesWithDescendants += newFeeDelta - feeDelta;
    nModFeesWithAncestors += newFeeDelta - feeDelta;
    feeDelta = newFeeDelta;
}
void CTxMemPoolEntry::UpdateLockPoints(const LockPoints& lp)
{
    lockPoints = lp;
}
// Update the given tx for any in-mempool descendants.
// Assumes that setMemPoolChildren is correct for the given tx and all
// descendants.
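// Implementation note: descendants are gathered with an explicit work set
// (stageEntries) rather than recursion, so deep transaction chains cannot
// overflow the stack; cachedDescendants memoizes subtrees computed earlier in
// the same batch of updates.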
void CTxMemPool::UpdateForDescendants(txiter updateIt, cacheMap &cachedDescendants, const std::set<uint256> &setExclude)
{
    setEntries stageEntries, setAllDescendants;
    stageEntries = GetMemPoolChildren(updateIt);
    while (!stageEntries.empty()) {
        const txiter cit = *stageEntries.begin();
        setAllDescendants.insert(cit);
        stageEntries.erase(cit);
        const setEntries &setChildren = GetMemPoolChildren(cit);
        BOOST_FOREACH(const txiter childEntry, setChildren) {
            cacheMap::iterator cacheIt = cachedDescendants.find(childEntry);
            if (cacheIt != cachedDescendants.end()) {
                // We've already calculated this one, just add the entries for this set
                // but don't traverse again.
                BOOST_FOREACH(const txiter cacheEntry, cacheIt->second) {
                    setAllDescendants.insert(cacheEntry);
                }
            } else if (!setAllDescendants.count(childEntry)) {
                // Schedule for later processing
                stageEntries.insert(childEntry);
            }
        }
    }
    // setAllDescendants now contains all in-mempool descendants of updateIt.
    // Update and add to cached descendant map
    int64_t modifySize = 0;
    CAmount modifyFee = 0;
    int64_t modifyCount = 0;
    BOOST_FOREACH(txiter cit, setAllDescendants) {
        if (!setExclude.count(cit->GetTx().GetHash())) {
            modifySize += cit->GetTxSize();
            modifyFee += cit->GetModifiedFee();
            modifyCount++;
            cachedDescendants[updateIt].insert(cit);
            // Update ancestor state for each descendant
            mapTx.modify(cit, update_ancestor_state(updateIt->GetTxSize(), updateIt->GetModifiedFee(), 1, updateIt->GetSigOpCount()));
        }
    }
    mapTx.modify(updateIt, update_descendant_state(modifySize, modifyFee, modifyCount));
}
// vHashesToUpdate is the set of transaction hashes from a disconnected block
// which have been re-added to the mempool.
// For each entry, look for descendants that are outside vHashesToUpdate, and
// add fee/size information for such descendants to the parent.
// For each such descendant, also update the ancestor state to include the parent.
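// (Illustrative example: if the disconnected block contained parent P and
// child C, P precedes C in vHashesToUpdate because transactions in a block are
// topologically ordered, so the reverse iteration below processes C before P,
// letting P's descendant totals be computed in a single pass.)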
void CTxMemPool::UpdateTransactionsFromBlock(const std::vector<uint256> &vHashesToUpdate)
{
    LOCK(cs);
    // For each entry in vHashesToUpdate, store the set of in-mempool, but not
    // in-vHashesToUpdate transactions, so that we don't have to recalculate
    // descendants when we come across a previously seen entry.
    cacheMap mapMemPoolDescendantsToUpdate;
    // Use a set for lookups into vHashesToUpdate (these entries are already
    // accounted for in the state of their ancestors)
    std::set<uint256> setAlreadyIncluded(vHashesToUpdate.begin(), vHashesToUpdate.end());
    // Iterate in reverse, so that whenever we are looking at a transaction
    // we are sure that all in-mempool descendants have already been processed.
    // This maximizes the benefit of the descendant cache and guarantees that
    // setMemPoolChildren will be updated, an assumption made in
    // UpdateForDescendants.
    BOOST_REVERSE_FOREACH(const uint256 &hash, vHashesToUpdate) {
        // we cache the in-mempool children to avoid duplicate updates
        setEntries setChildren;
        // calculate children from mapNextTx
        txiter it = mapTx.find(hash);
        if (it == mapTx.end()) {
            continue;
        }
        std::map<COutPoint, CInPoint>::iterator iter = mapNextTx.lower_bound(COutPoint(hash, 0));
        // First calculate the children, and update setMemPoolChildren to
        // include them, and update their setMemPoolParents to include this tx.
        for (; iter != mapNextTx.end() && iter->first.hash == hash; ++iter) {
            const uint256 &childHash = iter->second.ptx->GetHash();
            txiter childIter = mapTx.find(childHash);
            assert(childIter != mapTx.end());
            // We can skip updating entries we've encountered before or that
            // are in the block (which are already accounted for).
            if (setChildren.insert(childIter).second && !setAlreadyIncluded.count(childHash)) {
                UpdateChild(it, childIter, true);
                UpdateParent(childIter, it, true);
            }
        }
        UpdateForDescendants(it, mapMemPoolDescendantsToUpdate, setAlreadyIncluded);
    }
}
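// Collect every in-mempool ancestor of 'entry' into setAncestors, failing fast
// with an explanatory errString if any ancestor/descendant count or size limit
// would be exceeded.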
bool CTxMemPool::CalculateMemPoolAncestors(const CTxMemPoolEntry &entry, setEntries &setAncestors, uint64_t limitAncestorCount, uint64_t limitAncestorSize, uint64_t limitDescendantCount, uint64_t limitDescendantSize, std::string &errString, bool fSearchForParents /* = true */) const
{
    setEntries parentHashes;
    const CTransaction &tx = entry.GetTx();
    if (fSearchForParents) {
        // Get parents of this transaction that are in the mempool
        // GetMemPoolParents() is only valid for entries in the mempool, so we
        // iterate mapTx to find parents.
        for (unsigned int i = 0; i < tx.vin.size(); i++) {
            txiter piter = mapTx.find(tx.vin[i].prevout.hash);
            if (piter != mapTx.end()) {
                parentHashes.insert(piter);
                if (parentHashes.size() + 1 > limitAncestorCount) {
                    errString = strprintf("too many unconfirmed parents [limit: %u]", limitAncestorCount);
                    return false;
                }
            }
        }
    } else {
        // If we're not searching for parents, we require this to be an
        // entry in the mempool already.
        txiter it = mapTx.iterator_to(entry);
        parentHashes = GetMemPoolParents(it);
    }
    size_t totalSizeWithAncestors = entry.GetTxSize();
    while (!parentHashes.empty()) {
        txiter stageit = *parentHashes.begin();
        setAncestors.insert(stageit);
        parentHashes.erase(stageit);
        totalSizeWithAncestors += stageit->GetTxSize();
        if (stageit->GetSizeWithDescendants() + entry.GetTxSize() > limitDescendantSize) {
            errString = strprintf("exceeds descendant size limit for tx %s [limit: %u]", stageit->GetTx().GetHash().ToString(), limitDescendantSize);
            return false;
        } else if (stageit->GetCountWithDescendants() + 1 > limitDescendantCount) {
            errString = strprintf("too many descendants for tx %s [limit: %u]", stageit->GetTx().GetHash().ToString(), limitDescendantCount);
            return false;
        } else if (totalSizeWithAncestors > limitAncestorSize) {
            errString = strprintf("exceeds ancestor size limit [limit: %u]", limitAncestorSize);
            return false;
        }
        const setEntries & setMemPoolParents = GetMemPoolParents(stageit);
        BOOST_FOREACH(const txiter &phash, setMemPoolParents) {
            // If this is a new ancestor, add it.
            if (setAncestors.count(phash) == 0) {
                parentHashes.insert(phash);
            }
            if (parentHashes.size() + setAncestors.size() + 1 > limitAncestorCount) {
                errString = strprintf("too many unconfirmed ancestors [limit: %u]", limitAncestorCount);
                return false;
            }
        }
    }
    return true;
}
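// Add or remove 'it' from the child sets of its in-mempool parents, and fold
// its size/fee/count into (or out of) the descendant state of every entry in
// setAncestors.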
void CTxMemPool::UpdateAncestorsOf(bool add, txiter it, setEntries &setAncestors)
{
    setEntries parentIters = GetMemPoolParents(it);
    // add or remove this tx as a child of each parent
    BOOST_FOREACH(txiter piter, parentIters) {
        UpdateChild(piter, it, add);
    }
    const int64_t updateCount = (add ? 1 : -1);
    const int64_t updateSize = updateCount * it->GetTxSize();
    const CAmount updateFee = updateCount * it->GetModifiedFee();
    BOOST_FOREACH(txiter ancestorIt, setAncestors) {
        mapTx.modify(ancestorIt, update_descendant_state(updateSize, updateFee, updateCount));
    }
}
void CTxMemPool::UpdateEntryForAncestors(txiter it, const setEntries &setAncestors)
{
    int64_t updateCount = setAncestors.size();
    int64_t updateSize = 0;
    CAmount updateFee = 0;
    int updateSigOps = 0;
    BOOST_FOREACH(txiter ancestorIt, setAncestors) {
        updateSize += ancestorIt->GetTxSize();
        updateFee += ancestorIt->GetModifiedFee();
        updateSigOps += ancestorIt->GetSigOpCount();
    }
    mapTx.modify(it, update_ancestor_state(updateSize, updateFee, updateCount, updateSigOps));
}
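// Sever the parent links of every in-mempool child of 'it'; used while 'it' is
// being removed from the mempool.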
void CTxMemPool::UpdateChildrenForRemoval(txiter it)
{
    const setEntries &setMemPoolChildren = GetMemPoolChildren(it);
    BOOST_FOREACH(txiter updateIt, setMemPoolChildren) {
        UpdateParent(updateIt, it, false);
    }
}
void CTxMemPool::UpdateForRemoveFromMempool(const setEntries &entriesToRemove, bool updateDescendants)
{
    // For each entry, walk back all ancestors and decrement size associated with this
    // transaction
    const uint64_t nNoLimit = std::numeric_limits<uint64_t>::max();
    if (updateDescendants) {
        // updateDescendants should be true whenever we're not recursively
        // removing a tx and all its descendants, eg when a transaction is
        // confirmed in a block.
        // Here we only update statistics and not data in mapLinks (which
        // we need to preserve until we're finished with all operations that
        // need to traverse the mempool).
        BOOST_FOREACH(txiter removeIt, entriesToRemove) {
            setEntries setDescendants;
            CalculateDescendants(removeIt, setDescendants);
            setDescendants.erase(removeIt); // don't update state for self
            int64_t modifySize = -((int64_t)removeIt->GetTxSize());
            CAmount modifyFee = -removeIt->GetModifiedFee();
            int modifySigOps = -removeIt->GetSigOpCount();
            BOOST_FOREACH(txiter dit, setDescendants) {
                mapTx.modify(dit, update_ancestor_state(modifySize, modifyFee, -1, modifySigOps));
            }
        }
    }
    BOOST_FOREACH(txiter removeIt, entriesToRemove) {
        setEntries setAncestors;
        const CTxMemPoolEntry &entry = *removeIt;
        std::string dummy;
        // Since this is a tx that is already in the mempool, we can call
        // CalculateMemPoolAncestors() with fSearchForParents = false. If the
        // mempool is in a consistent state, then using true or false should
        // both be correct, though false should be a bit faster.
        // However, if we happen to be in the middle of processing a reorg, then
        // the mempool can be in an inconsistent state. In this case, the set
        // of ancestors reachable via mapLinks will be the same as the set of
        // ancestors whose packages include this transaction, because when we
        // add a new transaction to the mempool in addUnchecked(), we assume it
        // has no children, and in the case of a reorg where that assumption is
        // false, the in-mempool children aren't linked to the in-block tx's
        // until UpdateTransactionsFromBlock() is called.
        // So if we're being called during a reorg, ie before
        // UpdateTransactionsFromBlock() has been called, then mapLinks[] will
        // differ from the set of mempool parents we'd calculate by searching,
        // and it's important that we use the mapLinks[] notion of ancestor
        // transactions as the set of things to update for removal.
        CalculateMemPoolAncestors(entry, setAncestors, nNoLimit, nNoLimit, nNoLimit, nNoLimit, dummy, false);
        // Note that UpdateAncestorsOf severs the child links that point to
        // removeIt in the entries for the parents of removeIt.
        UpdateAncestorsOf(false, removeIt, setAncestors);
    }
    // After updating all the ancestor sizes, we can now sever the link between each
    // transaction being removed and any mempool children (ie, update setMemPoolParents
    // for each direct child of a transaction being removed).
    BOOST_FOREACH(txiter removeIt, entriesToRemove) {
        UpdateChildrenForRemoval(removeIt);
    }
}
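// The two updaters below apply signed deltas to an entry's cached package
// totals; the asserts are cheap consistency checks, since the totals must stay
// strictly positive for any entry still in the mempool.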
void CTxMemPoolEntry::UpdateDescendantState(int64_t modifySize, CAmount modifyFee, int64_t modifyCount)
{
    nSizeWithDescendants += modifySize;
    assert(int64_t(nSizeWithDescendants) > 0);
    nModFeesWithDescendants += modifyFee;
    nCountWithDescendants += modifyCount;
    assert(int64_t(nCountWithDescendants) > 0);
}
void CTxMemPoolEntry::UpdateAncestorState(int64_t modifySize, CAmount modifyFee, int64_t modifyCount, int modifySigOps)
{
    nSizeWithAncestors += modifySize;
    assert(int64_t(nSizeWithAncestors) > 0);
    nModFeesWithAncestors += modifyFee;
    nCountWithAncestors += modifyCount;
    assert(int64_t(nCountWithAncestors) > 0);
    nSigOpCountWithAncestors += modifySigOps;
    assert(int(nSigOpCountWithAncestors) >= 0);
}
CTxMemPool::CTxMemPool(const CFeeRate& _minReasonableRelayFee) :
    nTransactionsUpdated(0)
{
    _clear(); //lock free clear
    // Sanity checks off by default for performance, because otherwise
    // accepting transactions becomes O(N^2) where N is the number
    // of transactions in the pool
    nCheckFrequency = 0;
    minerPolicyEstimator = new CBlockPolicyEstimator(_minReasonableRelayFee);
    minReasonableRelayFee = _minReasonableRelayFee;
}
CTxMemPool::~CTxMemPool()
{
    delete minerPolicyEstimator;
}
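// An outpoint counts as spent here iff some in-mempool transaction already
// consumes it; mapNextTx indexes every input of every mempool tx by the
// outpoint it spends.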
bool CTxMemPool::isSpent(const COutPoint& outpoint)
{
    LOCK(cs);
return mapNextTx.count(outpoint);
}
unsigned int CTxMemPool::GetTransactionsUpdated() const
{
LOCK(cs);
return nTransactionsUpdated;
}
void CTxMemPool::AddTransactionsUpdated(unsigned int n)
{
LOCK(cs);
nTransactionsUpdated += n;
}
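// Insert an already-validated entry and wire up its in-mempool parent links.
// setAncestors must hold the entry's in-mempool ancestors, as computed by
// CalculateMemPoolAncestors (see the overload below that computes them itself).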
bool CTxMemPool::addUnchecked(const uint256& hash, const CTxMemPoolEntry &entry, setEntries &setAncestors, bool fCurrentEstimate)
{
// Add to memory pool without checking anything.
// Used by AcceptToMemoryPool(), which DOES do
// all the appropriate checks.
LOCK(cs);
indexed_transaction_set::iterator newit = mapTx.insert(entry).first;
mapLinks.insert(make_pair(newit, TxLinks()));
// Update transaction for any feeDelta created by PrioritiseTransaction
// TODO: refactor so that the fee delta is calculated before inserting
// into mapTx.
std::map<uint256, std::pair<double, CAmount> >::const_iterator pos = mapDeltas.find(hash);
if (pos != mapDeltas.end()) {
const std::pair<double, CAmount> &deltas = pos->second;
if (deltas.second) {
mapTx.modify(newit, update_fee_delta(deltas.second));
}
}
// Update cachedInnerUsage to include contained transaction's usage.
// (When we update the entry for in-mempool parents, memory usage will be
// further updated.)
cachedInnerUsage += entry.DynamicMemoryUsage();
const CTransaction& tx = newit->GetTx();
std::set<uint256> setParentTransactions;
for (unsigned int i = 0; i < tx.vin.size(); i++) {
mapNextTx[tx.vin[i].prevout] = CInPoint(&tx, i);
setParentTransactions.insert(tx.vin[i].prevout.hash);
}
// Don't bother worrying about child transactions of this one.
// In the normal case of a new transaction arriving, there can't be any
// children yet, because such children would be orphans.
// The exception is a transaction re-entering the pool after being removed
// from a disconnected block; in that case, the disconnect block logic calls
// UpdateTransactionsFromBlock to clean up the mess we're leaving here.
// Update ancestors with information about this tx
BOOST_FOREACH (const uint256 &phash, setParentTransactions) {
txiter pit = mapTx.find(phash);
if (pit != mapTx.end()) {
UpdateParent(newit, pit, true);
}
}
UpdateAncestorsOf(true, newit, setAncestors);
UpdateEntryForAncestors(newit, setAncestors);
nTransactionsUpdated++;
totalTxSize += entry.GetTxSize();
minerPolicyEstimator->processTransaction(entry, fCurrentEstimate);
return true;
}
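// Index this entry's address activity: each spent input is recorded as a
// negative delta against the previous output's address and each new output
// as a positive delta, keyed by address type (1 = P2PKH, 2 = P2SH). Other
// script types are not indexed.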
void CTxMemPool::addAddressIndex(const CTxMemPoolEntry &entry, const CCoinsViewCache &view)
{
LOCK(cs);
const CTransaction& tx = entry.GetTx();
std::vector<CMempoolAddressDeltaKey> inserted;
uint256 txhash = tx.GetHash();
for (unsigned int j = 0; j < tx.vin.size(); j++) {
const CTxIn input = tx.vin[j];
const Coin& coin = view.AccessCoin(input.prevout);
const CTxOut &prevout = coin.out;
if (prevout.scriptPubKey.IsPayToScriptHash()) {
vector<unsigned char> hashBytes(prevout.scriptPubKey.begin()+2, prevout.scriptPubKey.begin()+22);
CMempoolAddressDeltaKey key(2, uint160(hashBytes), txhash, j, 1);
CMempoolAddressDelta delta(entry.GetTime(), prevout.nValue * -1, input.prevout.hash, input.prevout.n);
mapAddress.insert(make_pair(key, delta));
inserted.push_back(key);
} else if (prevout.scriptPubKey.IsPayToPublicKeyHash()) {
vector<unsigned char> hashBytes(prevout.scriptPubKey.begin()+3, prevout.scriptPubKey.begin()+23);
CMempoolAddressDeltaKey key(1, uint160(hashBytes), txhash, j, 1);
CMempoolAddressDelta delta(entry.GetTime(), prevout.nValue * -1, input.prevout.hash, input.prevout.n);
mapAddress.insert(make_pair(key, delta));
inserted.push_back(key);
}
}
for (unsigned int k = 0; k < tx.vout.size(); k++) {
const CTxOut &out = tx.vout[k];
if (out.scriptPubKey.IsPayToScriptHash()) {
vector<unsigned char> hashBytes(out.scriptPubKey.begin()+2, out.scriptPubKey.begin()+22);
CMempoolAddressDeltaKey key(2, uint160(hashBytes), txhash, k, 0);
mapAddress.insert(make_pair(key, CMempoolAddressDelta(entry.GetTime(), out.nValue)));
inserted.push_back(key);
} else if (out.scriptPubKey.IsPayToPublicKeyHash()) {
vector<unsigned char> hashBytes(out.scriptPubKey.begin()+3, out.scriptPubKey.begin()+23);
CMempoolAddressDeltaKey key(1, uint160(hashBytes), txhash, k, 0);
mapAddress.insert(make_pair(key, CMempoolAddressDelta(entry.GetTime(), out.nValue)));
inserted.push_back(key);
}
}
mapAddressInserted.insert(make_pair(txhash, inserted));
}
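// Collect all mempool deltas for the given (address, type) pairs by scanning
// mapAddress from the first key that matches each pair.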
bool CTxMemPool::getAddressIndex(std::vector<std::pair<uint160, int> > &addresses,
std::vector<std::pair<CMempoolAddressDeltaKey, CMempoolAddressDelta> > &results)
{
LOCK(cs);
for (std::vector<std::pair<uint160, int> >::iterator it = addresses.begin(); it != addresses.end(); it++) {
addressDeltaMap::iterator ait = mapAddress.lower_bound(CMempoolAddressDeltaKey((*it).second, (*it).first));
while (ait != mapAddress.end() && (*ait).first.addressBytes == (*it).first && (*ait).first.type == (*it).second) {
results.push_back(*ait);
ait++;
}
}
return true;
}
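// Erase every address-index key that was recorded for this transaction.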
bool CTxMemPool::removeAddressIndex(const uint256 txhash)
{
LOCK(cs);
addressDeltaMapInserted::iterator it = mapAddressInserted.find(txhash);
if (it != mapAddressInserted.end()) {
std::vector<CMempoolAddressDeltaKey> keys = (*it).second;
for (std::vector<CMempoolAddressDeltaKey>::iterator mit = keys.begin(); mit != keys.end(); mit++) {
mapAddress.erase(*mit);
}
mapAddressInserted.erase(it);
}
return true;
}
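// Index which outpoints this entry spends, mapping each spent outpoint to the
// spending txid/input index plus the value and address of the spent output,
// so spends can be queried before the spender is mined.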
void CTxMemPool::addSpentIndex(const CTxMemPoolEntry &entry, const CCoinsViewCache &view)
{
LOCK(cs);
const CTransaction& tx = entry.GetTx();
std::vector<CSpentIndexKey> inserted;
uint256 txhash = tx.GetHash();
for (unsigned int j = 0; j < tx.vin.size(); j++) {
const CTxIn input = tx.vin[j];
const Coin& coin = view.AccessCoin(input.prevout);
const CTxOut &prevout = coin.out;
uint160 addressHash;
int addressType;
if (prevout.scriptPubKey.IsPayToScriptHash()) {
addressHash = uint160(vector<unsigned char> (prevout.scriptPubKey.begin()+2, prevout.scriptPubKey.begin()+22));
addressType = 2;
} else if (prevout.scriptPubKey.IsPayToPublicKeyHash()) {
addressHash = uint160(vector<unsigned char> (prevout.scriptPubKey.begin()+3, prevout.scriptPubKey.begin()+23));
addressType = 1;
} else {
addressHash.SetNull();
addressType = 0;
}
CSpentIndexKey key = CSpentIndexKey(input.prevout.hash, input.prevout.n);
CSpentIndexValue value = CSpentIndexValue(txhash, j, -1, prevout.nValue, addressType, addressHash);
mapSpent.insert(make_pair(key, value));
inserted.push_back(key);
}
mapSpentInserted.insert(make_pair(txhash, inserted));
}
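// Look up the in-mempool spend of the outpoint described by key; returns
// false if no mempool transaction spends it.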
bool CTxMemPool::getSpentIndex(CSpentIndexKey &key, CSpentIndexValue &value)
{
LOCK(cs);
mapSpentIndex::iterator it;
it = mapSpent.find(key);
if (it != mapSpent.end()) {
value = it->second;
return true;
}
return false;
}
bool CTxMemPool::removeSpentIndex(const uint256 txhash)
{
LOCK(cs);
mapSpentIndexInserted::iterator it = mapSpentInserted.find(txhash);
if (it != mapSpentInserted.end()) {
std::vector<CSpentIndexKey> keys = (*it).second;
for (std::vector<CSpentIndexKey>::iterator mit = keys.begin(); mit != keys.end(); mit++) {
mapSpent.erase(*mit);
}
mapSpentInserted.erase(it);
}
return true;
}
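// Remove a single entry and all of its bookkeeping (mapNextTx, size and
// memory-usage accounting, links, fee estimator, address/spent indexes).
// Ancestor/descendant state must already have been updated; callers go
// through RemoveStaged for that.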
void CTxMemPool::removeUnchecked(txiter it)
{
const uint256 hash = it->GetTx().GetHash();
BOOST_FOREACH(const CTxIn& txin, it->GetTx().vin)
mapNextTx.erase(txin.prevout);
totalTxSize -= it->GetTxSize();
cachedInnerUsage -= it->DynamicMemoryUsage();
cachedInnerUsage -= memusage::DynamicUsage(mapLinks[it].parents) + memusage::DynamicUsage(mapLinks[it].children);
mapLinks.erase(it);
mapTx.erase(it);
nTransactionsUpdated++;
minerPolicyEstimator->removeTx(hash);
removeAddressIndex(hash);
removeSpentIndex(hash);
}
// Calculates descendants of entry that are not already in setDescendants, and adds to
// setDescendants. Assumes entryit is already a tx in the mempool and setMemPoolChildren
// is correct for tx and all descendants.
// Also assumes that if an entry is in setDescendants already, then all
// in-mempool descendants of it are already in setDescendants as well, so that we
// can save time by not iterating over those entries.
void CTxMemPool::CalculateDescendants(txiter entryit, setEntries &setDescendants)
{
setEntries stage;
if (setDescendants.count(entryit) == 0) {
stage.insert(entryit);
}
// Traverse down the children of entry, only adding children that are not
// accounted for in setDescendants already (because those children have either
// already been walked, or will be walked in this iteration).
while (!stage.empty()) {
txiter it = *stage.begin();
setDescendants.insert(it);
stage.erase(it);
const setEntries &setChildren = GetMemPoolChildren(it);
BOOST_FOREACH(const txiter &childiter, setChildren) {
if (!setDescendants.count(childiter)) {
stage.insert(childiter);
}
}
}
}
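// Remove origTx and all of its in-mempool descendants. If origTx itself is
// not in the pool (e.g. it was not re-accepted after a reorg), any in-pool
// transactions spending its outputs are still removed.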
void CTxMemPool::removeRecursive(const CTransaction &origTx, std::list<CTransaction>& removed)
{
// Remove transaction from memory pool
{
LOCK(cs);
setEntries txToRemove;
txiter origit = mapTx.find(origTx.GetHash());
if (origit != mapTx.end()) {
txToRemove.insert(origit);
} else {
// When recursively removing but origTx isn't in the mempool
// be sure to remove any children that are in the pool. This can
// happen during chain re-orgs if origTx isn't re-accepted into
// the mempool for any reason.
for (unsigned int i = 0; i < origTx.vout.size(); i++) {
std::map<COutPoint, CInPoint>::iterator it = mapNextTx.find(COutPoint(origTx.GetHash(), i));
if (it == mapNextTx.end())
continue;
txiter nextit = mapTx.find(it->second.ptx->GetHash());
assert(nextit != mapTx.end());
txToRemove.insert(nextit);
}
}
setEntries setAllRemoves;
BOOST_FOREACH(txiter it, txToRemove) {
CalculateDescendants(it, setAllRemoves);
}
BOOST_FOREACH(txiter it, setAllRemoves) {
removed.push_back(it->GetTx());
}
RemoveStaged(setAllRemoves, false);
}
}
void CTxMemPool::removeForReorg(const CCoinsViewCache *pcoins, unsigned int nMemPoolHeight, int flags)
{
// Remove transactions spending a coinbase which is now immature, and
// transactions which are no longer final
LOCK(cs);
list<CTransaction> transactionsToRemove;
for (indexed_transaction_set::const_iterator it = mapTx.begin(); it != mapTx.end(); it++) {
const CTransaction& tx = it->GetTx();
LockPoints lp = it->GetLockPoints();
bool validLP = TestLockPointValidity(&lp);
if (!CheckFinalTx(tx, flags) || !CheckSequenceLocks(tx, flags, &lp, validLP)) {
// Note if CheckSequenceLocks fails the LockPoints may still be invalid
// So it's critical that we remove the tx and not depend on the LockPoints.
transactionsToRemove.push_back(tx);
} else if (it->GetSpendsCoinbase()) {
BOOST_FOREACH(const CTxIn& txin, tx.vin) {
indexed_transaction_set::const_iterator it2 = mapTx.find(txin.prevout.hash);
if (it2 != mapTx.end())
continue;
const Coin &coin = pcoins->AccessCoin(txin.prevout);
if (nCheckFrequency != 0) assert(!coin.IsSpent());
if (coin.IsSpent() || (coin.IsCoinBase() && ((signed long)nMemPoolHeight) - coin.nHeight < COINBASE_MATURITY)) {
transactionsToRemove.push_back(tx);
break;
}
}
}
if (!validLP) {
mapTx.modify(it, update_lock_points(lp));
}
}
BOOST_FOREACH(const CTransaction& tx, transactionsToRemove) {
list<CTransaction> removed;
removeRecursive(tx, removed);
}
}
void CTxMemPool::removeConflicts(const CTransaction &tx, std::list<CTransaction>& removed)
{
// Remove transactions which depend on inputs of tx, recursively
list<CTransaction> result;
LOCK(cs);
BOOST_FOREACH(const CTxIn &txin, tx.vin) {
std::map<COutPoint, CInPoint>::iterator it = mapNextTx.find(txin.prevout);
if (it != mapNextTx.end()) {
const CTransaction &txConflict = *it->second.ptx;
if (txConflict != tx)
{
removeRecursive(txConflict, removed);
ClearPrioritisation(txConflict.GetHash());
}
}
}
}
/**
* Called when a block is connected. Removes from mempool and updates the miner fee estimator.
*/
void CTxMemPool::removeForBlock(const std::vector<CTransaction>& vtx, unsigned int nBlockHeight,
std::list<CTransaction>& conflicts, bool fCurrentEstimate)
{
LOCK(cs);
std::vector<CTxMemPoolEntry> entries;
BOOST_FOREACH(const CTransaction& tx, vtx)
{
uint256 hash = tx.GetHash();
indexed_transaction_set::iterator i = mapTx.find(hash);
if (i != mapTx.end())
entries.push_back(*i);
}
BOOST_FOREACH(const CTransaction& tx, vtx)
{
txiter it = mapTx.find(tx.GetHash());
if (it != mapTx.end()) {
setEntries stage;
stage.insert(it);
RemoveStaged(stage, true);
}
removeConflicts(tx, conflicts);
ClearPrioritisation(tx.GetHash());
}
// After the txs in the new block have been removed from the mempool, update policy estimates
minerPolicyEstimator->processBlock(nBlockHeight, entries, fCurrentEstimate);
lastRollingFeeUpdate = GetTime();
blockSinceLastRollingFeeBump = true;
}
void CTxMemPool::_clear()
{
mapLinks.clear();
mapTx.clear();
mapNextTx.clear();
totalTxSize = 0;
cachedInnerUsage = 0;
lastRollingFeeUpdate = GetTime();
blockSinceLastRollingFeeBump = false;
rollingMinimumFeeRate = 0;
++nTransactionsUpdated;
}
void CTxMemPool::clear()
{
LOCK(cs);
_clear();
}
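// Expensive internal consistency check, run on a random subset of calls
// controlled by nCheckFrequency. Verifies cached ancestor/descendant state,
// parent/child links, mapNextTx, and that the pool's contents could be
// applied on top of the current coins view.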
void CTxMemPool::check(const CCoinsViewCache *pcoins) const
{
if (nCheckFrequency == 0)
return;
if (insecure_rand() >= nCheckFrequency)
return;
LogPrint("mempool", "Checking mempool with %u transactions and %u inputs\n", (unsigned int)mapTx.size(), (unsigned int)mapNextTx.size());
uint64_t checkTotal = 0;
uint64_t innerUsage = 0;
CCoinsViewCache mempoolDuplicate(const_cast<CCoinsViewCache*>(pcoins));
LOCK(cs);
list<const CTxMemPoolEntry*> waitingOnDependants;
for (indexed_transaction_set::const_iterator it = mapTx.begin(); it != mapTx.end(); it++) {
unsigned int i = 0;
checkTotal += it->GetTxSize();
innerUsage += it->DynamicMemoryUsage();
const CTransaction& tx = it->GetTx();
txlinksMap::const_iterator linksiter = mapLinks.find(it);
assert(linksiter != mapLinks.end());
const TxLinks &links = linksiter->second;
innerUsage += memusage::DynamicUsage(links.parents) + memusage::DynamicUsage(links.children);
bool fDependsWait = false;
setEntries setParentCheck;
int64_t parentSizes = 0;
unsigned int parentSigOpCount = 0;
BOOST_FOREACH(const CTxIn &txin, tx.vin) {
// Check that every mempool transaction's inputs refer to available coins, or other mempool tx's.
indexed_transaction_set::const_iterator it2 = mapTx.find(txin.prevout.hash);
if (it2 != mapTx.end()) {
const CTransaction& tx2 = it2->GetTx();
assert(tx2.vout.size() > txin.prevout.n && !tx2.vout[txin.prevout.n].IsNull());
fDependsWait = true;
if (setParentCheck.insert(it2).second) {
parentSizes += it2->GetTxSize();
parentSigOpCount += it2->GetSigOpCount();
}
} else {
assert(pcoins->HaveCoin(txin.prevout));
}
// Check whether its inputs are marked in mapNextTx.
std::map<COutPoint, CInPoint>::const_iterator it3 = mapNextTx.find(txin.prevout);
assert(it3 != mapNextTx.end());
assert(it3->second.ptx == &tx);
assert(it3->second.n == i);
i++;
}
assert(setParentCheck == GetMemPoolParents(it));
// Verify ancestor state is correct.
setEntries setAncestors;
uint64_t nNoLimit = std::numeric_limits<uint64_t>::max();
std::string dummy;
CalculateMemPoolAncestors(*it, setAncestors, nNoLimit, nNoLimit, nNoLimit, nNoLimit, dummy);
uint64_t nCountCheck = setAncestors.size() + 1;
uint64_t nSizeCheck = it->GetTxSize();
CAmount nFeesCheck = it->GetModifiedFee();
unsigned int nSigOpCheck = it->GetSigOpCount();
BOOST_FOREACH(txiter ancestorIt, setAncestors) {
nSizeCheck += ancestorIt->GetTxSize();
nFeesCheck += ancestorIt->GetModifiedFee();
nSigOpCheck += ancestorIt->GetSigOpCount();
}
assert(it->GetCountWithAncestors() == nCountCheck);
assert(it->GetSizeWithAncestors() == nSizeCheck);
assert(it->GetSigOpCountWithAncestors() == nSigOpCheck);
assert(it->GetModFeesWithAncestors() == nFeesCheck);
// Check children against mapNextTx
CTxMemPool::setEntries setChildrenCheck;
std::map<COutPoint, CInPoint>::const_iterator iter = mapNextTx.lower_bound(COutPoint(it->GetTx().GetHash(), 0));
int64_t childSizes = 0;
for (; iter != mapNextTx.end() && iter->first.hash == it->GetTx().GetHash(); ++iter) {
txiter childit = mapTx.find(iter->second.ptx->GetHash());
assert(childit != mapTx.end()); // mapNextTx points to in-mempool transactions
if (setChildrenCheck.insert(childit).second) {
childSizes += childit->GetTxSize();
}
}
assert(setChildrenCheck == GetMemPoolChildren(it));
// Also check that the size-with-descendants is at least the entry's own size
// plus that of its immediate children. This is only a sanity check, not a
// definitive verification of the descendant-size calculation.
assert(it->GetSizeWithDescendants() >= childSizes + it->GetTxSize());
if (fDependsWait)
waitingOnDependants.push_back(&(*it));
else {
CValidationState state;
assert(CheckInputs(tx, state, mempoolDuplicate, false, 0, false, NULL));
UpdateCoins(tx, mempoolDuplicate, 1000000);
}
}
unsigned int stepsSinceLastRemove = 0;
while (!waitingOnDependants.empty()) {
const CTxMemPoolEntry* entry = waitingOnDependants.front();
waitingOnDependants.pop_front();
CValidationState state;
if (!mempoolDuplicate.HaveInputs(entry->GetTx())) {
waitingOnDependants.push_back(entry);
stepsSinceLastRemove++;
assert(stepsSinceLastRemove < waitingOnDependants.size());
} else {
assert(CheckInputs(entry->GetTx(), state, mempoolDuplicate, false, 0, false, NULL));
UpdateCoins(entry->GetTx(), mempoolDuplicate, 1000000);
stepsSinceLastRemove = 0;
}
}
for (std::map<COutPoint, CInPoint>::const_iterator it = mapNextTx.begin(); it != mapNextTx.end(); it++) {
uint256 hash = it->second.ptx->GetHash();
indexed_transaction_set::const_iterator it2 = mapTx.find(hash);
assert(it2 != mapTx.end());
const CTransaction& tx = it2->GetTx();
assert(&tx == it->second.ptx);
assert(tx.vin.size() > it->second.n);
assert(it->first == it->second.ptx->vin[it->second.n].prevout);
}
assert(totalTxSize == checkTotal);
assert(innerUsage == cachedInnerUsage);
}
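// Sort order for relay: transactions with fewer in-mempool ancestors first,
// ties broken by CompareTxMemPoolEntryByScore. A hash missing from the pool
// sorts after one that is present.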
bool CTxMemPool::CompareDepthAndScore(const uint256& hasha, const uint256& hashb)
{
LOCK(cs);
indexed_transaction_set::const_iterator i = mapTx.find(hasha);
if (i == mapTx.end()) return false;
indexed_transaction_set::const_iterator j = mapTx.find(hashb);
if (j == mapTx.end()) return true;
uint64_t counta = i->GetCountWithAncestors();
uint64_t countb = j->GetCountWithAncestors();
if (counta == countb) {
return CompareTxMemPoolEntryByScore()(*i, *j);
}
return counta < countb;
}
namespace {
class DepthAndScoreComparator
{
CTxMemPool *mp;
public:
DepthAndScoreComparator(CTxMemPool *mempool) : mp(mempool) {}
bool operator()(const uint256& a, const uint256& b) { return mp->CompareDepthAndScore(a, b); }
};
}
void CTxMemPool::queryHashes(vector<uint256>& vtxid)
{
vtxid.clear();
LOCK(cs);
vtxid.reserve(mapTx.size());
for (indexed_transaction_set::iterator mi = mapTx.begin(); mi != mapTx.end(); ++mi)
vtxid.push_back(mi->GetTx().GetHash());
std::sort(vtxid.begin(), vtxid.end(), DepthAndScoreComparator(this));
}
bool CTxMemPool::lookup(uint256 hash, CTransaction& result, int64_t& time) const
{
LOCK(cs);
indexed_transaction_set::const_iterator i = mapTx.find(hash);
if (i == mapTx.end()) return false;
result = i->GetTx();
time = i->GetTime();
return true;
}
bool CTxMemPool::lookup(uint256 hash, CTransaction& result) const
{
int64_t time;
return CTxMemPool::lookup(hash, result, time);
}
bool CTxMemPool::lookupFeeRate(const uint256& hash, CFeeRate& feeRate) const
{
LOCK(cs);
indexed_transaction_set::const_iterator i = mapTx.find(hash);
if (i == mapTx.end())
return false;
feeRate = CFeeRate(i->GetFee(), i->GetTxSize());
return true;
}
CFeeRate CTxMemPool::estimateFee(int nBlocks) const
{
LOCK(cs);
return minerPolicyEstimator->estimateFee(nBlocks);
}
CFeeRate CTxMemPool::estimateSmartFee(int nBlocks, int *answerFoundAtBlocks) const
{
LOCK(cs);
return minerPolicyEstimator->estimateSmartFee(nBlocks, answerFoundAtBlocks, *this);
}
double CTxMemPool::estimatePriority(int nBlocks) const
{
LOCK(cs);
return minerPolicyEstimator->estimatePriority(nBlocks);
}
double CTxMemPool::estimateSmartPriority(int nBlocks, int *answerFoundAtBlocks) const
{
LOCK(cs);
return minerPolicyEstimator->estimateSmartPriority(nBlocks, answerFoundAtBlocks, *this);
}
bool
CTxMemPool::WriteFeeEstimates(CAutoFile& fileout) const
{
try {
LOCK(cs);
fileout << 120000; // version required to read: 0.12.00 or later
fileout << CLIENT_VERSION; // version that wrote the file
minerPolicyEstimator->Write(fileout);
}
catch (const std::exception&) {
LogPrintf("CTxMemPool::WriteFeeEstimates(): unable to write policy estimator data (non-fatal)\n");
return false;
}
return true;
}
bool
CTxMemPool::ReadFeeEstimates(CAutoFile& filein)
{
try {
int nVersionRequired, nVersionThatWrote;
filein >> nVersionRequired >> nVersionThatWrote;
if (nVersionRequired > CLIENT_VERSION)
return error("CTxMemPool::ReadFeeEstimates(): up-version (%d) fee estimate file", nVersionRequired);
LOCK(cs);
minerPolicyEstimator->Read(filein);
}
catch (const std::exception&) {
LogPrintf("CTxMemPool::ReadFeeEstimates(): unable to read policy estimator data (non-fatal)\n");
return false;
}
return true;
}
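// Apply a manual priority/fee delta to a transaction. The delta is kept in
// mapDeltas even if the transaction is not (yet) in the pool; if it is, the
// entry's modified fee and its ancestors' descendant-fee totals are updated
// so selection and eviction see the adjustment.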
void CTxMemPool::PrioritiseTransaction(const uint256 hash, const string strHash, double dPriorityDelta, const CAmount& nFeeDelta)
{
{
LOCK(cs);
std::pair<double, CAmount> &deltas = mapDeltas[hash];
deltas.first += dPriorityDelta;
deltas.second += nFeeDelta;
txiter it = mapTx.find(hash);
if (it != mapTx.end()) {
mapTx.modify(it, update_fee_delta(deltas.second));
// Now update all ancestors' modified fees with descendants
setEntries setAncestors;
uint64_t nNoLimit = std::numeric_limits<uint64_t>::max();
std::string dummy;
CalculateMemPoolAncestors(*it, setAncestors, nNoLimit, nNoLimit, nNoLimit, nNoLimit, dummy, false);
BOOST_FOREACH(txiter ancestorIt, setAncestors) {
mapTx.modify(ancestorIt, update_descendant_state(0, nFeeDelta, 0));
}
}
}
LogPrintf("PrioritiseTransaction: %s priority += %f, fee += %d\n", strHash, dPriorityDelta, FormatMoney(nFeeDelta));
}
void CTxMemPool::ApplyDeltas(const uint256 hash, double &dPriorityDelta, CAmount &nFeeDelta) const
{
LOCK(cs);
std::map<uint256, std::pair<double, CAmount> >::const_iterator pos = mapDeltas.find(hash);
if (pos == mapDeltas.end())
return;
const std::pair<double, CAmount> &deltas = pos->second;
dPriorityDelta += deltas.first;
nFeeDelta += deltas.second;
}
void CTxMemPool::ClearPrioritisation(const uint256 hash)
{
LOCK(cs);
mapDeltas.erase(hash);
}
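// True if no input of tx spends an output of a transaction that is currently
// in the mempool.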
bool CTxMemPool::HasNoInputsOf(const CTransaction &tx) const
{
for (unsigned int i = 0; i < tx.vin.size(); i++)
if (exists(tx.vin[i].prevout.hash))
return false;
return true;
}
CCoinsViewMemPool::CCoinsViewMemPool(CCoinsView* baseIn, const CTxMemPool& mempoolIn) : CCoinsViewBacked(baseIn), mempool(mempoolIn) { }
bool CCoinsViewMemPool::GetCoin(const COutPoint &outpoint, Coin &coin) const {
// If an entry in the mempool exists, always return that one, as it's guaranteed to never
// conflict with the underlying cache, and it cannot have pruned entries (as it
// contains full transactions). First checking the underlying cache risks
// returning a pruned entry instead.
CTransaction tx;
if (mempool.lookup(outpoint.hash, tx)) {
if (outpoint.n < tx.vout.size()) {
coin = Coin(tx.vout[outpoint.n], MEMPOOL_HEIGHT, false);
return true;
} else {
return false;
}
}
return base->GetCoin(outpoint, coin);
}
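
// Illustrative sketch (not part of the build): how a caller might probe the
// layered view. An unconfirmed output is reported at MEMPOOL_HEIGHT, while a
// confirmed one falls through to the base view with its real height. The
// helper name is hypothetical.
#if 0
static bool IsUnconfirmed(const CCoinsViewMemPool& view, const COutPoint& out)
{
    Coin coin;
    if (!view.GetCoin(out, coin))
        return false; // unknown in both the mempool and the base view
    return coin.nHeight == MEMPOOL_HEIGHT;
}
#endif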
size_t CTxMemPool::DynamicMemoryUsage() const {
LOCK(cs);
    // Estimate the overhead of mapTx to be 15 pointers + an allocation, as no exact formula for boost::multi_index_container is implemented.
return memusage::MallocUsage(sizeof(CTxMemPoolEntry) + 15 * sizeof(void*)) * mapTx.size() + memusage::DynamicUsage(mapNextTx) + memusage::DynamicUsage(mapDeltas) + memusage::DynamicUsage(mapLinks) + cachedInnerUsage;
}
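
// Worked example (illustrative, assuming a 64-bit build): with
// sizeof(void*) == 8, each entry is charged
// MallocUsage(sizeof(CTxMemPoolEntry) + 120) bytes, i.e. the entry itself plus
// the assumed fifteen-pointer multi_index node overhead, rounded up by
// MallocUsage() to the allocator's chunk size, then multiplied by mapTx.size().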
void CTxMemPool::RemoveStaged(setEntries &stage, bool updateDescendants) {
AssertLockHeld(cs);
UpdateForRemoveFromMempool(stage, updateDescendants);
BOOST_FOREACH(const txiter& it, stage) {
removeUnchecked(it);
}
}
int CTxMemPool::Expire(int64_t time) {
LOCK(cs);
indexed_transaction_set::index<entry_time>::type::iterator it = mapTx.get<entry_time>().begin();
setEntries toremove;
while (it != mapTx.get<entry_time>().end() && it->GetTime() < time) {
toremove.insert(mapTx.project<0>(it));
it++;
}
setEntries stage;
BOOST_FOREACH(txiter removeit, toremove) {
CalculateDescendants(removeit, stage);
}
RemoveStaged(stage, false);
return stage.size();
}
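
// Illustrative usage sketch (caller-side, not part of this file): expire
// everything older than a hypothetical 72-hour window.
#if 0
int64_t nCutoff = GetTime() - 72 * 60 * 60;
int nExpired = mempool.Expire(nCutoff);
LogPrint("mempool", "Expired %d transactions from the memory pool\n", nExpired);
#endif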
bool CTxMemPool::addUnchecked(const uint256& hash, const CTxMemPoolEntry &entry, bool fCurrentEstimate)
{
LOCK(cs);
setEntries setAncestors;
uint64_t nNoLimit = std::numeric_limits<uint64_t>::max();
std::string dummy;
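    // Ancestor limits were already enforced when this transaction was accepted,
    // so the ancestor set can be recomputed here without limits (nNoLimit).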
CalculateMemPoolAncestors(entry, setAncestors, nNoLimit, nNoLimit, nNoLimit, nNoLimit, dummy);
return addUnchecked(hash, entry, setAncestors, fCurrentEstimate);
}
void CTxMemPool::UpdateChild(txiter entry, txiter child, bool add)
{
setEntries s;
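    // An empty set suffices: IncrementalDynamicUsage() reports only the fixed
    // per-node cost of a single set element, which is exactly what one
    // insert/erase below adds or removes.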
if (add && mapLinks[entry].children.insert(child).second) {
cachedInnerUsage += memusage::IncrementalDynamicUsage(s);
} else if (!add && mapLinks[entry].children.erase(child)) {
cachedInnerUsage -= memusage::IncrementalDynamicUsage(s);
}
}
void CTxMemPool::UpdateParent(txiter entry, txiter parent, bool add)
{
setEntries s;
if (add && mapLinks[entry].parents.insert(parent).second) {
cachedInnerUsage += memusage::IncrementalDynamicUsage(s);
} else if (!add && mapLinks[entry].parents.erase(parent)) {
cachedInnerUsage -= memusage::IncrementalDynamicUsage(s);
}
}
const CTxMemPool::setEntries & CTxMemPool::GetMemPoolParents(txiter entry) const
{
assert (entry != mapTx.end());
txlinksMap::const_iterator it = mapLinks.find(entry);
assert(it != mapLinks.end());
return it->second.parents;
}
const CTxMemPool::setEntries & CTxMemPool::GetMemPoolChildren(txiter entry) const
{
assert (entry != mapTx.end());
txlinksMap::const_iterator it = mapLinks.find(entry);
assert(it != mapLinks.end());
return it->second.children;
}
CFeeRate CTxMemPool::GetMinFee(size_t sizelimit) const {
LOCK(cs);
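    // Decay only begins once a block has been connected since the fee was last
    // bumped; until then (or once the rate has already hit zero), return the
    // stored rate unchanged.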
if (!blockSinceLastRollingFeeBump || rollingMinimumFeeRate == 0)
return CFeeRate(rollingMinimumFeeRate);
int64_t time = GetTime();
if (time > lastRollingFeeUpdate + 10) {
double halflife = ROLLING_FEE_HALFLIFE;
if (DynamicMemoryUsage() < sizelimit / 4)
halflife /= 4;
else if (DynamicMemoryUsage() < sizelimit / 2)
halflife /= 2;
rollingMinimumFeeRate = rollingMinimumFeeRate / pow(2.0, (time - lastRollingFeeUpdate) / halflife);
lastRollingFeeUpdate = time;
if (rollingMinimumFeeRate < minReasonableRelayFee.GetFeePerK() / 2) {
rollingMinimumFeeRate = 0;
return CFeeRate(0);
}
}
return std::max(CFeeRate(rollingMinimumFeeRate), minReasonableRelayFee);
}
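
// Worked example (illustrative, assuming ROLLING_FEE_HALFLIFE is 12 hours):
// with the pool under a quarter of its size limit, the effective halflife
// drops to 3 hours, so a rolling minimum of 4000 duffs/kB decays to ~2000
// after 3 idle hours and ~1000 after 6, then snaps to zero once it falls
// below half of minReasonableRelayFee's per-kB rate.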
void CTxMemPool::UpdateMinFee(const CFeeRate& _minReasonableRelayFee)
{
LOCK(cs);
delete minerPolicyEstimator;
minerPolicyEstimator = new CBlockPolicyEstimator(_minReasonableRelayFee);
minReasonableRelayFee = _minReasonableRelayFee;
}
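
// Note: replacing the estimator discards all fee/priority statistics gathered
// so far, so this is a (re)configuration hook rather than something to call on
// a running node's hot path.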
void CTxMemPool::trackPackageRemoved(const CFeeRate& rate) {
AssertLockHeld(cs);
if (rate.GetFeePerK() > rollingMinimumFeeRate) {
rollingMinimumFeeRate = rate.GetFeePerK();
blockSinceLastRollingFeeBump = false;
}
}
void CTxMemPool::TrimToSize(size_t sizelimit, std::vector<COutPoint>* pvNoSpendsRemaining) {
LOCK(cs);
unsigned nTxnRemoved = 0;
CFeeRate maxFeeRateRemoved(0);
while (DynamicMemoryUsage() > sizelimit) {
indexed_transaction_set::index<descendant_score>::type::iterator it = mapTx.get<descendant_score>().begin();
        // We set the new mempool min fee to the feerate of the removed set, plus the
        // "minimum reasonable fee rate" (i.e. some value under which we consider txn
        // to have 0 fee). This way, we don't allow txn to enter the mempool with a
        // feerate equal to txn which were removed with no block in between.
CFeeRate removed(it->GetModFeesWithDescendants(), it->GetSizeWithDescendants());
removed += minReasonableRelayFee;
trackPackageRemoved(removed);
maxFeeRateRemoved = std::max(maxFeeRateRemoved, removed);
setEntries stage;
CalculateDescendants(mapTx.project<0>(it), stage);
nTxnRemoved += stage.size();
std::vector<CTransaction> txn;
if (pvNoSpendsRemaining) {
txn.reserve(stage.size());
BOOST_FOREACH(txiter it, stage)
txn.push_back(it->GetTx());
}
RemoveStaged(stage, false);
if (pvNoSpendsRemaining) {
BOOST_FOREACH(const CTransaction& tx, txn) {
BOOST_FOREACH(const CTxIn& txin, tx.vin) {
if (exists(txin.prevout.hash)) continue;
pvNoSpendsRemaining->push_back(txin.prevout);
}
}
}
}
if (maxFeeRateRemoved > CFeeRate(0))
LogPrint("mempool", "Removed %u txn, rolling minimum fee bumped to %s\n", nTxnRemoved, maxFeeRateRemoved.ToString());
}
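
// Illustrative usage sketch (caller-side, not part of this file): trim to a
// hypothetical 300 MB cap and uncache the outpoints whose in-mempool spenders
// were evicted. `pcoinsTip` is assumed to be the active coins-view cache.
#if 0
std::vector<COutPoint> vNoSpendsRemaining;
mempool.TrimToSize(300 * 1000 * 1000, &vNoSpendsRemaining);
BOOST_FOREACH(const COutPoint& removed, vNoSpendsRemaining)
    pcoinsTip->Uncache(removed);
#endif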
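// The two random 64-bit keys salt the txid hash so that an attacker who can
// grind transaction ids cannot engineer hash-table collisions; every node
// draws fresh salts at startup.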
SaltedTxidHasher::SaltedTxidHasher() : k0(GetRand(std::numeric_limits<uint64_t>::max())), k1(GetRand(std::numeric_limits<uint64_t>::max())) {}