Fix (dead)locks (#1169)

* locks in PS

* lock in governance

* locks in IS

* lock in ProcessGetData

* locks in CMasternodeSync

* centralize mnodeman.Check call

* lock order in mnpayments

* use the current chain tip when possible (fewer locks)

* add missing lock in CountInputsWithAmount

* fix deadlock RequestLowDataPaymentBlocks/IsTransactionValid

* LOCK2 in CheckMnbAndUpdateMasternodeList, CheckAndUpdate, SendVerifyRequest

* LOCK(cs) is not needed here

* Decouple governance init actions from serialization
Should fix this:
```
Assertion failed: lock governance.cs not held in governance-classes.cpp:117; locks held:
cs_Shutdown  init.cpp:200 (TRY)
cs  ./governance.h:195
cs  governance.cpp:835
Abort trap: 6
```
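
The quoted abort comes from the lock checks compiled in with DEBUG_LOCKORDER: rebuilding indexes and cached triggers from inside SerializationOp reached code in governance-classes.cpp that asserts governance.cs, and the checker did not see that lock as held. The commit therefore takes the rebuild out of serialization and puts it into an explicit InitOnLoad(), which init.cpp calls once the flat DB has loaded. A minimal sketch of the resulting split, simplified from the governance.cpp, governance.h and init.cpp hunks below:
```
// Serialization only moves data now; derived state is rebuilt afterwards,
// under the manager's own lock, once loading has completely finished.
template <typename Stream, typename Operation>
inline void CGovernanceManager::SerializationOp(Stream& s, Operation ser_action)
{
    LOCK(cs);
    READWRITE(mapSeenGovernanceObjects);
    // ... other members; no RebuildIndexes()/AddCachedTriggers() here anymore
}

void CGovernanceManager::InitOnLoad()
{
    LOCK(cs);            // taken only after deserialization has returned
    RebuildIndexes();
    AddCachedTriggers();
    ClearSeen();
}

// init.cpp, after flatdb3.Load(governance) succeeds:
//     governance.InitOnLoad();
```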
UdjinM6, 2016-11-28 18:21:50 +04:00, committed by GitHub
parent ecdc160119
commit 0600a6ce1a
12 changed files with 128 additions and 79 deletions

View File

@ -724,6 +724,8 @@ void CDarksendPool::ChargeFees()
LogPrintf("CDarksendPool::ChargeFees -- found uncooperative node (didn't %s transaction), charging fees: %s\n",
(nState == POOL_STATE_SIGNING) ? "sign" : "send", vecOffendersCollaterals[0].ToString());
LOCK(cs_main);
CValidationState state;
bool fMissingInputs;
if(!AcceptToMemoryPool(mempool, state, vecOffendersCollaterals[0], false, &fMissingInputs, false, true)) {
@ -751,6 +753,8 @@ void CDarksendPool::ChargeRandomFees()
{
if(!fMasterNode) return;
LOCK(cs_main);
BOOST_FOREACH(const CTransaction& txCollateral, vecSessionCollaterals) {
if(GetRandInt(100) > 10) return;
@ -773,13 +777,18 @@ void CDarksendPool::ChargeRandomFees()
//
void CDarksendPool::CheckTimeout()
{
// check mixing queue objects for timeouts
std::vector<CDarksendQueue>::iterator it = vecDarksendQueue.begin();
while(it != vecDarksendQueue.end()) {
if((*it).IsExpired()) {
LogPrint("privatesend", "CDarksendPool::CheckTimeout -- Removing expired queue (%s)\n", (*it).ToString());
it = vecDarksendQueue.erase(it);
} else ++it;
{
TRY_LOCK(cs_darksend, lockDS);
if(!lockDS) return; // it's ok to fail here, we run this quite frequently
// check mixing queue objects for timeouts
std::vector<CDarksendQueue>::iterator it = vecDarksendQueue.begin();
while(it != vecDarksendQueue.end()) {
if((*it).IsExpired()) {
LogPrint("privatesend", "CDarksendPool::CheckTimeout -- Removing expired queue (%s)\n", (*it).ToString());
it = vecDarksendQueue.erase(it);
} else ++it;
}
}
if(!fEnablePrivateSend && !fMasterNode) return;
@ -2335,11 +2344,22 @@ bool CDarksendQueue::CheckSignature(const CPubKey& pubKeyMasternode)
bool CDarksendQueue::Relay()
{
LOCK(cs_vNodes);
BOOST_FOREACH(CNode* pnode, vNodes)
std::vector<CNode*> vNodesCopy;
{
LOCK(cs_vNodes);
vNodesCopy = vNodes;
BOOST_FOREACH(CNode* pnode, vNodesCopy)
pnode->AddRef();
}
BOOST_FOREACH(CNode* pnode, vNodesCopy)
if(pnode->nVersion >= MIN_PRIVATESEND_PEER_PROTO_VERSION)
pnode->PushMessage(NetMsgType::DSQUEUE, (*this));
{
LOCK(cs_vNodes);
BOOST_FOREACH(CNode* pnode, vNodesCopy)
pnode->Release();
}
return true;
}
@ -2463,6 +2483,8 @@ void ThreadCheckDarkSendPool()
if(nTick % MASTERNODE_MIN_MNP_SECONDS == 1)
activeMasternode.ManageState();
mnodeman.Check();
if(nTick % 60 == 0) {
mnodeman.CheckAndRemove();
mnodeman.ProcessMasternodeConnections();
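
The Relay() hunk above shows the peer-iteration pattern this commit uses in several places: instead of holding cs_vNodes across PushMessage (which can block and take further locks), the node vector is copied and each node is ref-counted so it stays valid after the lock is dropped. A condensed sketch with hypothetical helper names; CopyAndRefNodes/ReleaseRefs do not exist in this tree, the commit writes the same steps out inline here and adds a file-local ReleaseNodes() in masternode-sync.cpp:
```
// Hypothetical helpers illustrating the snapshot-and-refcount pattern.
std::vector<CNode*> CopyAndRefNodes()
{
    LOCK(cs_vNodes);
    std::vector<CNode*> vNodesCopy = vNodes;
    BOOST_FOREACH(CNode* pnode, vNodesCopy)
        pnode->AddRef();                     // keep each node alive past the unlock
    return vNodesCopy;
}

void ReleaseRefs(const std::vector<CNode*>& vNodesCopy)
{
    LOCK(cs_vNodes);
    BOOST_FOREACH(CNode* pnode, vNodesCopy)
        pnode->Release();                    // drop the extra references
}

// Usage: snapshot, do the slow per-node work without cs_vNodes, then release.
//     std::vector<CNode*> vNodesCopy = CopyAndRefNodes();
//     BOOST_FOREACH(CNode* pnode, vNodesCopy)
//         pnode->PushMessage(...);
//     ReleaseRefs(vNodesCopy);
```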

View File

@ -111,8 +111,6 @@ void CGovernanceManager::ProcessMessage(CNode* pfrom, std::string& strCommand, C
if(pfrom->nVersion < MIN_GOVERNANCE_PEER_PROTO_VERSION) return;
LOCK(cs);
// ANOTHER USER IS ASKING US TO HELP THEM SYNC GOVERNANCE OBJECT DATA
if (strCommand == NetMsgType::MNGOVERNANCESYNC)
{
@ -165,6 +163,8 @@ void CGovernanceManager::ProcessMessage(CNode* pfrom, std::string& strCommand, C
return;
}
LOCK(cs);
if(mapSeenGovernanceObjects.count(nHash)) {
// TODO - print error code? what if it's GOVOBJ_ERROR_IMMATURE?
LogPrint("gobject", "CGovernanceManager -- Received already seen object: %s\n", strHash);
@ -870,6 +870,14 @@ void CGovernanceManager::AddCachedTriggers()
}
}
void CGovernanceManager::InitOnLoad()
{
LOCK(cs);
RebuildIndexes();
AddCachedTriggers();
ClearSeen();
}
std::string CGovernanceManager::ToString() const
{
std::ostringstream info;

View File

@ -218,10 +218,6 @@ public:
Clear();
return;
}
if(ser_action.ForRead()) {
RebuildIndexes();
AddCachedTriggers();
}
}
void UpdatedBlockTip(const CBlockIndex *pindex);
@ -258,6 +254,8 @@ public:
return fRateChecksEnabled;
}
void InitOnLoad();
private:
void RequestGovernanceObject(CNode* pfrom, const uint256& nHash);

View File

@ -1875,7 +1875,7 @@ bool AppInit2(boost::thread_group& threadGroup, CScheduler& scheduler)
if(!flatdb3.Load(governance)) {
return InitError("Failed to load governance cache from governance.dat");
}
governance.ClearSeen();
governance.InitOnLoad();
uiInterface.InitMessage(_("Loading fullfiled requests cache..."));
CFlatDB<CNetFulfilledRequestManager> flatdb4("netfulfilled.dat", "magicFulfilledCache");

View File

@ -72,9 +72,12 @@ bool IsInstantSendTxValid(const CTransaction& txCandidate)
{
if(txCandidate.vout.size() < 1) return false;
if(!CheckFinalTx(txCandidate)) {
LogPrint("instantsend", "IsInstantSendTxValid -- Transaction is not final: txCandidate=%s", txCandidate.ToString());
return false;
{
LOCK(cs_main);
if(!CheckFinalTx(txCandidate)) {
LogPrint("instantsend", "IsInstantSendTxValid -- Transaction is not final: txCandidate=%s", txCandidate.ToString());
return false;
}
}
int64_t nValueIn = 0;
@ -460,16 +463,16 @@ int64_t GetAverageMasternodeOrphanVoteTime()
void CleanTxLockCandidates()
{
LOCK(cs_instantsend);
std::map<uint256, CTxLockCandidate>::iterator it = mapTxLockCandidates.begin();
int nHeight;
{
LOCK(cs_main);
nHeight = chainActive.Height();
}
LOCK(cs_instantsend);
std::map<uint256, CTxLockCandidate>::iterator it = mapTxLockCandidates.begin();
while(it != mapTxLockCandidates.end()) {
CTxLockCandidate &txLockCandidate = it->second;
if(nHeight > txLockCandidate.nExpirationBlock) {
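
This hunk reorders the lock acquisition in CleanTxLockCandidates(): the height is read under cs_main alone, cs_main is released, and only then is cs_instantsend taken. Other paths take cs_main and then InstantSend's lock, so acquiring the two in the opposite order here risks the classic ABBA deadlock. A self-contained illustration in plain C++ (std::mutex stand-ins, not Dash code):
```
#include <mutex>
#include <thread>

// Stand-ins for cs_main and cs_instantsend.
std::mutex mtxChain;
std::mutex mtxInstantSend;

// Message-handler style path: chain lock first, then the subsystem lock.
void processMessagePath()
{
    std::lock_guard<std::mutex> chain(mtxChain);
    std::lock_guard<std::mutex> is(mtxInstantSend);
    // ... handle an InstantSend message ...
}

// Cleanup path after the fix: read what it needs under the chain lock,
// release it, and only then take the subsystem lock. Taking mtxInstantSend
// first and mtxChain second here could deadlock against processMessagePath().
void cleanupPath()
{
    int nHeight;
    {
        std::lock_guard<std::mutex> chain(mtxChain);
        nHeight = 0;   // e.g. read the chain height here
    }
    std::lock_guard<std::mutex> is(mtxInstantSend);
    (void)nHeight;     // ... expire candidates older than nHeight ...
}

int main()
{
    std::thread t1(processMessagePath), t2(cleanupPath);
    t1.join();
    t2.join();
}
```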

View File

@ -5057,14 +5057,16 @@ void static ProcessGetData(CNode* pfrom, const Consensus::Params& consensusParam
{
// Send stream from relay memory
bool pushed = false;
map<CInv, CDataStream>::iterator mi;
{
LOCK(cs_mapRelay);
map<CInv, CDataStream>::iterator mi = mapRelay.find(inv);
mi = mapRelay.find(inv);
if (mi != mapRelay.end()) {
pfrom->PushMessage(inv.GetCommand(), (*mi).second);
pushed = true;
}
}
if(pushed)
pfrom->PushMessage(inv.GetCommand(), (*mi).second);
if (!pushed && inv.type == MSG_TX) {
CTransaction tx;

View File

@ -416,7 +416,7 @@ bool CMasternodePayments::AddPaymentVote(const CMasternodePaymentVote& vote)
uint256 blockHash = uint256();
if(!GetBlockHash(blockHash, vote.nBlockHeight - 101)) return false;
LOCK2(cs_mapMasternodePaymentVotes, cs_mapMasternodeBlocks);
LOCK2(cs_mapMasternodeBlocks, cs_mapMasternodePaymentVotes);
if(mapMasternodePaymentVotes.count(vote.GetHash())) return false;
@ -573,7 +573,7 @@ void CMasternodePayments::CheckAndRemove()
{
if(!pCurrentBlockIndex) return;
LOCK2(cs_mapMasternodePaymentVotes, cs_mapMasternodeBlocks);
LOCK2(cs_mapMasternodeBlocks, cs_mapMasternodePaymentVotes);
int nLimit = GetStorageLimit();
@ -781,7 +781,7 @@ void CMasternodePayments::RequestLowDataPaymentBlocks(CNode* pnode)
// Old nodes can't process this
if(pnode->nVersion < 70202) return;
LOCK(cs_mapMasternodeBlocks);
LOCK2(cs_main, cs_mapMasternodeBlocks);
std::vector<CInv> vToFetch;
std::map<int, CMasternodeBlockPayees>::iterator it = mapMasternodeBlocks.begin();
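
The mnpayments hunks are ordering fixes: LOCK2(a, b) always acquires a before b, so call sites that name the same pair in different orders can deadlock against each other. After this commit every path agrees on cs_mapMasternodeBlocks before cs_mapMasternodePaymentVotes, and cs_main comes first wherever the function can reach code that locks it. A condensed sketch of the convention (bodies elided, names from the diff):
```
// Agreed order after this commit:
//   cs_main  ->  cs_mapMasternodeBlocks  ->  cs_mapMasternodePaymentVotes
bool CMasternodePayments::AddPaymentVote(const CMasternodePaymentVote& vote)
{
    LOCK2(cs_mapMasternodeBlocks, cs_mapMasternodePaymentVotes);  // blocks before votes
    // ...
    return true;
}

void CMasternodePayments::CheckAndRemove()
{
    LOCK2(cs_mapMasternodeBlocks, cs_mapMasternodePaymentVotes);  // same order as above
    // ...
}

void CMasternodePayments::RequestLowDataPaymentBlocks(CNode* pnode)
{
    LOCK2(cs_main, cs_mapMasternodeBlocks);  // cs_main first; this path can reach chain code
    // ...
}
```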

View File

@ -162,6 +162,13 @@ void CMasternodeSync::ClearFulfilledRequests()
}
}
void ReleaseNodes(const std::vector<CNode*> &vNodesCopy)
{
LOCK(cs_vNodes);
BOOST_FOREACH(CNode* pnode, vNodesCopy)
pnode->Release();
}
void CMasternodeSync::ProcessTick()
{
static int nTick = 0;
@ -210,15 +217,21 @@ void CMasternodeSync::ProcessTick()
return;
}
LOCK2(mnodeman.cs, cs_vNodes);
if(nRequestedMasternodeAssets == MASTERNODE_SYNC_INITIAL ||
(nRequestedMasternodeAssets == MASTERNODE_SYNC_SPORKS && IsBlockchainSynced()))
{
SwitchToNextAsset();
}
BOOST_FOREACH(CNode* pnode, vNodes)
std::vector<CNode*> vNodesCopy;
{
LOCK(cs_vNodes);
vNodesCopy = vNodes;
BOOST_FOREACH(CNode* pnode, vNodesCopy)
pnode->AddRef();
}
BOOST_FOREACH(CNode* pnode, vNodesCopy)
{
// QUICK MODE (REGTEST ONLY!)
if(Params().NetworkIDString() == CBaseChainParams::REGTEST)
@ -236,6 +249,7 @@ void CMasternodeSync::ProcessTick()
nRequestedMasternodeAssets = MASTERNODE_SYNC_FINISHED;
}
nRequestedMasternodeAttempt++;
ReleaseNodes(vNodesCopy);
return;
}
@ -271,9 +285,11 @@ void CMasternodeSync::ProcessTick()
LogPrintf("CMasternodeSync::ProcessTick -- ERROR: failed to sync %s\n", GetAssetName());
// there is no way we can continue without masternode list, fail here and try later
Fail();
ReleaseNodes(vNodesCopy);
return;
}
SwitchToNextAsset();
ReleaseNodes(vNodesCopy);
return;
}
@ -288,6 +304,7 @@ void CMasternodeSync::ProcessTick()
if(nRequestedMasternodeAttempt > 1 && nMnCount > nMnCountEstimated) {
LogPrintf("CMasternodeSync::ProcessTick -- nTick %d nRequestedMasternodeAssets %d -- found enough data\n", nTick, nRequestedMasternodeAssets);
SwitchToNextAsset();
ReleaseNodes(vNodesCopy);
return;
}
@ -300,6 +317,7 @@ void CMasternodeSync::ProcessTick()
mnodeman.DsegUpdate(pnode);
ReleaseNodes(vNodesCopy);
return; //this will cause each peer to get one request each six seconds for the various assets we need
}
@ -316,9 +334,11 @@ void CMasternodeSync::ProcessTick()
LogPrintf("CMasternodeSync::ProcessTick -- ERROR: failed to sync %s\n", GetAssetName());
// probably not a good idea to proceed without winner list
Fail();
ReleaseNodes(vNodesCopy);
return;
}
SwitchToNextAsset();
ReleaseNodes(vNodesCopy);
return;
}
@ -328,6 +348,7 @@ void CMasternodeSync::ProcessTick()
if(nRequestedMasternodeAttempt > 1 && mnpayments.IsEnoughData()) {
LogPrintf("CMasternodeSync::ProcessTick -- nTick %d nRequestedMasternodeAssets %d -- found enough data\n", nTick, nRequestedMasternodeAssets);
SwitchToNextAsset();
ReleaseNodes(vNodesCopy);
return;
}
@ -343,6 +364,7 @@ void CMasternodeSync::ProcessTick()
// ask node for missing pieces only (old nodes will not be asked)
mnpayments.RequestLowDataPaymentBlocks(pnode);
ReleaseNodes(vNodesCopy);
return; //this will cause each peer to get one request each six seconds for the various assets we need
}
@ -359,6 +381,7 @@ void CMasternodeSync::ProcessTick()
// it's kind of ok to skip this for now, hopefully we'll catch up later?
}
SwitchToNextAsset();
ReleaseNodes(vNodesCopy);
return;
}
@ -384,6 +407,7 @@ void CMasternodeSync::ProcessTick()
pnode->PushMessage(NetMsgType::MNGOVERNANCESYNC, uint256()); //sync masternode votes
ReleaseNodes(vNodesCopy);
return; //this will cause each peer to get one request each six seconds for the various assets we need
}
}
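
Because ProcessTick() now works on a ref-counted snapshot, every early return above is preceded by ReleaseNodes(vNodesCopy). A small RAII guard would make that release impossible to forget; this is only a hypothetical alternative sketched for comparison, not something the commit adds:
```
// Hypothetical scope guard (not in this commit): releases the snapshot's
// extra node references whenever the enclosing scope exits, covering every
// early return in ProcessTick() without repeating ReleaseNodes() by hand.
class CNodeSnapshotReleaser
{
public:
    explicit CNodeSnapshotReleaser(const std::vector<CNode*>& vNodesCopyIn)
        : vNodesCopy(vNodesCopyIn) {}
    ~CNodeSnapshotReleaser() { ReleaseNodes(vNodesCopy); }
private:
    const std::vector<CNode*>& vNodesCopy;
};

// Usage, right after building vNodesCopy in ProcessTick():
//     CNodeSnapshotReleaser releaser(vNodesCopy);
```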

View File

@ -320,7 +320,6 @@ int CMasternodeMan::CountEnabled(int nProtocolVersion)
nProtocolVersion = nProtocolVersion == -1 ? mnpayments.GetMinMasternodePaymentsProto() : nProtocolVersion;
BOOST_FOREACH(CMasternode& mn, vMasternodes) {
mn.Check();
if(mn.nProtocolVersion < nProtocolVersion || !mn.IsEnabled()) continue;
nCount++;
}
@ -460,6 +459,15 @@ bool CMasternodeMan::Has(const CTxIn& vin)
//
// Deterministically select the oldest/best masternode to pay on the network
//
CMasternode* CMasternodeMan::GetNextMasternodeInQueueForPayment(bool fFilterSigTime, int& nCount)
{
if(!pCurrentBlockIndex) {
nCount = 0;
return NULL;
}
return GetNextMasternodeInQueueForPayment(pCurrentBlockIndex->nHeight, fFilterSigTime, nCount);
}
CMasternode* CMasternodeMan::GetNextMasternodeInQueueForPayment(int nBlockHeight, bool fFilterSigTime, int& nCount)
{
// Need LOCK2 here to ensure consistent locking order because the GetBlockHash call below locks cs_main
@ -475,7 +483,6 @@ CMasternode* CMasternodeMan::GetNextMasternodeInQueueForPayment(int nBlockHeight
int nMnCount = CountEnabled();
BOOST_FOREACH(CMasternode &mn, vMasternodes)
{
mn.Check();
if(!mn.IsValidForPayment()) continue;
// //check protocol version
@ -581,7 +588,6 @@ int CMasternodeMan::GetMasternodeRank(const CTxIn& vin, int nBlockHeight, int nM
// scan for winner
BOOST_FOREACH(CMasternode& mn, vMasternodes) {
if(mn.nProtocolVersion < nMinProtocol) continue;
mn.Check();
if(fOnlyActive) {
if(!mn.IsEnabled()) continue;
}
@ -618,8 +624,6 @@ std::vector<std::pair<int, CMasternode> > CMasternodeMan::GetMasternodeRanks(int
// scan for winner
BOOST_FOREACH(CMasternode& mn, vMasternodes) {
mn.Check();
if(mn.nProtocolVersion < nMinProtocol || !mn.IsEnabled()) continue;
int64_t nScore = mn.CalculateScore(blockHash).GetCompact(false);
@ -654,10 +658,7 @@ CMasternode* CMasternodeMan::GetMasternodeByRank(int nRank, int nBlockHeight, in
BOOST_FOREACH(CMasternode& mn, vMasternodes) {
if(mn.nProtocolVersion < nMinProtocol) continue;
if(fOnlyActive) {
mn.Check();
if(!mn.IsEnabled()) continue;
}
if(fOnlyActive && !mn.IsEnabled()) continue;
int64_t nScore = mn.CalculateScore(blockHash).GetCompact(false);
@ -699,21 +700,18 @@ void CMasternodeMan::ProcessMessage(CNode* pfrom, std::string& strCommand, CData
if (strCommand == NetMsgType::MNANNOUNCE) { //Masternode Broadcast
{
LOCK(cs);
CMasternodeBroadcast mnb;
vRecv >> mnb;
CMasternodeBroadcast mnb;
vRecv >> mnb;
int nDos = 0;
int nDos = 0;
if (CheckMnbAndUpdateMasternodeList(mnb, nDos)) {
// use announced Masternode as a peer
addrman.Add(CAddress(mnb.addr), pfrom->addr, 2*60*60);
} else if(nDos > 0) {
Misbehaving(pfrom->GetId(), nDos);
}
if (CheckMnbAndUpdateMasternodeList(mnb, nDos)) {
// use announced Masternode as a peer
addrman.Add(CAddress(mnb.addr), pfrom->addr, 2*60*60);
} else if(nDos > 0) {
Misbehaving(pfrom->GetId(), nDos);
}
if(fMasternodesAdded) {
NotifyMasternodeUpdates();
}
@ -726,7 +724,8 @@ void CMasternodeMan::ProcessMessage(CNode* pfrom, std::string& strCommand, CData
LogPrint("masternode", "MNPING -- Masternode ping, masternode=%s\n", mnp.vin.prevout.ToStringShort());
LOCK(cs);
// Need LOCK2 here to ensure consistent locking order because the CheckAndUpdate call below locks cs_main
LOCK2(cs_main, cs);
if(mapSeenMasternodePing.count(mnp.GetHash())) return; //seen
mapSeenMasternodePing.insert(std::make_pair(mnp.GetHash(), mnp));
@ -840,7 +839,9 @@ void CMasternodeMan::DoFullVerificationStep()
std::vector<std::pair<int, CMasternode> > vecMasternodeRanks = GetMasternodeRanks(pCurrentBlockIndex->nHeight - 1, MIN_POSE_PROTO_VERSION);
LOCK(cs);
// Need LOCK2 here to ensure consistent locking order because the SendVerifyRequest call below locks cs_main
// through GetHeight() signal in ConnectNode
LOCK2(cs_main, cs);
int nCount = 0;
int nCountMax = std::max(10, (int)vMasternodes.size() / 100); // verify at least 10 masternode at once but at most 1% of all known masternodes
@ -1294,7 +1295,8 @@ void CMasternodeMan::UpdateMasternodeList(CMasternodeBroadcast mnb)
bool CMasternodeMan::CheckMnbAndUpdateMasternodeList(CMasternodeBroadcast mnb, int& nDos)
{
LOCK(cs);
// Need LOCK2 here to ensure consistent locking order because the SimpleCheck call below locks cs_main
LOCK2(cs_main, cs);
nDos = 0;
LogPrint("masternode", "CMasternodeMan::CheckMnbAndUpdateMasternodeList -- masternode=%s\n", mnb.vin.prevout.ToStringShort());
@ -1347,11 +1349,12 @@ bool CMasternodeMan::CheckMnbAndUpdateMasternodeList(CMasternodeBroadcast mnb, i
return true;
}
void CMasternodeMan::UpdateLastPaid(const CBlockIndex *pindex)
void CMasternodeMan::UpdateLastPaid()
{
LOCK(cs);
if(fLiteMode) return;
if(!pCurrentBlockIndex) return;
static bool IsFirstRun = true;
// Do full scan on first run or if we are not a masternode
@ -1359,10 +1362,10 @@ void CMasternodeMan::UpdateLastPaid(const CBlockIndex *pindex)
int nMaxBlocksToScanBack = (IsFirstRun || !fMasterNode) ? mnpayments.GetStorageLimit() : LAST_PAID_SCAN_BLOCKS;
// LogPrint("mnpayments", "CMasternodeMan::UpdateLastPaid -- nHeight=%d, nMaxBlocksToScanBack=%d, IsFirstRun=%s\n",
// pindex->nHeight, nMaxBlocksToScanBack, IsFirstRun ? "true" : "false");
// pCurrentBlockIndex->nHeight, nMaxBlocksToScanBack, IsFirstRun ? "true" : "false");
BOOST_FOREACH(CMasternode& mn, vMasternodes) {
mn.UpdateLastPaid(pindex, nMaxBlocksToScanBack);
mn.UpdateLastPaid(pCurrentBlockIndex, nMaxBlocksToScanBack);
}
// every time is like the first time if winners list is not synced
@ -1508,7 +1511,7 @@ void CMasternodeMan::UpdatedBlockTip(const CBlockIndex *pindex)
if(fMasterNode) {
DoFullVerificationStep();
// normal wallet does not need to update this every block, doing update on rpc call should be enough
UpdateLastPaid(pindex);
UpdateLastPaid();
}
}
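
UpdateLastPaid() no longer takes a block index from its callers; it scans from pCurrentBlockIndex, the tip the manager caches in UpdatedBlockTip(). This is the "use the current chain tip when possible" item from the commit message: the RPC callers shown further below no longer need to lock cs_main just to fetch chainActive.Tip() first. A condensed sketch of the caching pattern, simplified from the hunks above:
```
// The manager remembers the tip it was last notified about; later work reads
// the cached pointer instead of locking cs_main to query chainActive again.
void CMasternodeMan::UpdatedBlockTip(const CBlockIndex *pindex)
{
    pCurrentBlockIndex = pindex;                 // cache the new tip
    // ... per-block housekeeping (verification, last-paid scan, ...) ...
}

void CMasternodeMan::UpdateLastPaid()
{
    LOCK(cs);
    if(!pCurrentBlockIndex) return;              // no tip seen yet, nothing to scan
    BOOST_FOREACH(CMasternode& mn, vMasternodes)
        mn.UpdateLastPaid(pCurrentBlockIndex, LAST_PAID_SCAN_BLOCKS);
}
```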

View File

@ -271,11 +271,13 @@ public:
/// Find an entry in the masternode list that is next to be paid
CMasternode* GetNextMasternodeInQueueForPayment(int nBlockHeight, bool fFilterSigTime, int& nCount);
/// Same as above but use current block height
CMasternode* GetNextMasternodeInQueueForPayment(bool fFilterSigTime, int& nCount);
/// Find a random entry
CMasternode* FindRandomNotInVec(const std::vector<CTxIn> &vecToExclude, int nProtocolVersion = -1);
std::vector<CMasternode> GetFullMasternodeVector() { Check(); return vMasternodes; }
std::vector<CMasternode> GetFullMasternodeVector() { return vMasternodes; }
std::vector<std::pair<int, CMasternode> > GetMasternodeRanks(int nBlockHeight = -1, int nMinProtocol=0);
int GetMasternodeRank(const CTxIn &vin, int nBlockHeight, int nMinProtocol=0, bool fOnlyActive=true);
@ -304,7 +306,7 @@ public:
/// Perform complete check and only then update list and maps
bool CheckMnbAndUpdateMasternodeList(CMasternodeBroadcast mnb, int& nDos);
void UpdateLastPaid(const CBlockIndex *pindex);
void UpdateLastPaid();
void CheckAndRebuildMasternodeIndex();

View File

@ -167,9 +167,8 @@ UniValue masternode(const UniValue& params, bool fHelp)
if (strMode == "enabled")
return mnodeman.CountEnabled();
LOCK(cs_main);
int nCount;
mnodeman.GetNextMasternodeInQueueForPayment(chainActive.Height(), true, nCount);
mnodeman.GetNextMasternodeInQueueForPayment(true, nCount);
if (strMode == "qualify")
return nCount;
@ -184,14 +183,12 @@ UniValue masternode(const UniValue& params, bool fHelp)
{
int nCount;
int nHeight;
CBlockIndex* pindex;
CMasternode* winner = NULL;
{
LOCK(cs_main);
nHeight = chainActive.Height() + (strCommand == "current" ? 1 : 10);
pindex = chainActive.Tip();
}
mnodeman.UpdateLastPaid(pindex);
mnodeman.UpdateLastPaid();
winner = mnodeman.GetNextMasternodeInQueueForPayment(nHeight, true, nCount);
if(!winner) return "unknown";
@ -482,22 +479,12 @@ UniValue masternodelist(const UniValue& params, bool fHelp)
}
if (strMode == "full" || strMode == "lastpaidtime" || strMode == "lastpaidblock") {
CBlockIndex* pindex;
{
LOCK(cs_main);
pindex = chainActive.Tip();
}
mnodeman.UpdateLastPaid(pindex);
mnodeman.UpdateLastPaid();
}
UniValue obj(UniValue::VOBJ);
if (strMode == "rank") {
int nHeight;
{
LOCK(cs_main);
nHeight = chainActive.Height();
}
std::vector<std::pair<int, CMasternode> > vMasternodeRanks = mnodeman.GetMasternodeRanks(nHeight);
std::vector<std::pair<int, CMasternode> > vMasternodeRanks = mnodeman.GetMasternodeRanks();
BOOST_FOREACH(PAIRTYPE(int, CMasternode)& s, vMasternodeRanks) {
std::string strOutpoint = s.second.vin.prevout.ToStringShort();
if (strFilter !="" && strOutpoint.find(strFilter) == std::string::npos) continue;

View File

@ -2754,7 +2754,7 @@ int CWallet::CountInputsWithAmount(CAmount nInputAmount)
{
CAmount nTotal = 0;
{
LOCK(cs_wallet);
LOCK2(cs_main, cs_wallet);
for (map<uint256, CWalletTx>::const_iterator it = mapWallet.begin(); it != mapWallet.end(); ++it)
{
const CWalletTx* pcoin = &(*it).second;