Merge #6004: backport: merge bitcoin#21167, #22782, #21943, #22829, #24079, #24108, #24157, #25109 (network backports: part 5)

5dde8e7b33 merge bitcoin#25109: Strengthen AssertLockNotHeld assertions (Kittywhiskers Van Gogh)
a1f005ee71 merge bitcoin#24157: Replace RecursiveMutex cs_totalBytesSent with Mutex and rename it (Kittywhiskers Van Gogh)
de4b4bf9ee merge bitcoin#24108: Replace RecursiveMutex cs_addrLocal with Mutex, and rename it (Kittywhiskers Van Gogh)
2f7a138452 merge bitcoin#24079: replace RecursiveMutex cs_SubVer with Mutex (and rename) (Kittywhiskers Van Gogh)
23b152cd37 merge bitcoin#22829: various RecursiveMutex replacements in CConnman (Kittywhiskers Van Gogh)
362e3101ad merge bitcoin#21943: Dedup and RAII-fy the creation of a copy of CConnman::vNodes (Kittywhiskers Van Gogh)
bf98ad6a42 merge bitcoin#22782: Remove unused MaybeSetAddrName (Kittywhiskers Van Gogh)
2b65526818 merge bitcoin#21167: make CNode::m_inbound_onion public, initialize explicitly (Kittywhiskers Van Gogh)

Pull request description:

  ## Additional Information

  * Dependent on https://github.com/dashpay/dash/pull/6001
  * Dependency for https://github.com/dashpay/dash/pull/6018
  * Partially reverts ff69e0d575 from https://github.com/dashpay/dash/pull/5336 because `Span<CNode*>` is incompatible with the `const std::vector<CNode*>&` returned by `CConnman::NodesSnapshot::Nodes()`:

    ```
    masternode/sync.cpp:147:18: error: no matching member function for call to 'RequestGovernanceObjectVotes'
            m_govman.RequestGovernanceObjectVotes(snap.Nodes(), connman);
            ~~~~~~~~~^~~~~~~~~~~~~~~~~~~~~~~~~~~~
    ./governance/governance.h:360:9: note: candidate function not viable: no known conversion from 'const std::vector<CNode *>' to 'CNode &' for 1st argument
        int RequestGovernanceObjectVotes(CNode& peer, CConnman& connman) const;
          ^
    ./governance/governance.h:361:9: note: candidate function not viable: no known conversion from 'const std::vector<CNode *>' to 'Span<CNode *>' for 1st argument
        int RequestGovernanceObjectVotes(Span<CNode*> vNodesCopy, CConnman& connman) const;
          ^
    1 error generated.
    ```
  * Dash already implements its own `CNode*` iteration logic ([dash#1382](https://github.com/dashpay/dash/pull/1382)) and later extended it with additional capabilities ([dash#1575](https://github.com/dashpay/dash/pull/1575)). Backporting [bitcoin#21943](https://github.com/bitcoin/bitcoin/pull/21943) therefore meant migrating this Dash-specific code onto the upstream logic, which in turn had to be adapted to provide the expected functionality (see the snapshot usage sketch below).

  * Unlike Bitcoin, Dash maintains a map from every raw `SOCKET` to a pointer to its `CNode` instance and uses it to translate socket sets into their corresponding `CNode*` sets. This accommodates edge-triggered modes, which have an event-to-socket relationship, as opposed to level-triggered modes, which have a socket-to-event relationship.

    This means that `CConnman::SocketHandlerConnected()` does not require access to a vector of all `CNode` pointers, so the `nodes` argument has been omitted. A short, hypothetical sketch of the socket-to-node lookup follows below.
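
    The sketch reuses the `mapSocketToNode` and `cs_mapSocketToNode` members visible in the `net.h` diff; the loop body is illustrative only and is not the actual `net.cpp` implementation:

    ```
    // Hypothetical sketch: map sockets reported ready by the event backend
    // back to their owning CNode via the socket-to-node map, then process
    // each node outside the map lock.
    for (SOCKET hSocket : recv_set) {
        CNode* pnode{nullptr};
        {
            LOCK(cs_mapSocketToNode);
            auto it = mapSocketToNode.find(hSocket);
            if (it == mapSocketToNode.end()) continue; // socket already removed
            pnode = it->second;
        }
        // ... receive data on hSocket and hand it to pnode ...
    }
    ```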
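
  For reference, the call-site shape after backporting [bitcoin#21943](https://github.com/bitcoin/bitcoin/pull/21943) is shown in the condensed sketch below; it mirrors the `masternode/sync.cpp` and `governance.cpp` hunks in this diff, with the loop body reduced to a placeholder:

  ```
  // NodesSnapshot copies CConnman::m_nodes and adds a reference to each node
  // on construction, releasing the references when it goes out of scope. This
  // replaces the manual CopyNodeVector()/ReleaseNodeVector() calls used previously.
  {
      const CConnman::NodesSnapshot snap{connman, /* filter = */ CConnman::FullyConnectedOnly};
      for (CNode* pnode : snap.Nodes()) {
          if (!pnode->CanRelay()) continue;
          // ... request governance objects / votes from pnode ...
      }
  } // snapshot destroyed; node references released
  ```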

  ## Checklist:

  - [x] I have performed a self-review of my own code
  - [x] I have commented my code, particularly in hard-to-understand areas **(note: N/A)**
  - [x] I have added or updated relevant unit/integration/functional/e2e tests **(note: N/A)**
  - [x] I have made corresponding changes to the documentation **(note: N/A)**
  - [x] I have assigned this pull request to a milestone _(for repository code-owners and collaborators only)_

ACKs for top commit:
  PastaPastaPasta:
    utACK 5dde8e7b33

Tree-SHA512: 5685d8ebb4fa1f10d018e60d9b0efc3100ea13ac437e7892a09ad3f86d6ac6756e4b5a08ebe70de2eabb27740678e10b975d319f2d553ae5b27dafa71dba0a9f
pasta 2024-05-10 11:00:12 -05:00
commit 26cfbb00e2
27 changed files with 595 additions and 448 deletions


@ -65,7 +65,7 @@ private:
bool m_request_stop GUARDED_BY(m_mutex){false};
/** Internal function that does bulk of the verification work. */
bool Loop(bool fMaster)
bool Loop(bool fMaster) EXCLUSIVE_LOCKS_REQUIRED(!m_mutex)
{
std::condition_variable& cond = fMaster ? m_master_cv : m_worker_cv;
std::vector<T> vChecks;
@ -139,7 +139,7 @@ public:
}
//! Create a pool of new worker threads.
void StartWorkerThreads(const int threads_num)
void StartWorkerThreads(const int threads_num) EXCLUSIVE_LOCKS_REQUIRED(!m_mutex)
{
{
LOCK(m_mutex);
@ -157,13 +157,13 @@ public:
}
//! Wait until execution finishes, and return whether all evaluations were successful.
bool Wait()
bool Wait() EXCLUSIVE_LOCKS_REQUIRED(!m_mutex)
{
return Loop(true /* master thread */);
}
//! Add a batch of checks to the queue
void Add(std::vector<T>& vChecks)
void Add(std::vector<T>& vChecks) EXCLUSIVE_LOCKS_REQUIRED(!m_mutex)
{
if (vChecks.empty()) {
return;
@ -186,7 +186,7 @@ public:
}
//! Stop all of the worker threads.
void StopWorkerThreads()
void StopWorkerThreads() EXCLUSIVE_LOCKS_REQUIRED(!m_mutex)
{
WITH_LOCK(m_mutex, m_request_stop = true);
m_worker_cv.notify_all();


@ -368,15 +368,22 @@ public:
void AddDSTX(const CCoinJoinBroadcastTx& dstx) EXCLUSIVE_LOCKS_REQUIRED(!cs_mapdstx);
CCoinJoinBroadcastTx GetDSTX(const uint256& hash) EXCLUSIVE_LOCKS_REQUIRED(!cs_mapdstx);
void UpdatedBlockTip(const CBlockIndex* pindex, const llmq::CChainLocksHandler& clhandler, const CMasternodeSync& mn_sync);
void NotifyChainLock(const CBlockIndex* pindex, const llmq::CChainLocksHandler& clhandler, const CMasternodeSync& mn_sync);
void UpdatedBlockTip(const CBlockIndex* pindex, const llmq::CChainLocksHandler& clhandler,
const CMasternodeSync& mn_sync)
EXCLUSIVE_LOCKS_REQUIRED(!cs_mapdstx);
void NotifyChainLock(const CBlockIndex* pindex, const llmq::CChainLocksHandler& clhandler,
const CMasternodeSync& mn_sync)
EXCLUSIVE_LOCKS_REQUIRED(!cs_mapdstx);
void TransactionAddedToMempool(const CTransactionRef& tx) EXCLUSIVE_LOCKS_REQUIRED(!cs_mapdstx);
void BlockConnected(const std::shared_ptr<const CBlock>& pblock, const CBlockIndex* pindex) EXCLUSIVE_LOCKS_REQUIRED(!cs_mapdstx);
void BlockDisconnected(const std::shared_ptr<const CBlock>& pblock, const CBlockIndex*) EXCLUSIVE_LOCKS_REQUIRED(!cs_mapdstx);
void BlockConnected(const std::shared_ptr<const CBlock>& pblock, const CBlockIndex* pindex)
EXCLUSIVE_LOCKS_REQUIRED(!cs_mapdstx);
void BlockDisconnected(const std::shared_ptr<const CBlock>& pblock, const CBlockIndex*)
EXCLUSIVE_LOCKS_REQUIRED(!cs_mapdstx);
private:
void CheckDSTXes(const CBlockIndex* pindex, const llmq::CChainLocksHandler& clhandler);
void CheckDSTXes(const CBlockIndex* pindex, const llmq::CChainLocksHandler& clhandler)
EXCLUSIVE_LOCKS_REQUIRED(!cs_mapdstx);
void UpdateDSTXConfirmedHeight(const CTransactionRef& tx, std::optional<int> nHeight);
};


@ -1223,11 +1223,11 @@ void CGovernanceManager::RequestGovernanceObject(CNode* pfrom, const uint256& nH
int CGovernanceManager::RequestGovernanceObjectVotes(CNode& peer, CConnman& connman) const
{
std::array<CNode*, 1> nodeCopy{&peer};
return RequestGovernanceObjectVotes(nodeCopy, connman);
const std::vector<CNode*> vNodeCopy{&peer};
return RequestGovernanceObjectVotes(vNodeCopy, connman);
}
int CGovernanceManager::RequestGovernanceObjectVotes(Span<CNode*> vNodesCopy, CConnman& connman) const
int CGovernanceManager::RequestGovernanceObjectVotes(const std::vector<CNode*>& vNodesCopy, CConnman& connman) const
{
static std::map<uint256, std::map<CService, int64_t> > mapAskedRecently;
@ -1501,7 +1501,7 @@ void CGovernanceManager::UpdatedBlockTip(const CBlockIndex* pindex, CConnman& co
void CGovernanceManager::RequestOrphanObjects(CConnman& connman)
{
std::vector<CNode*> vNodesCopy = connman.CopyNodeVector(CConnman::FullyConnectedOnly);
const CConnman::NodesSnapshot snap{connman, /* filter = */ CConnman::FullyConnectedOnly};
std::vector<uint256> vecHashesFiltered;
{
@ -1517,15 +1517,13 @@ void CGovernanceManager::RequestOrphanObjects(CConnman& connman)
LogPrint(BCLog::GOBJECT, "CGovernanceObject::RequestOrphanObjects -- number objects = %d\n", vecHashesFiltered.size());
for (const uint256& nHash : vecHashesFiltered) {
for (CNode* pnode : vNodesCopy) {
for (CNode* pnode : snap.Nodes()) {
if (!pnode->CanRelay()) {
continue;
}
RequestGovernanceObject(pnode, nHash, connman);
}
}
connman.ReleaseNodeVector(vNodesCopy);
}
void CGovernanceManager::CleanOrphanObjects()


@ -358,7 +358,7 @@ public:
void InitOnLoad();
int RequestGovernanceObjectVotes(CNode& peer, CConnman& connman) const;
int RequestGovernanceObjectVotes(Span<CNode*> vNodesCopy, CConnman& connman) const;
int RequestGovernanceObjectVotes(const std::vector<CNode*>& vNodesCopy, CConnman& connman) const;
/*
* Trigger Management (formerly CGovernanceTriggerManager)


@ -83,7 +83,7 @@ public:
{
}
/** Enqueue a work item */
bool Enqueue(WorkItem* item)
bool Enqueue(WorkItem* item) EXCLUSIVE_LOCKS_REQUIRED(!cs)
{
LOCK(cs);
if (!running || queue.size() >= maxDepth) {
@ -94,7 +94,7 @@ public:
return true;
}
/** Thread function */
void Run()
void Run() EXCLUSIVE_LOCKS_REQUIRED(!cs)
{
while (true) {
std::unique_ptr<WorkItem> i;
@ -111,7 +111,7 @@ public:
}
}
/** Interrupt and exit loops */
void Interrupt()
void Interrupt() EXCLUSIVE_LOCKS_REQUIRED(!cs)
{
LOCK(cs);
running = false;


@ -84,7 +84,7 @@ public:
* to the listening socket and address.
* @return true on success
*/
bool Listen(Connection& conn);
bool Listen(Connection& conn) EXCLUSIVE_LOCKS_REQUIRED(!m_mutex);
/**
* Wait for and accept a new incoming connection.
@ -103,7 +103,7 @@ public:
* it is set to `false`. Only set if `false` is returned.
* @return true on success
*/
bool Connect(const CService& to, Connection& conn, bool& proxy_error);
bool Connect(const CService& to, Connection& conn, bool& proxy_error) EXCLUSIVE_LOCKS_REQUIRED(!m_mutex);
private:
/**
@ -172,7 +172,7 @@ private:
/**
* Check the control socket for errors and possibly disconnect.
*/
void CheckControlSock();
void CheckControlSock() EXCLUSIVE_LOCKS_REQUIRED(!m_mutex);
/**
* Generate a new destination with the SAM proxy and set `m_private_key` to it.


@ -63,7 +63,7 @@ public:
bool LookupFilter(const CBlockIndex* block_index, BlockFilter& filter_out) const;
/** Get a single filter header by block. */
bool LookupFilterHeader(const CBlockIndex* block_index, uint256& header_out);
bool LookupFilterHeader(const CBlockIndex* block_index, uint256& header_out) EXCLUSIVE_LOCKS_REQUIRED(!m_cs_headers_cache);
/** Get a range of filters between two heights on a chain. */
bool LookupFilterRange(int start_height, const CBlockIndex* stop_index,


@ -1336,7 +1336,7 @@ void CDKGSession::RelayInvToParticipants(const CInv& inv) const
if (pnode->GetVerifiedProRegTxHash().IsNull()) {
logger.Batch("node[%d:%s] not mn",
pnode->GetId(),
pnode->GetAddrName());
pnode->m_addr_name);
} else if (relayMembers.count(pnode->GetVerifiedProRegTxHash()) == 0) {
ss2 << pnode->GetVerifiedProRegTxHash().ToString().substr(0, 4) << " | ";
}


@ -1092,14 +1092,13 @@ bool CSigSharesManager::SendMessages()
return session->sendSessionId;
};
std::vector<CNode*> vNodesCopy = connman.CopyNodeVector(CConnman::FullyConnectedOnly);
const CConnman::NodesSnapshot snap{connman, /* filter = */ CConnman::FullyConnectedOnly};
{
LOCK(cs);
CollectSigSharesToRequest(sigSharesToRequest);
CollectSigSharesToSend(sigShareBatchesToSend);
CollectSigSharesToAnnounce(sigSharesToAnnounce);
CollectSigSharesToSendConcentrated(sigSharesToSend, vNodesCopy);
CollectSigSharesToSendConcentrated(sigSharesToSend, snap.Nodes());
for (auto& [nodeId, sigShareMap] : sigSharesToRequest) {
for (auto& [hash, sigShareInv] : sigShareMap) {
@ -1120,7 +1119,7 @@ bool CSigSharesManager::SendMessages()
bool didSend = false;
for (auto& pnode : vNodesCopy) {
for (auto& pnode : snap.Nodes()) {
CNetMsgMaker msgMaker(pnode->GetCommonVersion());
if (const auto it1 = sigSessionAnnouncements.find(pnode->GetId()); it1 != sigSessionAnnouncements.end()) {
@ -1222,9 +1221,6 @@ bool CSigSharesManager::SendMessages()
}
}
// looped through all nodes, release them
connman.ReleaseNodeVector(vNodesCopy);
return didSend;
}


@ -140,12 +140,11 @@ void CMasternodeSync::ProcessTick()
}
nTimeLastProcess = GetTime();
std::vector<CNode*> vNodesCopy = connman.CopyNodeVector(CConnman::FullyConnectedOnly);
const CConnman::NodesSnapshot snap{connman, /* filter = */ CConnman::FullyConnectedOnly};
// gradually request the rest of the votes after sync finished
if(IsSynced()) {
m_govman.RequestGovernanceObjectVotes(vNodesCopy, connman);
connman.ReleaseNodeVector(vNodesCopy);
m_govman.RequestGovernanceObjectVotes(snap.Nodes(), connman);
return;
}
@ -154,7 +153,7 @@ void CMasternodeSync::ProcessTick()
LogPrint(BCLog::MNSYNC, "CMasternodeSync::ProcessTick -- nTick %d nCurrentAsset %d nTriedPeerCount %d nSyncProgress %f\n", nTick, nCurrentAsset, nTriedPeerCount, nSyncProgress);
uiInterface.NotifyAdditionalDataSyncProgressChanged(nSyncProgress);
for (auto& pnode : vNodesCopy)
for (auto& pnode : snap.Nodes())
{
CNetMsgMaker msgMaker(pnode->GetCommonVersion());
@ -189,7 +188,7 @@ void CMasternodeSync::ProcessTick()
}
if (nCurrentAsset == MASTERNODE_SYNC_BLOCKCHAIN) {
int64_t nTimeSyncTimeout = vNodesCopy.size() > 3 ? MASTERNODE_SYNC_TICK_SECONDS : MASTERNODE_SYNC_TIMEOUT_SECONDS;
int64_t nTimeSyncTimeout = snap.Nodes().size() > 3 ? MASTERNODE_SYNC_TICK_SECONDS : MASTERNODE_SYNC_TIMEOUT_SECONDS;
if (fReachedBestHeader && (GetTime() - nTimeLastBumped > nTimeSyncTimeout)) {
// At this point we know that:
// a) there are peers (because we are looping on at least one of them);
@ -205,7 +204,7 @@ void CMasternodeSync::ProcessTick()
if (gArgs.GetBoolArg("-syncmempool", DEFAULT_SYNC_MEMPOOL)) {
// Now that the blockchain is synced request the mempool from the connected outbound nodes if possible
for (auto pNodeTmp : vNodesCopy) {
for (auto pNodeTmp : snap.Nodes()) {
bool fRequestedEarlier = m_netfulfilledman.HasFulfilledRequest(pNodeTmp->addr, "mempool-sync");
if (pNodeTmp->nVersion >= 70216 && !pNodeTmp->IsInboundConn() && !fRequestedEarlier && !pNodeTmp->IsBlockRelayOnly()) {
m_netfulfilledman.AddFulfilledRequest(pNodeTmp->addr, "mempool-sync");
@ -222,7 +221,6 @@ void CMasternodeSync::ProcessTick()
if(nCurrentAsset == MASTERNODE_SYNC_GOVERNANCE) {
if (!m_govman.IsValid()) {
SwitchToNextAsset();
connman.ReleaseNodeVector(vNodesCopy);
return;
}
LogPrint(BCLog::GOBJECT, "CMasternodeSync::ProcessTick -- nTick %d nCurrentAsset %d nTimeLastBumped %lld GetTime() %lld diff %lld\n", nTick, nCurrentAsset, nTimeLastBumped, GetTime(), GetTime() - nTimeLastBumped);
@ -235,7 +233,6 @@ void CMasternodeSync::ProcessTick()
// it's kind of ok to skip this for now, hopefully we'll catch up later?
}
SwitchToNextAsset();
connman.ReleaseNodeVector(vNodesCopy);
return;
}
@ -259,12 +256,11 @@ void CMasternodeSync::ProcessTick()
if (nCurrentAsset != MASTERNODE_SYNC_GOVERNANCE) {
// looped through all nodes and not syncing governance yet/already, release them
connman.ReleaseNodeVector(vNodesCopy);
return;
}
// request votes on per-obj basis from each node
for (const auto& pnode : vNodesCopy) {
for (const auto& pnode : snap.Nodes()) {
if(!m_netfulfilledman.HasFulfilledRequest(pnode->addr, "governance-sync")) {
continue; // to early for this node
}
@ -291,16 +287,12 @@ void CMasternodeSync::ProcessTick()
// reset nTimeNoObjectsLeft to be able to use the same condition on resync
nTimeNoObjectsLeft = 0;
SwitchToNextAsset();
connman.ReleaseNodeVector(vNodesCopy);
return;
}
nLastTick = nTick;
nLastVotes = m_govman.GetVoteCount();
}
}
// looped through all nodes, release them
connman.ReleaseNodeVector(vNodesCopy);
}
void CMasternodeSync::SendGovernanceSyncRequest(CNode* pnode) const

File diff suppressed because it is too large

src/net.h

@ -281,7 +281,7 @@ public:
int64_t nLastBlockTime;
int64_t nTimeConnected;
int64_t nTimeOffset;
std::string addrName;
std::string m_addr_name;
int nVersion;
std::string cleanSubVer;
bool fInbound;
@ -471,14 +471,17 @@ public:
const CAddress addr;
// Bind address of our side of the connection
const CAddress addrBind;
const std::string m_addr_name;
//! Whether this peer is an inbound onion, i.e. connected via our Tor onion service.
const bool m_inbound_onion;
std::atomic<int> nNumWarningsSkipped{0};
std::atomic<int> nVersion{0};
Mutex m_subver_mutex;
/**
* cleanSubVer is a sanitized string of the user agent byte array we read
* from the wire. This cleaned string can safely be logged or displayed.
*/
std::string cleanSubVer GUARDED_BY(cs_SubVer){};
RecursiveMutex cs_SubVer; // used for both cleanSubVer and strSubVer
std::string cleanSubVer GUARDED_BY(m_subver_mutex){};
bool m_prefer_evict{false}; // This peer is preferred for eviction.
bool HasPermission(NetPermissionFlags permission) const {
return NetPermissions::HasFlag(m_permissionFlags, permission);
@ -621,7 +624,7 @@ public:
bool IsBlockRelayOnly() const;
CNode(NodeId id, ServiceFlags nLocalServicesIn, SOCKET hSocketIn, const CAddress &addrIn, uint64_t nKeyedNetGroupIn, uint64_t nLocalHostNonceIn, const CAddress &addrBindIn, const std::string &addrNameIn, ConnectionType conn_type_in, bool inbound_onion = false);
CNode(NodeId id, ServiceFlags nLocalServicesIn, SOCKET hSocketIn, const CAddress &addrIn, uint64_t nKeyedNetGroupIn, uint64_t nLocalHostNonceIn, const CAddress &addrBindIn, const std::string &addrNameIn, ConnectionType conn_type_in, bool inbound_onion);
~CNode();
CNode(const CNode&) = delete;
CNode& operator=(const CNode&) = delete;
@ -649,7 +652,7 @@ public:
* @return True if the peer should stay connected,
* False if the peer should be disconnected from.
*/
bool ReceiveMsgBytes(Span<const uint8_t> msg_bytes, bool& complete);
bool ReceiveMsgBytes(Span<const uint8_t> msg_bytes, bool& complete) EXCLUSIVE_LOCKS_REQUIRED(!cs_vRecv);
void SetCommonVersion(int greatest_common_version)
{
@ -661,9 +664,9 @@ public:
return m_greatest_common_version;
}
CService GetAddrLocal() const;
CService GetAddrLocal() const EXCLUSIVE_LOCKS_REQUIRED(!m_addr_local_mutex);
//! May not be called more than once
void SetAddrLocal(const CService& addrLocalIn);
void SetAddrLocal(const CService& addrLocalIn) EXCLUSIVE_LOCKS_REQUIRED(!m_addr_local_mutex);
CNode* AddRef()
{
@ -676,20 +679,15 @@ public:
nRefCount--;
}
void CloseSocketDisconnect(CConnman* connman);
void CloseSocketDisconnect(CConnman* connman) EXCLUSIVE_LOCKS_REQUIRED(!cs_hSocket);
void copyStats(CNodeStats &stats, const std::vector<bool> &m_asmap);
void copyStats(CNodeStats &stats, const std::vector<bool> &m_asmap) EXCLUSIVE_LOCKS_REQUIRED(!m_subver_mutex, !m_addr_local_mutex, !cs_vSend, !cs_vRecv);
ServiceFlags GetLocalServices() const
{
return nLocalServices;
}
std::string GetAddrName() const;
//! Sets the addrName only if it was not previously set
void MaybeSetAddrName(const std::string& addrNameIn);
std::string ConnectionTypeAsString() const { return ::ConnectionTypeAsString(m_conn_type); }
/** A ping-pong round trip has completed successfully. Update latest and minimum ping times. */
@ -698,8 +696,6 @@ public:
m_min_ping_time = std::min(m_min_ping_time.load(), ping_time);
}
/** Whether this peer is an inbound onion, e.g. connected via our Tor onion service. */
bool IsInboundOnion() const { return m_inbound_onion; }
std::string GetLogString() const;
bool CanRelay() const { return !m_masternode_connection || m_masternode_iqr_connection; }
@ -769,15 +765,9 @@ private:
std::list<CNetMessage> vRecvMsg; // Used only by SocketHandler thread
mutable RecursiveMutex cs_addrName;
std::string addrName GUARDED_BY(cs_addrName);
// Our address, as reported by the peer
CService addrLocal GUARDED_BY(cs_addrLocal);
mutable RecursiveMutex cs_addrLocal;
//! Whether this peer is an inbound onion, e.g. connected via our Tor onion service.
const bool m_inbound_onion{false};
CService addrLocal GUARDED_BY(m_addr_local_mutex);
mutable Mutex m_addr_local_mutex;
// Challenge sent in VERSION to be answered with MNAUTH (only happens between MNs)
mutable Mutex cs_mnauth;
@ -867,7 +857,10 @@ public:
bool m_i2p_accept_incoming;
};
void Init(const Options& connOptions) {
void Init(const Options& connOptions) EXCLUSIVE_LOCKS_REQUIRED(!m_added_nodes_mutex, !m_total_bytes_sent_mutex)
{
AssertLockNotHeld(m_total_bytes_sent_mutex);
nLocalServices = connOptions.nLocalServices;
nMaxConnections = connOptions.nMaxConnections;
m_max_outbound_full_relay = std::min(connOptions.m_max_outbound_full_relay, connOptions.nMaxConnections);
@ -883,13 +876,13 @@ public:
nReceiveFloodSize = connOptions.nReceiveFloodSize;
m_peer_connect_timeout = std::chrono::seconds{connOptions.m_peer_connect_timeout};
{
LOCK(cs_totalBytesSent);
LOCK(m_total_bytes_sent_mutex);
nMaxOutboundLimit = connOptions.nMaxOutboundLimit;
}
vWhitelistedRange = connOptions.vWhitelistedRange;
{
LOCK(cs_vAddedNodes);
vAddedNodes = connOptions.m_added_nodes;
LOCK(m_added_nodes_mutex);
m_added_nodes = connOptions.m_added_nodes;
}
socketEventsMode = connOptions.socketEventsMode;
m_onion_binds = connOptions.onion_binds;
@ -898,7 +891,8 @@ public:
CConnman(uint64_t seed0, uint64_t seed1, CAddrMan& addrman, bool network_active = true);
~CConnman();
bool Start(CDeterministicMNManager& dmnman, CMasternodeMetaMan& mn_metaman, CMasternodeSync& mn_sync,
CScheduler& scheduler, const Options& options);
CScheduler& scheduler, const Options& options)
EXCLUSIVE_LOCKS_REQUIRED(!m_total_bytes_sent_mutex, !m_added_nodes_mutex, !m_addr_fetches_mutex, !mutexMsgProc);
void StopThreads();
void StopNodes();
@ -908,7 +902,7 @@ public:
StopNodes();
};
void Interrupt();
void Interrupt() EXCLUSIVE_LOCKS_REQUIRED(!mutexMsgProc);
bool GetNetworkActive() const { return fNetworkActive; };
bool GetUseAddrmanOutgoing() const { return m_use_addrman_outgoing; };
void SetNetworkActive(bool active, CMasternodeSync* const mn_sync);
@ -924,8 +918,13 @@ public:
IsConnection,
};
void OpenNetworkConnection(const CAddress& addrConnect, bool fCountFailure, CSemaphoreGrant* grantOutbound, const char* strDest, ConnectionType conn_type, MasternodeConn masternode_connection = MasternodeConn::IsNotConnection, MasternodeProbeConn masternode_probe_connection = MasternodeProbeConn::IsNotConnection);
void OpenMasternodeConnection(const CAddress& addrConnect, MasternodeProbeConn probe = MasternodeProbeConn::IsConnection);
void OpenNetworkConnection(const CAddress& addrConnect, bool fCountFailure, CSemaphoreGrant* grantOutbound,
const char* strDest, ConnectionType conn_type,
MasternodeConn masternode_connection = MasternodeConn::IsNotConnection,
MasternodeProbeConn masternode_probe_connection = MasternodeProbeConn::IsNotConnection)
EXCLUSIVE_LOCKS_REQUIRED(!mutexMsgProc);
void OpenMasternodeConnection(const CAddress& addrConnect, MasternodeProbeConn probe = MasternodeProbeConn::IsConnection)
EXCLUSIVE_LOCKS_REQUIRED(!mutexMsgProc);
bool CheckIncomingNonce(uint64_t nonce);
struct CFullyConnectedOnly {
@ -966,13 +965,14 @@ public:
bool IsMasternodeOrDisconnectRequested(const CService& addr);
void PushMessage(CNode* pnode, CSerializedNetMsg&& msg);
void PushMessage(CNode* pnode, CSerializedNetMsg&& msg)
EXCLUSIVE_LOCKS_REQUIRED(!mutexMsgProc, !m_total_bytes_sent_mutex);
template<typename Condition, typename Callable>
bool ForEachNodeContinueIf(const Condition& cond, Callable&& func)
{
LOCK(cs_vNodes);
for (auto&& node : vNodes)
LOCK(m_nodes_mutex);
for (auto&& node : m_nodes)
if (cond(node))
if(!func(node))
return false;
@ -988,8 +988,8 @@ public:
template<typename Condition, typename Callable>
bool ForEachNodeContinueIf(const Condition& cond, Callable&& func) const
{
LOCK(cs_vNodes);
for (const auto& node : vNodes)
LOCK(m_nodes_mutex);
for (const auto& node : m_nodes)
if (cond(node))
if(!func(node))
return false;
@ -1005,8 +1005,8 @@ public:
template<typename Condition, typename Callable>
void ForEachNode(const Condition& cond, Callable&& func)
{
LOCK(cs_vNodes);
for (auto&& node : vNodes) {
LOCK(m_nodes_mutex);
for (auto&& node : m_nodes) {
if (cond(node))
func(node);
}
@ -1021,8 +1021,8 @@ public:
template<typename Condition, typename Callable>
void ForEachNode(const Condition& cond, Callable&& func) const
{
LOCK(cs_vNodes);
for (auto&& node : vNodes) {
LOCK(m_nodes_mutex);
for (auto&& node : m_nodes) {
if (cond(node))
func(node);
}
@ -1037,8 +1037,8 @@ public:
template<typename Condition, typename Callable, typename CallableAfter>
void ForEachNodeThen(const Condition& cond, Callable&& pre, CallableAfter&& post)
{
LOCK(cs_vNodes);
for (auto&& node : vNodes) {
LOCK(m_nodes_mutex);
for (auto&& node : m_nodes) {
if (cond(node))
pre(node);
}
@ -1054,8 +1054,8 @@ public:
template<typename Condition, typename Callable, typename CallableAfter>
void ForEachNodeThen(const Condition& cond, Callable&& pre, CallableAfter&& post) const
{
LOCK(cs_vNodes);
for (auto&& node : vNodes) {
LOCK(m_nodes_mutex);
for (auto&& node : m_nodes) {
if (cond(node))
pre(node);
}
@ -1068,9 +1068,6 @@ public:
ForEachNodeThen(FullyConnectedOnly, pre, post);
}
std::vector<CNode*> CopyNodeVector(std::function<bool(const CNode* pnode)> cond = AllNodes);
void ReleaseNodeVector(const std::vector<CNode*>& vecNodes);
// Addrman functions
/**
* Return all or many randomly selected addresses, optionally by network.
@ -1109,9 +1106,9 @@ public:
// Count the number of block-relay-only peers we have over our limit.
int GetExtraBlockRelayCount() const;
bool AddNode(const std::string& node);
bool RemoveAddedNode(const std::string& node);
std::vector<AddedNodeInfo> GetAddedNodeInfo() const;
bool AddNode(const std::string& node) EXCLUSIVE_LOCKS_REQUIRED(!m_added_nodes_mutex);
bool RemoveAddedNode(const std::string& node) EXCLUSIVE_LOCKS_REQUIRED(!m_added_nodes_mutex);
std::vector<AddedNodeInfo> GetAddedNodeInfo() const EXCLUSIVE_LOCKS_REQUIRED(!m_added_nodes_mutex);
/**
* Attempts to open a connection. Currently only used from tests.
@ -1124,7 +1121,7 @@ public:
* - Max total outbound connection capacity filled
* - Max connection capacity for type is filled
*/
bool AddConnection(const std::string& address, ConnectionType conn_type);
bool AddConnection(const std::string& address, ConnectionType conn_type) EXCLUSIVE_LOCKS_REQUIRED(!mutexMsgProc);
bool AddPendingMasternode(const uint256& proTxHash);
void SetMasternodeQuorumNodes(Consensus::LLMQType llmqType, const uint256& quorumHash, const std::set<uint256>& proTxHashes);
@ -1154,32 +1151,30 @@ public:
//! that peer during `net_processing.cpp:PushNodeVersion()`.
ServiceFlags GetLocalServices() const;
uint64_t GetMaxOutboundTarget() const;
uint64_t GetMaxOutboundTarget() const EXCLUSIVE_LOCKS_REQUIRED(!m_total_bytes_sent_mutex);
std::chrono::seconds GetMaxOutboundTimeframe() const;
//! check if the outbound target is reached
//! if param historicalBlockServingLimit is set true, the function will
//! response true if the limit for serving historical blocks has been reached
bool OutboundTargetReached(bool historicalBlockServingLimit) const;
bool OutboundTargetReached(bool historicalBlockServingLimit) const EXCLUSIVE_LOCKS_REQUIRED(!m_total_bytes_sent_mutex);
//! response the bytes left in the current max outbound cycle
//! in case of no limit, it will always response 0
uint64_t GetOutboundTargetBytesLeft() const;
uint64_t GetOutboundTargetBytesLeft() const EXCLUSIVE_LOCKS_REQUIRED(!m_total_bytes_sent_mutex);
//! returns the time left in the current max outbound cycle
//! in case of no limit, it will always return 0
std::chrono::seconds GetMaxOutboundTimeLeftInCycle() const;
std::chrono::seconds GetMaxOutboundTimeLeftInCycle() const EXCLUSIVE_LOCKS_REQUIRED(!m_total_bytes_sent_mutex);
uint64_t GetTotalBytesRecv() const;
uint64_t GetTotalBytesSent() const;
uint64_t GetTotalBytesSent() const EXCLUSIVE_LOCKS_REQUIRED(!m_total_bytes_sent_mutex);
/** Get a unique deterministic randomizer. */
CSipHasher GetDeterministicRandomizer(uint64_t id) const;
unsigned int GetReceiveFloodSize() const;
void WakeMessageHandler();
void WakeSelect();
void WakeMessageHandler() EXCLUSIVE_LOCKS_REQUIRED(!mutexMsgProc);
void WakeSelect() EXCLUSIVE_LOCKS_REQUIRED(!mutexMsgProc);
/** Attempts to obfuscate tx time through exponentially distributed emitting.
Works assuming that a single interval is used.
@ -1192,6 +1187,26 @@ public:
/** Return true if we should disconnect the peer for failing an inactivity check. */
bool ShouldRunInactivityChecks(const CNode& node, std::chrono::seconds now) const;
/**
* RAII helper to atomically create a copy of `m_nodes` and add a reference
* to each of the nodes. The nodes are released when this object is destroyed.
*/
class NodesSnapshot
{
public:
explicit NodesSnapshot(const CConnman& connman, std::function<bool(const CNode* pnode)> cond = AllNodes,
bool shuffle = false);
~NodesSnapshot();
const std::vector<CNode*>& Nodes() const
{
return m_nodes_copy;
}
private:
std::vector<CNode*> m_nodes_copy;
};
private:
struct ListenSocket {
public:
@ -1202,6 +1217,10 @@ private:
NetPermissionFlags m_permissions;
};
//! returns the time left in the current max outbound cycle
//! in case of no limit, it will always return 0
std::chrono::seconds GetMaxOutboundTimeLeftInCycle_() const EXCLUSIVE_LOCKS_REQUIRED(m_total_bytes_sent_mutex);
bool BindListenPort(const CService& bindAddr, bilingual_str& strError, NetPermissionFlags permissions);
bool Bind(const CService& addr, unsigned int flags, NetPermissionFlags permissions);
bool InitBinds(
@ -1209,17 +1228,19 @@ private:
const std::vector<NetWhitebindPermissions>& whiteBinds,
const std::vector<CService>& onion_binds);
void ThreadOpenAddedConnections();
void AddAddrFetch(const std::string& strDest);
void ProcessAddrFetch();
void ThreadOpenConnections(const std::vector<std::string> connect, CDeterministicMNManager& dmnman);
void ThreadMessageHandler();
void ThreadI2PAcceptIncoming(CMasternodeSync& mn_sync);
void AcceptConnection(const ListenSocket& hListenSocket, CMasternodeSync& mn_sync);
void ThreadOpenAddedConnections() EXCLUSIVE_LOCKS_REQUIRED(!m_added_nodes_mutex, !mutexMsgProc);
void AddAddrFetch(const std::string& strDest) EXCLUSIVE_LOCKS_REQUIRED(!m_addr_fetches_mutex);
void ProcessAddrFetch() EXCLUSIVE_LOCKS_REQUIRED(!m_addr_fetches_mutex, !mutexMsgProc);
void ThreadOpenConnections(const std::vector<std::string> connect, CDeterministicMNManager& dmnman)
EXCLUSIVE_LOCKS_REQUIRED(!m_addr_fetches_mutex, !m_added_nodes_mutex, !m_nodes_mutex, !mutexMsgProc);
void ThreadMessageHandler() EXCLUSIVE_LOCKS_REQUIRED(!mutexMsgProc);
void ThreadI2PAcceptIncoming(CMasternodeSync& mn_sync) EXCLUSIVE_LOCKS_REQUIRED(!mutexMsgProc);
void AcceptConnection(const ListenSocket& hListenSocket, CMasternodeSync& mn_sync)
EXCLUSIVE_LOCKS_REQUIRED(!mutexMsgProc);
/**
* Create a `CNode` object from a socket that has just been accepted and add the node to
* the `vNodes` member.
* the `m_nodes` member.
* @param[in] hSocket Connected socket to communicate with the peer.
* @param[in] permissionFlags The peer's permissions.
* @param[in] addr_bind The address and port at our side of the connection.
@ -1229,30 +1250,95 @@ private:
NetPermissionFlags permissionFlags,
const CAddress& addr_bind,
const CAddress& addr,
CMasternodeSync& mn_sync);
CMasternodeSync& mn_sync) EXCLUSIVE_LOCKS_REQUIRED(!mutexMsgProc);
void DisconnectNodes();
void NotifyNumConnectionsChanged(CMasternodeSync& mn_sync);
void CalculateNumConnectionsChangedStats();
/** Return true if the peer is inactive and should be disconnected. */
bool InactivityCheck(const CNode& node) const;
bool GenerateSelectSet(std::set<SOCKET> &recv_set, std::set<SOCKET> &send_set, std::set<SOCKET> &error_set);
/**
* Generate a collection of sockets to check for IO readiness.
* @param[in] nodes Select from these nodes' sockets.
* @param[out] recv_set Sockets to check for read readiness.
* @param[out] send_set Sockets to check for write readiness.
* @param[out] error_set Sockets to check for errors.
* @return true if at least one socket is to be checked (the returned set is not empty)
*/
bool GenerateSelectSet(const std::vector<CNode*>& nodes,
std::set<SOCKET>& recv_set,
std::set<SOCKET>& send_set,
std::set<SOCKET>& error_set);
/**
* Check which sockets are ready for IO.
* @param[in] nodes Select from these nodes' sockets (in supported event methods).
* @param[in] only_poll Permit zero timeout polling
* @param[out] recv_set Sockets which are ready for read.
* @param[out] send_set Sockets which are ready for write.
* @param[out] error_set Sockets which have errors.
* This calls `GenerateSelectSet()` to gather a list of sockets to check.
*/
void SocketEvents(const std::vector<CNode*>& nodes,
std::set<SOCKET>& recv_set,
std::set<SOCKET>& send_set,
std::set<SOCKET>& error_set,
bool only_poll);
#ifdef USE_KQUEUE
void SocketEventsKqueue(std::set<SOCKET> &recv_set, std::set<SOCKET> &send_set, std::set<SOCKET> &error_set, bool fOnlyPoll);
void SocketEventsKqueue(std::set<SOCKET>& recv_set,
std::set<SOCKET>& send_set,
std::set<SOCKET>& error_set,
bool only_poll);
#endif
#ifdef USE_EPOLL
void SocketEventsEpoll(std::set<SOCKET> &recv_set, std::set<SOCKET> &send_set, std::set<SOCKET> &error_set, bool fOnlyPoll);
void SocketEventsEpoll(std::set<SOCKET>& recv_set,
std::set<SOCKET>& send_set,
std::set<SOCKET>& error_set,
bool only_poll);
#endif
#ifdef USE_POLL
void SocketEventsPoll(std::set<SOCKET> &recv_set, std::set<SOCKET> &send_set, std::set<SOCKET> &error_set, bool fOnlyPoll);
void SocketEventsPoll(const std::vector<CNode*>& nodes,
std::set<SOCKET>& recv_set,
std::set<SOCKET>& send_set,
std::set<SOCKET>& error_set,
bool only_poll);
#endif
void SocketEventsSelect(std::set<SOCKET> &recv_set, std::set<SOCKET> &send_set, std::set<SOCKET> &error_set, bool fOnlyPoll);
void SocketEvents(std::set<SOCKET> &recv_set, std::set<SOCKET> &send_set, std::set<SOCKET> &error_set, bool fOnlyPoll);
void SocketHandler(CMasternodeSync& mn_sync);
void ThreadSocketHandler(CMasternodeSync& mn_sync);
void ThreadDNSAddressSeed();
void SocketEventsSelect(const std::vector<CNode*>& nodes,
std::set<SOCKET>& recv_set,
std::set<SOCKET>& send_set,
std::set<SOCKET>& error_set,
bool only_poll);
/**
* Check connected and listening sockets for IO readiness and process them accordingly.
*/
void SocketHandler(CMasternodeSync& mn_sync) EXCLUSIVE_LOCKS_REQUIRED(!m_total_bytes_sent_mutex, !mutexMsgProc);
/**
* Do the read/write for connected sockets that are ready for IO.
* @param[in] recv_set Sockets that are ready for read.
* @param[in] send_set Sockets that are ready for send.
* @param[in] error_set Sockets that have an exceptional condition (error).
*/
void SocketHandlerConnected(const std::set<SOCKET>& recv_set,
const std::set<SOCKET>& send_set,
const std::set<SOCKET>& error_set)
EXCLUSIVE_LOCKS_REQUIRED(!m_total_bytes_sent_mutex, !mutexMsgProc);
/**
* Accept incoming connections, one from each read-ready listening socket.
* @param[in] recv_set Sockets that are ready for read.
*/
void SocketHandlerListening(const std::set<SOCKET>& recv_set, CMasternodeSync& mn_sync)
EXCLUSIVE_LOCKS_REQUIRED(!mutexMsgProc);
void ThreadSocketHandler(CMasternodeSync& mn_sync) EXCLUSIVE_LOCKS_REQUIRED(!m_total_bytes_sent_mutex, !mutexMsgProc);
void ThreadDNSAddressSeed() EXCLUSIVE_LOCKS_REQUIRED(!m_addr_fetches_mutex, !m_nodes_mutex);
void ThreadOpenMasternodeConnections(CDeterministicMNManager& dmnman, CMasternodeMetaMan& mn_metaman,
CMasternodeSync& mn_sync);
CMasternodeSync& mn_sync)
EXCLUSIVE_LOCKS_REQUIRED(!m_addr_fetches_mutex, !m_nodes_mutex, !mutexMsgProc);
uint64_t CalculateKeyedNetGroup(const CAddress& ad) const;
@ -1276,12 +1362,12 @@ private:
NodeId GetNewNodeId();
size_t SocketSendData(CNode& node) EXCLUSIVE_LOCKS_REQUIRED(node.cs_vSend);
size_t SocketRecvData(CNode* pnode);
size_t SocketRecvData(CNode* pnode) EXCLUSIVE_LOCKS_REQUIRED(!mutexMsgProc);
void DumpAddresses();
// Network stats
void RecordBytesRecv(uint64_t bytes);
void RecordBytesSent(uint64_t bytes);
void RecordBytesSent(uint64_t bytes) EXCLUSIVE_LOCKS_REQUIRED(!m_total_bytes_sent_mutex);
/**
* Return vector of current BLOCK_RELAY peers.
@ -1295,15 +1381,14 @@ private:
void UnregisterEvents(CNode* pnode);
// Network usage totals
mutable RecursiveMutex cs_totalBytesRecv;
mutable RecursiveMutex cs_totalBytesSent;
uint64_t nTotalBytesRecv GUARDED_BY(cs_totalBytesRecv) {0};
uint64_t nTotalBytesSent GUARDED_BY(cs_totalBytesSent) {0};
mutable Mutex m_total_bytes_sent_mutex;
std::atomic<uint64_t> nTotalBytesRecv{0};
uint64_t nTotalBytesSent GUARDED_BY(m_total_bytes_sent_mutex) {0};
// outbound limit & stats
uint64_t nMaxOutboundTotalBytesSentInCycle GUARDED_BY(cs_totalBytesSent) {0};
std::chrono::seconds nMaxOutboundCycleStartTime GUARDED_BY(cs_totalBytesSent) {0};
uint64_t nMaxOutboundLimit GUARDED_BY(cs_totalBytesSent);
uint64_t nMaxOutboundTotalBytesSentInCycle GUARDED_BY(m_total_bytes_sent_mutex) {0};
std::chrono::seconds nMaxOutboundCycleStartTime GUARDED_BY(m_total_bytes_sent_mutex) {0};
uint64_t nMaxOutboundLimit GUARDED_BY(m_total_bytes_sent_mutex);
// P2P timeout in seconds
std::chrono::seconds m_peer_connect_timeout;
@ -1320,21 +1405,23 @@ private:
bool fAddressesInitialized{false};
CAddrMan& addrman;
std::deque<std::string> m_addr_fetches GUARDED_BY(m_addr_fetches_mutex);
RecursiveMutex m_addr_fetches_mutex;
std::vector<std::string> vAddedNodes GUARDED_BY(cs_vAddedNodes);
mutable RecursiveMutex cs_vAddedNodes;
Mutex m_addr_fetches_mutex;
std::vector<std::string> m_added_nodes GUARDED_BY(m_added_nodes_mutex);
mutable Mutex m_added_nodes_mutex;
std::vector<CNode*> m_nodes GUARDED_BY(m_nodes_mutex);
std::list<CNode*> m_nodes_disconnected;
mutable RecursiveMutex m_nodes_mutex;
std::atomic<NodeId> nLastNodeId{0};
unsigned int nPrevNodeCount{0};
std::vector<uint256> vPendingMasternodes;
mutable RecursiveMutex cs_vPendingMasternodes;
std::map<std::pair<Consensus::LLMQType, uint256>, std::set<uint256>> masternodeQuorumNodes GUARDED_BY(cs_vPendingMasternodes);
std::map<std::pair<Consensus::LLMQType, uint256>, std::set<uint256>> masternodeQuorumRelayMembers GUARDED_BY(cs_vPendingMasternodes);
std::set<uint256> masternodePendingProbes GUARDED_BY(cs_vPendingMasternodes);
std::vector<CNode*> vNodes GUARDED_BY(cs_vNodes);
std::list<CNode*> vNodesDisconnected;
mutable Mutex cs_mapSocketToNode;
std::unordered_map<SOCKET, CNode*> mapSocketToNode GUARDED_BY(cs_mapSocketToNode);
mutable RecursiveMutex cs_vNodes;
std::atomic<NodeId> nLastNodeId{0};
unsigned int nPrevNodeCount{0};
/**
* Cache responses to addr requests to minimize privacy leak.


@ -374,38 +374,45 @@ public:
bool ignore_incoming_txs);
/** Overridden from CValidationInterface. */
void BlockConnected(const std::shared_ptr<const CBlock>& pblock, const CBlockIndex* pindexConnected) override;
void BlockDisconnected(const std::shared_ptr<const CBlock> &block, const CBlockIndex* pindex) override;
void UpdatedBlockTip(const CBlockIndex *pindexNew, const CBlockIndex *pindexFork, bool fInitialDownload) override;
void BlockChecked(const CBlock& block, const BlockValidationState& state) override;
void BlockConnected(const std::shared_ptr<const CBlock>& pblock, const CBlockIndex* pindexConnected) override
EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, !m_recent_confirmed_transactions_mutex);
void BlockDisconnected(const std::shared_ptr<const CBlock> &block, const CBlockIndex* pindex) override
EXCLUSIVE_LOCKS_REQUIRED(!m_recent_confirmed_transactions_mutex);
void UpdatedBlockTip(const CBlockIndex *pindexNew, const CBlockIndex *pindexFork, bool fInitialDownload) override
EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
void BlockChecked(const CBlock& block, const BlockValidationState& state) override
EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
void NewPoWValidBlock(const CBlockIndex *pindex, const std::shared_ptr<const CBlock>& pblock) override;
/** Implement NetEventsInterface */
void InitializeNode(CNode* pnode) override;
void FinalizeNode(const CNode& node) override;
bool ProcessMessages(CNode* pfrom, std::atomic<bool>& interrupt) override;
bool SendMessages(CNode* pto) override EXCLUSIVE_LOCKS_REQUIRED(pto->cs_sendProcessing);
void InitializeNode(CNode* pnode) override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
void FinalizeNode(const CNode& node) override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
bool ProcessMessages(CNode* pfrom, std::atomic<bool>& interrupt) override
EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, !m_recent_confirmed_transactions_mutex);
bool SendMessages(CNode* pto) override EXCLUSIVE_LOCKS_REQUIRED(pto->cs_sendProcessing)
EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, !m_recent_confirmed_transactions_mutex);
/** Implement PeerManager */
void CheckForStaleTipAndEvictPeers() override;
bool GetNodeStateStats(NodeId nodeid, CNodeStateStats& stats) const override;
bool GetNodeStateStats(NodeId nodeid, CNodeStateStats& stats) const override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
bool IgnoresIncomingTxs() override { return m_ignore_incoming_txs; }
void SendPings() override;
void PushInventory(NodeId nodeid, const CInv& inv) override;
void RelayInv(CInv &inv, const int minProtoVersion) override;
void RelayInvFiltered(CInv &inv, const CTransaction &relatedTx, const int minProtoVersion) override;
void RelayInvFiltered(CInv &inv, const uint256 &relatedTxHash, const int minProtoVersion) override;
void RelayTransaction(const uint256& txid) override;
void SendPings() override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);;
void PushInventory(NodeId nodeid, const CInv& inv) override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
void RelayInv(CInv &inv, const int minProtoVersion) override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
void RelayInvFiltered(CInv &inv, const CTransaction &relatedTx, const int minProtoVersion) override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
void RelayInvFiltered(CInv &inv, const uint256 &relatedTxHash, const int minProtoVersion) override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
void RelayTransaction(const uint256& txid) override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
void SetBestHeight(int height) override { m_best_height = height; };
void Misbehaving(const NodeId pnode, const int howmuch, const std::string& message = "") override;
void Misbehaving(const NodeId pnode, const int howmuch, const std::string& message = "") override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
void ProcessMessage(CNode& pfrom, const std::string& msg_type, CDataStream& vRecv,
const std::chrono::microseconds time_received, const std::atomic<bool>& interruptMsgProc) override;
bool IsBanned(NodeId pnode) override EXCLUSIVE_LOCKS_REQUIRED(cs_main);
bool IsInvInFilter(NodeId nodeid, const uint256& hash) const override;
const std::chrono::microseconds time_received, const std::atomic<bool>& interruptMsgProc) override
EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, !m_recent_confirmed_transactions_mutex);
bool IsBanned(NodeId pnode) override EXCLUSIVE_LOCKS_REQUIRED(cs_main, !m_peer_mutex);
bool IsInvInFilter(NodeId nodeid, const uint256& hash) const override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
private:
/** Helper to process result of external handlers of message */
void ProcessPeerMsgRet(const PeerMsgRet& ret, CNode& pfrom);
void ProcessPeerMsgRet(const PeerMsgRet& ret, CNode& pfrom) EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
/** Consider evicting an outbound peer based on the amount of time they've been behind our tip */
void ConsiderEviction(CNode& pto, int64_t time_in_seconds) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
@ -414,15 +421,15 @@ private:
void EvictExtraOutboundPeers(int64_t time_in_seconds) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
/** Retrieve unbroadcast transactions from the mempool and reattempt sending to peers */
void ReattemptInitialBroadcast(CScheduler& scheduler);
void ReattemptInitialBroadcast(CScheduler& scheduler) EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
/** Get a shared pointer to the Peer object.
* May return an empty shared_ptr if the Peer object can't be found. */
PeerRef GetPeerRef(NodeId id) const;
PeerRef GetPeerRef(NodeId id) const EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
/** Get a shared pointer to the Peer object and remove it from m_peer_map.
* May return an empty shared_ptr if the Peer object can't be found. */
PeerRef RemovePeer(NodeId id);
PeerRef RemovePeer(NodeId id) EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
/**
* Potentially mark a node discouraged based on the contents of a BlockValidationState object
@ -435,7 +442,8 @@ private:
* @return Returns true if the peer was punished (probably disconnected)
*/
bool MaybePunishNodeForBlock(NodeId nodeid, const BlockValidationState& state,
bool via_compact_block, const std::string& message = "");
bool via_compact_block, const std::string& message = "")
EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
/**
* Potentially ban a node based on the contents of a TxValidationState object
@ -444,7 +452,8 @@ private:
*
* Changes here may need to be reflected in TxRelayMayResultInDisconnect().
*/
bool MaybePunishNodeForTx(NodeId nodeid, const TxValidationState& state, const std::string& message = "");
bool MaybePunishNodeForTx(NodeId nodeid, const TxValidationState& state, const std::string& message = "")
EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
/** Maybe disconnect a peer and discourage future connections from its address.
*
@ -454,14 +463,16 @@ private:
*/
bool MaybeDiscourageAndDisconnect(CNode& pnode, Peer& peer);
void ProcessOrphanTx(std::set<uint256>& orphan_work_set)
EXCLUSIVE_LOCKS_REQUIRED(cs_main, g_cs_orphans);
void ProcessOrphanTx(std::set<uint256>& orphan_work_set) EXCLUSIVE_LOCKS_REQUIRED(cs_main, g_cs_orphans)
EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
/** Process a single headers message from a peer. */
void ProcessHeadersMessage(CNode& pfrom, const Peer& peer,
const std::vector<CBlockHeader>& headers,
bool via_compact_block);
bool via_compact_block)
EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
void SendBlockTransactions(CNode& pfrom, const CBlock& block, const BlockTransactionsRequest& req);
void SendBlockTransactions(CNode& pfrom, const CBlock& block, const BlockTransactionsRequest& req)
EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
/** Send a version message to a peer */
void PushNodeVersion(CNode& pnode, const Peer& peer);
@ -482,7 +493,7 @@ private:
* @param[in] fReachable Whether the address' network is reachable. We relay unreachable
* addresses less.
*/
void RelayAddress(NodeId originator, const CAddress& addr, bool fReachable);
void RelayAddress(NodeId originator, const CAddress& addr, bool fReachable) EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
const CChainParams& m_chainparams;
CConnman& m_connman;
@ -608,7 +619,8 @@ private:
/** Number of outbound peers with m_chain_sync.m_protect. */
int m_outbound_peers_with_protect_from_disconnect GUARDED_BY(cs_main) = 0;
bool AlreadyHave(const CInv& inv) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
bool AlreadyHave(const CInv& inv)
EXCLUSIVE_LOCKS_REQUIRED(cs_main, !m_recent_confirmed_transactions_mutex);
/**
* Filter for transactions that were recently rejected by
@ -3357,7 +3369,7 @@ void PeerManagerImpl::ProcessMessage(
pfrom.nServices = nServices;
pfrom.SetAddrLocal(addrMe);
{
LOCK(pfrom.cs_SubVer);
LOCK(pfrom.m_subver_mutex);
pfrom.cleanSubVer = cleanSubVer;
}
peer->m_starting_height = starting_height;


@ -63,7 +63,7 @@ public:
//! Return number of connections, default is in- and outbound (total)
int getNumConnections(unsigned int flags = CONNECTIONS_ALL) const;
int getNumBlocks() const;
uint256 getBestBlockHash();
uint256 getBestBlockHash() EXCLUSIVE_LOCKS_REQUIRED(!m_cached_tip_mutex);
int getHeaderTipHeight() const;
int64_t getHeaderTipTime() const;


@ -28,7 +28,7 @@ bool NodeLessThan::operator()(const CNodeCombinedStats &left, const CNodeCombine
case PeerTableModel::NetNodeId:
return pLeft->nodeid < pRight->nodeid;
case PeerTableModel::Address:
return pLeft->addrName.compare(pRight->addrName) < 0;
return pLeft->m_addr_name.compare(pRight->m_addr_name) < 0;
case PeerTableModel::Network:
return pLeft->m_network < pRight->m_network;
case PeerTableModel::Ping:
@ -163,7 +163,7 @@ QVariant PeerTableModel::data(const QModelIndex &index, int role) const
return (qint64)rec->nodeStats.nodeid;
case Address:
// prepend to peer address down-arrow symbol for inbound connection and up-arrow for outbound connection
return QString(rec->nodeStats.fInbound ? "↓ " : "↑ ") + QString::fromStdString(rec->nodeStats.addrName);
return QString(rec->nodeStats.fInbound ? "↓ " : "↑ ") + QString::fromStdString(rec->nodeStats.m_addr_name);
case Network:
return GUIUtil::NetworkToQString(rec->nodeStats.m_network);
case Ping:


@ -1231,7 +1231,7 @@ void RPCConsole::updateDetailWidget()
}
const CNodeCombinedStats *stats = clientModel->getPeerTableModel()->getNodeStats(selected_rows.first().row());
// update the detail ui with latest node information
QString peerAddrDetails(QString::fromStdString(stats->nodeStats.addrName) + " ");
QString peerAddrDetails(QString::fromStdString(stats->nodeStats.m_addr_name) + " ");
peerAddrDetails += tr("(peer id: %1)").arg(QString::number(stats->nodeStats.nodeid));
if (!stats->nodeStats.addrLocal.empty())
peerAddrDetails += "<br />" + tr("via %1").arg(QString::fromStdString(stats->nodeStats.addrLocal));


@ -377,7 +377,7 @@ public:
{
}
void AddEvent(uint32_t event_info) noexcept
void AddEvent(uint32_t event_info) noexcept EXCLUSIVE_LOCKS_REQUIRED(!m_events_mutex)
{
LOCK(m_events_mutex);
@ -391,7 +391,7 @@ public:
/**
* Feed (the hash of) all events added through AddEvent() to hasher.
*/
void SeedEvents(CSHA512& hasher) noexcept
void SeedEvents(CSHA512& hasher) noexcept EXCLUSIVE_LOCKS_REQUIRED(!m_events_mutex)
{
// We use only SHA256 for the events hashing to get the ASM speedups we have for SHA256,
// since we want it to be fast as network peers may be able to trigger it repeatedly.
@ -410,7 +410,7 @@ public:
*
* If this function has never been called with strong_seed = true, false is returned.
*/
bool MixExtract(unsigned char* out, size_t num, CSHA512&& hasher, bool strong_seed) noexcept
bool MixExtract(unsigned char* out, size_t num, CSHA512&& hasher, bool strong_seed) noexcept EXCLUSIVE_LOCKS_REQUIRED(!m_mutex)
{
assert(num <= 32);
unsigned char buf[64];


@ -200,7 +200,7 @@ static RPCHelpMan getpeerinfo()
CNodeStateStats statestats;
bool fStateStats = peerman.GetNodeStateStats(stats.nodeid, statestats);
obj.pushKV("id", stats.nodeid);
obj.pushKV("addr", stats.addrName);
obj.pushKV("addr", stats.m_addr_name);
if (stats.addrBind.IsValid()) {
obj.pushKV("addrbind", stats.addrBind.ToString());
}


@ -46,10 +46,10 @@ public:
typedef std::function<void()> Function;
/** Call func at/after time t */
void schedule(Function f, std::chrono::system_clock::time_point t);
void schedule(Function f, std::chrono::system_clock::time_point t) EXCLUSIVE_LOCKS_REQUIRED(!newTaskMutex);
/** Call f once after the delta has passed */
void scheduleFromNow(Function f, std::chrono::milliseconds delta)
void scheduleFromNow(Function f, std::chrono::milliseconds delta) EXCLUSIVE_LOCKS_REQUIRED(!newTaskMutex)
{
schedule(std::move(f), std::chrono::system_clock::now() + delta);
}
@ -60,29 +60,29 @@ public:
* The timing is not exact: Every time f is finished, it is rescheduled to run again after delta. If you need more
* accurate scheduling, don't use this method.
*/
void scheduleEvery(Function f, std::chrono::milliseconds delta);
void scheduleEvery(Function f, std::chrono::milliseconds delta) EXCLUSIVE_LOCKS_REQUIRED(!newTaskMutex);
/**
* Mock the scheduler to fast forward in time.
* Iterates through items on taskQueue and reschedules them
* to be delta_seconds sooner.
*/
void MockForward(std::chrono::seconds delta_seconds);
void MockForward(std::chrono::seconds delta_seconds) EXCLUSIVE_LOCKS_REQUIRED(!newTaskMutex);
/**
* Services the queue 'forever'. Should be run in a thread.
*/
void serviceQueue();
void serviceQueue() EXCLUSIVE_LOCKS_REQUIRED(!newTaskMutex);
/** Tell any threads running serviceQueue to stop as soon as the current task is done */
void stop()
void stop() EXCLUSIVE_LOCKS_REQUIRED(!newTaskMutex)
{
WITH_LOCK(newTaskMutex, stopRequested = true);
newTaskScheduled.notify_all();
if (m_service_thread.joinable()) m_service_thread.join();
}
/** Tell any threads running serviceQueue to stop when there is no work left to be done */
void StopWhenDrained()
void StopWhenDrained() EXCLUSIVE_LOCKS_REQUIRED(!newTaskMutex)
{
WITH_LOCK(newTaskMutex, stopWhenEmpty = true);
newTaskScheduled.notify_all();
@ -94,10 +94,11 @@ public:
* and first and last task times
*/
size_t getQueueInfo(std::chrono::system_clock::time_point& first,
std::chrono::system_clock::time_point& last) const;
std::chrono::system_clock::time_point& last) const
EXCLUSIVE_LOCKS_REQUIRED(!newTaskMutex);
/** Returns true if there are threads actively running in serviceQueue() */
bool AreThreadsServicingQueue() const;
bool AreThreadsServicingQueue() const EXCLUSIVE_LOCKS_REQUIRED(!newTaskMutex);
private:
mutable Mutex newTaskMutex;
@ -128,8 +129,8 @@ private:
std::list<std::function<void()>> m_callbacks_pending GUARDED_BY(m_callbacks_mutex);
bool m_are_callbacks_running GUARDED_BY(m_callbacks_mutex) = false;
void MaybeScheduleProcessQueue();
void ProcessQueue();
void MaybeScheduleProcessQueue() EXCLUSIVE_LOCKS_REQUIRED(!m_callbacks_mutex);
void ProcessQueue() EXCLUSIVE_LOCKS_REQUIRED(!m_callbacks_mutex);
public:
explicit SingleThreadedSchedulerClient(CScheduler& scheduler LIFETIMEBOUND) : m_scheduler{scheduler} {}
@ -140,15 +141,15 @@ public:
* Practically, this means that callbacks can behave as if they are executed
* in order by a single thread.
*/
void AddToProcessQueue(std::function<void()> func);
void AddToProcessQueue(std::function<void()> func) EXCLUSIVE_LOCKS_REQUIRED(!m_callbacks_mutex);
/**
* Processes all remaining queue members on the calling thread, blocking until queue is empty
* Must be called after the CScheduler has no remaining processing threads!
*/
void EmptyQueue();
void EmptyQueue() EXCLUSIVE_LOCKS_REQUIRED(!m_callbacks_mutex);
size_t CallbacksPending();
size_t CallbacksPending() EXCLUSIVE_LOCKS_REQUIRED(!m_callbacks_mutex);
};
#endif


@ -77,8 +77,6 @@ inline void AssertLockNotHeldInternal(const char* pszName, const char* pszFile,
inline void DeleteLock(void* cs) {}
inline bool LockStackEmpty() { return true; }
#endif
#define AssertLockHeld(cs) AssertLockHeldInternal(#cs, __FILE__, __LINE__, &cs)
#define AssertLockNotHeld(cs) AssertLockNotHeldInternal(#cs, __FILE__, __LINE__, &cs)
/**
* Template mixin that adds -Wthread-safety locking annotations and lock order
@ -138,10 +136,18 @@ public:
using RecursiveMutex = AnnotatedMixin<std::recursive_mutex>;
/** Wrapped mutex: supports waiting but not recursive locking */
typedef AnnotatedMixin<std::mutex> Mutex;
using Mutex = AnnotatedMixin<std::mutex>;
/** Wrapped shared mutex: supports read locking via .shared_lock, exlusive locking via .lock;
* does not support recursive locking */
typedef SharedAnnotatedMixin<std::shared_mutex> SharedMutex;
using SharedMutex = SharedAnnotatedMixin<std::shared_mutex>;
#define AssertLockHeld(cs) AssertLockHeldInternal(#cs, __FILE__, __LINE__, &cs)
inline void AssertLockNotHeldInline(const char* name, const char* file, int line, Mutex* cs) EXCLUSIVE_LOCKS_REQUIRED(!cs) { AssertLockNotHeldInternal(name, file, line, cs); }
inline void AssertLockNotHeldInline(const char* name, const char* file, int line, RecursiveMutex* cs) LOCKS_EXCLUDED(cs) { AssertLockNotHeldInternal(name, file, line, cs); }
inline void AssertLockNotHeldInline(const char* name, const char* file, int line, SharedMutex* cs) LOCKS_EXCLUDED(cs) { AssertLockNotHeldInternal(name, file, line, cs); }
#define AssertLockNotHeld(cs) AssertLockNotHeldInline(#cs, __FILE__, __LINE__, &cs)
/** Prints a lock contention to the log */
void LockContention(const char* pszName, const char* pszFile, int nLine);


@ -74,7 +74,7 @@ BOOST_AUTO_TEST_CASE(outbound_slow_chain_eviction)
// Mock an outbound peer
CAddress addr1(ip(0xa0b0c001), NODE_NONE);
CNode dummyNode1(id++, ServiceFlags(NODE_NETWORK), INVALID_SOCKET, addr1, 0, 0, CAddress(), "", ConnectionType::OUTBOUND_FULL_RELAY);
CNode dummyNode1(id++, ServiceFlags(NODE_NETWORK), INVALID_SOCKET, addr1, /* nKeyedNetGroupIn */ 0, /* nLocalHostNonceIn */ 0, CAddress(), /* pszDest */ "", ConnectionType::OUTBOUND_FULL_RELAY, /* inbound_onion */ false);
dummyNode1.SetCommonVersion(PROTOCOL_VERSION);
peerLogic->InitializeNode(&dummyNode1);
@ -124,7 +124,7 @@ BOOST_AUTO_TEST_CASE(outbound_slow_chain_eviction)
static void AddRandomOutboundPeer(std::vector<CNode*>& vNodes, PeerManager& peerLogic, ConnmanTestMsg& connman)
{
CAddress addr(ip(g_insecure_rand_ctx.randbits(32)), NODE_NONE);
vNodes.emplace_back(new CNode(id++, ServiceFlags(NODE_NETWORK), INVALID_SOCKET, addr, 0, 0, CAddress(), "", ConnectionType::OUTBOUND_FULL_RELAY));
vNodes.emplace_back(new CNode(id++, ServiceFlags(NODE_NETWORK), INVALID_SOCKET, addr, /* nKeyedNetGroupIn */ 0, /* nLocalHostNonceIn */ 0, CAddress(), /* pszDest */ "", ConnectionType::OUTBOUND_FULL_RELAY, /* inbound_onion */ false));
CNode &node = *vNodes.back();
node.SetCommonVersion(PROTOCOL_VERSION);
@@ -220,7 +220,7 @@ BOOST_AUTO_TEST_CASE(peer_discouragement)
banman->ClearBanned();
CAddress addr1(ip(0xa0b0c001), NODE_NONE);
CNode dummyNode1(id++, NODE_NETWORK, INVALID_SOCKET, addr1, 0, 0, CAddress(), "", ConnectionType::INBOUND);
CNode dummyNode1(id++, NODE_NETWORK, INVALID_SOCKET, addr1, /* nKeyedNetGroupIn */ 0, /* nLocalHostNonceIn */ 0, CAddress(), /* pszDest */ "", ConnectionType::INBOUND, /* inbound_onion */ false);
dummyNode1.SetCommonVersion(PROTOCOL_VERSION);
peerLogic->InitializeNode(&dummyNode1);
dummyNode1.fSuccessfullyConnected = true;
@@ -233,7 +233,7 @@ BOOST_AUTO_TEST_CASE(peer_discouragement)
BOOST_CHECK(!banman->IsDiscouraged(ip(0xa0b0c001|0x0000ff00))); // Different IP, not discouraged
CAddress addr2(ip(0xa0b0c002), NODE_NONE);
CNode dummyNode2(id++, NODE_NETWORK, INVALID_SOCKET, addr2, 1, 1, CAddress(), "", ConnectionType::INBOUND);
CNode dummyNode2(id++, NODE_NETWORK, INVALID_SOCKET, addr2, /* nKeyedNetGroupIn */ 1, /* nLocalHostNonceIn */ 1, CAddress(), /* pszDest */ "", ConnectionType::INBOUND, /* inbound_onion */ false);
dummyNode2.SetCommonVersion(PROTOCOL_VERSION);
peerLogic->InitializeNode(&dummyNode2);
dummyNode2.fSuccessfullyConnected = true;
@@ -271,7 +271,7 @@ BOOST_AUTO_TEST_CASE(DoS_bantime)
SetMockTime(nStartTime); // Overrides future calls to GetTime()
CAddress addr(ip(0xa0b0c001), NODE_NONE);
CNode dummyNode(id++, NODE_NETWORK, INVALID_SOCKET, addr, 4, 4, CAddress(), "", ConnectionType::INBOUND);
CNode dummyNode(id++, NODE_NETWORK, INVALID_SOCKET, addr, /* nKeyedNetGroupIn */ 4, /* nLocalHostNonceIn */ 4, CAddress(), /* pszDest */ "", ConnectionType::INBOUND, /* inbound_onion */ false);
dummyNode.SetCommonVersion(PROTOCOL_VERSION);
peerLogic->InitializeNode(&dummyNode);
dummyNode.fSuccessfullyConnected = true;

View File

@@ -40,9 +40,6 @@ FUZZ_TARGET_INIT(net, initialize_net)
CConnman connman{fuzzed_data_provider.ConsumeIntegral<uint64_t>(), fuzzed_data_provider.ConsumeIntegral<uint64_t>(), addrman};
node.CloseSocketDisconnect(&connman);
},
[&] {
node.MaybeSetAddrName(fuzzed_data_provider.ConsumeRandomLengthString(32));
},
[&] {
const std::vector<bool> asmap = ConsumeRandomLengthBitVector(fuzzed_data_provider);
if (!SanityCheckASMap(asmap)) {
@@ -75,7 +72,6 @@ FUZZ_TARGET_INIT(net, initialize_net)
}
(void)node.GetAddrLocal();
(void)node.GetAddrName();
(void)node.GetId();
(void)node.GetLocalNonce();
(void)node.GetLocalServices();

View File

@@ -194,14 +194,15 @@ BOOST_AUTO_TEST_CASE(cnode_simple_test)
id++, NODE_NETWORK, hSocket, addr,
/* nKeyedNetGroupIn = */ 0,
/* nLocalHostNonceIn = */ 0,
CAddress(), pszDest, ConnectionType::OUTBOUND_FULL_RELAY);
CAddress(), pszDest, ConnectionType::OUTBOUND_FULL_RELAY,
/* inbound_onion = */ false);
BOOST_CHECK(pnode1->IsFullOutboundConn() == true);
BOOST_CHECK(pnode1->IsManualConn() == false);
BOOST_CHECK(pnode1->IsBlockOnlyConn() == false);
BOOST_CHECK(pnode1->IsFeelerConn() == false);
BOOST_CHECK(pnode1->IsAddrFetchConn() == false);
BOOST_CHECK(pnode1->IsInboundConn() == false);
BOOST_CHECK(pnode1->IsInboundOnion() == false);
BOOST_CHECK(pnode1->m_inbound_onion == false);
BOOST_CHECK_EQUAL(pnode1->ConnectedThroughNetwork(), Network::NET_IPV4);
std::unique_ptr<CNode> pnode2 = std::make_unique<CNode>(
@@ -216,7 +217,7 @@ BOOST_AUTO_TEST_CASE(cnode_simple_test)
BOOST_CHECK(pnode2->IsFeelerConn() == false);
BOOST_CHECK(pnode2->IsAddrFetchConn() == false);
BOOST_CHECK(pnode2->IsInboundConn() == true);
BOOST_CHECK(pnode2->IsInboundOnion() == false);
BOOST_CHECK(pnode2->m_inbound_onion == false);
BOOST_CHECK_EQUAL(pnode2->ConnectedThroughNetwork(), Network::NET_IPV4);
std::unique_ptr<CNode> pnode3 = std::make_unique<CNode>(
@@ -231,7 +232,7 @@ BOOST_AUTO_TEST_CASE(cnode_simple_test)
BOOST_CHECK(pnode3->IsFeelerConn() == false);
BOOST_CHECK(pnode3->IsAddrFetchConn() == false);
BOOST_CHECK(pnode3->IsInboundConn() == false);
BOOST_CHECK(pnode3->IsInboundOnion() == false);
BOOST_CHECK(pnode3->m_inbound_onion == false);
BOOST_CHECK_EQUAL(pnode3->ConnectedThroughNetwork(), Network::NET_IPV4);
std::unique_ptr<CNode> pnode4 = std::make_unique<CNode>(
@@ -246,7 +247,7 @@ BOOST_AUTO_TEST_CASE(cnode_simple_test)
BOOST_CHECK(pnode4->IsFeelerConn() == false);
BOOST_CHECK(pnode4->IsAddrFetchConn() == false);
BOOST_CHECK(pnode4->IsInboundConn() == true);
BOOST_CHECK(pnode4->IsInboundOnion() == true);
BOOST_CHECK(pnode4->m_inbound_onion == true);
BOOST_CHECK_EQUAL(pnode4->ConnectedThroughNetwork(), Network::NET_ONION);
}
@@ -740,7 +741,7 @@ BOOST_AUTO_TEST_CASE(ipv4_peer_with_ipv6_addrMe_test)
in_addr ipv4AddrPeer;
ipv4AddrPeer.s_addr = 0xa0b0c001;
CAddress addr = CAddress(CService(ipv4AddrPeer, 7777), NODE_NETWORK);
std::unique_ptr<CNode> pnode = std::make_unique<CNode>(0, NODE_NETWORK, INVALID_SOCKET, addr, 0, 0, CAddress{}, std::string{}, ConnectionType::OUTBOUND_FULL_RELAY);
std::unique_ptr<CNode> pnode = std::make_unique<CNode>(0, NODE_NETWORK, INVALID_SOCKET, addr, /* nKeyedNetGroupIn */ 0, /* nLocalHostNonceIn */ 0, CAddress{}, /* pszDest */ std::string{}, ConnectionType::OUTBOUND_FULL_RELAY, /* inbound_onion */ false);
pnode->fSuccessfullyConnected.store(true);
// the peer claims to be reaching us via IPv6
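These tests now read the public `m_inbound_onion` member directly instead of going through `IsInboundOnion()`. For context, that flag feeds the value checked via `ConnectedThroughNetwork()` roughly as follows (illustrative fragment, not a verbatim copy of net.cpp):

```
// Illustrative fragment: how the now-public m_inbound_onion flag determines
// the network that ConnectedThroughNetwork() reports in the tests above.
Network CNode::ConnectedThroughNetwork() const
{
    return m_inbound_onion ? NET_ONION : addr.GetNetClass();
}
```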

View File

@@ -23,16 +23,16 @@ struct ConnmanTestMsg : public CConnman {
void AddTestNode(CNode& node)
{
LOCK(cs_vNodes);
vNodes.push_back(&node);
LOCK(m_nodes_mutex);
m_nodes.push_back(&node);
}
void ClearTestNodes()
{
LOCK(cs_vNodes);
for (CNode* node : vNodes) {
LOCK(m_nodes_mutex);
for (CNode* node : m_nodes) {
delete node;
}
vNodes.clear();
m_nodes.clear();
}
void ProcessMessagesOnce(CNode& node) { m_msgproc->ProcessMessages(&node, flagInterruptMsgProc); }
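The test helper above is adjusted for the `vNodes`/`cs_vNodes` to `m_nodes`/`m_nodes_mutex` rename. For context, a simplified sketch (hypothetical `Demo*` types, non-atomic refcount) of the RAII snapshot pattern those members are used with: copy the node pointers under the lock, pin each node, and unpin them when the snapshot goes out of scope, so callers can iterate the copy without holding `m_nodes_mutex`.

```
// Illustrative sketch only: hypothetical Demo* types, real sync.h macros.
#include <sync.h>

#include <vector>

struct DemoNode
{
    int nRefCount{0};
    void AddRef() { ++nRefCount; }
    void Release() { --nRefCount; }
};

class DemoConnman
{
private:
    Mutex m_nodes_mutex;
    std::vector<DemoNode*> m_nodes GUARDED_BY(m_nodes_mutex);

public:
    class NodesSnapshot
    {
    public:
        explicit NodesSnapshot(DemoConnman& connman)
        {
            LOCK(connman.m_nodes_mutex);
            m_nodes_copy = connman.m_nodes;
            for (DemoNode* node : m_nodes_copy) node->AddRef(); // keep nodes alive
        }
        ~NodesSnapshot()
        {
            for (DemoNode* node : m_nodes_copy) node->Release(); // drop the pins
        }
        const std::vector<DemoNode*>& Nodes() const { return m_nodes_copy; }

    private:
        std::vector<DemoNode*> m_nodes_copy;
    };
};
```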

View File

@@ -22,7 +22,7 @@ public:
using Clock = std::chrono::steady_clock;
CThreadInterrupt();
explicit operator bool() const;
void operator()();
void operator()() EXCLUSIVE_LOCKS_REQUIRED(!mut);
void reset();
bool sleep_for(Clock::duration rel_time) EXCLUSIVE_LOCKS_REQUIRED(!mut);
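`operator()()` gains the negative annotation because it takes `mut` itself before flipping the flag and waking any `sleep_for()` waiters. A rough sketch of that interaction, mirroring (but not verbatim from) threadinterrupt.cpp and assuming its usual includes:

```
// Rough sketch: why operator()() must not be called with `mut` already held.
void CThreadInterrupt::operator()()
{
    {
        LOCK(mut);   // take the mutex before setting the interrupt flag
        flag = true;
    }
    cond.notify_all(); // wake every sleep_for() waiter
}

bool CThreadInterrupt::sleep_for(Clock::duration rel_time)
{
    WAIT_LOCK(mut, lock);
    // returns false once interrupted, true if the full duration elapsed
    return !cond.wait_for(lock, rel_time, [this]() { return flag.load(); });
}
```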

View File

@@ -47,7 +47,7 @@ public:
explicit MainSignalsInstance(CScheduler& scheduler LIFETIMEBOUND) : m_schedulerClient(scheduler) {}
void Register(std::shared_ptr<CValidationInterface> callbacks)
void Register(std::shared_ptr<CValidationInterface> callbacks) EXCLUSIVE_LOCKS_REQUIRED(!m_mutex)
{
LOCK(m_mutex);
auto inserted = m_map.emplace(callbacks.get(), m_list.end());
@@ -55,7 +55,7 @@ public:
inserted.first->second->callbacks = std::move(callbacks);
}
void Unregister(CValidationInterface* callbacks)
void Unregister(CValidationInterface* callbacks) EXCLUSIVE_LOCKS_REQUIRED(!m_mutex)
{
LOCK(m_mutex);
auto it = m_map.find(callbacks);
@@ -69,7 +69,7 @@ public:
//! map entry. After this call, the list may still contain callbacks that
//! are currently executing, but it will be cleared when they are done
//! executing.
void Clear()
void Clear() EXCLUSIVE_LOCKS_REQUIRED(!m_mutex)
{
LOCK(m_mutex);
for (const auto& entry : m_map) {
@@ -78,7 +78,7 @@ public:
m_map.clear();
}
template<typename F> void Iterate(F&& f)
template<typename F> void Iterate(F&& f) EXCLUSIVE_LOCKS_REQUIRED(!m_mutex)
{
WAIT_LOCK(m_mutex, lock);
for (auto it = m_list.begin(); it != m_list.end();) {

View File

@@ -90,16 +90,16 @@ public:
static uint32_t Mask(const Consensus::Params& params, Consensus::DeploymentPos pos);
/** Get the BIP9 state for a given deployment for the block after pindexPrev. */
ThresholdState State(const CBlockIndex* pindexPrev, const Consensus::Params& params, Consensus::DeploymentPos pos);
ThresholdState State(const CBlockIndex* pindexPrev, const Consensus::Params& params, Consensus::DeploymentPos pos) EXCLUSIVE_LOCKS_REQUIRED(!m_mutex);
/** Get the block height at which the BIP9 deployment switched into the state for the block after pindexPrev. */
int StateSinceHeight(const CBlockIndex* pindexPrev, const Consensus::Params& params, Consensus::DeploymentPos pos);
int StateSinceHeight(const CBlockIndex* pindexPrev, const Consensus::Params& params, Consensus::DeploymentPos pos) EXCLUSIVE_LOCKS_REQUIRED(!m_mutex);
/** Determine what nVersion a new block should use
*/
int32_t ComputeBlockVersion(const CBlockIndex* pindexPrev, const Consensus::Params& params);
int32_t ComputeBlockVersion(const CBlockIndex* pindexPrev, const Consensus::Params& params) EXCLUSIVE_LOCKS_REQUIRED(!m_mutex);
void Clear();
void Clear() EXCLUSIVE_LOCKS_REQUIRED(!m_mutex);
};
class AbstractEHFManager