diff --git a/src/llmq/quorums_signing_shares.cpp b/src/llmq/quorums_signing_shares.cpp
index 2fb9d845e..9a6d51dec 100644
--- a/src/llmq/quorums_signing_shares.cpp
+++ b/src/llmq/quorums_signing_shares.cpp
@@ -1126,10 +1126,6 @@ bool CSigSharesManager::SendMessages()
     // looped through all nodes, release them
     g_connman->ReleaseNodeVector(vNodesCopy);
 
-    if (didSend) {
-        g_connman->WakeSelect();
-    }
-
     return didSend;
 }
 
diff --git a/src/net.cpp b/src/net.cpp
index 6715da72f..7f46d85d1 100644
--- a/src/net.cpp
+++ b/src/net.cpp
@@ -1321,8 +1321,10 @@ void CConnman::ThreadSocketHandler()
             }
         }
 
+        isInSelect = true;
         int nSelect = select(have_fds ? hSocketMax + 1 : 0,
                              &fdsetRecv, &fdsetSend, &fdsetError, &timeout);
+        isInSelect = false;
 
         if (interruptNet)
             return;
@@ -3217,6 +3219,7 @@ void CConnman::PushMessage(CNode* pnode, CSerializedNetMsg&& msg, bool allowOpti
     size_t nBytesSent = 0;
     {
         LOCK(pnode->cs_vSend);
+        bool hasPendingData = !pnode->vSendMsg.empty();
         bool optimisticSend(allowOptimisticSend && pnode->vSendMsg.empty());
 
         //log total amount of bytes per command
@@ -3232,6 +3235,9 @@ void CConnman::PushMessage(CNode* pnode, CSerializedNetMsg&& msg, bool allowOpti
         // If write queue empty, attempt "optimistic write"
         if (optimisticSend == true)
             nBytesSent = SocketSendData(pnode);
+        // wake up select() call in case there was no pending data before (so it was not selecting this socket for sending)
+        else if (!hasPendingData && isInSelect)
+            WakeSelect();
     }
     if (nBytesSent)
         RecordBytesSent(nBytesSent);
diff --git a/src/net.h b/src/net.h
index fdff3d6f1..a09b6b850 100644
--- a/src/net.h
+++ b/src/net.h
@@ -39,6 +39,19 @@
 #include
 #include
 
+// "Optimistic send" was introduced at the beginning of the Bitcoin project. I assume this was done because it was
+// thought that "send" would be very cheap when the send buffer is empty. This is not true, as shown by profiling.
+// When a lot of load is seen on the network, the "send" call done in the message handler thread can easily use up 20%
+// of the time, effectively blocking things that could be done in parallel. We have introduced a way to wake up the
+// select() call in the network thread, which allows us to disable optimistic send without introducing an artificial
+// latency/delay when sending data. This however only works on non-WIN32 platforms for now. When we add support for
+// WIN32 platforms, we can completely remove optimistic send.
+#ifdef WIN32
+#define DEFAULT_ALLOW_OPTIMISTIC_SEND true
+#else
+#define DEFAULT_ALLOW_OPTIMISTIC_SEND false
+#endif
+
 class CAddrMan;
 class CScheduler;
 class CNode;
@@ -206,7 +219,7 @@ public:
 
     bool IsMasternodeOrDisconnectRequested(const CService& addr);
 
-    void PushMessage(CNode* pnode, CSerializedNetMsg&& msg, bool allowOptimisticSend = true);
+    void PushMessage(CNode* pnode, CSerializedNetMsg&& msg, bool allowOptimisticSend = DEFAULT_ALLOW_OPTIMISTIC_SEND);
 
     template<typename Condition, typename Callable>
     bool ForEachNodeContinueIf(const Condition& cond, Callable&& func)
@@ -533,6 +546,7 @@ private:
     /** a pipe which is added to select() calls to wakeup before the timeout */
    int wakeupPipe[2]{-1,-1};
 #endif
+    std::atomic<bool> isInSelect{false};
 
    std::thread threadDNSAddressSeed;
    std::thread threadSocketHandler;
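For reference, the wakeup path the net.cpp hunks rely on is the classic self-pipe trick: the socket thread adds the read end of wakeupPipe to its select() fd set, and WakeSelect() (not shown in this diff) writes a byte to the write end so select() returns immediately instead of waiting out its timeout. The standalone sketch below illustrates that mechanism under the same non-WIN32 assumption; SocketHandlerLoop and WakeSelectSketch are illustrative names, not part of CConnman.

// Minimal, self-contained sketch of the self-pipe wakeup used above (POSIX only).
// It is NOT the CConnman implementation; only the select()/pipe()/read()/write()
// usage mirrors the real code, everything else is illustrative.
#include <atomic>
#include <chrono>
#include <cstdio>
#include <thread>
#include <fcntl.h>
#include <unistd.h>
#include <sys/select.h>
#include <sys/time.h>

static int wakeupPipe[2] = {-1, -1};         // [0] = read end watched by select(), [1] = write end
static std::atomic<bool> isInSelect{false};  // same role as the new CConnman::isInSelect flag

// Socket-thread loop: block in select() on the pipe's read end (the real thread
// also watches all peer sockets), drain the pipe when woken up early.
static void SocketHandlerLoop()
{
    for (int i = 0; i < 3; ++i) {
        fd_set fdsetRecv;
        FD_ZERO(&fdsetRecv);
        FD_SET(wakeupPipe[0], &fdsetRecv);
        timeval timeout{1, 0};               // 1s fallback timeout, like the real select() loop

        isInSelect = true;
        int nSelect = select(wakeupPipe[0] + 1, &fdsetRecv, nullptr, nullptr, &timeout);
        isInSelect = false;

        if (nSelect > 0 && FD_ISSET(wakeupPipe[0], &fdsetRecv)) {
            char buf[128];
            while (read(wakeupPipe[0], buf, sizeof(buf)) > 0) {}  // drain wake tokens (pipe is non-blocking)
            std::printf("woken up early, would flush queued messages now\n");
        } else {
            std::printf("select() timed out\n");
        }
    }
}

// Called by other threads after queueing a message, instead of sending inline:
// one byte on the pipe makes select() return at once, so the data is flushed
// without waiting for the timeout and without an expensive send() on the caller.
static void WakeSelectSketch()
{
    if (!isInSelect) return;                 // mirrors the "!hasPendingData && isInSelect" guard in PushMessage()
    const char token = 0;
    (void)write(wakeupPipe[1], &token, 1);
}

int main()
{
    if (pipe(wakeupPipe) != 0) return 1;
    fcntl(wakeupPipe[0], F_SETFL, fcntl(wakeupPipe[0], F_GETFL) | O_NONBLOCK);

    std::thread socketThread(SocketHandlerLoop);
    std::this_thread::sleep_for(std::chrono::milliseconds(200));
    WakeSelectSketch();                      // queue a message, then wake the socket thread
    socketThread.join();

    close(wakeupPipe[0]);
    close(wakeupPipe[1]);
    return 0;
}

With this wakeup available, PushMessage() no longer needs the optimistic inline send on non-WIN32 builds, which is why DEFAULT_ALLOW_OPTIMISTIC_SEND flips to false there.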