mirror of
https://github.com/dashpay/dash.git
synced 2024-12-25 03:52:49 +01:00
Merge bitcoin/bitcoin#24069: refactor: replace RecursiveMutex m_cs_callbacks_pending
with Mutex (and rename)
5574e6ed52d6effd3b7beff0f09b44449202a585 refactor: replace RecursiveMutex `m_callbacks_mutex` with Mutex (Sebastian Falbesoner) 3aa258109e3f3e0b1bfc4f811cbadfd6d516208c scripted-diff: rename `m_cs_callbacks_pending` -> `m_callbacks_mutex` (Sebastian Falbesoner) Pull request description: This PR is related to #19303 and gets rid of the RecursiveMutex `m_cs_callbacks_pending`. All of the critical sections (6 in total) only directly access the guarded elements, i.e. it is not possible that within one section another one is called, and we can use a regular Mutex: 807169e10b/src/scheduler.cpp (L138-L145)
807169e10b/src/scheduler.cpp (L152-L160)
807169e10b/src/scheduler.cpp (L169-L172)
807169e10b/src/scheduler.cpp (L184-L187)
807169e10b/src/scheduler.cpp (L197-L199)
807169e10b/src/scheduler.cpp (L203-L206)
Also, it is renamed to adapt to the (unwritten) naming convention to use the `_mutex` suffix rather than the `cs_` prefix. ACKs for top commit: hebasto: ACK 5574e6ed52d6effd3b7beff0f09b44449202a585, I have reviewed the code and it looks OK, I agree it can be merged. w0xlt: crACK 5574e6e Tree-SHA512: ba4b151d956582f4c7183a1d51702b269158fc5e2902c51e6a242aaeb1c72cfcdf398f9ffa42e3072f5aba21a8c493086a5fe7c450c271322da69bd54c37ed1f
This commit is contained in:
parent
c9f072febc
commit
67ca59ef50
@@ -132,7 +132,7 @@ bool CScheduler::AreThreadsServicingQueue() const
|
||||
void SingleThreadedSchedulerClient::MaybeScheduleProcessQueue()
|
||||
{
|
||||
{
|
||||
LOCK(m_cs_callbacks_pending);
|
||||
LOCK(m_callbacks_mutex);
|
||||
// Try to avoid scheduling too many copies here, but if we
|
||||
// accidentally have two ProcessQueue's scheduled at once its
|
||||
// not a big deal.
|
||||
@@ -146,7 +146,7 @@ void SingleThreadedSchedulerClient::ProcessQueue()
|
||||
{
|
||||
std::function<void()> callback;
|
||||
{
|
||||
LOCK(m_cs_callbacks_pending);
|
||||
LOCK(m_callbacks_mutex);
|
||||
if (m_are_callbacks_running) return;
|
||||
if (m_callbacks_pending.empty()) return;
|
||||
m_are_callbacks_running = true;
|
||||
@@ -163,7 +163,7 @@ void SingleThreadedSchedulerClient::ProcessQueue()
|
||||
~RAIICallbacksRunning()
|
||||
{
|
||||
{
|
||||
LOCK(instance->m_cs_callbacks_pending);
|
||||
LOCK(instance->m_callbacks_mutex);
|
||||
instance->m_are_callbacks_running = false;
|
||||
}
|
||||
instance->MaybeScheduleProcessQueue();
|
||||
@@ -178,7 +178,7 @@ void SingleThreadedSchedulerClient::AddToProcessQueue(std::function<void()> func
|
||||
assert(m_pscheduler);
|
||||
|
||||
{
|
||||
LOCK(m_cs_callbacks_pending);
|
||||
LOCK(m_callbacks_mutex);
|
||||
m_callbacks_pending.emplace_back(std::move(func));
|
||||
}
|
||||
MaybeScheduleProcessQueue();
|
||||
@@ -190,13 +190,13 @@ void SingleThreadedSchedulerClient::EmptyQueue()
|
||||
bool should_continue = true;
|
||||
while (should_continue) {
|
||||
ProcessQueue();
|
||||
LOCK(m_cs_callbacks_pending);
|
||||
LOCK(m_callbacks_mutex);
|
||||
should_continue = !m_callbacks_pending.empty();
|
||||
}
|
||||
}
|
||||
|
||||
size_t SingleThreadedSchedulerClient::CallbacksPending()
|
||||
{
|
||||
LOCK(m_cs_callbacks_pending);
|
||||
LOCK(m_callbacks_mutex);
|
||||
return m_callbacks_pending.size();
|
||||
}
|
||||
|
@@ -119,9 +119,9 @@ class SingleThreadedSchedulerClient
|
||||
private:
|
||||
CScheduler* m_pscheduler;
|
||||
|
||||
RecursiveMutex m_cs_callbacks_pending;
|
||||
std::list<std::function<void()>> m_callbacks_pending GUARDED_BY(m_cs_callbacks_pending);
|
||||
bool m_are_callbacks_running GUARDED_BY(m_cs_callbacks_pending) = false;
|
||||
Mutex m_callbacks_mutex;
|
||||
std::list<std::function<void()>> m_callbacks_pending GUARDED_BY(m_callbacks_mutex);
|
||||
bool m_are_callbacks_running GUARDED_BY(m_callbacks_mutex) = false;
|
||||
|
||||
void MaybeScheduleProcessQueue();
|
||||
void ProcessQueue();
|
||||
|
Loading…
Reference in New Issue
Block a user