// Copyright (c) 2015 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.

#include "scheduler.h"

#include "random.h"
#include "reverselock.h"

#include <assert.h>
#include <boost/bind.hpp>
#include <utility>

CScheduler::CScheduler() : nThreadsServicingQueue(0), stopRequested(false), stopWhenEmpty(false)
{
}

CScheduler::~CScheduler()
{
    assert(nThreadsServicingQueue == 0);
}

#if BOOST_VERSION < 105000
static boost::system_time toPosixTime(const boost::chrono::system_clock::time_point& t)
{
    // Creating the posix_time using from_time_t loses sub-second precision. So rather than exporting the time_point to time_t,
    // start with a posix_time at the epoch (0) and add the milliseconds that have passed since then.
    return boost::posix_time::from_time_t(0) + boost::posix_time::milliseconds(boost::chrono::duration_cast<boost::chrono::milliseconds>(t.time_since_epoch()).count());
}
#endif
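// Worked example for toPosixTime() above (illustrative only): a time_point
// 1500 milliseconds past the epoch becomes posix_time 1970-01-01 00:00:01.500,
// whereas converting through time_t via from_time_t alone would truncate it to
// 00:00:01.000 and drop the sub-second part.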
void CScheduler::serviceQueue()
{
    boost::unique_lock<boost::mutex> lock(newTaskMutex);
    ++nThreadsServicingQueue;

    // newTaskMutex is locked throughout this loop EXCEPT
    // when the thread is waiting or when the user's function
    // is called.
    while (!shouldStop()) {
        try {
            if (!shouldStop() && taskQueue.empty()) {
                reverse_lock<boost::unique_lock<boost::mutex> > rlock(lock);
                // Use this chance to get a tiny bit more entropy
                RandAddSeedSleep();
            }
            while (!shouldStop() && taskQueue.empty()) {
                // Wait until there is something to do.
                newTaskScheduled.wait(lock);
            }

            // Wait until either there is a new task, or until
            // the time of the first item on the queue:

// wait_until needs boost 1.50 or later; older versions have timed_wait:
#if BOOST_VERSION < 105000
            while (!shouldStop() && !taskQueue.empty() &&
                   newTaskScheduled.timed_wait(lock, toPosixTime(taskQueue.begin()->first))) {
                // Keep waiting until timeout
            }
#else
            // Some boost versions have a conflicting overload of wait_until that returns void.
            // Explicitly use a template here to avoid hitting that overload.
            while (!shouldStop() && !taskQueue.empty()) {
                boost::chrono::system_clock::time_point timeToWaitFor = taskQueue.begin()->first;
                if (newTaskScheduled.wait_until<>(lock, timeToWaitFor) == boost::cv_status::timeout)
                    break; // Exit loop after timeout; it means we reached the time of the event
            }
#endif
            // If there are multiple threads, the queue can empty while we're waiting (another
            // thread may service the task we were waiting on).
            if (shouldStop() || taskQueue.empty())
                continue;

            Function f = taskQueue.begin()->second;
            taskQueue.erase(taskQueue.begin());

            {
                // Unlock before calling f, so it can reschedule itself or another task
                // without deadlocking:
                reverse_lock<boost::unique_lock<boost::mutex> > rlock(lock);
                f();
            }
        } catch (...) {
            --nThreadsServicingQueue;
            throw;
        }
    }
    --nThreadsServicingQueue;
    newTaskScheduled.notify_one();
}
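// Minimal usage sketch (illustrative only; the real thread setup lives outside
// this file, and "someCallback" is a placeholder):
//
//     CScheduler scheduler;
//     boost::thread schedulerThread(boost::bind(&CScheduler::serviceQueue, &scheduler));
//     scheduler.scheduleFromNow(someCallback, 1000); // run someCallback ~1 second from now
//     ...
//     scheduler.stop(true);   // drain: let queued tasks finish, then serviceQueue() returns
//     schedulerThread.join();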
void CScheduler::stop(bool drain)
{
    {
        boost::unique_lock<boost::mutex> lock(newTaskMutex);
        if (drain)
            stopWhenEmpty = true;
        else
            stopRequested = true;
    }
    newTaskScheduled.notify_all();
}

void CScheduler::schedule(CScheduler::Function f, boost::chrono::system_clock::time_point t)
{
    {
        boost::unique_lock<boost::mutex> lock(newTaskMutex);
        taskQueue.insert(std::make_pair(t, f));
    }
    newTaskScheduled.notify_one();
}

void CScheduler::scheduleFromNow(CScheduler::Function f, int64_t deltaMilliSeconds)
{
    schedule(f, boost::chrono::system_clock::now() + boost::chrono::milliseconds(deltaMilliSeconds));
}

static void Repeat(CScheduler* s, CScheduler::Function f, int64_t deltaMilliSeconds)
{
    f();
    s->scheduleFromNow(boost::bind(&Repeat, s, f, deltaMilliSeconds), deltaMilliSeconds);
}

void CScheduler::scheduleEvery(CScheduler::Function f, int64_t deltaMilliSeconds)
{
    scheduleFromNow(boost::bind(&Repeat, this, f, deltaMilliSeconds), deltaMilliSeconds);
}
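// Note on scheduleEvery(): Repeat() runs f() and only then schedules the next
// run deltaMilliSeconds from that point, so the interval is measured from the
// completion of one run to the start of the next, not between fixed ticks.
// Illustrative sketch (doPrint is a placeholder):
//
//     scheduler.scheduleEvery(doPrint, 60000); // roughly once a minute, plus doPrint's own runtime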
size_t CScheduler::getQueueInfo(boost::chrono::system_clock::time_point &first,
                                boost::chrono::system_clock::time_point &last) const
{
    boost::unique_lock<boost::mutex> lock(newTaskMutex);
    size_t result = taskQueue.size();
    if (!taskQueue.empty()) {
        first = taskQueue.begin()->first;
        last = taskQueue.rbegin()->first;
    }
    return result;
}

bool CScheduler::AreThreadsServicingQueue() const {
    boost::unique_lock<boost::mutex> lock(newTaskMutex);
    return nThreadsServicingQueue;
}
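// Illustrative sketch for the introspection helpers above (placeholder names):
//
//     boost::chrono::system_clock::time_point first, last;
//     size_t nTasks = scheduler.getQueueInfo(first, last);
//     // nTasks is the queue size; first and last are only written when nTasks != 0.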
void SingleThreadedSchedulerClient::MaybeScheduleProcessQueue() {
    {
        LOCK(m_cs_callbacks_pending);
        // Try to avoid scheduling too many copies here, but if we
        // accidentally have two ProcessQueue calls scheduled at once it's
        // not a big deal.
        if (m_are_callbacks_running) return;
        if (m_callbacks_pending.empty()) return;
    }
    m_pscheduler->schedule(std::bind(&SingleThreadedSchedulerClient::ProcessQueue, this));
}

void SingleThreadedSchedulerClient::ProcessQueue() {
    std::function<void (void)> callback;
    {
        LOCK(m_cs_callbacks_pending);
        if (m_are_callbacks_running) return;
        if (m_callbacks_pending.empty()) return;
        m_are_callbacks_running = true;

        callback = std::move(m_callbacks_pending.front());
        m_callbacks_pending.pop_front();
    }

    // RAII the clearing of m_are_callbacks_running and the call to
    // MaybeScheduleProcessQueue so that both happen even if callback() throws.
    struct RAIICallbacksRunning {
        SingleThreadedSchedulerClient* instance;
        RAIICallbacksRunning(SingleThreadedSchedulerClient* _instance) : instance(_instance) {}
        ~RAIICallbacksRunning() {
            {
                LOCK(instance->m_cs_callbacks_pending);
                instance->m_are_callbacks_running = false;
            }
            instance->MaybeScheduleProcessQueue();
        }
    } raiicallbacksrunning(this);

    callback();
}

void SingleThreadedSchedulerClient::AddToProcessQueue(std::function<void (void)> func) {
    assert(m_pscheduler);

    {
        LOCK(m_cs_callbacks_pending);
        m_callbacks_pending.emplace_back(std::move(func));
    }
    MaybeScheduleProcessQueue();
}
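// Illustrative sketch (placeholder names; assumes the constructor taking a
// CScheduler* declared in scheduler.h): callbacks added here run one at a time,
// in insertion order, even if the scheduler has several service threads.
//
//     SingleThreadedSchedulerClient client(&scheduler);
//     client.AddToProcessQueue([] { /* runs first */ });
//     client.AddToProcessQueue([] { /* runs only after the first has finished */ });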
void SingleThreadedSchedulerClient::EmptyQueue() {
    assert(!m_pscheduler->AreThreadsServicingQueue());
    bool should_continue = true;
    while (should_continue) {
        ProcessQueue();
        LOCK(m_cs_callbacks_pending);
        should_continue = !m_callbacks_pending.empty();
    }
}