//
// BestEffortUpdater.hpp
// Clock Signal
//
// Created by Thomas Harte on 04/10/2017.
// Copyright 2017 Thomas Harte. All rights reserved.
//

#ifndef BestEffortUpdater_hpp
#define BestEffortUpdater_hpp

#include <atomic>
#include <chrono>
#include <condition_variable>
#include <mutex>
#include <thread>

#include "../ClockReceiver/TimeTypes.hpp"

namespace Concurrency {

/*!
	Accepts timing cues from multiple threads and ensures that a delegate receives calls that total
	a certain number of cycles per second, that those calls are strictly serialised, and that no
	backlog of calls accrues.

	No guarantees are made about the thread on which the delegate will be called.
*/
class BestEffortUpdater {
	public:
		BestEffortUpdater();
		~BestEffortUpdater();

		/// A delegate receives timing cues.
		struct Delegate {
			/*!
				Instructs the delegate to run for at least @c duration, providing hints as to whether
				multiple updates were requested before the previous had completed (as
				@c did_skip_previous_update) and providing the union of any flags supplied to @c update.

				@returns The amount of time actually run for.
			*/
			virtual Time::Seconds update(BestEffortUpdater *updater, Time::Seconds duration, bool did_skip_previous_update, int flags) = 0;
		};
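
		// For illustration only: a minimal sketch of a conforming Delegate. The machine type and
		// its run_for method are assumptions, not part of this header.
		//
		//	struct MachineUpdater: public BestEffortUpdater::Delegate {
		//		Time::Seconds update(BestEffortUpdater *, Time::Seconds duration, bool, int) override {
		//			// Advance the emulated machine by the requested amount of time and
		//			// report that the whole duration was consumed.
		//			machine.run_for(duration);
		//			return duration;
		//		}
		//		SomeMachine machine;	// Hypothetical machine being driven.
		//	};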

		/// Sets the current delegate.
		void set_delegate(Delegate *);

		/*!
			If the delegate is not currently in the process of an `update` call, calls it now to catch up
			to the current time. The call is asynchronous; this method will return immediately.
		*/
		void update(int flags = 0);
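
		// Illustrative only: because the delegate receives the union of flags supplied to update(),
		// calls that coalesce still deliver every hint. Flag values here are hypothetical:
		//
		//	updater.update(VideoDidSync);
		//	updater.update(AudioNeedsData);	// If the first update has not yet begun, the delegate
		//									// later receives VideoDidSync | AudioNeedsData.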

		/// Blocks until any ongoing update is complete; may spin.
		void flush();

	private:
		std::atomic<bool> should_quit_;
		std::atomic<bool> is_updating_;

		int64_t target_time_;
		int flags_ = 0;
		bool update_requested_;
		std::mutex update_mutex_;
		std::condition_variable update_condition_;

		decltype(target_time_) previous_time_point_;
		bool has_previous_time_point_ = false;
		std::atomic<bool> has_skipped_ = false;

		std::atomic<Delegate *> delegate_ = nullptr;

		void update_loop();

		// This is deliberately at the bottom, to ensure it is constructed after the various
		// mutexes, condition variables, etc. that it'll depend upon.
		std::thread update_thread_;
};
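
// A minimal end-to-end sketch, for illustration only; MachineUpdater is the hypothetical
// delegate sketched above:
//
//	Concurrency::BestEffortUpdater updater;
//	MachineUpdater machine_updater;
//	updater.set_delegate(&machine_updater);
//
//	// From any thread with a timing cue, e.g. an audio-queue callback or vsync handler:
//	updater.update();
//
//	// Before machine_updater is destroyed, ensure no update is still in flight:
//	updater.flush();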
}
#endif /* BestEffortUpdater_hpp */