//
//  BestEffortUpdater.cpp
//  Clock Signal
//
//  Created by Thomas Harte on 04/10/2017.
//  Copyright 2017 Thomas Harte. All rights reserved.
//
#include "BestEffortUpdater.hpp"
|
|
|
|
|
|
|
|
#include <cmath>
|
|
|
|
|
|
|
|
using namespace Concurrency;
|
|
|
|
|
2017-11-14 03:51:42 +00:00
|
|
|

BestEffortUpdater::BestEffortUpdater() {
    // ATOMIC_FLAG_INIT isn't necessarily safe to use, so establish default state by other means.
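    // (C++20 deprecates ATOMIC_FLAG_INIT and default-initialises std::atomic_flag to the clear
    // state, but that guarantee can't be relied upon under earlier standards.)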
    update_is_ongoing_.clear();
}
2018-03-01 03:15:22 +00:00
|
|
|
BestEffortUpdater::~BestEffortUpdater() {
|
|
|
|
// Don't allow further deconstruction until the task queue is stopped.
|
|
|
|
flush();
|
|
|
|
}
|
|
|
|
|

void BestEffortUpdater::update() {
    // Perform an update only if one is not currently ongoing.
    if(!update_is_ongoing_.test_and_set()) {
        async_task_queue_.enqueue([this]() {
            // Get the time now using the highest-resolution clock provided by the implementation,
            // and determine the duration since the last time this section was entered.
            const std::chrono::time_point<std::chrono::high_resolution_clock> now = std::chrono::high_resolution_clock::now();
            const auto elapsed = now - previous_time_point_;
            previous_time_point_ = now;

            if(has_previous_time_point_) {
                // If the duration is valid, convert it to an integral number of nanoseconds and call
                // the delegate if there is one. Proceed only if the duration is positive: this may be
                // an adjustable clock, so be ready to swallow unexpected adjustments.
                const int64_t integer_duration = std::chrono::duration_cast<std::chrono::nanoseconds>(elapsed).count();
                if(integer_duration > 0) {
                    if(delegate_) {
                        // Cap running at 1/5th of a second, to avoid doing a huge amount of work
                        // after any brief system interruption.
                        const double duration = std::min(static_cast<double>(integer_duration) / 1e9, 0.2);
                        delegate_->update(this, duration, has_skipped_);
                    }
                    has_skipped_ = false;
                }
            } else {
                has_previous_time_point_ = true;
            }

            // Allow further updates to occur.
            update_is_ongoing_.clear();
        });
    } else {
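        // An update is already ongoing; just record the skip, so that the next delegate call
        // can be told that at least one update request was elided.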
        async_task_queue_.enqueue([this]() {
            has_skipped_ = true;
        });
    }
}

void BestEffortUpdater::flush() {
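    // Block until any enqueued work has completed.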
    async_task_queue_.flush();
}

void BestEffortUpdater::set_delegate(Delegate *const delegate) {
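    // Serialise the delegate change through the task queue, so that it cannot race with an in-flight update.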
    async_task_queue_.enqueue([this, delegate]() {
        delegate_ = delegate;
    });
}
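
// For illustration only: a minimal sketch of how a caller might wire this class up. The
// Updatee type below is hypothetical; the authoritative Delegate interface is the one
// declared in BestEffortUpdater.hpp, and this sketch merely infers its shape from the
// delegate_->update(this, duration, has_skipped_) call above.
//
//     struct Updatee: public BestEffortUpdater::Delegate {
//         void update(BestEffortUpdater *updater, double duration, bool did_skip) override {
//             // Advance the emulated machine by `duration` seconds. If did_skip is true,
//             // at least one earlier update request was elided because another was ongoing.
//         }
//     };
//
//     BestEffortUpdater updater;
//     Updatee updatee;
//     updater.set_delegate(&updatee);    // Takes effect once the queued assignment runs.
//     updater.update();                  // Call whenever fresh output is wanted, e.g. from an audio callback.
//     updater.flush();                   // Optionally block until all queued work has completed.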