2017-10-05 22:09:58 +00:00
|
|
|
//
|
|
|
|
// BestEffortUpdater.cpp
|
|
|
|
// Clock Signal
|
|
|
|
//
|
|
|
|
// Created by Thomas Harte on 04/10/2017.
|
|
|
|
// Copyright © 2017 Thomas Harte. All rights reserved.
|
|
|
|
//
|
|
|
|
|
|
|
|
#include "BestEffortUpdater.hpp"

#include <algorithm>
#include <cmath>
|
|
|
|
|
|
|
|
using namespace Concurrency;
|
|
|
|
|
2017-11-14 03:51:42 +00:00
|
|
|
BestEffortUpdater::BestEffortUpdater() {
	// ATOMIC_FLAG_INIT isn't necessarily safe to use, so establish the flag's
	// default (clear) state imperatively rather than via initialisation.
	update_is_ongoing_.clear();
}
|
|
|
|
|
2017-10-05 22:09:58 +00:00
|
|
|
void BestEffortUpdater::update() {
|
2017-10-05 22:12:33 +00:00
|
|
|
// Perform an update only if one is not currently ongoing.
|
2017-10-05 22:09:58 +00:00
|
|
|
if(!update_is_ongoing_.test_and_set()) {
|
|
|
|
async_task_queue_.enqueue([this]() {
|
2017-10-05 22:12:33 +00:00
|
|
|
// Get time now using the highest-resolution clock provided by the implementation, and determine
|
|
|
|
// the duration since the last time this section was entered.
|
2017-10-05 22:23:56 +00:00
|
|
|
const std::chrono::time_point<std::chrono::high_resolution_clock> now = std::chrono::high_resolution_clock::now();
|
|
|
|
const auto elapsed = now - previous_time_point_;
|
2017-10-05 22:09:58 +00:00
|
|
|
previous_time_point_ = now;
|
|
|
|
|
|
|
|
if(has_previous_time_point_) {
|
2017-10-05 22:12:33 +00:00
|
|
|
// If the duration is valid, convert it to integer cycles, maintaining a rolling error and call the delegate
|
2017-10-05 22:23:56 +00:00
|
|
|
// if there is one. Proceed only if the number of cycles is positive, and cap it to the per-second maximum —
|
|
|
|
// it's possible this is an adjustable clock so be ready to swallow unexpected adjustments.
|
|
|
|
const int64_t duration = std::chrono::duration_cast<std::chrono::nanoseconds>(elapsed).count();
|
|
|
|
if(duration > 0) {
|
|
|
|
double cycles = ((static_cast<double>(duration) * clock_rate_) / 1e9) + error_;
|
|
|
|
error_ = fmod(cycles, 1.0);
|
2017-10-05 22:09:58 +00:00
|
|
|
|
2017-10-05 22:23:56 +00:00
|
|
|
if(delegate_) {
|
|
|
|
delegate_->update(this, static_cast<int>(std::min(cycles, clock_rate_)), has_skipped_);
|
|
|
|
}
|
|
|
|
has_skipped_ = false;
|
2017-10-05 22:09:58 +00:00
|
|
|
}
|
|
|
|
} else {
|
|
|
|
has_previous_time_point_ = true;
|
|
|
|
}
|
|
|
|
|
2017-10-05 22:12:33 +00:00
|
|
|
// Allow furthers updates to occur.
|
2017-10-05 22:09:58 +00:00
|
|
|
update_is_ongoing_.clear();
|
|
|
|
});
|
|
|
|
} else {
|
|
|
|
async_task_queue_.enqueue([this]() {
|
|
|
|
has_skipped_ = true;
|
|
|
|
});
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
void BestEffortUpdater::flush() {
	// Block until every queued work item — updates and setter changes — has completed.
	async_task_queue_.flush();
}
|
|
|
|
|
2017-10-05 22:23:56 +00:00
|
|
|
void BestEffortUpdater::set_delegate(Delegate *const delegate) {
	// Install the new delegate via the work queue, so the change is
	// serialised with any in-flight update.
	async_task_queue_.enqueue([this, delegate]() {
		this->delegate_ = delegate;
	});
}
|
|
|
|
|
2017-10-05 22:23:56 +00:00
|
|
|
void BestEffortUpdater::set_clock_rate(const double clock_rate) {
|
2017-10-05 22:09:58 +00:00
|
|
|
async_task_queue_.enqueue([this, clock_rate]() {
|
|
|
|
this->clock_rate_ = clock_rate;
|
|
|
|
});
|
|
|
|
}
|