//
// BestEffortUpdater.cpp
// Clock Signal
//
// Created by Thomas Harte on 04/10/2017.
// Copyright 2017 Thomas Harte. All rights reserved.
//
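
// Overview (descriptive summary of the code below): BestEffortUpdater runs updates on a
// serial task queue, reporting to a delegate the wall-clock time elapsed since the
// previous update, capped at 0.2 seconds. If update() is called while an update is
// already in flight, the request is not serviced separately; the delegate is simply
// told, at the next update, that at least one request was skipped.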
#include "BestEffortUpdater.hpp"
#include <cmath>
using namespace Concurrency;
BestEffortUpdater::BestEffortUpdater() {
	// ATOMIC_FLAG_INIT isn't necessarily safe to use (prior to C++20, a default-constructed
	// std::atomic_flag is left in an unspecified state, and the macro is awkward to apply
	// to a class member), so establish the default clear state by other means.
	update_is_ongoing_.clear();
}

BestEffortUpdater::~BestEffortUpdater() {
	// Don't allow further destruction until the task queue is stopped.
	flush();
}

void BestEffortUpdater::update() {
	// Perform an update only if one is not currently ongoing.
	if(!update_is_ongoing_.test_and_set()) {
		async_task_queue_.enqueue([this]() {
			// Get the time now using the highest-resolution clock provided by the
			// implementation, and determine the duration since the last time this
			// section was entered.
			const std::chrono::time_point<std::chrono::high_resolution_clock> now = std::chrono::high_resolution_clock::now();
			const auto elapsed = now - previous_time_point_;
			previous_time_point_ = now;

			if(has_previous_time_point_) {
				// If a previous time point exists, convert the elapsed duration to integer
				// nanoseconds and call the delegate if there is one. Proceed only if the
				// duration is positive; this may be an adjustable clock, so be ready to
				// swallow unexpected adjustments.
				const int64_t integer_duration = std::chrono::duration_cast<std::chrono::nanoseconds>(elapsed).count();
				if(integer_duration > 0) {
					if(delegate_) {
						// Cap the reported duration at 1/5th of a second, to avoid doing a
						// huge amount of work after any brief system interruption.
						const double duration = std::min(static_cast<double>(integer_duration) / 1e9, 0.2);
						delegate_->update(this, duration, has_skipped_);
					}
					has_skipped_ = false;
				}
			} else {
				has_previous_time_point_ = true;
			}

			// Allow further updates to occur.
			update_is_ongoing_.clear();
		});
	} else {
		// An update is already in flight; just record that this request was skipped.
		async_task_queue_.enqueue([this]() {
			has_skipped_ = true;
		});
	}
}
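
// For illustration only (not part of the original file): a minimal sketch of a delegate,
// assuming a hypothetical `Machine` type with a `run_for(double seconds)` method. The
// Delegate interface is inferred from the call site above; it receives the updater, the
// capped elapsed time in seconds, and whether any update requests were skipped while a
// previous update was still running.
//
//	struct MachineUpdater: public Concurrency::BestEffortUpdater::Delegate {
//		Machine machine;	// Hypothetical emulated machine; an assumption for this sketch.
//
//		void update(Concurrency::BestEffortUpdater *, double duration, bool did_skip) override {
//			// `did_skip` being true implies at least one coalesced request arrived
//			// while the previous update was running.
//			machine.run_for(duration);
//		}
//	};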

void BestEffortUpdater::flush() {
	async_task_queue_.flush();
}

void BestEffortUpdater::set_delegate(Delegate *const delegate) {
	async_task_queue_.enqueue([this, delegate]() {
		delegate_ = delegate;
	});
}
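
// A hedged usage sketch: the host machinery here (the delegate type from the sketch
// above, and the callbacks that drive it) is assumed, not part of this file. The pattern
// the code supports is that update() is cheap to call from any frequent callback;
// overlapping calls coalesce into a skipped-flag notification rather than extra work.
//
//	MachineUpdater updater_delegate;
//	Concurrency::BestEffortUpdater updater;
//	updater.set_delegate(&updater_delegate);
//
//	// ... then, from a frequent host callback such as audio buffer exhaustion or vsync:
//	updater.update();
//
//	// ... and before teardown, to ensure all queued work has completed:
//	updater.flush();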