Mirror of https://github.com/TomHarte/CLK.git, synced 2025-01-20 21:30:59 +00:00

Reformat ClockReceiver.

Thomas Harte 2024-11-29 22:12:57 -05:00
parent abfc73299e
commit 86fa8da8c5
12 changed files with 715 additions and 710 deletions

View File

@ -56,218 +56,225 @@
	Boolean operators, but forcing callers and receivers to be explicit as to usage.
*/
template <class T> class WrappedInt {
public:
	using IntType = int64_t;

	forceinline constexpr WrappedInt(IntType l) noexcept : length_(l) {}
	forceinline constexpr WrappedInt() noexcept : length_(0) {}

	forceinline T &operator =(const T &rhs) {
		length_ = rhs.length_;
		return *this;
	}

	forceinline T &operator +=(const T &rhs) {
		length_ += rhs.length_;
		return *static_cast<T *>(this);
	}

	forceinline T &operator -=(const T &rhs) {
		length_ -= rhs.length_;
		return *static_cast<T *>(this);
	}

	forceinline T &operator ++() {
		++ length_;
		return *static_cast<T *>(this);
	}

	forceinline T &operator ++(int) {
		length_ ++;
		return *static_cast<T *>(this);
	}

	forceinline T &operator --() {
		-- length_;
		return *static_cast<T *>(this);
	}

	forceinline T &operator --(int) {
		length_ --;
		return *static_cast<T *>(this);
	}

	forceinline T &operator *=(const T &rhs) {
		length_ *= rhs.length_;
		return *static_cast<T *>(this);
	}

	forceinline T &operator /=(const T &rhs) {
		length_ /= rhs.length_;
		return *static_cast<T *>(this);
	}

	forceinline T &operator %=(const T &rhs) {
		length_ %= rhs.length_;
		return *static_cast<T *>(this);
	}

	forceinline T &operator &=(const T &rhs) {
		length_ &= rhs.length_;
		return *static_cast<T *>(this);
	}

	forceinline constexpr T operator +(const T &rhs) const { return T(length_ + rhs.length_); }
	forceinline constexpr T operator -(const T &rhs) const { return T(length_ - rhs.length_); }

	forceinline constexpr T operator *(const T &rhs) const { return T(length_ * rhs.length_); }
	forceinline constexpr T operator /(const T &rhs) const { return T(length_ / rhs.length_); }

	forceinline constexpr T operator %(const T &rhs) const { return T(length_ % rhs.length_); }
	forceinline constexpr T operator &(const T &rhs) const { return T(length_ & rhs.length_); }

	forceinline constexpr T operator -() const { return T(- length_); }

	forceinline constexpr bool operator <(const T &rhs) const { return length_ < rhs.length_; }
	forceinline constexpr bool operator >(const T &rhs) const { return length_ > rhs.length_; }
	forceinline constexpr bool operator <=(const T &rhs) const { return length_ <= rhs.length_; }
	forceinline constexpr bool operator >=(const T &rhs) const { return length_ >= rhs.length_; }
	forceinline constexpr bool operator ==(const T &rhs) const { return length_ == rhs.length_; }
	forceinline constexpr bool operator !=(const T &rhs) const { return length_ != rhs.length_; }

	forceinline constexpr bool operator !() const { return !length_; }
	// bool operator () is not supported because it offers an implicit cast to int,
	// which is prone silently to permit misuse.

	/// @returns The underlying int, converted to an integral type of your choosing, clamped to that int's range.
	template<typename Type = IntType> forceinline constexpr Type as() const {
		if constexpr (sizeof(Type) == sizeof(IntType)) {
			if constexpr (std::is_same_v<Type, IntType>) {
				return length_;
			} else if constexpr (std::is_signed_v<Type>) {
				// Both integers are the same size, but a signed result is being asked for
				// from an unsigned original.
				return length_ > Type(std::numeric_limits<Type>::max()) ?
					Type(std::numeric_limits<Type>::max()) : Type(length_);
			} else {
				// An unsigned result is being asked for from a signed original.
				return length_ < 0 ? 0 : Type(length_);
			}
		}

		const auto clamped = std::clamp(
			length_,
			IntType(std::numeric_limits<Type>::min()),
			IntType(std::numeric_limits<Type>::max())
		);
		return Type(clamped);
	}

	/// @returns The underlying int, in its native form.
	forceinline constexpr IntType as_integral() const { return length_; }

	/*!
		Severs from @c this the effect of dividing by @c divisor; @c this will end up with
		the value of @c this modulo @c divisor and @c this divided by @c divisor is returned.
	*/
	template <typename Result = T> forceinline Result divide(const T &divisor) {
		Result r;
		static_cast<T *>(this)->fill(r, divisor);
		return r;
	}

	/*!
		Flushes the value in @c this. The current value is returned, and the internal value
		is reset to zero.
	*/
	template <typename Result> Result flush() {
		// Jiggery pokery here; switching to function overloading avoids
		// the namespace-level requirement for template specialisation.
		Result r;
		static_cast<T *>(this)->fill(r);
		return r;
	}

	// operator int() is deliberately not provided, to avoid accidental substitution of
	// classes that use this template.

protected:
	IntType length_;
};

/// Describes an integer number of whole cycles: pairs of clock signal transitions.
class Cycles: public WrappedInt<Cycles> {
public:
	forceinline constexpr Cycles(IntType l) noexcept : WrappedInt<Cycles>(l) {}
	forceinline constexpr Cycles() noexcept : WrappedInt<Cycles>() {}
	forceinline static constexpr Cycles max() {
		return Cycles(std::numeric_limits<IntType>::max());
	}

private:
	friend WrappedInt;
	void fill(Cycles &result) {
		result.length_ = length_;
		length_ = 0;
	}

	void fill(Cycles &result, const Cycles &divisor) {
		result.length_ = length_ / divisor.length_;
		length_ %= divisor.length_;
	}
};

/// Describes an integer number of half cycles: single clock signal transitions.
class HalfCycles: public WrappedInt<HalfCycles> {
public:
	forceinline constexpr HalfCycles(IntType l) noexcept : WrappedInt<HalfCycles>(l) {}
	forceinline constexpr HalfCycles() noexcept : WrappedInt<HalfCycles>() {}
	forceinline static constexpr HalfCycles max() {
		return HalfCycles(std::numeric_limits<IntType>::max());
	}

	forceinline constexpr HalfCycles(const Cycles &cycles) noexcept :
		WrappedInt<HalfCycles>(cycles.as_integral() * 2) {}

	/// @returns The number of whole cycles completely covered by this span of half cycles.
	forceinline constexpr Cycles cycles() const {
		return Cycles(length_ >> 1);
	}

	/*!
		Severs from @c this the effect of dividing by @c divisor; @c this will end up with
		the value of @c this modulo @c divisor . @c this divided by @c divisor is returned.
	*/
	forceinline Cycles divide_cycles(const Cycles &divisor) {
		const HalfCycles half_divisor = HalfCycles(divisor);
		const Cycles result(length_ / half_divisor.length_);
		length_ %= half_divisor.length_;
		return result;
	}

	/*!
		Equivalent to @c divide_cycles(Cycles(1)) but faster.
	*/
	forceinline Cycles divide_cycles() {
		const Cycles result(length_ >> 1);
		length_ &= 1;
		return result;
	}

private:
	friend WrappedInt;
	void fill(Cycles &result) {
		result = Cycles(length_ >> 1);
		length_ &= 1;
	}

	void fill(HalfCycles &result) {
		result.length_ = length_;
		length_ = 0;
	}

	void fill(Cycles &result, const HalfCycles &divisor) {
		result = Cycles(length_ / (divisor.length_ << 1));
		length_ %= (divisor.length_ << 1);
	}

	void fill(HalfCycles &result, const HalfCycles &divisor) {
		result.length_ = length_ / divisor.length_;
		length_ %= divisor.length_;
	}
};

// Create a specialisation of WrappedInt::flush for converting HalfCycles to Cycles
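For reference, a minimal usage sketch of the types above (not part of the commit); it assumes the header sits at its usual ClockReceiver/ClockReceiver.hpp path:

#include "ClockReceiver/ClockReceiver.hpp"
#include <cassert>

void wrapped_int_example() {
	// Cycles and HalfCycles don't convert to int implicitly; arithmetic stays typed.
	const Cycles whole(10);
	const HalfCycles halves(whole);		// 10 cycles == 20 half cycles.
	assert(halves == HalfCycles(20));

	// divide() severs the quotient, leaving the remainder behind.
	HalfCycles t(7);
	const HalfCycles quotient = t.divide(HalfCycles(2));
	assert(quotient == HalfCycles(3) && t == HalfCycles(1));

	// as<>() clamps when converting to a narrower integral type.
	assert(Cycles::max().as<int8_t>() == 127);
}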

View File

@ -58,28 +58,28 @@ struct Observer {
	The hint provided is just that: a hint. Owners may perform ::run_for at a greater frequency.
*/
class Source {
public:
	/// Registers @c observer as the new clocking observer.
	void set_clocking_hint_observer(Observer *observer) {
		observer_ = observer;
		update_clocking_observer();
	}

	/// @returns the current preferred clocking strategy.
	virtual Preference preferred_clocking() const = 0;

private:
	Observer *observer_ = nullptr;

protected:
	/*!
		Provided for subclasses; call this whenever the clocking preference might have changed.
		This will notify the observer if there is one.
	*/
	void update_clocking_observer() {
		if(!observer_) return;
		observer_->set_component_prefers_clocking(this, preferred_clocking());
	}
};

}
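As a sketch of the intended contract (not code from this repository), a component might implement Source along these lines; SerialPort and its transmitting_ flag are purely illustrative, and the include path is assumed:

#include "ClockReceiver/ClockingHintSource.hpp"

struct SerialPort: public ClockingHint::Source {
	ClockingHint::Preference preferred_clocking() const override {
		// Claim no clocking while idle; just-in-time otherwise.
		return transmitting_ ? ClockingHint::Preference::JustInTime : ClockingHint::Preference::None;
	}

	void begin_transmission() {
		transmitting_ = true;
		update_clocking_observer();	// Tell the observer, if any, about the change.
	}

private:
	bool transmitting_ = false;
};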

View File

@ -15,78 +15,79 @@
	Provides the logic to insert into and traverse a list of future scheduled items.
*/
template <typename TimeUnit> class DeferredQueue {
public:
	/*!
		Schedules @c action to occur in @c delay units of time.
	*/
	void defer(TimeUnit delay, const std::function<void(void)> &action) {
		// Apply immediately if there's no delay (or a negative delay).
		if(delay <= TimeUnit(0)) {
			action();
			return;
		}

		if(!pending_actions_.empty()) {
			// Otherwise enqueue, having subtracted the delay for any preceding events,
			// and subtracting from the subsequent, if any.
			auto insertion_point = pending_actions_.begin();
			while(insertion_point != pending_actions_.end() && insertion_point->delay < delay) {
				delay -= insertion_point->delay;
				++insertion_point;
			}
			if(insertion_point != pending_actions_.end()) {
				insertion_point->delay -= delay;
			}

			pending_actions_.emplace(insertion_point, delay, action);
		} else {
			pending_actions_.emplace_back(delay, action);
		}
	}

	/*!
		@returns The amount of time until the next enqueued action will occur,
		or TimeUnit(-1) if the queue is empty.
	*/
	TimeUnit time_until_next_action() const {
		if(pending_actions_.empty()) return TimeUnit(-1);
		return pending_actions_.front().delay;
	}

	/*!
		Advances the queue the specified amount of time, performing any actions it reaches.
	*/
	void advance(TimeUnit time) {
		auto erase_iterator = pending_actions_.begin();
		while(erase_iterator != pending_actions_.end()) {
			erase_iterator->delay -= time;
			if(erase_iterator->delay <= TimeUnit(0)) {
				time = -erase_iterator->delay;
				erase_iterator->action();
				++erase_iterator;
			} else {
				break;
			}
		}
		if(erase_iterator != pending_actions_.begin()) {
			pending_actions_.erase(pending_actions_.begin(), erase_iterator);
		}
	}

	/// @returns @c true if no actions are enqueued; @c false otherwise.
	bool empty() const {
		return pending_actions_.empty();
	}

private:
	// The list of deferred actions.
	struct DeferredAction {
		TimeUnit delay;
		std::function<void(void)> action;

		DeferredAction(TimeUnit delay, const std::function<void(void)> &action) :
			delay(delay), action(std::move(action)) {}
	};
	std::vector<DeferredAction> pending_actions_;
};
/*!
@ -117,8 +118,6 @@ template <typename TimeUnit> class DeferredQueuePerformer: public DeferredQueue<
		DeferredQueue<TimeUnit>::advance(length);
		target_(length);

		// TODO: optimise this to avoid the multiple std::vector deletes. Find a neat way to expose that solution, maybe?
	}

private:
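To make the relative-delay arithmetic above concrete, a brief usage sketch of DeferredQueue (illustrative only; the stored delay of each action is relative to its predecessor, as the insertion logic shows):

#include "ClockReceiver/DeferredQueue.hpp"
#include <cassert>

void deferred_queue_example() {
	DeferredQueue<HalfCycles> queue;
	queue.defer(HalfCycles(4), [] { /* fires 4 half-cycles from now */ });
	queue.defer(HalfCycles(10), [] { /* fires 6 half-cycles after the first */ });

	queue.advance(HalfCycles(5));	// Performs the first action only.
	assert(queue.time_until_next_action() == HalfCycles(5));	// 10 - 5 remaining.
}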

View File

@ -13,33 +13,33 @@
	of future values.
*/
template <int DeferredDepth, typename ValueT> class DeferredValue {
private:
	static_assert(sizeof(ValueT) <= 4);

	static constexpr int elements_per_uint32 = sizeof(uint32_t) / sizeof(ValueT);
	static constexpr int unit_shift = sizeof(ValueT) * 8;
	static constexpr int insert_shift = (DeferredDepth & (elements_per_uint32 - 1)) * unit_shift;
	static constexpr uint32_t insert_mask = ~(0xffff'ffff << insert_shift);

	std::array<uint32_t, (DeferredDepth + elements_per_uint32 - 1) / elements_per_uint32> backlog;

public:
	/// @returns the current value.
	ValueT value() const {
		return ValueT(backlog[0]);
	}

	/// Advances to the next enqueued value.
	void advance() {
		for(size_t c = 0; c < backlog.size() - 1; c++) {
			backlog[c] = (backlog[c] >> unit_shift) | (backlog[c+1] << (32 - unit_shift));
		}
		backlog[backlog.size() - 1] >>= unit_shift;
	}

	/// Inserts a new value, replacing whatever is currently at the end of the queue.
	void insert(const ValueT value) {
		backlog[DeferredDepth / elements_per_uint32] =
			(backlog[DeferredDepth / elements_per_uint32] & insert_mask) | (value << insert_shift);
	}
};
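A quick sketch of the queue in action (illustrative only): a value inserted at depth N becomes current after N calls to advance().

#include "ClockReceiver/DeferredValue.hpp"
#include <cassert>
#include <cstdint>

void deferred_value_example() {
	DeferredValue<2, uint8_t> colour{};	// {} zero-initialises the backlog.
	colour.insert(0x3f);
	colour.advance();	// One tick: still in flight.
	colour.advance();	// Two ticks: now current.
	assert(colour.value() == 0x3f);
}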

View File

@ -35,251 +35,254 @@
	TODO: incorporate and codify AsyncJustInTimeActor.
*/
template <class T, class LocalTimeScale = HalfCycles, int multiplier = 1, int divider = 1> class JustInTimeActor:
	public ClockingHint::Observer
{
private:
	/*!
		A std::unique_ptr deleter which causes an update_sequence_point to occur on the actor supplied
		to it at construction if it implements @c next_sequence_point(). Otherwise destruction is a no-op.

		**Does not delete the object.**

		This is used by the -> operators below, which provide a unique pointer to the enclosed object and
		update their sequence points upon its destruction, i.e. after the caller has made whatever call
		or calls as were relevant to the enclosed object.
	*/
	class SequencePointAwareDeleter {
	public:
		explicit SequencePointAwareDeleter(
			JustInTimeActor<T, LocalTimeScale, multiplier, divider> *const actor) noexcept
			: actor_(actor) {}
		forceinline void operator ()(const T *const) const {
			if constexpr (has_sequence_points<T>::value) {
				actor_->update_sequence_point();
			}
		}

	private:
		JustInTimeActor<T, LocalTimeScale, multiplier, divider> *const actor_;
	};

	// This block of SFINAE determines whether objects of type T accept Cycles or HalfCycles.
	using HalfRunFor = void (T::*const)(HalfCycles);
	static uint8_t half_sig(...);
	static uint16_t half_sig(HalfRunFor);
	using TargetTimeScale =
		std::conditional_t<
			sizeof(half_sig(&T::run_for)) == sizeof(uint16_t),
			HalfCycles,
			Cycles>;

public:
	/// Constructs a new JustInTimeActor using the same construction arguments as the included object.
	template<typename... Args> JustInTimeActor(Args&&... args) : object_(std::forward<Args>(args)...) {
		if constexpr (std::is_base_of<ClockingHint::Source, T>::value) {
			object_.set_clocking_hint_observer(this);
		}
	}

	/// Adds time to the actor.
	///
	/// @returns @c true if adding time caused a flush; @c false otherwise.
	forceinline bool operator += (LocalTimeScale rhs) {
		if constexpr (std::is_base_of<ClockingHint::Source, T>::value) {
			if(clocking_preference_ == ClockingHint::Preference::None) {
				return false;
			}
		}

		if constexpr (multiplier != 1) {
			time_since_update_ += rhs * multiplier;
		} else {
			time_since_update_ += rhs;
		}
		is_flushed_ = false;

		if constexpr (std::is_base_of<ClockingHint::Source, T>::value) {
			if (clocking_preference_ == ClockingHint::Preference::RealTime) {
				flush();
				return true;
			}
		}

		if constexpr (has_sequence_points<T>::value) {
			time_until_event_ -= rhs * multiplier;
			if(time_until_event_ <= LocalTimeScale(0)) {
				time_overrun_ = time_until_event_ / divider;
				flush();
				update_sequence_point();
				return true;
			}
		}

		return false;
	}

	/// Flushes all accumulated time and returns a pointer to the included object.
	///
	/// If this object provides sequence points, checks for changes to the next
	/// sequence point upon deletion of the pointer.
	[[nodiscard]] forceinline auto operator->() {
#ifndef NDEBUG
		assert(!flush_concurrency_check_.test_and_set());
#endif
		flush();
#ifndef NDEBUG
		flush_concurrency_check_.clear();
#endif
		return std::unique_ptr<T, SequencePointAwareDeleter>(&object_, SequencePointAwareDeleter(this));
	}

	/// Acts exactly as per the standard ->, but preserves constness.
	///
	/// Despite being const, this will flush the object and, if relevant, update the next sequence point.
	[[nodiscard]] forceinline auto operator -> () const {
		auto non_const_this = const_cast<JustInTimeActor<T, LocalTimeScale, multiplier, divider> *>(this);
#ifndef NDEBUG
		assert(!non_const_this->flush_concurrency_check_.test_and_set());
#endif
		non_const_this->flush();
#ifndef NDEBUG
		non_const_this->flush_concurrency_check_.clear();
#endif
		return std::unique_ptr<const T, SequencePointAwareDeleter>(&object_, SequencePointAwareDeleter(non_const_this));
	}

	/// @returns a pointer to the included object, without flushing time.
	[[nodiscard]] forceinline T *last_valid() {
		return &object_;
	}

	/// @returns a const pointer to the included object, without flushing time.
	[[nodiscard]] forceinline const T *last_valid() const {
		return &object_;
	}

	/// @returns the amount of time since the object was last flushed, in the target time scale.
	[[nodiscard]] forceinline TargetTimeScale time_since_flush() const {
		if constexpr (divider == 1) {
			return time_since_update_;
		}
		return TargetTimeScale(time_since_update_.as_integral() / divider);
	}

	/// @returns the amount of time since the object was last flushed, plus the local time scale @c offset,
	/// converted to the target time scale.
	[[nodiscard]] forceinline TargetTimeScale time_since_flush(LocalTimeScale offset) const {
		if constexpr (divider == 1) {
			return time_since_update_ + offset;
		}
		return TargetTimeScale((time_since_update_ + offset).as_integral() / divider);
	}

	/// Flushes all accumulated time.
	///
	/// This does not affect this actor's record of when the next sequence point will occur.
	forceinline void flush() {
		if(!is_flushed_) {
			did_flush_ = is_flushed_ = true;
			if constexpr (divider == 1) {
				const auto duration = time_since_update_.template flush<TargetTimeScale>();
				object_.run_for(duration);
			} else {
				const auto duration = time_since_update_.template divide<TargetTimeScale>(LocalTimeScale(divider));
				if(duration > TargetTimeScale(0))
					object_.run_for(duration);
			}
		}
	}

	/// Indicates whether a flush has occurred since the last call to did_flush().
	[[nodiscard]] forceinline bool did_flush() {
		const bool did_flush = did_flush_;
		did_flush_ = false;
		return did_flush;
	}

	/// @returns a number in the range [-max, 0] indicating the offset of the most recent sequence
	/// point from the final time at the end of the += that triggered the sequence point.
	[[nodiscard]] forceinline LocalTimeScale last_sequence_point_overrun() {
		return time_overrun_;
	}

	/// @returns the number of cycles until the next sequence-point-based flush, if the embedded object
	/// supports sequence points; @c LocalTimeScale() otherwise.
	[[nodiscard]] LocalTimeScale cycles_until_implicit_flush() const {
		return time_until_event_ / divider;
	}

	/// Indicates whether a sequence-point-caused flush will occur if the specified period is added.
	[[nodiscard]] forceinline bool will_flush(LocalTimeScale rhs) const {
		if constexpr (!has_sequence_points<T>::value) {
			return false;
		}
		return rhs >= time_until_event_;
	}

	/// Indicates the amount of time, in the local time scale, until the first local slot that falls wholly
	/// after @c duration, if that delay were to occur in @c offset units of time from now.
	[[nodiscard]] forceinline LocalTimeScale back_map(TargetTimeScale duration, TargetTimeScale offset) const {
		// A 1:1 mapping is easy.
		if constexpr (multiplier == 1 && divider == 1) {
			return duration;
		}

		// Work out when this query is placed, and the time to which it relates.
		const auto base = time_since_update_ + offset * divider;
		const auto target = base + duration * divider;

		// Figure out the number of whole input steps that is required to get
		// past target, and subtract the number of whole input steps necessary
		// to get to base.
		const auto steps_to_base = base.as_integral() / multiplier;
		const auto steps_to_target = (target.as_integral() + divider - 1) / multiplier;
		return LocalTimeScale(steps_to_target - steps_to_base);
	}

	/// Updates this template's record of the next sequence point.
	void update_sequence_point() {
		if constexpr (has_sequence_points<T>::value) {
			// Keep a fast path where no conversions will be applied; if conversions are
			// going to be applied then do a direct max -> max translation rather than
			// allowing the arithmetic to overflow.
			if constexpr (divider == 1 && std::is_same_v<LocalTimeScale, TargetTimeScale>) {
				time_until_event_ = object_.next_sequence_point();
			} else {
				const auto time = object_.next_sequence_point();
				if(time == TargetTimeScale::max()) {
					time_until_event_ = LocalTimeScale::max();
				} else {
					time_until_event_ = time * divider;
				}
			}
			assert(time_until_event_ > LocalTimeScale(0));
		}
	}

	/// @returns A cached copy of the object's clocking preference.
	ClockingHint::Preference clocking_preference() const {
		return clocking_preference_;
	}

private:
	T object_;
	LocalTimeScale time_since_update_, time_until_event_, time_overrun_;
	bool is_flushed_ = true;
	bool did_flush_ = false;

	template <typename S, typename = void> struct has_sequence_points : std::false_type {};
	template <typename S>
	struct has_sequence_points<S, decltype(void(std::declval<S &>().next_sequence_point()))> : std::true_type {};

	ClockingHint::Preference clocking_preference_ = ClockingHint::Preference::JustInTime;
	void set_component_prefers_clocking(ClockingHint::Source *, ClockingHint::Preference clocking) {
		clocking_preference_ = clocking;
	}

#ifndef NDEBUG
	std::atomic_flag flush_concurrency_check_{};
#endif
};
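A sketch of the intended call pattern (illustrative only; CRTC is a hypothetical component with run_for(HalfCycles), read(uint16_t) and, optionally, next_sequence_point()):

#include "ClockReceiver/JustInTime.hpp"

struct Machine {
	JustInTimeActor<CRTC> crtc_;

	void run_for(const HalfCycles duration) {
		crtc_ += duration;	// Accumulate time; flushes only if a sequence point is reached.
	}

	uint8_t read(const uint16_t address) {
		// operator-> flushes outstanding time before the access; when the temporary
		// pointer is destroyed, the deleter re-polls next_sequence_point().
		return crtc_->read(address);
	}
};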
@ -288,49 +291,50 @@ template <class T, class LocalTimeScale = HalfCycles, int multiplier = 1, int di
	Any time the amount of accumulated time crosses a threshold provided at construction time,
	the object will be updated on the AsyncTaskQueue.
*/
template <class T, class LocalTimeScale = HalfCycles, class TargetTimeScale = LocalTimeScale>
class AsyncJustInTimeActor {
public:
	/// Constructs a new AsyncJustInTimeActor using the same construction arguments as the included object.
	template<typename... Args> AsyncJustInTimeActor(TargetTimeScale threshold, Args&&... args) :
		object_(std::forward<Args>(args)...),
		threshold_(threshold) {}

	/// Adds time to the actor.
	inline void operator += (const LocalTimeScale &rhs) {
		time_since_update_ += rhs;
		if(time_since_update_ >= threshold_) {
			time_since_update_ -= threshold_;
			task_queue_.enqueue([this] () {
				object_.run_for(threshold_);
			});
		}
		is_flushed_ = false;
	}

	/// Flushes all accumulated time and returns a pointer to the included object.
	inline T *operator->() {
		flush();
		return &object_;
	}

	/// Returns a pointer to the included object without flushing time.
	inline T *last_valid() {
		return &object_;
	}

	/// Flushes all accumulated time.
	inline void flush() {
		if(!is_flushed_) {
			task_queue_.flush();
			object_.run_for(time_since_update_.template flush<TargetTimeScale>());
			is_flushed_ = true;
		}
	}

private:
	T object_;
	LocalTimeScale time_since_update_;
	TargetTimeScale threshold_;
	bool is_flushed_ = true;
	Concurrency::AsyncTaskQueue<true> task_queue_;
};
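And, for contrast, the asynchronous variant in use (a sketch; AudioGenerator and the threshold value are illustrative only):

AsyncJustInTimeActor<AudioGenerator> audio_(HalfCycles(2048));

void run_for(const HalfCycles duration) {
	// Accumulates time; each time a whole 2048 half-cycles have been crossed,
	// a run_for(threshold) is enqueued on the async task queue rather than
	// running the generator synchronously.
	audio_ += duration;
}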

View File

@ -21,65 +21,65 @@ namespace Time {
	of time, to bring it into phase.
*/
class ScanSynchroniser {
public:
	/*!
		@returns @c true if the emulated machine can be synchronised with the host frame output based on its
		current @c [scan]status and the host machine's @c frame_duration; @c false otherwise.
	*/
	bool can_synchronise(const Outputs::Display::ScanStatus &scan_status, const double frame_duration) {
		ratio_ = 1.0;
		if(scan_status.field_duration_gradient < 0.00001) {
			// Check out the machine's current frame time.
			// If it's within 3% of a non-zero integer multiple of the
			// display rate, mark this time window to be split over the sync.
			ratio_ = (frame_duration * base_multiplier_) / scan_status.field_duration;
			const double integer_ratio = round(ratio_);
			if(integer_ratio > 0.0) {
				ratio_ /= integer_ratio;
				return ratio_ <= maximum_rate_adjustment && ratio_ >= 1.0 / maximum_rate_adjustment;
			}
		}
		return false;
	}

	/*!
		@returns The appropriate speed multiplier for the next frame based on the inputs previously supplied to @c can_synchronise.
		Results are undefined if @c can_synchronise returned @c false.
	*/
	double next_speed_multiplier(const Outputs::Display::ScanStatus &scan_status) {
		// The host versus emulated ratio is calculated based on the current perceived frame duration of the machine.
		// Either that number is exactly correct or it's already the result of some sort of low-pass filter. So there's
		// no benefit to second-guessing it here — just take it to be correct.
		//
		// ... with one slight caveat, which is that it is desirable to adjust phase here, to align vertical sync points.
		// So the set speed multiplier may be adjusted slightly to aim for that.
		double speed_multiplier = 1.0 / (ratio_ / base_multiplier_);
		if(scan_status.current_position > 0.0) {
			if(scan_status.current_position < 0.5) speed_multiplier /= phase_adjustment_ratio;
			else speed_multiplier *= phase_adjustment_ratio;
		}
		speed_multiplier_ = (speed_multiplier_ * 0.95) + (speed_multiplier * 0.05);
		return speed_multiplier_ * base_multiplier_;
	}

	void set_base_speed_multiplier(const double multiplier) {
		base_multiplier_ = multiplier;
	}

	double get_base_speed_multiplier() const {
		return base_multiplier_;
	}

private:
	static constexpr double maximum_rate_adjustment = 1.03;
	static constexpr double phase_adjustment_ratio = 1.005;

	// Managed local state.
	double speed_multiplier_ = 1.0;
	double base_multiplier_ = 1.0;

	// Temporary storage to bridge the can_synchronise -> next_speed_multiplier gap.
	double ratio_ = 1.0;
};

}
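The expected two-step protocol, sketched (machine and the surrounding per-frame loop are hypothetical):

// Once per host frame, with scan_status fetched from the emulated machine's CRT:
if(synchroniser.can_synchronise(scan_status, host_frame_duration)) {
	// Rates are within ~3% of an integer multiple; stretch or squash emulated
	// time slightly, nudging phase so that vertical syncs align.
	machine.set_speed_multiplier(synchroniser.next_speed_multiplier(scan_status));
}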

View File

@ -16,7 +16,9 @@ typedef double Seconds;
typedef int64_t Nanos;
inline Nanos nanos_now() {
	return std::chrono::duration_cast<std::chrono::nanoseconds>(
		std::chrono::high_resolution_clock::now().time_since_epoch()
	).count();
}
inline Seconds seconds(Nanos nanos) {
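E.g., timing a block of work with these helpers (do_work() is a stand-in):

const Time::Nanos start = Time::nanos_now();
do_work();	// Hypothetical workload.
const Time::Seconds elapsed = Time::seconds(Time::nanos_now() - start);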

View File

@ -21,132 +21,132 @@ namespace Time {
	(iii) optionally, timer jitter; in order to suggest when you should next start drawing.
*/
class VSyncPredictor {
public:
	/*!
		Announces to the predictor that the work of producing an output frame has begun.
	*/
	void begin_redraw() {
		redraw_begin_time_ = nanos_now();
	}

	/*!
		Announces to the predictor that the work of producing an output frame has ended;
		the predictor will use the amount of time between each begin/end pair to modify
		its expectations as to how long it takes to draw a frame.
	*/
	void end_redraw() {
		redraw_period_.post(nanos_now() - redraw_begin_time_);
	}

	/*!
		Informs the predictor that a block-on-vsync has just ended, i.e. that the moment this
		machine calls retrace is now. The predictor uses these notifications to estimate output
		frame rate.
	*/
	void announce_vsync() {
		const auto now = nanos_now();

		if(last_vsync_) {
			last_vsync_ += frame_duration_;
			vsync_jitter_.post(last_vsync_ - now);
			last_vsync_ = (last_vsync_ + now) >> 1;
		} else {
			last_vsync_ = now;
		}
	}

	/*!
		Sets the frame rate for the target display.
	*/
	void set_frame_rate(float rate) {
		frame_duration_ = Nanos(1'000'000'000.0f / rate);
	}

	/*!
		@returns The time this class currently believes a whole frame occupies.
	*/
	Time::Nanos frame_duration() {
		return frame_duration_;
	}

	/*!
		Adds a record of how much jitter was experienced in scheduling; these values will be
		factored into the @c suggested_draw_time if supplied.

		A positive number means the timer occurred late. A negative number means it occurred early.
	*/
	void add_timer_jitter(Time::Nanos jitter) {
		timer_jitter_.post(jitter);
	}

	/*!
		Announces to the vsync predictor that output is now paused. This ends frame period
		calculations until the next announce_vsync() restarts frame-length counting.
	*/
	void pause() {
		last_vsync_ = 0;
	}

	/*!
		@return The time at which redrawing should begin, given the predicted frame period, how
		long it appears to take to draw a frame and how much jitter there is in scheduling
		(if those figures are being supplied).
	*/
	Nanos suggested_draw_time() {
		const auto mean = redraw_period_.mean() + timer_jitter_.mean() + vsync_jitter_.mean();
		const auto variance = redraw_period_.variance() + timer_jitter_.variance() + vsync_jitter_.variance();

		// Permit three standard deviations from the mean, to cover 99.9% of cases.
		const auto period = mean + Nanos(3.0f * sqrt(float(variance)));
		return last_vsync_ + frame_duration_ - period;
	}

private:
	class VarianceCollector {
	public:
		VarianceCollector(Time::Nanos default_value) {
			sum_ = default_value * 128;
			for(int c = 0; c < 128; ++c) {
				history_[c] = default_value;
			}
		}

		void post(Time::Nanos value) {
			sum_ -= history_[write_pointer_];
			sum_ += value;
			history_[write_pointer_] = value;
			write_pointer_ = (write_pointer_ + 1) & 127;
		}

		Time::Nanos mean() {
			return sum_ / 128;
		}

		Time::Nanos variance() {
			// I haven't yet come up with a better solution than calculating this
			// in whole every time, given the way that the mean mutates.
			Time::Nanos variance = 0;
			for(int c = 0; c < 128; ++c) {
				const auto difference = ((history_[c] * 128) - sum_) / 128;
				variance += (difference * difference);
			}
			return variance / 128;
		}

	private:
		Time::Nanos sum_;
		Time::Nanos history_[128];
		size_t write_pointer_ = 0;
	};

	Nanos redraw_begin_time_ = 0;
	Nanos last_vsync_ = 0;
	Nanos frame_duration_ = 1'000'000'000 / 60;

	VarianceCollector vsync_jitter_{0};
	VarianceCollector redraw_period_{1'000'000'000 / 60};	// A less convincing first guess.
	VarianceCollector timer_jitter_{0};	// Seed at 0 in case this feature isn't used by the owner.
};

}
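A per-frame usage sketch (sleep_until and draw_frame are stand-ins for the host's facilities):

predictor.announce_vsync();	// Immediately upon return from the block-on-vsync.

sleep_until(predictor.suggested_draw_time());	// Start drawing as late as is safely possible...

predictor.begin_redraw();
draw_frame();	// ...so that the new frame is ready just in time for the next retrace.
predictor.end_redraw();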

View File

@ -5455,7 +5455,7 @@
attributes = {
BuildIndependentTargetsInParallel = YES;
LastSwiftUpdateCheck = 0700;
LastUpgradeCheck = 1610;
ORGANIZATIONNAME = "Thomas Harte";
TargetAttributes = {
4B055A691FAE763F0060FFFF = {
@ -6908,6 +6908,7 @@
isa = XCBuildConfiguration;
buildSettings = {
ALWAYS_SEARCH_USER_PATHS = NO;
ASSETCATALOG_COMPILER_GENERATE_SWIFT_ASSET_SYMBOL_EXTENSIONS = YES;
CLANG_ANALYZER_LOCALIZABILITY_NONLOCALIZED = YES;
CLANG_CXX_LANGUAGE_STANDARD = "c++17";
CLANG_ENABLE_MODULES = YES;
@ -6968,6 +6969,7 @@
isa = XCBuildConfiguration;
buildSettings = {
ALWAYS_SEARCH_USER_PATHS = NO;
ASSETCATALOG_COMPILER_GENERATE_SWIFT_ASSET_SYMBOL_EXTENSIONS = YES;
CLANG_ANALYZER_LOCALIZABILITY_NONLOCALIZED = YES;
CLANG_CXX_LANGUAGE_STANDARD = "c++17";
CLANG_ENABLE_MODULES = YES;

View File

@ -1,6 +1,6 @@
<?xml version="1.0" encoding="UTF-8"?>
<Scheme
LastUpgradeVersion = "1400"
LastUpgradeVersion = "1610"
version = "1.3">
<BuildAction
parallelizeBuildables = "YES"

View File

@ -1,7 +1,7 @@
<?xml version="1.0" encoding="UTF-8"?>
<Scheme
LastUpgradeVersion = "1400"
version = "1.3">
LastUpgradeVersion = "1610"
version = "1.8">
<BuildAction
parallelizeBuildables = "YES"
buildImplicitDependencies = "YES">

View File

@ -1,6 +1,6 @@
<?xml version="1.0" encoding="UTF-8"?>
<Scheme
LastUpgradeVersion = "1400"
LastUpgradeVersion = "1610"
version = "1.3">
<BuildAction
parallelizeBuildables = "YES"
@ -51,15 +51,6 @@
savedToolIdentifier = ""
useCustomWorkingDirectory = "NO"
debugDocumentVersioning = "YES">
</ProfileAction>
<AnalyzeAction
buildConfiguration = "Debug">