Mirror of https://github.com/TomHarte/CLK.git, synced 2025-08-07 23:25:00 +00:00
Reformat ClockReceiver.
@@ -56,218 +56,225 @@

	Boolean operators, but forcing callers and receivers to be explicit as to usage.
*/
template <class T> class WrappedInt {
public:
	using IntType = int64_t;

	forceinline constexpr WrappedInt(IntType l) noexcept : length_(l) {}
	forceinline constexpr WrappedInt() noexcept : length_(0) {}

	forceinline T &operator =(const T &rhs) {
		length_ = rhs.length_;
		return *this;
	}

	forceinline T &operator +=(const T &rhs) {
		length_ += rhs.length_;
		return *static_cast<T *>(this);
	}

	forceinline T &operator -=(const T &rhs) {
		length_ -= rhs.length_;
		return *static_cast<T *>(this);
	}

	forceinline T &operator ++() {
		++ length_;
		return *static_cast<T *>(this);
	}

	forceinline T &operator ++(int) {
		length_ ++;
		return *static_cast<T *>(this);
	}

	forceinline T &operator --() {
		-- length_;
		return *static_cast<T *>(this);
	}

	forceinline T &operator --(int) {
		length_ --;
		return *static_cast<T *>(this);
	}

	forceinline T &operator *=(const T &rhs) {
		length_ *= rhs.length_;
		return *static_cast<T *>(this);
	}

	forceinline T &operator /=(const T &rhs) {
		length_ /= rhs.length_;
		return *static_cast<T *>(this);
	}

	forceinline T &operator %=(const T &rhs) {
		length_ %= rhs.length_;
		return *static_cast<T *>(this);
	}

	forceinline T &operator &=(const T &rhs) {
		length_ &= rhs.length_;
		return *static_cast<T *>(this);
	}

	forceinline constexpr T operator +(const T &rhs) const { return T(length_ + rhs.length_); }
	forceinline constexpr T operator -(const T &rhs) const { return T(length_ - rhs.length_); }

	forceinline constexpr T operator *(const T &rhs) const { return T(length_ * rhs.length_); }
	forceinline constexpr T operator /(const T &rhs) const { return T(length_ / rhs.length_); }

	forceinline constexpr T operator %(const T &rhs) const { return T(length_ % rhs.length_); }
	forceinline constexpr T operator &(const T &rhs) const { return T(length_ & rhs.length_); }

	forceinline constexpr T operator -() const { return T(- length_); }

	forceinline constexpr bool operator <(const T &rhs) const { return length_ < rhs.length_; }
	forceinline constexpr bool operator >(const T &rhs) const { return length_ > rhs.length_; }
	forceinline constexpr bool operator <=(const T &rhs) const { return length_ <= rhs.length_; }
	forceinline constexpr bool operator >=(const T &rhs) const { return length_ >= rhs.length_; }
	forceinline constexpr bool operator ==(const T &rhs) const { return length_ == rhs.length_; }
	forceinline constexpr bool operator !=(const T &rhs) const { return length_ != rhs.length_; }

	forceinline constexpr bool operator !() const { return !length_; }
	// bool operator () is not supported because it offers an implicit cast to int,
	// which is prone silently to permit misuse.

	/// @returns The underlying int, converted to an integral type of your choosing, clamped to that int's range.
	template<typename Type = IntType> forceinline constexpr Type as() const {
		if constexpr (sizeof(Type) == sizeof(IntType)) {
			if constexpr (std::is_same_v<Type, IntType>) {
				return length_;
			} else if constexpr (std::is_signed_v<Type>) {
				// Both integers are the same size, but a signed result is being asked for
				// from an unsigned original.
				return length_ > Type(std::numeric_limits<Type>::max()) ?
					Type(std::numeric_limits<Type>::max()) : Type(length_);
			} else {
				// An unsigned result is being asked for from a signed original.
				return length_ < 0 ? 0 : Type(length_);
			}
		}

		const auto clamped = std::clamp(
			length_,
			IntType(std::numeric_limits<Type>::min()),
			IntType(std::numeric_limits<Type>::max())
		);
		return Type(clamped);
	}

	/// @returns The underlying int, in its native form.
	forceinline constexpr IntType as_integral() const { return length_; }

	/*!
		Severs from @c this the effect of dividing by @c divisor; @c this will end up with
		the value of @c this modulo @c divisor and @c this divided by @c divisor is returned.
	*/
	template <typename Result = T> forceinline Result divide(const T &divisor) {
		Result r;
		static_cast<T *>(this)->fill(r, divisor);
		return r;
	}

	/*!
		Flushes the value in @c this. The current value is returned, and the internal value
		is reset to zero.
	*/
	template <typename Result> Result flush() {
		// Jiggery pokery here; switching to function overloading avoids
		// the namespace-level requirement for template specialisation.
		Result r;
		static_cast<T *>(this)->fill(r);
		return r;
	}

	// operator int() is deliberately not provided, to avoid accidental substitution of
	// classes that use this template.

protected:
	IntType length_;
};

/// Describes an integer number of whole cycles: pairs of clock signal transitions.
class Cycles: public WrappedInt<Cycles> {
public:
	forceinline constexpr Cycles(IntType l) noexcept : WrappedInt<Cycles>(l) {}
	forceinline constexpr Cycles() noexcept : WrappedInt<Cycles>() {}
	forceinline static constexpr Cycles max() {
		return Cycles(std::numeric_limits<IntType>::max());
	}

private:
	friend WrappedInt;
	void fill(Cycles &result) {
		result.length_ = length_;
		length_ = 0;
	}

	void fill(Cycles &result, const Cycles &divisor) {
		result.length_ = length_ / divisor.length_;
		length_ %= divisor.length_;
	}
};

/// Describes an integer number of half cycles: single clock signal transitions.
class HalfCycles: public WrappedInt<HalfCycles> {
public:
	forceinline constexpr HalfCycles(IntType l) noexcept : WrappedInt<HalfCycles>(l) {}
	forceinline constexpr HalfCycles() noexcept : WrappedInt<HalfCycles>() {}
	forceinline static constexpr HalfCycles max() {
		return HalfCycles(std::numeric_limits<IntType>::max());
	}

	forceinline constexpr HalfCycles(const Cycles &cycles) noexcept :
		WrappedInt<HalfCycles>(cycles.as_integral() * 2) {}

	/// @returns The number of whole cycles completely covered by this span of half cycles.
	forceinline constexpr Cycles cycles() const {
		return Cycles(length_ >> 1);
	}

	/*!
		Severs from @c this the effect of dividing by @c divisor; @c this will end up with
		the value of @c this modulo @c divisor . @c this divided by @c divisor is returned.
	*/
	forceinline Cycles divide_cycles(const Cycles &divisor) {
		const HalfCycles half_divisor = HalfCycles(divisor);
		const Cycles result(length_ / half_divisor.length_);
		length_ %= half_divisor.length_;
		return result;
	}

	/*!
		Equivalent to @c divide_cycles(Cycles(1)) but faster.
	*/
	forceinline Cycles divide_cycles() {
		const Cycles result(length_ >> 1);
		length_ &= 1;
		return result;
	}

private:
	friend WrappedInt;
	void fill(Cycles &result) {
		result = Cycles(length_ >> 1);
		length_ &= 1;
	}

	void fill(HalfCycles &result) {
		result.length_ = length_;
		length_ = 0;
	}

	void fill(Cycles &result, const HalfCycles &divisor) {
		result = Cycles(length_ / (divisor.length_ << 1));
		length_ %= (divisor.length_ << 1);
	}

	void fill(HalfCycles &result, const HalfCycles &divisor) {
		result.length_ = length_ / divisor.length_;
		length_ %= divisor.length_;
	}
};

// Create a specialisation of WrappedInt::flush for converting HalfCycles to Cycles
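A minimal usage sketch of the types above (not part of the commit), showing the explicit Cycles/HalfCycles arithmetic, the whole-cycle to half-cycle conversion, divide_cycles() and flush(); the include path is an assumption.

// Usage sketch only; the include path is an assumption.
#include "ClockReceiver/ClockReceiver.hpp"
#include <cassert>

void wrapped_int_example() {
	// Cycles and HalfCycles are deliberately non-interchangeable integer wrappers.
	Cycles c(10);
	c += Cycles(5);
	assert(c.as<int>() == 15);

	// A whole-cycle count converts to twice as many half cycles...
	HalfCycles h = Cycles(3);
	h += HalfCycles(1);

	// ...and divide_cycles() splits off the whole cycles, retaining the odd half cycle.
	const Cycles whole = h.divide_cycles();
	assert(whole == Cycles(3));
	assert(h == HalfCycles(1));

	// flush() returns the accumulated total and resets the wrapped value to zero.
	const Cycles flushed = c.flush<Cycles>();
	assert(flushed == Cycles(15));
	assert(!c);
}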
@@ -58,28 +58,28 @@ struct Observer {

	The hint provided is just that: a hint. Owners may perform ::run_for at a greater frequency.
*/
class Source {
public:
	/// Registers @c observer as the new clocking observer.
	void set_clocking_hint_observer(Observer *observer) {
		observer_ = observer;
		update_clocking_observer();
	}

	/// @returns the current preferred clocking strategy.
	virtual Preference preferred_clocking() const = 0;

private:
	Observer *observer_ = nullptr;

protected:
	/*!
		Provided for subclasses; call this whenever the clocking preference might have changed.
		This will notify the observer if there is one.
	*/
	void update_clocking_observer() {
		if(!observer_) return;
		observer_->set_component_prefers_clocking(this, preferred_clocking());
	}
};

}
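For orientation, a hypothetical component might plug into ClockingHint::Source as below (not part of the commit); the SerialPort name and its transmit flag are illustrative only.

// Hypothetical example component; names are illustrative only.
class SerialPort: public ClockingHint::Source {
public:
	void set_transmitting(bool transmitting) {
		transmitting_ = transmitting;
		// Tell any registered observer that the preference reported below may have changed.
		update_clocking_observer();
	}

	// Ask to be clocked only while there is work in progress.
	ClockingHint::Preference preferred_clocking() const final {
		return transmitting_ ? ClockingHint::Preference::JustInTime : ClockingHint::Preference::None;
	}

private:
	bool transmitting_ = false;
};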
@@ -15,78 +15,79 @@

	Provides the logic to insert into and traverse a list of future scheduled items.
*/
template <typename TimeUnit> class DeferredQueue {
public:
	/*!
		Schedules @c action to occur in @c delay units of time.
	*/
	void defer(TimeUnit delay, const std::function<void(void)> &action) {
		// Apply immediately if there's no delay (or a negative delay).
		if(delay <= TimeUnit(0)) {
			action();
			return;
		}

		if(!pending_actions_.empty()) {
			// Otherwise enqueue, having subtracted the delay for any preceding events,
			// and subtracting from the subsequent, if any.
			auto insertion_point = pending_actions_.begin();
			while(insertion_point != pending_actions_.end() && insertion_point->delay < delay) {
				delay -= insertion_point->delay;
				++insertion_point;
			}
			if(insertion_point != pending_actions_.end()) {
				insertion_point->delay -= delay;
			}

			pending_actions_.emplace(insertion_point, delay, action);
		} else {
			pending_actions_.emplace_back(delay, action);
		}
	}

	/*!
		@returns The amount of time until the next enqueued action will occur,
			or TimeUnit(-1) if the queue is empty.
	*/
	TimeUnit time_until_next_action() const {
		if(pending_actions_.empty()) return TimeUnit(-1);
		return pending_actions_.front().delay;
	}

	/*!
		Advances the queue the specified amount of time, performing any actions it reaches.
	*/
	void advance(TimeUnit time) {
		auto erase_iterator = pending_actions_.begin();
		while(erase_iterator != pending_actions_.end()) {
			erase_iterator->delay -= time;
			if(erase_iterator->delay <= TimeUnit(0)) {
				time = -erase_iterator->delay;
				erase_iterator->action();
				++erase_iterator;
			} else {
				break;
			}
		}
		if(erase_iterator != pending_actions_.begin()) {
			pending_actions_.erase(pending_actions_.begin(), erase_iterator);
		}
	}

	/// @returns @c true if no actions are enqueued; @c false otherwise.
	bool empty() const {
		return pending_actions_.empty();
	}

private:
	// The list of deferred actions.
	struct DeferredAction {
		TimeUnit delay;
		std::function<void(void)> action;

		DeferredAction(TimeUnit delay, const std::function<void(void)> &action) :
			delay(delay), action(std::move(action)) {}
	};
	std::vector<DeferredAction> pending_actions_;
};

/*!
@@ -117,8 +118,6 @@ template <typename TimeUnit> class DeferredQueuePerformer: public DeferredQueue<

		DeferredQueue<TimeUnit>::advance(length);
		target_(length);
	}

private:
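A short usage sketch of DeferredQueue (not part of the commit), illustrating that delays passed to defer are measured from "now" but stored relative to the preceding event; assumes the relevant headers are included.

// Usage sketch only.
void deferred_queue_example() {
	DeferredQueue<Cycles> queue;
	queue.defer(Cycles(10), [] { /* fires 10 cycles from now */ });
	queue.defer(Cycles(25), [] { /* fires 25 cycles from now, i.e. 15 after the first */ });

	// Advancing by 10 cycles performs the first action; time_until_next_action()
	// then reports Cycles(15) until the second.
	queue.advance(Cycles(10));
}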
@@ -13,33 +13,33 @@

	of future values.
*/
template <int DeferredDepth, typename ValueT> class DeferredValue {
private:
	static_assert(sizeof(ValueT) <= 4);

	static constexpr int elements_per_uint32 = sizeof(uint32_t) / sizeof(ValueT);
	static constexpr int unit_shift = sizeof(ValueT) * 8;
	static constexpr int insert_shift = (DeferredDepth & (elements_per_uint32 - 1)) * unit_shift;
	static constexpr uint32_t insert_mask = ~(0xffff'ffff << insert_shift);

	std::array<uint32_t, (DeferredDepth + elements_per_uint32 - 1) / elements_per_uint32> backlog;

public:
	/// @returns the current value.
	ValueT value() const {
		return uint8_t(backlog[0]);
	}

	/// Advances to the next enqueued value.
	void advance() {
		for(size_t c = 0; c < backlog.size() - 1; c--) {
			backlog[c] = (backlog[c] >> unit_shift) | (backlog[c+1] << (32 - unit_shift));
		}
		backlog[backlog.size() - 1] >>= unit_shift;
	}

	/// Inserts a new value, replacing whatever is currently at the end of the queue.
	void insert(const ValueT value) {
		backlog[DeferredDepth / elements_per_uint32] =
			(backlog[DeferredDepth / elements_per_uint32] & insert_mask) | (value << insert_shift);
	}
};
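A brief sketch of intended use (not part of the commit): with a depth of three, a value inserted now becomes visible three calls to advance() later.

// Usage sketch only.
void deferred_value_example() {
	DeferredValue<3, uint8_t> port_value;
	port_value.insert(0xff);	// Schedule 0xff to appear three steps from now.
	port_value.advance();
	port_value.advance();
	port_value.advance();
	const uint8_t visible = port_value.value();	// Now 0xff.
	(void)visible;
}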
@@ -35,251 +35,254 @@

	TODO: incorporate and codify AsyncJustInTimeActor.
*/
template <class T, class LocalTimeScale = HalfCycles, int multiplier = 1, int divider = 1> class JustInTimeActor:
	public ClockingHint::Observer
{
private:
	/*!
		A std::unique_ptr deleter which causes an update_sequence_point to occur on the actor supplied
		to it at construction if it implements @c next_sequence_point(). Otherwise destruction is a no-op.

		**Does not delete the object.**

		This is used by the -> operators below, which provide a unique pointer to the enclosed object and
		update their sequence points upon its destruction — i.e. after the caller has made whatever call
		or calls as were relevant to the enclosed object.
	*/
	class SequencePointAwareDeleter {
	public:
		explicit SequencePointAwareDeleter(
			JustInTimeActor<T, LocalTimeScale, multiplier, divider> *const actor) noexcept
			: actor_(actor) {}

		forceinline void operator ()(const T *const) const {
			if constexpr (has_sequence_points<T>::value) {
				actor_->update_sequence_point();
			}
		}

	private:
		JustInTimeActor<T, LocalTimeScale, multiplier, divider> *const actor_;
	};

	// This block of SFINAE determines whether objects of type T accept Cycles or HalfCycles.
	using HalfRunFor = void (T::*const)(HalfCycles);
	static uint8_t half_sig(...);
	static uint16_t half_sig(HalfRunFor);
	using TargetTimeScale =
		std::conditional_t<
			sizeof(half_sig(&T::run_for)) == sizeof(uint16_t),
			HalfCycles,
			Cycles>;

public:
	/// Constructs a new JustInTimeActor using the same construction arguments as the included object.
	template<typename... Args> JustInTimeActor(Args&&... args) : object_(std::forward<Args>(args)...) {
		if constexpr (std::is_base_of<ClockingHint::Source, T>::value) {
			object_.set_clocking_hint_observer(this);
		}
	}

	/// Adds time to the actor.
	///
	/// @returns @c true if adding time caused a flush; @c false otherwise.
	forceinline bool operator += (LocalTimeScale rhs) {
		if constexpr (std::is_base_of<ClockingHint::Source, T>::value) {
			if(clocking_preference_ == ClockingHint::Preference::None) {
				return false;
			}
		}

		if constexpr (multiplier != 1) {
			time_since_update_ += rhs * multiplier;
		} else {
			time_since_update_ += rhs;
		}
		is_flushed_ = false;

		if constexpr (std::is_base_of<ClockingHint::Source, T>::value) {
			if (clocking_preference_ == ClockingHint::Preference::RealTime) {
				flush();
				return true;
			}
		}

		if constexpr (has_sequence_points<T>::value) {
			time_until_event_ -= rhs * multiplier;
			if(time_until_event_ <= LocalTimeScale(0)) {
				time_overrun_ = time_until_event_ / divider;
				flush();
				update_sequence_point();
				return true;
			}
		}

		return false;
	}

	/// Flushes all accumulated time and returns a pointer to the included object.
	///
	/// If this object provides sequence points, checks for changes to the next
	/// sequence point upon deletion of the pointer.
	[[nodiscard]] forceinline auto operator->() {
#ifndef NDEBUG
		assert(!flush_concurrency_check_.test_and_set());
#endif
		flush();
#ifndef NDEBUG
		flush_concurrency_check_.clear();
#endif
		return std::unique_ptr<T, SequencePointAwareDeleter>(&object_, SequencePointAwareDeleter(this));
	}

	/// Acts exactly as per the standard ->, but preserves constness.
	///
	/// Despite being const, this will flush the object and, if relevant, update the next sequence point.
	[[nodiscard]] forceinline auto operator -> () const {
		auto non_const_this = const_cast<JustInTimeActor<T, LocalTimeScale, multiplier, divider> *>(this);
#ifndef NDEBUG
		assert(!non_const_this->flush_concurrency_check_.test_and_set());
#endif
		non_const_this->flush();
#ifndef NDEBUG
		non_const_this->flush_concurrency_check_.clear();
#endif
		return std::unique_ptr<const T, SequencePointAwareDeleter>(&object_, SequencePointAwareDeleter(non_const_this));
	}

	/// @returns a pointer to the included object, without flushing time.
	[[nodiscard]] forceinline T *last_valid() {
		return &object_;
	}

	/// @returns a const pointer to the included object, without flushing time.
	[[nodiscard]] forceinline const T *last_valid() const {
		return &object_;
	}

	/// @returns the amount of time since the object was last flushed, in the target time scale.
	[[nodiscard]] forceinline TargetTimeScale time_since_flush() const {
		if constexpr (divider == 1) {
			return time_since_update_;
		}
		return TargetTimeScale(time_since_update_.as_integral() / divider);
	}

	/// @returns the amount of time since the object was last flushed, plus the local time scale @c offset,
	/// converted to the target time scale.
	[[nodiscard]] forceinline TargetTimeScale time_since_flush(LocalTimeScale offset) const {
		if constexpr (divider == 1) {
			return time_since_update_ + offset;
		}
		return TargetTimeScale((time_since_update_ + offset).as_integral() / divider);
	}

	/// Flushes all accumulated time.
	///
	/// This does not affect this actor's record of when the next sequence point will occur.
	forceinline void flush() {
		if(!is_flushed_) {
			did_flush_ = is_flushed_ = true;
			if constexpr (divider == 1) {
				const auto duration = time_since_update_.template flush<TargetTimeScale>();
				object_.run_for(duration);
			} else {
				const auto duration = time_since_update_.template divide<TargetTimeScale>(LocalTimeScale(divider));
				if(duration > TargetTimeScale(0))
					object_.run_for(duration);
			}
		}
	}

	/// Indicates whether a flush has occurred since the last call to did_flush().
	[[nodiscard]] forceinline bool did_flush() {
		const bool did_flush = did_flush_;
		did_flush_ = false;
		return did_flush;
	}

	/// @returns a number in the range [-max, 0] indicating the offset of the most recent sequence
	/// point from the final time at the end of the += that triggered the sequence point.
	[[nodiscard]] forceinline LocalTimeScale last_sequence_point_overrun() {
		return time_overrun_;
	}

	/// @returns the number of cycles until the next sequence-point-based flush, if the embedded object
	/// supports sequence points; @c LocalTimeScale() otherwise.
	[[nodiscard]] LocalTimeScale cycles_until_implicit_flush() const {
		return time_until_event_ / divider;
	}

	/// Indicates whether a sequence-point-caused flush will occur if the specified period is added.
	[[nodiscard]] forceinline bool will_flush(LocalTimeScale rhs) const {
		if constexpr (!has_sequence_points<T>::value) {
			return false;
		}
		return rhs >= time_until_event_;
	}

	/// Indicates the amount of time, in the local time scale, until the first local slot that falls wholly
	/// after @c duration, if that delay were to occur in @c offset units of time from now.
	[[nodiscard]] forceinline LocalTimeScale back_map(TargetTimeScale duration, TargetTimeScale offset) const {
		// A 1:1 mapping is easy.
		if constexpr (multiplier == 1 && divider == 1) {
			return duration;
		}

		// Work out when this query is placed, and the time to which it relates
		const auto base = time_since_update_ + offset * divider;
		const auto target = base + duration * divider;

		// Figure out the number of whole input steps that is required to get
		// past target, and subtract the number of whole input steps necessary
		// to get to base.
		const auto steps_to_base = base.as_integral() / multiplier;
		const auto steps_to_target = (target.as_integral() + divider - 1) / multiplier;

		return LocalTimeScale(steps_to_target - steps_to_base);
	}

	/// Updates this template's record of the next sequence point.
	void update_sequence_point() {
		if constexpr (has_sequence_points<T>::value) {
			// Keep a fast path where no conversions will be applied; if conversions are
			// going to be applied then do a direct max -> max translation rather than
			// allowing the arithmetic to overflow.
			if constexpr (divider == 1 && std::is_same_v<LocalTimeScale, TargetTimeScale>) {
				time_until_event_ = object_.next_sequence_point();
			} else {
				const auto time = object_.next_sequence_point();
				if(time == TargetTimeScale::max()) {
					time_until_event_ = LocalTimeScale::max();
				} else {
					time_until_event_ = time * divider;
				}
			}
			assert(time_until_event_ > LocalTimeScale(0));
		}
	}

	/// @returns A cached copy of the object's clocking preference.
	ClockingHint::Preference clocking_preference() const {
		return clocking_preference_;
	}

private:
	T object_;
	LocalTimeScale time_since_update_, time_until_event_, time_overrun_;
	bool is_flushed_ = true;
	bool did_flush_ = false;

	template <typename S, typename = void> struct has_sequence_points : std::false_type {};
	template <typename S>
		struct has_sequence_points<S, decltype(void(std::declval<S &>().next_sequence_point()))> : std::true_type {};

	ClockingHint::Preference clocking_preference_ = ClockingHint::Preference::JustInTime;
	void set_component_prefers_clocking(ClockingHint::Source *, ClockingHint::Preference clocking) {
		clocking_preference_ = clocking;
	}

#ifndef NDEBUG
	std::atomic_flag flush_concurrency_check_{};
#endif
};
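A usage sketch of JustInTimeActor (not part of the commit); VIA6522 and its read() are hypothetical stand-ins for any type exposing run_for(Cycles) or run_for(HalfCycles).

// Usage sketch only; VIA6522 and read() are hypothetical.
void just_in_time_example() {
	JustInTimeActor<VIA6522> via;

	// Record the passage of time cheaply, without running the component...
	via += HalfCycles(20);

	// ...and catch it up only when its state is actually observed; operator->
	// flushes accumulated time and then forwards to the wrapped object.
	const uint8_t port = via->read(0);
	(void)port;

	// last_valid() peeks at the object as of its most recent flush, adding no time.
	via.last_valid()->read(0);
}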
@@ -288,49 +291,50 @@ template <class T, class LocalTimeScale = HalfCycles, int multiplier = 1, int di

	Any time the amount of accumulated time crosses a threshold provided at construction time,
	the object will be updated on the AsyncTaskQueue.
*/
template <class T, class LocalTimeScale = HalfCycles, class TargetTimeScale = LocalTimeScale>
class AsyncJustInTimeActor {
public:
	/// Constructs a new AsyncJustInTimeActor using the same construction arguments as the included object.
	template<typename... Args> AsyncJustInTimeActor(TargetTimeScale threshold, Args&&... args) :
		object_(std::forward<Args>(args)...),
		threshold_(threshold) {}

	/// Adds time to the actor.
	inline void operator += (const LocalTimeScale &rhs) {
		time_since_update_ += rhs;
		if(time_since_update_ >= threshold_) {
			time_since_update_ -= threshold_;
			task_queue_.enqueue([this] () {
				object_.run_for(threshold_);
			});
		}
		is_flushed_ = false;
	}

	/// Flushes all accumulated time and returns a pointer to the included object.
	inline T *operator->() {
		flush();
		return &object_;
	}

	/// Returns a pointer to the included object without flushing time.
	inline T *last_valid() {
		return &object_;
	}

	/// Flushes all accumulated time.
	inline void flush() {
		if(!is_flushed_) {
			task_queue_.flush();
			object_.run_for(time_since_update_.template flush<TargetTimeScale>());
			is_flushed_ = true;
		}
	}

private:
	T object_;
	LocalTimeScale time_since_update_;
	TargetTimeScale threshold_;
	bool is_flushed_ = true;
	Concurrency::AsyncTaskQueue<true> task_queue_;
};
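A sketch of use (not part of the commit); SoundChip and set_volume() are hypothetical names.

// Usage sketch only; SoundChip and set_volume() are hypothetical.
void async_actor_example() {
	AsyncJustInTimeActor<SoundChip> sound(HalfCycles(128));

	sound += HalfCycles(100);	// Accumulates only: still below the threshold.
	sound += HalfCycles(100);	// Crosses 128, so run_for(HalfCycles(128)) is enqueued asynchronously.

	sound->set_volume(7);		// operator-> flushes the remaining 72 half cycles synchronously first.
}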
@@ -21,65 +21,65 @@ namespace Time {
|
|||||||
of time, to bring it into phase.
|
of time, to bring it into phase.
|
||||||
*/
|
*/
|
||||||
class ScanSynchroniser {
|
class ScanSynchroniser {
|
||||||
public:
|
public:
|
||||||
/*!
|
/*!
|
||||||
@returns @c true if the emulated machine can be synchronised with the host frame output based on its
|
@returns @c true if the emulated machine can be synchronised with the host frame output based on its
|
||||||
current @c [scan]status and the host machine's @c frame_duration; @c false otherwise.
|
current @c [scan]status and the host machine's @c frame_duration; @c false otherwise.
|
||||||
*/
|
*/
|
||||||
bool can_synchronise(const Outputs::Display::ScanStatus &scan_status, double frame_duration) {
|
bool can_synchronise(const Outputs::Display::ScanStatus &scan_status, const double frame_duration) {
|
||||||
ratio_ = 1.0;
|
ratio_ = 1.0;
|
||||||
if(scan_status.field_duration_gradient < 0.00001) {
|
if(scan_status.field_duration_gradient < 0.00001) {
|
||||||
// Check out the machine's current frame time.
|
// Check out the machine's current frame time.
|
||||||
// If it's within 3% of a non-zero integer multiple of the
|
// If it's within 3% of a non-zero integer multiple of the
|
||||||
		// display rate, mark this time window to be split over the sync.
		ratio_ = (frame_duration * base_multiplier_) / scan_status.field_duration;
		const double integer_ratio = round(ratio_);
		if(integer_ratio > 0.0) {
			ratio_ /= integer_ratio;
			return ratio_ <= maximum_rate_adjustment && ratio_ >= 1.0 / maximum_rate_adjustment;
		}
	}

	return false;
}

/*!
	@returns The appropriate speed multiplier for the next frame based on the inputs previously supplied to @c can_synchronise.

	Results are undefined if @c can_synchronise returned @c false.
*/
double next_speed_multiplier(const Outputs::Display::ScanStatus &scan_status) {
	// The host versus emulated ratio is calculated based on the current perceived frame duration of the machine.
	// Either that number is exactly correct or it's already the result of some sort of low-pass filter. So there's
	// no benefit to second-guessing it here — just take it to be correct.
	//
	// ... with one slight caveat, which is that it is desirable to adjust phase here, to align vertical sync points.
	// So the set speed multiplier may be adjusted slightly to aim for that.
	double speed_multiplier = 1.0 / (ratio_ / base_multiplier_);
	if(scan_status.current_position > 0.0) {
		if(scan_status.current_position < 0.5) speed_multiplier /= phase_adjustment_ratio;
		else speed_multiplier *= phase_adjustment_ratio;
	}

	speed_multiplier_ = (speed_multiplier_ * 0.95) + (speed_multiplier * 0.05);
	return speed_multiplier_ * base_multiplier_;
}

-void set_base_speed_multiplier(double multiplier) {
+void set_base_speed_multiplier(const double multiplier) {
	base_multiplier_ = multiplier;
}

-double get_base_speed_multiplier() {
+double get_base_speed_multiplier() const {
	return base_multiplier_;
}

private:
	static constexpr double maximum_rate_adjustment = 1.03;
	static constexpr double phase_adjustment_ratio = 1.005;

	// Managed local state.
	double speed_multiplier_ = 1.0;
	double base_multiplier_ = 1.0;

	// Temporary storage to bridge the can_synchronise -> next_speed_multiplier gap.
	double ratio_ = 1.0;
};

}
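For context, a hypothetical caller-side sketch of the two methods above; it is not part of the commit. The enclosing class is referred to as ScanSynchroniser here only for illustration, can_synchronise is assumed to take the same ScanStatus plus the host display's frame duration in seconds (as its body above suggests), and run_machine_at() is a stand-in for however the host applies a speed multiplier.

// Hypothetical usage sketch; ScanSynchroniser, the can_synchronise signature and
// run_machine_at() are assumptions made for illustration.
void run_machine_at(double multiplier);	// stand-in

void on_host_frame(ScanSynchroniser &synchroniser,
		const Outputs::Display::ScanStatus &scan_status,
		double host_frame_duration) {
	if(synchroniser.can_synchronise(scan_status, host_frame_duration)) {
		// The emulated field rate is close enough (within maximum_rate_adjustment)
		// to an integer multiple of the host rate, so nudge emulation speed to lock
		// the two together; the returned multiplier is already low-pass filtered and
		// phase-adjusted to pull vertical syncs into alignment.
		run_machine_at(synchroniser.next_speed_multiplier(scan_status));
	} else {
		// Rates are too far apart; run at the unadjusted base multiplier.
		run_machine_at(synchroniser.get_base_speed_multiplier());
	}
}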
@@ -16,7 +16,9 @@ typedef double Seconds;
 typedef int64_t Nanos;
 
 inline Nanos nanos_now() {
-	return std::chrono::duration_cast<std::chrono::nanoseconds>(std::chrono::high_resolution_clock::now().time_since_epoch()).count();
+	return std::chrono::duration_cast<std::chrono::nanoseconds>(
+		std::chrono::high_resolution_clock::now().time_since_epoch()
+	).count();
 }
 
 inline Seconds seconds(Nanos nanos) {
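A minimal sketch of how these helpers compose, added for illustration only; it assumes the declarations in the hunk above live in the Time namespace used throughout this file, and the include path is a guess.

#include <cstdio>
#include "ClockReceiver/TimeTypes.hpp"	// assumed location of nanos_now()/seconds()

void time_some_work() {
	const Time::Nanos start = Time::nanos_now();
	// ... the work being measured would go here ...
	const Time::Nanos elapsed = Time::nanos_now() - start;

	// seconds() converts the int64_t nanosecond count to a double.
	std::printf("Took %.6f seconds\n", Time::seconds(elapsed));
}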
@@ -21,132 +21,132 @@ namespace Time {
	(iii) optionally, timer jitter; in order to suggest when you should next start drawing.
*/
class VSyncPredictor {
public:
	/*!
		Announces to the predictor that the work of producing an output frame has begun.
	*/
	void begin_redraw() {
		redraw_begin_time_ = nanos_now();
	}

	/*!
		Announces to the predictor that the work of producing an output frame has ended;
		the predictor will use the amount of time between each begin/end pair to modify
		its expectations as to how long it takes to draw a frame.
	*/
	void end_redraw() {
		redraw_period_.post(nanos_now() - redraw_begin_time_);
	}

	/*!
		Informs the predictor that a block-on-vsync has just ended, i.e. that the moment this
		machine calls retrace is now. The predictor uses these notifications to estimate output
		frame rate.
	*/
	void announce_vsync() {
		const auto now = nanos_now();

		if(last_vsync_) {
			last_vsync_ += frame_duration_;
			vsync_jitter_.post(last_vsync_ - now);
			last_vsync_ = (last_vsync_ + now) >> 1;
		} else {
			last_vsync_ = now;
		}
	}

	/*!
		Sets the frame rate for the target display.
	*/
	void set_frame_rate(float rate) {
		frame_duration_ = Nanos(1'000'000'000.0f / rate);
	}

	/*!
		@returns The time this class currently believes a whole frame occupies.
	*/
	Time::Nanos frame_duration() {
		return frame_duration_;
	}

	/*!
		Adds a record of how much jitter was experienced in scheduling; these values will be
		factored into the @c suggested_draw_time if supplied.

		A positive number means the timer occurred late. A negative number means it occurred early.
	*/
	void add_timer_jitter(Time::Nanos jitter) {
		timer_jitter_.post(jitter);
	}

	/*!
		Announces to the vsync predictor that output is now paused. This ends frame period
		calculations until the next announce_vsync() restarts frame-length counting.
	*/
	void pause() {
		last_vsync_ = 0;
	}

	/*!
		@return The time at which redrawing should begin, given the predicted frame period, how
		long it appears to take to draw a frame and how much jitter there is in scheduling
		(if those figures are being supplied).
	*/
	Nanos suggested_draw_time() {
		const auto mean = redraw_period_.mean() + timer_jitter_.mean() + vsync_jitter_.mean();
		const auto variance = redraw_period_.variance() + timer_jitter_.variance() + vsync_jitter_.variance();

		// Permit three standard deviations from the mean, to cover 99.9% of cases.
		const auto period = mean + Nanos(3.0f * sqrt(float(variance)));

		return last_vsync_ + frame_duration_ - period;
	}

private:
	class VarianceCollector {
	public:
		VarianceCollector(Time::Nanos default_value) {
			sum_ = default_value * 128;
			for(int c = 0; c < 128; ++c) {
				history_[c] = default_value;
			}
		}

		void post(Time::Nanos value) {
			sum_ -= history_[write_pointer_];
			sum_ += value;
			history_[write_pointer_] = value;
			write_pointer_ = (write_pointer_ + 1) & 127;
		}

		Time::Nanos mean() {
			return sum_ / 128;
		}

		Time::Nanos variance() {
			// I haven't yet come up with a better solution than calculating this
			// in whole every time, given the way that the mean mutates.
			Time::Nanos variance = 0;
			for(int c = 0; c < 128; ++c) {
				const auto difference = ((history_[c] * 128) - sum_) / 128;
				variance += (difference * difference);
			}
			return variance / 128;
		}

	private:
		Time::Nanos sum_;
		Time::Nanos history_[128];
		size_t write_pointer_ = 0;
	};

	Nanos redraw_begin_time_ = 0;
	Nanos last_vsync_ = 0;
	Nanos frame_duration_ = 1'000'000'000 / 60;

	VarianceCollector vsync_jitter_{0};
	VarianceCollector redraw_period_{1'000'000'000 / 60};	// A less convincing first guess.
	VarianceCollector timer_jitter_{0};	// Seed at 0 in case this feature isn't used by the owner.
};

}
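Taken together, the doc comments above imply a host frame loop roughly like the following. This is a minimal sketch added for illustration, not code from the commit; wait_for_host_vsync(), sleep_until_nanos() and draw_frame() are hypothetical stand-ins for whatever the host platform provides.

// Hypothetical host-side frame loop; the three stand-in functions below are
// assumptions, only Time::VSyncPredictor comes from the code above.
void wait_for_host_vsync();		// stand-in: block until the display's retrace
void sleep_until_nanos(Time::Nanos);	// stand-in: host timer
void draw_frame();			// stand-in: produce and present the output frame

void run_frame_loop() {
	Time::VSyncPredictor predictor;
	predictor.set_frame_rate(60.0f);

	while(true) {
		wait_for_host_vsync();
		predictor.announce_vsync();

		// Start drawing as late as possible while still expecting to finish before
		// the next retrace: mean draw time plus three standard deviations, including
		// observed vsync and timer jitter.
		const Time::Nanos target = predictor.suggested_draw_time();
		sleep_until_nanos(target);
		predictor.add_timer_jitter(Time::nanos_now() - target);	// positive => timer fired late

		predictor.begin_redraw();
		draw_frame();
		predictor.end_redraw();
	}
}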
@@ -5455,7 +5455,7 @@
 		attributes = {
 			BuildIndependentTargetsInParallel = YES;
 			LastSwiftUpdateCheck = 0700;
-			LastUpgradeCheck = 1430;
+			LastUpgradeCheck = 1610;
 			ORGANIZATIONNAME = "Thomas Harte";
 			TargetAttributes = {
 				4B055A691FAE763F0060FFFF = {
@@ -6908,6 +6908,7 @@
 			isa = XCBuildConfiguration;
 			buildSettings = {
 				ALWAYS_SEARCH_USER_PATHS = NO;
+				ASSETCATALOG_COMPILER_GENERATE_SWIFT_ASSET_SYMBOL_EXTENSIONS = YES;
 				CLANG_ANALYZER_LOCALIZABILITY_NONLOCALIZED = YES;
 				CLANG_CXX_LANGUAGE_STANDARD = "c++17";
 				CLANG_ENABLE_MODULES = YES;
@@ -6968,6 +6969,7 @@
 			isa = XCBuildConfiguration;
 			buildSettings = {
 				ALWAYS_SEARCH_USER_PATHS = NO;
+				ASSETCATALOG_COMPILER_GENERATE_SWIFT_ASSET_SYMBOL_EXTENSIONS = YES;
 				CLANG_ANALYZER_LOCALIZABILITY_NONLOCALIZED = YES;
 				CLANG_CXX_LANGUAGE_STANDARD = "c++17";
 				CLANG_ENABLE_MODULES = YES;
@@ -1,6 +1,6 @@
 <?xml version="1.0" encoding="UTF-8"?>
 <Scheme
-   LastUpgradeVersion = "1400"
+   LastUpgradeVersion = "1610"
    version = "1.3">
    <BuildAction
       parallelizeBuildables = "YES"
@@ -1,7 +1,7 @@
 <?xml version="1.0" encoding="UTF-8"?>
 <Scheme
-   LastUpgradeVersion = "1400"
-   version = "1.3">
+   LastUpgradeVersion = "1610"
+   version = "1.8">
    <BuildAction
       parallelizeBuildables = "YES"
       buildImplicitDependencies = "YES">
@@ -1,6 +1,6 @@
 <?xml version="1.0" encoding="UTF-8"?>
 <Scheme
-   LastUpgradeVersion = "1400"
+   LastUpgradeVersion = "1610"
    version = "1.3">
    <BuildAction
       parallelizeBuildables = "YES"
@@ -51,15 +51,6 @@
    savedToolIdentifier = ""
    useCustomWorkingDirectory = "NO"
    debugDocumentVersioning = "YES">
-   <MacroExpansion>
-      <BuildableReference
-         BuildableIdentifier = "primary"
-         BlueprintIdentifier = "4BB73E9D1B587A5100552FC2"
-         BuildableName = "Clock Signal.app"
-         BlueprintName = "Clock Signal"
-         ReferencedContainer = "container:Clock Signal.xcodeproj">
-      </BuildableReference>
-   </MacroExpansion>
 </ProfileAction>
 <AnalyzeAction
    buildConfiguration = "Debug">