CLK/OSBindings/Qt/audiobuffer.h

#pragma once
#include <algorithm>
#include <cstdint>
#include <cstring>
#include <mutex>
#include <vector>

#include <QIODevice>
/*!
* \brief An intermediate receptacle for audio data.
*
* Provides a QIODevice that will attempt to buffer the minimum amount
* of data before handing it off to a polling QAudioOutput.
*
* Adding an extra buffer increases worst-case latency but resolves two
* issues uncovered when using a QAudioOutput in push mode:
*
* 1. there is otherwise no way to know how much data is currently buffered,
* which would leave playback at a permanent latency disadvantage after startup; and
* 2. such QAudioOutputs empirically seem to introduce a minimum
* 16384-byte latency.
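*
* A minimal usage sketch (hypothetical values; assumes the Qt 5 multimedia API
* with the QAudioOutput in pull mode, so that it polls this QIODevice):
* \code
*	AudioBuffer audioBuffer;
*	audioBuffer.setDepth(4096);		// Illustrative depth, in bytes.
*
*	QAudioFormat format;
*	format.setSampleRate(44100);
*	format.setChannelCount(2);
*	format.setSampleSize(16);
*	format.setCodec("audio/pcm");
*	format.setSampleType(QAudioFormat::SignedInt);
*
*	auto output = new QAudioOutput(format);
*	output->start(&audioBuffer);	// Pull mode: the output reads via readData().
*
*	// Producer side, whenever new samples are available:
*	audioBuffer.write(samples);		// samples is a std::vector<int16_t>.
* \endcode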
*/
struct AudioBuffer: public QIODevice {
	AudioBuffer() {
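		// Open immediately as read-only and unbuffered; data is injected via
		// write() below rather than through QIODevice's own write path.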
		open(QIODevice::ReadOnly | QIODevice::Unbuffered);
	}

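	// Sets the buffer's depth: the maximum number of bytes that will be kept enqueued.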
	void setDepth(size_t depth) {
		std::lock_guard lock(mutex);
		buffer.resize(depth);
	}

	// AudioBuffer-specific behaviour: always provide the latest data,
	// even if that means skipping some.
	qint64 readData(char *data, const qint64 maxlen) override {
		if(!maxlen) {
			return 0;
		}

		std::lock_guard lock(mutex);
		if(readPointer == writePointer || buffer.empty()) return 0;

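		// Provide everything that is enqueued, up to the caller's limit.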
		const size_t dataAvailable = std::min(writePointer - readPointer, size_t(maxlen));
		size_t bytesToCopy = dataAvailable;
		while(bytesToCopy) {
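			// Copy in at most two spans: up to the end of the ring buffer, then from its start.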
			const size_t nextLength = std::min(buffer.size() - (readPointer % buffer.size()), bytesToCopy);
			memcpy(data, &buffer[readPointer % buffer.size()], nextLength);

			bytesToCopy -= nextLength;
			data += nextLength;
			readPointer += nextLength;
		}
		return qint64(dataAvailable);
	}

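	// Reports the number of bytes currently enqueued and available to read.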
	qint64 bytesAvailable() const override {
		std::lock_guard lock(mutex);
		return qint64(writePointer - readPointer);
	}

	// Required to make QIODevice concrete; not used.
	qint64 writeData(const char *, qint64) override {
		return 0;
	}

	// Posts a new set of source data. At most the number of bytes specified by
	// @c setDepth is kept enqueued; once the buffer is full, further writes
	// overwrite the oldest data so that only the most recent bytes remain.
	void write(const std::vector<int16_t> &source) {
		std::lock_guard lock(mutex);
		if(buffer.empty()) return;

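		// The ring buffer stores raw bytes; convert the incoming sample count to a byte count.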
		const size_t sourceSize = source.size() * sizeof(int16_t);
		size_t bytesToCopy = sourceSize;
		auto data = reinterpret_cast<const uint8_t *>(source.data());
		while(bytesToCopy) {
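			// Copy in at most two spans: up to the end of the ring buffer, then from its start.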
			const size_t nextLength = std::min(buffer.size() - (writePointer % buffer.size()), bytesToCopy);
			memcpy(&buffer[writePointer % buffer.size()], data, nextLength);

			bytesToCopy -= nextLength;
			data += nextLength;
			writePointer += nextLength;
		}

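		// If more than a full buffer's worth is now enqueued, advance the read
		// pointer so that only the most recent data remains.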
		if(writePointer - readPointer > buffer.size()) {
			readPointer = writePointer - buffer.size();
		}
	}

	private:
		mutable std::mutex mutex;
		std::vector<uint8_t> buffer;
		mutable size_t readPointer = 0;
		size_t writePointer = 0;
};