1
0
mirror of https://github.com/TomHarte/CLK.git synced 2025-02-23 03:29:04 +00:00

Bites the bullet and accepts that an additional texture will be useful for QAM separation.

This commit is contained in:
Thomas Harte 2019-02-09 16:54:31 -05:00
parent 75987f64ec
commit eecd4417e7
3 changed files with 323 additions and 135 deletions

View File

@ -24,7 +24,14 @@ constexpr GLenum SourceDataTextureUnit = GL_TEXTURE0;
constexpr GLenum UnprocessedLineBufferTextureUnit = GL_TEXTURE1;
/// The texture unit that contains the current display.
constexpr GLenum AccumulationTextureUnit = GL_TEXTURE2;
constexpr GLenum AccumulationTextureUnit = GL_TEXTURE3;
/// The texture unit that contains a pre-lowpass-filtered but fixed-resolution version of the chroma signal;
/// this is used when processing composite video only, and for chroma information only. Luminance is calculated
/// at the fidelity permitted by the output target, but my efforts to separate, demodulate and filter
/// chrominance during output without either massively sampling or else incurring significant high-frequency
/// noise that sampling reduces into a Moire, have proven to be unsuccessful for the time being.
constexpr GLenum QAMChromaTextureUnit = GL_TEXTURE2;
#define TextureAddress(x, y) (((y) << 11) | (x))
#define TextureAddressGetY(v) uint16_t((v) >> 11)
@ -299,9 +306,23 @@ void ScanTarget::setup_pipeline() {
write_pointers_.write_area = 0;
}
// Pick a processing width; this will be the minimum necessary not to
// lose any detail when combining the input.
processing_width_ = modals_.cycles_per_line / modals_.clocks_per_pixel_greatest_common_divisor;
// Destroy or create a QAM buffer and shader, if appropriate.
const bool needs_qam_buffer = (modals_.display_type == DisplayType::CompositeColour || modals_.display_type == DisplayType::SVideo);
if(needs_qam_buffer && !qam_chroma_texture_) {
qam_chroma_texture_.reset(new TextureTarget(LineBufferWidth, LineBufferHeight, QAMChromaTextureUnit, GL_NEAREST, false));
} else {
qam_chroma_texture_.reset();
qam_separation_shader_.reset();
}
if(needs_qam_buffer) {
qam_separation_shader_ = qam_separation_shader();
glBindVertexArray(line_vertex_array_);
glBindBuffer(GL_ARRAY_BUFFER, line_buffer_name_);
enable_vertex_attributes(ShaderType::QAMSeparation, *qam_separation_shader_);
set_uniforms(ShaderType::QAMSeparation, *qam_separation_shader_);
qam_separation_shader_->set_uniform("textureName", GLint(UnprocessedLineBufferTextureUnit - GL_TEXTURE0));
}
// Establish an output shader.
output_shader_ = conversion_shader();
@ -312,6 +333,7 @@ void ScanTarget::setup_pipeline() {
output_shader_->set_uniform("origin", modals_.visible_area.origin.x, modals_.visible_area.origin.y);
output_shader_->set_uniform("size", modals_.visible_area.size.width, modals_.visible_area.size.height);
output_shader_->set_uniform("textureName", GLint(UnprocessedLineBufferTextureUnit - GL_TEXTURE0));
output_shader_->set_uniform("qamTextureName", GLint(QAMChromaTextureUnit - GL_TEXTURE0));
// Establish an input shader.
input_shader_ = composition_shader();
@ -487,8 +509,10 @@ void ScanTarget::draw(bool synchronous, int output_width, int output_height) {
// Figure out how many new spans are ostensibly ready; use two less than that.
uint16_t new_spans = (submit_pointers.line + LineBufferHeight - read_pointers.line) % LineBufferHeight;
if(new_spans) {
// Bind the accumulation framebuffer.
accumulation_texture_->bind_framebuffer();
// Bind the accumulation framebuffer, unless there's going to be QAM work.
if(!qam_separation_shader_) {
accumulation_texture_->bind_framebuffer();
}
// Enable blending and stenciling, and ensure spans increment the stencil buffer.
glEnable(GL_BLEND);
@ -525,10 +549,12 @@ void ScanTarget::draw(bool synchronous, int output_width, int output_height) {
// Rebind the program for span output.
glBindVertexArray(line_vertex_array_);
output_shader_->bind();
if(!qam_separation_shader_) {
output_shader_->bind();
}
}
// Upload and draw.
// Upload.
const auto buffer_size = spans * sizeof(Line);
if(!end_line || end_line > start_line) {
glBufferSubData(GL_ARRAY_BUFFER, 0, GLsizeiptr(buffer_size), &line_buffer_[start_line]);
@ -547,6 +573,18 @@ void ScanTarget::draw(bool synchronous, int output_width, int output_height) {
glUnmapBuffer(GL_ARRAY_BUFFER);
}
// Produce colour information, if required.
if(qam_separation_shader_) {
qam_chroma_texture_->bind_framebuffer();
qam_separation_shader_->bind();
glDrawArraysInstanced(GL_TRIANGLE_STRIP, 0, 4, GLsizei(spans));
accumulation_texture_->bind_framebuffer();
output_shader_->bind();
}
// Render to the output.
glDrawArraysInstanced(GL_TRIANGLE_STRIP, 0, 4, GLsizei(spans));
start_line = end_line;

View File

@ -124,6 +124,10 @@ class ScanTarget: public Outputs::Display::ScanTarget {
// application of any necessary conversions — e.g. composite processing.
TextureTarget unprocessed_line_texture_;
// Contains pre-lowpass-filtered chrominance information that is
// part-QAM-demodulated, if dealing with a QAM data source.
std::unique_ptr<TextureTarget> qam_chroma_texture_;
// Scans are accumulated to the accumulation texture; the full-display
// rectangle is used to ensure untouched pixels properly decay.
std::unique_ptr<TextureTarget> accumulation_texture_;
@ -165,7 +169,8 @@ class ScanTarget: public Outputs::Display::ScanTarget {
enum class ShaderType {
Composition,
Conversion
Conversion,
QAMSeparation
};
/*!
@ -178,9 +183,10 @@ class ScanTarget: public Outputs::Display::ScanTarget {
GLsync fence_ = nullptr;
std::atomic_flag is_drawing_;
int processing_width_ = 0;
std::unique_ptr<Shader> input_shader_;
std::unique_ptr<Shader> output_shader_;
std::unique_ptr<Shader> qam_separation_shader_;
/*!
Produces a shader that composes fragment of the input stream to a single buffer,
@ -193,6 +199,14 @@ class ScanTarget: public Outputs::Display::ScanTarget {
output RGB, decoding composite or S-Video as necessary.
*/
std::unique_ptr<Shader> conversion_shader() const;
/*!
Produces a shader that writes separated but not-yet filtered QAM components
from the unprocessed line texture to the QAM chroma texture, at a fixed
size of four samples per colour clock, point sampled.
*/
std::unique_ptr<Shader> qam_separation_shader() const;
std::string sampling_function() const;
};
}

View File

@ -12,6 +12,8 @@
using namespace Outputs::Display::OpenGL;
// MARK: - State setup for compiled shaders.
void Outputs::Display::OpenGL::ScanTarget::set_uniforms(ShaderType type, Shader &target) {
// Slightly over-amping rowHeight here is a cheap way to make sure that lines
// converge even allowing for the fact that they may not be spaced by exactly
@ -69,16 +71,18 @@ void ScanTarget::enable_vertex_attributes(ShaderType type, Shader &target) {
1);
break;
case ShaderType::Conversion:
default:
for(int c = 0; c < 2; ++c) {
const std::string prefix = c ? "end" : "start";
target.enable_vertex_attribute_with_pointer(
prefix + "Point",
2, GL_UNSIGNED_SHORT, GL_FALSE,
sizeof(Line),
reinterpret_cast<void *>(rt_offset_of(end_points[c].x, test_line)),
1);
if(type == ShaderType::Conversion) {
target.enable_vertex_attribute_with_pointer(
prefix + "Point",
2, GL_UNSIGNED_SHORT, GL_FALSE,
sizeof(Line),
reinterpret_cast<void *>(rt_offset_of(end_points[c].x, test_line)),
1);
}
target.enable_vertex_attribute_with_pointer(
prefix + "Clock",
@ -113,85 +117,71 @@ void ScanTarget::enable_vertex_attributes(ShaderType type, Shader &target) {
#undef rt_offset_of
}
std::unique_ptr<Shader> ScanTarget::composition_shader() const {
const std::string vertex_shader =
"#version 150\n"
// MARK: - Shader code.
"in float startDataX;"
"in float startClock;"
std::string ScanTarget::sampling_function() const {
std::string fragment_shader;
"in float endDataX;"
"in float endClock;"
"in float dataY;"
"in float lineY;"
"out vec2 textureCoordinate;"
"uniform usampler2D textureName;"
"void main(void) {"
"float lateral = float(gl_VertexID & 1);"
"float longitudinal = float((gl_VertexID & 2) >> 1);"
"textureCoordinate = vec2(mix(startDataX, endDataX, lateral), dataY + 0.5) / textureSize(textureName, 0);"
"vec2 eyePosition = vec2(mix(startClock, endClock, lateral), lineY + longitudinal) / vec2(2048.0, 2048.0);"
"gl_Position = vec4(eyePosition*2.0 - vec2(1.0), 0.0, 1.0);"
"}";
std::string fragment_shader =
"#version 150\n"
"out vec4 fragColour;"
"in vec2 textureCoordinate;"
"uniform usampler2D textureName;"
"void main(void) {";
if(modals_.display_type == DisplayType::SVideo) {
fragment_shader +=
"vec2 svideo_sample(vec2 coordinate, float angle) {";
} else {
fragment_shader +=
"float composite_sample(vec2 coordinate, float angle) {";
}
const bool is_svideo = modals_.display_type == DisplayType::SVideo;
switch(modals_.input_data_type) {
case InputDataType::Luminance1:
fragment_shader += "fragColour = textureLod(textureName, textureCoordinate, 0).rrrr;";
break;
case InputDataType::Luminance8:
fragment_shader += "fragColour = textureLod(textureName, textureCoordinate, 0).rrrr / vec4(255.0);";
// Easy, just copy across.
fragment_shader +=
is_svideo ?
"return vec2(textureLod(textureName, coordinate, 0).r, 0.0);" :
"return textureLod(textureName, coordinate, 0).r;";
break;
case InputDataType::PhaseLinkedLuminance8:
fragment_shader +=
"uint iPhase = uint((angle * 2.0 / 3.141592654) ) & 3u;";
fragment_shader +=
is_svideo ?
"return vec2(textureLod(textureName, coordinate, 0)[iPhase], 0.0);" :
"return textureLod(textureName, coordinate, 0)[iPhase];";
break;
case InputDataType::Luminance8Phase8:
case InputDataType::Red8Green8Blue8:
fragment_shader += "fragColour = textureLod(textureName, textureCoordinate, 0) / vec4(255.0);";
fragment_shader +=
"vec2 yc = textureLod(textureName, coordinate, 0).rg;"
"float phaseOffset = 3.141592654 * 2.0 * 2.0 * yc.y;"
"float rawChroma = step(yc.y, 0.75) * cos(angle + phaseOffset);";
fragment_shader +=
is_svideo ?
"return vec2(yc.x, rawChroma);" :
"return mix(yc.x, rawChroma, compositeAmplitude);";
break;
case InputDataType::Red1Green1Blue1:
fragment_shader += "fragColour = vec4(textureLod(textureName, textureCoordinate, 0).rrr & uvec3(4u, 2u, 1u), 1.0);";
break;
case InputDataType::Red2Green2Blue2:
fragment_shader +=
"uint textureValue = textureLod(textureName, textureCoordinate, 0).r;"
"fragColour = vec4(float((textureValue >> 4) & 3u), float((textureValue >> 2) & 3u), float(textureValue & 3u), 3.0) / 3.0;";
break;
case InputDataType::Red4Green4Blue4:
case InputDataType::Red8Green8Blue8:
fragment_shader +=
"uvec2 textureValue = textureLod(textureName, textureCoordinate, 0).rg;"
"fragColour = vec4(float(textureValue.r) / 15.0, float(textureValue.g & 240u) / 240.0, float(textureValue.g & 15u) / 15.0, 1.0);";
"vec3 colour = rgbToLumaChroma * textureLod(textureName, coordinate, 0).rgb;"
"vec2 quadrature = vec2(cos(angle), sin(angle));";
fragment_shader +=
is_svideo ?
"return vec2(colour.r, dot(quadrature, colour.gb));" :
"return mix(colour.r, dot(quadrature, colour.gb), compositeAmplitude);";
break;
}
return std::unique_ptr<Shader>(new Shader(
vertex_shader,
fragment_shader + "}",
{
"startDataX",
"startClock",
"endDataX",
"endClock",
"dataY",
"lineY",
}
));
fragment_shader += "}";
return fragment_shader;
}
std::unique_ptr<Shader> ScanTarget::conversion_shader() const {
@ -224,6 +214,7 @@ std::unique_ptr<Shader> ScanTarget::conversion_shader() const {
"in float lineCompositeAmplitude;"
"uniform sampler2D textureName;"
"uniform sampler2D qamTextureName;"
"uniform vec2 origin;"
"uniform vec2 size;";
@ -231,6 +222,7 @@ std::unique_ptr<Shader> ScanTarget::conversion_shader() const {
"#version 150\n"
"uniform sampler2D textureName;"
"uniform sampler2D qamTextureName;"
"out vec4 fragColour;";
@ -313,64 +305,7 @@ std::unique_ptr<Shader> ScanTarget::conversion_shader() const {
"uniform mat3 lumaChromaToRGB;"
"uniform mat3 rgbToLumaChroma;";
if(modals_.display_type == DisplayType::SVideo) {
fragment_shader +=
"vec2 svideo_sample(vec2 coordinate, float angle) {";
} else {
fragment_shader +=
"float composite_sample(vec2 coordinate, float angle) {";
}
const bool is_svideo = modals_.display_type == DisplayType::SVideo;
switch(modals_.input_data_type) {
case InputDataType::Luminance1:
case InputDataType::Luminance8:
// Easy, just copy across.
fragment_shader +=
is_svideo ?
"return vec2(textureLod(textureName, coordinate, 0).r, 0.0);" :
"return textureLod(textureName, coordinate, 0).r;";
break;
case InputDataType::PhaseLinkedLuminance8:
fragment_shader +=
"uint iPhase = uint((angle * 2.0 / 3.141592654) ) & 3u;";
fragment_shader +=
is_svideo ?
"return vec2(textureLod(textureName, coordinate, 0)[iPhase], 0.0);" :
"return textureLod(textureName, coordinate, 0)[iPhase];";
break;
case InputDataType::Luminance8Phase8:
fragment_shader +=
"vec2 yc = textureLod(textureName, coordinate, 0).rg;"
"float phaseOffset = 3.141592654 * 2.0 * 2.0 * yc.y;"
"float rawChroma = step(yc.y, 0.75) * cos(angle + phaseOffset);";
fragment_shader +=
is_svideo ?
"return vec2(yc.x, rawChroma);" :
"return mix(yc.x, rawChroma, compositeAmplitude);";
break;
case InputDataType::Red1Green1Blue1:
case InputDataType::Red2Green2Blue2:
case InputDataType::Red4Green4Blue4:
case InputDataType::Red8Green8Blue8:
fragment_shader +=
"vec3 colour = rgbToLumaChroma * textureLod(textureName, coordinate, 0).rgb;"
"vec2 quadrature = vec2(cos(angle), sin(angle));";
fragment_shader +=
is_svideo ?
"return vec2(colour.r, dot(quadrature, colour.gb));" :
"return mix(colour.r, dot(quadrature, colour.gb), compositeAmplitude);";
break;
}
fragment_shader += "}";
fragment_shader += sampling_function();
}
fragment_shader +=
@ -517,3 +452,204 @@ std::unique_ptr<Shader> ScanTarget::conversion_shader() const {
return std::unique_ptr<Shader>(shader);
}
std::unique_ptr<Shader> ScanTarget::composition_shader() const {
	// Vertex shader: expands each span into a quad via gl_VertexID, mapping the
	// span's data region into a texture coordinate and its clock/line position
	// into clip space within the 2048x2048 line buffer.
	const std::string vertex_shader =
		"#version 150\n"

		"in float startDataX;"
		"in float startClock;"

		"in float endDataX;"
		"in float endClock;"

		"in float dataY;"
		"in float lineY;"

		"out vec2 textureCoordinate;"
		"uniform usampler2D textureName;"

		"void main(void) {"
			"float lateral = float(gl_VertexID & 1);"
			"float longitudinal = float((gl_VertexID & 2) >> 1);"

			"textureCoordinate = vec2(mix(startDataX, endDataX, lateral), dataY + 0.5) / textureSize(textureName, 0);"
			"vec2 eyePosition = vec2(mix(startClock, endClock, lateral), lineY + longitudinal) / vec2(2048.0, 2048.0);"
			"gl_Position = vec4(eyePosition*2.0 - vec2(1.0), 0.0, 1.0);"
		"}";

	// Pick the fragment-shader body that unpacks the current input data type
	// into a normalised RGBA value.
	std::string sampling_body;
	switch(modals_.input_data_type) {
		case InputDataType::Luminance1:
			sampling_body = "fragColour = textureLod(textureName, textureCoordinate, 0).rrrr;";
		break;

		case InputDataType::Luminance8:
			sampling_body = "fragColour = textureLod(textureName, textureCoordinate, 0).rrrr / vec4(255.0);";
		break;

		case InputDataType::PhaseLinkedLuminance8:
		case InputDataType::Luminance8Phase8:
		case InputDataType::Red8Green8Blue8:
			// All four channels are bytes; scale to [0, 1].
			sampling_body = "fragColour = textureLod(textureName, textureCoordinate, 0) / vec4(255.0);";
		break;

		case InputDataType::Red1Green1Blue1:
			// One bit per channel, packed into the low three bits.
			sampling_body = "fragColour = vec4(textureLod(textureName, textureCoordinate, 0).rrr & uvec3(4u, 2u, 1u), 1.0);";
		break;

		case InputDataType::Red2Green2Blue2:
			// Two bits per channel, packed into one byte.
			sampling_body =
				"uint textureValue = textureLod(textureName, textureCoordinate, 0).r;"
				"fragColour = vec4(float((textureValue >> 4) & 3u), float((textureValue >> 2) & 3u), float(textureValue & 3u), 3.0) / 3.0;";
		break;

		case InputDataType::Red4Green4Blue4:
			// Four bits per channel, packed into two bytes.
			sampling_body =
				"uvec2 textureValue = textureLod(textureName, textureCoordinate, 0).rg;"
				"fragColour = vec4(float(textureValue.r) / 15.0, float(textureValue.g & 240u) / 240.0, float(textureValue.g & 15u) / 15.0, 1.0);";
		break;
	}

	const std::string fragment_shader =
		"#version 150\n"

		"out vec4 fragColour;"
		"in vec2 textureCoordinate;"

		"uniform usampler2D textureName;"

		"void main(void) {" +
			sampling_body +
		"}";

	return std::unique_ptr<Shader>(new Shader(
		vertex_shader,
		fragment_shader,
		{
			"startDataX",
			"startClock",
			"endDataX",
			"endClock",
			"dataY",
			"lineY",
		}
	));
}
std::unique_ptr<Shader> ScanTarget::qam_separation_shader() const {
	const bool is_svideo = modals_.display_type == DisplayType::SVideo;

	// Sets up texture coordinates to run between startClock and endClock, mapping to
	// coordinates that correlate with four times the absolute value of the composite angle.
	std::string vertex_shader =
		"#version 150\n"

		"in float startClock;"
		"in float startCompositeAngle;"
		"in float endClock;"
		"in float endCompositeAngle;"

		"in float lineY;"
		"in float lineCompositeAmplitude;"

		"uniform sampler2D textureName;"
		"uniform float textureCoordinateOffsets[4];"

		"out float compositeAngle;"
		"out float compositeAmplitude;"
		"out float oneOverCompositeAmplitude;";

	std::string fragment_shader =
		"#version 150\n"

		"uniform sampler2D textureName;"

		"in float compositeAngle;"
		"in float compositeAmplitude;"
		"in float oneOverCompositeAmplitude;"

		"out vec4 fragColour;"
		"uniform vec4 compositeAngleOffsets;";

	// The texture coordinates are produced by the vertex shader and consumed by
	// the fragment shader, so they must be declared `out` in the former and
	// `in` in the latter. (Previously both declared `out`, which fails to link.)
	if(is_svideo) {
		vertex_shader += "out vec2 textureCoordinate;";
		fragment_shader += "in vec2 textureCoordinate;";
	} else {
		vertex_shader += "out vec2 textureCoordinates[4];";
		fragment_shader += "in vec2 textureCoordinates[4];";
	}

	vertex_shader +=
		"void main(void) {"
			"float lateral = float(gl_VertexID & 1);"
			"float longitudinal = float((gl_VertexID & 2) >> 1);"

			"vec2 eyePosition = vec2(abs(mix(startCompositeAngle, endCompositeAngle, lateral) * 4.0), lineY + longitudinal) / vec2(2048.0, 2048.0);"
			"gl_Position = vec4(eyePosition*2.0 - vec2(1.0), 0.0, 1.0);"

			"compositeAngle = (mix(startCompositeAngle, endCompositeAngle, lateral) / 32.0) * 3.141592654;"
			"compositeAmplitude = lineCompositeAmplitude / 255.0;"
			"oneOverCompositeAmplitude = mix(0.0, 255.0 / lineCompositeAmplitude, step(0.01, lineCompositeAmplitude));"

			"float centreClock = mix(startClock, endClock, lateral);";

	if(is_svideo) {
		// S-Video can sample chrominance directly, so only a single coordinate is needed.
		// (This statement previously lacked its terminating semicolon, producing
		// invalid GLSL once the closing brace was appended.)
		vertex_shader +=
			"textureCoordinate = vec2(centreClock + textureCoordinateOffsets[0], lineY + 0.5) / textureSize(textureName, 0);";
	} else {
		// Composite must sample at four offsets per output pixel in order to separate chroma.
		vertex_shader +=
			"textureCoordinates[0] = vec2(centreClock + textureCoordinateOffsets[0], lineY + 0.5) / textureSize(textureName, 0);"
			"textureCoordinates[1] = vec2(centreClock + textureCoordinateOffsets[1], lineY + 0.5) / textureSize(textureName, 0);"
			"textureCoordinates[2] = vec2(centreClock + textureCoordinateOffsets[2], lineY + 0.5) / textureSize(textureName, 0);"
			"textureCoordinates[3] = vec2(centreClock + textureCoordinateOffsets[3], lineY + 0.5) / textureSize(textureName, 0);";
	}

	vertex_shader += "}";

	fragment_shader +=
		sampling_function() +
		"void main(void) {";

	// TODO: properly map range of composite value.
	if(modals_.display_type == DisplayType::SVideo) {
		// Split the single luminance/chrominance sample into luminance plus the
		// two quadrature components of chrominance.
		fragment_shader +=
			"fragColour = vec4(svideo_sample(textureCoordinate, compositeAngle).rgg * vec3(1.0, cos(compositeAngle), sin(compositeAngle)), 1.0);";
	} else {
		fragment_shader +=
			"vec4 angles = compositeAngle + compositeAngleOffsets;"

			// Sample four times over, at proper angle offsets.
			"vec4 samples = vec4("
				"composite_sample(textureCoordinates[0], angles.x),"
				"composite_sample(textureCoordinates[1], angles.y),"
				"composite_sample(textureCoordinates[2], angles.z),"
				"composite_sample(textureCoordinates[3], angles.w)"
			");"

			// Take the average to calculate luminance, then subtract that from all four samples to
			// give chrominance.
			"float luminance = dot(samples, vec4(0.25));"
			"float chrominance = (samples.y - luminance) * oneOverCompositeAmplitude;"
			"vec2 channels = vec2(cos(compositeAngle), sin(compositeAngle)) * chrominance;"

			// Apply a colour space conversion to get RGB.
			"fragColour = vec4(luminance, channels, 1.0);";
	}

	fragment_shader += "}";

	return std::unique_ptr<Shader>(new Shader(
		vertex_shader,
		fragment_shader,
		{
			"startClock",
			"startCompositeAngle",
			"endClock",
			"endCompositeAngle",
			"lineY",
			"lineCompositeAmplitude"
		}
	));
}