mirror of https://github.com/TomHarte/CLK.git synced 2024-12-27 01:31:42 +00:00

Attempts an initial flattening of the pipeline, seemingly losing all output.

Thomas Harte 2019-01-01 21:02:21 -05:00
parent 601961deeb
commit fd0ffc7085
3 changed files with 211 additions and 263 deletions

View File

@@ -13,22 +13,14 @@ using namespace Outputs::Display::OpenGL;
 namespace {

-/// The texture unit from which to source 1bpp input data.
-constexpr GLenum SourceData1BppTextureUnit = GL_TEXTURE0;
-/// The texture unit from which to source 2bpp input data.
-//constexpr GLenum SourceData2BppTextureUnit = GL_TEXTURE1;
-/// The texture unit from which to source 4bpp input data.
-//constexpr GLenum SourceData4BppTextureUnit = GL_TEXTURE2;
+/// The texture unit from which to source input data.
+constexpr GLenum SourceDataTextureUnit = GL_TEXTURE0;

 /// The texture unit which contains raw line-by-line composite, S-Video or RGB data.
-constexpr GLenum UnprocessedLineBufferTextureUnit = GL_TEXTURE3;
+constexpr GLenum UnprocessedLineBufferTextureUnit = GL_TEXTURE1;

-/// The texture unit which contains line-by-line records of luminance and two channels of chrominance, straight after multiplication by the quadrature vector, not yet filtered.
-constexpr GLenum SVideoLineBufferTextureUnit = GL_TEXTURE4;
-/// The texture unit which contains line-by-line records of RGB.
-constexpr GLenum RGBLineBufferTextureUnit = GL_TEXTURE5;

 /// The texture unit that contains the current display.
-constexpr GLenum AccumulationTextureUnit = GL_TEXTURE6;
+constexpr GLenum AccumulationTextureUnit = GL_TEXTURE2;

 #define TextureAddress(x, y) (((y) << 11) | (x))
 #define TextureAddressGetY(v) uint16_t((v) >> 11)
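For reference, the TextureAddress macros pack a write-area coordinate into a single integer, with x in the low 11 bits (matching a 2048-texel-wide write area) and y in the bits above. A minimal sketch of the same packing, using hypothetical helper names rather than anything from the repository:

    #include <cassert>
    #include <cstdint>

    // Hypothetical stand-ins for the TextureAddress macros above.
    constexpr uint32_t texture_address(uint32_t x, uint32_t y) { return (y << 11) | x; }  // x: bits 0-10, y: bits 11 and up
    constexpr uint32_t texture_address_get_x(uint32_t v) { return v & 2047; }             // mask to the 11-bit x field
    constexpr uint16_t texture_address_get_y(uint32_t v) { return uint16_t(v >> 11); }    // discard x to recover y

    int main() {
        const uint32_t address = texture_address(100, 20);
        assert(texture_address_get_x(address) == 100);
        assert(texture_address_get_y(address) == 20);
        return 0;
    }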
@@ -292,14 +284,9 @@ void ScanTarget::setup_pipeline() {
 		write_pointers_.write_area = 0;
 	}

-	// Pick a processing width; this will be at least four times the
-	// colour subcarrier, and an integer multiple of the pixel clock and
-	// at most 2048.
-	const int colour_cycle_width = (modals_.colour_cycle_numerator * 4 + modals_.colour_cycle_denominator - 1) / modals_.colour_cycle_denominator;
-	const int dot_clock = modals_.cycles_per_line / modals_.clocks_per_pixel_greatest_common_divisor;
-	const int overflow = colour_cycle_width % dot_clock;
-	processing_width_ = colour_cycle_width + (overflow ? dot_clock - overflow : 0);
-	processing_width_ = std::min(processing_width_, 2048);
+	// Pick a processing width; this will be the minimum necessary not to
+	// lose any detail when combining the input.
+	processing_width_ = modals_.cycles_per_line / modals_.clocks_per_pixel_greatest_common_divisor;

 	// Establish an output shader. TODO: add gamma correction here.
 	output_shader_.reset(new Shader(
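Dividing cycles_per_line by the greatest common divisor of all pixel-clock durations gives the smallest number of samples per line at which no input detail can be lost, which is what the new calculation relies on. A worked example with made-up modal values (not taken from the repository):

    #include <iostream>

    int main() {
        // Hypothetical modal values, for illustration only.
        const int cycles_per_line = 1024;                         // input clock cycles per scan line
        const int clocks_per_pixel_greatest_common_divisor = 8;   // GCD of every pixel duration, in cycles

        // One output sample per shortest representable pixel: 1024 / 8 = 128 samples per line.
        const int processing_width = cycles_per_line / clocks_per_pixel_greatest_common_divisor;
        std::cout << "processing width: " << processing_width << " samples per line\n";
        return 0;
    }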
@@ -323,63 +310,15 @@ void ScanTarget::setup_pipeline() {
 	set_uniforms(ShaderType::Line, *output_shader_);
 	output_shader_->set_uniform("origin", modals_.visible_area.origin.x, modals_.visible_area.origin.y);
 	output_shader_->set_uniform("size", modals_.visible_area.size.width, modals_.visible_area.size.height);
+	output_shader_->set_uniform("textureName", GLint(UnprocessedLineBufferTextureUnit - GL_TEXTURE0));

-	// Establish such intermediary shaders as are required.
-	pipeline_stages_.clear();
-	if(modals_.display_type == DisplayType::CompositeColour) {
-		pipeline_stages_.emplace_back(
-			composite_to_svideo_shader(modals_.colour_cycle_numerator, modals_.colour_cycle_denominator, processing_width_).release(),
-			SVideoLineBufferTextureUnit,
-			GL_NEAREST);
-	}
-	if(modals_.display_type == DisplayType::SVideo || modals_.display_type == DisplayType::CompositeColour) {
-		pipeline_stages_.emplace_back(
-			svideo_to_rgb_shader(modals_.colour_cycle_numerator, modals_.colour_cycle_denominator, processing_width_).release(),
-			(modals_.display_type == DisplayType::CompositeColour) ? RGBLineBufferTextureUnit : SVideoLineBufferTextureUnit,
-			GL_NEAREST);
-	}
-
-	glBindVertexArray(scan_vertex_array_);
-	glBindBuffer(GL_ARRAY_BUFFER, scan_buffer_name_);

 	// Establish an input shader.
-	input_shader_ = input_shader(modals_.input_data_type, modals_.display_type);
+	input_shader_ = composition_shader();
+
+	glBindVertexArray(scan_vertex_array_);
+	glBindBuffer(GL_ARRAY_BUFFER, scan_buffer_name_);
+
 	enable_vertex_attributes(ShaderType::InputScan, *input_shader_);
 	set_uniforms(ShaderType::InputScan, *input_shader_);
-	input_shader_->set_uniform("textureName", GLint(SourceData1BppTextureUnit - GL_TEXTURE0));
+	input_shader_->set_uniform("textureName", GLint(SourceDataTextureUnit - GL_TEXTURE0));

-	// Cascade the texture units in use as per the pipeline stages.
-	std::vector<Shader *> input_shaders = {input_shader_.get()};
-	GLint texture_unit = GLint(UnprocessedLineBufferTextureUnit - GL_TEXTURE0);
-	for(const auto &stage: pipeline_stages_) {
-		input_shaders.push_back(stage.shader.get());
-		stage.shader->set_uniform("textureName", texture_unit);
-		set_uniforms(ShaderType::ProcessedScan, *stage.shader);
-		enable_vertex_attributes(ShaderType::ProcessedScan, *stage.shader);
-		++texture_unit;
-	}
-	output_shader_->set_uniform("textureName", texture_unit);
-
-	// Ensure that all shaders involved in the input pipeline have the proper colour space knowledged.
-	for(auto shader: input_shaders) {
-		switch(modals_.composite_colour_space) {
-			case ColourSpace::YIQ: {
-				const GLfloat rgbToYIQ[] = {0.299f, 0.596f, 0.211f, 0.587f, -0.274f, -0.523f, 0.114f, -0.322f, 0.312f};
-				const GLfloat yiqToRGB[] = {1.0f, 1.0f, 1.0f, 0.956f, -0.272f, -1.106f, 0.621f, -0.647f, 1.703f};
-				shader->set_uniform_matrix("lumaChromaToRGB", 3, false, yiqToRGB);
-				shader->set_uniform_matrix("rgbToLumaChroma", 3, false, rgbToYIQ);
-			} break;
-
-			case ColourSpace::YUV: {
-				const GLfloat rgbToYUV[] = {0.299f, -0.14713f, 0.615f, 0.587f, -0.28886f, -0.51499f, 0.114f, 0.436f, -0.10001f};
-				const GLfloat yuvToRGB[] = {1.0f, 1.0f, 1.0f, 0.0f, -0.39465f, 2.03211f, 1.13983f, -0.58060f, 0.0f};
-				shader->set_uniform_matrix("lumaChromaToRGB", 3, false, yuvToRGB);
-				shader->set_uniform_matrix("rgbToLumaChroma", 3, false, rgbToYUV);
-			} break;
-		}
-	}
 }
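The removed block above handed both directions of the luma/chroma transform to every shader in the chain. Assuming that Shader::set_uniform_matrix passes its transpose flag straight through to glUniformMatrix3fv (an assumption about this wrapper, not something shown in the diff), the flat arrays are column-major, i.e. the first three floats are the first column of the matrix. A small illustration of reading the YIQ array that way:

    #include <cstdio>

    int main() {
        // Column-major storage, as glUniformMatrix3fv expects when transpose is GL_FALSE.
        const float rgbToYIQ[] = {0.299f,  0.596f,  0.211f,    // column 0: R's contribution to Y, I, Q
                                  0.587f, -0.274f, -0.523f,    // column 1: G's contribution
                                  0.114f, -0.322f,  0.312f};   // column 2: B's contribution

        // Reading a conventional row therefore means striding by three:
        // Y = 0.299*R + 0.587*G + 0.114*B, and similarly for I and Q.
        const float r = 0.5f, g = 0.25f, b = 0.125f;
        for(int row = 0; row < 3; ++row) {
            const float value = rgbToYIQ[row]*r + rgbToYIQ[row + 3]*g + rgbToYIQ[row + 6]*b;
            printf("%c = %f\n", "YIQ"[row], value);
        }
        return 0;
    }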
void ScanTarget::draw(bool synchronous, int output_width, int output_height) {
@@ -431,7 +370,7 @@ void ScanTarget::draw(bool synchronous, int output_width, int output_height) {

 	// Submit texture.
 	if(submit_pointers.write_area != read_pointers.write_area) {
-		glActiveTexture(SourceData1BppTextureUnit);
+		glActiveTexture(SourceDataTextureUnit);
 		glBindTexture(GL_TEXTURE_2D, write_area_texture_name_);

 		// Create storage for the texture if it doesn't yet exist; this was deferred until here
@@ -487,7 +426,6 @@ void ScanTarget::draw(bool synchronous, int output_width, int output_height) {

 	// Push new input to the unprocessed line buffer.
 	if(new_scans) {
-		glDisable(GL_BLEND);
 		unprocessed_line_texture_.bind_framebuffer();

 		// Clear newly-touched lines; that is everything from (read+1) to submit.
@@ -499,26 +437,11 @@ void ScanTarget::draw(bool synchronous, int output_width, int output_height) {
 		if(first_line_to_clear < final_line_to_clear) {
 			glScissor(0, first_line_to_clear, unprocessed_line_texture_.get_width(), final_line_to_clear - first_line_to_clear);
 			glClear(GL_COLOR_BUFFER_BIT);
-
-			if(pipeline_stages_.size()) {
-				pipeline_stages_.back().target.bind_framebuffer();
-				glClear(GL_COLOR_BUFFER_BIT);
-				unprocessed_line_texture_.bind_framebuffer();
-			}
 		} else {
 			glScissor(0, 0, unprocessed_line_texture_.get_width(), final_line_to_clear);
 			glClear(GL_COLOR_BUFFER_BIT);
 			glScissor(0, first_line_to_clear, unprocessed_line_texture_.get_width(), unprocessed_line_texture_.get_height() - first_line_to_clear);
 			glClear(GL_COLOR_BUFFER_BIT);
-
-			if(pipeline_stages_.size()) {
-				pipeline_stages_.back().target.bind_framebuffer();
-				glScissor(0, 0, unprocessed_line_texture_.get_width(), final_line_to_clear);
-				glClear(GL_COLOR_BUFFER_BIT);
-				glScissor(0, first_line_to_clear, unprocessed_line_texture_.get_width(), unprocessed_line_texture_.get_height() - first_line_to_clear);
-				glClear(GL_COLOR_BUFFER_BIT);
-				unprocessed_line_texture_.bind_framebuffer();
-			}
 		}

 		glDisable(GL_SCISSOR_TEST);
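The clearing above treats the line buffer as circular: when the newly-touched range wraps past the end of the texture it is cleared as two scissor rectangles instead of one. A CPU-side sketch of the same split, assuming the lines to clear run from read+1 up to and including submit (the helper below is hypothetical, not repository code):

    #include <cstdio>
    #include <utility>
    #include <vector>

    // Returns (first_line, line_count) scissor regions covering every line in
    // (read, submit] modulo buffer_height: one region normally, two on wraparound.
    std::vector<std::pair<int, int>> clear_regions(int read, int submit, int buffer_height) {
        const int first = (read + 1) % buffer_height;
        const int end = (submit + 1) % buffer_height;   // one past the last line to clear
        if(first < end) return {{first, end - first}};
        return {{0, end}, {first, buffer_height - first}};
    }

    int main() {
        // Wraps around: clears lines 0-6 and lines 1021-1023 of a 1024-line buffer.
        for(const auto &region : clear_regions(1020, 6, 1024))
            printf("clear %d lines starting at line %d\n", region.second, region.first);
        return 0;
    }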
@@ -528,13 +451,6 @@ void ScanTarget::draw(bool synchronous, int output_width, int output_height) {
 		glBindVertexArray(scan_vertex_array_);
 		input_shader_->bind();
 		glDrawArraysInstanced(GL_TRIANGLE_STRIP, 0, 4, GLsizei(new_scans));
-
-		// If there are any further pipeline stages, apply them.
-		for(auto &stage: pipeline_stages_) {
-			stage.target.bind_framebuffer();
-			stage.shader->bind();
-			glDrawArraysInstanced(GL_TRIANGLE_STRIP, 0, 4, GLsizei(new_scans));
-		}
 	}

 	// Ensure the accumulation buffer is properly sized.
@@ -545,7 +461,7 @@ void ScanTarget::draw(bool synchronous, int output_width, int output_height) {
 			GLsizei(proportional_width),
 			GLsizei(output_height),
 			AccumulationTextureUnit,
-			GL_LINEAR,
+			GL_NEAREST,
 			true));

 		if(accumulation_texture_) {
 			new_framebuffer->bind_framebuffer();

View File

@@ -105,8 +105,7 @@ class ScanTarget: public Outputs::Display::ScanTarget {
 		// Contains the first composition of scans into lines;
 		// they're accumulated prior to output to allow for continuous
-		// application of any necessary conversions — e.g. composite processing —
-		// which happen progressively from here to the RGB texture.
+		// application of any necessary conversions — e.g. composite processing.
 		TextureTarget unprocessed_line_texture_;

 		// Scans are accumulated to the accumulation texture; the full-display
@@ -180,22 +179,8 @@ class ScanTarget: public Outputs::Display::ScanTarget {
 		std::unique_ptr<Shader> input_shader_;
 		std::unique_ptr<Shader> output_shader_;

-		static std::unique_ptr<Shader> input_shader(InputDataType input_data_type, DisplayType display_type);
-		static std::unique_ptr<Shader> composite_to_svideo_shader(int colour_cycle_numerator, int colour_cycle_denominator, int processing_width);
-		static std::unique_ptr<Shader> svideo_to_rgb_shader(int colour_cycle_numerator, int colour_cycle_denominator, int processing_width);
-		static SignalProcessing::FIRFilter colour_filter(int colour_cycle_numerator, int colour_cycle_denominator, int processing_width, float low_cutoff, float high_cutoff);
-
-		struct PipelineStage {
-			PipelineStage(Shader *shader, GLenum texture_unit, GLint magnification_filter) :
-				shader(shader),
-				target(LineBufferWidth, LineBufferHeight, texture_unit, magnification_filter, false) {}
-
-			std::unique_ptr<Shader> shader;
-			TextureTarget target;
-		};
-
-		// A list is used here to avoid requiring a copy constructor on PipelineStage.
-		std::list<PipelineStage> pipeline_stages_;
+		static std::unique_ptr<Shader> composition_shader();
+		static std::unique_ptr<Shader> conversion_shader(InputDataType input_data_type, DisplayType display_type, int colour_cycle_numerator, int colour_cycle_denominator, int processing_width);
 };

 }
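The removed comment explains the choice of std::list: std::vector may relocate its elements when it grows, which requires them to be copyable or movable, whereas std::list leaves elements in place. A small sketch of that distinction, using a deliberately non-copyable, non-movable stand-in for PipelineStage (illustrative only):

    #include <list>

    // Stand-in for a stage that owns GL resources and therefore forbids copies and moves.
    struct Stage {
        explicit Stage(int) {}
        Stage(const Stage &) = delete;
        Stage &operator=(const Stage &) = delete;
    };

    int main() {
        std::list<Stage> stages;    // never relocates its elements...
        stages.emplace_back(1);     // ...so in-place construction suffices.
        stages.emplace_back(2);
        // A std::vector<Stage> could not grow here: emplace_back requires move-insertable elements.
        return 0;
    }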

View File

@@ -90,12 +90,14 @@ std::string ScanTarget::glsl_default_vertex_shader(ShaderType type) {
 	if(type == ShaderType::InputScan) {
 		result +=
 			"out vec2 textureCoordinate;"
-			"uniform usampler2D textureName;";
+			"uniform sampler2D textureName;";
 	} else {
 		result +=
 			"out vec2 textureCoordinates[15];"
+			"out vec2 chromaCoordinates[2];"
 			"uniform sampler2D textureName;"
+			"uniform float chromaOffset;"
 			"uniform float edgeExpansion;";
 	}
@@ -120,9 +122,9 @@ std::string ScanTarget::glsl_default_vertex_shader(ShaderType type) {
 			"vec2 eyePosition = (sourcePosition + vec2(0.0, longitudinal - 0.5)) / vec2(scale.x, 2048.0);"
 			"sourcePosition /= vec2(scale.x, 2048.0);"

-			"vec2 expansion = vec2(2.0*lateral*edgeExpansion - edgeExpansion, 0.0) / textureSize(textureName, 0);"
-			"eyePosition = eyePosition + expansion;"
-			"sourcePosition = sourcePosition + expansion;"
+//			"vec2 expansion = vec2(edgeExpansion, 0.0) / textureSize(textureName, 0);"
+//			"eyePosition = eyePosition + expansion;"
+//			"sourcePosition = sourcePosition + expansion;"

 			"textureCoordinates[0] = sourcePosition + vec2(-7.0, 0.0) / textureSize(textureName, 0);"
 			"textureCoordinates[1] = sourcePosition + vec2(-6.0, 0.0) / textureSize(textureName, 0);"
@@ -140,6 +142,9 @@ std::string ScanTarget::glsl_default_vertex_shader(ShaderType type) {
 			"textureCoordinates[13] = sourcePosition + vec2(6.0, 0.0) / textureSize(textureName, 0);"
 			"textureCoordinates[14] = sourcePosition + vec2(7.0, 0.0) / textureSize(textureName, 0);"

+			"chromaCoordinates[0] = sourcePosition + vec2(chromaOffset, 0.0);"
+			"chromaCoordinates[1] = sourcePosition - vec2(chromaOffset, 0.0);"
+
 			"eyePosition = eyePosition;";
 	}
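The two new varyings sample the source a fixed horizontal distance either side of the output position; judging from the commented-out conversion code later in this commit, the intention appears to be two chroma samples spaced a quarter of a colour cycle apart. A numeric sketch of how that offset follows from the cycles_per_expanded_line expression used there, with hypothetical NTSC-like values:

    #include <cstdio>

    int main() {
        // Hypothetical values, for illustration only.
        const float colour_cycles_per_line = 227.5f;   // colour_cycle_numerator / colour_cycle_denominator
        const float processing_width = 910.0f;         // samples actually written per line
        const float line_buffer_width = 2048.0f;       // width of the unprocessed-line texture

        // Colour cycles the full texture width would span at this sampling rate...
        const float cycles_per_expanded_line = colour_cycles_per_line / (processing_width / line_buffer_width);

        // ...and a quarter of one cycle in normalised texture coordinates, i.e. the
        // chromaOffset added to and subtracted from sourcePosition above.
        const float chroma_offset = 0.25f / cycles_per_expanded_line;

        printf("cycles per expanded line: %.1f\n", cycles_per_expanded_line);   // 512.0
        printf("chroma offset: %f (about %.2f texels)\n", chroma_offset, chroma_offset * line_buffer_width);
        return 0;
    }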
@@ -235,8 +240,8 @@ void ScanTarget::enable_vertex_attributes(ShaderType type, Shader &target) {
 	}
 }

-std::unique_ptr<Shader> ScanTarget::input_shader(InputDataType input_data_type, DisplayType display_type) {
-	std::string fragment_shader =
+std::unique_ptr<Shader> ScanTarget::composition_shader() {
+/*	std::string fragment_shader =
 		"#version 150\n"
 		"out vec3 fragColour;"
@@ -313,161 +318,203 @@ std::unique_ptr<Shader> ScanTarget::input_shader(InputDataType input_data_type,
 			computed_display_type = DisplayType::RGB;
 			fragment_shader += "fragColour = texture(textureName, textureCoordinate).rgb / vec3(255.0);";
 		break;
-	}
+	}*/

 	// If the input type is RGB but the output type isn't then
 	// there'll definitely be an RGB to SVideo step.
-	if(computed_display_type == DisplayType::RGB && display_type != DisplayType::RGB) {
-		fragment_shader +=
-			"vec3 composite_colour = rgbToLumaChroma * fragColour;"
-			"vec2 quadrature = vec2(cos(compositeAngle), sin(compositeAngle));"
-			"fragColour = vec3(composite_colour.r, 0.5 + dot(quadrature, composite_colour.gb)*0.5, 0.0);";
-	}
+//	if(computed_display_type == DisplayType::RGB && display_type != DisplayType::RGB) {
+//		fragment_shader +=
+//			"vec3 composite_colour = rgbToLumaChroma * fragColour;"
+//			"vec2 quadrature = vec2(cos(compositeAngle), sin(compositeAngle));"
+//			"fragColour = vec3(composite_colour.r, 0.5 + dot(quadrature, composite_colour.gb)*0.5, 0.0);";
+//	}

 	// If the output type is SVideo, throw in an attempt to separate the two chrominance
 	// channels here.
-	if(display_type == DisplayType::SVideo) {
-		if(computed_display_type != DisplayType::RGB) {
-			fragment_shader +=
-				"vec2 quadrature = vec2(cos(compositeAngle), sin(compositeAngle));";
-		}
-		fragment_shader +=
-			"vec2 chroma = (((fragColour.y - 0.5)*2.0) * quadrature)*0.5 + vec2(0.5);"
-			"fragColour = vec3(fragColour.x, chroma);";
-	}
+//	if(display_type == DisplayType::SVideo) {
+//		if(computed_display_type != DisplayType::RGB) {
+//			fragment_shader +=
+//				"vec2 quadrature = vec2(cos(compositeAngle), sin(compositeAngle));";
+//		}
+//		fragment_shader +=
+//			"vec2 chroma = (((fragColour.y - 0.5)*2.0) * quadrature)*0.5 + vec2(0.5);"
+//			"fragColour = vec3(fragColour.x, chroma);";
+//	}

 	// Add an SVideo to composite step if necessary.
-	if(
-		(display_type == DisplayType::CompositeMonochrome || display_type == DisplayType::CompositeColour) &&
-		computed_display_type != DisplayType::CompositeMonochrome
-	) {
-		fragment_shader += "fragColour = vec3(mix(fragColour.r, 2.0*(fragColour.g - 0.5), 1.0 / oneOverCompositeAmplitude));";
-	}
+//	if(
+//		(display_type == DisplayType::CompositeMonochrome || display_type == DisplayType::CompositeColour) &&
+//		computed_display_type != DisplayType::CompositeMonochrome
+//	) {
+//		fragment_shader += "fragColour = vec3(mix(fragColour.r, 2.0*(fragColour.g - 0.5), 1.0 / oneOverCompositeAmplitude));";
+//	}

+	const std::string fragment_shader =
+		"#version 150\n"
+		"in vec2 textureCoordinate;"
+		"out vec4 fragColour;"
+		"uniform sampler2D textureName;"
+		"void main(void) {"
+			"fragColour = vec4(1.0) - texture(textureName, textureCoordinate);"
+		"}";

 	return std::unique_ptr<Shader>(new Shader(
 		glsl_globals(ShaderType::InputScan) + glsl_default_vertex_shader(ShaderType::InputScan),
-		fragment_shader + "}",
+		fragment_shader,
 		attribute_bindings(ShaderType::InputScan)
 	));
 }

-SignalProcessing::FIRFilter ScanTarget::colour_filter(int colour_cycle_numerator, int colour_cycle_denominator, int processing_width, float low_cutoff, float high_cutoff) {
-	const float cycles_per_expanded_line = (float(colour_cycle_numerator) / float(colour_cycle_denominator)) / (float(processing_width) / float(LineBufferWidth));
-	return SignalProcessing::FIRFilter(15, float(LineBufferWidth), cycles_per_expanded_line * low_cutoff, cycles_per_expanded_line * high_cutoff);
-}
+std::unique_ptr<Shader> ScanTarget::conversion_shader(InputDataType input_data_type, DisplayType display_type, int colour_cycle_numerator, int colour_cycle_denominator, int processing_width) {
+	return nullptr;
std::unique_ptr<Shader> ScanTarget::svideo_to_rgb_shader(int colour_cycle_numerator, int colour_cycle_denominator, int processing_width) {
/*
Composite to S-Video conversion is achieved by filtering the input signal to obtain luminance, and then subtracting that
from the original to get chrominance.
(Colour cycle numerator)/(Colour cycle denominator) gives the number of colour cycles in (processing_width / LineBufferWidth),
there'll be at least four samples per colour clock and in practice at most just a shade more than 9.
*/
auto shader = std::unique_ptr<Shader>(new Shader(
glsl_globals(ShaderType::ProcessedScan) + glsl_default_vertex_shader(ShaderType::ProcessedScan),
"#version 150\n"
"in vec2 textureCoordinates[15];"
"uniform vec4 chromaWeights[4];"
"uniform vec4 lumaWeights[4];"
"uniform sampler2D textureName;"
"uniform mat3 lumaChromaToRGB;"
"out vec3 fragColour;"
"void main() {"
"vec3 samples[15] = vec3[15]("
"texture(textureName, textureCoordinates[0]).rgb,"
"texture(textureName, textureCoordinates[1]).rgb,"
"texture(textureName, textureCoordinates[2]).rgb,"
"texture(textureName, textureCoordinates[3]).rgb,"
"texture(textureName, textureCoordinates[4]).rgb,"
"texture(textureName, textureCoordinates[5]).rgb,"
"texture(textureName, textureCoordinates[6]).rgb,"
"texture(textureName, textureCoordinates[7]).rgb,"
"texture(textureName, textureCoordinates[8]).rgb,"
"texture(textureName, textureCoordinates[9]).rgb,"
"texture(textureName, textureCoordinates[10]).rgb,"
"texture(textureName, textureCoordinates[11]).rgb,"
"texture(textureName, textureCoordinates[12]).rgb,"
"texture(textureName, textureCoordinates[13]).rgb,"
"texture(textureName, textureCoordinates[14]).rgb"
");"
"vec4 samples0[4] = vec4[4]("
"vec4(samples[0].r, samples[1].r, samples[2].r, samples[3].r),"
"vec4(samples[4].r, samples[5].r, samples[6].r, samples[7].r),"
"vec4(samples[8].r, samples[9].r, samples[10].r, samples[11].r),"
"vec4(samples[12].r, samples[13].r, samples[14].r, 0.0)"
");"
"vec4 samples1[4] = vec4[4]("
"vec4(samples[0].g, samples[1].g, samples[2].g, samples[3].g),"
"vec4(samples[4].g, samples[5].g, samples[6].g, samples[7].g),"
"vec4(samples[8].g, samples[9].g, samples[10].g, samples[11].g),"
"vec4(samples[12].g, samples[13].g, samples[14].g, 0.0)"
");"
"vec4 samples2[4] = vec4[4]("
"vec4(samples[0].b, samples[1].b, samples[2].b, samples[3].b),"
"vec4(samples[4].b, samples[5].b, samples[6].b, samples[7].b),"
"vec4(samples[8].b, samples[9].b, samples[10].b, samples[11].b),"
"vec4(samples[12].b, samples[13].b, samples[14].b, 0.0)"
");"
"float channel0 = dot(lumaWeights[0], samples0[0]) + dot(lumaWeights[1], samples0[1]) + dot(lumaWeights[2], samples0[2]) + dot(lumaWeights[3], samples0[3]);"
"float channel1 = dot(chromaWeights[0], samples1[0]) + dot(chromaWeights[1], samples1[1]) + dot(chromaWeights[2], samples1[2]) + dot(chromaWeights[3], samples1[3]);"
"float channel2 = dot(chromaWeights[0], samples2[0]) + dot(chromaWeights[1], samples2[1]) + dot(chromaWeights[2], samples2[2]) + dot(chromaWeights[3], samples2[3]);"
"vec2 chroma = vec2(channel1, channel2)*2.0 - vec2(1.0);"
"fragColour = lumaChromaToRGB * vec3(channel0, chroma);"
"}",
attribute_bindings(ShaderType::ProcessedScan)
));
auto chroma_coefficients = colour_filter(colour_cycle_numerator, colour_cycle_denominator, processing_width, 0.0f, 0.25f).get_coefficients();
chroma_coefficients.push_back(0.0f);
shader->set_uniform("chromaWeights", 4, 4, chroma_coefficients.data());
auto luma_coefficients = colour_filter(colour_cycle_numerator, colour_cycle_denominator, processing_width, 0.0f, 0.15f).get_coefficients();
luma_coefficients.push_back(0.0f);
shader->set_uniform("lumaWeights", 4, 4, luma_coefficients.data());
shader->set_uniform("edgeExpansion", 0);
return shader;
}
std::unique_ptr<Shader> ScanTarget::composite_to_svideo_shader(int colour_cycle_numerator, int colour_cycle_denominator, int processing_width) {
auto shader = std::unique_ptr<Shader>(new Shader(
glsl_globals(ShaderType::ProcessedScan) + glsl_default_vertex_shader(ShaderType::ProcessedScan),
"#version 150\n"
"in vec2 textureCoordinates[15];"
"in float compositeAngle;"
"in float oneOverCompositeAmplitude;"
"uniform vec4 lumaWeights[4];"
"uniform sampler2D textureName;"
"out vec3 fragColour;"
"void main() {"
"vec4 samples[4] = vec4[4]("
"vec4(texture(textureName, textureCoordinates[0]).r, texture(textureName, textureCoordinates[1]).r, texture(textureName, textureCoordinates[2]).r, texture(textureName, textureCoordinates[3]).r),"
"vec4(texture(textureName, textureCoordinates[4]).r, texture(textureName, textureCoordinates[5]).r, texture(textureName, textureCoordinates[6]).r, texture(textureName, textureCoordinates[7]).r),"
"vec4(texture(textureName, textureCoordinates[8]).r, texture(textureName, textureCoordinates[9]).r, texture(textureName, textureCoordinates[10]).r, texture(textureName, textureCoordinates[11]).r),"
"vec4(texture(textureName, textureCoordinates[12]).r, texture(textureName, textureCoordinates[13]).r, texture(textureName, textureCoordinates[14]).r, 0.0)"
");"
"float luma = dot(lumaWeights[0], samples[0]) + dot(lumaWeights[1], samples[1]) + dot(lumaWeights[2], samples[2]) + dot(lumaWeights[3], samples[3]);"
"vec2 quadrature = vec2(cos(compositeAngle), sin(compositeAngle));"
"vec2 chroma = ((samples[1].a - luma) * oneOverCompositeAmplitude)*quadrature;"
"fragColour = vec3(samples[1].a, chroma*0.5 + vec2(0.5));"
"}",
attribute_bindings(ShaderType::ProcessedScan)
));
auto luma_low = colour_filter(colour_cycle_numerator, colour_cycle_denominator, processing_width, 0.0f, 0.9f);
auto luma_coefficients = luma_low.get_coefficients();
luma_coefficients.push_back(0.0f);
shader->set_uniform("lumaWeights", 4, 4, luma_coefficients.data());
shader->set_uniform("edgeExpansion", 0);
return shader;
}
//
//SignalProcessing::FIRFilter ScanTarget::colour_filter(int colour_cycle_numerator, int colour_cycle_denominator, int processing_width, float low_cutoff, float high_cutoff) {
// const float cycles_per_expanded_line = (float(colour_cycle_numerator) / float(colour_cycle_denominator)) / (float(processing_width) / float(LineBufferWidth));
// return SignalProcessing::FIRFilter(15, float(LineBufferWidth), cycles_per_expanded_line * low_cutoff, cycles_per_expanded_line * high_cutoff);
//}
//
//std::unique_ptr<Shader> ScanTarget::svideo_to_rgb_shader(int colour_cycle_numerator, int colour_cycle_denominator, int processing_width) {
// /*
// Composite to S-Video conversion is achieved by filtering the input signal to obtain luminance, and then subtracting that
// from the original to get chrominance.
//
// (Colour cycle numerator)/(Colour cycle denominator) gives the number of colour cycles in (processing_width / LineBufferWidth),
// there'll be at least four samples per colour clock and in practice at most just a shade more than 9.
// */
// auto shader = std::unique_ptr<Shader>(new Shader(
// glsl_globals(ShaderType::ProcessedScan) + glsl_default_vertex_shader(ShaderType::ProcessedScan),
// "#version 150\n"
//
// "in vec2 textureCoordinates[15];"
// "in vec2 chromaCoordinates[2];"
// "in float compositeAngle;"
//
//// "uniform vec4 chromaWeights[4];"
//// "uniform vec4 lumaWeights[4];"
// "uniform sampler2D textureName;"
// "uniform mat3 lumaChromaToRGB;"
//
// "out vec3 fragColour;"
// "void main() {"
// "vec2 angles = vec2(compositeAngle - 1.570795827, compositeAngle + 1.570795827);"
//
// "vec2 sines = sin(angles) * vec2(0.5) + vec2(0.5);"
// "vec2 coses = cos(angles);"
// "float denominator = sines.y * coses.x - sines.x * coses.y;"
//
// "vec2 samples = vec2(texture(textureName, chromaCoordinates[0]).g, texture(textureName, chromaCoordinates[1]).g);"
//
// "float channel1 = (samples.x * sines.x - samples.y * sines.y) / denominator;"
// "float channel2 = (samples.x * coses.x - samples.y * coses.y) / denominator;"
//
//// "fragColour = lumaChromaToRGB * vec3(texture(textureName, textureCoordinates[7]).r, channel1, channel2);"
// "fragColour = vec3(sines.x + sines.y, 0.0, 0.0);"
// //, 0.0);"
//
//// "fragColour = lumaChromaToRGB * vec3(texture(textureName, textureCoordinates[7]).g, 0.0, 0.0);"
//// "fragColour = vec3(0.5);"
///* "vec3 samples[15] = vec3[15]("
// "texture(textureName, textureCoordinates[0]).rgb,"
// "texture(textureName, textureCoordinates[1]).rgb,"
// "texture(textureName, textureCoordinates[2]).rgb,"
// "texture(textureName, textureCoordinates[3]).rgb,"
// "texture(textureName, textureCoordinates[4]).rgb,"
// "texture(textureName, textureCoordinates[5]).rgb,"
// "texture(textureName, textureCoordinates[6]).rgb,"
// "texture(textureName, textureCoordinates[7]).rgb,"
// "texture(textureName, textureCoordinates[8]).rgb,"
// "texture(textureName, textureCoordinates[9]).rgb,"
// "texture(textureName, textureCoordinates[10]).rgb,"
// "texture(textureName, textureCoordinates[11]).rgb,"
// "texture(textureName, textureCoordinates[12]).rgb,"
// "texture(textureName, textureCoordinates[13]).rgb,"
// "texture(textureName, textureCoordinates[14]).rgb"
// ");"
// "vec4 samples0[4] = vec4[4]("
// "vec4(samples[0].r, samples[1].r, samples[2].r, samples[3].r),"
// "vec4(samples[4].r, samples[5].r, samples[6].r, samples[7].r),"
// "vec4(samples[8].r, samples[9].r, samples[10].r, samples[11].r),"
// "vec4(samples[12].r, samples[13].r, samples[14].r, 0.0)"
// ");"
// "vec4 samples1[4] = vec4[4]("
// "vec4(samples[0].g, samples[1].g, samples[2].g, samples[3].g),"
// "vec4(samples[4].g, samples[5].g, samples[6].g, samples[7].g),"
// "vec4(samples[8].g, samples[9].g, samples[10].g, samples[11].g),"
// "vec4(samples[12].g, samples[13].g, samples[14].g, 0.0)"
// ");"
// "vec4 samples2[4] = vec4[4]("
// "vec4(samples[0].b, samples[1].b, samples[2].b, samples[3].b),"
// "vec4(samples[4].b, samples[5].b, samples[6].b, samples[7].b),"
// "vec4(samples[8].b, samples[9].b, samples[10].b, samples[11].b),"
// "vec4(samples[12].b, samples[13].b, samples[14].b, 0.0)"
// ");"
// "float channel0 = dot(lumaWeights[0], samples0[0]) + dot(lumaWeights[1], samples0[1]) + dot(lumaWeights[2], samples0[2]) + dot(lumaWeights[3], samples0[3]);"
// "float channel1 = dot(chromaWeights[0], samples1[0]) + dot(chromaWeights[1], samples1[1]) + dot(chromaWeights[2], samples1[2]) + dot(chromaWeights[3], samples1[3]);"
// "float channel2 = dot(chromaWeights[0], samples2[0]) + dot(chromaWeights[1], samples2[1]) + dot(chromaWeights[2], samples2[2]) + dot(chromaWeights[3], samples2[3]);"
// "vec2 chroma = vec2(channel1, channel2)*2.0 - vec2(1.0);"
// "fragColour = lumaChromaToRGB * vec3(channel0, chroma);"*/
// "}",
// attribute_bindings(ShaderType::ProcessedScan)
// ));
//
// const float cycles_per_expanded_line = (float(colour_cycle_numerator) / float(colour_cycle_denominator)) / (float(processing_width) / float(LineBufferWidth));
// const float chroma_offset = 0.25f / cycles_per_expanded_line;
// shader->set_uniform("chromaOffset", chroma_offset);
//
//// auto chroma_coefficients = colour_filter(colour_cycle_numerator, colour_cycle_denominator, processing_width, 0.0f, 0.25f).get_coefficients();
//// chroma_coefficients.push_back(0.0f);
//// shader->set_uniform("chromaWeights", 4, 4, chroma_coefficients.data());
////
//// auto luma_coefficients = colour_filter(colour_cycle_numerator, colour_cycle_denominator, processing_width, 0.0f, 0.15f).get_coefficients();
//// luma_coefficients.push_back(0.0f);
//// shader->set_uniform("lumaWeights", 4, 4, luma_coefficients.data());
//
// shader->set_uniform("edgeExpansion", 20);
//
// return shader;
//}
//
//std::unique_ptr<Shader> ScanTarget::composite_to_svideo_shader(int colour_cycle_numerator, int colour_cycle_denominator, int processing_width) {
// auto shader = std::unique_ptr<Shader>(new Shader(
// glsl_globals(ShaderType::ProcessedScan) + glsl_default_vertex_shader(ShaderType::ProcessedScan),
// "#version 150\n"
//
// "in vec2 textureCoordinates[15];"
// "in float compositeAngle;"
// "in float oneOverCompositeAmplitude;"
//
// "uniform vec4 lumaWeights[4];"
// "uniform sampler2D textureName;"
//
// "out vec3 fragColour;"
// "void main() {"
// "vec4 samples[4] = vec4[4]("
// "vec4(texture(textureName, textureCoordinates[0]).r, texture(textureName, textureCoordinates[1]).r, texture(textureName, textureCoordinates[2]).r, texture(textureName, textureCoordinates[3]).r),"
// "vec4(texture(textureName, textureCoordinates[4]).r, texture(textureName, textureCoordinates[5]).r, texture(textureName, textureCoordinates[6]).r, texture(textureName, textureCoordinates[7]).r),"
// "vec4(texture(textureName, textureCoordinates[8]).r, texture(textureName, textureCoordinates[9]).r, texture(textureName, textureCoordinates[10]).r, texture(textureName, textureCoordinates[11]).r),"
// "vec4(texture(textureName, textureCoordinates[12]).r, texture(textureName, textureCoordinates[13]).r, texture(textureName, textureCoordinates[14]).r, 0.0)"
// ");"
// "float luma = dot(lumaWeights[0], samples[0]) + dot(lumaWeights[1], samples[1]) + dot(lumaWeights[2], samples[2]) + dot(lumaWeights[3], samples[3]);"
// "vec2 quadrature = vec2(cos(compositeAngle), sin(compositeAngle));"
// "vec2 chroma = ((samples[1].a - luma) * oneOverCompositeAmplitude)*quadrature;"
// "fragColour = vec3(samples[1].a, chroma*0.5 + vec2(0.5));"
// "}",
// attribute_bindings(ShaderType::ProcessedScan)
// ));
//
// auto luma_low = colour_filter(colour_cycle_numerator, colour_cycle_denominator, processing_width, 0.0f, 0.9f);
// auto luma_coefficients = luma_low.get_coefficients();
// luma_coefficients.push_back(0.0f);
// shader->set_uniform("lumaWeights", 4, 4, luma_coefficients.data());
//
// shader->set_uniform("edgeExpansion", 10);
//
// return shader;
//}
//
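The new, commented-out svideo_to_rgb_shader above tries to recover the two chroma channels from just two samples taken at phase-offset points of the colour subcarrier, which amounts to solving a 2x2 linear system per fragment. Its exact algebra (offsets of -90 and +90 degrees) differs from the textbook form, and per the commit message the output is currently lost, so the following is only an illustration of the underlying idea, using the "0.5 + 0.5*(U*cos(theta) + V*sin(theta))" encoding that the earlier composition step produces:

    #include <cmath>
    #include <cstdio>

    // Encode chroma (U, V) at subcarrier phase theta, as in the commented-out RGB -> S-Video step.
    float encode(float U, float V, float theta) {
        return 0.5f + 0.5f * (U * std::cos(theta) + V * std::sin(theta));
    }

    int main() {
        const float half_pi = 1.5707963f;
        const float U = 0.3f, V = -0.2f;   // arbitrary chroma values to recover
        const float theta = 1.1f;          // subcarrier phase at this fragment

        // Two samples a quarter of a colour cycle apart.
        const float s1 = encode(U, V, theta);
        const float s2 = encode(U, V, theta + half_pi);

        // The 2x2 system is a rotation, so inverting it is just applying the transpose.
        const float c = std::cos(theta), s = std::sin(theta);
        const float u = 2.0f * ((s1 - 0.5f) * c - (s2 - 0.5f) * s);
        const float v = 2.0f * ((s1 - 0.5f) * s + (s2 - 0.5f) * c);

        printf("recovered U = %f, V = %f\n", u, v);   // approximately 0.3 and -0.2
        return 0;
    }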