Mirror of https://github.com/TomHarte/CLK.git, synced 2024-11-25 16:31:42 +00:00
Attempts to introduce a full-on processing pipeline, in theory putting me two shaders away from completion.
Well, subject to finding the last flashing bug and updating the multimachine, anyway.
This commit is contained in:
parent a5a3769a0f
commit d4ac79b0af
@@ -21,12 +21,12 @@ constexpr GLenum SourceData2BppTextureUnit = GL_TEXTURE1;
/// The texture unit from which to source 4bpp input data.
constexpr GLenum SourceData4BppTextureUnit = GL_TEXTURE2;

/// The texture unit which contains raw line-by-line composite or RGB data.
/// The texture unit which contains raw line-by-line composite, S-Video or RGB data.
constexpr GLenum UnprocessedLineBufferTextureUnit = GL_TEXTURE3;
/// The texture unit which contains line-by-line records of luminance and amplitude-modulated chrominance.
constexpr GLenum CompositeSeparatedTextureUnit = GL_TEXTURE4;
/// The texture unit which contains line-by-line records of luminance and demodulated chrominance.
constexpr GLenum DemodulatedCompositeTextureUnit = GL_TEXTURE5;
/// The texture unit which contains line-by-line records of luminance and amplitude-modulated chrominance, exactly as if it were S-Video.
constexpr GLenum SVideoLineBufferTextureUnit = GL_TEXTURE4;
/// The texture unit which contains line-by-line records of luminance and separated, demodulated chrominance.
constexpr GLenum RGBLineBufferTextureUnit = GL_TEXTURE5;

/// The texture unit which contains line-by-line RGB.
constexpr GLenum LineBufferTextureUnit = GL_TEXTURE6;
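Taken together, these constants describe a per-line cascade: every kind of input is first written to the unprocessed line buffer, composite data is then separated into an S-Video-style luminance/chrominance pair, and that pair is finally demodulated to RGB. A rough sketch of the stage selection that set_modals performs later in this diff, using simplified stand-in names rather than the repository's real types:

```cpp
#include <iostream>
#include <string>
#include <vector>

// Stand-in for the DisplayType enum used in the diff; illustrative only.
enum class DisplayType { RGB, SVideo, CompositeColour, CompositeMonochrome };

// Lists the intermediate conversions a display type needs before the output
// shader can read finished RGB from the end of the line-buffer chain.
std::vector<std::string> stages_for(DisplayType type) {
	std::vector<std::string> stages;
	if(type == DisplayType::CompositeColour) {
		stages.push_back("composite -> S-Video  (writes SVideoLineBufferTextureUnit)");
	}
	if(type == DisplayType::SVideo || type == DisplayType::CompositeColour) {
		stages.push_back("S-Video -> RGB  (writes RGBLineBufferTextureUnit)");
	}
	return stages;	// RGB input needs no intermediate stage.
}

int main() {
	for(const auto &stage: stages_for(DisplayType::CompositeColour)) {
		std::cout << stage << '\n';
	}
}
```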
@@ -73,8 +73,10 @@ template <typename T> void ScanTarget::allocate_buffer(const T &array, GLuint &b
}

ScanTarget::ScanTarget() :
	unprocessed_line_texture_(LineBufferWidth, LineBufferHeight, UnprocessedLineBufferTextureUnit, GL_LINEAR, false),
	full_display_rectangle_(-1.0f, -1.0f, 2.0f, 2.0f) {
	unprocessed_line_texture_(LineBufferWidth, LineBufferHeight, UnprocessedLineBufferTextureUnit, GL_LINEAR, false),
//	svideo_texture_(LineBufferWidth, LineBufferHeight, SVideoLineBufferTextureUnit, GL_LINEAR, false),
//	rgb_texture_(LineBufferWidth, LineBufferHeight, RGBLineBufferTextureUnit, GL_LINEAR, false),
	full_display_rectangle_(-1.0f, -1.0f, 2.0f, 2.0f) {

	// Ensure proper initialisation of the two atomic pointer sets.
	read_pointers_.store(write_pointers_);
@@ -90,24 +92,6 @@ ScanTarget::ScanTarget() :

	glGenTextures(1, &write_area_texture_name_);

	output_shader_.reset(new Shader(
		glsl_globals(ShaderType::Line) + glsl_default_vertex_shader(ShaderType::Line),
		"#version 150\n"

		"out vec4 fragColour;"
		"in vec2 textureCoordinate;"

		"uniform sampler2D textureName;"

		"void main(void) {"
			"fragColour = vec4(texture(textureName, textureCoordinate).rgb, 0.64);"
		"}"
	));

	glBindVertexArray(line_vertex_array_);
	glBindBuffer(GL_ARRAY_BUFFER, line_buffer_name_);
	enable_vertex_attributes(ShaderType::Line, *output_shader_);

	glBlendFunc(GL_SRC_ALPHA, GL_CONSTANT_COLOR);
	glBlendColor(0.4f, 0.4f, 0.4f, 1.0f);

@@ -146,36 +130,81 @@ void ScanTarget::set_modals(Modals modals) {
	processing_width_ = colour_cycle_width + (overflow ? dot_clock - overflow : 0);
	processing_width_ = std::min(processing_width_, 2048);

	// Establish an output shader. TODO: add gamma correction here.
	output_shader_.reset(new Shader(
		glsl_globals(ShaderType::Line) + glsl_default_vertex_shader(ShaderType::Line),
		"#version 150\n"

		"out vec4 fragColour;"
		"in vec2 textureCoordinate;"

		"uniform sampler2D textureName;"

		"void main(void) {"
			"fragColour = vec4(texture(textureName, textureCoordinate).rgb, 0.64);"
		"}"
	));

	glBindVertexArray(line_vertex_array_);
	glBindBuffer(GL_ARRAY_BUFFER, line_buffer_name_);
	enable_vertex_attributes(ShaderType::Line, *output_shader_);

	set_uniforms(ShaderType::Line, *output_shader_);
	output_shader_->set_uniform("origin", modals.visible_area.origin.x, modals.visible_area.origin.y);
	output_shader_->set_uniform("size", modals.visible_area.size.width, modals.visible_area.size.height);

	// Establish an input shader.
	input_shader_ = input_shader(modals_.input_data_type, modals_.display_type);

	glBindVertexArray(scan_vertex_array_);
	glBindBuffer(GL_ARRAY_BUFFER, scan_buffer_name_);
	enable_vertex_attributes(ShaderType::Scan, *input_shader_);

	set_uniforms(Outputs::Display::OpenGL::ScanTarget::ShaderType::Scan, *output_shader_);
	set_uniforms(Outputs::Display::OpenGL::ScanTarget::ShaderType::Line, *input_shader_);
	enable_vertex_attributes(ShaderType::InputScan, *input_shader_);

	set_uniforms(ShaderType::InputScan, *input_shader_);
	input_shader_->set_uniform("textureName", GLint(SourceData1BppTextureUnit - GL_TEXTURE0));
	switch(modals.composite_colour_space) {
		case ColourSpace::YIQ: {
			const GLfloat rgbToYIQ[] = {0.299f, 0.596f, 0.211f, 0.587f, -0.274f, -0.523f, 0.114f, -0.322f, 0.312f};
			const GLfloat yiqToRGB[] = {1.0f, 1.0f, 1.0f, 0.956f, -0.272f, -1.106f, 0.621f, -0.647f, 1.703f};
			input_shader_->set_uniform_matrix("lumaChromaToRGB", 3, false, yiqToRGB);
			input_shader_->set_uniform_matrix("rgbToLumaChroma", 3, false, rgbToYIQ);
		} break;

		case ColourSpace::YUV: {
			const GLfloat rgbToYUV[] = {0.299f, -0.14713f, 0.615f, 0.587f, -0.28886f, -0.51499f, 0.114f, 0.436f, -0.10001f};
			const GLfloat yuvToRGB[] = {1.0f, 1.0f, 1.0f, 0.0f, -0.39465f, 2.03211f, 1.13983f, -0.58060f, 0.0f};
			input_shader_->set_uniform_matrix("lumaChromaToRGB", 3, false, yuvToRGB);
			input_shader_->set_uniform_matrix("rgbToLumaChroma", 3, false, rgbToYUV);
		} break;
	// Establish such intermediary shaders as are required.
	pipeline_stages_.clear();
	if(modals_.display_type == DisplayType::CompositeColour) {
		pipeline_stages_.emplace_back(
			composite_to_svideo_shader(modals_.colour_cycle_numerator, modals_.colour_cycle_denominator, processing_width_).release(),
			SVideoLineBufferTextureUnit);
	}
	if(modals_.display_type == DisplayType::SVideo || modals_.display_type == DisplayType::CompositeColour) {
		pipeline_stages_.emplace_back(
			svideo_to_rgb_shader(modals_.colour_cycle_numerator, modals_.colour_cycle_denominator, processing_width_).release(),
			RGBLineBufferTextureUnit);
	}

	output_shader_->set_uniform("textureName", GLint(UnprocessedLineBufferTextureUnit - GL_TEXTURE0));
	output_shader_->set_uniform("origin", modals.visible_area.origin.x, modals.visible_area.origin.y);
	output_shader_->set_uniform("size", modals.visible_area.size.width, modals.visible_area.size.height);
	// Cascade the texture units in use as per the pipeline stages.
	std::vector<Shader *> input_shaders = {input_shader_.get()};
	GLint texture_unit = GLint(UnprocessedLineBufferTextureUnit - GL_TEXTURE0);
	for(const auto &stage: pipeline_stages_) {
		input_shaders.push_back(stage.shader.get());
		stage.shader->set_uniform("textureName", texture_unit);
		set_uniforms(ShaderType::ProcessedScan, *stage.shader);
		++texture_unit;
	}
	output_shader_->set_uniform("textureName", texture_unit);

	// Ensure that all shaders involved in the input pipeline have the proper colour space knowledge.
	for(auto shader: input_shaders) {
		switch(modals.composite_colour_space) {
			case ColourSpace::YIQ: {
				const GLfloat rgbToYIQ[] = {0.299f, 0.596f, 0.211f, 0.587f, -0.274f, -0.523f, 0.114f, -0.322f, 0.312f};
				const GLfloat yiqToRGB[] = {1.0f, 1.0f, 1.0f, 0.956f, -0.272f, -1.106f, 0.621f, -0.647f, 1.703f};
				shader->set_uniform_matrix("lumaChromaToRGB", 3, false, yiqToRGB);
				shader->set_uniform_matrix("rgbToLumaChroma", 3, false, rgbToYIQ);
			} break;

			case ColourSpace::YUV: {
				const GLfloat rgbToYUV[] = {0.299f, -0.14713f, 0.615f, 0.587f, -0.28886f, -0.51499f, 0.114f, 0.436f, -0.10001f};
				const GLfloat yuvToRGB[] = {1.0f, 1.0f, 1.0f, 0.0f, -0.39465f, 2.03211f, 1.13983f, -0.58060f, 0.0f};
				shader->set_uniform_matrix("lumaChromaToRGB", 3, false, yuvToRGB);
				shader->set_uniform_matrix("rgbToLumaChroma", 3, false, rgbToYUV);
			} break;
		}
	}
}

void Outputs::Display::OpenGL::ScanTarget::set_uniforms(ShaderType type, Shader &target) {
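For reference, the nine-element arrays above look like the standard RGB to YIQ and YIQ to RGB matrices written out column by column; if the boolean argument to set_uniform_matrix is a transpose flag in the glUniformMatrix3fv sense, false means the data is consumed column-major, which matches that layout. As matrices, the YIQ pair would be:

$$
\begin{pmatrix} Y \\ I \\ Q \end{pmatrix} =
\begin{pmatrix}
0.299 & 0.587 & 0.114 \\
0.596 & -0.274 & -0.322 \\
0.211 & -0.523 & 0.312
\end{pmatrix}
\begin{pmatrix} R \\ G \\ B \end{pmatrix}
\qquad
\begin{pmatrix} R \\ G \\ B \end{pmatrix} \approx
\begin{pmatrix}
1 & 0.956 & 0.621 \\
1 & -0.272 & -0.647 \\
1 & -1.106 & 1.703
\end{pmatrix}
\begin{pmatrix} Y \\ I \\ Q \end{pmatrix}
$$

The YUV constants follow the same pattern with the usual YUV coefficients; the second matrix in each pair is only an approximate inverse of the first, so the round trip is not exact.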
@@ -466,10 +495,17 @@ void ScanTarget::draw(bool synchronous, int output_width, int output_height) {
		glDisable(GL_SCISSOR_TEST);
	}

	// Apply new spans.
	// Apply new spans. They definitely always go to the first buffer.
	glBindVertexArray(scan_vertex_array_);
	input_shader_->bind();
	glDrawArraysInstanced(GL_TRIANGLE_STRIP, 0, 4, GLsizei(new_scans));

	// If there are any further pipeline stages, apply them.
	for(auto &stage: pipeline_stages_) {
		stage.target.bind_framebuffer();
		stage.shader->bind();
		glDrawArraysInstanced(GL_TRIANGLE_STRIP, 0, 4, GLsizei(new_scans));
	}
}

// Ensure the accumulation buffer is properly sized.
@@ -17,6 +17,8 @@
#include <array>
#include <atomic>
#include <cstdint>
#include <list>
#include <memory>
#include <string>
#include <vector>

@@ -101,7 +103,8 @@ class ScanTarget: public Outputs::Display::ScanTarget {

		// Contains the first composition of scans into lines;
		// they're accumulated prior to output to allow for continuous
		// application of any necessary conversions — e.g. composite processing.
		// application of any necessary conversions — e.g. composite processing —
		// which happen progressively from here to the RGB texture.
		TextureTarget unprocessed_line_texture_;

		// Scans are accumulated to the accumulation texture; the full-display

@@ -141,7 +144,8 @@ class ScanTarget: public Outputs::Display::ScanTarget {
		Modals modals_;

		enum class ShaderType {
			Scan,
			InputScan,
			ProcessedScan,
			Line
		};

@@ -172,6 +176,20 @@ class ScanTarget: public Outputs::Display::ScanTarget {
		std::unique_ptr<Shader> output_shader_;

		static std::unique_ptr<Shader> input_shader(InputDataType input_data_type, DisplayType display_type);
		static std::unique_ptr<Shader> composite_to_svideo_shader(int colour_cycle_numerator, int colour_cycle_denominator, int processing_width);
		static std::unique_ptr<Shader> svideo_to_rgb_shader(int colour_cycle_numerator, int colour_cycle_denominator, int processing_width);

		struct PipelineStage {
			PipelineStage(Shader *shader, GLenum texture_unit) :
				shader(shader),
				target(LineBufferWidth, LineBufferHeight, texture_unit, GL_LINEAR, false) {}

			std::unique_ptr<Shader> shader;
			TextureTarget target;
		};

		// A list is used here to avoid requiring a copy constructor on PipelineStage.
		std::list<PipelineStage> pipeline_stages_;
};

}
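The closing comment is the usual justification for std::list in this situation: list nodes are constructed in place and never relocated, so the element type needs no copy or move constructor, whereas std::vector must be able to relocate its elements on reallocation. A small self-contained illustration, with Stage as a stand-in for PipelineStage (whose TextureTarget member is presumably what makes it non-copyable):

```cpp
#include <list>
#include <memory>
#include <vector>

// Stand-in for PipelineStage: declaring the copy constructor deleted also
// suppresses the implicit move constructor, so Stage can be neither copied nor moved.
struct Stage {
	explicit Stage(int unit) : texture_unit(unit) {}
	Stage(const Stage &) = delete;
	Stage &operator=(const Stage &) = delete;

	std::unique_ptr<int> shader = std::make_unique<int>(0);	// placeholder for std::unique_ptr<Shader>
	int texture_unit;
};

int main() {
	std::list<Stage> stages;
	stages.emplace_back(4);		// fine: constructed in place, never relocated
	stages.emplace_back(5);

	// std::vector<Stage> would not compile with emplace_back: vector requires a
	// copy or move constructor so that it can relocate elements when it reallocates.
	// std::vector<Stage> vector_of_stages;
	// vector_of_stages.emplace_back(4);	// error: uses deleted constructor
}
```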
@@ -8,11 +8,14 @@

#include "ScanTarget.hpp"

#include "../../SignalProcessing/FIRFilter.hpp"

using namespace Outputs::Display::OpenGL;

std::string ScanTarget::glsl_globals(ShaderType type) {
	switch(type) {
		case ShaderType::Scan:
		case ShaderType::InputScan:
		case ShaderType::ProcessedScan:
			return
				"#version 150\n"

@@ -59,23 +62,54 @@ std::string ScanTarget::glsl_globals(ShaderType type) {

std::string ScanTarget::glsl_default_vertex_shader(ShaderType type) {
	switch(type) {
		case ShaderType::Scan:
			return
				"out vec2 textureCoordinate;"
				"out float compositeAngle;"
				"out float compositeAmplitudeOut;"
		case ShaderType::InputScan:
		case ShaderType::ProcessedScan: {
			std::string result;

				"void main(void) {"
					"float lateral = float(gl_VertexID & 1);"
					"float longitudinal = float((gl_VertexID & 2) >> 1);"
			if(type == ShaderType::InputScan) {
				result += "out vec2 textureCoordinate;";
			} else {
				result += "out vec2 textureCoordinates[11];";
			}

					"textureCoordinate = vec2(mix(startDataX, endDataX, lateral), dataY) / textureSize(textureName, 0);"
					"compositeAngle = (mix(startCompositeAngle, endCompositeAngle, lateral) / 32.0) * 3.141592654;"
					"compositeAmplitudeOut = compositeAmplitude / 255.0;"
			result +=
				"out float compositeAngle;"
				"out float compositeAmplitudeOut;"

					"vec2 eyePosition = vec2(mix(startPoint.x, endPoint.x, lateral) * processingWidth, lineY + longitudinal) / vec2(scale.x, 2048.0);"
					"gl_Position = vec4(eyePosition*2 - vec2(1.0), 0.0, 1.0);"
				"}";
				"void main(void) {"
					"float lateral = float(gl_VertexID & 1);"
					"float longitudinal = float((gl_VertexID & 2) >> 1);"

					"compositeAngle = (mix(startCompositeAngle, endCompositeAngle, lateral) / 32.0) * 3.141592654;"
					"compositeAmplitudeOut = compositeAmplitude / 255.0;";

			if(type == ShaderType::InputScan) {
				result +=
					"textureCoordinate = vec2(mix(startDataX, endDataX, lateral), dataY) / textureSize(textureName, 0);"
					"vec2 eyePosition = vec2(mix(startPoint.x, endPoint.x, lateral) * processingWidth, lineY + longitudinal) / vec2(scale.x, 2048.0);";
			} else {
				result +=
					"vec2 eyePosition = vec2(mix(startDataX, endDataX, lateral) - 10.0 + lateral*20.0, dataY);"

					"textureCoordinates[0] = (eyePosition - vec2(5.0, 0.0)) / textureSize(textureName, 0);"
					"textureCoordinates[1] = (eyePosition - vec2(4.0, 0.0)) / textureSize(textureName, 0);"
					"textureCoordinates[2] = (eyePosition - vec2(3.0, 0.0)) / textureSize(textureName, 0);"
					"textureCoordinates[3] = (eyePosition - vec2(2.0, 0.0)) / textureSize(textureName, 0);"
					"textureCoordinates[4] = (eyePosition - vec2(1.0, 0.0)) / textureSize(textureName, 0);"
					"textureCoordinates[5] = eyePosition / textureSize(textureName, 0);"
					"textureCoordinates[6] = (eyePosition + vec2(1.0, 0.0)) / textureSize(textureName, 0);"
					"textureCoordinates[7] = (eyePosition + vec2(2.0, 0.0)) / textureSize(textureName, 0);"
					"textureCoordinates[8] = (eyePosition + vec2(3.0, 0.0)) / textureSize(textureName, 0);"
					"textureCoordinates[9] = (eyePosition + vec2(4.0, 0.0)) / textureSize(textureName, 0);"
					"textureCoordinates[10] = (eyePosition + vec2(5.0, 0.0)) / textureSize(textureName, 0);"

					"eyePosition = eyePosition / textureSize(textureName, 0);";
			}

			return result +
				"gl_Position = vec4(eyePosition*2.0 - vec2(1.0), 0.0, 1.0);"
				"}";
		}

		case ShaderType::Line:
			return
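The eleven textureCoordinates generated in the ProcessedScan branch sample the previous line buffer at whole-texel offsets of -5 to +5 around eyePosition. Combined with the textureWeights[11] uniform that composite_to_svideo_shader sets further down, the intermediate fragment shaders appear intended to evaluate an 11-tap FIR convolution of roughly this form (the fragment bodies in this commit are still stubs, so this is the intended shape rather than shipped code):

$$
\mathrm{out}(x) \;=\; \sum_{k=0}^{10} w_k \,\mathrm{line}\bigl(x + (k - 5)\bigr)
$$

where the $w_k$ are the coefficients obtained from SignalProcessing::FIRFilter.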
@@ -97,7 +131,8 @@ std::string ScanTarget::glsl_default_vertex_shader(ShaderType type) {

void ScanTarget::enable_vertex_attributes(ShaderType type, Shader &target) {
	switch(type) {
		case ShaderType::Scan:
		case ShaderType::InputScan:
		case ShaderType::ProcessedScan:
			for(int c = 0; c < 2; ++c) {
				const std::string prefix = c ? "end" : "start";

@@ -244,7 +279,42 @@ std::unique_ptr<Shader> ScanTarget::input_shader(InputDataType input_data_type,
	}

	return std::unique_ptr<Shader>(new Shader(
		glsl_globals(ShaderType::Scan) + glsl_default_vertex_shader(ShaderType::Scan),
		glsl_globals(ShaderType::InputScan) + glsl_default_vertex_shader(ShaderType::InputScan),
		fragment_shader + "}"
	));
}

std::unique_ptr<Shader> ScanTarget::composite_to_svideo_shader(int colour_cycle_numerator, int colour_cycle_denominator, int processing_width) {
	/*
		Composite to S-Video conversion is achieved by filtering the input signal to obtain luminance, and then subtracting that
		from the original to get chrominance.

		(Colour cycle numerator)/(Colour cycle denominator) gives the number of colour cycles in (processing_width / LineBufferWidth);
		there'll be at least four samples per colour clock and in practice at most just a shade more than 9.
	*/
	const float cycles_per_expanded_line = (float(colour_cycle_numerator) / float(colour_cycle_denominator)) / (float(processing_width) / float(LineBufferWidth));
	const SignalProcessing::FIRFilter filter(11, float(LineBufferWidth), 0.0f, cycles_per_expanded_line);
	const auto coefficients = filter.get_coefficients();

	auto shader = std::unique_ptr<Shader>(new Shader(
		glsl_globals(ShaderType::ProcessedScan) + glsl_default_vertex_shader(ShaderType::ProcessedScan),
		"#version 150\n"

		"in vec2 textureCoordinates[11];"
		"uniform float textureWeights[11];"

		"out vec4 fragColour;"
		"void main(void) {"
			"fragColour = vec4(1.0);"
		"}"
	));
	shader->set_uniform("textureWeights", GLint(sizeof(GLfloat)), GLsizei(coefficients.size()), coefficients.data());
	return shader;
}

std::unique_ptr<Shader> ScanTarget::svideo_to_rgb_shader(int colour_cycle_numerator, int colour_cycle_denominator, int processing_width) {
	const float cycles_per_expanded_line = (float(colour_cycle_numerator) / float(colour_cycle_denominator)) / (float(processing_width) / float(LineBufferWidth));
	const SignalProcessing::FIRFilter filter(11, float(LineBufferWidth), 0.0f, cycles_per_expanded_line * 0.5f);
	const auto coefficients = filter.get_coefficients();
	return nullptr;
}
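The comment in composite_to_svideo_shader describes the classic separation: low-pass filter the composite signal to estimate luminance, then subtract that estimate from the raw signal so that only the amplitude-modulated chrominance remains. A CPU-side sketch of the same idea on one line of samples, assuming an 11-tap low-pass kernel of the sort FIRFilter would supply (the actual coefficients are not reproduced here):

```cpp
#include <algorithm>
#include <array>
#include <vector>

// One line of composite samples in; luminance and modulated chrominance out.
// 'weights' is an 11-tap low-pass FIR kernel whose taps sum to 1, standing in
// for the coefficients that SignalProcessing::FIRFilter would supply.
void separate_composite(
	const std::vector<float> &composite,
	const std::array<float, 11> &weights,
	std::vector<float> &luma,
	std::vector<float> &chroma) {
	const int size = int(composite.size());
	luma.resize(composite.size());
	chroma.resize(composite.size());

	for(int x = 0; x < size; ++x) {
		// Low-pass filter: weighted sum over x-5 ... x+5, clamped at the line edges.
		float filtered = 0.0f;
		for(int tap = 0; tap < 11; ++tap) {
			const int offset = std::min(std::max(x + tap - 5, 0), size - 1);
			filtered += weights[tap] * composite[offset];
		}
		luma[x] = filtered;

		// Whatever the low-pass filter rejected is the amplitude-modulated chrominance.
		chroma[x] = composite[x] - filtered;
	}
}
```

A GLSL version would perform the same weighted sum across textureCoordinates[0] to textureCoordinates[10] and write the resulting luma/chroma pair into the S-Video line buffer; in this commit that fragment body is still the vec4(1.0) placeholder.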
@@ -101,6 +101,23 @@ inline size_t size_for_data_type(InputDataType data_type) {
	}
}

inline DisplayType natural_display_type_for_data_type(InputDataType data_type) {
	switch(data_type) {
		case InputDataType::Luminance1:
		case InputDataType::Luminance8:
			return DisplayType::CompositeColour;

		case InputDataType::Red1Green1Blue1:
		case InputDataType::Red2Green2Blue2:
		case InputDataType::Red4Green4Blue4:
		case InputDataType::Red8Green8Blue8:
			return DisplayType::RGB;

		case InputDataType::Luminance8Phase8:
			return DisplayType::SVideo;
	}
}

/*!
	Provides an abstract target for 'scans' i.e. continuous sweeps of output data,
	which are identified by 2d start and end coordinates, and the PCM-sampled data

@@ -124,7 +141,7 @@ struct ScanTarget {
	InputDataType input_data_type;

	/// Describes the type of display that the data is being shown on.
	DisplayType display_type = DisplayType::CompositeMonochrome;
	DisplayType display_type = DisplayType::RGB;

	/// If being fed composite data, this defines the colour space in use.
	ColourSpace composite_colour_space;