1
0
mirror of https://github.com/TomHarte/CLK.git synced 2024-07-05 10:28:58 +00:00

Merge pull request #585 from TomHarte/TrigonometricDecode

Collapses video pipeline down to two stages.
This commit is contained in:
Thomas Harte 2019-01-13 23:07:09 -05:00 committed by GitHub
commit 82922aa2c7
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
17 changed files with 588 additions and 649 deletions

View File

@ -189,7 +189,6 @@ void WD1770::posit_event(int new_event_type) {
interesting_event_mask_ &= ~new_event_type;
}
Status new_status;
BEGIN_SECTION()
// Wait for a new command, branch to the appropriate handler.

View File

@ -64,7 +64,7 @@ template <class BusHandler> class MOS6560 {
public:
MOS6560(BusHandler &bus_handler) :
bus_handler_(bus_handler),
crt_(65*4, 4, Outputs::Display::Type::NTSC60, Outputs::Display::InputDataType::Luminance8Phase8),
crt_(65*4, 1, Outputs::Display::Type::NTSC60, Outputs::Display::InputDataType::Luminance8Phase8),
audio_generator_(audio_queue_),
speaker_(audio_generator_)
{

View File

@ -39,6 +39,7 @@ namespace AppleII {
*/
class Card {
public:
virtual ~Card() {}
enum Select: int {
None = 0, // No select line is active
IO = 1 << 0, // IO select is active

View File

@ -394,7 +394,6 @@ template <class BusHandler, bool is_iie> class Video: public VideoBase {
static_cast<size_t>(fetch_end - column_),
&base_stream_[static_cast<size_t>(column_)],
&auxiliary_stream_[static_cast<size_t>(column_)]);
// TODO: should character modes be mapped to character pixel outputs here?
}
if(row_ < 192) {
@ -413,7 +412,7 @@ template <class BusHandler, bool is_iie> class Video: public VideoBase {
const int pixel_row = row_ & 7;
const bool is_double = Video::is_double_mode(line_mode);
if(!is_double && was_double_) {
if(!is_double && was_double_ && pixel_pointer_) {
pixel_pointer_[pixel_start*14 + 0] =
pixel_pointer_[pixel_start*14 + 1] =
pixel_pointer_[pixel_start*14 + 2] =
@ -424,79 +423,83 @@ template <class BusHandler, bool is_iie> class Video: public VideoBase {
}
was_double_ = is_double;
switch(line_mode) {
case GraphicsMode::Text:
output_text(
&pixel_pointer_[pixel_start * 14 + 7],
&base_stream_[static_cast<size_t>(pixel_start)],
static_cast<size_t>(pixel_end - pixel_start),
static_cast<size_t>(pixel_row));
break;
if(pixel_pointer_) {
switch(line_mode) {
case GraphicsMode::Text:
output_text(
&pixel_pointer_[pixel_start * 14 + 7],
&base_stream_[static_cast<size_t>(pixel_start)],
static_cast<size_t>(pixel_end - pixel_start),
static_cast<size_t>(pixel_row));
break;
case GraphicsMode::DoubleText:
output_double_text(
&pixel_pointer_[pixel_start * 14],
&base_stream_[static_cast<size_t>(pixel_start)],
&auxiliary_stream_[static_cast<size_t>(pixel_start)],
static_cast<size_t>(pixel_end - pixel_start),
static_cast<size_t>(pixel_row));
break;
case GraphicsMode::DoubleText:
output_double_text(
&pixel_pointer_[pixel_start * 14],
&base_stream_[static_cast<size_t>(pixel_start)],
&auxiliary_stream_[static_cast<size_t>(pixel_start)],
static_cast<size_t>(pixel_end - pixel_start),
static_cast<size_t>(pixel_row));
break;
case GraphicsMode::LowRes:
output_low_resolution(
&pixel_pointer_[pixel_start * 14 + 7],
&base_stream_[static_cast<size_t>(pixel_start)],
static_cast<size_t>(pixel_end - pixel_start),
pixel_start,
pixel_row);
break;
case GraphicsMode::LowRes:
output_low_resolution(
&pixel_pointer_[pixel_start * 14 + 7],
&base_stream_[static_cast<size_t>(pixel_start)],
static_cast<size_t>(pixel_end - pixel_start),
pixel_start,
pixel_row);
break;
case GraphicsMode::FatLowRes:
output_fat_low_resolution(
&pixel_pointer_[pixel_start * 14 + 7],
&base_stream_[static_cast<size_t>(pixel_start)],
static_cast<size_t>(pixel_end - pixel_start),
pixel_start,
pixel_row);
break;
case GraphicsMode::FatLowRes:
output_fat_low_resolution(
&pixel_pointer_[pixel_start * 14 + 7],
&base_stream_[static_cast<size_t>(pixel_start)],
static_cast<size_t>(pixel_end - pixel_start),
pixel_start,
pixel_row);
break;
case GraphicsMode::DoubleLowRes:
output_double_low_resolution(
&pixel_pointer_[pixel_start * 14],
&base_stream_[static_cast<size_t>(pixel_start)],
&auxiliary_stream_[static_cast<size_t>(pixel_start)],
static_cast<size_t>(pixel_end - pixel_start),
pixel_start,
pixel_row);
break;
case GraphicsMode::DoubleLowRes:
output_double_low_resolution(
&pixel_pointer_[pixel_start * 14],
&base_stream_[static_cast<size_t>(pixel_start)],
&auxiliary_stream_[static_cast<size_t>(pixel_start)],
static_cast<size_t>(pixel_end - pixel_start),
pixel_start,
pixel_row);
break;
case GraphicsMode::HighRes:
output_high_resolution(
&pixel_pointer_[pixel_start * 14 + 7],
&base_stream_[static_cast<size_t>(pixel_start)],
static_cast<size_t>(pixel_end - pixel_start));
break;
case GraphicsMode::HighRes:
output_high_resolution(
&pixel_pointer_[pixel_start * 14 + 7],
&base_stream_[static_cast<size_t>(pixel_start)],
static_cast<size_t>(pixel_end - pixel_start));
break;
case GraphicsMode::DoubleHighRes:
output_double_high_resolution(
&pixel_pointer_[pixel_start * 14],
&base_stream_[static_cast<size_t>(pixel_start)],
&auxiliary_stream_[static_cast<size_t>(pixel_start)],
static_cast<size_t>(pixel_end - pixel_start));
break;
case GraphicsMode::DoubleHighRes:
output_double_high_resolution(
&pixel_pointer_[pixel_start * 14],
&base_stream_[static_cast<size_t>(pixel_start)],
&auxiliary_stream_[static_cast<size_t>(pixel_start)],
static_cast<size_t>(pixel_end - pixel_start));
break;
default: break;
default: break;
}
}
if(pixel_end == 40) {
if(was_double_) {
pixel_pointer_[560] = pixel_pointer_[561] = pixel_pointer_[562] = pixel_pointer_[563] =
pixel_pointer_[564] = pixel_pointer_[565] = pixel_pointer_[566] = pixel_pointer_[567] = 0;
} else {
if(line_mode == GraphicsMode::HighRes && base_stream_[39]&0x80)
pixel_pointer_[567] = graphics_carry_;
else
pixel_pointer_[567] = 0;
if(pixel_pointer_) {
if(was_double_) {
pixel_pointer_[560] = pixel_pointer_[561] = pixel_pointer_[562] = pixel_pointer_[563] =
pixel_pointer_[564] = pixel_pointer_[565] = pixel_pointer_[566] = pixel_pointer_[567] = 0;
} else {
if(line_mode == GraphicsMode::HighRes && base_stream_[39]&0x80)
pixel_pointer_[567] = graphics_carry_;
else
pixel_pointer_[567] = 0;
}
}
crt_.output_data(568, 568);

View File

@ -43,6 +43,8 @@ class MemoryMap {
class ROMSlotHandler {
public:
virtual ~ROMSlotHandler() {}
/*! Advances time by @c half_cycles. */
virtual void run_for(HalfCycles half_cycles) {}

View File

@ -23,6 +23,8 @@ namespace Utility {
*/
class CharacterMapper {
public:
virtual ~CharacterMapper() {}
/// @returns The EndSequence-terminated sequence of keys that would cause @c character to be typed.
virtual uint16_t *sequence_for_character(char character) = 0;

View File

@ -68,7 +68,7 @@
</AdditionalOptions>
</TestAction>
<LaunchAction
buildConfiguration = "Release"
buildConfiguration = "Debug"
selectedDebuggerIdentifier = "Xcode.DebuggerFoundation.Debugger.LLDB"
selectedLauncherIdentifier = "Xcode.DebuggerFoundation.Launcher.LLDB"
enableASanStackUseAfterReturn = "YES"

View File

@ -159,6 +159,18 @@ Flywheel::SyncEvent CRT::get_next_horizontal_sync_event(bool hsync_is_requested,
return horizontal_flywheel_->get_next_event_in_period(hsync_is_requested, cycles_to_run_for, cycles_advanced);
}
Outputs::Display::ScanTarget::Scan::EndPoint CRT::end_point(uint16_t data_offset) {
	// Snapshots the CRT's current beam state into a scan end point,
	// pairing it with the caller-supplied offset into the sample data.
	Display::ScanTarget::Scan::EndPoint point;

	point.data_offset = data_offset;
	point.cycles_since_end_of_horizontal_retrace = uint16_t(cycles_since_horizontal_sync_ / time_multiplier_);

	// Current output position, with the vertical coordinate scaled down
	// by the configured flywheel output divider.
	point.x = uint16_t(horizontal_flywheel_->get_current_output_position());
	point.y = uint16_t(vertical_flywheel_->get_current_output_position() / vertical_flywheel_output_divider_);

	// Colour subcarrier phase as a fixed-point angle; the sign alternates
	// on alternate lines when phase alternation is in effect.
	const int16_t angle = int16_t((phase_numerator_ << 6) / phase_denominator_);
	point.composite_angle = is_alernate_line_ ? int16_t(-angle) : angle;

	return point;
}
void CRT::advance_cycles(int number_of_cycles, bool hsync_requested, bool vsync_requested, const Scan::Type type, int number_of_samples) {
number_of_cycles *= time_multiplier_;
@ -186,16 +198,14 @@ void CRT::advance_cycles(int number_of_cycles, bool hsync_requested, bool vsync_
// If outputting, store the start location and scan constants.
if(next_scan) {
next_scan->end_points[0].x = uint16_t(horizontal_flywheel_->get_current_output_position());
next_scan->end_points[0].y = uint16_t(vertical_flywheel_->get_current_output_position() / vertical_flywheel_output_divider_);
next_scan->end_points[0].composite_angle = int16_t((phase_numerator_ << 6) / phase_denominator_) * (is_alernate_line_ ? -1 : 1);
next_scan->end_points[0].data_offset = uint16_t((total_cycles - number_of_cycles) * number_of_samples / total_cycles);
next_scan->end_points[0] = end_point(uint16_t((total_cycles - number_of_cycles) * number_of_samples / total_cycles));
next_scan->composite_amplitude = colour_burst_amplitude_;
}
// Advance time: that'll affect both the colour subcarrier position and the number of cycles left to run.
phase_numerator_ += next_run_length * colour_cycle_numerator_;
number_of_cycles -= next_run_length;
cycles_since_horizontal_sync_ += next_run_length;
// React to the incoming event.
horizontal_flywheel_->apply_event(next_run_length, (next_run_length == time_until_horizontal_sync_event) ? next_horizontal_sync_event : Flywheel::SyncEvent::None);
@ -203,24 +213,28 @@ void CRT::advance_cycles(int number_of_cycles, bool hsync_requested, bool vsync_
// End the scan if necessary.
if(next_scan) {
next_scan->end_points[1].x = uint16_t(horizontal_flywheel_->get_current_output_position());
next_scan->end_points[1].y = uint16_t(vertical_flywheel_->get_current_output_position() / vertical_flywheel_output_divider_);
next_scan->end_points[1].composite_angle = int16_t((phase_numerator_ << 6) / phase_denominator_) * (is_alernate_line_ ? -1 : 1);
next_scan->end_points[1].data_offset = uint16_t((total_cycles - number_of_cycles) * number_of_samples / total_cycles);
next_scan->end_points[1] = end_point(uint16_t((total_cycles - number_of_cycles) * number_of_samples / total_cycles));
scan_target_->end_scan();
}
// Announce horizontal retrace events.
if(next_run_length == time_until_horizontal_sync_event && next_horizontal_sync_event != Flywheel::SyncEvent::None) {
// Reset the cycles-since-sync counter if this is the end of retrace.
if(next_horizontal_sync_event == Flywheel::SyncEvent::EndRetrace) {
cycles_since_horizontal_sync_ = 0;
}
// Announce event.
const auto event =
(next_horizontal_sync_event == Flywheel::SyncEvent::StartRetrace)
? Outputs::Display::ScanTarget::Event::BeginHorizontalRetrace : Outputs::Display::ScanTarget::Event::EndHorizontalRetrace;
scan_target_->announce(
event,
uint16_t(horizontal_flywheel_->get_current_output_position()),
uint16_t(vertical_flywheel_->get_current_output_position() / vertical_flywheel_output_divider_));
!(horizontal_flywheel_->is_in_retrace() || vertical_flywheel_->is_in_retrace()),
end_point(uint16_t((total_cycles - number_of_cycles) * number_of_samples / total_cycles)),
colour_burst_amplitude_);
// Prepare for the next line.
// If retrace is starting, update phase if required and mark no colour burst spotted yet.
if(next_horizontal_sync_event == Flywheel::SyncEvent::StartRetrace) {
is_alernate_line_ ^= phase_alternates_;
colour_burst_amplitude_ = 0;
@ -234,8 +248,9 @@ void CRT::advance_cycles(int number_of_cycles, bool hsync_requested, bool vsync_
? Outputs::Display::ScanTarget::Event::BeginVerticalRetrace : Outputs::Display::ScanTarget::Event::EndVerticalRetrace;
scan_target_->announce(
event,
uint16_t(horizontal_flywheel_->get_current_output_position()),
uint16_t(vertical_flywheel_->get_current_output_position() / vertical_flywheel_output_divider_));
!(horizontal_flywheel_->is_in_retrace() || vertical_flywheel_->is_in_retrace()),
end_point(uint16_t((total_cycles - number_of_cycles) * number_of_samples / total_cycles)),
colour_burst_amplitude_);
}
// if this is vertical retrace then advance a field

View File

@ -41,6 +41,8 @@ class CRT {
// posted on to the scan target.
std::unique_ptr<Flywheel> horizontal_flywheel_, vertical_flywheel_;
int vertical_flywheel_output_divider_ = 1;
int cycles_since_horizontal_sync_ = 0;
Display::ScanTarget::Scan::EndPoint end_point(uint16_t data_offset);
struct Scan {
enum Type {

View File

@ -47,8 +47,8 @@ GLuint Shader::compile_shader(const std::string &source, GLenum type) {
Shader::Shader(const std::string &vertex_shader, const std::string &fragment_shader, const std::vector<AttributeBinding> &attribute_bindings) {
shader_program_ = glCreateProgram();
GLuint vertex = compile_shader(vertex_shader, GL_VERTEX_SHADER);
GLuint fragment = compile_shader(fragment_shader, GL_FRAGMENT_SHADER);
const GLuint vertex = compile_shader(vertex_shader, GL_VERTEX_SHADER);
const GLuint fragment = compile_shader(fragment_shader, GL_FRAGMENT_SHADER);
glAttachShader(shader_program_, vertex);
glAttachShader(shader_program_, fragment);
@ -60,17 +60,18 @@ Shader::Shader(const std::string &vertex_shader, const std::string &fragment_sha
glLinkProgram(shader_program_);
#ifdef DEBUG
GLint logLength;
glGetProgramiv(shader_program_, GL_INFO_LOG_LENGTH, &logLength);
if(logLength > 0) {
GLchar *log = new GLchar[static_cast<std::size_t>(logLength)];
glGetProgramInfoLog(shader_program_, logLength, &logLength, log);
printf("Link log:\n%s\n", log);
delete[] log;
}
GLint didLink = 0;
glGetProgramiv(shader_program_, GL_LINK_STATUS, &didLink);
if(didLink == GL_FALSE) {
GLint logLength;
glGetProgramiv(shader_program_, GL_INFO_LOG_LENGTH, &logLength);
if(logLength > 0) {
GLchar *log = new GLchar[static_cast<std::size_t>(logLength)];
glGetProgramInfoLog(shader_program_, logLength, &logLength, log);
printf("Link log:\n%s\n", log);
delete[] log;
}
throw ProgramLinkageError;
}
#endif

View File

@ -13,22 +13,14 @@ using namespace Outputs::Display::OpenGL;
namespace {
/// The texture unit from which to source 1bpp input data.
constexpr GLenum SourceData1BppTextureUnit = GL_TEXTURE0;
/// The texture unit from which to source 2bpp input data.
//constexpr GLenum SourceData2BppTextureUnit = GL_TEXTURE1;
/// The texture unit from which to source 4bpp input data.
//constexpr GLenum SourceData4BppTextureUnit = GL_TEXTURE2;
/// The texture unit from which to source input data.
constexpr GLenum SourceDataTextureUnit = GL_TEXTURE1;
/// The texture unit which contains raw line-by-line composite, S-Video or RGB data.
constexpr GLenum UnprocessedLineBufferTextureUnit = GL_TEXTURE3;
/// The texture unit which contains line-by-line records of luminance and two channels of chrominance, straight after multiplication by the quadrature vector, not yet filtered.
constexpr GLenum SVideoLineBufferTextureUnit = GL_TEXTURE4;
/// The texture unit which contains line-by-line records of RGB.
constexpr GLenum RGBLineBufferTextureUnit = GL_TEXTURE5;
/// The texture unit that contains the current display.
constexpr GLenum AccumulationTextureUnit = GL_TEXTURE6;
constexpr GLenum AccumulationTextureUnit = GL_TEXTURE2;
#define TextureAddress(x, y) (((y) << 11) | (x))
#define TextureAddressGetY(v) uint16_t((v) >> 11)
@ -108,17 +100,6 @@ void ScanTarget::set_modals(Modals modals) {
is_drawing_.clear();
}
void Outputs::Display::OpenGL::ScanTarget::set_uniforms(ShaderType type, Shader &target) {
// Slightly over-amping rowHeight here is a cheap way to make sure that lines
// converge even allowing for the fact that they may not be spaced by exactly
// the expected distance. Cf. the stencil-powered logic for making sure all
// pixels are painted only exactly once per field.
target.set_uniform("rowHeight", GLfloat(1.05f / modals_.expected_vertical_lines));
target.set_uniform("scale", GLfloat(modals_.output_scale.x), GLfloat(modals_.output_scale.y));
target.set_uniform("processingWidth", GLfloat(processing_width_) / 2048.0f);
target.set_uniform("phaseOffset", GLfloat(modals_.input_data_tweaks.phase_linked_luminance_offset));
}
Outputs::Display::ScanTarget::Scan *ScanTarget::begin_scan() {
if(allocation_has_failed_) return nullptr;
@ -230,54 +211,55 @@ void ScanTarget::submit() {
allocation_has_failed_ = false;
}
void ScanTarget::announce(Event event, uint16_t x, uint16_t y) {
switch(event) {
default: break;
case ScanTarget::Event::BeginHorizontalRetrace:
if(active_line_) {
active_line_->end_points[1].x = x;
active_line_->end_points[1].y = y;
}
break;
case ScanTarget::Event::EndHorizontalRetrace: {
// Commit the most recent line only if any scans fell on it.
// Otherwise there's no point outputting it, it'll contribute nothing.
if(provided_scans_) {
// Store metadata if concluding a previous line.
if(active_line_) {
line_metadata_buffer_[size_t(write_pointers_.line)].is_first_in_frame = is_first_in_frame_;
line_metadata_buffer_[size_t(write_pointers_.line)].previous_frame_was_complete = frame_was_complete_;
is_first_in_frame_ = false;
}
const auto read_pointers = read_pointers_.load();
// Attempt to allocate a new line; note allocation failure if necessary.
const auto next_line = uint16_t((write_pointers_.line + 1) % LineBufferHeight);
if(next_line == read_pointers.line) {
allocation_has_failed_ = true;
active_line_ = nullptr;
} else {
write_pointers_.line = next_line;
active_line_ = &line_buffer_[size_t(write_pointers_.line)];
}
provided_scans_ = 0;
}
if(active_line_) {
active_line_->end_points[0].x = x;
active_line_->end_points[0].y = y;
active_line_->line = write_pointers_.line;
}
} break;
case ScanTarget::Event::EndVerticalRetrace:
is_first_in_frame_ = true;
frame_was_complete_ = true;
break;
// Receives beam-position events from the producer. A transition of `is_visible`
// from false to true begins a new output line (allocating a slot in the line
// buffer); a transition to true->false closes the current line. Repeated calls
// with an unchanged visibility are ignored beyond the vertical-retrace check.
void ScanTarget::announce(Event event, bool is_visible, const Outputs::Display::ScanTarget::Scan::EndPoint &location, uint8_t composite_amplitude) {
// The end of vertical retrace marks the start of a new frame.
if(event == ScanTarget::Event::EndVerticalRetrace) {
is_first_in_frame_ = true;
frame_was_complete_ = true;
}
// TODO: any lines that include any portion of vertical sync should be hidden.
// (maybe set a flag and zero out the line coordinates?)
// Only visibility *transitions* are interesting below.
if(output_is_visible_ == is_visible) return;
if(is_visible) {
// Visibility has just begun: commit the most recent line, but only if any
// scans fell on it. Otherwise there's no point outputting it, it'll
// contribute nothing.
if(provided_scans_) {
// Store metadata if concluding a previous line.
if(active_line_) {
line_metadata_buffer_[size_t(write_pointers_.line)].is_first_in_frame = is_first_in_frame_;
line_metadata_buffer_[size_t(write_pointers_.line)].previous_frame_was_complete = frame_was_complete_;
is_first_in_frame_ = false;
}
const auto read_pointers = read_pointers_.load();
// Attempt to allocate a new line; note allocation failure if necessary.
// The buffer is circular: if the next write slot would collide with the
// consumer's read position, the buffer is full.
const auto next_line = uint16_t((write_pointers_.line + 1) % LineBufferHeight);
if(next_line == read_pointers.line) {
allocation_has_failed_ = true;
active_line_ = nullptr;
} else {
write_pointers_.line = next_line;
active_line_ = &line_buffer_[size_t(write_pointers_.line)];
}
provided_scans_ = 0;
}
// Record the start point of the newly-active line.
if(active_line_) {
active_line_->end_points[0].x = location.x;
active_line_->end_points[0].y = location.y;
active_line_->end_points[0].cycles_since_end_of_horizontal_retrace = location.cycles_since_end_of_horizontal_retrace;
active_line_->end_points[0].composite_angle = location.composite_angle;
active_line_->line = write_pointers_.line;
active_line_->composite_amplitude = composite_amplitude;
}
} else {
// Visibility has just ended: record the end point of the active line.
if(active_line_) {
active_line_->end_points[1].x = location.x;
active_line_->end_points[1].y = location.y;
active_line_->end_points[1].cycles_since_end_of_horizontal_retrace = location.cycles_since_end_of_horizontal_retrace;
active_line_->end_points[1].composite_angle = location.composite_angle;
}
}
output_is_visible_ = is_visible;
}
void ScanTarget::setup_pipeline() {
@ -292,94 +274,27 @@ void ScanTarget::setup_pipeline() {
write_pointers_.write_area = 0;
}
// Pick a processing width; this will be at least four times the
// colour subcarrier, and an integer multiple of the pixel clock and
// at most 2048.
const int colour_cycle_width = (modals_.colour_cycle_numerator * 4 + modals_.colour_cycle_denominator - 1) / modals_.colour_cycle_denominator;
const int dot_clock = modals_.cycles_per_line / modals_.clocks_per_pixel_greatest_common_divisor;
const int overflow = colour_cycle_width % dot_clock;
processing_width_ = colour_cycle_width + (overflow ? dot_clock - overflow : 0);
processing_width_ = std::min(processing_width_, 2048);
// Establish an output shader. TODO: add gamma correction here.
output_shader_.reset(new Shader(
glsl_globals(ShaderType::Line) + glsl_default_vertex_shader(ShaderType::Line),
"#version 150\n"
"out vec4 fragColour;"
"in vec2 textureCoordinate;"
"uniform sampler2D textureName;"
"void main(void) {"
"fragColour = vec4(texture(textureName, textureCoordinate).rgb, 0.64);"
"}",
attribute_bindings(ShaderType::Line)
));
// Pick a processing width; this will be the minimum necessary not to
// lose any detail when combining the input.
processing_width_ = modals_.cycles_per_line / modals_.clocks_per_pixel_greatest_common_divisor;
// Establish an output shader. TODO: add proper decoding and gamma correction here.
output_shader_ = conversion_shader(modals_.input_data_type, modals_.display_type, modals_.composite_colour_space);
glBindVertexArray(line_vertex_array_);
glBindBuffer(GL_ARRAY_BUFFER, line_buffer_name_);
enable_vertex_attributes(ShaderType::Line, *output_shader_);
set_uniforms(ShaderType::Line, *output_shader_);
output_shader_->set_uniform("origin", modals_.visible_area.origin.x, modals_.visible_area.origin.y);
output_shader_->set_uniform("size", modals_.visible_area.size.width, modals_.visible_area.size.height);
// Establish such intermediary shaders as are required.
pipeline_stages_.clear();
if(modals_.display_type == DisplayType::CompositeColour) {
pipeline_stages_.emplace_back(
composite_to_svideo_shader(modals_.colour_cycle_numerator, modals_.colour_cycle_denominator, processing_width_).release(),
SVideoLineBufferTextureUnit,
GL_NEAREST);
}
if(modals_.display_type == DisplayType::SVideo || modals_.display_type == DisplayType::CompositeColour) {
pipeline_stages_.emplace_back(
svideo_to_rgb_shader(modals_.colour_cycle_numerator, modals_.colour_cycle_denominator, processing_width_).release(),
(modals_.display_type == DisplayType::CompositeColour) ? RGBLineBufferTextureUnit : SVideoLineBufferTextureUnit,
GL_NEAREST);
}
glBindVertexArray(scan_vertex_array_);
glBindBuffer(GL_ARRAY_BUFFER, scan_buffer_name_);
output_shader_->set_uniform("textureName", GLint(UnprocessedLineBufferTextureUnit - GL_TEXTURE0));
// Establish an input shader.
input_shader_ = input_shader(modals_.input_data_type, modals_.display_type);
input_shader_ = composition_shader(modals_.input_data_type);
glBindVertexArray(scan_vertex_array_);
glBindBuffer(GL_ARRAY_BUFFER, scan_buffer_name_);
enable_vertex_attributes(ShaderType::InputScan, *input_shader_);
set_uniforms(ShaderType::InputScan, *input_shader_);
input_shader_->set_uniform("textureName", GLint(SourceData1BppTextureUnit - GL_TEXTURE0));
// Cascade the texture units in use as per the pipeline stages.
std::vector<Shader *> input_shaders = {input_shader_.get()};
GLint texture_unit = GLint(UnprocessedLineBufferTextureUnit - GL_TEXTURE0);
for(const auto &stage: pipeline_stages_) {
input_shaders.push_back(stage.shader.get());
stage.shader->set_uniform("textureName", texture_unit);
set_uniforms(ShaderType::ProcessedScan, *stage.shader);
enable_vertex_attributes(ShaderType::ProcessedScan, *stage.shader);
++texture_unit;
}
output_shader_->set_uniform("textureName", texture_unit);
// Ensure that all shaders involved in the input pipeline have the proper colour space knowledge.
for(auto shader: input_shaders) {
switch(modals_.composite_colour_space) {
case ColourSpace::YIQ: {
const GLfloat rgbToYIQ[] = {0.299f, 0.596f, 0.211f, 0.587f, -0.274f, -0.523f, 0.114f, -0.322f, 0.312f};
const GLfloat yiqToRGB[] = {1.0f, 1.0f, 1.0f, 0.956f, -0.272f, -1.106f, 0.621f, -0.647f, 1.703f};
shader->set_uniform_matrix("lumaChromaToRGB", 3, false, yiqToRGB);
shader->set_uniform_matrix("rgbToLumaChroma", 3, false, rgbToYIQ);
} break;
case ColourSpace::YUV: {
const GLfloat rgbToYUV[] = {0.299f, -0.14713f, 0.615f, 0.587f, -0.28886f, -0.51499f, 0.114f, 0.436f, -0.10001f};
const GLfloat yuvToRGB[] = {1.0f, 1.0f, 1.0f, 0.0f, -0.39465f, 2.03211f, 1.13983f, -0.58060f, 0.0f};
shader->set_uniform_matrix("lumaChromaToRGB", 3, false, yuvToRGB);
shader->set_uniform_matrix("rgbToLumaChroma", 3, false, rgbToYUV);
} break;
}
}
input_shader_->set_uniform("textureName", GLint(SourceDataTextureUnit - GL_TEXTURE0));
}
void ScanTarget::draw(bool synchronous, int output_width, int output_height) {
@ -431,7 +346,7 @@ void ScanTarget::draw(bool synchronous, int output_width, int output_height) {
// Submit texture.
if(submit_pointers.write_area != read_pointers.write_area) {
glActiveTexture(SourceData1BppTextureUnit);
glActiveTexture(SourceDataTextureUnit);
glBindTexture(GL_TEXTURE_2D, write_area_texture_name_);
// Create storage for the texture if it doesn't yet exist; this was deferred until here
@ -487,7 +402,6 @@ void ScanTarget::draw(bool synchronous, int output_width, int output_height) {
// Push new input to the unprocessed line buffer.
if(new_scans) {
glDisable(GL_BLEND);
unprocessed_line_texture_.bind_framebuffer();
// Clear newly-touched lines; that is everything from (read+1) to submit.
@ -499,26 +413,11 @@ void ScanTarget::draw(bool synchronous, int output_width, int output_height) {
if(first_line_to_clear < final_line_to_clear) {
glScissor(0, first_line_to_clear, unprocessed_line_texture_.get_width(), final_line_to_clear - first_line_to_clear);
glClear(GL_COLOR_BUFFER_BIT);
if(pipeline_stages_.size()) {
pipeline_stages_.back().target.bind_framebuffer();
glClear(GL_COLOR_BUFFER_BIT);
unprocessed_line_texture_.bind_framebuffer();
}
} else {
glScissor(0, 0, unprocessed_line_texture_.get_width(), final_line_to_clear);
glClear(GL_COLOR_BUFFER_BIT);
glScissor(0, first_line_to_clear, unprocessed_line_texture_.get_width(), unprocessed_line_texture_.get_height() - first_line_to_clear);
glClear(GL_COLOR_BUFFER_BIT);
if(pipeline_stages_.size()) {
pipeline_stages_.back().target.bind_framebuffer();
glScissor(0, 0, unprocessed_line_texture_.get_width(), final_line_to_clear);
glClear(GL_COLOR_BUFFER_BIT);
glScissor(0, first_line_to_clear, unprocessed_line_texture_.get_width(), unprocessed_line_texture_.get_height() - first_line_to_clear);
glClear(GL_COLOR_BUFFER_BIT);
unprocessed_line_texture_.bind_framebuffer();
}
}
glDisable(GL_SCISSOR_TEST);
@ -528,13 +427,6 @@ void ScanTarget::draw(bool synchronous, int output_width, int output_height) {
glBindVertexArray(scan_vertex_array_);
input_shader_->bind();
glDrawArraysInstanced(GL_TRIANGLE_STRIP, 0, 4, GLsizei(new_scans));
// If there are any further pipeline stages, apply them.
for(auto &stage: pipeline_stages_) {
stage.target.bind_framebuffer();
stage.shader->bind();
glDrawArraysInstanced(GL_TRIANGLE_STRIP, 0, 4, GLsizei(new_scans));
}
}
// Ensure the accumulation buffer is properly sized.
@ -545,7 +437,7 @@ void ScanTarget::draw(bool synchronous, int output_width, int output_height) {
GLsizei(proportional_width),
GLsizei(output_height),
AccumulationTextureUnit,
GL_LINEAR,
GL_NEAREST,
true));
if(accumulation_texture_) {
new_framebuffer->bind_framebuffer();
@ -576,7 +468,7 @@ void ScanTarget::draw(bool synchronous, int output_width, int output_height) {
// Enable blending and stenciling, and ensure spans increment the stencil buffer.
glEnable(GL_BLEND);
glEnable(GL_STENCIL_TEST);
glStencilFunc(GL_EQUAL, 0, GLuint(-1));
glStencilFunc(GL_EQUAL, 0, GLuint(~0));
glStencilOp(GL_KEEP, GL_KEEP, GL_INCR);
// Prepare to output lines.

View File

@ -53,7 +53,9 @@ class ScanTarget: public Outputs::Display::ScanTarget {
uint8_t *begin_data(size_t required_length, size_t required_alignment) override;
void end_data(size_t actual_length) override;
void submit() override;
void announce(Event event, uint16_t x, uint16_t y) override;
void announce(Event event, bool is_visible, const Outputs::Display::ScanTarget::Scan::EndPoint &location, uint8_t colour_burst_amplitude) override;
bool output_is_visible_ = false;
// Extends the definition of a Scan to include two extra fields,
// relevant to the way that this scan target processes video.
@ -93,8 +95,11 @@ class ScanTarget: public Outputs::Display::ScanTarget {
struct Line {
struct EndPoint {
uint16_t x, y;
uint16_t cycles_since_end_of_horizontal_retrace;
int16_t composite_angle;
} end_points[2];
uint16_t line;
uint8_t composite_amplitude;
};
struct LineMetadata {
bool is_first_in_frame;
@ -105,8 +110,7 @@ class ScanTarget: public Outputs::Display::ScanTarget {
// Contains the first composition of scans into lines;
// they're accumulated prior to output to allow for continuous
// application of any necessary conversions — e.g. composite processing —
// which happen progressively from here to the RGB texture.
// application of any necessary conversions — e.g. composite processing.
TextureTarget unprocessed_line_texture_;
// Scans are accumulated to the accumulation texture; the full-display
@ -153,24 +157,11 @@ class ScanTarget: public Outputs::Display::ScanTarget {
Line
};
/*!
@returns A string containing GLSL code describing the standard set of
@c in and @c uniform variables to bind to the relevant struct
from [...]OpenGL::ScanTarget and a vertex function to provide
the standard varyings.
*/
static std::string glsl_globals(ShaderType type);
/*!
*/
static std::string glsl_default_vertex_shader(ShaderType type);
/*!
Calls @c target.enable_vertex_attribute_with_pointer to attach all
globals for shaders of @c type to @c target.
*/
static void enable_vertex_attributes(ShaderType type, Shader &target);
static std::vector<Shader::AttributeBinding> attribute_bindings(ShaderType type);
void set_uniforms(ShaderType type, Shader &target);
GLsync fence_ = nullptr;
@ -180,22 +171,17 @@ class ScanTarget: public Outputs::Display::ScanTarget {
std::unique_ptr<Shader> input_shader_;
std::unique_ptr<Shader> output_shader_;
static std::unique_ptr<Shader> input_shader(InputDataType input_data_type, DisplayType display_type);
static std::unique_ptr<Shader> composite_to_svideo_shader(int colour_cycle_numerator, int colour_cycle_denominator, int processing_width);
static std::unique_ptr<Shader> svideo_to_rgb_shader(int colour_cycle_numerator, int colour_cycle_denominator, int processing_width);
static SignalProcessing::FIRFilter colour_filter(int colour_cycle_numerator, int colour_cycle_denominator, int processing_width, float low_cutoff, float high_cutoff);
struct PipelineStage {
PipelineStage(Shader *shader, GLenum texture_unit, GLint magnification_filter) :
shader(shader),
target(LineBufferWidth, LineBufferHeight, texture_unit, magnification_filter, false) {}
std::unique_ptr<Shader> shader;
TextureTarget target;
};
// A list is used here to avoid requiring a copy constructor on PipelineStage.
std::list<PipelineStage> pipeline_stages_;
/*!
Produces a shader that composes fragment of the input stream to a single buffer,
normalising the data into one of four forms: RGB, 8-bit luminance,
phase-linked luminance or luminance+phase offset.
*/
static std::unique_ptr<Shader> composition_shader(InputDataType input_data_type);
/*!
Produces a shader that reads from a composition buffer and converts to host
output RGB, decoding composite or S-Video as necessary.
*/
static std::unique_ptr<Shader> conversion_shader(InputDataType input_data_type, DisplayType display_type, ColourSpace colour_space);
};
}

View File

@ -10,160 +10,14 @@
using namespace Outputs::Display::OpenGL;
std::string ScanTarget::glsl_globals(ShaderType type) {
switch(type) {
case ShaderType::InputScan:
case ShaderType::ProcessedScan:
return
"#version 150\n"
"uniform vec2 scale;"
"uniform mat3 lumaChromaToRGB;"
"uniform mat3 rgbToLumaChroma;"
"uniform float rowHeight;"
"uniform float processingWidth;"
"in vec2 startPoint;"
"in float startDataX;"
"in float startCompositeAngle;"
"in vec2 endPoint;"
"in float endDataX;"
"in float endCompositeAngle;"
"in float dataY;"
"in float lineY;"
"in float compositeAmplitude;";
case ShaderType::Line:
return
"#version 150\n"
"uniform vec2 scale;"
"uniform float rowHeight;"
"uniform float processingWidth;"
"in vec2 startPoint;"
"in vec2 endPoint;"
"in float lineY;"
"uniform sampler2D textureName;"
"uniform vec2 origin;"
"uniform vec2 size;";
}
}
std::vector<Shader::AttributeBinding> ScanTarget::attribute_bindings(ShaderType type) {
switch(type) {
case ShaderType::InputScan:
case ShaderType::ProcessedScan:
return {
{"startPoint", 0},
{"startDataX", 1},
{"startCompositeAngle", 2},
{"endPoint", 3},
{"endDataX", 4},
{"endCompositeAngle", 5},
{"dataY", 6},
{"lineY", 7},
{"compositeAmplitude", 8},
};
case ShaderType::Line:
return {
{"startPoint", 0},
{"endPoint", 1},
{"lineY", 2},
};
}
}
std::string ScanTarget::glsl_default_vertex_shader(ShaderType type) {
switch(type) {
case ShaderType::InputScan:
case ShaderType::ProcessedScan: {
std::string result;
if(type == ShaderType::InputScan) {
result +=
"out vec2 textureCoordinate;"
"uniform usampler2D textureName;";
} else {
result +=
"out vec2 textureCoordinates[15];"
"uniform sampler2D textureName;"
"uniform float edgeExpansion;";
}
result +=
"out float compositeAngle;"
"out float oneOverCompositeAmplitude;"
"void main(void) {"
"float lateral = float(gl_VertexID & 1);"
"float longitudinal = float((gl_VertexID & 2) >> 1);"
"compositeAngle = (mix(startCompositeAngle, endCompositeAngle, lateral) / 32.0) * 3.141592654;"
"oneOverCompositeAmplitude = mix(0.0, 255.0 / compositeAmplitude, step(0.01, compositeAmplitude));";
if(type == ShaderType::InputScan) {
result +=
"textureCoordinate = vec2(mix(startDataX, endDataX, lateral), dataY + 0.5) / textureSize(textureName, 0);"
"vec2 eyePosition = vec2(mix(startPoint.x, endPoint.x, lateral) * processingWidth, lineY + longitudinal) / vec2(scale.x, 2048.0);";
} else {
result +=
"vec2 sourcePosition = vec2(mix(startPoint.x, endPoint.x, lateral) * processingWidth, lineY + 0.5);"
"vec2 eyePosition = (sourcePosition + vec2(0.0, longitudinal - 0.5)) / vec2(scale.x, 2048.0);"
"sourcePosition /= vec2(scale.x, 2048.0);"
"vec2 expansion = vec2(2.0*lateral*edgeExpansion - edgeExpansion, 0.0) / textureSize(textureName, 0);"
"eyePosition = eyePosition + expansion;"
"sourcePosition = sourcePosition + expansion;"
"textureCoordinates[0] = sourcePosition + vec2(-7.0, 0.0) / textureSize(textureName, 0);"
"textureCoordinates[1] = sourcePosition + vec2(-6.0, 0.0) / textureSize(textureName, 0);"
"textureCoordinates[2] = sourcePosition + vec2(-5.0, 0.0) / textureSize(textureName, 0);"
"textureCoordinates[3] = sourcePosition + vec2(-4.0, 0.0) / textureSize(textureName, 0);"
"textureCoordinates[4] = sourcePosition + vec2(-3.0, 0.0) / textureSize(textureName, 0);"
"textureCoordinates[5] = sourcePosition + vec2(-2.0, 0.0) / textureSize(textureName, 0);"
"textureCoordinates[6] = sourcePosition + vec2(-1.0, 0.0) / textureSize(textureName, 0);"
"textureCoordinates[7] = sourcePosition;"
"textureCoordinates[8] = sourcePosition + vec2(1.0, 0.0) / textureSize(textureName, 0);"
"textureCoordinates[9] = sourcePosition + vec2(2.0, 0.0) / textureSize(textureName, 0);"
"textureCoordinates[10] = sourcePosition + vec2(3.0, 0.0) / textureSize(textureName, 0);"
"textureCoordinates[11] = sourcePosition + vec2(4.0, 0.0) / textureSize(textureName, 0);"
"textureCoordinates[12] = sourcePosition + vec2(5.0, 0.0) / textureSize(textureName, 0);"
"textureCoordinates[13] = sourcePosition + vec2(6.0, 0.0) / textureSize(textureName, 0);"
"textureCoordinates[14] = sourcePosition + vec2(7.0, 0.0) / textureSize(textureName, 0);"
"eyePosition = eyePosition;";
}
return result +
"gl_Position = vec4(eyePosition*2.0 - vec2(1.0), 0.0, 1.0);"
"}";
}
case ShaderType::Line:
return
"out vec2 textureCoordinate;"
"void main(void) {"
"float lateral = float(gl_VertexID & 1);"
"float longitudinal = float((gl_VertexID & 2) >> 1);"
"textureCoordinate = vec2(lateral * processingWidth, lineY + 0.5) / vec2(1.0, textureSize(textureName, 0).y);"
"vec2 centrePoint = mix(startPoint, endPoint, lateral) / scale;"
"vec2 height = normalize(endPoint - startPoint).yx * (longitudinal - 0.5) * rowHeight;"
"vec2 eyePosition = vec2(-1.0, 1.0) + vec2(2.0, -2.0) * (((centrePoint + height) - origin) / size);"
"gl_Position = vec4(eyePosition, 0.0, 1.0);"
"}";
}
void Outputs::Display::OpenGL::ScanTarget::set_uniforms(ShaderType type, Shader &target) {
// Slightly over-amping rowHeight here is a cheap way to make sure that lines
// converge even allowing for the fact that they may not be spaced by exactly
// the expected distance. Cf. the stencil-powered logic for making sure all
// pixels are painted only exactly once per field.
target.set_uniform("rowHeight", GLfloat(1.05f / modals_.expected_vertical_lines));
target.set_uniform("scale", GLfloat(modals_.output_scale.x), GLfloat(modals_.output_scale.y));
target.set_uniform("phaseOffset", GLfloat(modals_.input_data_tweaks.phase_linked_luminance_offset));
}
void ScanTarget::enable_vertex_attributes(ShaderType type, Shader &target) {
@ -173,23 +27,18 @@ void ScanTarget::enable_vertex_attributes(ShaderType type, Shader &target) {
for(int c = 0; c < 2; ++c) {
const std::string prefix = c ? "end" : "start";
target.enable_vertex_attribute_with_pointer(
prefix + "Point",
2, GL_UNSIGNED_SHORT, GL_FALSE,
sizeof(Scan),
reinterpret_cast<void *>(offsetof(Scan, scan.end_points[c].x)),
1);
target.enable_vertex_attribute_with_pointer(
prefix + "DataX",
1, GL_UNSIGNED_SHORT, GL_FALSE,
sizeof(Scan),
reinterpret_cast<void *>(offsetof(Scan, scan.end_points[c].data_offset)),
1);
target.enable_vertex_attribute_with_pointer(
prefix + "CompositeAngle",
prefix + "Clock",
1, GL_UNSIGNED_SHORT, GL_FALSE,
sizeof(Scan),
reinterpret_cast<void *>(offsetof(Scan, scan.end_points[c].composite_angle)),
reinterpret_cast<void *>(offsetof(Scan, scan.end_points[c].cycles_since_end_of_horizontal_retrace)),
1);
}
@ -199,18 +48,13 @@ void ScanTarget::enable_vertex_attributes(ShaderType type, Shader &target) {
sizeof(Scan),
reinterpret_cast<void *>(offsetof(Scan, data_y)),
1);
target.enable_vertex_attribute_with_pointer(
"lineY",
1, GL_UNSIGNED_SHORT, GL_FALSE,
sizeof(Scan),
reinterpret_cast<void *>(offsetof(Scan, line)),
1);
target.enable_vertex_attribute_with_pointer(
"compositeAmplitude",
1, GL_UNSIGNED_BYTE, GL_FALSE,
sizeof(Scan),
reinterpret_cast<void *>(offsetof(Scan, scan.composite_amplitude)),
1);
break;
case ShaderType::Line:
@ -223,6 +67,20 @@ void ScanTarget::enable_vertex_attributes(ShaderType type, Shader &target) {
sizeof(Line),
reinterpret_cast<void *>(offsetof(Line, end_points[c].x)),
1);
target.enable_vertex_attribute_with_pointer(
prefix + "Clock",
1, GL_UNSIGNED_SHORT, GL_FALSE,
sizeof(Line),
reinterpret_cast<void *>(offsetof(Line, end_points[c].cycles_since_end_of_horizontal_retrace)),
1);
target.enable_vertex_attribute_with_pointer(
prefix + "CompositeAngle",
1, GL_UNSIGNED_SHORT, GL_FALSE,
sizeof(Line),
reinterpret_cast<void *>(offsetof(Line, end_points[c].composite_angle)),
1);
}
target.enable_vertex_attribute_with_pointer(
@ -231,243 +89,407 @@ void ScanTarget::enable_vertex_attributes(ShaderType type, Shader &target) {
sizeof(Line),
reinterpret_cast<void *>(offsetof(Line, line)),
1);
target.enable_vertex_attribute_with_pointer(
"lineCompositeAmplitude",
1, GL_UNSIGNED_BYTE, GL_FALSE,
sizeof(Line),
reinterpret_cast<void *>(offsetof(Line, composite_amplitude)),
1);
break;
}
}
std::unique_ptr<Shader> ScanTarget::input_shader(InputDataType input_data_type, DisplayType display_type) {
std::unique_ptr<Shader> ScanTarget::composition_shader(InputDataType input_data_type) {
const std::string vertex_shader =
"#version 150\n"
"in float startDataX;"
"in float startClock;"
"in float endDataX;"
"in float endClock;"
"in float dataY;"
"in float lineY;"
"out vec2 textureCoordinate;"
"uniform usampler2D textureName;"
"void main(void) {"
"float lateral = float(gl_VertexID & 1);"
"float longitudinal = float((gl_VertexID & 2) >> 1);"
"textureCoordinate = vec2(mix(startDataX, endDataX, lateral), dataY + 0.5) / textureSize(textureName, 0);"
"vec2 eyePosition = vec2(mix(startClock, endClock, lateral), lineY + longitudinal) / vec2(2048.0, 2048.0);"
"gl_Position = vec4(eyePosition*2.0 - vec2(1.0), 0.0, 1.0);"
"}";
std::string fragment_shader =
"#version 150\n"
"out vec3 fragColour;"
"out vec4 fragColour;"
"in vec2 textureCoordinate;"
"in float compositeAngle;"
"in float oneOverCompositeAmplitude;"
"uniform mat3 lumaChromaToRGB;"
"uniform mat3 rgbToLumaChroma;"
"uniform usampler2D textureName;"
"uniform float phaseOffset;"
"void main(void) {";
DisplayType computed_display_type;
switch(input_data_type) {
case InputDataType::Luminance1:
computed_display_type = DisplayType::CompositeMonochrome;
fragment_shader += "fragColour = texture(textureName, textureCoordinate).rrr;";
if(computed_display_type != display_type) {
fragment_shader += "fragColour = clamp(fragColour, 0.0, 1.0);";
}
fragment_shader += "fragColour = texture(textureName, textureCoordinate).rrrr;";
break;
case InputDataType::Luminance8:
computed_display_type = DisplayType::CompositeMonochrome;
fragment_shader += "fragColour = vec3(texture(textureName, textureCoordinate).r / 255.0);";
fragment_shader += "fragColour = texture(textureName, textureCoordinate).rrrr / vec4(255.0);";
break;
case InputDataType::PhaseLinkedLuminance8:
computed_display_type = DisplayType::CompositeMonochrome;
fragment_shader +=
"uint iPhase = uint((compositeAngle * 2.0 / 3.141592654) + phaseOffset*4.0) & 3u;"
"fragColour = vec3(texture(textureName, textureCoordinate)[iPhase] / 255.0);";
break;
case InputDataType::Luminance8Phase8:
computed_display_type = DisplayType::SVideo;
fragment_shader +=
"vec2 yc = texture(textureName, textureCoordinate).rg / vec2(255.0);"
"float phaseOffset = 3.141592654 * 2.0 * 2.0 * yc.y;"
"float rawChroma = step(yc.y, 0.75) * cos(compositeAngle + phaseOffset);"
"fragColour = vec3(yc.x, 0.5 + rawChroma*0.5, 0.0);";
case InputDataType::Red8Green8Blue8:
fragment_shader += "fragColour = texture(textureName, textureCoordinate) / vec4(255.0);";
break;
case InputDataType::Red1Green1Blue1:
computed_display_type = DisplayType::RGB;
fragment_shader +=
"uint textureValue = texture(textureName, textureCoordinate).r;"
"fragColour = uvec3(textureValue) & uvec3(4u, 2u, 1u);";
if(computed_display_type != display_type) {
fragment_shader += "fragColour = clamp(fragColour, 0.0, 1.0);";
}
fragment_shader += "fragColour = vec4(texture(textureName, textureCoordinate).rrr & uvec3(4u, 2u, 1u), 1.0);";
break;
case InputDataType::Red2Green2Blue2:
computed_display_type = DisplayType::RGB;
fragment_shader +=
"uint textureValue = texture(textureName, textureCoordinate).r;"
"fragColour = vec3(float((textureValue >> 4) & 3u), float((textureValue >> 2) & 3u), float(textureValue & 3u)) / 3.0;";
"fragColour = vec4(float((textureValue >> 4) & 3u), float((textureValue >> 2) & 3u), float(textureValue & 3u), 3.0) / 3.0;";
break;
case InputDataType::Red4Green4Blue4:
computed_display_type = DisplayType::RGB;
fragment_shader +=
"uvec2 textureValue = texture(textureName, textureCoordinate).rg;"
"fragColour = vec3(float(textureValue.r) / 15.0, float(textureValue.g & 240u) / 240.0, float(textureValue.g & 15u) / 15.0);";
"fragColour = vec4(float(textureValue.r) / 15.0, float(textureValue.g & 240u) / 240.0, float(textureValue.g & 15u) / 15.0, 1.0);";
break;
case InputDataType::Red8Green8Blue8:
computed_display_type = DisplayType::RGB;
fragment_shader += "fragColour = texture(textureName, textureCoordinate).rgb / vec3(255.0);";
break;
}
// If the input type is RGB but the output type isn't then
// there'll definitely be an RGB to SVideo step.
if(computed_display_type == DisplayType::RGB && display_type != DisplayType::RGB) {
fragment_shader +=
"vec3 composite_colour = rgbToLumaChroma * fragColour;"
"vec2 quadrature = vec2(cos(compositeAngle), sin(compositeAngle));"
"fragColour = vec3(composite_colour.r, 0.5 + dot(quadrature, composite_colour.gb)*0.5, 0.0);";
}
// If the output type is SVideo, throw in an attempt to separate the two chrominance
// channels here.
if(display_type == DisplayType::SVideo) {
if(computed_display_type != DisplayType::RGB) {
fragment_shader +=
"vec2 quadrature = vec2(cos(compositeAngle), sin(compositeAngle));";
}
fragment_shader +=
"vec2 chroma = (((fragColour.y - 0.5)*2.0) * quadrature)*0.5 + vec2(0.5);"
"fragColour = vec3(fragColour.x, chroma);";
}
// Add an SVideo to composite step if necessary.
if(
(display_type == DisplayType::CompositeMonochrome || display_type == DisplayType::CompositeColour) &&
computed_display_type != DisplayType::CompositeMonochrome
) {
fragment_shader += "fragColour = vec3(mix(fragColour.r, 2.0*(fragColour.g - 0.5), 1.0 / oneOverCompositeAmplitude));";
}
return std::unique_ptr<Shader>(new Shader(
glsl_globals(ShaderType::InputScan) + glsl_default_vertex_shader(ShaderType::InputScan),
vertex_shader,
fragment_shader + "}",
attribute_bindings(ShaderType::InputScan)
{
{"startDataX", 0},
{"startClock", 1},
{"endDataX", 2},
{"endClock", 3},
{"dataY", 4},
{"lineY", 5},
}
));
}
SignalProcessing::FIRFilter ScanTarget::colour_filter(int colour_cycle_numerator, int colour_cycle_denominator, int processing_width, float low_cutoff, float high_cutoff) {
const float cycles_per_expanded_line = (float(colour_cycle_numerator) / float(colour_cycle_denominator)) / (float(processing_width) / float(LineBufferWidth));
return SignalProcessing::FIRFilter(15, float(LineBufferWidth), cycles_per_expanded_line * low_cutoff, cycles_per_expanded_line * high_cutoff);
}
std::unique_ptr<Shader> ScanTarget::svideo_to_rgb_shader(int colour_cycle_numerator, int colour_cycle_denominator, int processing_width) {
/*
Composite to S-Video conversion is achieved by filtering the input signal to obtain luminance, and then subtracting that
from the original to get chrominance.
(Colour cycle numerator)/(Colour cycle denominator) gives the number of colour cycles in (processing_width / LineBufferWidth),
there'll be at least four samples per colour clock and in practice at most just a shade more than 9.
*/
auto shader = std::unique_ptr<Shader>(new Shader(
glsl_globals(ShaderType::ProcessedScan) + glsl_default_vertex_shader(ShaderType::ProcessedScan),
std::unique_ptr<Shader> ScanTarget::conversion_shader(InputDataType input_data_type, DisplayType display_type, ColourSpace colour_space) {
// Compose a vertex shader. If the display type is RGB, generate just the proper
// geometry position, plus a solitary textureCoordinate.
//
// If the display type is anything other than RGB, also produce composite
// angle and 1/composite amplitude as outputs.
//
// If the display type is composite colour, generate four textureCoordinates,
// spanning a range of -135, -45, +45, +135 degrees.
//
// If the display type is S-Video, generate three textureCoordinates, at
// -45, 0, +45.
std::string vertex_shader =
"#version 150\n"
"in vec2 textureCoordinates[15];"
"uniform vec4 chromaWeights[4];"
"uniform vec4 lumaWeights[4];"
"uniform vec2 scale;"
"uniform float rowHeight;"
"in vec2 startPoint;"
"in vec2 endPoint;"
"in float startClock;"
"in float startCompositeAngle;"
"in float endClock;"
"in float endCompositeAngle;"
"in float lineY;"
"in float lineCompositeAmplitude;"
"uniform sampler2D textureName;"
"uniform mat3 lumaChromaToRGB;"
"uniform vec2 origin;"
"uniform vec2 size;";
"out vec3 fragColour;"
"void main() {"
"vec3 samples[15] = vec3[15]("
"texture(textureName, textureCoordinates[0]).rgb,"
"texture(textureName, textureCoordinates[1]).rgb,"
"texture(textureName, textureCoordinates[2]).rgb,"
"texture(textureName, textureCoordinates[3]).rgb,"
"texture(textureName, textureCoordinates[4]).rgb,"
"texture(textureName, textureCoordinates[5]).rgb,"
"texture(textureName, textureCoordinates[6]).rgb,"
"texture(textureName, textureCoordinates[7]).rgb,"
"texture(textureName, textureCoordinates[8]).rgb,"
"texture(textureName, textureCoordinates[9]).rgb,"
"texture(textureName, textureCoordinates[10]).rgb,"
"texture(textureName, textureCoordinates[11]).rgb,"
"texture(textureName, textureCoordinates[12]).rgb,"
"texture(textureName, textureCoordinates[13]).rgb,"
"texture(textureName, textureCoordinates[14]).rgb"
");"
"vec4 samples0[4] = vec4[4]("
"vec4(samples[0].r, samples[1].r, samples[2].r, samples[3].r),"
"vec4(samples[4].r, samples[5].r, samples[6].r, samples[7].r),"
"vec4(samples[8].r, samples[9].r, samples[10].r, samples[11].r),"
"vec4(samples[12].r, samples[13].r, samples[14].r, 0.0)"
");"
"vec4 samples1[4] = vec4[4]("
"vec4(samples[0].g, samples[1].g, samples[2].g, samples[3].g),"
"vec4(samples[4].g, samples[5].g, samples[6].g, samples[7].g),"
"vec4(samples[8].g, samples[9].g, samples[10].g, samples[11].g),"
"vec4(samples[12].g, samples[13].g, samples[14].g, 0.0)"
");"
"vec4 samples2[4] = vec4[4]("
"vec4(samples[0].b, samples[1].b, samples[2].b, samples[3].b),"
"vec4(samples[4].b, samples[5].b, samples[6].b, samples[7].b),"
"vec4(samples[8].b, samples[9].b, samples[10].b, samples[11].b),"
"vec4(samples[12].b, samples[13].b, samples[14].b, 0.0)"
");"
"float channel0 = dot(lumaWeights[0], samples0[0]) + dot(lumaWeights[1], samples0[1]) + dot(lumaWeights[2], samples0[2]) + dot(lumaWeights[3], samples0[3]);"
"float channel1 = dot(chromaWeights[0], samples1[0]) + dot(chromaWeights[1], samples1[1]) + dot(chromaWeights[2], samples1[2]) + dot(chromaWeights[3], samples1[3]);"
"float channel2 = dot(chromaWeights[0], samples2[0]) + dot(chromaWeights[1], samples2[1]) + dot(chromaWeights[2], samples2[2]) + dot(chromaWeights[3], samples2[3]);"
"vec2 chroma = vec2(channel1, channel2)*2.0 - vec2(1.0);"
"fragColour = lumaChromaToRGB * vec3(channel0, chroma);"
"}",
attribute_bindings(ShaderType::ProcessedScan)
));
auto chroma_coefficients = colour_filter(colour_cycle_numerator, colour_cycle_denominator, processing_width, 0.0f, 0.25f).get_coefficients();
chroma_coefficients.push_back(0.0f);
shader->set_uniform("chromaWeights", 4, 4, chroma_coefficients.data());
auto luma_coefficients = colour_filter(colour_cycle_numerator, colour_cycle_denominator, processing_width, 0.0f, 0.15f).get_coefficients();
luma_coefficients.push_back(0.0f);
shader->set_uniform("lumaWeights", 4, 4, luma_coefficients.data());
shader->set_uniform("edgeExpansion", 0);
return shader;
}
std::unique_ptr<Shader> ScanTarget::composite_to_svideo_shader(int colour_cycle_numerator, int colour_cycle_denominator, int processing_width) {
auto shader = std::unique_ptr<Shader>(new Shader(
glsl_globals(ShaderType::ProcessedScan) + glsl_default_vertex_shader(ShaderType::ProcessedScan),
std::string fragment_shader =
"#version 150\n"
"in vec2 textureCoordinates[15];"
"in float compositeAngle;"
"in float oneOverCompositeAmplitude;"
"uniform vec4 lumaWeights[4];"
"uniform sampler2D textureName;"
"out vec4 fragColour;";
"out vec3 fragColour;"
"void main() {"
"vec4 samples[4] = vec4[4]("
"vec4(texture(textureName, textureCoordinates[0]).r, texture(textureName, textureCoordinates[1]).r, texture(textureName, textureCoordinates[2]).r, texture(textureName, textureCoordinates[3]).r),"
"vec4(texture(textureName, textureCoordinates[4]).r, texture(textureName, textureCoordinates[5]).r, texture(textureName, textureCoordinates[6]).r, texture(textureName, textureCoordinates[7]).r),"
"vec4(texture(textureName, textureCoordinates[8]).r, texture(textureName, textureCoordinates[9]).r, texture(textureName, textureCoordinates[10]).r, texture(textureName, textureCoordinates[11]).r),"
"vec4(texture(textureName, textureCoordinates[12]).r, texture(textureName, textureCoordinates[13]).r, texture(textureName, textureCoordinates[14]).r, 0.0)"
");"
"float luma = dot(lumaWeights[0], samples[0]) + dot(lumaWeights[1], samples[1]) + dot(lumaWeights[2], samples[2]) + dot(lumaWeights[3], samples[3]);"
"vec2 quadrature = vec2(cos(compositeAngle), sin(compositeAngle));"
"vec2 chroma = ((samples[1].a - luma) * oneOverCompositeAmplitude)*quadrature;"
"fragColour = vec3(samples[1].a, chroma*0.5 + vec2(0.5));"
"}",
attribute_bindings(ShaderType::ProcessedScan)
));
if(display_type != DisplayType::RGB) {
vertex_shader +=
"out float compositeAngle;"
"out float compositeAmplitude;"
"out float oneOverCompositeAmplitude;";
fragment_shader +=
"in float compositeAngle;"
"in float compositeAmplitude;"
"in float oneOverCompositeAmplitude;";
}
auto luma_low = colour_filter(colour_cycle_numerator, colour_cycle_denominator, processing_width, 0.0f, 0.9f);
auto luma_coefficients = luma_low.get_coefficients();
luma_coefficients.push_back(0.0f);
shader->set_uniform("lumaWeights", 4, 4, luma_coefficients.data());
switch(display_type){
case DisplayType::RGB:
case DisplayType::CompositeMonochrome:
vertex_shader += "out vec2 textureCoordinate;";
fragment_shader += "in vec2 textureCoordinate;";
break;
shader->set_uniform("edgeExpansion", 0);
case DisplayType::CompositeColour:
case DisplayType::SVideo:
vertex_shader += "out vec2 textureCoordinates[4];";
fragment_shader += "in vec2 textureCoordinates[4];";
break;
}
return shader;
// Add the code to generate a proper output position; this applies to all display types.
vertex_shader +=
"void main(void) {"
"float lateral = float(gl_VertexID & 1);"
"float longitudinal = float((gl_VertexID & 2) >> 1);"
"vec2 centrePoint = mix(startPoint, endPoint, lateral) / scale;"
"vec2 height = normalize(endPoint - startPoint).yx * (longitudinal - 0.5) * rowHeight;"
"vec2 eyePosition = vec2(-1.0, 1.0) + vec2(2.0, -2.0) * (((centrePoint + height) - origin) / size);"
"gl_Position = vec4(eyePosition, 0.0, 1.0);";
// For everything other than RGB, calculate the two composite outputs.
if(display_type != DisplayType::RGB) {
vertex_shader +=
"compositeAngle = (mix(startCompositeAngle, endCompositeAngle, lateral) / 32.0) * 3.141592654;"
"compositeAmplitude = lineCompositeAmplitude / 255.0;"
"oneOverCompositeAmplitude = mix(0.0, 255.0 / lineCompositeAmplitude, step(0.01, lineCompositeAmplitude));";
}
// For RGB and monochrome composite, generate the single texture coordinate; otherwise generate either three
// or four depending on the type of decoding to apply.
switch(display_type){
case DisplayType::RGB:
case DisplayType::CompositeMonochrome:
vertex_shader +=
"textureCoordinate = vec2(mix(startClock, endClock, lateral), lineY + 0.5) / textureSize(textureName, 0);";
break;
case DisplayType::CompositeColour:
case DisplayType::SVideo:
vertex_shader +=
"float centreClock = mix(startClock, endClock, lateral);"
"float clocksPerAngle = (endClock - startClock) / (abs(endCompositeAngle - startCompositeAngle) / 64.0);"
"textureCoordinates[0] = vec2(centreClock - 0.375*clocksPerAngle, lineY + 0.5) / textureSize(textureName, 0);"
"textureCoordinates[1] = vec2(centreClock - 0.125*clocksPerAngle, lineY + 0.5) / textureSize(textureName, 0);"
"textureCoordinates[2] = vec2(centreClock + 0.125*clocksPerAngle, lineY + 0.5) / textureSize(textureName, 0);"
"textureCoordinates[3] = vec2(centreClock + 0.375*clocksPerAngle, lineY + 0.5) / textureSize(textureName, 0);";
break;
}
vertex_shader += "}";
// Compose a fragment shader.
//
// For an RGB display ... [TODO]
if(display_type != DisplayType::RGB) {
fragment_shader +=
"uniform mat3 lumaChromaToRGB;"
"uniform mat3 rgbToLumaChroma;";
}
if(display_type == DisplayType::SVideo) {
fragment_shader +=
"vec2 svideo_sample(vec2 coordinate, float angle) {";
switch(input_data_type) {
case InputDataType::Luminance1:
case InputDataType::Luminance8:
// Easy, just copy across.
fragment_shader += "return vec2(texture(textureName, coordinate).r, 0.0);";
break;
case InputDataType::PhaseLinkedLuminance8:
fragment_shader +=
"uint iPhase = uint((angle * 2.0 / 3.141592654) ) & 3u;" // + phaseOffset*4.0
"return vec2(texture(textureName, coordinate)[iPhase], 0.0);";
break;
case InputDataType::Luminance8Phase8:
fragment_shader +=
"vec2 yc = texture(textureName, coordinate).rg;"
"float phaseOffset = 3.141592654 * 2.0 * 2.0 * yc.y;"
"float rawChroma = step(yc.y, 0.75) * cos(angle + phaseOffset);"
"return vec2(yc.x, rawChroma);";
break;
case InputDataType::Red1Green1Blue1:
case InputDataType::Red2Green2Blue2:
case InputDataType::Red4Green4Blue4:
case InputDataType::Red8Green8Blue8:
fragment_shader +=
"vec3 colour = rgbToLumaChroma * texture(textureName, coordinate).rgb;"
"vec2 quadrature = vec2(cos(angle), sin(angle));"
"return vec2(colour.r, dot(quadrature, colour.gb));";
break;
}
fragment_shader += "}";
}
if(display_type == DisplayType::CompositeMonochrome || display_type == DisplayType::CompositeColour) {
fragment_shader +=
"float composite_sample(vec2 coordinate, float angle) {";
switch(input_data_type) {
case InputDataType::Luminance1:
case InputDataType::Luminance8:
// Easy, just copy across.
fragment_shader += "return texture(textureName, coordinate).r;";
break;
case InputDataType::PhaseLinkedLuminance8:
fragment_shader +=
"uint iPhase = uint((angle * 2.0 / 3.141592654) ) & 3u;" // + phaseOffset*4.0
"return texture(textureName, coordinate)[iPhase];";
break;
case InputDataType::Luminance8Phase8:
fragment_shader +=
"vec2 yc = texture(textureName, coordinate).rg;"
"float phaseOffset = 3.141592654 * 2.0 * 2.0 * yc.y;"
"float rawChroma = step(yc.y, 0.75) * cos(angle + phaseOffset);"
"return mix(yc.x, rawChroma, compositeAmplitude);";
break;
case InputDataType::Red1Green1Blue1:
case InputDataType::Red2Green2Blue2:
case InputDataType::Red4Green4Blue4:
case InputDataType::Red8Green8Blue8:
fragment_shader +=
"vec3 colour = rgbToLumaChroma * texture(textureName, coordinate).rgb;"
"vec2 quadrature = vec2(cos(angle), sin(angle));"
"return mix(colour.r, dot(quadrature, colour.gb), compositeAmplitude);";
break;
}
fragment_shader += "}";
}
fragment_shader +=
"void main(void) {"
"vec3 fragColour3;";
if(display_type == DisplayType::CompositeColour || display_type == DisplayType::SVideo) {
fragment_shader +=
// Figure out the four composite angles.
"vec4 angles = vec4("
"compositeAngle - 2.356194490192345,"
"compositeAngle - 0.785398163397448,"
"compositeAngle + 0.785398163397448,"
"compositeAngle + 2.356194490192345"
");";
}
switch(display_type) {
case DisplayType::RGB:
fragment_shader += "fragColour3 = texture(textureName, textureCoordinate).rgb;";
break;
case DisplayType::SVideo:
fragment_shader +=
// Sample four times over, at proper angle offsets.
"vec2 samples[4] = vec2[4]("
"svideo_sample(textureCoordinates[0], angles[0]),"
"svideo_sample(textureCoordinates[1], angles[1]),"
"svideo_sample(textureCoordinates[2], angles[2]),"
"svideo_sample(textureCoordinates[3], angles[3])"
");"
"vec4 chrominances = vec4("
"samples[0].y,"
"samples[1].y,"
"samples[2].y,"
"samples[3].y"
");"
// Split and average chrominance.
"vec2 channels = vec2("
"dot(cos(angles), chrominances),"
"dot(sin(angles), chrominances)"
") * vec2(0.25);"
// Apply a colour space conversion to get RGB.
"fragColour3 = lumaChromaToRGB * vec3(samples[1].x, channels);";
break;
case DisplayType::CompositeColour:
fragment_shader +=
// Sample four times over, at proper angle offsets.
"vec4 samples = vec4("
"composite_sample(textureCoordinates[0], angles[0]),"
"composite_sample(textureCoordinates[1], angles[1]),"
"composite_sample(textureCoordinates[2], angles[2]),"
"composite_sample(textureCoordinates[3], angles[3])"
");"
// Take the average to calculate luminance, then subtract that from all four samples to
// give chrominance.
"float luminance = dot(samples, vec4(0.25 / (1.0 - compositeAmplitude)));"
"samples -= vec4(luminance);"
// Split and average chrominance.
"vec2 channels = vec2("
"dot(cos(angles), samples),"
"dot(sin(angles), samples)"
") * vec2(0.125 * oneOverCompositeAmplitude);"
// Apply a colour space conversion to get RGB.
"fragColour3 = lumaChromaToRGB * vec3(luminance, channels);";
break;
case DisplayType::CompositeMonochrome:
fragment_shader += "fragColour3 = vec3(composite_sample(textureCoordinate, compositeAngle));";
break;
}
// TODO gamma and range corrections.
fragment_shader +=
"fragColour = vec4(fragColour3, 0.64);"
"}";
const auto shader = new Shader(
vertex_shader,
fragment_shader,
{
{"startPoint", 0},
{"endPoint", 1},
{"startClock", 2},
{"endClock", 3},
{"lineY", 4},
{"lineCompositeAmplitude", 5},
{"startCompositeAngle", 6},
{"endCompositeAngle", 7},
}
);
// If this isn't an RGB or composite colour shader, set the proper colour space.
if(display_type != DisplayType::RGB) {
switch(colour_space) {
case ColourSpace::YIQ: {
const GLfloat rgbToYIQ[] = {0.299f, 0.596f, 0.211f, 0.587f, -0.274f, -0.523f, 0.114f, -0.322f, 0.312f};
const GLfloat yiqToRGB[] = {1.0f, 1.0f, 1.0f, 0.956f, -0.272f, -1.106f, 0.621f, -0.647f, 1.703f};
shader->set_uniform_matrix("lumaChromaToRGB", 3, false, yiqToRGB);
shader->set_uniform_matrix("rgbToLumaChroma", 3, false, rgbToYIQ);
} break;
case ColourSpace::YUV: {
const GLfloat rgbToYUV[] = {0.299f, -0.14713f, 0.615f, 0.587f, -0.28886f, -0.51499f, 0.114f, 0.436f, -0.10001f};
const GLfloat yuvToRGB[] = {1.0f, 1.0f, 1.0f, 0.0f, -0.39465f, 2.03211f, 1.13983f, -0.58060f, 0.0f};
shader->set_uniform_matrix("lumaChromaToRGB", 3, false, yuvToRGB);
shader->set_uniform_matrix("rgbToLumaChroma", 3, false, rgbToYUV);
} break;
}
}
return std::unique_ptr<Shader>(shader);
}

View File

@ -228,6 +228,9 @@ struct ScanTarget {
///
/// It will produce undefined behaviour if signs differ on a single scan.
int16_t composite_angle;
/// Gives the number of cycles since the most recent horizontal retrace ended.
uint16_t cycles_since_end_of_horizontal_retrace;
} end_points[2];
/// For composite video, dictates the amplitude of the colour subcarrier as a proportion of
@ -284,8 +287,15 @@ struct ScanTarget {
EndVerticalRetrace,
};
/// Provides a hint that the named event has occurred.
virtual void announce(Event event, uint16_t x, uint16_t y) {}
/*!
Provides a hint that the named event has occurred.
@param event The event.
@param is_visible @c true if the output stream is visible immediately after this event; @c false otherwise.
@param location The location of the event.
@param composite_amplitude The amplitude of the colour burst on this line (0, if no colour burst was found).
*/
virtual void announce(Event event, bool is_visible, const Scan::EndPoint &location, uint8_t composite_amplitude) {}
};
/*!

View File

@ -19,6 +19,7 @@ using namespace Storage::Encodings::MFM;
class MFMEncoder: public Encoder {
public:
MFMEncoder(std::vector<bool> &target) : Encoder(target) {}
virtual ~MFMEncoder() {}
void add_byte(uint8_t input) {
crc_generator_.add(input);

View File

@ -45,6 +45,7 @@ std::shared_ptr<Storage::Disk::Track> GetFMTrackWithSectors(const std::vector<co
class Encoder {
public:
Encoder(std::vector<bool> &target);
virtual ~Encoder() {}
virtual void add_byte(uint8_t input) = 0;
virtual void add_index_address_mark() = 0;
virtual void add_ID_address_mark() = 0;

View File

@ -70,6 +70,8 @@ class HeadPosition {
*/
class Track {
public:
virtual ~Track() {}
/*!
Describes the location of a track, implementing < to allow for use as a set key.
*/