Takes a shot at adding RGB -> S-Video and composite conversion, for all RGB types.
This commit is contained in:
parent 637ec35d6a
commit a136a00a2f
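In outline: the new shader paths first map RGB into the active composite colour space using the fromRGB matrix added to Uniforms; S-Video output then keeps luminance and subcarrier-modulated chrominance as separate channels, while composite output crossfades the two into a single level weighted by the colour burst amplitude. Below is a minimal plain-C++ sketch of that arithmetic, mirroring the shader maths further down; the names used here are illustrative only and appear nowhere in the project.

#include <array>
#include <cmath>

// `colour` is the post-matrix value: [0] = luminance, [1] and [2] = the chroma pair.
using Colour = std::array<float, 3>;

struct SVideoLevel { float luminance, chrominance; };

// S-Video keeps luminance and chroma separate; chroma is projected onto the colour
// subcarrier and biased into the 0...1 range, as in svideoSample##name further down.
SVideoLevel encode_svideo(const Colour &colour, float colour_phase) {
	const float subcarrier[2] = {
		std::sin(colour_phase) * 0.5f + 0.5f,
		std::cos(colour_phase) * 0.5f + 0.5f
	};
	return { colour[0], colour[1]*subcarrier[0] + colour[2]*subcarrier[1] };
}

// Composite collapses everything to one level: a crossfade between luminance and
// subcarrier-modulated chroma, weighted by colour amplitude (i.e. Metal's mix()).
float encode_composite(const Colour &colour, float colour_phase, float colour_amplitude) {
	const float chroma = colour[1]*std::sin(colour_phase) + colour[2]*std::cos(colour_phase);
	return colour[0]*(1.0f - colour_amplitude) + chroma*colour_amplitude;
}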
@@ -18,6 +18,8 @@ struct Uniforms {
	int32_t scale[2];
	float lineWidth;
	float aspectRatioMultiplier;
	simd::float3x3 toRGB;
	simd::float3x3 fromRGB;
};

constexpr size_t NumBufferedScans = 2048;
@@ -104,6 +106,108 @@ using BufferingScanTarget = Outputs::Display::BufferingScanTarget;
	uniforms()->aspectRatioMultiplier = float((4.0 / 3.0) / (size.width / size.height));
}

- (void)setModals:(const Outputs::Display::ScanTarget::Modals &)modals view:(nonnull MTKView *)view {
	//
	// Populate uniforms.
	//
	uniforms()->scale[0] = modals.output_scale.x;
	uniforms()->scale[1] = modals.output_scale.y;
	uniforms()->lineWidth = 1.0f / modals.expected_vertical_lines;

	const auto toRGB = to_rgb_matrix(modals.composite_colour_space);
	uniforms()->toRGB = simd::float3x3(
		simd::float3{toRGB[0], toRGB[1], toRGB[2]},
		simd::float3{toRGB[3], toRGB[4], toRGB[5]},
		simd::float3{toRGB[6], toRGB[7], toRGB[8]}
	);

	const auto fromRGB = from_rgb_matrix(modals.composite_colour_space);
	uniforms()->fromRGB = simd::float3x3(
		simd::float3{fromRGB[0], fromRGB[1], fromRGB[2]},
		simd::float3{fromRGB[3], fromRGB[4], fromRGB[5]},
		simd::float3{fromRGB[6], fromRGB[7], fromRGB[8]}
	);

	//
	// Generate input texture.
	//
	MTLPixelFormat pixelFormat;
	_bytesPerInputPixel = size_for_data_type(modals.input_data_type);
	if(data_type_is_normalised(modals.input_data_type)) {
		switch(_bytesPerInputPixel) {
			default:
			case 1: pixelFormat = MTLPixelFormatR8Unorm;	break;
			case 2: pixelFormat = MTLPixelFormatRG8Unorm;	break;
			case 4: pixelFormat = MTLPixelFormatRGBA8Unorm;	break;
		}
	} else {
		switch(_bytesPerInputPixel) {
			default:
			case 1: pixelFormat = MTLPixelFormatR8Uint;	break;
			case 2: pixelFormat = MTLPixelFormatRG8Uint;	break;
			case 4: pixelFormat = MTLPixelFormatRGBA8Uint;	break;
		}
	}
	MTLTextureDescriptor *const textureDescriptor = [MTLTextureDescriptor
		texture2DDescriptorWithPixelFormat:pixelFormat
		width:BufferingScanTarget::WriteAreaWidth
		height:BufferingScanTarget::WriteAreaHeight
		mipmapped:NO];
	textureDescriptor.resourceOptions = SharedResourceOptionsTexture;

	// TODO: the call below is the only reason why this project now requires macOS 10.13; is it all that helpful versus just uploading each frame?
	const NSUInteger bytesPerRow = BufferingScanTarget::WriteAreaWidth * _bytesPerInputPixel;
	_writeAreaTexture = [_writeAreaBuffer
		newTextureWithDescriptor:textureDescriptor
		offset:0
		bytesPerRow:bytesPerRow];
	_totalTextureBytes = bytesPerRow * BufferingScanTarget::WriteAreaHeight;

	//
	// Generate pipeline.
	//
	id<MTLLibrary> library = [view.device newDefaultLibrary];
	MTLRenderPipelineDescriptor *pipelineDescriptor = [[MTLRenderPipelineDescriptor alloc] init];
	pipelineDescriptor.colorAttachments[0].pixelFormat = view.colorPixelFormat;

	// TODO: logic somewhat more complicated than this, probably
	pipelineDescriptor.vertexFunction = [library newFunctionWithName:@"scanToDisplay"];
	switch(modals.input_data_type) {
		case Outputs::Display::InputDataType::Luminance1:
			pipelineDescriptor.fragmentFunction = [library newFunctionWithName:@"sampleLuminance1"];
		break;
		case Outputs::Display::InputDataType::Luminance8:
			pipelineDescriptor.fragmentFunction = [library newFunctionWithName:@"sampleLuminance8"];
		break;
		case Outputs::Display::InputDataType::PhaseLinkedLuminance8:
			pipelineDescriptor.fragmentFunction = [library newFunctionWithName:@"samplePhaseLinkedLuminance8"];
		break;

		case Outputs::Display::InputDataType::Luminance8Phase8:
			pipelineDescriptor.fragmentFunction = [library newFunctionWithName:@"sampleLuminance8Phase8"];
		break;

		case Outputs::Display::InputDataType::Red1Green1Blue1:
			pipelineDescriptor.fragmentFunction = [library newFunctionWithName:@"sampleRed1Green1Blue1"];
		break;
		case Outputs::Display::InputDataType::Red2Green2Blue2:
			pipelineDescriptor.fragmentFunction = [library newFunctionWithName:@"sampleRed2Green2Blue2"];
		break;
		case Outputs::Display::InputDataType::Red4Green4Blue4:
			pipelineDescriptor.fragmentFunction = [library newFunctionWithName:@"sampleRed4Green4Blue4"];
		break;
		case Outputs::Display::InputDataType::Red8Green8Blue8:
			pipelineDescriptor.fragmentFunction = [library newFunctionWithName:@"sampleRed8Green8Blue8"];
		break;
	}

	_scanPipeline = [view.device newRenderPipelineStateWithDescriptor:pipelineDescriptor error:nil];
}
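On the TODO above: the newTextureWithDescriptor:offset:bytesPerRow: call ties the texture to the shared MTLBuffer and is what raises the deployment target to macOS 10.13. The "just uploading each frame" alternative it mentions would look roughly like the sketch below; this is not part of the commit, and writeAreaBytes stands in for wherever the write area would live in ordinary CPU memory.

// Hypothetical alternative, not what this commit does: create the texture straight
// from the device and copy the CPU-side write area into it once per frame.
id<MTLTexture> texture = [view.device newTextureWithDescriptor:textureDescriptor];

// ...then, per frame:
[texture replaceRegion:MTLRegionMake2D(0, 0, BufferingScanTarget::WriteAreaWidth, BufferingScanTarget::WriteAreaHeight)
		   mipmapLevel:0
			 withBytes:writeAreaBytes	// hypothetical pointer to the CPU-side pixel data
		   bytesPerRow:bytesPerRow];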
/*!
 @method drawInMTKView:
 @abstract Called on the delegate when it is asked to render into the view
@@ -112,82 +216,7 @@ using BufferingScanTarget = Outputs::Display::BufferingScanTarget;
- (void)drawInMTKView:(nonnull MTKView *)view {
	const Outputs::Display::ScanTarget::Modals *const newModals = _scanTarget.new_modals();
	if(newModals) {
		uniforms()->scale[0] = newModals->output_scale.x;
		uniforms()->scale[1] = newModals->output_scale.y;
		uniforms()->lineWidth = 1.0f / newModals->expected_vertical_lines;

		// TODO: obey the rest of the modals generally.

		// Generate the appropriate input texture.
		MTLPixelFormat pixelFormat;
		_bytesPerInputPixel = size_for_data_type(newModals->input_data_type);
		if(data_type_is_normalised(newModals->input_data_type)) {
			switch(_bytesPerInputPixel) {
				default:
				case 1: pixelFormat = MTLPixelFormatR8Unorm;	break;
				case 2: pixelFormat = MTLPixelFormatRG8Unorm;	break;
				case 4: pixelFormat = MTLPixelFormatRGBA8Unorm;	break;
			}
		} else {
			switch(_bytesPerInputPixel) {
				default:
				case 1: pixelFormat = MTLPixelFormatR8Uint;	break;
				case 2: pixelFormat = MTLPixelFormatRG8Uint;	break;
				case 4: pixelFormat = MTLPixelFormatRGBA8Uint;	break;
			}
		}
		MTLTextureDescriptor *const textureDescriptor = [MTLTextureDescriptor
			texture2DDescriptorWithPixelFormat:pixelFormat
			width:BufferingScanTarget::WriteAreaWidth
			height:BufferingScanTarget::WriteAreaHeight
			mipmapped:NO];
		textureDescriptor.resourceOptions = SharedResourceOptionsTexture;

		// TODO: the call below is the only reason why this project now requires macOS 10.13; is it all that helpful versus just uploading each frame?
		const NSUInteger bytesPerRow = BufferingScanTarget::WriteAreaWidth * _bytesPerInputPixel;
		_writeAreaTexture = [_writeAreaBuffer
			newTextureWithDescriptor:textureDescriptor
			offset:0
			bytesPerRow:bytesPerRow];
		_totalTextureBytes = bytesPerRow * BufferingScanTarget::WriteAreaHeight;

		// Generate pipeline.
		id<MTLLibrary> library = [view.device newDefaultLibrary];
		MTLRenderPipelineDescriptor *pipelineDescriptor = [[MTLRenderPipelineDescriptor alloc] init];
		pipelineDescriptor.colorAttachments[0].pixelFormat = view.colorPixelFormat;

		// TODO: logic somewhat more complicated than this, probably
		pipelineDescriptor.vertexFunction = [library newFunctionWithName:@"scanToDisplay"];
		switch(newModals->input_data_type) {
			case Outputs::Display::InputDataType::Luminance1:
				pipelineDescriptor.fragmentFunction = [library newFunctionWithName:@"sampleLuminance1"];
			break;
			case Outputs::Display::InputDataType::Luminance8:
				pipelineDescriptor.fragmentFunction = [library newFunctionWithName:@"sampleLuminance8"];
			break;
			case Outputs::Display::InputDataType::PhaseLinkedLuminance8:
				pipelineDescriptor.fragmentFunction = [library newFunctionWithName:@"samplePhaseLinkedLuminance8"];
			break;

			case Outputs::Display::InputDataType::Luminance8Phase8:
				pipelineDescriptor.fragmentFunction = [library newFunctionWithName:@"sampleLuminance8Phase8"];
			break;

			case Outputs::Display::InputDataType::Red1Green1Blue1:
				pipelineDescriptor.fragmentFunction = [library newFunctionWithName:@"sampleRed1Green1Blue1"];
			break;
			case Outputs::Display::InputDataType::Red2Green2Blue2:
				pipelineDescriptor.fragmentFunction = [library newFunctionWithName:@"sampleRed2Green2Blue2"];
			break;
			case Outputs::Display::InputDataType::Red4Green4Blue4:
				pipelineDescriptor.fragmentFunction = [library newFunctionWithName:@"sampleRed4Green4Blue4"];
			break;
			case Outputs::Display::InputDataType::Red8Green8Blue8:
				pipelineDescriptor.fragmentFunction = [library newFunctionWithName:@"sampleRed8Green8Blue8"];
			break;
		}

		_scanPipeline = [view.device newRenderPipelineStateWithDescriptor:pipelineDescriptor error:nil];
		[self setModals:*newModals view:view];
	}

	// Generate a command encoder for the view.
@@ -201,6 +230,7 @@ using BufferingScanTarget = Outputs::Display::BufferingScanTarget;
	[encoder setFragmentTexture:_writeAreaTexture atIndex:0];
	[encoder setVertexBuffer:_scansBuffer offset:0 atIndex:0];
	[encoder setVertexBuffer:_uniformsBuffer offset:0 atIndex:1];
	[encoder setFragmentBuffer:_uniformsBuffer offset:0 atIndex:0];

	_scanTarget.perform([=] (const BufferingScanTarget::OutputArea &outputArea) {
		// Ensure texture changes are noted.
@@ -19,6 +19,10 @@ struct Uniforms {

	// Provides a scaling factor in order to preserve 4:3 central content.
	float aspectRatioMultiplier;

	// Provides conversions to and from RGB for the active colour space.
	float3x3 toRGB;
	float3x3 fromRGB;
};

// MARK: - Structs used for receiving data from the emulation.
@@ -143,38 +147,56 @@ fragment float4 compositeSampleLuminance8Phase8(SourceInterpolator vert [[stage_

// All the RGB formats can produce RGB, composite or S-Video.
//
// Note on the below: in Metal you may not call a fragment function. Also I can find no
// functioning way to offer a templated fragment function. So I don't currently know how
// I would avoid the mess below.
// Note on the below: in Metal you may not call a fragment function (so e.g. svideoSampleX can't just call sampleX).
// Also I can find no functioning way to offer a templated fragment function. So I don't currently know how
// I could avoid the macro mess below.

// TODO: is the calling convention here causing `vert` and `texture` to be copied?
float3 convertRed8Green8Blue8(SourceInterpolator vert, texture2d<float> texture) {
	return float3(texture.sample(standardSampler, vert.textureCoordinates));
}

#define DeclareShaders(name) \
	fragment float4 sample##name(SourceInterpolator vert [[stage_in]], texture2d<float> texture [[texture(0)]]) { \
float3 convertRed4Green4Blue4(SourceInterpolator vert, texture2d<ushort> texture) {
	const auto sample = texture.sample(standardSampler, vert.textureCoordinates).rg;
	return float3(sample.r&15, (sample.g >> 4)&15, sample.g&15);
}

float3 convertRed2Green2Blue2(SourceInterpolator vert, texture2d<ushort> texture) {
	const auto sample = texture.sample(standardSampler, vert.textureCoordinates).r;
	return float3((sample >> 4)&3, (sample >> 2)&3, sample&3);
}

float3 convertRed1Green1Blue1(SourceInterpolator vert, texture2d<ushort> texture) {
	const auto sample = texture.sample(standardSampler, vert.textureCoordinates).r;
	return float3(sample&4, sample&2, sample&1);
}

#define DeclareShaders(name, pixelType) \
	fragment float4 sample##name(SourceInterpolator vert [[stage_in]], texture2d<pixelType> texture [[texture(0)]]) { \
		return float4(convert##name(vert, texture), 1.0); \
	} \
	\
	fragment float4 svideoSample##name(SourceInterpolator vert [[stage_in]], texture2d<float> texture [[texture(0)]]) { \
		const auto colour = convert##name(vert, texture); \
		return float4(colour, 1.0); \
	fragment float4 svideoSample##name(SourceInterpolator vert [[stage_in]], texture2d<pixelType> texture [[texture(0)]], constant Uniforms &uniforms [[buffer(0)]]) { \
		const auto colour = uniforms.fromRGB * convert##name(vert, texture); \
		const float2 colourSubcarrier = float2(sin(vert.colourPhase), cos(vert.colourPhase))*0.5 + float2(0.5); \
		return float4( \
			colour.r, \
			dot(colour.gb, colourSubcarrier), \
			0.0, \
			1.0 \
		); \
	} \
	\
	fragment float4 compositeSample##name(SourceInterpolator vert [[stage_in]], texture2d<pixelType> texture [[texture(0)]], constant Uniforms &uniforms [[buffer(0)]]) { \
		const auto colour = uniforms.fromRGB * convert##name(vert, texture); \
		const float2 colourSubcarrier = float2(sin(vert.colourPhase), cos(vert.colourPhase)); \
		return float4( \
			float3(mix(colour.r, dot(colour.gb, colourSubcarrier), vert.colourAmplitude)), \
			1.0 \
		); \
	}

// TODO: a colour-space conversion matrix is required to proceed.
DeclareShaders(Red8Green8Blue8)

fragment float4 sampleRed1Green1Blue1(SourceInterpolator vert [[stage_in]], texture2d<ushort> texture [[texture(0)]]) {
	const auto sample = texture.sample(standardSampler, vert.textureCoordinates).r;
	return float4(sample&4, sample&2, sample&1, 1.0);
}

fragment float4 sampleRed2Green2Blue2(SourceInterpolator vert [[stage_in]], texture2d<ushort> texture [[texture(0)]]) {
	const auto sample = texture.sample(standardSampler, vert.textureCoordinates).r;
	return float4((sample >> 4)&3, (sample >> 2)&3, sample&3, 3.0) / 3.0;
}

fragment float4 sampleRed4Green4Blue4(SourceInterpolator vert [[stage_in]], texture2d<ushort> texture [[texture(0)]]) {
	const auto sample = texture.sample(standardSampler, vert.textureCoordinates).rg;
	return float4(sample.r&15, (sample.g >> 4)&15, sample.g&15, 15.0) / 15.0;
}
DeclareShaders(Red8Green8Blue8, float)
DeclareShaders(Red4Green4Blue4, ushort)
DeclareShaders(Red2Green2Blue2, ushort)
DeclareShaders(Red1Green1Blue1, ushort)
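For reference, each DeclareShaders(name, pixelType) use above emits three fragment functions per pixel format. Expanding DeclareShaders(Red8Green8Blue8, float) by hand gives, give or take whitespace, the following; this is only the macro written out for readability, not additional code in the commit:

fragment float4 sampleRed8Green8Blue8(SourceInterpolator vert [[stage_in]], texture2d<float> texture [[texture(0)]]) {
	return float4(convertRed8Green8Blue8(vert, texture), 1.0);
}

fragment float4 svideoSampleRed8Green8Blue8(SourceInterpolator vert [[stage_in]], texture2d<float> texture [[texture(0)]], constant Uniforms &uniforms [[buffer(0)]]) {
	// Convert to the composite colour space, then output luminance in the first channel
	// and subcarrier-modulated chrominance in the second.
	const auto colour = uniforms.fromRGB * convertRed8Green8Blue8(vert, texture);
	const float2 colourSubcarrier = float2(sin(vert.colourPhase), cos(vert.colourPhase))*0.5 + float2(0.5);
	return float4(colour.r, dot(colour.gb, colourSubcarrier), 0.0, 1.0);
}

fragment float4 compositeSampleRed8Green8Blue8(SourceInterpolator vert [[stage_in]], texture2d<float> texture [[texture(0)]], constant Uniforms &uniforms [[buffer(0)]]) {
	// Convert to the composite colour space, then crossfade between luminance and
	// modulated chrominance according to the colour amplitude.
	const auto colour = uniforms.fromRGB * convertRed8Green8Blue8(vert, texture);
	const float2 colourSubcarrier = float2(sin(vert.colourPhase), cos(vert.colourPhase));
	return float4(float3(mix(colour.r, dot(colour.gb, colourSubcarrier), vert.colourAmplitude)), 1.0);
}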