Mirror of https://github.com/TomHarte/CLK.git

Ensures all input data types are parseable in Metal.

Though now I need to think a bit more about the best way to compose signal-type conversions, and whether output-type calculations (e.g. gamma, brightness) are applied.
Thomas Harte 2020-08-10 19:47:47 -04:00
parent eadda6a967
commit 525233e10b
4 changed files with 154 additions and 24 deletions
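
In outline, making every input data type "parseable in Metal" means pairing each InputDataType with two decisions: on the CPU side, whether its input texture uses a normalised (Unorm) or integer (Uint) pixel format, and on the GPU side, which dedicated sampling function the fragment stage uses. A minimal sketch of that pairing, purely for orientation: the InputPlumbing struct and its variables are hypothetical, but the byte sizes, normalisation flags and sampler names are all taken from the diff below.

#include <cstddef>

// Hypothetical summary type; the real code keeps these decisions in separate
// switch statements, shown in the Metal backend and shaders below.
struct InputPlumbing {
	size_t bytesPerPixel;	// as reported by size_for_data_type().
	bool isNormalised;		// as reported by data_type_is_normalised(); selects Unorm versus Uint.
	const char *sampler;	// the fragment function now attached to the pipeline.
};

// A few representative rows of the mapping this commit establishes.
constexpr InputPlumbing luminance1       = {1, false, "sampleLuminance1"};
constexpr InputPlumbing luminance8Phase8 = {2, true,  "sampleLuminance8Phase8"};
constexpr InputPlumbing red2Green2Blue2  = {1, false, "sampleRed2Green2Blue2"};
constexpr InputPlumbing red8Green8Blue8  = {4, true,  "sampleRed8Green8Blue8"};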

View File

@@ -67,7 +67,7 @@
</Testables>
</TestAction>
<LaunchAction
buildConfiguration = "Debug"
buildConfiguration = "Release"
selectedDebuggerIdentifier = "Xcode.DebuggerFoundation.Debugger.LLDB"
selectedLauncherIdentifier = "Xcode.DebuggerFoundation.Launcher.LLDB"
enableASanStackUseAfterReturn = "YES"

View File

@@ -121,11 +121,20 @@ using BufferingScanTarget = Outputs::Display::BufferingScanTarget;
// Generate the appropriate input texture.
MTLPixelFormat pixelFormat;
_bytesPerInputPixel = size_for_data_type(newModals->input_data_type);
switch(_bytesPerInputPixel) {
default:
case 1: pixelFormat = MTLPixelFormatR8Unorm; break;
case 2: pixelFormat = MTLPixelFormatRG8Unorm; break;
case 4: pixelFormat = MTLPixelFormatRGBA8Unorm; break;
if(data_type_is_normalised(newModals->input_data_type)) {
switch(_bytesPerInputPixel) {
default:
case 1: pixelFormat = MTLPixelFormatR8Unorm; break;
case 2: pixelFormat = MTLPixelFormatRG8Unorm; break;
case 4: pixelFormat = MTLPixelFormatRGBA8Unorm; break;
}
} else {
switch(_bytesPerInputPixel) {
default:
case 1: pixelFormat = MTLPixelFormatR8Uint; break;
case 2: pixelFormat = MTLPixelFormatRG8Uint; break;
case 4: pixelFormat = MTLPixelFormatRGBA8Uint; break;
}
}
MTLTextureDescriptor *const textureDescriptor = [MTLTextureDescriptor
texture2DDescriptorWithPixelFormat:pixelFormat
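
The practical difference between the two branches above: a Unorm format is delivered to the shader as a float in [0.0, 1.0] (hence the texture2d<float> samplers later in this commit), whereas a Uint format delivers the raw byte (texture2d<ushort>), which the Luminance1 and packed-RGB samplers rely on to mask individual bits. A rough numeric model of that distinction follows; it is not Metal API and not part of the commit.

#include <cassert>
#include <cstdint>

// Approximately what one 8-bit channel looks like from the shader's side,
// per family of pixel format chosen above.
float read_unorm8(uint8_t byte) { return float(byte) / 255.0f; }	// R8Unorm and friends.
uint16_t read_uint8(uint8_t byte) { return byte; }					// R8Uint and friends.

int main() {
	assert(read_unorm8(255) == 1.0f);		// e.g. Luminance8 at full brightness.
	assert(read_uint8(0b00110110) == 0x36);	// e.g. Red2Green2Blue2, bits preserved for unpacking.
}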
@@ -148,13 +157,33 @@ using BufferingScanTarget = Outputs::Display::BufferingScanTarget;
pipelineDescriptor.colorAttachments[0].pixelFormat = view.colorPixelFormat;
// TODO: logic somewhat more complicated than this, probably
pipelineDescriptor.vertexFunction = [library newFunctionWithName:@"scanVertexMain"];
pipelineDescriptor.vertexFunction = [library newFunctionWithName:@"scanToDisplay"];
switch(newModals->input_data_type) {
default:
pipelineDescriptor.fragmentFunction = [library newFunctionWithName:@"scanFragmentMainRGB"];
break;
case Outputs::Display::InputDataType::Luminance1:
pipelineDescriptor.fragmentFunction = [library newFunctionWithName:@"scanFragmentMainL1"];
pipelineDescriptor.fragmentFunction = [library newFunctionWithName:@"sampleLuminance1"];
break;
case Outputs::Display::InputDataType::Luminance8:
pipelineDescriptor.fragmentFunction = [library newFunctionWithName:@"sampleLuminance8"];
break;
case Outputs::Display::InputDataType::PhaseLinkedLuminance8:
pipelineDescriptor.fragmentFunction = [library newFunctionWithName:@"samplePhaseLinkedLuminance8"];
break;
case Outputs::Display::InputDataType::Luminance8Phase8:
pipelineDescriptor.fragmentFunction = [library newFunctionWithName:@"sampleLuminance8Phase8"];
break;
case Outputs::Display::InputDataType::Red1Green1Blue1:
pipelineDescriptor.fragmentFunction = [library newFunctionWithName:@"sampleRed1Green1Blue1"];
break;
case Outputs::Display::InputDataType::Red2Green2Blue2:
pipelineDescriptor.fragmentFunction = [library newFunctionWithName:@"sampleRed2Green2Blue2"];
break;
case Outputs::Display::InputDataType::Red4Green4Blue4:
pipelineDescriptor.fragmentFunction = [library newFunctionWithName:@"sampleRed4Green4Blue4"];
break;
case Outputs::Display::InputDataType::Red8Green8Blue8:
pipelineDescriptor.fragmentFunction = [library newFunctionWithName:@"sampleRed8Green8Blue8"];
break;
}

View File

@@ -21,6 +21,8 @@ struct Uniforms {
float aspectRatioMultiplier;
};
// MARK: - Structs used for receiving data from the emulation.
// This is intended to match the net effect of `Scan` as defined by the BufferingScanTarget.
struct Scan {
struct EndPoint {
@@ -47,20 +49,25 @@ struct Line {
uint8_t compositeAmplitude;
};
// MARK: - Intermediate structs.
// This is an intermediate struct, which is TEMPORARY.
struct ColouredVertex {
struct SourceInterpolator {
float4 position [[position]];
float2 textureCoordinates;
float colourPhase;
float colourAmplitude;
};
// MARK: - Scan shaders; these do final output to the display.
vertex ColouredVertex scanVertexMain( constant Uniforms &uniforms [[buffer(1)]],
constant Scan *scans [[buffer(0)]],
uint instanceID [[instance_id]],
uint vertexID [[vertex_id]]) {
vertex SourceInterpolator scanToDisplay( constant Uniforms &uniforms [[buffer(1)]],
constant Scan *scans [[buffer(0)]],
uint instanceID [[instance_id]],
uint vertexID [[vertex_id]]) {
SourceInterpolator output;
// Get start and end vertices in regular float2 form.
const float2 start = float2(
float(scans[instanceID].endPoints[0].position[0]) / float(uniforms.scale.x),
@@ -75,8 +82,15 @@ vertex ColouredVertex scanVertexMain( constant Uniforms &uniforms [[buffer(1)]],
const float2 tangent = (end - start);
const float2 normal = float2(-tangent.y, tangent.x) / length(tangent);
// Load up the colour details.
output.colourAmplitude = float(scans[instanceID].compositeAmplitude) / 255.0f;
output.colourPhase = mix(
float(scans[instanceID].endPoints[0].compositeAngle),
float(scans[instanceID].endPoints[1].compositeAngle),
float((vertexID&2) >> 1)
) / 64.0;
// Hence determine this quad's real shape, using vertexID to pick a corner.
ColouredVertex output;
output.position = float4(
((start + (float(vertexID&2) * 0.5) * tangent + (float(vertexID&1) - 0.5) * normal * uniforms.lineWidth) * float2(2.0, -2.0) + float2(-1.0, 1.0)) * float2(uniforms.aspectRatioMultiplier, 1.0),
0.0,
@@ -96,12 +110,77 @@ constexpr sampler standardSampler( coord::pixel,
}
// MARK: - Input formats to RGB conversions.
// MARK: - Various input format conversion samplers.
fragment half4 scanFragmentMainRGB (ColouredVertex vert [[stage_in]], texture2d<float> texture [[texture(0)]]) {
return half4(texture.sample(standardSampler, vert.textureCoordinates));
/*
Luminance1, // 1 byte/pixel; any bit set => white; no bits set => black.
Luminance8, // 1 byte/pixel; linear scale.
PhaseLinkedLuminance8, // 4 bytes/pixel; each byte is an individual 8-bit luminance
// value and which value is output is a function of
// colour subcarrier phase — byte 0 defines the first quarter
// of each colour cycle, byte 1 the next quarter, etc. This
// format is intended to permit replay of sampled original data.
// The luminance plus phase types describe a luminance and the phase offset
// of a colour subcarrier. So they can be used to generate a luminance signal,
// or an s-video pipeline.
Luminance8Phase8, // 2 bytes/pixel; first is luminance, second is phase.
// Phase is encoded on a 192-unit circle; anything
// greater than 192 implies that the colour part of
// the signal should be omitted.
// The RGB types can directly feed an RGB pipeline, naturally, or can be mapped
// to phase+luminance, or just to luminance.
Red1Green1Blue1, // 1 byte/pixel; bit 0 is blue on or off, bit 1 is green, bit 2 is red.
Red2Green2Blue2, // 1 byte/pixel; bits 0 and 1 are blue, bits 2 and 3 are green, bits 4 and 5 are red.
Red4Green4Blue4, // 2 bytes/pixel; first nibble is red, second is green, third is blue.
Red8Green8Blue8, // 4 bytes/pixel; first is red, second is green, third is blue, fourth is vacant.
*/
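
As a concrete illustration of the Luminance8Phase8 description above, here is a hypothetical CPU-side packer (not part of the commit) that follows the stated encoding: byte 0 is luminance, byte 1 is phase on a 192-unit circle, and any phase value above 192 marks the sample as colourless.

#include <array>
#include <cstdint>

// Hypothetical encoder; 'luminance' and 'phase' are both fractions in [0.0, 1.0),
// with 'phase' measured as a fraction of one colour subcarrier cycle.
std::array<uint8_t, 2> pack_luminance8_phase8(float luminance, float phase, bool has_colour) {
	const auto luma = uint8_t(luminance * 255.0f);
	if(!has_colour) return {luma, uint8_t(255)};	// anything above 192 => omit the colour component.
	return {luma, uint8_t(phase * 192.0f)};
}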
// There's only one meaningful way to sample the luminance formats.
fragment float4 sampleLuminance1(SourceInterpolator vert [[stage_in]], texture2d<ushort> texture [[texture(0)]]) {
return float4(float3(texture.sample(standardSampler, vert.textureCoordinates).r), 1.0);
}
fragment half4 scanFragmentMainL1(ColouredVertex vert [[stage_in]], texture2d<float> texture [[texture(0)]]) {
return half4(half3(texture.sample(standardSampler, vert.textureCoordinates).r * 255.0), 1.0);
fragment float4 sampleLuminance8(SourceInterpolator vert [[stage_in]], texture2d<float> texture [[texture(0)]]) {
return float4(float3(texture.sample(standardSampler, vert.textureCoordinates).r), 1.0);
}
fragment float4 samplePhaseLinkedLuminance8(SourceInterpolator vert [[stage_in]], texture2d<float> texture [[texture(0)]]) {
const int offset = int(vert.colourPhase * 4.0);
auto sample = texture.sample(standardSampler, vert.textureCoordinates);
return float4(float3(sample[offset]), 1.0);
}
// The luminance/phase format can produce either composite or S-Video.
fragment float4 sampleLuminance8Phase8(SourceInterpolator vert [[stage_in]], texture2d<float> texture [[texture(0)]]) {
return float4(texture.sample(standardSampler, vert.textureCoordinates).rg, 0.0, 1.0);
}
// All the RGB formats can produce RGB, composite or S-Video.
fragment float4 sampleRed8Green8Blue8(SourceInterpolator vert [[stage_in]], texture2d<float> texture [[texture(0)]]) {
return float4(texture.sample(standardSampler, vert.textureCoordinates));
}
fragment float4 sampleRed1Green1Blue1(SourceInterpolator vert [[stage_in]], texture2d<ushort> texture [[texture(0)]]) {
const auto sample = texture.sample(standardSampler, vert.textureCoordinates).r;
return float4(sample&4, sample&2, sample&1, 1.0);
}
fragment float4 sampleRed2Green2Blue2(SourceInterpolator vert [[stage_in]], texture2d<ushort> texture [[texture(0)]]) {
const auto sample = texture.sample(standardSampler, vert.textureCoordinates).r;
return float4((sample >> 4)&3, (sample >> 2)&3, sample&3, 3.0) / 3.0;
}
fragment float4 sampleRed4Green4Blue4(SourceInterpolator vert [[stage_in]], texture2d<ushort> texture [[texture(0)]]) {
const auto sample = texture.sample(standardSampler, vert.textureCoordinates).rg;
return float4(sample.r&15, (sample.g >> 4)&15, sample.g&15, 15.0) / 15.0;
}
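
For cross-checking the bit layouts used by the packed-RGB samplers above, here are two hypothetical CPU-side decoders (not part of the commit) that unpack the same layouts as sampleRed1Green1Blue1 and sampleRed2Green2Blue2.

#include <cassert>
#include <cstdint>

struct RGB { float r, g, b; };

// Bit 2 is red, bit 1 is green, bit 0 is blue, per the comment block above.
RGB decode_red1green1blue1(uint8_t byte) {
	return { float((byte >> 2) & 1), float((byte >> 1) & 1), float(byte & 1) };
}

// Bits 4-5 are red, 2-3 green, 0-1 blue; each two-bit field scales to [0.0, 1.0].
RGB decode_red2green2blue2(uint8_t byte) {
	return {
		float((byte >> 4) & 3) / 3.0f,
		float((byte >> 2) & 3) / 3.0f,
		float(byte & 3) / 3.0f
	};
}

int main() {
	const auto pixel = decode_red2green2blue2(0b00110110);	// red = 3, green = 1, blue = 2.
	assert(pixel.r == 1.0f && pixel.g == 1.0f / 3.0f && pixel.b == 2.0f / 3.0f);
}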

View File

@@ -89,7 +89,8 @@ enum class InputDataType {
Red8Green8Blue8, // 4 bytes/pixel; first is red, second is green, third is blue, fourth is vacant.
};
inline size_t size_for_data_type(InputDataType data_type) {
/// @returns the number of bytes per sample for data of type @c data_type.
constexpr inline size_t size_for_data_type(InputDataType data_type) {
switch(data_type) {
case InputDataType::Luminance1:
case InputDataType::Luminance8:
@@ -110,7 +111,28 @@ inline size_t size_for_data_type(InputDataType data_type) {
}
}
inline DisplayType natural_display_type_for_data_type(InputDataType data_type) {
/// @returns @c true if this data type presents normalised data, i.e. each byte holds a
/// value in the range [0, 255] representing a real number in the range [0.0, 1.0]; @c false otherwise.
constexpr inline bool data_type_is_normalised(InputDataType data_type) {
switch(data_type) {
case InputDataType::Luminance8:
case InputDataType::Luminance8Phase8:
case InputDataType::Red8Green8Blue8:
case InputDataType::PhaseLinkedLuminance8:
return true;
default:
case InputDataType::Luminance1:
case InputDataType::Red1Green1Blue1:
case InputDataType::Red2Green2Blue2:
case InputDataType::Red4Green4Blue4:
return false;
}
}
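
Because size_for_data_type (as amended above) and the new data_type_is_normalised are both constexpr, their mappings can be pinned down at compile time. A hypothetical usage sketch, not part of the commit, assuming this header is included and that both functions live in the Outputs::Display namespace as the Metal backend's qualified references suggest:

// Values taken from the per-format comments above.
using namespace Outputs::Display;

static_assert(size_for_data_type(InputDataType::Luminance1) == 1, "Luminance1 is 1 byte/pixel");
static_assert(size_for_data_type(InputDataType::Luminance8Phase8) == 2, "Luminance8Phase8 is 2 bytes/pixel");
static_assert(size_for_data_type(InputDataType::Red8Green8Blue8) == 4, "Red8Green8Blue8 is 4 bytes/pixel");

static_assert(!data_type_is_normalised(InputDataType::Red2Green2Blue2), "packed RGB arrives as raw integer samples");
static_assert(data_type_is_normalised(InputDataType::PhaseLinkedLuminance8), "byte-per-channel data arrives as [0.0, 1.0] floats");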
/// @returns The 'natural' display type for data of type @c data_type. The natural display is whichever would
/// display it with the least number of conversions. Caveat: a colour display is assumed for pure-composite data types.
constexpr inline DisplayType natural_display_type_for_data_type(InputDataType data_type) {
switch(data_type) {
default:
case InputDataType::Luminance1: