2020-08-04 22:22:14 +00:00
|
|
|
//
|
|
|
|
// ScanTarget.m
|
|
|
|
// Clock Signal
|
|
|
|
//
|
|
|
|
// Created by Thomas Harte on 02/08/2020.
|
|
|
|
// Copyright © 2020 Thomas Harte. All rights reserved.
|
|
|
|
//
|
|
|
|
|
|
|
|
#import "CSScanTarget.h"
|
|
|
|
|
|
|
|
#import <Metal/Metal.h>
|
2020-08-22 01:11:25 +00:00
|
|
|
|
2020-08-31 00:21:01 +00:00
|
|
|
#include <algorithm>
|
2020-08-22 01:11:25 +00:00
|
|
|
#include <atomic>
|
|
|
|
|
2020-08-08 02:03:54 +00:00
|
|
|
#include "BufferingScanTarget.hpp"
|
2020-08-22 01:11:25 +00:00
|
|
|
#include "FIRFilter.hpp"
|
2020-08-04 22:22:14 +00:00
|
|
|
|
2020-09-01 00:01:59 +00:00
|
|
|
/*
|
|
|
|
|
|
|
|
RGB and composite monochrome
|
|
|
|
----------------------------
|
|
|
|
|
|
|
|
Source data is converted to 32bpp RGB or to composite directly from its input, at output resolution.
|
|
|
|
Gamma correction is applied unless the inputs are 1bpp (e.g. Macintosh-style black/white, TTL-style RGB).
|
|
|
|
|
|
|
|
TODO: filtering when the output size is 'small'.
|
|
|
|
|
|
|
|
S-Video
|
|
|
|
-------
|
|
|
|
|
|
|
|
Source data is pasted together with a common clock in the composition buffer. Colour phase is baked in
|
|
|
|
at this point. Format within the composition buffer is:
|
|
|
|
|
|
|
|
.r = luminance
|
|
|
|
.g = 0.5 + 0.5 * chrominance * cos(phase)
|
|
|
|
.b = 0.5 + 0.5 * chrominance * sin(phase)
|
|
|
|
|
|
|
|
Contents of the composition buffer are then drawn into the finalised line texture; at this point a suitable
|
|
|
|
low-pass filter is applied to the two chrominance channels, colours are converted to RGB and gamma corrected.
|
|
|
|
|
|
|
|
Contents from the finalised line texture are then painted to the display.
|
|
|
|
|
|
|
|
Composite colour
|
|
|
|
----------------
|
|
|
|
|
|
|
|
Source data is pasted together with a common clock in the composition buffer. Colour phase and amplitude are
|
|
|
|
recorded at this point. Format within the composition buffer is:
|
|
|
|
|
|
|
|
.r = composite value
|
|
|
|
.g = phase
|
|
|
|
.b = amplitude
|
|
|
|
|
|
|
|
Contents of the composition buffer are transferred to the separated-luma buffer, subject to a low-pass filter
|
|
|
|
that has sought to separate luminance and chrominance, and with phase and amplitude now baked into the latter:
|
|
|
|
|
|
|
|
.r = luminance
|
|
|
|
.g = 0.5 + 0.5 * chrominance * cos(phase)
|
|
|
|
.b = 0.5 + 0.5 * chrominance * sin(phase)
|
|
|
|
|
|
|
|
The process now continues as per the corresponding S-Video steps.
|
|
|
|
|
|
|
|
NOTES
|
|
|
|
-----
|
|
|
|
|
|
|
|
1) for many of the input pixel formats it would be possible to do the trigonometric side of things at
|
|
|
|
arbitrary precision. Since it would always be necessary to support fixed-precision processing because
|
|
|
|
of the directly-sampled input formats, I've used fixed throughout to reduce the number of permutations
|
|
|
|
and combinations of code I need to support. The precision is always selected to be at least four times
|
|
|
|
the colour clock.
|
|
|
|
|
|
|
|
2) I experimented with skipping the separated-luma buffer for composite colour based on the observation that
|
|
|
|
just multiplying the raw signal by sin and cos and then filtering well below the colour subcarrier frequency
|
|
|
|
should be sufficient. It wasn't in practice because the bits of luminance that don't quite separate are then
|
|
|
|
of such massive amplitude that you get huge bands of bright colour in place of the usual chroma dots.
|
|
|
|
|
|
|
|
3) I also initially didn't want to have a finalised-line texture, but processing costs changed my mind on that.
|
|
|
|
If you accept that output will be fixed precision, anyway. In that case, processing for a typical NTSC frame
|
|
|
|
in its original resolution means applying filtering (i.e. at least 15 samples per pixel) likely between
|
|
|
|
218,400 and 273,000 times per output frame, then upscaling from there at 1 sample per pixel. Count the second
|
|
|
|
sample twice for the original store and you're talking between 16*218,400 = 3,494,400 to 16*273,000 = 4,368,000
|
|
|
|
total pixel accesses. Though that's not a perfect way to measure cost, roll with it.
|
|
|
|
|
|
|
|
On my 4k monitor, doing it at actual output resolution would instead cost 3840*2160*15 = 124,416,000 total
|
|
|
|
accesses. Which doesn't necessarily mean "more than 28 times as much", but does mean "a lot more".
|
|
|
|
|
|
|
|
(going direct-to-display for composite monochrome means evaluating sin/cos a lot more often than it might
|
|
|
|
with more buffering in between, but that doesn't provisionally seem to be as much of a bottleneck)
|
|
|
|
*/
|
|
|
|
|
2020-08-08 01:19:17 +00:00
|
|
|
namespace {
|
|
|
|
|
|
|
|
struct Uniforms {
|
|
|
|
int32_t scale[2];
|
|
|
|
float lineWidth;
|
2020-08-08 02:29:24 +00:00
|
|
|
float aspectRatioMultiplier;
|
2020-08-12 02:11:50 +00:00
|
|
|
simd::float3x3 toRGB;
|
|
|
|
simd::float3x3 fromRGB;
|
2020-08-17 01:11:43 +00:00
|
|
|
float zoom;
|
|
|
|
simd::float2 offset;
|
2020-09-02 12:03:10 +00:00
|
|
|
simd::float3 chromaCoefficients[8];
|
2020-09-03 17:18:21 +00:00
|
|
|
simd::float2 lumaCoefficients[8];
|
2020-08-26 02:05:19 +00:00
|
|
|
float radiansPerPixel;
|
2020-08-30 00:54:46 +00:00
|
|
|
float cyclesMultiplier;
|
2020-08-08 01:19:17 +00:00
|
|
|
};
|
|
|
|
|
2020-08-30 16:06:29 +00:00
|
|
|
constexpr size_t NumBufferedLines = 500;
|
|
|
|
constexpr size_t NumBufferedScans = NumBufferedLines * 4;
|
2020-08-08 02:03:54 +00:00
|
|
|
|
2020-08-09 21:59:52 +00:00
|
|
|
/// The shared resource options this app would most favour; applied as widely as possible.
|
|
|
|
constexpr MTLResourceOptions SharedResourceOptionsStandard = MTLResourceCPUCacheModeWriteCombined | MTLResourceStorageModeShared;
|
|
|
|
|
|
|
|
/// The shared resource options used for the write-area texture; on macOS it can't be MTLResourceStorageModeShared so this is a carve-out.
|
|
|
|
constexpr MTLResourceOptions SharedResourceOptionsTexture = MTLResourceCPUCacheModeWriteCombined | MTLResourceStorageModeManaged;
|
|
|
|
|
2020-08-09 03:11:44 +00:00
|
|
|
#define uniforms() reinterpret_cast<Uniforms *>(_uniformsBuffer.contents)
|
|
|
|
|
2020-08-20 01:56:53 +00:00
|
|
|
#define RangePerform(start, end, size, func) \
|
|
|
|
if(start != end) { \
|
|
|
|
if(start < end) { \
|
|
|
|
func(start, end-start); \
|
|
|
|
} else { \
|
|
|
|
func(start, size-start); \
|
|
|
|
if(end) { \
|
|
|
|
func(0, end); \
|
|
|
|
} \
|
|
|
|
} \
|
|
|
|
}
|
|
|
|
|
2020-08-08 01:19:17 +00:00
|
|
|
}
|
|
|
|
|
2020-08-09 02:49:02 +00:00
|
|
|
using BufferingScanTarget = Outputs::Display::BufferingScanTarget;
|
|
|
|
|
2020-08-04 22:22:14 +00:00
|
|
|
@implementation CSScanTarget {
	// The command queue for the Metal device in use.
	id<MTLCommandQueue> _commandQueue;

	// Render pipelines.
	id<MTLRenderPipelineState> _composePipeline;	// Renders scans into the composition texture.
	id<MTLRenderPipelineState> _outputPipeline;		// Draws to the frame buffer.
	id<MTLRenderPipelineState> _copyPipeline;		// Copies the frame buffer to the visible surface.
	id<MTLRenderPipelineState> _clearPipeline;		// Applies additional inter-frame clearing (cf. the stencil).

	// Buffers.
	id<MTLBuffer> _uniformsBuffer;	// A static buffer, containing a copy of the Uniforms struct.
	id<MTLBuffer> _scansBuffer;		// A dynamic buffer, into which the CPU writes Scans for later display.
	id<MTLBuffer> _linesBuffer;		// A dynamic buffer, into which the CPU writes Lines for later display.

	// Textures: the write area.
	//
	// The write area receives fragments of output from the emulated machine,
	// so it is written by the CPU and read by the GPU.
	id<MTLTexture> _writeAreaTexture;
	id<MTLBuffer> _writeAreaBuffer;		// The storage underlying the write-area texture.
	size_t _bytesPerInputPixel;			// Determines per-pixel sizing within the write-area texture.
	size_t _totalTextureBytes;			// Holds the total size of the write-area texture.

	// Textures: the frame buffer.
	//
	// When inter-frame blending is in use, the frame buffer contains the most recent output.
	// Metal isn't really set up for single-buffered output, so this acts as if it were that
	// single buffer. This texture is complete 2d data, copied directly to the display.
	id<MTLTexture> _frameBuffer;
	MTLRenderPassDescriptor *_frameBufferRenderPass;	// The render pass for _drawing to_ the frame buffer.

	// Textures: the stencil.
	//
	// Scan targets receive scans, not full frames. Those scans may not cover the entire display,
	// either because unlit areas have been omitted or because a sync discrepancy means that the full
	// potential vertical or horizontal width of the display isn't used momentarily.
	//
	// In order to manage inter-frame blending correctly in those cases, a stencil is attached to the
	// frame buffer so that a clearing step can darken any pixels that weren't naturally painted during
	// any frame.
	id<MTLTexture> _frameBufferStencil;
	id<MTLDepthStencilState> _drawStencilState;		// Always draws, sets stencil to 1.
	id<MTLDepthStencilState> _clearStencilState;	// Draws only where stencil is 0, clears all to 0.

	// Textures: the composition texture.
	//
	// If additional temporal processing is required (i.e. for S-Video and colour composite output),
	// fragments from the write-area texture are assembled into the composition texture, where they
	// properly adjoin their neighbours and everything is converted to a common clock.
	id<MTLTexture> _compositionTexture;
	MTLRenderPassDescriptor *_compositionRenderPass;	// The render pass for _drawing to_ the composition buffer.

	enum class Pipeline {
		/// Scans are painted directly to the frame buffer.
		DirectToDisplay,
		/// Scans are painted to the composition buffer, which is processed to the finalised line buffer,
		/// from which lines are painted to the frame buffer.
		SVideo,
		/// Scans are painted to the composition buffer, which is processed to the separated luma buffer and then the finalised line buffer,
		/// from which lines are painted to the frame buffer.
		CompositeColour

		// TODO: decide what to do for downward-scaled direct-to-display. Obvious options are to include lowpass
		// filtering into the scan outputter and continue hoping that the vertical takes care of itself, or maybe
		// to stick with DirectToDisplay but with a minimum size for the frame buffer and apply filtering from
		// there to the screen.
	};
	Pipeline _pipeline;

	// Textures: additional storage used when processing S-Video and composite colour input.
	id<MTLTexture> _finalisedLineTexture;
	id<MTLComputePipelineState> _finalisedLineState;
	id<MTLTexture> _separatedLumaTexture;
	id<MTLComputePipelineState> _separatedLumaState;

	NSUInteger _lineBufferPixelsPerLine;

	size_t _lineOffsetBuffer;
	id<MTLBuffer> _lineOffsetBuffers[NumBufferedLines];	// Allocating NumBufferedLines buffers ensures these can't possibly be exhausted;
														// for this list to be exhausted there'd have to be more draw calls in flight than
														// there are lines for them to operate upon.

	// The scan target in C++-world terms and the non-GPU storage for it.
	BufferingScanTarget _scanTarget;
	BufferingScanTarget::LineMetadata _lineMetadataBuffer[NumBufferedLines];
	std::atomic_flag _isDrawing;

	// The output view; held weakly to avoid a retain cycle with the view that owns this scan target.
	__weak MTKView *_view;
}
|
|
|
|
|
2020-08-04 23:44:56 +00:00
|
|
|
/// Designated entry point: creates a scan target bound to @c view, allocating all
/// size-independent GPU resources from the view's device.
- (nonnull instancetype)initWithView:(nonnull MTKView *)view {
	self = [super init];
	if(self) {
		_commandQueue = [view.device newCommandQueue];

		// Allocate space for uniforms. Use the app-standard shared options, consistent with
		// the other CPU-written buffers below (the literal mask previously used here was the
		// same value; this is a consistency fix only).
		_uniformsBuffer = [view.device
			newBufferWithLength:sizeof(Uniforms)
			options:SharedResourceOptionsStandard];

		// Allocate buffers for scans and lines and for the write area texture.
		_scansBuffer = [view.device
			newBufferWithLength:sizeof(Outputs::Display::BufferingScanTarget::Scan)*NumBufferedScans
			options:SharedResourceOptionsStandard];
		_linesBuffer = [view.device
			newBufferWithLength:sizeof(Outputs::Display::BufferingScanTarget::Line)*NumBufferedLines
			options:SharedResourceOptionsStandard];
		_writeAreaBuffer = [view.device
			newBufferWithLength:BufferingScanTarget::WriteAreaWidth*BufferingScanTarget::WriteAreaHeight*4
			options:SharedResourceOptionsTexture];

		// Install all that storage in the buffering scan target.
		_scanTarget.set_write_area(reinterpret_cast<uint8_t *>(_writeAreaBuffer.contents));
		_scanTarget.set_line_buffer(reinterpret_cast<BufferingScanTarget::Line *>(_linesBuffer.contents), _lineMetadataBuffer, NumBufferedLines);
		_scanTarget.set_scan_buffer(reinterpret_cast<BufferingScanTarget::Scan *>(_scansBuffer.contents), NumBufferedScans);

		// Set initial aspect-ratio multiplier and size-dependent buffers.
		_view = view;
		[self mtkView:view drawableSizeWillChange:view.drawableSize];

		// Generate copy and clear pipelines. Previously failures here were silently
		// swallowed via error:nil; assert on the returned states so shader-name or
		// format mistakes surface immediately in debug builds.
		id<MTLLibrary> library = [_view.device newDefaultLibrary];
		MTLRenderPipelineDescriptor *const pipelineDescriptor = [[MTLRenderPipelineDescriptor alloc] init];
		pipelineDescriptor.colorAttachments[0].pixelFormat = _view.colorPixelFormat;
		pipelineDescriptor.vertexFunction = [library newFunctionWithName:@"copyVertex"];
		pipelineDescriptor.fragmentFunction = [library newFunctionWithName:@"copyFragment"];

		NSError *pipelineError = nil;
		_copyPipeline = [_view.device newRenderPipelineStateWithDescriptor:pipelineDescriptor error:&pipelineError];
		NSAssert(_copyPipeline, @"Could not create copy pipeline: %@", pipelineError);

		pipelineDescriptor.fragmentFunction = [library newFunctionWithName:@"clearFragment"];
		pipelineDescriptor.stencilAttachmentPixelFormat = MTLPixelFormatStencil8;
		_clearPipeline = [_view.device newRenderPipelineStateWithDescriptor:pipelineDescriptor error:&pipelineError];
		NSAssert(_clearPipeline, @"Could not create clear pipeline: %@", pipelineError);

		// Clear stencil: always write the reference value (of 0), but draw only where the stencil already
		// had that value.
		MTLDepthStencilDescriptor *depthStencilDescriptor = [[MTLDepthStencilDescriptor alloc] init];
		depthStencilDescriptor.frontFaceStencil.stencilCompareFunction = MTLCompareFunctionEqual;
		depthStencilDescriptor.frontFaceStencil.depthStencilPassOperation = MTLStencilOperationReplace;
		depthStencilDescriptor.frontFaceStencil.stencilFailureOperation = MTLStencilOperationReplace;
		_clearStencilState = [view.device newDepthStencilStateWithDescriptor:depthStencilDescriptor];

		// Allocate a large number of single-int buffers, for supplying offsets to the compute shaders.
		// There's a ridiculous amount of overhead in this, but it avoids allocations during drawing,
		// and a single int per instance is all I need.
		for(size_t c = 0; c < NumBufferedLines; ++c) {
			_lineOffsetBuffers[c] = [_view.device newBufferWithLength:sizeof(int) options:SharedResourceOptionsStandard];
		}

		// Ensure the is-drawing flag is initially clear.
		_isDrawing.clear();
	}

	return self;
}
|
|
|
|
|
2020-08-04 23:44:56 +00:00
|
|
|
/// MTKViewDelegate-style callback: the drawable size of @c view is about to change to @c size.
/// Recomputes the aspect-ratio uniforms, then rebuilds all size-dependent buffers under @c self's lock.
- (void)mtkView:(nonnull MTKView *)view drawableSizeWillChange:(CGSize)size {
	[self setAspectRatio];

	@synchronized(self) {
		[self updateSizeBuffersToSize:size];
	}
}
|
|
|
|
|
2020-09-02 01:33:54 +00:00
|
|
|
/// Rebuilds everything whose dimensions track the view's drawable size: the frame buffer,
/// its stencil, the render pass that targets them and the draw-time stencil state.
- (void)updateSizeBuffersToSize:(CGSize)size {
	// TODO: consider multisampling here? But it seems like you'd need another level of indirection
	// in order to maintain an ongoing buffer that supersamples only at the end.
	const NSUInteger frameBufferWidth = NSUInteger(size.width * _view.layer.contentsScale);
	const NSUInteger frameBufferHeight = NSUInteger(size.height * _view.layer.contentsScale);

	// Generate a framebuffer texture, sized to the drawable and private to the GPU.
	MTLTextureDescriptor *const colourDescriptor = [MTLTextureDescriptor
		texture2DDescriptorWithPixelFormat:_view.colorPixelFormat
		width:frameBufferWidth
		height:frameBufferHeight
		mipmapped:NO];
	colourDescriptor.usage = MTLTextureUsageRenderTarget | MTLTextureUsageShaderRead;
	colourDescriptor.resourceOptions = MTLResourceStorageModePrivate;
	_frameBuffer = [_view.device newTextureWithDescriptor:colourDescriptor];

	// Generate a matching stencil texture.
	MTLTextureDescriptor *const stencilDescriptor = [MTLTextureDescriptor
		texture2DDescriptorWithPixelFormat:MTLPixelFormatStencil8
		width:frameBufferWidth
		height:frameBufferHeight
		mipmapped:NO];
	stencilDescriptor.usage = MTLTextureUsageRenderTarget;
	stencilDescriptor.resourceOptions = MTLResourceStorageModePrivate;
	_frameBufferStencil = [_view.device newTextureWithDescriptor:stencilDescriptor];

	// Generate a render pass that draws into that framebuffer and stencil, preserving
	// prior contents of both (load and store on each attachment).
	_frameBufferRenderPass = [[MTLRenderPassDescriptor alloc] init];
	_frameBufferRenderPass.colorAttachments[0].texture = _frameBuffer;
	_frameBufferRenderPass.colorAttachments[0].loadAction = MTLLoadActionLoad;
	_frameBufferRenderPass.colorAttachments[0].storeAction = MTLStoreActionStore;

	_frameBufferRenderPass.stencilAttachment.clearStencil = 0;
	_frameBufferRenderPass.stencilAttachment.texture = _frameBufferStencil;
	_frameBufferRenderPass.stencilAttachment.loadAction = MTLLoadActionLoad;
	_frameBufferRenderPass.stencilAttachment.storeAction = MTLStoreActionStore;

	// Establish intended stencil usage; it's only to track which pixels haven't been painted
	// at all at the end of every frame. So: always paint, and replace the stored stencil value
	// (which is seeded as 0) with the nominated one (a 1).
	MTLDepthStencilDescriptor *drawStencilDescriptor = [[MTLDepthStencilDescriptor alloc] init];
	drawStencilDescriptor.frontFaceStencil.stencilCompareFunction = MTLCompareFunctionAlways;
	drawStencilDescriptor.frontFaceStencil.depthStencilPassOperation = MTLStencilOperationReplace;
	_drawStencilState = [_view.device newDepthStencilStateWithDescriptor:drawStencilDescriptor];

	// TODO: old framebuffer should be resized onto the new one.
}
|
2020-08-31 00:21:01 +00:00
|
|
|
|
2020-09-01 00:01:59 +00:00
|
|
|
/// Creates or releases the intermediate textures and compute pipeline states required by the
/// current value of @c _pipeline: nothing for direct-to-display; a composition texture plus the
/// finalised-line texture/kernel for S-Video; additionally the separated-luma texture/kernel
/// for composite colour.
- (void)updateModalBuffers {
	// Build a descriptor for any intermediate line texture.
	MTLTextureDescriptor *const lineTextureDescriptor = [MTLTextureDescriptor
		texture2DDescriptorWithPixelFormat:MTLPixelFormatBGRA8Unorm
		width:2048		// This 'should do'.
		height:NumBufferedLines
		mipmapped:NO];
	lineTextureDescriptor.resourceOptions = MTLResourceStorageModePrivate;

	if(_pipeline == Pipeline::DirectToDisplay) {
		// Buffers are not required when outputting direct to display; so if this isn't that then release anything
		// currently being held and return.
		_finalisedLineTexture = nil;
		_finalisedLineState = nil;
		_separatedLumaTexture = nil;
		_separatedLumaState = nil;
		_compositionTexture = nil;
		_compositionRenderPass = nil;
		return;
	}

	// Create a composition texture if one does not yet exist.
	if(!_compositionTexture) {
		lineTextureDescriptor.usage = MTLTextureUsageRenderTarget | MTLTextureUsageShaderRead;
		_compositionTexture = [_view.device newTextureWithDescriptor:lineTextureDescriptor];
	}

	// Grab the shader library; the remaining textures are written by compute kernels.
	id<MTLLibrary> library = [_view.device newDefaultLibrary];
	lineTextureDescriptor.usage = MTLTextureUsageShaderWrite | MTLTextureUsageShaderRead;

	// The finalised texture will definitely exist.
	if(!_finalisedLineTexture) {
		_finalisedLineTexture = [_view.device newTextureWithDescriptor:lineTextureDescriptor];
		_finalisedLineState = [_view.device newComputePipelineStateWithFunction:[library newFunctionWithName:@"filterChromaKernel"] error:nil];
	}

	// A luma separation texture will exist only for composite colour.
	if(_pipeline == Pipeline::CompositeColour) {
		if(!_separatedLumaTexture) {
			_separatedLumaTexture = [_view.device newTextureWithDescriptor:lineTextureDescriptor];
			_separatedLumaState = [_view.device newComputePipelineStateWithFunction:[library newFunctionWithName:@"separateLumaKernel"] error:nil];
		}
	} else {
		_separatedLumaTexture = nil;
		// Bug fix: also release the matching compute pipeline state; previously only the
		// texture was released when switching away from composite colour, leaving a stale
		// kernel state held indefinitely.
		_separatedLumaState = nil;
	}
}
|
|
|
|
|
|
|
|
/// Refreshes the aspect-ratio multiplier, zoom and offset uniforms from the scan target's
/// current modals and the view's current bounds.
- (void)setAspectRatio {
	const auto modals = _scanTarget.modals();
	const auto displayAspectRatio = (_view.bounds.size.width / _view.bounds.size.height);

	// Scale horizontally so that the modal aspect ratio is preserved within this view.
	uniforms()->aspectRatioMultiplier = float(modals.aspect_ratio / displayAspectRatio);

	// Offset so that the visible area's origin maps to the top-left of output.
	uniforms()->offset.x = -modals.visible_area.origin.x;
	uniforms()->offset.y = -modals.visible_area.origin.y;

	// Zoom to fit the visible area: take whichever of fit-to-width and fit-to-height
	// is the more conservative.
	const double widthZoom = (displayAspectRatio / modals.aspect_ratio) / modals.visible_area.size.width;
	const double heightZoom = 1.0 / modals.visible_area.size.height;
	uniforms()->zoom = float(std::min(widthZoom, heightZoom));
}
|
|
|
|
|
2020-08-16 01:24:10 +00:00
|
|
|
- (void)setModals:(const Outputs::Display::ScanTarget::Modals &)modals {
|
2020-08-12 02:11:50 +00:00
|
|
|
//
|
|
|
|
// Populate uniforms.
|
|
|
|
//
|
|
|
|
uniforms()->scale[0] = modals.output_scale.x;
|
|
|
|
uniforms()->scale[1] = modals.output_scale.y;
|
2020-08-16 01:24:10 +00:00
|
|
|
uniforms()->lineWidth = 1.05f / modals.expected_vertical_lines; // TODO: return to 1.0 (or slightly more), once happy.
|
|
|
|
[self setAspectRatio];
|
2020-08-12 02:11:50 +00:00
|
|
|
|
|
|
|
const auto toRGB = to_rgb_matrix(modals.composite_colour_space);
|
|
|
|
uniforms()->toRGB = simd::float3x3(
|
|
|
|
simd::float3{toRGB[0], toRGB[1], toRGB[2]},
|
|
|
|
simd::float3{toRGB[3], toRGB[4], toRGB[5]},
|
|
|
|
simd::float3{toRGB[6], toRGB[7], toRGB[8]}
|
|
|
|
);
|
|
|
|
|
|
|
|
const auto fromRGB = from_rgb_matrix(modals.composite_colour_space);
|
|
|
|
uniforms()->fromRGB = simd::float3x3(
|
|
|
|
simd::float3{fromRGB[0], fromRGB[1], fromRGB[2]},
|
|
|
|
simd::float3{fromRGB[3], fromRGB[4], fromRGB[5]},
|
|
|
|
simd::float3{fromRGB[6], fromRGB[7], fromRGB[8]}
|
|
|
|
);
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
//
|
|
|
|
// Generate input texture.
|
|
|
|
//
|
|
|
|
MTLPixelFormat pixelFormat;
|
|
|
|
_bytesPerInputPixel = size_for_data_type(modals.input_data_type);
|
|
|
|
if(data_type_is_normalised(modals.input_data_type)) {
|
|
|
|
switch(_bytesPerInputPixel) {
|
|
|
|
default:
|
|
|
|
case 1: pixelFormat = MTLPixelFormatR8Unorm; break;
|
|
|
|
case 2: pixelFormat = MTLPixelFormatRG8Unorm; break;
|
|
|
|
case 4: pixelFormat = MTLPixelFormatRGBA8Unorm; break;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
switch(_bytesPerInputPixel) {
|
|
|
|
default:
|
|
|
|
case 1: pixelFormat = MTLPixelFormatR8Uint; break;
|
|
|
|
case 2: pixelFormat = MTLPixelFormatRG8Uint; break;
|
|
|
|
case 4: pixelFormat = MTLPixelFormatRGBA8Uint; break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
MTLTextureDescriptor *const textureDescriptor = [MTLTextureDescriptor
|
|
|
|
texture2DDescriptorWithPixelFormat:pixelFormat
|
|
|
|
width:BufferingScanTarget::WriteAreaWidth
|
|
|
|
height:BufferingScanTarget::WriteAreaHeight
|
|
|
|
mipmapped:NO];
|
|
|
|
textureDescriptor.resourceOptions = SharedResourceOptionsTexture;
|
2020-08-15 01:24:25 +00:00
|
|
|
if(@available(macOS 10.14, *)) {
|
|
|
|
textureDescriptor.allowGPUOptimizedContents = NO;
|
|
|
|
}
|
2020-08-12 02:11:50 +00:00
|
|
|
|
|
|
|
// TODO: the call below is the only reason why this project now requires macOS 10.13; is it all that helpful versus just uploading each frame?
|
|
|
|
const NSUInteger bytesPerRow = BufferingScanTarget::WriteAreaWidth * _bytesPerInputPixel;
|
|
|
|
_writeAreaTexture = [_writeAreaBuffer
|
|
|
|
newTextureWithDescriptor:textureDescriptor
|
|
|
|
offset:0
|
|
|
|
bytesPerRow:bytesPerRow];
|
|
|
|
_totalTextureBytes = bytesPerRow * BufferingScanTarget::WriteAreaHeight;
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
//
|
2020-08-16 01:24:10 +00:00
|
|
|
// Generate scan pipeline.
|
2020-08-12 02:11:50 +00:00
|
|
|
//
|
2020-08-16 01:24:10 +00:00
|
|
|
id<MTLLibrary> library = [_view.device newDefaultLibrary];
|
2020-08-12 02:11:50 +00:00
|
|
|
MTLRenderPipelineDescriptor *pipelineDescriptor = [[MTLRenderPipelineDescriptor alloc] init];
|
2020-08-20 01:20:06 +00:00
|
|
|
|
|
|
|
// Occasions when the composition buffer isn't required are slender:
|
|
|
|
//
|
|
|
|
// (i) input and output are both RGB; or
|
|
|
|
// (ii) output is composite monochrome.
|
2020-09-01 00:01:59 +00:00
|
|
|
const bool isComposition =
|
2020-08-20 01:20:06 +00:00
|
|
|
(
|
|
|
|
(natural_display_type_for_data_type(modals.input_data_type) != Outputs::Display::DisplayType::RGB) ||
|
|
|
|
(modals.display_type != Outputs::Display::DisplayType::RGB)
|
|
|
|
) && modals.display_type != Outputs::Display::DisplayType::CompositeMonochrome;
|
2020-09-01 00:01:59 +00:00
|
|
|
const bool isSVideoOutput = modals.display_type == Outputs::Display::DisplayType::SVideo;
|
2020-08-20 01:20:06 +00:00
|
|
|
|
2020-09-01 00:01:59 +00:00
|
|
|
if(!isComposition) {
|
|
|
|
_pipeline = Pipeline::DirectToDisplay;
|
|
|
|
} else {
|
|
|
|
_pipeline = isSVideoOutput ? Pipeline::SVideo : Pipeline::CompositeColour;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Update intermediate storage.
|
|
|
|
[self updateModalBuffers];
|
|
|
|
|
|
|
|
// TODO: factor in gamma, which may or may not be a factor (it isn't for 1-bit formats).
|
2020-08-20 01:20:06 +00:00
|
|
|
struct FragmentSamplerDictionary {
|
|
|
|
/// Fragment shader that outputs to the composition buffer for composite processing.
|
|
|
|
NSString *const compositionComposite;
|
|
|
|
/// Fragment shader that outputs to the composition buffer for S-Video processing.
|
|
|
|
NSString *const compositionSVideo;
|
|
|
|
|
|
|
|
/// Fragment shader that outputs directly as monochrome composite.
|
|
|
|
NSString *const directComposite;
|
|
|
|
/// Fragment shader that outputs directly as RGB.
|
|
|
|
NSString *const directRGB;
|
|
|
|
};
|
|
|
|
const FragmentSamplerDictionary samplerDictionary[8] = {
|
|
|
|
// Luminance1
|
|
|
|
{@"sampleLuminance1", nullptr, @"sampleLuminance1", nullptr},
|
|
|
|
{@"sampleLuminance8", nullptr, @"sampleLuminance8", nullptr},
|
|
|
|
{@"samplePhaseLinkedLuminance8", nullptr, @"samplePhaseLinkedLuminance8", nullptr},
|
|
|
|
{@"compositeSampleLuminance8Phase8", @"sampleLuminance8Phase8", @"compositeSampleLuminance8Phase8", nullptr},
|
|
|
|
{@"compositeSampleRed1Green1Blue1", @"svideoSampleRed1Green1Blue1", @"compositeSampleRed1Green1Blue1", @"sampleRed1Green1Blue1"},
|
|
|
|
{@"compositeSampleRed2Green2Blue2", @"svideoSampleRed2Green2Blue2", @"compositeSampleRed2Green2Blue2", @"sampleRed2Green2Blue2"},
|
|
|
|
{@"compositeSampleRed4Green4Blue4", @"svideoSampleRed4Green4Blue4", @"compositeSampleRed4Green4Blue4", @"sampleRed4Green4Blue4"},
|
|
|
|
{@"compositeSampleRed8Green8Blue8", @"svideoSampleRed8Green8Blue8", @"compositeSampleRed8Green8Blue8", @"sampleRed8Green8Blue8"},
|
|
|
|
};
|
|
|
|
|
|
|
|
#ifndef NDEBUG
|
|
|
|
// Do a quick check of the names entered above. I don't think this is possible at compile time.
|
|
|
|
for(int c = 0; c < 8; ++c) {
|
|
|
|
if(samplerDictionary[c].compositionComposite) assert([library newFunctionWithName:samplerDictionary[c].compositionComposite]);
|
|
|
|
if(samplerDictionary[c].compositionSVideo) assert([library newFunctionWithName:samplerDictionary[c].compositionSVideo]);
|
|
|
|
if(samplerDictionary[c].directComposite) assert([library newFunctionWithName:samplerDictionary[c].directComposite]);
|
|
|
|
if(samplerDictionary[c].directRGB) assert([library newFunctionWithName:samplerDictionary[c].directRGB]);
|
|
|
|
}
|
|
|
|
#endif
|
2020-09-01 00:01:59 +00:00
|
|
|
|
2020-08-30 00:54:46 +00:00
|
|
|
uniforms()->cyclesMultiplier = 1.0f;
|
2020-09-01 00:01:59 +00:00
|
|
|
if(_pipeline != Pipeline::DirectToDisplay) {
|
2020-09-02 23:13:54 +00:00
|
|
|
// Pick a suitable cycle multiplier. TODO: can I reduce this from a multiple of 4?
|
2020-08-30 00:54:46 +00:00
|
|
|
const float minimumSize = 4.0f * float(modals.colour_cycle_numerator) / float(modals.colour_cycle_denominator);
|
|
|
|
while(uniforms()->cyclesMultiplier * modals.cycles_per_line < minimumSize) {
|
|
|
|
uniforms()->cyclesMultiplier += 1.0f;
|
2020-09-03 00:14:41 +00:00
|
|
|
|
|
|
|
if(uniforms()->cyclesMultiplier * modals.cycles_per_line > 2048) {
|
|
|
|
uniforms()->cyclesMultiplier -= 1.0f;
|
|
|
|
break;
|
|
|
|
}
|
2020-08-30 00:54:46 +00:00
|
|
|
}
|
2020-08-20 01:20:06 +00:00
|
|
|
|
2020-09-01 00:01:59 +00:00
|
|
|
// Create the composition render pass.
|
|
|
|
pipelineDescriptor.colorAttachments[0].pixelFormat = _compositionTexture.pixelFormat;
|
2020-08-20 01:20:06 +00:00
|
|
|
pipelineDescriptor.vertexFunction = [library newFunctionWithName:@"scanToComposition"];
|
|
|
|
pipelineDescriptor.fragmentFunction =
|
|
|
|
[library newFunctionWithName:isSVideoOutput ? samplerDictionary[int(modals.input_data_type)].compositionSVideo : samplerDictionary[int(modals.input_data_type)].compositionComposite];
|
|
|
|
|
|
|
|
_composePipeline = [_view.device newRenderPipelineStateWithDescriptor:pipelineDescriptor error:nil];
|
2020-08-20 01:56:53 +00:00
|
|
|
|
|
|
|
_compositionRenderPass = [[MTLRenderPassDescriptor alloc] init];
|
|
|
|
_compositionRenderPass.colorAttachments[0].texture = _compositionTexture;
|
|
|
|
_compositionRenderPass.colorAttachments[0].loadAction = MTLLoadActionClear;
|
|
|
|
_compositionRenderPass.colorAttachments[0].storeAction = MTLStoreActionStore;
|
2020-08-22 02:41:54 +00:00
|
|
|
_compositionRenderPass.colorAttachments[0].clearColor = MTLClearColorMake(0.0, 0.5, 0.5, 1.0);
|
2020-08-22 01:11:25 +00:00
|
|
|
|
2020-09-01 00:01:59 +00:00
|
|
|
// Create suitable FIR filters.
|
2020-09-01 22:39:52 +00:00
|
|
|
_lineBufferPixelsPerLine = NSUInteger(modals.cycles_per_line) * NSUInteger(uniforms()->cyclesMultiplier);
|
2020-08-22 01:11:25 +00:00
|
|
|
const float colourCyclesPerLine = float(modals.colour_cycle_numerator) / float(modals.colour_cycle_denominator);
|
|
|
|
|
2020-09-02 12:03:10 +00:00
|
|
|
// Generate the chrominance filter.
|
|
|
|
{
|
|
|
|
auto *const chromaCoefficients = uniforms()->chromaCoefficients;
|
2020-09-03 00:14:41 +00:00
|
|
|
SignalProcessing::FIRFilter chrominancefilter(15, float(_lineBufferPixelsPerLine), 0.0f, colourCyclesPerLine * 0.01f);
|
2020-09-02 23:13:54 +00:00
|
|
|
const auto calculatedChromaCoefficients = chrominancefilter.get_coefficients();
|
2020-08-22 02:06:36 +00:00
|
|
|
for(size_t c = 0; c < 8; ++c) {
|
2020-09-04 00:48:44 +00:00
|
|
|
chromaCoefficients[c].y = chromaCoefficients[c].z = calculatedChromaCoefficients[c] * (isSVideoOutput ? 4.0f : 3.0f);
|
2020-09-02 12:03:10 +00:00
|
|
|
chromaCoefficients[c].x = 0.0f;
|
2020-08-22 02:06:36 +00:00
|
|
|
}
|
2020-09-02 12:03:10 +00:00
|
|
|
chromaCoefficients[7].x = 1.0f;
|
2020-09-02 23:13:54 +00:00
|
|
|
|
2020-09-03 17:18:21 +00:00
|
|
|
// Luminance will be very soft as a result of the separation phase; apply a sharpen filter to try to undo that.
|
2020-09-04 00:48:44 +00:00
|
|
|
// This is applied separately because the first composite processing step is going to select between the nominal
|
|
|
|
// chroma and luma parts to take the place of luminance depending on whether a colour burst was found, and high-pass
|
|
|
|
// filtering the chrominance channel would be visually detrimental.
|
|
|
|
//
|
|
|
|
// The 20 ['Hz' but per line, not per second] is somewhat arbitrary.
|
2020-09-02 23:13:54 +00:00
|
|
|
if(!isSVideoOutput) {
|
2020-09-04 00:48:44 +00:00
|
|
|
SignalProcessing::FIRFilter sharpenFilter(15, float(_lineBufferPixelsPerLine), 20.0f, colourCyclesPerLine);
|
|
|
|
const auto sharpen = sharpenFilter.get_coefficients();
|
2020-09-02 23:13:54 +00:00
|
|
|
for(size_t c = 0; c < 8; ++c) {
|
2020-09-03 17:18:21 +00:00
|
|
|
chromaCoefficients[c].x = sharpen[c];
|
2020-09-02 23:13:54 +00:00
|
|
|
}
|
|
|
|
}
|
2020-08-22 02:06:36 +00:00
|
|
|
}
|
|
|
|
|
2020-09-02 23:13:54 +00:00
|
|
|
// Generate the luminance separation filter.
|
2020-09-02 12:03:10 +00:00
|
|
|
{
|
|
|
|
auto *const luminanceCoefficients = uniforms()->lumaCoefficients;
|
2020-09-04 00:48:44 +00:00
|
|
|
SignalProcessing::FIRFilter lumaPart(15, float(_lineBufferPixelsPerLine), 0.0f, colourCyclesPerLine * 0.6f);
|
2020-09-04 00:53:45 +00:00
|
|
|
SignalProcessing::FIRFilter chromaPart(15, float(_lineBufferPixelsPerLine), 0.0f, colourCyclesPerLine * 1.0f);
|
2020-09-03 17:18:21 +00:00
|
|
|
|
|
|
|
const auto lumaCoefficients = lumaPart.get_coefficients();
|
|
|
|
const auto chromaCoefficients = chromaPart.get_coefficients();
|
|
|
|
for(size_t c = 0; c < 8; ++c) {
|
|
|
|
luminanceCoefficients[c].x = lumaCoefficients[c];
|
|
|
|
luminanceCoefficients[c].y = chromaCoefficients[c];
|
|
|
|
}
|
2020-08-22 02:06:36 +00:00
|
|
|
}
|
2020-08-26 02:05:19 +00:00
|
|
|
|
2020-09-02 12:03:10 +00:00
|
|
|
// Store radians per pixel. TODO: is this now orphaned? Should I keep it anyway?
|
2020-09-01 22:39:52 +00:00
|
|
|
uniforms()->radiansPerPixel = (colourCyclesPerLine * 3.141592654f * 2.0f) / float(_lineBufferPixelsPerLine);
|
2020-08-20 01:20:06 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Build the output pipeline.
|
2020-08-16 01:24:10 +00:00
|
|
|
pipelineDescriptor.colorAttachments[0].pixelFormat = _view.colorPixelFormat;
|
2020-09-01 00:01:59 +00:00
|
|
|
pipelineDescriptor.vertexFunction = [library newFunctionWithName:_pipeline == Pipeline::DirectToDisplay ? @"scanToDisplay" : @"lineToDisplay"];
|
2020-08-12 02:11:50 +00:00
|
|
|
|
2020-09-01 00:01:59 +00:00
|
|
|
if(_pipeline != Pipeline::DirectToDisplay) {
|
2020-09-02 01:37:36 +00:00
|
|
|
pipelineDescriptor.fragmentFunction = [library newFunctionWithName:@"interpolateFragment"];
|
2020-08-20 01:20:06 +00:00
|
|
|
} else {
|
|
|
|
const bool isRGBOutput = modals.display_type == Outputs::Display::DisplayType::RGB;
|
|
|
|
pipelineDescriptor.fragmentFunction =
|
|
|
|
[library newFunctionWithName:isRGBOutput ? samplerDictionary[int(modals.input_data_type)].directRGB : samplerDictionary[int(modals.input_data_type)].directComposite];
|
2020-08-12 02:11:50 +00:00
|
|
|
}
|
|
|
|
|
2020-08-12 23:34:07 +00:00
|
|
|
// Enable blending.
|
|
|
|
pipelineDescriptor.colorAttachments[0].blendingEnabled = YES;
|
|
|
|
pipelineDescriptor.colorAttachments[0].sourceRGBBlendFactor = MTLBlendFactorSourceAlpha;
|
|
|
|
pipelineDescriptor.colorAttachments[0].destinationRGBBlendFactor = MTLBlendFactorOneMinusSourceAlpha;
|
|
|
|
|
2020-08-16 20:42:32 +00:00
|
|
|
// Set stencil format.
|
|
|
|
pipelineDescriptor.stencilAttachmentPixelFormat = MTLPixelFormatStencil8;
|
|
|
|
|
|
|
|
// Finish.
|
2020-08-20 01:20:06 +00:00
|
|
|
_outputPipeline = [_view.device newRenderPipelineStateWithDescriptor:pipelineDescriptor error:nil];
|
2020-08-12 02:11:50 +00:00
|
|
|
}
|
|
|
|
|
2020-08-20 01:20:06 +00:00
|
|
|
/// Draws the buffered content in the half-open range [start, end) into the persistent frame buffer.
/// Depending on the current pipeline this outputs either finalised lines (filtered pipelines) or
/// raw scans (direct-to-display); in both cases the stencil buffer records every pixel touched so
/// that the frame cleaner can later repaint only untouched areas.
/// @param start Index of the first line/scan to draw.
/// @param end Index one beyond the last line/scan to draw; may be less than @c start if the range wraps.
/// @param commandBuffer The command buffer into which to encode the drawing.
- (void)outputFrom:(size_t)start to:(size_t)end commandBuffer:(id<MTLCommandBuffer>)commandBuffer {
	// Empty range: nothing to encode.
	if(start == end) return;

	// Generate a command encoder for the view.
	id<MTLRenderCommandEncoder> encoder = [commandBuffer renderCommandEncoderWithDescriptor:_frameBufferRenderPass];

	// Final output. Could be scans or lines.
	[encoder setRenderPipelineState:_outputPipeline];

	// Pick vertex/fragment sources to match the active pipeline: filtered pipelines draw
	// finalised lines; direct-to-display draws scans sampled straight from the write area.
	if(_pipeline != Pipeline::DirectToDisplay) {
		[encoder setFragmentTexture:_finalisedLineTexture atIndex:0];
		[encoder setVertexBuffer:_linesBuffer offset:0 atIndex:0];
	} else {
		[encoder setFragmentTexture:_writeAreaTexture atIndex:0];
		[encoder setVertexBuffer:_scansBuffer offset:0 atIndex:0];
	}
	[encoder setVertexBuffer:_uniformsBuffer offset:0 atIndex:1];
	[encoder setFragmentBuffer:_uniformsBuffer offset:0 atIndex:0];

	// Mark every drawn pixel in the stencil buffer (reference value 1), so the frame
	// cleaner can later identify untouched areas.
	[encoder setDepthStencilState:_drawStencilState];
	[encoder setStencilReferenceValue:1];
#ifndef NDEBUG
	// Quick aid for debugging: the stencil test is predicated on front-facing pixels, so make sure they're
	// being generated.
	[encoder setCullMode:MTLCullModeBack];
#endif

	// Draw each item of the range as an instanced quad; RangePerform splits a
	// possibly-wrapping range into at most two contiguous instance runs.
#define OutputStrips(start, size)	[encoder drawPrimitives:MTLPrimitiveTypeTriangleStrip vertexStart:0 vertexCount:4 instanceCount:size baseInstance:start]
	RangePerform(start, end, (_pipeline != Pipeline::DirectToDisplay ? NumBufferedLines : NumBufferedScans), OutputStrips);
#undef OutputStrips

	// Complete encoding.
	[encoder endEncoding];
}
|
|
|
|
|
|
|
|
/// Encodes the 'frame cleaner': a full-screen quad that repaints only those pixels the stencil
/// buffer indicates were NOT touched during the frame just drawn, and resets the stencil for the
/// next frame. Together with ordinary drawing this guarantees every pixel is refreshed each frame.
/// @param commandBuffer The command buffer into which to encode the cleaning pass.
- (void)outputFrameCleanerToCommandBuffer:(id<MTLCommandBuffer>)commandBuffer {
	// Generate a command encoder for the view.
	id<MTLRenderCommandEncoder> encoder = [commandBuffer renderCommandEncoderWithDescriptor:_frameBufferRenderPass];

	// Drawing. Just scans.
	// The clear stencil state passes only where the stencil value is still 0,
	// i.e. pixels untouched this frame.
	[encoder setRenderPipelineState:_clearPipeline];
	[encoder setDepthStencilState:_clearStencilState];
	[encoder setStencilReferenceValue:0];

	// The frame buffer is both source and destination: untouched pixels are decayed/overdrawn in place.
	[encoder setVertexTexture:_frameBuffer atIndex:0];
	[encoder setFragmentTexture:_frameBuffer atIndex:0];

	[encoder drawPrimitives:MTLPrimitiveTypeTriangleStrip vertexStart:0 vertexCount:4];
	[encoder endEncoding];
}
|
|
|
|
|
2020-09-01 22:39:52 +00:00
|
|
|
/// Draws all scans within @c outputArea into the composition texture, as line primitives with a
/// common clock. This is the first stage of the S-Video/composite pipelines; subsequent compute
/// passes filter the composition buffer into the finalised line texture.
/// @param outputArea The range of scans (possibly wrapping the scan buffer) to compose.
/// @param commandBuffer The command buffer into which to encode the composition pass.
- (void)composeOutputArea:(const BufferingScanTarget::OutputArea &)outputArea commandBuffer:(id<MTLCommandBuffer>)commandBuffer {
	// Output all scans to the composition buffer.
	const id<MTLRenderCommandEncoder> encoder = [commandBuffer renderCommandEncoderWithDescriptor:_compositionRenderPass];
	[encoder setRenderPipelineState:_composePipeline];

	// Vertex stage: scan endpoints plus uniforms; the composition texture is supplied so the
	// vertex function can place scans by composition row.
	[encoder setVertexBuffer:_scansBuffer offset:0 atIndex:0];
	[encoder setVertexBuffer:_uniformsBuffer offset:0 atIndex:1];
	[encoder setVertexTexture:_compositionTexture atIndex:0];

	// Fragment stage: samples source pixels from the write-area texture.
	[encoder setFragmentBuffer:_uniformsBuffer offset:0 atIndex:0];
	[encoder setFragmentTexture:_writeAreaTexture atIndex:0];

	// Each scan is drawn as a two-vertex line; RangePerform splits a possibly-wrapping
	// scan range into at most two contiguous instance runs.
#define OutputScans(start, size)	[encoder drawPrimitives:MTLPrimitiveTypeLine vertexStart:0 vertexCount:2 instanceCount:size baseInstance:start]
	RangePerform(outputArea.start.scan, outputArea.end.scan, NumBufferedScans, OutputScans);
#undef OutputScans
	[encoder endEncoding];
}
|
|
|
|
|
2020-09-02 02:11:48 +00:00
|
|
|
/// Returns a small MTLBuffer whose contents hold @c offset as an int, for use as a compute-shader
/// argument. Buffers are taken from a ring so that several dispatches in flight within one frame
/// each keep a distinct, stable offset value.
/// @param offset The line offset to store.
/// @return The ring buffer now holding @c offset.
- (id<MTLBuffer>)bufferForOffset:(size_t)offset {
	// Take the next buffer from the ring and write the requested offset into it.
	const auto offsetBuffer = _lineOffsetBuffers[_lineOffsetBuffer];
	auto *const contents = reinterpret_cast<int *>(offsetBuffer.contents);
	*contents = int(offset);

	// Advance the ring position for the next caller, then hand the populated buffer back.
	_lineOffsetBuffer = (_lineOffsetBuffer + 1) % NumBufferedLines;
	return offsetBuffer;
}
|
|
|
|
|
|
|
|
/// Binds @c offsetBuffer and @c pipelineState to @c encoder, then dispatches a width × height
/// compute grid using Apple's recommended threadgroup sizing.
/// @param encoder The compute encoder to dispatch into.
/// @param pipelineState The compute pipeline to run.
/// @param width Grid width, in threads.
/// @param height Grid height, in threads.
/// @param offsetBuffer A buffer holding the line offset, bound at index 1.
- (void)dispatchComputeCommandEncoder:(id<MTLComputeCommandEncoder>)encoder pipelineState:(id<MTLComputePipelineState>)pipelineState width:(NSUInteger)width height:(NSUInteger)height offsetBuffer:(id<MTLBuffer>)offsetBuffer {
	[encoder setBuffer:offsetBuffer offset:0 atIndex:1];

	// Threadgroup sizing follows the recommendations at
	// https://developer.apple.com/documentation/metal/calculating_threadgroup_and_grid_sizes ;
	// no independent judgement is exercised here.
	const NSUInteger executionWidth = pipelineState.threadExecutionWidth;
	const NSUInteger rowsPerGroup = pipelineState.maxTotalThreadsPerThreadgroup / executionWidth;
	const MTLSize groupSize = MTLSizeMake(executionWidth, rowsPerGroup, 1);
	const MTLSize gridSize = MTLSizeMake(width, height, 1);

	// Select the pipeline state and dispatch the drawing. Which may slightly overdraw.
	[encoder setComputePipelineState:pipelineState];
	[encoder dispatchThreads:gridSize threadsPerThreadgroup:groupSize];
}
|
|
|
|
|
2020-08-16 20:42:32 +00:00
|
|
|
/// Pulls any pending modals and output from the scan target, and encodes all GPU work required to
/// bring the persistent frame buffer up to date: composition and filtering passes (for the
/// composite/S-Video pipelines) plus final painting, broken up per frame with frame-cleaner passes.
/// Always announces completion of the consumed output area back to the scan target, even when there
/// was nothing to draw.
- (void)updateFrameBuffer {
	// TODO: rethink BufferingScanTarget::perform. Is it now really just for guarding the modals?
	_scanTarget.perform([=] {
		const Outputs::Display::ScanTarget::Modals *const newModals = _scanTarget.new_modals();
		if(newModals) {
			[self setModals:*newModals];
		}
	});

	@synchronized(self) {
		// No render pass yet implies modals have not been received; nothing can be drawn.
		if(!_frameBufferRenderPass) return;

		const auto outputArea = _scanTarget.get_output_area();

		if(outputArea.end.line != outputArea.start.line) {

			// Ensure texture changes are noted.
			// The write area is 2048 pixels wide; convert (x, y) endpoints into byte offsets so the
			// modified region of the managed buffer can be flushed to the GPU.
			const auto writeAreaModificationStart = size_t(outputArea.start.write_area_x + outputArea.start.write_area_y * 2048) * _bytesPerInputPixel;
			const auto writeAreaModificationEnd = size_t(outputArea.end.write_area_x + outputArea.end.write_area_y * 2048) * _bytesPerInputPixel;
#define FlushRegion(start, size)	[_writeAreaBuffer didModifyRange:NSMakeRange(start, size)]
			RangePerform(writeAreaModificationStart, writeAreaModificationEnd, _totalTextureBytes, FlushRegion);
#undef FlushRegion

			// Obtain a source for render command encoders.
			id<MTLCommandBuffer> commandBuffer = [_commandQueue commandBuffer];

			//
			// Drawing algorithm used below, in broad terms:
			//
			// Maintain a persistent buffer of current CRT state.
			//
			// During each frame, paint to the persistent buffer anything new. Update a stencil buffer to track
			// every pixel so-far touched.
			//
			// At the end of the frame, draw a 'frame cleaner', which is a whole-screen rect that paints over
			// only those areas that the stencil buffer indicates weren't painted this frame.
			//
			// Hence every pixel is touched every frame, regardless of the machine's output.
			//

			switch(_pipeline) {
				case Pipeline::DirectToDisplay: {
					// Output scans directly, broken up by frame.
					// Walk the (possibly wrapping) line range; whenever a line starts a new,
					// completely-preceded frame, flush everything up to it and run the cleaner.
					size_t line = outputArea.start.line;
					size_t scan = outputArea.start.scan;
					while(line != outputArea.end.line) {
						if(_lineMetadataBuffer[line].is_first_in_frame && _lineMetadataBuffer[line].previous_frame_was_complete) {
							[self outputFrom:scan to:_lineMetadataBuffer[line].first_scan commandBuffer:commandBuffer];
							[self outputFrameCleanerToCommandBuffer:commandBuffer];
							scan = _lineMetadataBuffer[line].first_scan;
						}
						line = (line + 1) % NumBufferedLines;
					}
					// Flush whatever remains after the final frame boundary.
					[self outputFrom:scan to:outputArea.end.scan commandBuffer:commandBuffer];
				} break;

				case Pipeline::CompositeColour:
				case Pipeline::SVideo: {
					// Build the composition buffer.
					[self composeOutputArea:outputArea commandBuffer:commandBuffer];

					if(_pipeline == Pipeline::SVideo) {
						// Filter from composition to the finalised line texture.
						// S-Video needs only a single filtering pass, since luminance arrives separated.
						id<MTLComputeCommandEncoder> computeEncoder = [commandBuffer computeCommandEncoder];
						[computeEncoder setTexture:_compositionTexture atIndex:0];
						[computeEncoder setTexture:_finalisedLineTexture atIndex:1];
						[computeEncoder setBuffer:_uniformsBuffer offset:0 atIndex:0];

						// If the line range wraps the buffer, dispatch in two pieces.
						if(outputArea.end.line > outputArea.start.line) {
							[self dispatchComputeCommandEncoder:computeEncoder pipelineState:_finalisedLineState width:_lineBufferPixelsPerLine height:outputArea.end.line - outputArea.start.line offsetBuffer:[self bufferForOffset:outputArea.start.line]];
						} else {
							[self dispatchComputeCommandEncoder:computeEncoder pipelineState:_finalisedLineState width:_lineBufferPixelsPerLine height:NumBufferedLines - outputArea.start.line offsetBuffer:[self bufferForOffset:outputArea.start.line]];
							if(outputArea.end.line) {
								[self dispatchComputeCommandEncoder:computeEncoder pipelineState:_finalisedLineState width:_lineBufferPixelsPerLine height:outputArea.end.line offsetBuffer:[self bufferForOffset:0]];
							}
						}

						[computeEncoder endEncoding];
					} else {
						// Separate luminance.
						// Composite colour needs two passes: first split luma from chroma…
						id<MTLComputeCommandEncoder> computeEncoder = [commandBuffer computeCommandEncoder];
						[computeEncoder setTexture:_compositionTexture atIndex:0];
						[computeEncoder setTexture:_separatedLumaTexture atIndex:1];
						[computeEncoder setBuffer:_uniformsBuffer offset:0 atIndex:0];

						// Keep the offset buffers around so the second pass can reuse the
						// same offsets without advancing the ring again.
						__unsafe_unretained id<MTLBuffer> offsetBuffers[2] = {nil, nil};
						offsetBuffers[0] = [self bufferForOffset:outputArea.start.line];

						if(outputArea.end.line > outputArea.start.line) {
							[self dispatchComputeCommandEncoder:computeEncoder pipelineState:_separatedLumaState width:_lineBufferPixelsPerLine height:outputArea.end.line - outputArea.start.line offsetBuffer:offsetBuffers[0]];
						} else {
							[self dispatchComputeCommandEncoder:computeEncoder pipelineState:_separatedLumaState width:_lineBufferPixelsPerLine height:NumBufferedLines - outputArea.start.line offsetBuffer:offsetBuffers[0]];
							if(outputArea.end.line) {
								offsetBuffers[1] = [self bufferForOffset:0];
								[self dispatchComputeCommandEncoder:computeEncoder pipelineState:_separatedLumaState width:_lineBufferPixelsPerLine height:outputArea.end.line offsetBuffer:offsetBuffers[1]];
							}
						}

						// Filter resulting chrominance.
						// …then filter the separated result into the finalised line texture,
						// over exactly the same line ranges as above.
						[computeEncoder setTexture:_separatedLumaTexture atIndex:0];
						[computeEncoder setTexture:_finalisedLineTexture atIndex:1];
						[computeEncoder setBuffer:_uniformsBuffer offset:0 atIndex:0];

						if(outputArea.end.line > outputArea.start.line) {
							[self dispatchComputeCommandEncoder:computeEncoder pipelineState:_finalisedLineState width:_lineBufferPixelsPerLine height:outputArea.end.line - outputArea.start.line offsetBuffer:offsetBuffers[0]];
						} else {
							[self dispatchComputeCommandEncoder:computeEncoder pipelineState:_finalisedLineState width:_lineBufferPixelsPerLine height:NumBufferedLines - outputArea.start.line offsetBuffer:offsetBuffers[0]];
							if(outputArea.end.line) {
								[self dispatchComputeCommandEncoder:computeEncoder pipelineState:_finalisedLineState width:_lineBufferPixelsPerLine height:outputArea.end.line offsetBuffer:offsetBuffers[1]];
							}
						}

						[computeEncoder endEncoding];
					}

					// Output lines, broken up by frame.
					// Same frame-splitting walk as the direct path, but in units of lines.
					size_t startLine = outputArea.start.line;
					size_t line = outputArea.start.line;
					while(line != outputArea.end.line) {
						if(_lineMetadataBuffer[line].is_first_in_frame && _lineMetadataBuffer[line].previous_frame_was_complete) {
							[self outputFrom:startLine to:line commandBuffer:commandBuffer];
							[self outputFrameCleanerToCommandBuffer:commandBuffer];
							startLine = line;
						}
						line = (line + 1) % NumBufferedLines;
					}
					[self outputFrom:startLine to:outputArea.end.line commandBuffer:commandBuffer];
				} break;
			}

			// Add a callback to update the scan target buffer and commit the drawing.
			[commandBuffer addCompletedHandler:^(id<MTLCommandBuffer> _Nonnull) {
				self->_scanTarget.complete_output_area(outputArea);
			}];
			[commandBuffer commit];
		} else {
			// There was no work, but to be contractually correct, remember to announce completion,
			// and do it after finishing an empty command queue, as a cheap way to ensure this doesn't
			// front run any actual processing. TODO: can I do a better job of that?
			id<MTLCommandBuffer> commandBuffer = [_commandQueue commandBuffer];
			[commandBuffer addCompletedHandler:^(id<MTLCommandBuffer> _Nonnull) {
				self->_scanTarget.complete_output_area(outputArea);
			}];
			[commandBuffer commit];

			// TODO: reenable these and work out how on earth the Master System + Alex Kidd (US) is managing
			// to provide write_area_y = 0, start_x = 0, end_x = 1.
//			assert(outputArea.end.line == outputArea.start.line);
//			assert(outputArea.end.scan == outputArea.start.scan);
//			assert(outputArea.end.write_area_y == outputArea.start.write_area_y);
//			assert(outputArea.end.write_area_x == outputArea.start.write_area_x);
		}
	}
}
|
|
|
|
|
|
|
|
/*!
	@method drawInMTKView:
	@abstract Called on the delegate when it is asked to render into the view.
	@discussion Copies the persistent frame buffer to the view's drawable. Re-entrant calls are
		dropped via an atomic flag, which is cleared only once the previous copy has completed.
*/
- (void)drawInMTKView:(nonnull MTKView *)view {
	// If a previous draw is still in flight, skip this one rather than queueing behind it.
	if(_isDrawing.test_and_set()) {
		return;
	}

	// Schedule a copy from the current framebuffer to the view; blitting is unavailable as the target is a framebuffer texture.
	id<MTLCommandBuffer> commandBuffer = [_commandQueue commandBuffer];

	// Every pixel will be drawn, so don't clear or reload.
	view.currentRenderPassDescriptor.colorAttachments[0].loadAction = MTLLoadActionDontCare;
	id<MTLRenderCommandEncoder> encoder = [commandBuffer renderCommandEncoderWithDescriptor:view.currentRenderPassDescriptor];

	// Full-screen quad sampling the frame buffer via the copy pipeline.
	[encoder setRenderPipelineState:_copyPipeline];
	[encoder setVertexTexture:_frameBuffer atIndex:0];
	[encoder setFragmentTexture:_frameBuffer atIndex:0];

	[encoder drawPrimitives:MTLPrimitiveTypeTriangleStrip vertexStart:0 vertexCount:4];
	[encoder endEncoding];

	// Present, and release the drawing flag only when the GPU has finished this pass.
	[commandBuffer presentDrawable:view.currentDrawable];
	[commandBuffer addCompletedHandler:^(id<MTLCommandBuffer> _Nonnull) {
		self->_isDrawing.clear();
	}];
	[commandBuffer commit];
}
|
|
|
|
|
2020-08-09 02:49:02 +00:00
|
|
|
/// @return A pointer to the underlying C++ scan target, for connection to an emulated machine.
///	The pointer remains owned by, and valid for the lifetime of, this CSScanTarget.
- (Outputs::Display::ScanTarget *)scanTarget {
	return &_scanTarget;
}
|
|
|
|
|
2020-08-04 22:22:14 +00:00
|
|
|
@end
|