diff --git a/OSBindings/Mac/Clock Signal/ScanTarget/CSScanTarget.mm b/OSBindings/Mac/Clock Signal/ScanTarget/CSScanTarget.mm
index ccef3330d..8dd3c08e4 100644
--- a/OSBindings/Mac/Clock Signal/ScanTarget/CSScanTarget.mm
+++ b/OSBindings/Mac/Clock Signal/ScanTarget/CSScanTarget.mm
@@ -426,7 +426,7 @@ using BufferingScanTarget = Outputs::Display::BufferingScanTarget;
 	RangePerform(start, end, (_isUsingCompositionPipeline ? NumBufferedLines : NumBufferedScans), OutputStrips);
 #undef OutputStrips
 
-	// Complete encoding and return.
+	// Complete encoding.
 	[encoder endEncoding];
 }
 
@@ -489,10 +489,12 @@ using BufferingScanTarget = Outputs::Display::BufferingScanTarget;
 
 	id<MTLRenderCommandEncoder> encoder = [commandBuffer renderCommandEncoderWithDescriptor:_compositionRenderPass];
 	[encoder setRenderPipelineState:_composePipeline];
-	[encoder setFragmentTexture:_writeAreaTexture atIndex:0];
 
 	[encoder setVertexBuffer:_scansBuffer offset:0 atIndex:0];
 	[encoder setVertexBuffer:_uniformsBuffer offset:0 atIndex:1];
+	[encoder setVertexTexture:_compositionTexture atIndex:0];
+	[encoder setFragmentBuffer:_uniformsBuffer offset:0 atIndex:0];
+	[encoder setFragmentTexture:_writeAreaTexture atIndex:0];
 
 #define OutputScans(start, size)	[encoder drawPrimitives:MTLPrimitiveTypeLine vertexStart:0 vertexCount:2 instanceCount:size baseInstance:start]
 	RangePerform(outputArea.start.scan, outputArea.end.scan, NumBufferedScans, OutputScans);
diff --git a/OSBindings/Mac/Clock Signal/ScanTarget/ScanTarget.metal b/OSBindings/Mac/Clock Signal/ScanTarget/ScanTarget.metal
index 0dee02f91..6b550f7a9 100644
--- a/OSBindings/Mac/Clock Signal/ScanTarget/ScanTarget.metal
+++ b/OSBindings/Mac/Clock Signal/ScanTarget/ScanTarget.metal
@@ -171,13 +171,15 @@ vertex SourceInterpolator scanToComposition(	constant Uniforms &uniforms [[buffe
 	// Populate result as if direct texture access were available.
 	result.position.x = mix(scans[instanceID].endPoints[0].cyclesSinceRetrace, scans[instanceID].endPoints[1].cyclesSinceRetrace, float(vertexID));
 	result.position.y = scans[instanceID].line;
-	result.position.wz = float2(0.0, 1.0);
+	result.position.zw = float2(0.0, 1.0);
 	result.textureCoordinates.x = mix(scans[instanceID].endPoints[0].dataOffset, scans[instanceID].endPoints[1].dataOffset, float(vertexID));
 	result.textureCoordinates.y = scans[instanceID].dataY;
+	result.colourPhase = mix(scans[instanceID].endPoints[0].compositeAngle, scans[instanceID].endPoints[1].compositeAngle, float(vertexID)) / 32.0;
+	result.colourAmplitude = scans[instanceID].compositeAmplitude;
 
 	// Map position into eye space, allowing for target texture dimensions.
 	// TODO: is this really necessary? Is there nothing like coord::pixel that applies here?
-	result.position.xy = ((result.position.xy + float2(0.5)) / float2(texture.get_width(), texture.get_height())) * float2(2.0) - float2(1.0);
+	result.position.xy = ((result.position.xy + float2(0.5)) / float2(texture.get_width(), texture.get_height())) * float2(2.0, -2.0) + float2(-1.0, 1.0);
 
 	return result;
 }
@@ -302,6 +304,6 @@ fragment float4 copyFragment(CopyInterpolator vert [[stage_in]], texture2d<float> texture [[texture(0)]]) {
+fragment float4 clearFragment() {
 	return float4(0.0, 0.0, 0.0, 0.64);
 }