diff --git a/src/main/scala/millfork/OptimizationPresets.scala b/src/main/scala/millfork/OptimizationPresets.scala
index aa45efff..7754a698 100644
--- a/src/main/scala/millfork/OptimizationPresets.scala
+++ b/src/main/scala/millfork/OptimizationPresets.scala
@@ -53,6 +53,7 @@ object OptimizationPresets {
     LaterOptimizations.DoubleLoadToTheSameRegister,
     LaterOptimizations.DoubleLoadToDifferentRegisters,
     LaterOptimizations.DoubleLoadToTheSameRegister,
+    LaterOptimizations.DoubleLoadToTwoRegistersWhenOneWillBeTrashed,
     EmptyMemoryStoreRemoval,
     AlwaysGoodOptimizations.PoinlessLoadBeforeAnotherLoad,
     AlwaysGoodOptimizations.IdempotentDuplicateRemoval,
@@ -63,6 +64,7 @@ object OptimizationPresets {
     AlwaysGoodOptimizations.PointlessRegisterTransfersBeforeReturn,
     AlwaysGoodOptimizations.PointlessRegisterTransfersBeforeStore,
     AlwaysGoodOptimizations.PointlessStashingToIndexOverShortSafeBranch,
+    AlwaysGoodOptimizations.PointlessStackStashing,
     AlwaysGoodOptimizations.RearrangeMath,
     EmptyMemoryStoreRemoval,
     AlwaysGoodOptimizations.PointlessLoadBeforeReturn,
@@ -101,6 +103,7 @@ object OptimizationPresets {
     AlwaysGoodOptimizations.PointlessMathFromFlow,
     AlwaysGoodOptimizations.PointlessMathFromFlow,
     AlwaysGoodOptimizations.PointlessMathFromFlow,
+    AlwaysGoodOptimizations.IncrementingIndexRegistersAfterTransfer,
 
     AlwaysGoodOptimizations.MathOperationOnTwoIdenticalMemoryOperands,
     LaterOptimizations.UseZeropageAddressingMode,
@@ -125,6 +128,7 @@ object OptimizationPresets {
     AlwaysGoodOptimizations.FlagFlowAnalysis,
     AlwaysGoodOptimizations.IdempotentDuplicateRemoval,
     AlwaysGoodOptimizations.ImpossibleBranchRemoval,
+    AlwaysGoodOptimizations.IncrementingIndexRegistersAfterTransfer,
     AlwaysGoodOptimizations.IndexSequenceOptimization,
     AlwaysGoodOptimizations.MathOperationOnTwoIdenticalMemoryOperands,
     AlwaysGoodOptimizations.ModificationOfJustWrittenValue,
@@ -141,6 +145,7 @@ object OptimizationPresets {
     AlwaysGoodOptimizations.PointlessRegisterTransfers,
     AlwaysGoodOptimizations.PointlessRegisterTransfersBeforeCompare,
     AlwaysGoodOptimizations.PointlessRegisterTransfersBeforeReturn,
+    AlwaysGoodOptimizations.PointlessStackStashing,
     AlwaysGoodOptimizations.PointlessStashingToIndexOverShortSafeBranch,
     AlwaysGoodOptimizations.PoinlessStoreBeforeStore,
     AlwaysGoodOptimizations.RearrangeMath,
diff --git a/src/main/scala/millfork/assembly/opt/AlwaysGoodOptimizations.scala b/src/main/scala/millfork/assembly/opt/AlwaysGoodOptimizations.scala
index 225427b1..c7611e84 100644
--- a/src/main/scala/millfork/assembly/opt/AlwaysGoodOptimizations.scala
+++ b/src/main/scala/millfork/assembly/opt/AlwaysGoodOptimizations.scala
@@ -192,6 +192,51 @@ object AlwaysGoodOptimizations {
     operationPairBuilder(DEY, INY, Not(ConcernsX) & Not(ReadsNOrZ)),
   )
 
+  val PointlessStackStashing = new RuleBasedAssemblyOptimization("Pointless stack stashing",
+    needsFlowInfo = FlowInfoRequirement.NoRequirement,
+    (Elidable & HasOpcode(LDA) & MatchAddrMode(0) & MatchParameter(1)) ~
+      (Elidable & HasOpcode(PHA)) ~
+      (Linear & Not(ConcernsStack) & DoesntChangeIndexingInAddrMode(0) & DoesntChangeMemoryAt(0, 1)).* ~
+      (Elidable & HasOpcode(PLA)) ~~> { code =>
+      code.head :: (code.drop(2).init :+ code.head)
+    },
+    (Elidable & HasOpcode(LDX) & MatchAddrMode(0) & MatchParameter(1)) ~
+      (Elidable & HasOpcode(PHX)) ~
+      (Linear & Not(ConcernsStack) & DoesntChangeIndexingInAddrMode(0) & DoesntChangeMemoryAt(0, 1)).* ~
+      (Elidable & HasOpcode(PLX)) ~~> { code =>
+      code.head :: (code.drop(2).init :+ code.head)
+    },
+    (Elidable & HasOpcode(LDY) & MatchAddrMode(0) & MatchParameter(1)) ~
+      (Elidable & HasOpcode(PHY)) ~
+      (Linear & Not(ConcernsStack) & DoesntChangeIndexingInAddrMode(0) & DoesntChangeMemoryAt(0, 1)).* ~
+      (Elidable & HasOpcode(PLY)) ~~> { code =>
+      code.head :: (code.drop(2).init :+ code.head)
+    },
+  )
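
// Illustrative sketch (not part of the patch): what the rewrite lambda in PointlessStackStashing
// does to a matched window. Plain strings stand in for AssemblyLine values; the PHA/PLA pair is
// dropped and the original load is simply repeated after the protected body, which the pattern
// has already verified does not touch the stack, the indexing of the load's addressing mode, or
// the loaded memory location.
object PointlessStackStashingSketch extends App {
  val window = List("LDA foo", "PHA", "STA bar", "INY", "PLA")
  val rewritten = window.head :: (window.drop(2).init :+ window.head)
  println(rewritten) // List(LDA foo, STA bar, INY, LDA foo)
}
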
+
+  val IncrementingIndexRegistersAfterTransfer = new RuleBasedAssemblyOptimization("Incrementing index registers after transfer",
+    needsFlowInfo = FlowInfoRequirement.BothFlows,
+    (Elidable & HasOpcode(CLC) & HasClear(State.D)) ~
+      (Elidable & HasOpcode(ADC) & HasImmediate(1)) ~
+      (Elidable & HasOpcode(TAY) & DoesntMatterWhatItDoesWith(State.A, State.C)) ~~> { code =>
+      List(AssemblyLine.implied(TAY), AssemblyLine.implied(INY))
+    },
+    (Elidable & HasOpcode(SEC) & HasClear(State.D)) ~
+      (Elidable & HasOpcode(SBC) & HasImmediate(1)) ~
+      (Elidable & HasOpcode(TAY) & DoesntMatterWhatItDoesWith(State.A, State.C)) ~~> { code =>
+      List(AssemblyLine.implied(TAY), AssemblyLine.implied(DEY))
+    },
+    (Elidable & HasOpcode(CLC) & HasClear(State.D)) ~
+      (Elidable & HasOpcode(ADC) & HasImmediate(1)) ~
+      (Elidable & HasOpcode(TAX) & DoesntMatterWhatItDoesWith(State.A, State.C)) ~~> { code =>
+      List(AssemblyLine.implied(TAX), AssemblyLine.implied(INX))
+    },
+    (Elidable & HasOpcode(SEC) & HasClear(State.D)) ~
+      (Elidable & HasOpcode(SBC) & HasImmediate(1)) ~
+      (Elidable & HasOpcode(TAX) & DoesntMatterWhatItDoesWith(State.A, State.C)) ~~> { code =>
+      List(AssemblyLine.implied(TAX), AssemblyLine.implied(DEX))
+    },
+  )
 
   val BranchInPlaceRemoval = new RuleBasedAssemblyOptimization("Branch in place",
     needsFlowInfo = FlowInfoRequirement.NoRequirement,
@@ -253,7 +298,6 @@ object AlwaysGoodOptimizations {
   )
 
   // Optimizing Bxx to JMP is generally bad, but it may allow for better optimizations later
-  // It's okay to undo it at a later stage, but it's not done yet
   val FlagFlowAnalysis = new RuleBasedAssemblyOptimization("Flag flow analysis",
     needsFlowInfo = FlowInfoRequirement.ForwardFlow,
     (HasSet(State.C) & HasOpcode(SEC) & Elidable) ~~> (_ => Nil),
diff --git a/src/main/scala/millfork/assembly/opt/LaterOptimizations.scala b/src/main/scala/millfork/assembly/opt/LaterOptimizations.scala
index 3a0de139..2e50fa36 100644
--- a/src/main/scala/millfork/assembly/opt/LaterOptimizations.scala
+++ b/src/main/scala/millfork/assembly/opt/LaterOptimizations.scala
@@ -14,6 +14,12 @@
 import millfork.env.{Constant, NormalFunction, NumericConstant}
 
 //noinspection ZeroIndexToHead
 object LaterOptimizations {
+  private val LdxAddrModes = Set(Immediate, Absolute, ZeroPage, ZeroPageY, AbsoluteY)
+  private val LdyAddrModes = Set(Immediate, Absolute, ZeroPage, ZeroPageX, AbsoluteX)
+  private val StxAddrModes = Set(Absolute, ZeroPage, ZeroPageY)
+  private val StyAddrModes = Set(Absolute, ZeroPage, ZeroPageX)
+  private val StaAddrModes = Set(Absolute, ZeroPage, ZeroPageX, AbsoluteX, IndexedY, IndexedX, AbsoluteY)
+  private val CpxyAddrModes = Set(Immediate, Absolute, ZeroPage)
 
   // This optimization tends to prevent later Variable To Register Optimization,
   // so run this only after it's pretty sure V2RO won't happen any more
@@ -29,6 +35,54 @@ object LaterOptimizations {
     TwoDifferentLoadsWhoseFlagsWillNotBeChecked(LDY, Not(ChangesY), LDA, TYA),
   )
 
+  private def a2x(line: AssemblyLine) = line.opcode match {
+    case LDA => line.copy(opcode = LDX)
+    case STA => line.copy(opcode = STX)
+    case CMP => line.copy(opcode = CPX)
+    case INC => line.copy(opcode = INX)
+    case DEC => line.copy(opcode = DEX)
+  }
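
// Illustrative sketch (not part of the patch): the effect of the a2x-style rewrites used by
// DoubleLoadToTwoRegistersWhenOneWillBeTrashed below. With plain strings standing in for
// AssemblyLine values, a matched LDA/STA/TAX window loses its trailing transfer (or redundant
// reload) via `init`, and the remaining accumulator opcodes are renamed to their X-register
// counterparts; StxAddrModes/StyAddrModes restrict the stores beforehand so every renamed
// instruction stays encodable.
object DoubleLoadRewriteSketch extends App {
  def a2x(mnemonic: String): String = mnemonic match {
    case "LDA" => "LDX"
    case "STA" => "STX"
    case "CMP" => "CPX"
    case other => other
  }
  val window = List("LDA count", "STA backup", "TAX")
  val rewritten = window.init.map(line => a2x(line.take(3)) + line.drop(3))
  println(rewritten) // List(LDX count, STX backup)
}
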
+
+  private def a2y(line: AssemblyLine) = line.opcode match {
+    case LDA => line.copy(opcode = LDY)
+    case STA => line.copy(opcode = STY)
+    case CMP => line.copy(opcode = CPY)
+    case INC => line.copy(opcode = INY)
+    case DEC => line.copy(opcode = DEY)
+  }
+
+  private def x2a(line: AssemblyLine) = line.opcode match {
+    case LDX => line.copy(opcode = LDA)
+    case STX => line.copy(opcode = STA)
+    case CPX => line.copy(opcode = CMP)
+    case INX => line.copy(opcode = INC)
+    case DEX => line.copy(opcode = DEC)
+  }
+
+  private def y2a(line: AssemblyLine) = line.opcode match {
+    case LDY => line.copy(opcode = LDA)
+    case STY => line.copy(opcode = STA)
+    case CPY => line.copy(opcode = CMP)
+    case INY => line.copy(opcode = INC)
+    case DEY => line.copy(opcode = DEC)
+  }
+
+  val DoubleLoadToTwoRegistersWhenOneWillBeTrashed = new RuleBasedAssemblyOptimization("Double load to two registers when one will be trashed",
+    needsFlowInfo = FlowInfoRequirement.BackwardFlow,
+    (Elidable & HasOpcode(LDA) & MatchAddrMode(0) & MatchParameter(1) & Not(ConcernsX)) ~
+      (Elidable & HasOpcode(STA) & HasAddrModeIn(StxAddrModes) & DoesNotConcernMemoryAt(0, 1) & Not(ConcernsX)).+ ~
+      (Elidable & (HasOpcode(TAX) | HasOpcode(LDA) & MatchAddrMode(0) & MatchParameter(1) & Not(ConcernsX)) & DoesntMatterWhatItDoesWith(State.A)) ~~> (_.init.map(a2x)),
+    (Elidable & HasOpcode(LDA) & MatchAddrMode(0) & MatchParameter(1) & Not(ConcernsY)) ~
+      (Elidable & HasOpcode(STA) & HasAddrModeIn(StyAddrModes) & DoesNotConcernMemoryAt(0, 1) & Not(ConcernsY)).+ ~
+      (Elidable & (HasOpcode(TAY) | HasOpcode(LDA) & MatchAddrMode(0) & MatchParameter(1) & Not(ConcernsY)) & DoesntMatterWhatItDoesWith(State.A)) ~~> (_.init.map(a2y)),
+    (Elidable & HasOpcode(LDX) & MatchAddrMode(0) & MatchParameter(1)) ~
+      (Elidable & HasOpcode(STX) & HasAddrModeIn(StaAddrModes) & DoesNotConcernMemoryAt(0, 1)).+ ~
+      (Elidable & (HasOpcode(TXA) | HasOpcode(LDX) & MatchAddrMode(0) & MatchParameter(1)) & DoesntMatterWhatItDoesWith(State.X)) ~~> (_.init.map(x2a)),
+    (Elidable & HasOpcode(LDY) & MatchAddrMode(0) & MatchParameter(1)) ~
+      (Elidable & HasOpcode(STY) & HasAddrModeIn(StaAddrModes) & DoesNotConcernMemoryAt(0, 1)).+ ~
+      (Elidable & (HasOpcode(TYA) | HasOpcode(LDY) & MatchAddrMode(0) & MatchParameter(1)) & DoesntMatterWhatItDoesWith(State.Y)) ~~> (_.init.map(y2a)),
+  )
+
   private def TwoDifferentLoadsWithNoFlagChangeInBetween(opcode1: Opcode.Value, middle: AssemblyLinePattern, opcode2: Opcode.Value, transferOpcode: Opcode.Value) = {
     (HasOpcode(opcode1) & MatchAddrMode(0) & MatchParameter(1)) ~
       (LinearOrLabel & Not(ChangesMemory) & middle & Not(HasOpcode(opcode2))).* ~
@@ -72,24 +126,17 @@
   }
 
   //noinspection ZeroIndexToHead
-  private def InterleavedImmediateLoads(load: Opcode.Value, store: Opcode.Value) = {
-    (Elidable & HasOpcode(load) & MatchImmediate(0)) ~
-      (Elidable & HasOpcode(store) & HasAddrModeIn(Set(Absolute, ZeroPage)) & MatchParameter(8)) ~
-      (Elidable & HasOpcode(load) & MatchImmediate(1)) ~
-      (Elidable & HasOpcode(store) & HasAddrModeIn(Set(Absolute, ZeroPage)) & MatchParameter(9) & DontMatchParameter(8)) ~
-      (Elidable & HasOpcode(load) & MatchImmediate(0)) ~~> { c =>
-      List(c(2), c(3), c(0), c(1))
-    }
-  }
-
-  //noinspection ZeroIndexToHead
-  private def InterleavedAbsoluteLoads(load: Opcode.Value, store: Opcode.Value) = {
-    (Elidable & HasOpcode(load) & HasAddrModeIn(Set(Absolute, ZeroPage)) & MatchParameter(0)) ~
-      (Elidable & HasOpcode(store) & HasAddrModeIn(Set(Absolute, ZeroPage)) & MatchParameter(8) & DontMatchParameter(0)) ~
-      (Elidable & HasOpcode(load) & HasAddrModeIn(Set(Absolute, ZeroPage)) & MatchParameter(1) & DontMatchParameter(8) & DontMatchParameter(0)) ~
-      (Elidable & HasOpcode(store) & HasAddrModeIn(Set(Absolute, ZeroPage)) & MatchParameter(9) & DontMatchParameter(8) & DontMatchParameter(1) & DontMatchParameter(0)) ~
-      (Elidable & HasOpcode(load) & HasAddrModeIn(Set(Absolute, ZeroPage)) & MatchParameter(0)) ~~> { c =>
-      List(c(2), c(3), c(0), c(1))
+  private def InterleavedLoads(load: Opcode.Value, store: Opcode.Value) = {
+    (Elidable & HasOpcode(load) & MatchAddrMode(0) & MatchParameter(1)).capture(12) ~
+      (Elidable & HasOpcode(store)).+.capture(10) ~
+      (Elidable & HasOpcode(load) & MatchAddrMode(2) & MatchParameter(3) & DoesNotConcernMemoryAt(0, 1)).capture(13) ~
+      (Elidable & HasOpcode(store) & DoesNotConcernMemoryAt(0, 1) & DoesNotConcernMemoryAt(2, 3)).+.capture(11) ~
+      (Elidable & HasOpcode(load) & MatchAddrMode(0) & MatchParameter(1)) ~
+      WhereNoMemoryAccessOverlapBetweenTwoLineLists(10, 11) ~~> { (_, ctx) =>
+      List(ctx.get[List[AssemblyLine]](13),
+        ctx.get[List[AssemblyLine]](11),
+        ctx.get[List[AssemblyLine]](12),
+        ctx.get[List[AssemblyLine]](10)).flatten
     }
   }
 
@@ -106,12 +153,9 @@
     TwoIdenticalLoadsWhoseFlagsWillNotBeChecked(LDX, Not(ChangesX)),
     TwoIdenticalLoadsWhoseFlagsWillNotBeChecked(LDY, Not(ChangesY)),
     TwoIdenticalLoadsWhoseFlagsWillNotBeChecked(LAX, Not(ChangesA) & Not(ChangesX)),
-    InterleavedImmediateLoads(LDA, STA),
-    InterleavedImmediateLoads(LDX, STX),
-    InterleavedImmediateLoads(LDY, STY),
-    InterleavedAbsoluteLoads(LDA, STA),
-    InterleavedAbsoluteLoads(LDX, STX),
-    InterleavedAbsoluteLoads(LDY, STY),
+    InterleavedLoads(LDA, STA),
+    InterleavedLoads(LDX, STX),
+    InterleavedLoads(LDY, STY),
   )
 
   private def pointlessLoadAfterStore(store: Opcode.Value, load: Opcode.Value, addrMode: AddrMode.Value, meantime: AssemblyLinePattern = Anything) = {
@@ -229,12 +273,6 @@
 
-  private val LdxAddrModes = Set(Immediate, Absolute, ZeroPage, ZeroPageY, AbsoluteY)
-  private val LdyAddrModes = Set(Immediate, Absolute, ZeroPage, ZeroPageX, AbsoluteX)
-  private val StxAddrModes = Set(Absolute, ZeroPage, ZeroPageY)
-  private val StyAddrModes = Set(Absolute, ZeroPage, ZeroPageX)
-  private val CpxyAddrModes = Set(Immediate, Absolute, ZeroPage)
-
   private def incDecThroughIndexRegister(amount: Int, dec: Boolean, carrySet: Boolean, useX: Boolean) = {
     val ldAddrModes = if (useX) LdxAddrModes else LdyAddrModes
     val stAddrModes = if (useX) StxAddrModes else StyAddrModes
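
// Illustrative sketch (not part of the patch): the reordering InterleavedLoads performs, with
// plain string lists standing in for the captured groups (12 = first load, 10 = its stores,
// 13 = second load, 11 = its stores). The second load/store group is hoisted above the first,
// so the final re-load of the first value becomes redundant and is dropped; the
// WhereNoMemoryAccessOverlapBetweenTwoLineLists guard is what makes swapping the two store
// groups safe.
object InterleavedLoadsSketch extends App {
  val firstLoad = List("LDA p")              // capture 12
  val firstStores = List("STA a", "STA b")   // capture 10
  val secondLoad = List("LDA q")             // capture 13
  val secondStores = List("STA c")           // capture 11
  // The matched window also ends with a second "LDA p", which the rewrite discards.
  val rewritten = List(secondLoad, secondStores, firstLoad, firstStores).flatten
  println(rewritten) // List(LDA q, STA c, LDA p, STA a, STA b)
}
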
diff --git a/src/main/scala/millfork/assembly/opt/RuleBasedAssemblyOptimization.scala b/src/main/scala/millfork/assembly/opt/RuleBasedAssemblyOptimization.scala
index bc4e372c..09b64a6d 100644
--- a/src/main/scala/millfork/assembly/opt/RuleBasedAssemblyOptimization.scala
+++ b/src/main/scala/millfork/assembly/opt/RuleBasedAssemblyOptimization.scala
@@ -195,6 +195,60 @@ trait AssemblyPattern {
   def capture(i: Int) = Capture(i, this)
 
   def captureLength(i: Int) = CaptureLength(i, this)
+
+  protected def memoryAccessDoesntOverlap(a1: AddrMode.Value, p1: Constant, a2: AddrMode.Value, p2: Constant): Boolean = {
+    import AddrMode._
+    val badAddrModes = Set(IndexedX, IndexedY, ZeroPageIndirect, AbsoluteIndexedX)
+    if (badAddrModes(a1) || badAddrModes(a2)) return false
+    val goodAddrModes = Set(Implied, Immediate, Relative)
+    if (goodAddrModes(a1) || goodAddrModes(a2)) return true
+
+    def handleKnownDistance(distance: Short): Boolean = {
+      val indexingAddrModes = Set(AbsoluteIndexedX, AbsoluteX, ZeroPageX, AbsoluteY, ZeroPageY)
+      val a1Indexing = indexingAddrModes(a1)
+      val a2Indexing = indexingAddrModes(a2)
+      (a1Indexing, a2Indexing) match {
+        case (false, false) => distance != 0
+        case (true, false) => distance > 255 || distance < 0
+        case (false, true) => distance > 0 || distance < -255
+        case (true, true) => distance > 255 || distance < -255
+      }
+    }
+
+    (p1.quickSimplify, p2.quickSimplify) match {
+      case (NumericConstant(n1, _), NumericConstant(n2, _)) =>
+        handleKnownDistance((n2 - n1).toShort)
+      case (a, CompoundConstant(MathOperator.Plus, b, NumericConstant(distance, _))) if a.quickSimplify == b.quickSimplify =>
+        handleKnownDistance(distance.toShort)
+      case (CompoundConstant(MathOperator.Plus, a, NumericConstant(distance, _)), b) if a.quickSimplify == b.quickSimplify =>
+        handleKnownDistance((-distance).toShort)
+      case (a, CompoundConstant(MathOperator.Minus, b, NumericConstant(distance, _))) if a.quickSimplify == b.quickSimplify =>
+        handleKnownDistance((-distance).toShort)
+      case (CompoundConstant(MathOperator.Minus, a, NumericConstant(distance, _)), b) if a.quickSimplify == b.quickSimplify =>
+        handleKnownDistance(distance.toShort)
+      case (MemoryAddressConstant(_: ThingInMemory), NumericConstant(_, _)) =>
+        true // TODO: ???
+      case (NumericConstant(_, _), MemoryAddressConstant(_: ThingInMemory)) =>
+        true // TODO: ???
+      case (CompoundConstant(MathOperator.Plus | MathOperator.Minus, MemoryAddressConstant(a: ThingInMemory), NumericConstant(_, _)), NumericConstant(_, _)) =>
+        true // TODO: ???
+      case (NumericConstant(_, _), CompoundConstant(MathOperator.Plus | MathOperator.Minus, MemoryAddressConstant(a: ThingInMemory), NumericConstant(_, _))) =>
+        true // TODO: ???
+      case (MemoryAddressConstant(a: ThingInMemory), MemoryAddressConstant(b: ThingInMemory)) =>
+        a.name.takeWhile(_ != '.') != b.name.takeWhile(_ != '.') // TODO: ???
+      case (CompoundConstant(MathOperator.Plus | MathOperator.Minus, MemoryAddressConstant(a: ThingInMemory), NumericConstant(_, _)),
+      MemoryAddressConstant(b: ThingInMemory)) =>
+        a.name.takeWhile(_ != '.') != b.name.takeWhile(_ != '.') // TODO: ???
+      case (MemoryAddressConstant(a: ThingInMemory),
+      CompoundConstant(MathOperator.Plus | MathOperator.Minus, MemoryAddressConstant(b: ThingInMemory), NumericConstant(_, _))) =>
+        a.name.takeWhile(_ != '.') != b.name.takeWhile(_ != '.') // TODO: ???
+      case (CompoundConstant(MathOperator.Plus | MathOperator.Minus, MemoryAddressConstant(a: ThingInMemory), NumericConstant(_, _)),
+      CompoundConstant(MathOperator.Plus | MathOperator.Minus, MemoryAddressConstant(b: ThingInMemory), NumericConstant(_, _))) =>
+        a.name.takeWhile(_ != '.') != b.name.takeWhile(_ != '.') // TODO: ???
+      case _ =>
+        false
+    }
+  }
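
// Illustrative sketch (not part of the patch) of the distance test inside handleKnownDistance:
// an operand indexed by X or Y may touch anything from its base address up to base + 255, so two
// accesses with a known distance are only provably disjoint when the gap clears that window on
// the appropriate side.
object OverlapDistanceSketch extends App {
  def provablyDisjoint(distance: Int, firstIndexed: Boolean, secondIndexed: Boolean): Boolean =
    (firstIndexed, secondIndexed) match {
      case (false, false) => distance != 0
      case (true, false) => distance > 255 || distance < 0
      case (false, true) => distance > 0 || distance < -255
      case (true, true) => distance > 255 || distance < -255
    }
  println(provablyDisjoint(3, firstIndexed = false, secondIndexed = false)) // true: two distinct plain cells
  println(provablyDisjoint(3, firstIndexed = true, secondIndexed = false))  // false: base+X may reach the other cell
  println(provablyDisjoint(300, firstIndexed = true, secondIndexed = true)) // true: the indexed windows cannot meet
}
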
 }
 
 case class Capture(i: Int, pattern: AssemblyPattern) extends AssemblyPattern {
@@ -352,58 +406,20 @@ trait AssemblyLinePattern extends AssemblyPattern {
   def |(x: AssemblyLinePattern): AssemblyLinePattern = Either(this, x)
 
   def &(x: AssemblyLinePattern): AssemblyLinePattern = Both(this, x)
-
-  protected def memoryAccessDoesntOverlap(a1: AddrMode.Value, p1: Constant, a2: AddrMode.Value, p2: Constant): Boolean = {
-    import AddrMode._
-    val badAddrModes = Set(IndexedX, IndexedY, ZeroPageIndirect, AbsoluteIndexedX)
-    if (badAddrModes(a1) || badAddrModes(a2)) return false
-    val goodAddrModes = Set(Implied, Immediate, Relative)
-    if (goodAddrModes(a1) || goodAddrModes(a2)) return true
-
-    def handleKnownDistance(distance: Short): Boolean = {
-      val indexingAddrModes = Set(AbsoluteIndexedX, AbsoluteX, ZeroPageX, AbsoluteY, ZeroPageY)
-      val a1Indexing = indexingAddrModes(a1)
-      val a2Indexing = indexingAddrModes(a2)
-      (a1Indexing, a2Indexing) match {
-        case (false, false) => distance != 0
-        case (true, false) => distance > 255 || distance < 0
-        case (false, true) => distance > 0 || distance < -255
-        case (true, true) => distance > 255 || distance < -255
-      }
-    }
-
-    (p1.quickSimplify, p2.quickSimplify) match {
-      case (NumericConstant(n1, _), NumericConstant(n2, _)) =>
-        handleKnownDistance((n2 - n1).toShort)
-      case (a, CompoundConstant(MathOperator.Plus, b, NumericConstant(distance, _))) if a.quickSimplify == b.quickSimplify =>
-        handleKnownDistance(distance.toShort)
-      case (CompoundConstant(MathOperator.Plus, a, NumericConstant(distance, _)), b) if a.quickSimplify == b.quickSimplify =>
-        handleKnownDistance((-distance).toShort)
-      case (a, CompoundConstant(MathOperator.Minus, b, NumericConstant(distance, _))) if a.quickSimplify == b.quickSimplify =>
-        handleKnownDistance((-distance).toShort)
-      case (CompoundConstant(MathOperator.Minus, a, NumericConstant(distance, _)), b) if a.quickSimplify == b.quickSimplify =>
-        handleKnownDistance(distance.toShort)
-      case (MemoryAddressConstant(a: ThingInMemory), MemoryAddressConstant(b: ThingInMemory)) =>
-        a.name.takeWhile(_ != '.') != b.name.takeWhile(_ != '.') // TODO: ???
-      case (CompoundConstant(MathOperator.Plus | MathOperator.Minus, MemoryAddressConstant(a: ThingInMemory), NumericConstant(_, _)),
-      MemoryAddressConstant(b: ThingInMemory)) =>
-        a.name.takeWhile(_ != '.') != b.name.takeWhile(_ != '.') // TODO: ???
-      case (MemoryAddressConstant(a: ThingInMemory),
-      CompoundConstant(MathOperator.Plus | MathOperator.Minus, MemoryAddressConstant(b: ThingInMemory), NumericConstant(_, _))) =>
-        a.name.takeWhile(_ != '.') != b.name.takeWhile(_ != '.') // TODO: ???
-      case (CompoundConstant(MathOperator.Plus | MathOperator.Minus, MemoryAddressConstant(a: ThingInMemory), NumericConstant(_, _)),
-      CompoundConstant(MathOperator.Plus | MathOperator.Minus, MemoryAddressConstant(b: ThingInMemory), NumericConstant(_, _))) =>
-        a.name.takeWhile(_ != '.') != b.name.takeWhile(_ != '.') // TODO: ???
-      case _ =>
-        false
-    }
-  }
 }
 
 trait TrivialAssemblyLinePattern extends AssemblyLinePattern with (AssemblyLine => Boolean) {
   override def matchLineTo(ctx: AssemblyMatchingContext, flowInfo: FlowInfo, line: AssemblyLine): Boolean = this (line)
 }
 
+case class WhereNoMemoryAccessOverlapBetweenTwoLineLists(ix1: Int, ix2: Int) extends AssemblyPattern {
+  override def matchTo(ctx: AssemblyMatchingContext, code: List[(FlowInfo, AssemblyLine)]): Option[List[(FlowInfo, AssemblyLine)]] = {
+    val s1s = ctx.get[List[AssemblyLine]](ix1)
+    val s2s = ctx.get[List[AssemblyLine]](ix2)
+    if (s1s.forall(s1 => s2s.forall(s2 => memoryAccessDoesntOverlap(s1.addrMode, s1.parameter, s2.addrMode, s2.parameter)))) Some(code) else None
+  }
+}
+
 //noinspection LanguageFeature
 object AssemblyLinePattern {
   implicit def __implicitOpcodeIn(ops: Set[Opcode.Value]): AssemblyLinePattern = HasOpcodeIn(ops)
diff --git a/src/test/scala/millfork/test/ArraySuite.scala b/src/test/scala/millfork/test/ArraySuite.scala
index 2513c871..382ce33c 100644
--- a/src/test/scala/millfork/test/ArraySuite.scala
+++ b/src/test/scala/millfork/test/ArraySuite.scala
@@ -156,6 +156,23 @@ class ArraySuite extends FunSuite with Matchers {
 
   }
 
+  test("Pointer indexing test") {
+    EmuBenchmarkRun(
+      """
+        | array output [4] @$c000
+        | pointer a
+        | byte i
+        | void main () {
+        |   setup()
+        |   a[i + 1] = 55
+        | }
+        | void setup() {
+        |   a = output.addr
+        |   i = 2
+        | }
+      """.stripMargin)(_.readByte(0xc003) should equal(55))
+  }
+
   test("Syntax") {
     EmuUnoptimizedRun(
       """
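
// A hypothetical companion test in the same style as the "Pointer indexing test" above (not part
// of the patch, and the name is made up): storing an interleaved pattern of constants into
// distinct cells of the same array gives InterleavedLoads and the new overlap checks something
// concrete to chew on, while the assertions only look at observable memory. It assumes the body
// is pasted into ArraySuite next to the existing tests, so that EmuBenchmarkRun and the ScalaTest
// matchers are already in scope.
  test("Interleaved constant stores") {
    EmuBenchmarkRun(
      """
        | array output [3] @$c000
        | void main () {
        |   output[0] = 5
        |   output[1] = 6
        |   output[2] = 5
        | }
      """.stripMargin) { m =>
      m.readByte(0xc000) should equal(5)
      m.readByte(0xc001) should equal(6)
      m.readByte(0xc002) should equal(5)
    }
  }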