diff --git a/CHANGELOG.md b/CHANGELOG.md index b265b561..dda51a30 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -35,6 +35,8 @@ Code that uses a custom platform definitions will cause extra warnings until fix * Fixed decimal subtraction. +* Fixed signed comparison. + * Parser performance improvement. * Standard libraries improvements. diff --git a/docs/lang/operators.md b/docs/lang/operators.md index eb06f8d3..53dd5a36 100644 --- a/docs/lang/operators.md +++ b/docs/lang/operators.md @@ -144,9 +144,12 @@ Note you cannot mix those operators, so `a <= b < c` is not valid. `simple word > simple word` `simple long > simple long` -Currently, `>`, `<`, `<=`, `>=` operators perform unsigned comparison -if none of the types of their arguments is signed, -and fail to compile otherwise. This will be changed in the future. +Currently, `>`, `<`, `<=`, `>=` operators perform signed comparison +if any of the types of their arguments is signed, +and unsigned comparison otherwise. + +**WARNING:** On targets using Intel 8080 and Sharp LR35902, a signed comparison `a OP b` is compiled as `a - b OP 0`, +which may give wrong results if the subtraction overflows. 
## Assignment and in-place modification operators diff --git a/src/main/scala/millfork/assembly/mos/opt/VariableToRegisterOptimization.scala b/src/main/scala/millfork/assembly/mos/opt/VariableToRegisterOptimization.scala index ffa1b434..e7c3d710 100644 --- a/src/main/scala/millfork/assembly/mos/opt/VariableToRegisterOptimization.scala +++ b/src/main/scala/millfork/assembly/mos/opt/VariableToRegisterOptimization.scala @@ -6,10 +6,11 @@ import millfork.assembly.mos._ import millfork.assembly.mos.Opcode._ import AddrMode._ import millfork.env._ -import millfork.error.{FatalErrorReporting, Logger} +import millfork.error.Logger import millfork.node.MosNiceFunctionProperty import scala.collection.mutable.ListBuffer +import scala.util.control.TailCalls.{ TailRec, done, tailcall } /** * @author Karol Stasiak @@ -278,7 +279,7 @@ object VariableToRegisterOptimization extends AssemblyOptimization[AssemblyLine] case (v, range, _) => log.debug(s"Inlining $v to register X") val oldCode = code.zip(importances).slice(range.start, range.end) - val newCode = inlineVars(Some(v), None, None, None, featuresForIndices, oldCode) + val newCode = inlineVars(Some(v), None, None, None, featuresForIndices, oldCode).result reportOptimizedBlock(oldCode, newCode) output ++= newCode i = range.end @@ -292,7 +293,7 @@ object VariableToRegisterOptimization extends AssemblyOptimization[AssemblyLine] case (v, range, _) => log.debug(s"Inlining $v to register Y") val oldCode = code.zip(importances).slice(range.start, range.end) - val newCode = inlineVars(None, Some(v), None, None, featuresForIndices, oldCode) + val newCode = inlineVars(None, Some(v), None, None, featuresForIndices, oldCode).result reportOptimizedBlock(oldCode, newCode) output ++= newCode i = range.end @@ -307,7 +308,7 @@ object VariableToRegisterOptimization extends AssemblyOptimization[AssemblyLine] case (v, range, _) => log.debug(s"Inlining $v to register Z") val oldCode = code.zip(importances).slice(range.start, range.end) - val 
newCode = inlineVars(None, None, Some(v), None, featuresForIndices, oldCode) + val newCode = inlineVars(None, None, Some(v), None, featuresForIndices, oldCode).result reportOptimizedBlock(oldCode, newCode) output ++= newCode i = range.end @@ -322,7 +323,7 @@ object VariableToRegisterOptimization extends AssemblyOptimization[AssemblyLine] case (v, range, _) => log.debug(s"Inlining $v to register A") val oldCode = code.zip(importances).slice(range.start, range.end) - val newCode = inlineVars(None, None, None, Some(v), featuresForIndices, oldCode) + val newCode = inlineVars(None, None, None, Some(v), featuresForIndices, oldCode).result reportOptimizedBlock(oldCode, newCode) output ++= newCode i = range.end @@ -744,7 +745,12 @@ object VariableToRegisterOptimization extends AssemblyOptimization[AssemblyLine] case _ => true } - def inlineVars(xCandidate: Option[String], yCandidate: Option[String], zCandidate: Option[String], aCandidate: Option[String], features: FeaturesForIndexRegisters, lines: List[(AssemblyLine, CpuImportance)]): List[AssemblyLine] = { + def inlineVars(xCandidate: Option[String], + yCandidate: Option[String], + zCandidate: Option[String], + aCandidate: Option[String], + features: FeaturesForIndexRegisters, + lines: List[(AssemblyLine, CpuImportance)]): TailRec[List[AssemblyLine]] = { val vx = xCandidate.getOrElse("-") val vy = yCandidate.getOrElse("-") val vz = zCandidate.getOrElse("-") @@ -752,94 +758,94 @@ object VariableToRegisterOptimization extends AssemblyOptimization[AssemblyLine] lines match { case (AssemblyLine(INC, Absolute | ZeroPage, MemoryAddressConstant(th), _), _) :: xs if th.name == vx => - AssemblyLine.implied(INX) :: inlineVars(xCandidate, yCandidate, zCandidate, aCandidate, features, xs) + tailcall(inlineVars(xCandidate, yCandidate, zCandidate, aCandidate, features, xs)).map( AssemblyLine.implied(INX) :: _) case (AssemblyLine(INC, Absolute | ZeroPage, MemoryAddressConstant(th), _), _) :: xs if th.name == vy => - 
AssemblyLine.implied(INY) :: inlineVars(xCandidate, yCandidate, zCandidate, aCandidate, features, xs) + tailcall(inlineVars(xCandidate, yCandidate, zCandidate, aCandidate, features, xs)).map(AssemblyLine.implied(INY) :: _) case (AssemblyLine(INC, Absolute | ZeroPage, MemoryAddressConstant(th), _), _) :: xs if th.name == vz => - AssemblyLine.implied(INZ) :: inlineVars(xCandidate, yCandidate, zCandidate, aCandidate, features, xs) + tailcall(inlineVars(xCandidate, yCandidate, zCandidate, aCandidate, features, xs)).map(AssemblyLine.implied(INZ) :: _) case (AssemblyLine(DEC, Absolute | ZeroPage, MemoryAddressConstant(th), _), _) :: xs if th.name == vx => - AssemblyLine.implied(DEX) :: inlineVars(xCandidate, yCandidate, zCandidate, aCandidate, features, xs) + tailcall(inlineVars(xCandidate, yCandidate, zCandidate, aCandidate, features, xs)).map(AssemblyLine.implied(DEX) :: _) case (AssemblyLine(DEC, Absolute | ZeroPage, MemoryAddressConstant(th), _), _) :: xs if th.name == vy => - AssemblyLine.implied(DEY) :: inlineVars(xCandidate, yCandidate, zCandidate, aCandidate, features, xs) + tailcall(inlineVars(xCandidate, yCandidate, zCandidate, aCandidate, features, xs)).map(AssemblyLine.implied(DEY) :: _) case (AssemblyLine(DEC, Absolute | ZeroPage, MemoryAddressConstant(th), _), _) :: xs if th.name == vz => - AssemblyLine.implied(DEZ) :: inlineVars(xCandidate, yCandidate, zCandidate, aCandidate, features, xs) + tailcall(inlineVars(xCandidate, yCandidate, zCandidate, aCandidate, features, xs)).map(AssemblyLine.implied(DEZ) :: _) case (AssemblyLine(opcode@(DEC | INC | ROL | ROR | ASL | LSR), Absolute | ZeroPage, MemoryAddressConstant(th), _), _) :: xs if th.name == va => - AssemblyLine.implied(opcode) :: inlineVars(xCandidate, yCandidate, zCandidate, aCandidate, features, xs) + tailcall(inlineVars(xCandidate, yCandidate, zCandidate, aCandidate, features, xs)).map(AssemblyLine.implied(opcode) :: _) case (AssemblyLine(LDX, Absolute | ZeroPage, MemoryAddressConstant(th), _), imp) 
:: xs if th.name == vx => if (imp.z == Unimportant && imp.n == Unimportant) { - inlineVars(xCandidate, yCandidate, zCandidate, aCandidate, features, xs) + tailcall(inlineVars(xCandidate, yCandidate, zCandidate, aCandidate, features, xs)) } else { - AssemblyLine.immediate(CPX, 0) :: inlineVars(xCandidate, yCandidate, zCandidate, aCandidate, features, xs) + tailcall(inlineVars(xCandidate, yCandidate, zCandidate, aCandidate, features, xs)).map(AssemblyLine.immediate(CPX, 0) :: _) } case (AssemblyLine(LAX, Absolute | ZeroPage, MemoryAddressConstant(th), _), _) :: xs if th.name == vx => - AssemblyLine.implied(TXA) :: inlineVars(xCandidate, yCandidate, zCandidate, aCandidate, features, xs) + tailcall(inlineVars(xCandidate, yCandidate, zCandidate, aCandidate, features, xs)).map(AssemblyLine.implied(TXA) :: _) case (l@AssemblyLine(op, Absolute | ZeroPage, MemoryAddressConstant(th), _), _) :: xs if opcodesIdentityTable(op) && th.name == vx => - l.copy(addrMode = AbsoluteX, parameter = features.identityArray) :: inlineVars(xCandidate, yCandidate, zCandidate, aCandidate, features, xs) + tailcall(inlineVars(xCandidate, yCandidate, zCandidate, aCandidate, features, xs)).map(l.copy(addrMode = AbsoluteX, parameter = features.identityArray) :: _) case (l@AssemblyLine(op, Absolute | ZeroPage, MemoryAddressConstant(th), _), _) :: xs if opcodesIdentityTable(op) && th.name == vy => - l.copy(addrMode = AbsoluteY, parameter = features.identityArray) :: inlineVars(xCandidate, yCandidate, zCandidate, aCandidate, features, xs) + tailcall(inlineVars(xCandidate, yCandidate, zCandidate, aCandidate, features, xs)).map(l.copy(addrMode = AbsoluteY, parameter = features.identityArray) :: _) case (l@AssemblyLine(LDA, _, _, _), _) :: (AssemblyLine(op, Absolute | ZeroPage, MemoryAddressConstant(th), _), _) :: xs if opcodesCommutative(op) && th.name == va => - l.copy(opcode = op) :: inlineVars(xCandidate, yCandidate, zCandidate, aCandidate, features, xs) + tailcall(inlineVars(xCandidate, yCandidate, 
zCandidate, aCandidate, features, xs)).map(l.copy(opcode = op) :: _) case (l@AssemblyLine(LDA, _, _, _), _) :: (clc@AssemblyLine(CLC, _, _, _), _) :: (AssemblyLine(op, Absolute | ZeroPage, MemoryAddressConstant(th), _), _) :: xs if opcodesCommutative(op) && th.name == va => - clc :: l.copy(opcode = op) :: inlineVars(xCandidate, yCandidate, zCandidate, aCandidate, features, xs) + tailcall(inlineVars(xCandidate, yCandidate, zCandidate, aCandidate, features, xs)).map(clc :: l.copy(opcode = op) :: _) case (l@AssemblyLine(LDA, _, _, _), _) :: (AssemblyLine(op, Absolute | ZeroPage, MemoryAddressConstant(th), _), _) :: xs if opcodesCommutative(op) && th.name == vx => - AssemblyLine.implied(TXA) :: l.copy(opcode = op) :: inlineVars(xCandidate, yCandidate, zCandidate, aCandidate, features, xs) + tailcall(inlineVars(xCandidate, yCandidate, zCandidate, aCandidate, features, xs)).map(AssemblyLine.implied(TXA) :: l.copy(opcode = op) :: _) case (l@AssemblyLine(LDA, _, _, _), _) :: (clc@AssemblyLine(CLC, _, _, _), _) :: (AssemblyLine(op, Absolute | ZeroPage, MemoryAddressConstant(th), _), _) :: xs if opcodesCommutative(op) && th.name == vx => - AssemblyLine.implied(TXA) :: clc :: l.copy(opcode = op) :: inlineVars(xCandidate, yCandidate, zCandidate, aCandidate, features, xs) + tailcall(inlineVars(xCandidate, yCandidate, zCandidate, aCandidate, features, xs)).map(AssemblyLine.implied(TXA) :: clc :: l.copy(opcode = op) :: _) case (l@AssemblyLine(LDA, _, _, _), _) :: (AssemblyLine(op, Absolute | ZeroPage, MemoryAddressConstant(th), _), _) :: xs if opcodesCommutative(op) && th.name == vy => - AssemblyLine.implied(TYA) :: l.copy(opcode = op) :: inlineVars(xCandidate, yCandidate, zCandidate, aCandidate, features, xs) + tailcall(inlineVars(xCandidate, yCandidate, zCandidate, aCandidate, features, xs)).map(AssemblyLine.implied(TYA) :: l.copy(opcode = op) :: _) case (l@AssemblyLine(LDA, _, _, _), _) :: (clc@AssemblyLine(CLC, _, _, _), _) :: (AssemblyLine(op, Absolute | ZeroPage, 
MemoryAddressConstant(th), _), _) :: xs if opcodesCommutative(op) && th.name == vy => - AssemblyLine.implied(TYA) :: clc :: l.copy(opcode = op) :: inlineVars(xCandidate, yCandidate, zCandidate, aCandidate, features, xs) + tailcall(inlineVars(xCandidate, yCandidate, zCandidate, aCandidate, features, xs)).map(AssemblyLine.implied(TYA) :: clc :: l.copy(opcode = op) :: _) case (AssemblyLine(LDA | STA, Absolute | ZeroPage, MemoryAddressConstant(th), _), imp) :: xs if th.name == va => if (imp.z == Unimportant && imp.n == Unimportant) { inlineVars(xCandidate, yCandidate, zCandidate, aCandidate, features, xs) } else { - AssemblyLine.immediate(CMP, 0) :: inlineVars(xCandidate, yCandidate, zCandidate, aCandidate, features, xs) + tailcall(inlineVars(xCandidate, yCandidate, zCandidate, aCandidate, features, xs)).map(AssemblyLine.immediate(CMP, 0) :: _) } case (AssemblyLine(LAX, Absolute | ZeroPage, MemoryAddressConstant(th), _), _) :: xs if th.name == va => - AssemblyLine.implied(TAX) :: inlineVars(xCandidate, yCandidate, zCandidate, aCandidate, features, xs) + tailcall(inlineVars(xCandidate, yCandidate, zCandidate, aCandidate, features, xs)).map(AssemblyLine.implied(TAX) :: _) case (AssemblyLine(LDY, Absolute | ZeroPage, MemoryAddressConstant(th), _), imp) :: xs if th.name == vy => if (imp.z == Unimportant && imp.n == Unimportant) { inlineVars(xCandidate, yCandidate, zCandidate, aCandidate, features, xs) } else { - AssemblyLine.immediate(CPY, 0) :: inlineVars(xCandidate, yCandidate, zCandidate, aCandidate, features, xs) + tailcall(inlineVars(xCandidate, yCandidate, zCandidate, aCandidate, features, xs)).map(AssemblyLine.immediate(CPY, 0) :: _) } case (AssemblyLine(LDZ, Absolute | ZeroPage, MemoryAddressConstant(th), _), imp) :: xs @@ -847,134 +853,134 @@ object VariableToRegisterOptimization extends AssemblyOptimization[AssemblyLine] if (imp.z == Unimportant && imp.n == Unimportant) { inlineVars(xCandidate, yCandidate, zCandidate, aCandidate, features, xs) } else { - 
AssemblyLine.immediate(CPZ, 0) :: inlineVars(xCandidate, yCandidate, zCandidate, aCandidate, features, xs) + tailcall(inlineVars(xCandidate, yCandidate, zCandidate, aCandidate, features, xs)).map(AssemblyLine.immediate(CPZ, 0) :: _) } case (AssemblyLine(LDA, Absolute | ZeroPage, MemoryAddressConstant(th), true), _) :: (AssemblyLine(TAX, _, _, true), _) :: xs if th.name == vx => // these TXA's may get optimized away by a different optimization - AssemblyLine.implied(TXA) :: inlineVars(xCandidate, yCandidate, zCandidate, aCandidate, features, xs) + tailcall(inlineVars(xCandidate, yCandidate, zCandidate, aCandidate, features, xs)).map(AssemblyLine.implied(TXA) :: _) case (AssemblyLine(LDA, Absolute | ZeroPage, MemoryAddressConstant(th), true), _) :: (AssemblyLine(TAY, _, _, true), _) :: xs if th.name == vy => // these TYA's may get optimized away by a different optimization - AssemblyLine.implied(TYA) :: inlineVars(xCandidate, yCandidate, zCandidate, aCandidate, features, xs) + tailcall(inlineVars(xCandidate, yCandidate, zCandidate, aCandidate, features, xs)).map(AssemblyLine.implied(TYA) :: _) case (AssemblyLine(LDA, Absolute | ZeroPage, MemoryAddressConstant(th), true), _) :: (AssemblyLine(TAZ, _, _, true), _) :: xs if th.name == vz => // these TZA's may get optimized away by a different optimization - AssemblyLine.implied(TZA) :: inlineVars(xCandidate, yCandidate, zCandidate, aCandidate, features, xs) + tailcall(inlineVars(xCandidate, yCandidate, zCandidate, aCandidate, features, xs)).map(AssemblyLine.implied(TZA) :: _) case (AssemblyLine(LDX, Absolute | ZeroPage, MemoryAddressConstant(th), true), _) :: (AssemblyLine(TXA, _, _, true), _) :: xs if th.name == va => // these TAX's may get optimized away by a different optimization - AssemblyLine.implied(TAX) :: inlineVars(xCandidate, yCandidate, zCandidate, aCandidate, features, xs) + tailcall(inlineVars(xCandidate, yCandidate, zCandidate, aCandidate, features, xs)).map(AssemblyLine.implied(TAX) :: _) case 
(AssemblyLine(LDY, Absolute | ZeroPage, MemoryAddressConstant(th), true), _) :: (AssemblyLine(TYA, _, _, true), _) :: xs if th.name == va => // these TAY's may get optimized away by a different optimization - AssemblyLine.implied(TAY) :: inlineVars(xCandidate, yCandidate, zCandidate, aCandidate, features, xs) + tailcall(inlineVars(xCandidate, yCandidate, zCandidate, aCandidate, features, xs)).map(AssemblyLine.implied(TAY) :: _) case (AssemblyLine(LDA, am, param, true), _) :: (AssemblyLine(STA, Absolute | ZeroPage, MemoryAddressConstant(th), true), _) :: xs if th.name == vx && LdxAddrModes(am) => // these TXA's may get optimized away by a different optimization - AssemblyLine(LDX, am, param) :: AssemblyLine.implied(TXA) :: inlineVars(xCandidate, yCandidate, zCandidate, aCandidate, features, xs) + tailcall(inlineVars(xCandidate, yCandidate, zCandidate, aCandidate, features, xs)).map(AssemblyLine(LDX, am, param) :: AssemblyLine.implied(TXA) :: _) case (AssemblyLine(LDA, am, param, true), _) :: (AssemblyLine(STA, Absolute | ZeroPage, MemoryAddressConstant(th), true), _) :: xs if th.name == vy && LdyAddrModes(am) => // these TYA's may get optimized away by a different optimization - AssemblyLine(LDY, am, param) :: AssemblyLine.implied(TYA) :: inlineVars(xCandidate, yCandidate, zCandidate, aCandidate, features, xs) + tailcall(inlineVars(xCandidate, yCandidate, zCandidate, aCandidate, features, xs)).map(AssemblyLine(LDY, am, param) :: AssemblyLine.implied(TYA) :: _) case (AssemblyLine(LDA, am, param, true), _) :: (AssemblyLine(STA, Absolute | ZeroPage, MemoryAddressConstant(th), true), _) :: xs if th.name == vz && LdzAddrModes(am) => // these TZA's may get optimized away by a different optimization - AssemblyLine(LDZ, am, param) :: AssemblyLine.implied(TZA) :: inlineVars(xCandidate, yCandidate, zCandidate, aCandidate, features, xs) + tailcall(inlineVars(xCandidate, yCandidate, zCandidate, aCandidate, features, xs)).map(AssemblyLine(LDZ, am, param) :: 
AssemblyLine.implied(TZA) :: _) case (AssemblyLine(LDA, Absolute | ZeroPage, MemoryAddressConstant(th), _), _) :: (AssemblyLine(CMP, am, param, true), _) :: xs if th.name == vx && CpxyzAddrModes(am) && isNot(vx, param) => // ditto - AssemblyLine.implied(TXA) :: AssemblyLine(CPX, am, param) :: inlineVars(xCandidate, yCandidate, zCandidate, aCandidate, features, xs) + tailcall(inlineVars(xCandidate, yCandidate, zCandidate, aCandidate, features, xs)).map(AssemblyLine.implied(TXA) :: AssemblyLine(CPX, am, param) :: _) case (AssemblyLine(LDA, Absolute | ZeroPage, MemoryAddressConstant(th), _), _) :: (AssemblyLine(CMP, am, param, true), _) :: xs if th.name == vy && CpxyzAddrModes(am) && isNot(vx, param) => // ditto - AssemblyLine.implied(TYA) :: AssemblyLine(CPY, am, param) :: inlineVars(xCandidate, yCandidate, zCandidate, aCandidate, features, xs) + tailcall(inlineVars(xCandidate, yCandidate, zCandidate, aCandidate, features, xs)).map(AssemblyLine.implied(TYA) :: AssemblyLine(CPY, am, param) :: _) case (AssemblyLine(LDA, Absolute | ZeroPage, MemoryAddressConstant(th), _), _) :: (AssemblyLine(CMP, am, param, true), _) :: xs if th.name == vy && CpxyzAddrModes(am) && isNot(vx, param) => // ditto - AssemblyLine.implied(TZA) :: AssemblyLine(CPZ, am, param) :: inlineVars(xCandidate, yCandidate, zCandidate, aCandidate, features, xs) + tailcall(inlineVars(xCandidate, yCandidate, zCandidate, aCandidate, features, xs)).map(AssemblyLine.implied(TZA) :: AssemblyLine(CPZ, am, param) :: _) case (AssemblyLine(LDA, Absolute | ZeroPage, MemoryAddressConstant(th), _), _) :: xs if th.name == vx => - AssemblyLine.implied(TXA) :: inlineVars(xCandidate, yCandidate, zCandidate, aCandidate, features, xs) + tailcall(inlineVars(xCandidate, yCandidate, zCandidate, aCandidate, features, xs)).map(AssemblyLine.implied(TXA) :: _) case (AssemblyLine(LDA, Absolute | ZeroPage, MemoryAddressConstant(th), _), _) :: xs if th.name == vy => - AssemblyLine.implied(TYA) :: inlineVars(xCandidate, yCandidate, 
zCandidate, aCandidate, features, xs) + tailcall(inlineVars(xCandidate, yCandidate, zCandidate, aCandidate, features, xs)).map(AssemblyLine.implied(TYA) :: _) case (AssemblyLine(LDY, Absolute | ZeroPage, MemoryAddressConstant(th), _), _) :: xs if th.name == vx => - AssemblyLine.implied(TXY) :: inlineVars(xCandidate, yCandidate, zCandidate, aCandidate, features, xs) + tailcall(inlineVars(xCandidate, yCandidate, zCandidate, aCandidate, features, xs)).map(AssemblyLine.implied(TXY) :: _) case (AssemblyLine(LDX, Absolute | ZeroPage, MemoryAddressConstant(th), _), _) :: xs if th.name == vy => - AssemblyLine.implied(TYX) :: inlineVars(xCandidate, yCandidate, zCandidate, aCandidate, features, xs) + tailcall(inlineVars(xCandidate, yCandidate, zCandidate, aCandidate, features, xs)).map(AssemblyLine.implied(TYX) :: _) case (AssemblyLine(LDA, Absolute | ZeroPage, MemoryAddressConstant(th), _), _) :: xs if th.name == vz => - AssemblyLine.implied(TZA) :: inlineVars(xCandidate, yCandidate, zCandidate, aCandidate, features, xs) + tailcall(inlineVars(xCandidate, yCandidate, zCandidate, aCandidate, features, xs)).map(AssemblyLine.implied(TZA) :: _) case (AssemblyLine(LDX, Absolute | ZeroPage, MemoryAddressConstant(th), _), _) :: xs if th.name == va => - AssemblyLine.implied(TAX) :: inlineVars(xCandidate, yCandidate, zCandidate, aCandidate, features, xs) + tailcall(inlineVars(xCandidate, yCandidate, zCandidate, aCandidate, features, xs)).map(AssemblyLine.implied(TAX) :: _) case (AssemblyLine(LDY, Absolute | ZeroPage, MemoryAddressConstant(th), _), _) :: xs if th.name == va => - AssemblyLine.implied(TAY) :: inlineVars(xCandidate, yCandidate, zCandidate, aCandidate, features, xs) + tailcall(inlineVars(xCandidate, yCandidate, zCandidate, aCandidate, features, xs)).map(AssemblyLine.implied(TAY) :: _) case (AssemblyLine(STA, Absolute | ZeroPage, MemoryAddressConstant(th), _), _) :: xs if th.name == vx => - AssemblyLine.implied(TAX) :: inlineVars(xCandidate, yCandidate, zCandidate, 
aCandidate, features, xs) + tailcall(inlineVars(xCandidate, yCandidate, zCandidate, aCandidate, features, xs)).map(AssemblyLine.implied(TAX) :: _) case (AssemblyLine(STA, Absolute | ZeroPage, MemoryAddressConstant(th), _), _) :: xs if th.name == vy => - AssemblyLine.implied(TAY) :: inlineVars(xCandidate, yCandidate, zCandidate, aCandidate, features, xs) + tailcall(inlineVars(xCandidate, yCandidate, zCandidate, aCandidate, features, xs)).map(AssemblyLine.implied(TAY) :: _) case (AssemblyLine(STA, Absolute | ZeroPage, MemoryAddressConstant(th), _), _) :: xs if th.name == vz => - AssemblyLine.implied(TAZ) :: inlineVars(xCandidate, yCandidate, zCandidate, aCandidate, features, xs) + tailcall(inlineVars(xCandidate, yCandidate, zCandidate, aCandidate, features, xs)).map(AssemblyLine.implied(TAZ) :: _) case (AssemblyLine(STX, Absolute | ZeroPage, MemoryAddressConstant(th), _), _) :: xs if th.name == va => - AssemblyLine.implied(TXA) :: inlineVars(xCandidate, yCandidate, zCandidate, aCandidate, features, xs) + tailcall(inlineVars(xCandidate, yCandidate, zCandidate, aCandidate, features, xs)).map(AssemblyLine.implied(TXA) :: _) case (AssemblyLine(STY, Absolute | ZeroPage, MemoryAddressConstant(th), _), _) :: xs if th.name == va => - AssemblyLine.implied(TYA) :: inlineVars(xCandidate, yCandidate, zCandidate, aCandidate, features, xs) + tailcall(inlineVars(xCandidate, yCandidate, zCandidate, aCandidate, features, xs)).map(AssemblyLine.implied(TYA) :: _) case (AssemblyLine(STX, Absolute | ZeroPage, MemoryAddressConstant(th), _), _) :: xs if th.name == vy => - AssemblyLine.implied(TXY) :: inlineVars(xCandidate, yCandidate, zCandidate, aCandidate, features, xs) + tailcall(inlineVars(xCandidate, yCandidate, zCandidate, aCandidate, features, xs)).map(AssemblyLine.implied(TXY) :: _) case (AssemblyLine(STY, Absolute | ZeroPage, MemoryAddressConstant(th), _), _) :: xs if th.name == vx => - AssemblyLine.implied(TYX) :: inlineVars(xCandidate, yCandidate, zCandidate, aCandidate, 
features, xs) + tailcall(inlineVars(xCandidate, yCandidate, zCandidate, aCandidate, features, xs)).map(AssemblyLine.implied(TYX) :: _) case (AssemblyLine(STZ, Absolute | ZeroPage, MemoryAddressConstant(th), _), _) :: xs if th.name == vx => - if (features.izIsAlwaysZero) AssemblyLine.immediate(LDX, 0) :: inlineVars(xCandidate, yCandidate, zCandidate, aCandidate, features, xs) + if (features.izIsAlwaysZero) tailcall(inlineVars(xCandidate, yCandidate, zCandidate, aCandidate, features, xs)).map(AssemblyLine.immediate(LDX, 0) :: _) else features.log.fatal("Unexpected STZ") case (AssemblyLine(STZ, Absolute | ZeroPage, MemoryAddressConstant(th), _), _) :: xs if th.name == vy => - if (features.izIsAlwaysZero) AssemblyLine.immediate(LDY, 0) :: inlineVars(xCandidate, yCandidate, zCandidate, aCandidate, features, xs) + if (features.izIsAlwaysZero) tailcall(inlineVars(xCandidate, yCandidate, zCandidate, aCandidate, features, xs)).map(AssemblyLine.immediate(LDY, 0) :: _) else features.log.fatal("Unexpected STZ") case (AssemblyLine(STZ, Absolute | ZeroPage, MemoryAddressConstant(th), _), _) :: xs if th.name == va => - if (features.izIsAlwaysZero) AssemblyLine.immediate(LDA, 0) :: inlineVars(xCandidate, yCandidate, zCandidate, aCandidate, features, xs) - else AssemblyLine.implied(TZA) :: inlineVars(xCandidate, yCandidate, zCandidate, aCandidate, features, xs) + if (features.izIsAlwaysZero) tailcall(inlineVars(xCandidate, yCandidate, zCandidate, aCandidate, features, xs)).map(AssemblyLine.immediate(LDA, 0) :: _) + else tailcall(inlineVars(xCandidate, yCandidate, zCandidate, aCandidate, features, xs)).map(AssemblyLine.implied(TZA) :: _) case (AssemblyLine(TAX, _, _, _), _) :: xs if xCandidate.isDefined => features.log.fatal("Unexpected TAX") @@ -986,17 +992,17 @@ object VariableToRegisterOptimization extends AssemblyOptimization[AssemblyLine] features.log.fatal("Unexpected TAZ") case (AssemblyLine(TXA, _, _, _), _) :: xs if aCandidate.isDefined => - AssemblyLine.immediate(CPX, 0) 
:: inlineVars(xCandidate, yCandidate, zCandidate, aCandidate, features, xs) + tailcall(inlineVars(xCandidate, yCandidate, zCandidate, aCandidate, features, xs)).map(AssemblyLine.immediate(CPX, 0) :: _) case (AssemblyLine(TYA, _, _, _), _) :: xs if aCandidate.isDefined => - AssemblyLine.immediate(CPY, 0) :: inlineVars(xCandidate, yCandidate, zCandidate, aCandidate, features, xs) + tailcall(inlineVars(xCandidate, yCandidate, zCandidate, aCandidate, features, xs)).map(AssemblyLine.immediate(CPY, 0) :: _) case (AssemblyLine(TZA, _, _, _), _) :: xs if aCandidate.isDefined => - AssemblyLine.immediate(CPZ, 0) :: inlineVars(xCandidate, yCandidate, zCandidate, aCandidate, features, xs) + tailcall(inlineVars(xCandidate, yCandidate, zCandidate, aCandidate, features, xs)).map(AssemblyLine.immediate(CPZ, 0) :: _) - case (x, _) :: xs => x :: inlineVars(xCandidate, yCandidate, zCandidate, aCandidate, features, xs) + case (x, _) :: xs => inlineVars(xCandidate, yCandidate, zCandidate, aCandidate, features, xs).map(x :: _) - case Nil => Nil + case Nil => done(Nil) } } diff --git a/src/main/scala/millfork/assembly/z80/opt/CoarseFlowAnalyzer.scala b/src/main/scala/millfork/assembly/z80/opt/CoarseFlowAnalyzer.scala index 448082b1..76b7f9f4 100644 --- a/src/main/scala/millfork/assembly/z80/opt/CoarseFlowAnalyzer.scala +++ b/src/main/scala/millfork/assembly/z80/opt/CoarseFlowAnalyzer.scala @@ -4,7 +4,7 @@ import millfork.assembly.opt.{AnyStatus, SingleStatus, Status} import millfork.assembly.z80._ import millfork.env.{Label, MemoryAddressConstant, NormalFunction, NumericConstant} import millfork.node.ZRegister -import millfork.{CompilationOptions, Cpu} +import millfork.{CompilationFlag, CompilationOptions, Cpu} /** * @author Karol Stasiak @@ -17,6 +17,7 @@ object CoarseFlowAnalyzer { val emptyStatus = CpuStatus() val flagArray = Array.fill[CpuStatus](code.length)(emptyStatus) val codeArray = code.toArray + val z80 = compilationOptions.flag(CompilationFlag.EmitZ80Opcodes) var changed = 
true while (changed) { @@ -42,25 +43,41 @@ object CoarseFlowAnalyzer { case ZLine(BYTE, _, _, _) => currentStatus = initialStatus - case ZLine(ADD, OneRegister(s), _, _) => + case ZLine(ADD, OneRegister(ZRegister.IMM_8), NumericConstant(0, _), _) => + currentStatus = currentStatus.copy( + nf = Status.SingleFalse, + cf = Status.SingleFalse, + zf = currentStatus.a.map(_.&(0xff) == 0), + sf = currentStatus.a.map(_.&(0x80) == 0), + pf = if (z80) Status.SingleFalse else AnyStatus, + hf = Status.SingleFalse) + case ZLine(SUB, OneRegister(ZRegister.IMM_8), NumericConstant(0, _), _) => + currentStatus = currentStatus.copy( + nf = Status.SingleTrue, + cf = Status.SingleFalse, + zf = currentStatus.a.map(_.&(0xff) == 0), + sf = currentStatus.a.map(_.&(0x80) == 0), + pf = if (z80) Status.SingleFalse else AnyStatus, + hf = Status.SingleFalse) + case l@ZLine(ADD, OneRegister(s), _, _) => currentStatus = currentStatus.copy(a = (currentStatus.a <*> currentStatus.getRegister(s)) ((m, n) => (m + n) & 0xff), - cf = AnyStatus, zf = AnyStatus, sf = AnyStatus, pf = AnyStatus, hf = AnyStatus) + nf = Status.SingleFalse, cf = AnyStatus, zf = AnyStatus, sf = AnyStatus, pf = AnyStatus, hf = AnyStatus) case ZLine(SUB, OneRegister(s), _, _) => currentStatus = currentStatus.copy(a = (currentStatus.a <*> currentStatus.getRegister(s)) ((m, n) => (m - n) & 0xff), - cf = AnyStatus, zf = AnyStatus, sf = AnyStatus, pf = AnyStatus, hf = AnyStatus) + nf = Status.SingleTrue, cf = AnyStatus, zf = AnyStatus, sf = AnyStatus, pf = AnyStatus, hf = AnyStatus) case ZLine(AND, OneRegister(s), _, _) => currentStatus = currentStatus.copy(a = (currentStatus.a <*> currentStatus.getRegister(s)) ((m, n) => (m & n) & 0xff), - cf = AnyStatus, zf = AnyStatus, sf = AnyStatus, pf = AnyStatus, hf = AnyStatus) + nf = Status.SingleFalse, cf = AnyStatus, zf = AnyStatus, sf = AnyStatus, pf = AnyStatus, hf = AnyStatus) case ZLine(OR, OneRegister(ZRegister.A), _, _) => - currentStatus = currentStatus.copy(cf = 
Status.SingleFalse, zf = AnyStatus, sf = AnyStatus, pf = AnyStatus, hf = AnyStatus) + currentStatus = currentStatus.copy(nf = Status.SingleFalse, cf = Status.SingleFalse, zf = AnyStatus, sf = AnyStatus, pf = AnyStatus, hf = AnyStatus) case ZLine(XOR, OneRegister(ZRegister.A), _, _) => - currentStatus = currentStatus.copy(a = Status.SingleZero, cf = Status.SingleFalse, zf = AnyStatus, sf = AnyStatus, pf = AnyStatus, hf = AnyStatus) + currentStatus = currentStatus.copy(a = Status.SingleZero, nf = Status.SingleFalse, cf = Status.SingleFalse, zf = Status.SingleTrue, sf = Status.SingleFalse, pf = AnyStatus, hf = AnyStatus) case ZLine(OR, OneRegister(s), _, _) => currentStatus = currentStatus.copy(a = (currentStatus.a <*> currentStatus.getRegister(s)) ((m, n) => (m | n) & 0xff), - cf = AnyStatus, zf = AnyStatus, sf = AnyStatus, pf = AnyStatus, hf = AnyStatus) + nf = Status.SingleFalse, cf = AnyStatus, zf = AnyStatus, sf = AnyStatus, pf = AnyStatus, hf = AnyStatus) case ZLine(XOR, OneRegister(s), _, _) => currentStatus = currentStatus.copy(a = (currentStatus.a <*> currentStatus.getRegister(s)) ((m, n) => (m ^ n) & 0xff), - cf = AnyStatus, zf = AnyStatus, sf = AnyStatus, pf = AnyStatus, hf = AnyStatus) + nf = Status.SingleFalse, cf = AnyStatus, zf = AnyStatus, sf = AnyStatus, pf = AnyStatus, hf = AnyStatus) case ZLine(INC, OneRegister(r), _, _) => currentStatus = currentStatus. 
@@ -126,8 +143,8 @@ object CoarseFlowAnalyzer { zf = AnyStatus, pf = AnyStatus, hf = Status.SingleFalse) - case ZLine(SCF, _, _, _) => currentStatus.copy(cf = Status.SingleTrue) - case ZLine(CCF, _, _, _) => currentStatus.copy(cf = currentStatus.cf.negate) + case ZLine(SCF, _, _, _) => currentStatus.copy(cf = Status.SingleTrue, hf = Status.SingleFalse, nf = Status.SingleFalse) + case ZLine(CCF, _, _, _) => currentStatus.copy(cf = currentStatus.cf.negate, hf = AnyStatus, nf = AnyStatus) case ZLine(opcode, registers, _, _) => currentStatus = currentStatus.copy(cf = AnyStatus, zf = AnyStatus, sf = AnyStatus, pf = AnyStatus, hf = AnyStatus) diff --git a/src/main/scala/millfork/assembly/z80/opt/CpuStatus.scala b/src/main/scala/millfork/assembly/z80/opt/CpuStatus.scala index 66c0ebc7..e2919897 100644 --- a/src/main/scala/millfork/assembly/z80/opt/CpuStatus.scala +++ b/src/main/scala/millfork/assembly/z80/opt/CpuStatus.scala @@ -135,7 +135,7 @@ case class CpuStatus(a: Status[Int] = UnknownStatus, override def toString: String = { val memRepr = if (memIx.isEmpty) "" else (0 to memIx.keys.max).map(i => memIx.getOrElse(i, UnknownStatus)).mkString("") - s"A=$a,B=$b,C=$c,D=$d,E=$e,H=$h,L=$l,IX=$ixh$ixl,Y=$iyh$iyl; Z=$zf,C=$cf,N=$nf,S=$sf,P=$pf,H=$hf; M=" + memRepr.padTo(4, ' ') + s"A=$a,B=$b,C=$c,D=$d,E=$e,H=$h,L=$l,IX=$ixh$ixl,IY=$iyh$iyl; Z=$zf,C=$cf,N=$nf,S=$sf,P=$pf,H=$hf; M=" + memRepr.padTo(4, ' ') } } \ No newline at end of file diff --git a/src/main/scala/millfork/assembly/z80/opt/ReverseFlowAnalyzer.scala b/src/main/scala/millfork/assembly/z80/opt/ReverseFlowAnalyzer.scala index cbb5c5f2..53f67d1e 100644 --- a/src/main/scala/millfork/assembly/z80/opt/ReverseFlowAnalyzer.scala +++ b/src/main/scala/millfork/assembly/z80/opt/ReverseFlowAnalyzer.scala @@ -73,6 +73,7 @@ case class CpuImportance(a: Importance = UnknownImportance, cf = this.cf ~ that.cf, pf = this.pf ~ that.pf, hf = this.hf ~ that.hf, + sf = this.sf ~ that.sf, ) def getRegister(register: ZRegister.Value, 
offset: Int = -1): Importance = register match { diff --git a/src/main/scala/millfork/compiler/ComparisonType.scala b/src/main/scala/millfork/compiler/ComparisonType.scala index c2812177..66f398ae 100644 --- a/src/main/scala/millfork/compiler/ComparisonType.scala +++ b/src/main/scala/millfork/compiler/ComparisonType.scala @@ -34,4 +34,12 @@ object ComparisonType extends Enumeration { case Equal => NotEqual case NotEqual => Equal } + + def isSigned(x: ComparisonType.Value): Boolean = x match { + case LessSigned => true + case GreaterSigned => true + case LessOrEqualSigned => true + case GreaterOrEqualSigned => true + case _ => false + } } diff --git a/src/main/scala/millfork/compiler/mos/BuiltIns.scala b/src/main/scala/millfork/compiler/mos/BuiltIns.scala index 1a43cf4f..7143572a 100644 --- a/src/main/scala/millfork/compiler/mos/BuiltIns.scala +++ b/src/main/scala/millfork/compiler/mos/BuiltIns.scala @@ -383,20 +383,24 @@ object BuiltIns { } case _ => } - val secondParamCompiled = maybeConstant match { + val cmpOp = if (ComparisonType.isSigned(compType)) SBC else CMP + var comparingAgainstZero = false + val secondParamCompiled0 = maybeConstant match { case Some(x) => compType match { - case ComparisonType.Equal | ComparisonType.NotEqual | ComparisonType.LessSigned | ComparisonType.GreaterOrEqualSigned => - if (x.quickSimplify.isLowestByteAlwaysEqual(0) && OpcodeClasses.ChangesAAlways(firstParamCompiled.last.opcode)) Nil - else List(AssemblyLine.immediate(CMP, x)) + case ComparisonType.Equal | ComparisonType.NotEqual | ComparisonType.LessSigned | ComparisonType.GreaterOrEqualSigned | ComparisonType.LessOrEqualSigned | ComparisonType.GreaterSigned => + if (x.quickSimplify.isLowestByteAlwaysEqual(0) && OpcodeClasses.ChangesAAlways(firstParamCompiled.last.opcode)) { + comparingAgainstZero = true + Nil + } else List(AssemblyLine.immediate(cmpOp, x)) case _ => - List(AssemblyLine.immediate(CMP, x)) + List(AssemblyLine.immediate(cmpOp, x)) } case _ => compType match { case 
ComparisonType.Equal | ComparisonType.NotEqual | ComparisonType.LessSigned | ComparisonType.GreaterOrEqualSigned => - val secondParamCompiledUnoptimized = simpleOperation(CMP, ctx, rhs, IndexChoice.PreferY, preserveA = true, commutative = false) + val secondParamCompiledUnoptimized = simpleOperation(cmpOp, ctx, rhs, IndexChoice.PreferY, preserveA = true, commutative = false) secondParamCompiledUnoptimized match { - case List(AssemblyLine(CMP, Immediate, NumericConstant(0, _), true)) => + case List(AssemblyLine(CMP, Immediate, NumericConstant(0, _), true)) => if (OpcodeClasses.ChangesAAlways(firstParamCompiled.last.opcode)) { Nil } else { @@ -405,9 +409,10 @@ object BuiltIns { case _ => secondParamCompiledUnoptimized } case _ => - simpleOperation(CMP, ctx, rhs, IndexChoice.PreferY, preserveA = true, commutative = false) + simpleOperation(cmpOp, ctx, rhs, IndexChoice.PreferY, preserveA = true, commutative = false) } } + val secondParamCompiled = if(cmpOp == SBC && !comparingAgainstZero) AssemblyLine.implied(SEC) :: secondParamCompiled0 else secondParamCompiled0 val (effectiveComparisonType, label) = branches match { case NoBranching => return Nil case BranchIfTrue(l) => compType -> l @@ -460,17 +465,51 @@ object BuiltIns { AssemblyLine.label(x)) case ComparisonType.LessSigned => - List(AssemblyLine.relative(BMI, Label(label))) + if (comparingAgainstZero) List(AssemblyLine.relative(BMI, label)) else { + val fixup = ctx.nextLabel("co") + List( + AssemblyLine.relative(BVC, fixup), + AssemblyLine.immediate(EOR, 0x80), + AssemblyLine.label(fixup), + AssemblyLine.relative(BMI, label)) + } case ComparisonType.GreaterOrEqualSigned => - List(AssemblyLine.relative(BPL, Label(label))) + if (comparingAgainstZero) List(AssemblyLine.relative(BPL, label)) else { + val fixup = ctx.nextLabel("co") + List( + AssemblyLine.relative(BVC, fixup), + AssemblyLine.immediate(EOR, 0x80), + AssemblyLine.label(fixup), AssemblyLine.relative(BPL, label)) + } case ComparisonType.LessOrEqualSigned 
=> - List(AssemblyLine.relative(BMI, Label(label)), AssemblyLine.relative(BEQ, Label(label))) + if (comparingAgainstZero) { + List(AssemblyLine.relative(BEQ, label), + AssemblyLine.relative(BMI, label)) + } else { + val fixup = ctx.nextLabel("co") + List(AssemblyLine.relative(BVC, fixup), + AssemblyLine.immediate(EOR, 0x80), + AssemblyLine.label(fixup), + AssemblyLine.relative(BMI, label), + AssemblyLine.relative(BEQ, label)) + } case ComparisonType.GreaterSigned => - val x = ctx.nextLabel("co") - List( - AssemblyLine.relative(BEQ, x), - AssemblyLine.relative(BPL, Label(label)), - AssemblyLine.label(x)) + if (comparingAgainstZero) { + val x = ctx.nextLabel("co") + List(AssemblyLine.relative(BEQ, x), + AssemblyLine.relative(BPL, label), + AssemblyLine.label(x)) + } else { + val fixup = ctx.nextLabel("co") + val x = ctx.nextLabel("co") + List( + AssemblyLine.relative(BVC, fixup), + AssemblyLine.immediate(EOR, 0x80), + AssemblyLine.label(fixup), + AssemblyLine.relative(BEQ, x), + AssemblyLine.relative(BPL, label), + AssemblyLine.label(x)) + } } firstParamCompiled ++ secondParamCompiled ++ branchingCompiled @@ -622,8 +661,33 @@ object BuiltIns { List(AssemblyLine.relative(BCS, x), AssemblyLine.label(innerLabel)) + case ComparisonType.LessSigned => + val fixup = ctx.nextLabel("co") + cmpTo(LDA, ll) ++ + List(AssemblyLine.implied(SEC)) ++ + cmpTo(SBC, rl) ++ + cmpTo(LDA, lh) ++ + cmpTo(SBC, rh) ++ + List( + AssemblyLine.relative(BVC, fixup), + AssemblyLine.immediate(EOR, 0x80), + AssemblyLine.label(fixup)) ++ + List(AssemblyLine.relative(BCC, x)) + + case ComparisonType.GreaterOrEqualSigned => + val fixup = ctx.nextLabel("co") + cmpTo(LDA, ll) ++ + List(AssemblyLine.implied(SEC)) ++ + cmpTo(SBC, rl) ++ + cmpTo(LDA, lh) ++ + cmpTo(SBC, rh) ++ + List( + AssemblyLine.relative(BVC, fixup), + AssemblyLine.immediate(EOR, 0x80), + AssemblyLine.label(fixup)) ++ + List(AssemblyLine.relative(BCS, x)) case _ => ???
- // TODO: signed word comparisons + // TODO: signed word comparisons: <=, > } } diff --git a/src/main/scala/millfork/compiler/z80/Z80Comparisons.scala b/src/main/scala/millfork/compiler/z80/Z80Comparisons.scala index 8751b878..f87fcf47 100644 --- a/src/main/scala/millfork/compiler/z80/Z80Comparisons.scala +++ b/src/main/scala/millfork/compiler/z80/Z80Comparisons.scala @@ -20,24 +20,79 @@ object Z80Comparisons { return compile8BitComparison(ctx, ComparisonType.flip(compType), r, l, branches) case _ => () } - val calculateFlags = - Z80ExpressionCompiler.compileToA(ctx, r) ++ - List(ZLine.ld8(ZRegister.E, ZRegister.A)) ++ - Z80ExpressionCompiler.stashDEIfChanged(ctx, Z80ExpressionCompiler.compileToA(ctx, l)) ++ - List(ZLine.register(ZOpcode.CP, ZRegister.E)) - if (branches == NoBranching) return calculateFlags - val jump = (compType, branches) match { - case (Equal, BranchIfTrue(label)) => ZLine.jump(label, IfFlagSet(ZFlag.Z)) - case (Equal, BranchIfFalse(label)) => ZLine.jump(label, IfFlagClear(ZFlag.Z)) - case (NotEqual, BranchIfTrue(label)) => ZLine.jump(label, IfFlagClear(ZFlag.Z)) - case (NotEqual, BranchIfFalse(label)) => ZLine.jump(label, IfFlagSet(ZFlag.Z)) - case (LessUnsigned, BranchIfTrue(label)) => ZLine.jump(label, IfFlagSet(ZFlag.C)) - case (LessUnsigned, BranchIfFalse(label)) => ZLine.jump(label, IfFlagClear(ZFlag.C)) - case (GreaterOrEqualUnsigned, BranchIfTrue(label)) => ZLine.jump(label, IfFlagClear(ZFlag.C)) - case (GreaterOrEqualUnsigned, BranchIfFalse(label)) => ZLine.jump(label, IfFlagSet(ZFlag.C)) - case _ => ??? 
+ val prepareAE = Z80ExpressionCompiler.compileToA(ctx, r) ++ + List(ZLine.ld8(ZRegister.E, ZRegister.A)) ++ + Z80ExpressionCompiler.stashDEIfChanged(ctx, Z80ExpressionCompiler.compileToA(ctx, l)) + val calculateFlags = if (ComparisonType.isSigned(compType) && ctx.options.flag(CompilationFlag.EmitZ80Opcodes)) { + val fixup = ctx.nextLabel("co") + List( + ZLine.register(ZOpcode.SUB, ZRegister.E), + ZLine.jump(fixup, IfFlagClear(ZFlag.P)), + ZLine.imm8(ZOpcode.XOR, 0x80), + ZLine.label(fixup)) + } else if (ComparisonType.isSigned(compType) && !ctx.options.flag(CompilationFlag.EmitIntel8080Opcodes)) { + List(ZLine.register(ZOpcode.SUB, ZRegister.E)) + } else List(ZLine.register(ZOpcode.CP, ZRegister.E)) + if (branches == NoBranching) return prepareAE ++ calculateFlags + val (effectiveCompType, label) = branches match { + case BranchIfFalse(la) => ComparisonType.negate(compType) -> la + case BranchIfTrue(la) => compType -> la } - calculateFlags :+ jump + val jump = effectiveCompType match { + case Equal => List(ZLine.jump(label, IfFlagSet(ZFlag.Z))) + case NotEqual => List(ZLine.jump(label, IfFlagClear(ZFlag.Z))) + case LessUnsigned => List(ZLine.jump(label, IfFlagSet(ZFlag.C))) + case GreaterOrEqualUnsigned => List(ZLine.jump(label, IfFlagClear(ZFlag.C))) + case LessOrEqualUnsigned => List(ZLine.jump(label, IfFlagSet(ZFlag.Z)), ZLine.jump(label, IfFlagSet(ZFlag.C))) + case GreaterUnsigned => + val x = ctx.nextLabel("co") + List( + ZLine.jumpR(ctx, x, IfFlagSet(ZFlag.Z)), + ZLine.jump(label, IfFlagClear(ZFlag.C)), + ZLine.label(x)) + case LessSigned => + if (ctx.options.flag(CompilationFlag.EmitIntel8080Opcodes)) { + List(ZLine.jump(label, IfFlagSet(ZFlag.S))) + } else { + List( + ZLine.register(ZOpcode.BIT7, ZRegister.A), + ZLine.jump(label, IfFlagClear(ZFlag.Z))) + } + case GreaterOrEqualSigned => + if (ctx.options.flag(CompilationFlag.EmitIntel8080Opcodes)) { + List(ZLine.jump(label, IfFlagClear(ZFlag.S))) + } else { + List( + ZLine.register(ZOpcode.BIT7, 
ZRegister.A), + ZLine.jump(label, IfFlagSet(ZFlag.Z))) + } + case LessOrEqualSigned => + if (ctx.options.flag(CompilationFlag.EmitIntel8080Opcodes)) { + List( + ZLine.jump(label, IfFlagSet(ZFlag.Z)), + ZLine.jump(label, IfFlagSet(ZFlag.S))) + } else { + List( + ZLine.jump(label, IfFlagSet(ZFlag.Z)), + ZLine.register(ZOpcode.BIT7, ZRegister.A), + ZLine.jump(label, IfFlagClear(ZFlag.Z))) + } + case GreaterSigned => + val x = ctx.nextLabel("co") + if (ctx.options.flag(CompilationFlag.EmitIntel8080Opcodes)) { + List( + ZLine.jumpR(ctx, x, IfFlagSet(ZFlag.Z)), + ZLine.jump(label, IfFlagClear(ZFlag.S)), + ZLine.label(x)) + } else { + List( + ZLine.jumpR(ctx, x, IfFlagSet(ZFlag.Z)), + ZLine.register(ZOpcode.BIT7, ZRegister.A), + ZLine.jump(label, IfFlagSet(ZFlag.Z)), + ZLine.label(x)) + } + } + prepareAE ++ calculateFlags ++ jump } private def handleConstantComparison(ctx: CompilationContext, compType: ComparisonType.Value, l: Expression, r: Expression, branches: BranchSpec): Option[List[ZLine]] = { @@ -79,20 +134,20 @@ object Z80Comparisons { } else { calculateRight ++ List(ZLine.ld8(ZRegister.B, ZRegister.H), ZLine.ld8(ZRegister.C, ZRegister.L)) ++ calculateLeft -> true } - if (ctx.options.flag(CompilationFlag.EmitZ80Opcodes)) { + val (effectiveCompType, label) = branches match { + case BranchIfFalse(la) => ComparisonType.negate(compType) -> la + case BranchIfTrue(la) => compType -> la + } + if (ctx.options.flag(CompilationFlag.EmitZ80Opcodes) && !ComparisonType.isSigned(compType)) { val calculateFlags = calculated ++ List( ZLine.register(ZOpcode.OR, ZRegister.A), ZLine.registers(ZOpcode.SBC_16, ZRegister.HL, if (useBC) ZRegister.BC else ZRegister.DE)) if (branches == NoBranching) return calculateFlags - val jump = (compType, branches) match { - case (Equal, BranchIfTrue(label)) => ZLine.jump(label, IfFlagSet(ZFlag.Z)) - case (Equal, BranchIfFalse(label)) => ZLine.jump(label, IfFlagClear(ZFlag.Z)) - case (NotEqual, BranchIfTrue(label)) => ZLine.jump(label, 
IfFlagClear(ZFlag.Z)) - case (NotEqual, BranchIfFalse(label)) => ZLine.jump(label, IfFlagSet(ZFlag.Z)) - case (LessUnsigned, BranchIfTrue(label)) => ZLine.jump(label, IfFlagSet(ZFlag.C)) - case (LessUnsigned, BranchIfFalse(label)) => ZLine.jump(label, IfFlagClear(ZFlag.C)) - case (GreaterOrEqualUnsigned, BranchIfTrue(label)) => ZLine.jump(label, IfFlagClear(ZFlag.C)) - case (GreaterOrEqualUnsigned, BranchIfFalse(label)) => ZLine.jump(label, IfFlagSet(ZFlag.C)) + val jump = effectiveCompType match { + case Equal => ZLine.jump(label, IfFlagSet(ZFlag.Z)) + case NotEqual => ZLine.jump(label, IfFlagClear(ZFlag.Z)) + case LessUnsigned => ZLine.jump(label, IfFlagSet(ZFlag.C)) + case GreaterOrEqualUnsigned => ZLine.jump(label, IfFlagClear(ZFlag.C)) case _ => ??? } calculateFlags :+ jump @@ -107,11 +162,9 @@ object Z80Comparisons { ZLine.register(XOR, if (useBC) B else D), ZLine.register(OR, L)) if (branches == NoBranching) return calculateFlags - val jump = (compType, branches) match { - case (Equal, BranchIfTrue(label)) => ZLine.jump(label, IfFlagSet(ZFlag.Z)) - case (Equal, BranchIfFalse(label)) => ZLine.jump(label, IfFlagClear(ZFlag.Z)) - case (NotEqual, BranchIfTrue(label)) => ZLine.jump(label, IfFlagClear(ZFlag.Z)) - case (NotEqual, BranchIfFalse(label)) => ZLine.jump(label, IfFlagSet(ZFlag.Z)) + val jump = effectiveCompType match { + case Equal => ZLine.jump(label, IfFlagSet(ZFlag.Z)) + case NotEqual => ZLine.jump(label, IfFlagClear(ZFlag.Z)) case _ => throw new IllegalStateException() } calculateFlags :+ jump @@ -124,14 +177,39 @@ object Z80Comparisons { ZLine.ld8(A, H), ZLine.register(SBC, if (useBC) B else D)) if (branches == NoBranching) return calculateFlags - val jump = (compType, branches) match { - case (LessUnsigned, BranchIfTrue(label)) => ZLine.jump(label, IfFlagSet(ZFlag.C)) - case (LessUnsigned, BranchIfFalse(label)) => ZLine.jump(label, IfFlagClear(ZFlag.C)) - case (GreaterOrEqualUnsigned, BranchIfTrue(label)) => ZLine.jump(label, IfFlagClear(ZFlag.C)) 
- case (GreaterOrEqualUnsigned, BranchIfFalse(label)) => ZLine.jump(label, IfFlagSet(ZFlag.C)) + + def fixBit7: List[ZLine] = { + if (ctx.options.flag(CompilationFlag.EmitZ80Opcodes)) { + val fixup = ctx.nextLabel("co") + List( + ZLine.jump(fixup, IfFlagClear(ZFlag.P)), + ZLine.imm8(ZOpcode.XOR, 0x80), + ZLine.label(fixup)) + } else Nil + } + + val jump = effectiveCompType match { + case LessUnsigned => List(ZLine.jump(label, IfFlagSet(ZFlag.C))) + case GreaterOrEqualUnsigned => List(ZLine.jump(label, IfFlagClear(ZFlag.C))) + case LessSigned => + if (ctx.options.flag(CompilationFlag.EmitIntel8080Opcodes)) { + fixBit7 ++ List(ZLine.jump(label, IfFlagSet(ZFlag.S))) + } else { + fixBit7 ++ List( + ZLine.register(ZOpcode.BIT7, ZRegister.A), + ZLine.jump(label, IfFlagClear(ZFlag.Z))) + } + case GreaterOrEqualSigned => + if (ctx.options.flag(CompilationFlag.EmitIntel8080Opcodes)) { + fixBit7 ++ List(ZLine.jump(label, IfFlagClear(ZFlag.S))) + } else { + fixBit7 ++ List( + ZLine.register(ZOpcode.BIT7, ZRegister.A), + ZLine.jump(label, IfFlagSet(ZFlag.Z))) + } case _ => ??? 
} - calculateFlags :+ jump + calculateFlags ++ jump } } diff --git a/src/main/scala/millfork/compiler/z80/ZBuiltIns.scala b/src/main/scala/millfork/compiler/z80/ZBuiltIns.scala index 62ef995e..b344b404 100644 --- a/src/main/scala/millfork/compiler/z80/ZBuiltIns.scala +++ b/src/main/scala/millfork/compiler/z80/ZBuiltIns.scala @@ -392,7 +392,7 @@ object ZBuiltIns { case Some(NumericConstant(0xfd | -3, _)) if ctx.options.flag(CompilationFlag.OptimizeForSpeed) => calculateAddress ++ List(ZLine.register(INC, lv), ZLine.register(INC, lv), ZLine.register(INC, lv)) case _ => - if (ctx.options.flag(CompilationFlag.EmitExtended80Opcodes)) { + if (ctx.options.flag(CompilationFlag.EmitZ80Opcodes)) { setup ++ List( ZLine.implied(NEG), ZLine.register(ADD, lv), diff --git a/src/test/scala/millfork/test/ComparisonSuite.scala b/src/test/scala/millfork/test/ComparisonSuite.scala index 7313c762..75e0ec7f 100644 --- a/src/test/scala/millfork/test/ComparisonSuite.scala +++ b/src/test/scala/millfork/test/ComparisonSuite.scala @@ -229,7 +229,44 @@ class ComparisonSuite extends FunSuite with Matchers { | if a >= 0 { output += 1 } | } """.stripMargin - EmuBenchmarkRun(src)(_.readByte(0xc000) should equal(src.count(_ == '+'))) + EmuCrossPlatformBenchmarkRun(Cpu.Mos, Cpu.Z80, Cpu.Intel8080, Cpu.Sharp)(src)(_.readByte(0xc000) should equal(src.count(_ == '+'))) + } + + test("Signed comparison with overflow") { + // These examples require a CPU with an overflow flag. 
+ // Good: 6502, Z80 + // Bad: 8080, LR35902 + val src = + """ + | byte output @$c000 + | void main () { + | sbyte a + | sbyte b + | sbyte c + | output = 0 + | a = 4 + | b = 4 + | c = 100 + | if c >= -128 { output += 1 } + | if c >= c { output += 1 } + | if a >= -128 { output += 1 } + | if b >= -128 { output += 1 } + | if b >= -88 { output += 1 } + | if a >= -88 { output += 1 } + | if c >= -88 { output += 1 } + | if a >= -1 { output += 1 } + | if c >= -1 { output += 1 } + | if c > -128 { output += 1 } + | if a > -128 { output += 1 } + | if b > -128 { output += 1 } + | if b > -88 { output += 1 } + | if a > -88 { output += 1 } + | if c > -88 { output += 1 } + | if a > -1 { output += 1 } + | if c > -1 { output += 1 } + | } + """.stripMargin + EmuCrossPlatformBenchmarkRun(Cpu.Mos, Cpu.Z80)(src)(_.readByte(0xc000) should equal(src.count(_ == '+'))) } test("Signed comparison < and <=") { @@ -261,7 +298,7 @@ class ComparisonSuite extends FunSuite with Matchers { | if c <= -1 { output -= 7 } | } """.stripMargin - EmuBenchmarkRun(src)(_.readByte(0xc000) should equal(src.count(_ == '+'))) + EmuCrossPlatformBenchmarkRun(Cpu.Mos, Cpu.Z80, Cpu.Intel8080, Cpu.Sharp)(src)(_.readByte(0xc000) should equal(src.count(_ == '+'))) } test("Multiple params for equality") { diff --git a/src/test/scala/millfork/test/emu/EmuZ80Run.scala b/src/test/scala/millfork/test/emu/EmuZ80Run.scala index 306ab862..e9dca1b1 100644 --- a/src/test/scala/millfork/test/emu/EmuZ80Run.scala +++ b/src/test/scala/millfork/test/emu/EmuZ80Run.scala @@ -162,21 +162,35 @@ class EmuZ80Run(cpu: millfork.Cpu.Value, nodeOptimizations: List[NodeOptimizatio } } + def formatZ80Flags(f: Int): String = { + val array = Array[Char]('s', 'z', 'y', 'h', 'x', 'p', 'n', 'c') + for (x <- 0 to 7) { + if (f.&(1 << x) != 0) { + array(7 - x) = (array(7 - x).toInt - 32).toChar + } + } + new String(array) + } + def dump(cpu: Z80Core): Unit = { + val pc = cpu.getRegisterValue(CPUConstants.RegisterNames.PC) val a = 
cpu.getRegisterValue(CPUConstants.RegisterNames.A) val bc = cpu.getRegisterValue(CPUConstants.RegisterNames.BC) val de = cpu.getRegisterValue(CPUConstants.RegisterNames.DE) val hl = cpu.getRegisterValue(CPUConstants.RegisterNames.HL) - println(f"A=$a%02x,BC=$bc%04x,DE=$de%04x,HL=$hl%04x") + val f = cpu.getRegisterValue(CPUConstants.RegisterNames.F) + println(f"PC=$pc%04x A=$a%02x,BC=$bc%04x,DE=$de%04x,HL=$hl%04x F=${formatZ80Flags(f)}%s") } def dump(cpu: Cpu): Unit = { val regs = cpu.getRegisters + val pc = regs.getPC val a = regs.getA val bc = regs.getBC val de = regs.getDE val hl = regs.getHL - println(f"A=$a%02x,BC=$bc%04x,DE=$de%04x,HL=$hl%04x") + val f = regs.getFlags.toString + println(f"PC=$pc%04x A=$a%02x,BC=$bc%04x,DE=$de%04x,HL=$hl%04x F=$f%s") } }