mirror of https://github.com/KarolS/millfork.git synced 2024-12-23 08:29:35 +00:00

More optimizations, including optimizations that use jump counting

Karol Stasiak 2017-12-20 12:00:24 +01:00
parent f5f4c033f3
commit e28ff16717
14 changed files with 218 additions and 38 deletions

View File

@@ -4,7 +4,7 @@ import java.nio.charset.StandardCharsets
import java.nio.file.{Files, Paths}
import java.util.Locale
import millfork.assembly.opt.{CmosOptimizations, DangerousOptimizations, SuperOptimizer, UndocumentedOptimizations}
import millfork.assembly.opt._
import millfork.buildinfo.BuildInfo
import millfork.cli.{CliParser, CliStatus}
import millfork.env.Environment
@@ -87,7 +87,7 @@ object Main {
env.collectDeclarations(program, options)
val extras = List(
if (options.flag(CompilationFlag.EmitIllegals)) UndocumentedOptimizations.All else Nil,
if (options.flag(CompilationFlag.EmitCmosOpcodes)) CmosOptimizations.All else Nil,
if (options.flag(CompilationFlag.EmitCmosOpcodes)) CmosOptimizations.All else LaterOptimizations.Nmos,
if (options.flag(CompilationFlag.DangerousOptimizations)) DangerousOptimizations.All else Nil,
).flatten
val goodCycle = List.fill(optLevel - 1)(OptimizationPresets.Good ++ extras).flatten

View File

@@ -107,6 +107,7 @@ object OptimizationPresets {
LaterOptimizations.UseXInsteadOfStack,
LaterOptimizations.UseYInsteadOfStack,
LaterOptimizations.IndexSwitchingOptimization,
LaterOptimizations.LoadingBranchesOptimization,
)
val Good: List[AssemblyOptimization] = List[AssemblyOptimization](
@@ -114,6 +115,7 @@ object OptimizationPresets {
AlwaysGoodOptimizations.CarryFlagConversion,
DangerousOptimizations.ConstantIndexOffsetPropagation,
AlwaysGoodOptimizations.BranchInPlaceRemoval,
AlwaysGoodOptimizations.CommonBranchBodyOptimization,
AlwaysGoodOptimizations.ConstantFlowAnalysis,
AlwaysGoodOptimizations.ConstantIndexPropagation,
AlwaysGoodOptimizations.FlagFlowAnalysis,
@@ -146,5 +148,6 @@ object OptimizationPresets {
UnusedLabelRemoval,
AlwaysGoodOptimizations.TailCallOptimization,
AlwaysGoodOptimizations.UnusedCodeRemoval,
AlwaysGoodOptimizations.UnusedLabelRemoval,
)
}

View File

@@ -137,7 +137,8 @@ object OpcodeClasses {
val SupportsZeroPageIndirect = Set(ORA, AND, EOR, ADC, STA, LDA, CMP, SBC)
val ShortBranching = Set(BEQ, BNE, BMI, BPL, BVC, BVS, BCC, BCS, BRA)
val ShortConditionalBranching = Set(BEQ, BNE, BMI, BPL, BVC, BVS, BCC, BCS)
val ShortBranching = ShortConditionalBranching + BRA
val AllDirectJumps = ShortBranching + JMP
val AllLinear = Set(
ORA, AND, EOR,

View File

@@ -14,7 +14,7 @@ case object EmptyChunk extends Chunk {
}
case class LabelledChunk(label: String, chunk: Chunk) extends Chunk {
override def linearize: List[AssemblyLine] = AssemblyLine.label(Label(label)) :: chunk.linearize
override def linearize: List[AssemblyLine] = AssemblyLine.label(Label(label)).copy(elidable=false) :: chunk.linearize
override def sizeInBytes: Int = chunk.sizeInBytes
}

View File

@@ -846,5 +846,22 @@ object AlwaysGoodOptimizations {
Where(ctx => ctx.get[Constant](1).quickSimplify.isLowestByteAlwaysEqual(ctx.get[Int](0)-1)) ~~> (_ => List(AssemblyLine.implied(DEX))),
)
val CommonBranchBodyOptimization = new RuleBasedAssemblyOptimization("Common branch body optimization",
needsFlowInfo = FlowInfoRequirement.JustLabels,
(Elidable & Linear & MatchOpcode(3) & MatchAddrMode(4) & MatchParameter(5)).capture(1) ~
(
(HasOpcode(JMP) & MatchParameter(2)) ~ Not(MatchOpcode(3)).*
).capture(11) ~
(Elidable & Linear & MatchOpcode(3) & MatchAddrMode(4) & MatchParameter(5)) ~
(HasOpcode(LABEL) & MatchParameter(2) & HasCallerCount(1)).capture(12) ~~> { (code, ctx) =>
ctx.get[List[AssemblyLine]](11) ++ ctx.get[List[AssemblyLine]](12) :+ code.head
}
)
val UnusedLabelRemoval = new RuleBasedAssemblyOptimization("Unused label removal",
needsFlowInfo = FlowInfoRequirement.JustLabels,
(Elidable & HasOpcode(LABEL) & HasCallerCount(0)) ~~> (_ => Nil)
)
}
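
A rough sketch of what the new CommonBranchBodyOptimization does (the labels and operands below are made up for illustration, not taken from this commit): when the instruction just before a JMP to a join label is identical to the instruction just before that label, and the label has exactly one caller, the duplicate is hoisted past the label.

    ; before
            BEQ other
            LDA #0
            STA result
            JMP join
    other:  LDA #1
            STA result
    join:   RTS

    ; after
            BEQ other
            LDA #0
            JMP join
    other:  LDA #1
    join:   STA result
            RTS

UnusedLabelRemoval then simply deletes any elidable LABEL with a caller count of zero, which is presumably why LabelledChunk now emits its label as non-elidable.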

View File

@@ -1,34 +1,53 @@
package millfork.assembly.opt
import millfork.{CompilationFlag, CompilationOptions}
import millfork.assembly.{AssemblyLine, State}
import millfork.env.NormalFunction
import millfork.assembly.{AssemblyLine, Opcode, State}
import millfork.env.{Label, MemoryAddressConstant, NormalFunction}
/**
* @author Karol Stasiak
*/
case class FlowInfo(statusBefore: CpuStatus, importanceAfter: CpuImportance) {
case class FlowInfo(statusBefore: CpuStatus, importanceAfter: CpuImportance, labelUseCountMap: Option[Map[String, Int]]) {
def hasClear(state: State.Value): Boolean = statusBefore.hasClear(state)
def hasSet(state: State.Value): Boolean = statusBefore.hasSet(state)
def isUnimportant(state: State.Value): Boolean = importanceAfter.isUnimportant(state)
def labelUseCount(label: String): Int = labelUseCountMap.map(_.getOrElse(label, 0)).getOrElse(-1)
}
object FlowInfo {
val Default = FlowInfo(CpuStatus(), CpuImportance())
val Default = FlowInfo(CpuStatus(), CpuImportance(), None)
}
object FlowAnalyzer {
def analyze(f: NormalFunction, code: List[AssemblyLine], options: CompilationOptions): List[(FlowInfo, AssemblyLine)] = {
val forwardFlow = if (options.flag(CompilationFlag.DetailedFlowAnalysis)) {
def analyze(f: NormalFunction, code: List[AssemblyLine], options: CompilationOptions, req: FlowInfoRequirement.Value): List[(FlowInfo, AssemblyLine)] = {
val forwardFlow = req match {
case FlowInfoRequirement.BothFlows | FlowInfoRequirement.ForwardFlow =>
if (options.flag(CompilationFlag.DetailedFlowAnalysis)) {
QuantumFlowAnalyzer.analyze(f, code).map(_.collapse)
} else {
CoarseFlowAnalyzer.analyze(f, code)
}
val reverseFlow = ReverseFlowAnalyzer.analyze(f, code)
forwardFlow.zip(reverseFlow).map{case (s,i) => FlowInfo(s,i)}.zip(code)
case FlowInfoRequirement.BackwardFlow | FlowInfoRequirement.JustLabels | FlowInfoRequirement.NoRequirement =>
List.fill(code.size)(CpuStatus())
}
val reverseFlow = req match {
case FlowInfoRequirement.BothFlows | FlowInfoRequirement.BackwardFlow =>
ReverseFlowAnalyzer.analyze(f, code)
case FlowInfoRequirement.ForwardFlow | FlowInfoRequirement.JustLabels | FlowInfoRequirement.NoRequirement =>
List.fill(code.size)(CpuImportance())
}
val labelMap = req match {
case FlowInfoRequirement.NoRequirement => None
case _ => Some(code.flatMap {
case AssemblyLine(op, _, MemoryAddressConstant(Label(l)), _) if op != Opcode.LABEL => Some(l)
case _ => None
}.groupBy(identity).mapValues(_.size))
}
forwardFlow.zip(reverseFlow).map { case (s, i) => FlowInfo(s, i, labelMap) }.zip(code)
}
}
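
As a rough illustration of the new label-use bookkeeping (the snippet below is hypothetical): every line whose parameter is a label constant and whose opcode is not LABEL counts as one use of that label.

            BNE skip
            INC counter
    skip:   RTS

Here labelUseCount("skip") would be 1; a label that nothing branches or jumps to would have a count of 0, which is what HasCallerCount(0) and the new UnusedLabelRemoval look for. When no map was built at all (FlowInfoRequirement.NoRequirement), labelUseCount falls back to -1, so HasCallerCount can never match.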

View File

@ -1,6 +1,6 @@
package millfork.assembly.opt
import millfork.assembly.{AddrMode, AssemblyLine, Opcode, State}
import millfork.assembly._
import millfork.assembly.Opcode._
import millfork.assembly.AddrMode._
import millfork.assembly.OpcodeClasses._
@@ -228,15 +228,140 @@ object LaterOptimizations {
)
private val LdxAddrModes = Set(Immediate, Absolute, ZeroPage, ZeroPageY, AbsoluteY)
private val LdyAddrModes = Set(Immediate, Absolute, ZeroPage, ZeroPageX, AbsoluteX)
private val StxAddrModes = Set(Absolute, ZeroPage, ZeroPageY)
private val StyAddrModes = Set(Absolute, ZeroPage, ZeroPageX)
private val CpxyAddrModes = Set(Immediate, Absolute, ZeroPage)
private def incDecThroughIndexRegister(amount: Int, dec: Boolean, carrySet: Boolean, useX: Boolean) = {
val ldAddrModes = if (useX) LdxAddrModes else LdyAddrModes
val stAddrModes = if (useX) StxAddrModes else StyAddrModes
val ldOp = if (useX) LDX else LDY
val stOp = if (useX) STX else STY
val changeOp = if (dec) if (useX) DEX else DEY else if (useX) INX else INY
val addOp = if (dec) SBC else ADC
val addParam = if (dec ^ carrySet) amount + 1 else amount
val indexState = if (useX) State.X else State.Y
val cState = if (carrySet) HasSet(State.C) else HasClear(State.C)
val carryOp = if (carrySet) SEC else CLC
(Elidable & HasOpcode(LDA) & HasAddrModeIn(ldAddrModes)).capture(11) ~
(Elidable & HasOpcode(carryOp)).? ~
(Elidable & HasOpcode(addOp) & HasImmediate(addParam) & cState & HasClear(State.D)) ~
(Elidable & HasOpcode(STA) & HasAddrModeIn(stAddrModes) & DoesntMatterWhatItDoesWith(State.A, State.C, State.V, indexState)).capture(12) ~~> { (_, ctx) =>
ctx.get[List[AssemblyLine]](11).head.copy(opcode = ldOp) ::
(List.fill(amount)(AssemblyLine.implied(changeOp)) :+
ctx.get[List[AssemblyLine]](12).head.copy(opcode = stOp))
}
}
val IncrementThroughIndexRegisters = new RuleBasedAssemblyOptimization("Increment through index registers",
needsFlowInfo = FlowInfoRequirement.BothFlows,
incDecThroughIndexRegister(1, dec = false, carrySet = false, useX = true),
incDecThroughIndexRegister(1, dec = false, carrySet = false, useX = false),
incDecThroughIndexRegister(1, dec = false, carrySet = true, useX = true),
incDecThroughIndexRegister(1, dec = false, carrySet = true, useX = false),
incDecThroughIndexRegister(1, dec = true, carrySet = true, useX = true),
incDecThroughIndexRegister(1, dec = true, carrySet = true, useX = false),
incDecThroughIndexRegister(2, dec = false, carrySet = false, useX = true),
incDecThroughIndexRegister(2, dec = false, carrySet = false, useX = false),
incDecThroughIndexRegister(2, dec = false, carrySet = true, useX = true),
incDecThroughIndexRegister(2, dec = false, carrySet = true, useX = false),
incDecThroughIndexRegister(2, dec = true, carrySet = true, useX = true),
incDecThroughIndexRegister(2, dec = true, carrySet = true, useX = false),
)
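// A sketch of one instance of the rule above (amount = 1, increment, carry known to be clear,
// X otherwise free; "counter" is a hypothetical operand).
// Before:                After:
//     LDA counter            LDX counter
//     CLC                    INX
//     ADC #1                 STX counter
//     STA counter
// The rewrite only fires when A, C and V are dead after the store; the remaining instances
// cover Y, a step of two, and decrements done with SEC and SBC.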
val LoadingBranchesOptimization = new RuleBasedAssemblyOptimization("Loading branches optimization",
needsFlowInfo = FlowInfoRequirement.BackwardFlow,
(Elidable & HasOpcode(LDA) & HasAddrModeIn(LdxAddrModes) & DoesntMatterWhatItDoesWith(State.X)) ~
(Linear & Not(ConcernsX) & Not(ChangesA) & Not(HasOpcode(CMP)) & (Not(ReadsA) | Elidable & HasOpcode(STA) & HasAddrModeIn(StxAddrModes)) ).*.capture(39) ~
(Elidable & HasOpcode(CMP) & HasAddrModeIn(CpxyAddrModes)).?.capture(40) ~
(Elidable & HasOpcodeIn(OpcodeClasses.ShortConditionalBranching) & MatchParameter(22)).capture(41) ~
(Elidable & HasOpcode(LDA)).capture(31) ~
(Elidable & HasOpcodeIn(Set(JMP, BRA)) & MatchParameter(21)) ~
(Elidable & HasOpcode(LABEL) & MatchParameter(22)).capture(42) ~
(Elidable & HasOpcode(LDA)).capture(32) ~
(Elidable & HasOpcode(LABEL) & MatchParameter(21) & HasCallerCount(1) & DoesntMatterWhatItDoesWith(State.A, State.X, State.N, State.Z)) ~~> { (code, ctx) =>
val ldx = List(code.head.copy(opcode = LDX))
val stx = ctx.get[List[AssemblyLine]](39).map(l => if (l.opcode == STA) l.copy(opcode = STX) else l )
val cpx = ctx.get[List[AssemblyLine]](40).map(_.copy(opcode = CPX))
val branch = ctx.get[List[AssemblyLine]](41)
val label = ctx.get[List[AssemblyLine]](42)
val loadIfJumped = ctx.get[List[AssemblyLine]](32)
val loadIfNotJumped = ctx.get[List[AssemblyLine]](31)
List(loadIfJumped, ldx, stx, cpx, branch, loadIfNotJumped, label).flatten
},
(Elidable & HasOpcode(LDA) & HasAddrModeIn(LdyAddrModes) & DoesntMatterWhatItDoesWith(State.Y)) ~
(Linear & Not(ConcernsY) & Not(ChangesA) & Not(HasOpcode(CMP)) & (Not(ReadsA) | Elidable & HasOpcode(STA) & HasAddrModeIn(StyAddrModes)) ).*.capture(39) ~
(Elidable & HasOpcode(CMP) & HasAddrModeIn(CpxyAddrModes)).?.capture(40) ~
(Elidable & HasOpcodeIn(OpcodeClasses.ShortConditionalBranching) & MatchParameter(22)).capture(41) ~
(Elidable & HasOpcode(LDA)).capture(31) ~
(Elidable & HasOpcodeIn(Set(JMP, BRA)) & MatchParameter(21)) ~
(Elidable & HasOpcode(LABEL) & MatchParameter(22)).capture(42) ~
(Elidable & HasOpcode(LDA)).capture(32) ~
(Elidable & HasOpcode(LABEL) & MatchParameter(21) & HasCallerCount(1) & DoesntMatterWhatItDoesWith(State.A, State.Y, State.N, State.Z)) ~~> { (code, ctx) =>
val ldy = List(code.head.copy(opcode = LDY))
val sty = ctx.get[List[AssemblyLine]](39).map(l => if (l.opcode == STA) l.copy(opcode = STY) else l )
val cpy = ctx.get[List[AssemblyLine]](40).map(_.copy(opcode = CPY))
val branch = ctx.get[List[AssemblyLine]](41)
val label = ctx.get[List[AssemblyLine]](42)
val loadIfJumped = ctx.get[List[AssemblyLine]](32)
val loadIfNotJumped = ctx.get[List[AssemblyLine]](31)
List(loadIfJumped, ldy, sty, cpy, branch, loadIfNotJumped, label).flatten
},
(HasOpcode(LDA) & DoesntMatterWhatItDoesWith(State.X)) ~
(Linear & Not(ConcernsX)).*.capture(39) ~
(Elidable & HasOpcodeIn(OpcodeClasses.ShortConditionalBranching) & MatchParameter(22)).capture(41) ~
(Elidable & HasOpcode(LDA) & HasAddrModeIn(LdxAddrModes)).capture(31) ~
(Elidable & HasOpcodeIn(Set(JMP, BRA)) & MatchParameter(21)) ~
(Elidable & HasOpcode(LABEL) & MatchParameter(22)).capture(42) ~
(Elidable & HasOpcode(LDA) & HasAddrModeIn(LdxAddrModes)).capture(32) ~
(Elidable & HasOpcode(LABEL) & MatchParameter(21) & HasCallerCount(1)) ~
(Elidable & HasOpcode(STA) & HasAddrModeIn(StxAddrModes) & DoesntMatterWhatItDoesWith(State.A, State.X, State.N, State.Z)).capture(33) ~~> { (code, ctx) =>
val lda = List(code.head)
val cmp = ctx.get[List[AssemblyLine]](39)
val branch = ctx.get[List[AssemblyLine]](41)
val label = ctx.get[List[AssemblyLine]](42)
val loadIfJumped = ctx.get[List[AssemblyLine]](32).map(_.copy(opcode = LDX))
val loadIfNotJumped = ctx.get[List[AssemblyLine]](31).map(_.copy(opcode = LDX))
val stx = ctx.get[List[AssemblyLine]](33).map(_.copy(opcode = STX))
List(loadIfJumped, lda, cmp, branch, loadIfNotJumped, label, stx).flatten
},
(HasOpcode(LDA) & DoesntMatterWhatItDoesWith(State.Y)) ~
(Linear & Not(ConcernsY)).*.capture(39) ~
(Elidable & HasOpcodeIn(OpcodeClasses.ShortConditionalBranching) & MatchParameter(22)).capture(41) ~
(Elidable & HasOpcode(LDA) & HasAddrModeIn(LdyAddrModes)).capture(31) ~
(Elidable & HasOpcodeIn(Set(JMP, BRA)) & MatchParameter(21)) ~
(Elidable & HasOpcode(LABEL) & MatchParameter(22)).capture(42) ~
(Elidable & HasOpcode(LDA) & HasAddrModeIn(LdyAddrModes)).capture(32) ~
(Elidable & HasOpcode(LABEL) & MatchParameter(21) & HasCallerCount(1)) ~
(Elidable & HasOpcode(STA) & HasAddrModeIn(StyAddrModes) & DoesntMatterWhatItDoesWith(State.A, State.Y, State.N, State.Z)).capture(33) ~~> { (code, ctx) =>
val lda = List(code.head)
val cmp = ctx.get[List[AssemblyLine]](39)
val branch = ctx.get[List[AssemblyLine]](41)
val label = ctx.get[List[AssemblyLine]](42)
val loadIfJumped = ctx.get[List[AssemblyLine]](32).map(_.copy(opcode = LDY))
val loadIfNotJumped = ctx.get[List[AssemblyLine]](31).map(_.copy(opcode = LDY))
val sty = ctx.get[List[AssemblyLine]](33).map(_.copy(opcode = STY))
List(loadIfJumped, lda, cmp, branch, loadIfNotJumped, label, sty).flatten
},
)
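// A sketch of the effect of the third rule above (labels and operands are hypothetical).
// Before:
//         LDA flag
//         BNE yes
//         LDA #0
//         JMP done
// yes:    LDA #1
// done:   STA output
// After (the JMP and the join label are gone; the value for the taken branch is preloaded
// into X and overwritten on the fall-through path):
//         LDX #1
//         LDA flag
//         BNE yes
//         LDX #0
// yes:    STX output
// This requires the join label to have exactly one caller and A, X, N and Z to be dead
// after the store.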
val All = List(
DoubleLoadToDifferentRegisters,
DoubleLoadToTheSameRegister,
IndexSwitchingOptimization,
LoadingBranchesOptimization,
PointlessLoadAfterStore,
PointessLoadingForShifting,
LoadingAfterShifting,
UseXInsteadOfStack,
UseYInsteadOfStack,
UseZeropageAddressingMode)
val Nmos = List(
IncrementThroughIndexRegisters
)
}

View File

@@ -13,16 +13,16 @@ import scala.collection.mutable
object FlowInfoRequirement extends Enumeration {
val NoRequirement, BothFlows, ForwardFlow, BackwardFlow = Value
val NoRequirement, JustLabels, BothFlows, ForwardFlow, BackwardFlow = Value
def assertForward(x: FlowInfoRequirement.Value): Unit = x match {
case BothFlows | ForwardFlow => ()
case NoRequirement | BackwardFlow => ErrorReporting.fatal("Forward flow info required")
case NoRequirement | JustLabels | BackwardFlow => ErrorReporting.fatal("Forward flow info required")
}
def assertBackward(x: FlowInfoRequirement.Value): Unit = x match {
case BothFlows | BackwardFlow => ()
case NoRequirement | ForwardFlow => ErrorReporting.fatal("Backward flow info required")
case NoRequirement | JustLabels | ForwardFlow => ErrorReporting.fatal("Backward flow info required")
}
}
@@ -32,18 +32,7 @@ class RuleBasedAssemblyOptimization(val name: String, val needsFlowInfo: FlowInf
override def optimize(f: NormalFunction, code: List[AssemblyLine], options: CompilationOptions): List[AssemblyLine] = {
val effectiveCode = code.map(a => a.copy(parameter = a.parameter.quickSimplify))
val taggedCode = needsFlowInfo match {
case FlowInfoRequirement.NoRequirement => effectiveCode.map(FlowInfo.Default -> _)
case FlowInfoRequirement.BothFlows => FlowAnalyzer.analyze(f, effectiveCode, options)
case FlowInfoRequirement.ForwardFlow =>
if (options.flag(CompilationFlag.DetailedFlowAnalysis)) {
QuantumFlowAnalyzer.analyze(f, code).map(s => FlowInfo(s.collapse, CpuImportance())).zip(code)
} else {
CoarseFlowAnalyzer.analyze(f, code).map(s => FlowInfo(s, CpuImportance())).zip(code)
}
case FlowInfoRequirement.BackwardFlow =>
ReverseFlowAnalyzer.analyze(f, code).map(i => FlowInfo(CpuStatus(), i)).zip(code)
}
val taggedCode = FlowAnalyzer.analyze(f, effectiveCode, options, needsFlowInfo)
optimizeImpl(f, taggedCode, options)
}
@@ -755,3 +744,11 @@ case class Before(pattern: AssemblyPattern) extends AssemblyLinePattern {
override def matchLineTo(ctx: AssemblyMatchingContext, flowInfo: FlowInfo, line: AssemblyLine): Boolean = ???
}
case class HasCallerCount(count: Int) extends AssemblyLinePattern {
override def matchLineTo(ctx: AssemblyMatchingContext, flowInfo: FlowInfo, line: AssemblyLine): Boolean =
line match {
case AssemblyLine(Opcode.LABEL, _, MemoryAddressConstant(Label(l)), _) => flowInfo.labelUseCount(l) == count
case _ => false
}
}

View File

@@ -21,6 +21,8 @@ object SuperOptimizer extends AssemblyOptimization {
}
if (options.flag(CompilationFlag.EmitCmosOpcodes)) {
allOptimizers ++= CmosOptimizations.All
} else {
allOptimizers ++= LaterOptimizations.Nmos
}
allOptimizers ++= List(
VariableToRegisterOptimization,

View File

@@ -1,6 +1,6 @@
package millfork.test
import millfork.test.emu.EmuBenchmarkRun
import millfork.test.emu.{EmuBenchmarkRun, EmuSuperOptimizedRun, EmuUltraBenchmarkRun}
import org.scalatest.{FunSuite, Matchers}
/**
@@ -55,7 +55,7 @@ class ComparisonSuite extends FunSuite with Matchers {
}
test("Carry flag optimization test") {
EmuBenchmarkRun(
EmuUltraBenchmarkRun(
"""
| byte output @$c000
| void main () {

View File

@@ -1,5 +1,6 @@
package millfork.test.emu
import millfork.assembly.opt.LaterOptimizations
import millfork.{Cpu, OptimizationPresets}
/**
@@ -8,7 +9,10 @@ import millfork.{Cpu, OptimizationPresets}
object EmuOptimizedInlinedRun extends EmuRun(
Cpu.StrictMos,
OptimizationPresets.NodeOpt,
OptimizationPresets.AssOpt ++ OptimizationPresets.Good ++ OptimizationPresets.Good ++ OptimizationPresets.Good,
OptimizationPresets.AssOpt ++
OptimizationPresets.Good ++ LaterOptimizations.Nmos ++
OptimizationPresets.Good ++ LaterOptimizations.Nmos ++
OptimizationPresets.Good,
false)

View File

@@ -1,5 +1,6 @@
package millfork.test.emu
import millfork.assembly.opt.LaterOptimizations
import millfork.{Cpu, OptimizationPresets}
/**
@@ -8,7 +9,10 @@ import millfork.{Cpu, OptimizationPresets}
object EmuOptimizedRun extends EmuRun(
Cpu.StrictMos,
OptimizationPresets.NodeOpt,
OptimizationPresets.AssOpt ++ OptimizationPresets.Good ++ OptimizationPresets.Good ++ OptimizationPresets.Good,
OptimizationPresets.AssOpt ++
OptimizationPresets.Good ++ LaterOptimizations.Nmos ++
OptimizationPresets.Good ++ LaterOptimizations.Nmos ++
OptimizationPresets.Good,
false)

View File

@@ -1,5 +1,6 @@
package millfork.test.emu
import millfork.assembly.opt.LaterOptimizations
import millfork.{Cpu, OptimizationPresets}
/**
@@ -8,7 +9,10 @@ import millfork.{Cpu, OptimizationPresets}
object EmuQuantumOptimizedRun extends EmuRun(
Cpu.StrictMos,
OptimizationPresets.NodeOpt,
OptimizationPresets.AssOpt ++ OptimizationPresets.Good ++ OptimizationPresets.Good ++ OptimizationPresets.Good,
OptimizationPresets.AssOpt ++
OptimizationPresets.Good ++ LaterOptimizations.Nmos ++
OptimizationPresets.Good ++ LaterOptimizations.Nmos ++
OptimizationPresets.Good,
true)

View File

@@ -1,6 +1,6 @@
package millfork.test.emu
import millfork.assembly.opt.UndocumentedOptimizations
import millfork.assembly.opt.{LaterOptimizations, UndocumentedOptimizations}
import millfork.{Cpu, OptimizationPresets}
/**
@@ -9,7 +9,11 @@ import millfork.{Cpu, OptimizationPresets}
object EmuUndocumentedRun extends EmuRun(
Cpu.Ricoh, // not Cpu.Mos, because I haven't found an emulator that supports both illegals and decimal mode yet
OptimizationPresets.NodeOpt,
OptimizationPresets.AssOpt ++ UndocumentedOptimizations.All ++ OptimizationPresets.Good ++ UndocumentedOptimizations.All ++ OptimizationPresets.Good,
OptimizationPresets.AssOpt ++ LaterOptimizations.Nmos ++
UndocumentedOptimizations.All ++
OptimizationPresets.Good ++ LaterOptimizations.Nmos ++
UndocumentedOptimizations.All ++
OptimizationPresets.Good,
false) {
override def emitIllegals = true