Mirror of https://github.com/KarolS/millfork.git (synced 2025-04-04 22:29:32 +00:00)

Z80: Interrupt handlers. Allow using IY as base pointer.

commit 69f233e378 (parent 99df25bde2)

@@ -92,7 +92,23 @@ This may cause problems if the parameter table is stored next to a hardware regi

* `-flenient-encoding`, `-fno-lenient-encoding` –
Whether the compiler should allow for invalid characters in string/character literals that use the default encodings and replace them with alternatives.
.ini` equivalent: `lenient_encoding`. Default: no.
`.ini` equivalent: `lenient_encoding`. Default: no.

* `-fshadow-irq`, `-fno-shadow-irq` –
Whether the interrupt routines should make use of Z80 shadow registers.
`.ini` equivalent: `use_shadow_registers_for_irq`. Default: yes on Z80, no otherwise.

* `-fuse-ix-for-stack`, `-fuse-iy-for-stack`, `-fno-use-index-for-stack` –
Which of the Z80 index registers should be used for accessing stack variables, if any.
`.ini` equivalent: `ix_stack` and `iy_stack`. Default: IX on Z80, no otherwise.

* `-fuse-ix-for-scratch`, `-fno-use-ix-for-scratch` –
Allow using the IX register for other purposes.
`.ini` equivalent: `ix_scratch`. Default: no.

* `-fuse-iy-for-scratch`, `-fno-use-iy-for-scratch` –
Allow using the IY register for other purposes.
`.ini` equivalent: `iy_scratch`. Default: no.

## Optimization options
@@ -67,6 +67,17 @@ Default: the same as `encoding`.

* `ipo` - enable interprocedural optimization, default is `false`.

* `lenient_encoding` - allow for automatic substitution of invalid characters in string literals using the default encodings, default is `false`.

* `use_shadow_registers_for_irq` – use Z80 shadow registers in interrupt routines, default is `true` for Z80 and `false` otherwise

* `ix_stack` – use the IX register to access stack variables, default is `true` for Z80 and `false` otherwise

* `iy_stack` – use the IY register to access stack variables, default is `false`

* `ix_scratch` – allow using the IX register for other purposes, default is `false`

* `iy_scratch` – allow using the IY register for other purposes, default is `false`

#### `[define]` section
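The two stack options above are mutually exclusive. As a minimal sketch (not part of this commit, assuming only the flag names it introduces), a backend pass can resolve them into a single index register the same way CompactStackFrame does further down:

// Sketch only: UseIxForStack / UseIyForStack are the flags behind ix_stack / iy_stack.
def stackIndexRegister(options: CompilationOptions): Option[ZRegister.Value] =
  if (options.flag(CompilationFlag.UseIxForStack)) Some(ZRegister.IX)
  else if (options.flag(CompilationFlag.UseIyForStack)) Some(ZRegister.IY)
  else None // no index register: stack variables are addressed via SP/HL instead
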
@@ -35,7 +35,9 @@ case class CompilationOptions(platform: Platform,

EmitCmosOpcodes, EmitCmosNopOpcodes, EmitHudsonOpcodes, Emit65CE02Opcodes, EmitEmulation65816Opcodes, EmitNative65816Opcodes,
PreventJmpIndirectBug, LargeCode, ReturnWordsViaAccumulator, LUnixRelocatableCode, RorWarning)

if (CpuFamily.forType(platform.cpu) != CpuFamily.I80) invalids ++= Set(EmitExtended80Opcodes, EmitZ80Opcodes, EmitSharpOpcodes, UseIxForStack)
if (CpuFamily.forType(platform.cpu) != CpuFamily.I80) invalids ++= Set(
EmitExtended80Opcodes, EmitZ80Opcodes, EmitSharpOpcodes, EmitIntel8080Opcodes, EmitEZ80Opcodes,
UseIxForStack, UseIyForStack, UseShadowRegistersForInterrupts)

invalids = invalids.filter(flags)

@@ -103,9 +105,34 @@ case class CompilationOptions(platform: Platform,

log.error("Illegal opcodes enabled for architecture that doesn't support them")
}
}
if (flags(UseIxForStack)) {
if (flags(UseIxForStack) || flags(UseIxForScratch)) {
if (platform.cpu != Z80) {
log.error("IX register enabled for architecture that doesn't support it")
} else if (!flags(EmitZ80Opcodes)) {
log.error("IX register is enabled but instructions using it are disabled")
}
}
if (flags(UseIyForStack) || flags(UseIyForScratch)) {
if (platform.cpu != Z80) {
log.error("IY register enabled for architecture that doesn't support it")
} else if (!flags(EmitZ80Opcodes)) {
log.error("IY register is enabled but instructions using it are disabled")
}
}
if (flags(UseIxForScratch) && flags(UseIxForStack)) {
log.error("Cannot use the IX register for both stack variables and scratch simultaneously")
}
if (flags(UseIyForScratch) && flags(UseIyForStack)) {
log.error("Cannot use the IY register for both stack variables and scratch simultaneously")
}
if (flags(UseIxForStack) && flags(UseIyForStack)) {
log.error("Cannot use both IX and IY registers for stack variables simultaneously")
}
if (flags(UseShadowRegistersForInterrupts)) {
if (platform.cpu != Z80) {
log.error("Shadow registers enabled for architecture that doesn't support them")
} else if (!flags(EmitZ80Opcodes)) {
log.error("Shadow registers are enabled but instructions using them are disabled")
}
}
if (flags(EmitZ80Opcodes)) {

@@ -185,9 +212,9 @@ object Cpu extends Enumeration {

case Intel8080 =>
i80AlwaysDefaultFlags ++ Set(EmitIntel8080Opcodes)
case Z80 =>
i80AlwaysDefaultFlags ++ Set(EmitIntel8080Opcodes, EmitExtended80Opcodes, EmitZ80Opcodes, UseIxForStack)
i80AlwaysDefaultFlags ++ Set(EmitIntel8080Opcodes, EmitExtended80Opcodes, EmitZ80Opcodes, UseIxForStack, UseShadowRegistersForInterrupts)
case EZ80 =>
i80AlwaysDefaultFlags ++ Set(EmitIntel8080Opcodes, EmitExtended80Opcodes, EmitZ80Opcodes, UseIxForStack, EmitEZ80Opcodes)
i80AlwaysDefaultFlags ++ Set(EmitIntel8080Opcodes, EmitExtended80Opcodes, EmitZ80Opcodes, UseIxForStack, UseShadowRegistersForInterrupts, EmitEZ80Opcodes)
case Sharp =>
i80AlwaysDefaultFlags ++ Set(EmitExtended80Opcodes, EmitSharpOpcodes)
}

@@ -248,7 +275,10 @@ object CompilationFlag extends Enumeration {

EmitCmosOpcodes, EmitCmosNopOpcodes, EmitHudsonOpcodes, Emit65CE02Opcodes, EmitEmulation65816Opcodes, EmitNative65816Opcodes,
PreventJmpIndirectBug, LargeCode, ReturnWordsViaAccumulator,
// compilation options for I80
EmitIntel8080Opcodes, EmitExtended80Opcodes, EmitZ80Opcodes, EmitEZ80Opcodes, EmitSharpOpcodes, UseIxForStack,
EmitIntel8080Opcodes, EmitExtended80Opcodes, EmitZ80Opcodes, EmitEZ80Opcodes, EmitSharpOpcodes,
UseShadowRegistersForInterrupts,
UseIxForStack, UseIyForStack,
UseIxForScratch, UseIyForScratch,
// optimization options:
DangerousOptimizations, InlineFunctions, InterproceduralOptimization, OptimizeForSize, OptimizeForSpeed, OptimizeForSonicSpeed,
// memory allocation options

@@ -279,6 +309,10 @@ object CompilationFlag extends Enumeration {

"emit_8080" -> EmitIntel8080Opcodes,
"emit_sharp" -> EmitSharpOpcodes,
"ix_stack" -> UseIxForStack,
"iy_stack" -> UseIyForStack,
"ix_scratch" -> UseIxForScratch,
"iy_scratch" -> UseIyForScratch,
"use_shadow_registers_for_irq" -> UseShadowRegistersForInterrupts,
"ipo" -> InterproceduralOptimization,
"inline" -> InlineFunctions,
"dangerous_optimizations" -> DangerousOptimizations,

@@ -353,6 +353,32 @@ object Main {

boolean("-flenient-encoding", "-fno-lenient-encoding").action { (c, v) =>
c.changeFlag(CompilationFlag.LenientTextEncoding, v)
}.description("Whether the compiler should replace invalid characters in string literals that use the default encodings.")
boolean("-fshadow-irq", "-fno-shadow-irq").action { (c, v) =>
c.changeFlag(CompilationFlag.UseShadowRegistersForInterrupts, v)
}.description("Whether shadow registers should be used in interrupt routines (Z80 only)")
flag("-fuse-ix-for-stack").action { c =>
c.changeFlag(CompilationFlag.UseIxForStack, true).changeFlag(CompilationFlag.UseIyForStack, false)
}.description("Use IX as base pointer for stack variables (Z80 only)")
flag("-fuse-iy-for-stack").action { c =>
c.changeFlag(CompilationFlag.UseIyForStack, true).changeFlag(CompilationFlag.UseIxForStack, false)
}.description("Use IY as base pointer for stack variables (Z80 only)")
boolean("-fuse-ix-for-scratch", "-fno-use-ix-for-scratch").action { (c, v) =>
if (v) {
c.changeFlag(CompilationFlag.UseIxForScratch, true).changeFlag(CompilationFlag.UseIxForStack, false)
} else {
c.changeFlag(CompilationFlag.UseIxForScratch, false)
}
}.description("Allow using IX for other purposes (Z80 only)")
boolean("-fuse-iy-for-scratch", "-fno-use-iy-for-scratch").action { (c, v) =>
if (v) {
c.changeFlag(CompilationFlag.UseIyForScratch, true).changeFlag(CompilationFlag.UseIyForStack, false)
} else {
c.changeFlag(CompilationFlag.UseIyForScratch, false)
}
}.description("Allow using IY for other purposes (Z80 only)")
flag("-fno-use-index-for-stack").action { c =>
c.changeFlag(CompilationFlag.UseIyForStack, false).changeFlag(CompilationFlag.UseIxForStack, false)
}.description("Don't use either IX or IY as base pointer for stack variables (Z80 only)")

fluff("", "Optimization options:", "")

@@ -165,6 +165,10 @@ object ZLine {

def ldViaIy(target: ZRegister.Value, sourceOffset: Int): ZLine = ZLine(LD, TwoRegistersOffset(target, ZRegister.MEM_IY_D, sourceOffset), Constant.Zero)

def ldViaIy(targetOffset: Int, source: ZRegister.Value): ZLine = ZLine(LD, TwoRegistersOffset(ZRegister.MEM_IY_D, source, targetOffset), Constant.Zero)

def ldViaIxy(x: Boolean, target: ZRegister.Value, sourceOffset: Int): ZLine = if (x) ldViaIx(target, sourceOffset) else ldViaIy(target, sourceOffset)

def ldViaIxy(x: Boolean, targetOffset: Int, source: ZRegister.Value): ZLine = if (x) ldViaIx(targetOffset, source) else ldViaIy(targetOffset, source)
}

case class ZLine(opcode: ZOpcode.Value, registers: ZRegisters, parameter: Constant, elidable: Boolean = true) extends AbstractCode {
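A short usage sketch of the ldViaIxy overloads added above (illustrative values only; the Boolean picks IX when true and IY when false, exactly how Z80ExpressionCompiler uses them later in this commit):

// Sketch: load a two-byte stack variable through whichever index register is active.
val useIx = true        // in the compiler this comes from CompilationFlag.UseIxForStack
val baseOffset = 2      // hypothetical frame offset
val loadWord = List(
  ZLine.ldViaIxy(useIx, ZRegister.L, baseOffset),      // LD L,(IX+2)  or LD L,(IY+2)
  ZLine.ldViaIxy(useIx, ZRegister.H, baseOffset + 1))  // LD H,(IX+3)  or LD H,(IY+3)
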
@@ -3,7 +3,6 @@ package millfork.assembly.z80.opt

import millfork.assembly.z80._
import millfork.assembly.{AssemblyOptimization, OptimizationContext}
import millfork.env._
import millfork.error.ConsoleLogger
import millfork.node.ZRegister
import millfork.{CompilationFlag, NonOverlappingIntervals}

@@ -29,14 +28,19 @@ object ByteVariableToRegisterOptimization extends AssemblyOptimization[ZLine] {

override def optimize(f: NormalFunction, code: List[ZLine], optimizationContext: OptimizationContext): List[ZLine] = {
val vs = VariableStatus(f, code, optimizationContext, _.size == 1).getOrElse(return code)
val options = optimizationContext.options
val useIx = options.flag(CompilationFlag.UseIxForStack)
val useIy = options.flag(CompilationFlag.UseIyForStack)
val log = options.log
val removeVariablesForReal = !options.flag(CompilationFlag.InternalCurrentlyOptimizingForMeasurement)
val costFunction: CyclesAndBytes => Int = if (options.flag(CompilationFlag.OptimizeForSpeed)) _.cycles else _.bytes
lazy val savingsForRemovingOneStackVariable = {
val localVariableAreaSize = code.flatMap {
case ZLine(_, OneRegisterOffset(ZRegister.MEM_IX_D, offset), _, _) => Some(offset)
case ZLine(_, TwoRegistersOffset(_, ZRegister.MEM_IX_D, offset), _, _) => Some(offset)
case ZLine(_, TwoRegistersOffset(ZRegister.MEM_IX_D, _, offset), _, _) => Some(offset)
case ZLine(_, OneRegisterOffset(ZRegister.MEM_IX_D, offset), _, _) if useIx => Some(offset)
case ZLine(_, TwoRegistersOffset(_, ZRegister.MEM_IX_D, offset), _, _) if useIx => Some(offset)
case ZLine(_, TwoRegistersOffset(ZRegister.MEM_IX_D, _, offset), _, _) if useIx => Some(offset)
case ZLine(_, OneRegisterOffset(ZRegister.MEM_IY_D, offset), _, _) if useIy => Some(offset)
case ZLine(_, TwoRegistersOffset(_, ZRegister.MEM_IY_D, offset), _, _) if useIy => Some(offset)
case ZLine(_, TwoRegistersOffset(ZRegister.MEM_IY_D, _, offset), _, _) if useIy => Some(offset)
case _ => None
}.toSet.size
val prologueAndEpilogue = if (f.returnType.size == 2) CyclesAndBytes(107, 20) else CyclesAndBytes(95, 17)

@@ -49,12 +53,12 @@ object ByteVariableToRegisterOptimization extends AssemblyOptimization[ZLine] {

}

val bCandidates = getCandidates(vs, _.b, ZRegister.B, savingsForRemovingOneStackVariable)
val cCandidates = getCandidates(vs, _.c, ZRegister.C, savingsForRemovingOneStackVariable)
val dCandidates = getCandidates(vs, _.d, ZRegister.D, savingsForRemovingOneStackVariable)
val eCandidates = getCandidates(vs, _.e, ZRegister.E, savingsForRemovingOneStackVariable)
val hCandidates = getCandidates(vs, _.h, ZRegister.H, savingsForRemovingOneStackVariable)
val lCandidates = getCandidates(vs, _.l, ZRegister.L, savingsForRemovingOneStackVariable)
val bCandidates = getCandidates(vs, _.b, ZRegister.B, savingsForRemovingOneStackVariable, useIx, useIy)
val cCandidates = getCandidates(vs, _.c, ZRegister.C, savingsForRemovingOneStackVariable, useIx, useIy)
val dCandidates = getCandidates(vs, _.d, ZRegister.D, savingsForRemovingOneStackVariable, useIx, useIy)
val eCandidates = getCandidates(vs, _.e, ZRegister.E, savingsForRemovingOneStackVariable, useIx, useIy)
val hCandidates = getCandidates(vs, _.h, ZRegister.H, savingsForRemovingOneStackVariable, useIx, useIy)
val lCandidates = getCandidates(vs, _.l, ZRegister.L, savingsForRemovingOneStackVariable, useIx, useIy)

val bCandidateSets = NonOverlappingIntervals.apply[(String, Range, CyclesAndBytes)](bCandidates, _._2.start, _._2.end)
val cCandidateSets = NonOverlappingIntervals.apply[(String, Range, CyclesAndBytes)](cCandidates, _._2.start, _._2.end)

@@ -135,7 +139,12 @@ object ByteVariableToRegisterOptimization extends AssemblyOptimization[ZLine] {

reportOptimizedBlock(oldCode, newCode)
output ++= newCode
i = range.end
if (removeVariablesForReal && !v.startsWith("IX+") && vs.variablesWithLifetimesMap.contains(v) && contains(range, vs.variablesWithLifetimesMap(v))) {
if (removeVariablesForReal &&
!v.startsWith("IX+") &&
!v.startsWith("IY+") &&
!v.startsWith("SP+") &&
vs.variablesWithLifetimesMap.contains(v) &&
contains(range, vs.variablesWithLifetimesMap(v))) {
f.environment.removeVariable(v)
}
true

@@ -171,7 +180,7 @@ object ByteVariableToRegisterOptimization extends AssemblyOptimization[ZLine] {

}
}

private def getCandidates(vs: VariableStatus, importanceExtractor: CpuImportance => Importance, register: ZRegister.Value, savingsForRemovingOneStackVariable: =>CyclesAndBytes) = {
private def getCandidates(vs: VariableStatus, importanceExtractor: CpuImportance => Importance, register: ZRegister.Value, savingsForRemovingOneStackVariable: =>CyclesAndBytes, useIx: Boolean, useIy: Boolean): Seq[(String, Range, CyclesAndBytes)] = {
vs.variablesWithLifetimes.filter {
case (v, range) =>
val tuple = vs.codeWithFlow(range.start)

@@ -183,12 +192,15 @@ object ByteVariableToRegisterOptimization extends AssemblyOptimization[ZLine] {

case (v, range) =>
val id = v match {
case MemoryVariable(name, _, _) => name
case StackVariable(_, _, offset) => "IX+" + offset
case StackVariable(_, _, offset) if useIx => "IX+" + offset
case StackVariable(_, _, offset) if useIy => "IY+" + offset
case StackVariable(_, _, offset) => "SP+" + offset
}
var bonus = CyclesAndBytes.Zero
if (vs.variablesWithRegisterHint(v.name)) bonus += CyclesAndBytes(16, 16)
if (id.startsWith("IX+")) bonus += savingsForRemovingOneStackVariable
canBeInlined(id, synced = false, register, Some(false), Some(false), Some(false), vs.codeWithFlow.slice(range.start, range.end)).map { score =>
if (id.startsWith("IX+") || id.startsWith("IY+")) bonus += savingsForRemovingOneStackVariable
if (id.startsWith("SP+")) None
else canBeInlined(id, synced = false, register, Some(false), Some(false), Some(false), vs.codeWithFlow.slice(range.start, range.end)).map { score =>
(id, range, score + bonus)
}
}

@@ -220,20 +232,30 @@ object ByteVariableToRegisterOptimization extends AssemblyOptimization[ZLine] {

case _ => None
}
}
object ThisOffset {
object ThisOffsetX {
def unapply(c: Int): Option[Int] = if ("IX+" + c == vname) Some(c) else None
}
object ThisOffsetY {
def unapply(c: Int): Option[Int] = if ("IY+" + c == vname) Some(c) else None
}
code match {
case (_, ZLine(LD, TwoRegisters(A, MEM_ABS_8), ThisVar(_), _)) :: xs =>
canBeInlined(vname, synced, target, addressInHl, addressInBc, addressInDe, xs).map(add(CyclesAndBytes(9, 2)))
case (_, ZLine(LD, TwoRegisters(MEM_ABS_8, A), ThisVar(_), _)) :: xs =>
canBeInlined(vname, synced, target, addressInHl, addressInBc, addressInDe, xs).map(add(CyclesAndBytes(9, 2)))

case (_, ZLine(LD, TwoRegistersOffset(reg, MEM_IX_D, ThisOffset(_)), _, _)) :: xs =>
case (_, ZLine(LD, TwoRegistersOffset(reg, MEM_IX_D, ThisOffsetX(_)), _, _)) :: xs =>
canBeInlined(vname, synced, target, addressInHl, addressInBc, addressInDe, xs).map(add(reg == target, CyclesAndBytes(19, 3), CyclesAndBytes(15, 2)))
case (_, ZLine(LD, TwoRegistersOffset(MEM_IX_D, reg, ThisOffset(_)), _, _)) :: xs =>
case (_, ZLine(LD, TwoRegistersOffset(MEM_IX_D, reg, ThisOffsetX(_)), _, _)) :: xs =>
canBeInlined(vname, synced, target, addressInHl, addressInBc, addressInDe, xs).map(add(reg == target, CyclesAndBytes(19, 3), CyclesAndBytes(15, 2)))
case (_, ZLine(_, OneRegisterOffset(MEM_IX_D, ThisOffset(_)), _, _)) :: xs =>
case (_, ZLine(_, OneRegisterOffset(MEM_IX_D, ThisOffsetX(_)), _, _)) :: xs =>
canBeInlined(vname, synced, target, addressInHl, addressInBc, addressInDe, xs).map(add(CyclesAndBytes(15, 2)))

case (_, ZLine(LD, TwoRegistersOffset(reg, MEM_IY_D, ThisOffsetY(_)), _, _)) :: xs =>
canBeInlined(vname, synced, target, addressInHl, addressInBc, addressInDe, xs).map(add(reg == target, CyclesAndBytes(19, 3), CyclesAndBytes(15, 2)))
case (_, ZLine(LD, TwoRegistersOffset(MEM_IY_D, reg, ThisOffsetY(_)), _, _)) :: xs =>
canBeInlined(vname, synced, target, addressInHl, addressInBc, addressInDe, xs).map(add(reg == target, CyclesAndBytes(19, 3), CyclesAndBytes(15, 2)))
case (_, ZLine(_, OneRegisterOffset(MEM_IY_D, ThisOffsetY(_)), _, _)) :: xs =>
canBeInlined(vname, synced, target, addressInHl, addressInBc, addressInDe, xs).map(add(CyclesAndBytes(15, 2)))

case (_, ZLine(LD_16, TwoRegisters(HL, IMM_16), ThisVar(_), _)) :: xs =>

@@ -309,6 +331,7 @@ object ByteVariableToRegisterOptimization extends AssemblyOptimization[ZLine] {

ZLine.ld8(A, target) :: inlineVars(vname, target, addressInHl, addressInBc, addressInDe, xs)
case ZLine(LD, TwoRegisters(MEM_ABS_8, A), MemoryAddressConstant(th), _) :: xs if th.name == vname =>
ZLine.ld8(target, A) :: inlineVars(vname, target, addressInHl, addressInBc, addressInDe, xs)

case ZLine(LD, TwoRegistersOffset(reg, MEM_IX_D, off), _, _) :: xs if "IX+" + off == vname =>
if (reg == target) inlineVars(vname, target, addressInHl, addressInBc, addressInDe, xs)
else ZLine.ld8(reg, target) :: inlineVars(vname, target, addressInHl, addressInBc, addressInDe, xs)

@@ -318,6 +341,15 @@ object ByteVariableToRegisterOptimization extends AssemblyOptimization[ZLine] {

case (l@ZLine(_, OneRegisterOffset(MEM_IX_D, off), _, _)) :: xs if "IX+" + off == vname =>
l.copy(registers = OneRegister(target)) :: inlineVars(vname, target, addressInHl, addressInBc, addressInDe, xs)

case ZLine(LD, TwoRegistersOffset(reg, MEM_IY_D, off), _, _) :: xs if "IY+" + off == vname =>
if (reg == target) inlineVars(vname, target, addressInHl, addressInBc, addressInDe, xs)
else ZLine.ld8(reg, target) :: inlineVars(vname, target, addressInHl, addressInBc, addressInDe, xs)
case ZLine(LD, TwoRegistersOffset(MEM_IY_D, reg, off), _, _) :: xs if "IY+" + off == vname =>
if (reg == target) inlineVars(vname, target, addressInHl, addressInBc, addressInDe, xs)
else ZLine.ld8(target, reg) :: inlineVars(vname, target, addressInHl, addressInBc, addressInDe, xs)
case (l@ZLine(_, OneRegisterOffset(MEM_IY_D, off), _, _)) :: xs if "IY+" + off == vname =>
l.copy(registers = OneRegister(target)) :: inlineVars(vname, target, addressInHl, addressInBc, addressInDe, xs)

case ZLine(LD_16, TwoRegisters(HL, IMM_16), MemoryAddressConstant(th), _) :: xs if th.name == vname =>
inlineVars(vname, target, addressInHl = true, addressInBc, addressInDe, xs)
case ZLine(LD_16, TwoRegisters(BC, IMM_16), MemoryAddressConstant(th), _) :: xs if th.name == vname =>
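For readers unfamiliar with the pattern, ThisOffsetX/ThisOffsetY above are ordinary Scala extractor objects; a REPL-style sketch of the mechanism, with a hypothetical vname:

// Sketch: an extractor that only matches an offset belonging to the tracked variable.
val vname = "IX+4"  // hypothetical variable id, in the same format getCandidates builds
object ThisOffsetX {
  def unapply(c: Int): Option[Int] = if ("IX+" + c == vname) Some(c) else None
}
4 match {
  case ThisOffsetX(off) => println(s"offset $off belongs to $vname")   // matches here
  case other            => println(s"offset $other is some other slot")
}
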
@@ -1,9 +1,9 @@

package millfork.assembly.z80.opt

import millfork.CompilationFlag
import millfork.assembly.{AssemblyOptimization, OptimizationContext}
import millfork.assembly.z80._
import millfork.env.{MemoryAddressConstant, NormalFunction, NumericConstant}
import millfork.error.ConsoleLogger
import millfork.node.ZRegister

/**

@@ -13,7 +13,11 @@ object CompactStackFrame extends AssemblyOptimization[ZLine] {

override def name: String = "Compacting the stack frame"

override def optimize(f: NormalFunction, code: List[ZLine], context: OptimizationContext): List[ZLine] = {
optimizeStart(code) match {
val register =
if (context.options.flag(CompilationFlag.UseIxForStack)) ZRegister.IX
else if (context.options.flag(CompilationFlag.UseIyForStack)) ZRegister.IY
else return code
optimizeStart(code, register) match {
case Some((optimized, before, after)) =>
context.log.debug(s"Optimized stack frame from $before to $after bytes")
optimized

@@ -21,26 +25,29 @@ object CompactStackFrame extends AssemblyOptimization[ZLine] {

}
}

def optimizeStart(code: List[ZLine]): Option[(List[ZLine], Int, Int)] = {
def optimizeStart(code: List[ZLine], Index: ZRegister.Value): Option[(List[ZLine], Int, Int)] = {
import millfork.assembly.z80.ZOpcode._
import millfork.node.ZRegister._
code match {
case (name@ZLine(LABEL, _, _, _)) ::
ZLine(PUSH, OneRegister(IX), _, true) ::
ZLine(LD_16, TwoRegisters(IX, IMM_16), NumericConstant(negativeSize, _), true) ::
ZLine(ADD_16, TwoRegisters(IX, SP), _, true) ::
ZLine(LD_16, TwoRegisters(SP, IX), _, true) :: tail =>
ZLine(PUSH, OneRegister(Index), _, true) ::
ZLine(LD_16, TwoRegisters(Index, IMM_16), NumericConstant(negativeSize, _), true) ::
ZLine(ADD_16, TwoRegisters(Index, SP), _, true) ::
ZLine(LD_16, TwoRegisters(SP, Index), _, true) :: tail =>
val sourceSize = (-negativeSize).&(0xffff).toInt
val usedOffsets: Set[Int] = findUsedOffsets(tail)
val usedOffsets: Set[Int] = findUsedOffsets(tail, Index match {
case IX => MEM_IX_D
case IY => MEM_IY_D
})
val targetSize = usedOffsets.size + usedOffsets.size.&(1)
if (targetSize == sourceSize) None else {
val prologue = if (targetSize == 0) Nil else List(
ZLine.register(PUSH, IX),
ZLine.ldImm16(IX, 0x10000 - targetSize),
ZLine.registers(ADD_16, IX, SP),
ZLine.ld16(SP, IX))
ZLine.register(PUSH, Index),
ZLine.ldImm16(Index, 0x10000 - targetSize),
ZLine.registers(ADD_16, Index, SP),
ZLine.ld16(SP, Index))
val map = usedOffsets.toSeq.sorted.zipWithIndex.toMap
optimizeContinue(tail, sourceSize, targetSize, map).map { optTail =>
optimizeContinue(tail, Index, sourceSize, targetSize, map).map { optTail =>
(name :: prologue ++ optTail, sourceSize, targetSize)
}
}

@@ -50,55 +57,59 @@ object CompactStackFrame extends AssemblyOptimization[ZLine] {

}

def findUsedOffsets(code: List[ZLine]): Set[Int] = {
def findUsedOffsets(code: List[ZLine], Mem: ZRegister.Value): Set[Int] = {
code.flatMap {
case ZLine(_, OneRegisterOffset(ZRegister.MEM_IX_D, offset), _, _) => Some(offset)
case ZLine(_, TwoRegistersOffset(_, ZRegister.MEM_IX_D, offset), _, _) => Some(offset)
case ZLine(_, TwoRegistersOffset(ZRegister.MEM_IX_D, _, offset), _, _) => Some(offset)
case ZLine(_, OneRegisterOffset(Mem, offset), _, _) => Some(offset)
case ZLine(_, TwoRegistersOffset(_, Mem, offset), _, _) => Some(offset)
case ZLine(_, TwoRegistersOffset(Mem, _, offset), _, _) => Some(offset)
case _ => None
}.toSet
}

def optimizeContinue(code: List[ZLine], sourceSize: Int, targetSize: Int, mapping: Map[Int, Int]): Option[List[ZLine]] = {
def optimizeContinue(code: List[ZLine], Index: ZRegister.Value, sourceSize: Int, targetSize: Int, mapping: Map[Int, Int]): Option[List[ZLine]] = {
import millfork.assembly.z80.ZOpcode._
import millfork.node.ZRegister._
val Mem = Index match {
case IX => MEM_IX_D
case IY => MEM_IY_D
}
code match {
case (head@ZLine(_, TwoRegistersOffset(reg, MEM_IX_D, offset), _, _)) :: tail =>
optimizeContinue(tail, sourceSize, targetSize, mapping).map(
head.copy(registers = TwoRegistersOffset(reg, MEM_IX_D, mapping(offset))) :: _)
case (head@ZLine(_, TwoRegistersOffset(reg, Mem, offset), _, _)) :: tail =>
optimizeContinue(tail, Index, sourceSize, targetSize, mapping).map(
head.copy(registers = TwoRegistersOffset(reg, Mem, mapping(offset))) :: _)

case (head@ZLine(_, TwoRegistersOffset(MEM_IX_D, reg, offset), _, _)) :: tail =>
optimizeContinue(tail, sourceSize, targetSize, mapping).map(
head.copy(registers = TwoRegistersOffset(MEM_IX_D, reg, mapping(offset))) :: _)
case (head@ZLine(_, TwoRegistersOffset(Mem, reg, offset), _, _)) :: tail =>
optimizeContinue(tail, Index, sourceSize, targetSize, mapping).map(
head.copy(registers = TwoRegistersOffset(Mem, reg, mapping(offset))) :: _)

case (head@ZLine(_, OneRegisterOffset(MEM_IX_D, offset), _, _)) :: tail =>
optimizeContinue(tail, sourceSize, targetSize, mapping).map(
head.copy(registers = OneRegisterOffset(MEM_IX_D, mapping(offset))) :: _)
case (head@ZLine(_, OneRegisterOffset(Mem, offset), _, _)) :: tail =>
optimizeContinue(tail, Index, sourceSize, targetSize, mapping).map(
head.copy(registers = OneRegisterOffset(Mem, mapping(offset))) :: _)

case
ZLine(LD_16, TwoRegisters(IX, IMM_16), NumericConstant(size, _), _) ::
ZLine(ADD_16, TwoRegisters(IX, SP), _, _) ::
ZLine(LD_16, TwoRegisters(SP, IX), _, _) ::
ZLine(POP, OneRegister(IX), _, _) :: tail =>
ZLine(LD_16, TwoRegisters(Index, IMM_16), NumericConstant(size, _), _) ::
ZLine(ADD_16, TwoRegisters(Index, SP), _, _) ::
ZLine(LD_16, TwoRegisters(SP, Index), _, _) ::
ZLine(POP, OneRegister(Index), _, _) :: tail =>
if (size != sourceSize) None
else {
stripReturn(tail).flatMap {
case (ret, rest) =>
val epilogue = if (targetSize == 0) Nil else {
List(
ZLine.ldImm16(IX, targetSize),
ZLine.registers(ADD_16, IX, SP),
ZLine.ld16(SP, IX),
ZLine.register(POP, IX))
ZLine.ldImm16(Index, targetSize),
ZLine.registers(ADD_16, Index, SP),
ZLine.ld16(SP, Index),
ZLine.register(POP, Index))
}
optimizeContinue(rest, sourceSize, targetSize, mapping).map(epilogue ++ ret ++ _)
optimizeContinue(rest, Index, sourceSize, targetSize, mapping).map(epilogue ++ ret ++ _)
}
}
case
ZLine(LD_16, TwoRegisters(HL, IMM_16), NumericConstant(size, _), _) ::
ZLine(ADD_16, TwoRegisters(HL, SP), _, _) ::
ZLine(LD_16, TwoRegisters(SP, HL), _, _) ::
ZLine(POP, OneRegister(IX), _, _) :: tail =>
ZLine(POP, OneRegister(Index), _, _) :: tail =>
if (size != sourceSize) {
println("Mismatched stack frame sizes")
None

@@ -110,15 +121,15 @@ object CompactStackFrame extends AssemblyOptimization[ZLine] {

ZLine.ldImm16(HL, targetSize),
ZLine.registers(ADD_16, HL, SP),
ZLine.ld16(SP, HL),
ZLine.register(POP, IX))
ZLine.register(POP, Index))
}
optimizeContinue(rest, sourceSize, targetSize, mapping).map(epilogue ++ ret ++ _)
optimizeContinue(rest, Index, sourceSize, targetSize, mapping).map(epilogue ++ ret ++ _)
}
}
case ZLine(RET | RETI | RETN | BYTE, _, _, _) :: _ => None
case ZLine(JP, _, MemoryAddressConstant(f: NormalFunction), _) :: _ => None
case x :: _ if x.changesRegister(ZRegister.IX) => None
case x :: xs => optimizeContinue(xs, sourceSize, targetSize, mapping).map(x :: _)
case x :: _ if x.changesRegister(Index) => None
case x :: xs => optimizeContinue(xs, Index, sourceSize, targetSize, mapping).map(x :: _)
case Nil => Some(Nil)
}
}
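For context (not part of the diff): the frame prologue that optimizeStart matches is the one Z80Compiler emits at function entry. Sketched with the same ZLine helpers, for Index = IX and a hypothetical 6-byte frame:

// Sketch: the prologue shape recognized and shrunk by CompactStackFrame.
val framePrologue = List(
  ZLine.register(PUSH, IX),         // save the caller's IX
  ZLine.ldImm16(IX, 0x10000 - 6),   // IX := -frameSize, encoded as an unsigned 16-bit constant
  ZLine.registers(ADD_16, IX, SP),  // IX := SP - frameSize
  ZLine.ld16(SP, IX))               // SP := IX, allocating the frame
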
@@ -1,5 +1,6 @@

package millfork.assembly.z80.opt

import millfork.CompilationFlag
import millfork.assembly.OptimizationContext
import millfork.assembly.opt.SingleStatus
import millfork.assembly.z80.{OneRegister, TwoRegisters, ZLine}

@@ -84,9 +85,13 @@ object VariableStatus {

case v: StackVariable =>
v -> StackVariableLifetime.apply(v.baseOffset, flow)
}
val stackPrefix =
if (optimizationContext.options.flag(CompilationFlag.UseIxForStack)) "IX+"
else if (optimizationContext.options.flag(CompilationFlag.UseIyForStack)) "IY+"
else "SP+"
val variablesWithLifetimesMap = variablesWithLifetimes.map {
case (v: MemoryVariable, lt) => v.name -> lt
case (v: StackVariable, lt) => ("IX+" + v.baseOffset) -> lt
case (v: StackVariable, lt) => (stackPrefix + v.baseOffset) -> lt
}.toMap
Some(new VariableStatus(
paramVariables,
@@ -19,6 +19,13 @@ abstract class AbstractReturnDispatch[T <: AbstractCode] {

return Nil
}

if (ctx.function.interrupt) {
ctx.log.error(s"Return dispatch in interrupt function ${ctx.function.name}", stmt.position)
}
if (ctx.function.kernalInterrupt) {
ctx.log.error(s"Return dispatch in kernal interrupt function ${ctx.function.name}", stmt.position)
}

def toConstant(e: Expression) = {
ctx.env.eval(e).getOrElse {
ctx.log.error("Non-constant parameter for dispatch branch", e.position)
@@ -61,7 +61,101 @@ object Z80Compiler extends AbstractCompiler[ZLine] {

}
case _ => Nil
}
label :: (stackPointerFixAtBeginning(ctx) ++ storeParamsFromRegisters ++ chunk)
label :: (preserveRegisters(ctx) ++ stackPointerFixAtBeginning(ctx) ++ storeParamsFromRegisters ++ chunk)
}

def preserveRegisters(ctx: CompilationContext): List[ZLine] = {
import millfork.assembly.z80.ZOpcode._
import ZRegister._
if (ctx.function.interrupt) {
if (ctx.options.flag(CompilationFlag.EmitZ80Opcodes)) {
if (ctx.options.flag(CompilationFlag.UseShadowRegistersForInterrupts)) {
List(
ZLine.implied(EX_AF_AF),
ZLine.implied(EXX),
ZLine.register(PUSH, IX),
ZLine.register(PUSH, IY))
} else {
List(
ZLine.register(PUSH, AF),
ZLine.register(PUSH, BC),
ZLine.register(PUSH, DE),
ZLine.register(PUSH, HL),
ZLine.register(PUSH, IX),
ZLine.register(PUSH, IY))
}
} else {
List(
ZLine.register(PUSH, AF),
ZLine.register(PUSH, BC),
ZLine.register(PUSH, DE),
ZLine.register(PUSH, HL))
}
} else if (ctx.function.kernalInterrupt) {
if (ctx.options.flag(CompilationFlag.EmitZ80Opcodes)) {
List(
ZLine.register(PUSH, IX),
ZLine.register(PUSH, IY))
} else Nil
} else Nil
}

def restoreRegistersAndReturn(ctx: CompilationContext): List[ZLine] = {
import millfork.assembly.z80.ZOpcode._
import ZRegister._
if (ctx.function.interrupt) {
if (ctx.options.flag(CompilationFlag.EmitZ80Opcodes)) {
if (ctx.options.flag(CompilationFlag.UseShadowRegistersForInterrupts)) {
List(
ZLine.register(POP, IY),
ZLine.register(POP, IX),
ZLine.implied(EXX),
ZLine.implied(EX_AF_AF),
ZLine.implied(EI),
ZLine.implied(RETI)) // TODO: NMI?
} else {
List(
ZLine.register(POP, IY),
ZLine.register(POP, IX),
ZLine.register(POP, HL),
ZLine.register(POP, DE),
ZLine.register(POP, BC),
ZLine.register(POP, AF),
ZLine.implied(EI),
ZLine.implied(RETI))
}
} else if (ctx.options.flag(CompilationFlag.EmitSharpOpcodes)) {
List(
ZLine.register(POP, HL),
ZLine.register(POP, DE),
ZLine.register(POP, BC),
ZLine.register(POP, AF),
ZLine.implied(RETI)) // Gameboy enables interrupts automatically
} else if (ctx.options.flag(CompilationFlag.EmitExtended80Opcodes)) {
List(
ZLine.register(POP, HL),
ZLine.register(POP, DE),
ZLine.register(POP, BC),
ZLine.register(POP, AF),
ZLine.implied(EI),
ZLine.implied(RETI))
} else {
List(
ZLine.register(POP, HL),
ZLine.register(POP, DE),
ZLine.register(POP, BC),
ZLine.register(POP, AF),
ZLine.implied(EI),
ZLine.implied(RET))
}
} else if (ctx.function.kernalInterrupt) {
if (ctx.options.flag(CompilationFlag.EmitZ80Opcodes)) {
List(
ZLine.register(POP, IY),
ZLine.register(POP, IX),
ZLine.implied(RET))
} else List(ZLine.implied(RET))
} else List(ZLine.implied(RET))
}

def stackPointerFixAtBeginning(ctx: CompilationContext): List[ZLine] = {

@@ -84,6 +178,12 @@ object Z80Compiler extends AbstractCompiler[ZLine] {

ZLine.ldImm16(IX, 0x10000 - localVariableArea),
ZLine.registers(ADD_16, IX, SP),
ZLine.ld16(SP, IX))
} else if (ctx.options.flag(CompilationFlag.UseIyForStack)) {
List(
ZLine.register(PUSH, IY),
ZLine.ldImm16(IY, 0x10000 - localVariableArea),
ZLine.registers(ADD_16, IY, SP),
ZLine.ld16(SP, IY))
} else if (localVariableArea == 2) {
// cycles: 11
// bytes: 1
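Reading preserveRegisters and restoreRegistersAndReturn together: with EmitZ80Opcodes and UseShadowRegistersForInterrupts both set, an `interrupt` function body is bracketed roughly as follows (a sketch assembled from the lists above, not new behaviour):

// Sketch: prologue/epilogue pair for a Z80 interrupt handler using shadow registers.
val prologue = List(
  ZLine.implied(EX_AF_AF),   // swap AF with AF'
  ZLine.implied(EXX),        // swap BC, DE, HL with their shadows
  ZLine.register(PUSH, IX),  // IX and IY have no shadow copies, so they are pushed
  ZLine.register(PUSH, IY))
val epilogue = List(
  ZLine.register(POP, IY),
  ZLine.register(POP, IX),
  ZLine.implied(EXX),
  ZLine.implied(EX_AF_AF),
  ZLine.implied(EI),         // re-enable interrupts before returning
  ZLine.implied(RETI))
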
@@ -337,38 +337,41 @@ object Z80ExpressionCompiler extends AbstractExpressionCompiler[ZLine] {

}
case v: StackVariable =>
import ZRegister._
if (ctx.options.flag(CompilationFlag.UseIxForStack)) {
if (ctx.options.flag(CompilationFlag.UseIxForStack) || ctx.options.flag(CompilationFlag.UseIyForStack)) {
val x = ctx.options.flag(CompilationFlag.UseIxForStack)
v.typ.size match {
case 0 => ???
case 1 => loadByteViaIX(v.baseOffset, target)
case 1 =>
if (x) loadByteViaIX(v.baseOffset, target)
else loadByteViaIY(v.baseOffset, target)
case 2 => target match {
// TODO: signed words
case ZExpressionTarget.NOTHING => Nil
case ZExpressionTarget.HL =>
List(ZLine.ldViaIx(ZRegister.L, v.baseOffset), ZLine.ldViaIx(ZRegister.H, v.baseOffset + 1))
List(ZLine.ldViaIxy(x, ZRegister.L, v.baseOffset), ZLine.ldViaIxy(x, ZRegister.H, v.baseOffset + 1))
case ZExpressionTarget.BC =>
List(ZLine.ldViaIx(ZRegister.C, v.baseOffset), ZLine.ldViaIx(ZRegister.B, v.baseOffset + 1))
List(ZLine.ldViaIxy(x, ZRegister.C, v.baseOffset), ZLine.ldViaIxy(x, ZRegister.B, v.baseOffset + 1))
case ZExpressionTarget.DE =>
List(ZLine.ldViaIx(ZRegister.E, v.baseOffset), ZLine.ldViaIx(ZRegister.D, v.baseOffset + 1))
List(ZLine.ldViaIxy(x, ZRegister.E, v.baseOffset), ZLine.ldViaIxy(x, ZRegister.D, v.baseOffset + 1))
case ZExpressionTarget.EHL =>
List(ZLine.ldViaIx(L, v.baseOffset), ZLine.ldViaIx(H, v.baseOffset + 1), ZLine.ldImm8(E, 0))
List(ZLine.ldViaIxy(x, L, v.baseOffset), ZLine.ldViaIxy(x, H, v.baseOffset + 1), ZLine.ldImm8(E, 0))
case ZExpressionTarget.DEHL =>
List(ZLine.ldViaIx(L, v.baseOffset), ZLine.ldViaIx(H, v.baseOffset + 1), ZLine.ldImm16(DE, 0))
List(ZLine.ldViaIxy(x, L, v.baseOffset), ZLine.ldViaIxy(x, H, v.baseOffset + 1), ZLine.ldImm16(DE, 0))
}
case 3 => target match {
// TODO: signed farwords
case ZExpressionTarget.NOTHING => Nil
case ZExpressionTarget.EHL =>
List(ZLine.ldViaIx(L, v.baseOffset), ZLine.ldViaIx(H, v.baseOffset + 1), ZLine.ldViaIx(E, v.baseOffset + 2))
List(ZLine.ldViaIxy(x, L, v.baseOffset), ZLine.ldViaIxy(x, H, v.baseOffset + 1), ZLine.ldViaIxy(x, E, v.baseOffset + 2))
case ZExpressionTarget.DEHL =>
List(ZLine.ldViaIx(L, v.baseOffset), ZLine.ldViaIx(H, v.baseOffset + 1), ZLine.ldViaIx(E, v.baseOffset + 2), ZLine.ldImm8(D, 0))
List(ZLine.ldViaIxy(x, L, v.baseOffset), ZLine.ldViaIxy(x, H, v.baseOffset + 1), ZLine.ldViaIxy(x, E, v.baseOffset + 2), ZLine.ldImm8(D, 0))
}
case 4 => target match {
case ZExpressionTarget.NOTHING => Nil
case ZExpressionTarget.EHL =>
List(ZLine.ldViaIx(L, v.baseOffset), ZLine.ldViaIx(H, v.baseOffset + 1), ZLine.ldViaIx(E, v.baseOffset + 2))
List(ZLine.ldViaIxy(x, L, v.baseOffset), ZLine.ldViaIxy(x, H, v.baseOffset + 1), ZLine.ldViaIxy(x, E, v.baseOffset + 2))
case ZExpressionTarget.DEHL =>
List(ZLine.ldViaIx(L, v.baseOffset), ZLine.ldViaIx(H, v.baseOffset + 1), ZLine.ldViaIx(E, v.baseOffset + 2), ZLine.ldViaIx(D, v.baseOffset + 3))
List(ZLine.ldViaIxy(x, L, v.baseOffset), ZLine.ldViaIxy(x, H, v.baseOffset + 1), ZLine.ldViaIxy(x, E, v.baseOffset + 2), ZLine.ldViaIxy(x, D, v.baseOffset + 3))
}
case _ => ???
}

@@ -893,6 +896,8 @@ object Z80ExpressionCompiler extends AbstractExpressionCompiler[ZLine] {

case v:StackVariable =>
if (ctx.options.flag(CompilationFlag.UseIxForStack)){
Some(LocalVariableAddressViaIX(v.baseOffset) -> Nil)
} else if (ctx.options.flag(CompilationFlag.UseIyForStack)){
Some(LocalVariableAddressViaIY(v.baseOffset) -> Nil)
} else {
Some(LocalVariableAddressViaHL -> calculateStackAddressToHL(ctx, v))
}

@@ -907,7 +912,7 @@ object Z80ExpressionCompiler extends AbstractExpressionCompiler[ZLine] {

def calculateStackAddressToHL(ctx: CompilationContext, v: StackVariable): List[ZLine] = calculateStackAddressToHL(ctx, v.baseOffset)

def calculateStackAddressToHL(ctx: CompilationContext, baseOffset: Int): List[ZLine] = {
if (ctx.options.flag(CompilationFlag.UseIxForStack)) {
if (ctx.options.flag(CompilationFlag.UseIxForStack) || ctx.options.flag(CompilationFlag.UseIyForStack)) {
???
} else if (ctx.options.flag(CompilationFlag.EmitSharpOpcodes)) {
List(ZLine.imm8(ZOpcode.LD_HLSP, baseOffset + ctx.extraStackOffset))

@@ -986,6 +991,18 @@ object Z80ExpressionCompiler extends AbstractExpressionCompiler[ZLine] {

}
}

def loadByteViaIY(offset: Int, target: ZExpressionTarget.Value): List[ZLine] = {
target match {
case ZExpressionTarget.NOTHING => Nil
case ZExpressionTarget.A => List(ZLine.ldViaIy(ZRegister.A, offset))
case ZExpressionTarget.HL => List(ZLine.ldViaIy(ZRegister.L, offset), ZLine.ldImm8(ZRegister.H, 0))
case ZExpressionTarget.BC => List(ZLine.ldViaIy(ZRegister.C, offset), ZLine.ldImm8(ZRegister.B, 0))
case ZExpressionTarget.DE => List(ZLine.ldViaIy(ZRegister.E, offset), ZLine.ldImm8(ZRegister.D, 0))
case ZExpressionTarget.EHL => List(ZLine.ldViaIy(ZRegister.L, offset), ZLine.ldImm8(ZRegister.H, 0), ZLine.ldImm8(ZRegister.E, 0))
case ZExpressionTarget.DEHL => List(ZLine.ldViaIy(ZRegister.L, offset), ZLine.ldImm8(ZRegister.H, 0), ZLine.ldImm16(ZRegister.DE, 0))
}
}

def loadByteViaHL(target: ZExpressionTarget.Value): List[ZLine] = {
target match {
case ZExpressionTarget.NOTHING => Nil

@@ -1042,6 +1059,17 @@ object Z80ExpressionCompiler extends AbstractExpressionCompiler[ZLine] {

prepareA ++ fillUpperBytes
}

def signExtendViaIY(ctx: CompilationContext, targetOffset: Int, hiRegister: ZRegister.Value, bytes: Int, signedSource: Boolean): List[ZLine] = {
if (bytes == 0) return Nil
val prepareA = if (signedSource) {
signExtendHighestByte(ctx, hiRegister)
} else {
List(ZLine.ldImm8(ZRegister.A, 0))
}
val fillUpperBytes = List.tabulate(bytes)(i => ZLine.ldViaIy(targetOffset + i, ZRegister.A))
prepareA ++ fillUpperBytes
}

def signExtendViaHL(ctx: CompilationContext, hiRegister: ZRegister.Value, bytes: Int, signedSource: Boolean): List[ZLine] = {
if (bytes == 0) return Nil
val prepareA = if (signedSource) {

@@ -1077,6 +1105,14 @@ object Z80ExpressionCompiler extends AbstractExpressionCompiler[ZLine] {

}
}

def storeAViaIY(ctx: CompilationContext, targetOffset: Int, targetSize: Int, signedSource: Boolean): List[ZLine] = {
targetSize match {
case 0 => Nil
case 1 => List(ZLine.ldViaIy(targetOffset, ZRegister.A))
case n => ZLine.ldViaIy(targetOffset, ZRegister.A) :: signExtendViaIY(ctx, targetOffset + 1, ZRegister.A, n - 1, signedSource)
}
}

def storeHL(ctx: CompilationContext, targetAddr: Constant, targetSize: Int, signedSource: Boolean): List[ZLine] = {
// TODO: LD (nnnn),HL compatibility?
targetSize match {

@@ -1115,6 +1151,16 @@ object Z80ExpressionCompiler extends AbstractExpressionCompiler[ZLine] {

}
}

def storeHLViaIY(ctx: CompilationContext, offset: Int, targetSize: Int, signedSource: Boolean): List[ZLine] = {
// TODO: LD (nnnn),HL compatibility?
targetSize match {
case 0 => Nil
case 1 => List(ZLine.ldViaIy(offset, ZRegister.L))
case 2 => List(ZLine.ldViaIy(offset, ZRegister.L), ZLine.ldViaIy(offset + 1, ZRegister.H))
case n => List(ZLine.ldViaIy(offset, ZRegister.L), ZLine.ldViaIy(offset + 1, ZRegister.H)) ++ signExtendViaIY(ctx, offset + 2, ZRegister.H, n - 2, signedSource)
}
}

def storeA(ctx: CompilationContext, target: LhsExpression, signedSource: Boolean): List[ZLine] = {
val env = ctx.env
target match {

@@ -1124,6 +1170,8 @@ object Z80ExpressionCompiler extends AbstractExpressionCompiler[ZLine] {

case v: StackVariable =>
if (ctx.options.flag(CompilationFlag.UseIxForStack)){
storeAViaIX(ctx, v.baseOffset, v.typ.size, signedSource)
} else if (ctx.options.flag(CompilationFlag.UseIyForStack)){
storeAViaIY(ctx, v.baseOffset, v.typ.size, signedSource)
} else {
calculateStackAddressToHL(ctx, v) ++ storeAViaHL(ctx, v.typ.size, signedSource)
}

@@ -1150,6 +1198,8 @@ object Z80ExpressionCompiler extends AbstractExpressionCompiler[ZLine] {

import ZRegister._
if (ctx.options.flag(CompilationFlag.UseIxForStack)){
storeHLViaIX(ctx, v.baseOffset, v.typ.size, signedSource)
} else if (ctx.options.flag(CompilationFlag.UseIyForStack)){
storeHLViaIY(ctx, v.baseOffset, v.typ.size, signedSource)
} else if (ctx.options.flag(CompilationFlag.EmitIntel8080Opcodes)) {
List(
ZLine.register(PUSH, DE),

@@ -1254,7 +1304,7 @@ object Z80ExpressionCompiler extends AbstractExpressionCompiler[ZLine] {

}
case v: StackVariable =>
import ZRegister._
if (ctx.options.flag(CompilationFlag.EmitZ80Opcodes)) {
if (ctx.options.flag(CompilationFlag.UseIxForStack)) {
List.tabulate(size) { i =>
if (i < v.typ.size) {
List(ZLine.ldViaIx(A, v.baseOffset + i))

@@ -1264,6 +1314,16 @@ object Z80ExpressionCompiler extends AbstractExpressionCompiler[ZLine] {

List(ZLine.ldImm8(A, 0))
}
}
} else if (ctx.options.flag(CompilationFlag.UseIyForStack)) {
List.tabulate(size) { i =>
if (i < v.typ.size) {
List(ZLine.ldViaIy(A, v.baseOffset + i))
} else if (v.typ.isSigned) {
ZLine.ldViaIy(A, v.baseOffset + v.typ.size - 1) :: signExtendHighestByte(ctx, A)
} else {
List(ZLine.ldImm8(A, 0))
}
}
} else {
val prepareHL = calculateStackAddressToHL(ctx, v)
List.tabulate(size) { i =>
@@ -18,6 +18,7 @@ object Z80StatementCompiler extends AbstractStatementCompiler[ZLine] {

def compile(ctx: CompilationContext, statement: ExecutableStatement): List[ZLine] = {
val options = ctx.options
val env = ctx.env
val ret = Z80Compiler.restoreRegistersAndReturn(ctx)
statement match {
case EmptyStatement(stmts) =>
stmts.foreach(s => compile(ctx, s))

@@ -25,13 +26,13 @@ object Z80StatementCompiler extends AbstractStatementCompiler[ZLine] {

case ReturnStatement(None) =>
fixStackOnReturn(ctx) ++ (ctx.function.returnType match {
case _: BooleanType =>
List(ZLine.implied(DISCARD_A), ZLine.implied(DISCARD_HL), ZLine.implied(DISCARD_BC), ZLine.implied(DISCARD_DE), ZLine.implied(RET))
List(ZLine.implied(DISCARD_A), ZLine.implied(DISCARD_HL), ZLine.implied(DISCARD_BC), ZLine.implied(DISCARD_DE)) ++ ret
case t => t.size match {
case 0 =>
List(ZLine.implied(DISCARD_F), ZLine.implied(DISCARD_A), ZLine.implied(DISCARD_HL), ZLine.implied(DISCARD_BC), ZLine.implied(DISCARD_DE), ZLine.implied(RET))
List(ZLine.implied(DISCARD_F), ZLine.implied(DISCARD_A), ZLine.implied(DISCARD_HL), ZLine.implied(DISCARD_BC), ZLine.implied(DISCARD_DE)) ++ ret
case _ =>
ctx.log.warn("Returning without a value", statement.position)
List(ZLine.implied(DISCARD_F), ZLine.implied(DISCARD_A), ZLine.implied(DISCARD_HL), ZLine.implied(DISCARD_BC), ZLine.implied(DISCARD_DE), ZLine.implied(RET))
List(ZLine.implied(DISCARD_F), ZLine.implied(DISCARD_A), ZLine.implied(DISCARD_HL), ZLine.implied(DISCARD_BC), ZLine.implied(DISCARD_DE)) ++ ret
}
})
case ReturnStatement(Some(e)) =>

@@ -40,13 +41,16 @@ object Z80StatementCompiler extends AbstractStatementCompiler[ZLine] {

case 0 =>
ctx.log.error("Cannot return anything from a void function", statement.position)
fixStackOnReturn(ctx) ++
List(ZLine.implied(DISCARD_A), ZLine.implied(DISCARD_HL), ZLine.implied(DISCARD_BC), ZLine.implied(DISCARD_DE), ZLine.implied(RET))
List(ZLine.implied(DISCARD_A), ZLine.implied(DISCARD_HL), ZLine.implied(DISCARD_BC), ZLine.implied(DISCARD_DE)) ++ ret
case 1 =>
Z80ExpressionCompiler.compileToA(ctx, e) ++ fixStackOnReturn(ctx) ++
List(ZLine.implied(DISCARD_HL), ZLine.implied(DISCARD_BC), ZLine.implied(DISCARD_DE), ZLine.implied(RET))
List(ZLine.implied(DISCARD_HL), ZLine.implied(DISCARD_BC), ZLine.implied(DISCARD_DE)) ++ ret
case 2 =>
Z80ExpressionCompiler.compileToHL(ctx, e) ++ fixStackOnReturn(ctx) ++
List(ZLine.implied(DISCARD_A), ZLine.implied(DISCARD_BC), ZLine.implied(DISCARD_DE), ZLine.implied(RET))
List(ZLine.implied(DISCARD_A), ZLine.implied(DISCARD_BC), ZLine.implied(DISCARD_DE)) ++ ret
case _ =>
Z80ExpressionCompiler.compileToHL(ctx, e) ++ fixStackOnReturn(ctx) ++
List(ZLine.implied(DISCARD_A), ZLine.implied(DISCARD_BC)) ++ ret
}
case t =>
AbstractExpressionCompiler.checkAssignmentType(ctx, e, ctx.function.returnType)

@@ -209,6 +213,20 @@ object Z80StatementCompiler extends AbstractStatementCompiler[ZLine] {

ZLine.ld16(SP, HL),
ZLine.register(POP, IX))
}
} else if (ctx.options.flags(CompilationFlag.UseIyForStack)) {
if (ctx.function.returnType.size == 2) {
List(
ZLine.ldImm16(IY, localVariableArea),
ZLine.registers(ADD_16, IY, SP),
ZLine.ld16(SP, IY),
ZLine.register(POP, IY))
} else {
List(
ZLine.ldImm16(HL, localVariableArea),
ZLine.registers(ADD_16, HL, SP),
ZLine.ld16(SP, HL),
ZLine.register(POP, IY))
}
} else {
if (ctx.function.returnType.size == 2) {
if (ctx.options.flags(CompilationFlag.EmitSharpOpcodes)) {
src/test/scala/millfork/test/InterruptSuite.scala (new file, 25 lines)

@@ -0,0 +1,25 @@

package millfork.test
import millfork.Cpu
import millfork.test.emu.{EmuBenchmarkRun, EmuCrossPlatformBenchmarkRun, EmuUltraBenchmarkRun, EmuUnoptimizedCrossPlatformRun}
import org.scalatest.{FunSuite, Matchers}

/**
* @author Karol Stasiak
*/
class InterruptSuite extends FunSuite with Matchers {

test("Interrupts should compile") {
EmuUnoptimizedCrossPlatformRun(Cpu.Mos, Cpu.Cmos, Cpu.Z80, Cpu.Intel8080, Cpu.Sharp)(
"""
| interrupt void f() {
| asm { nop }
| }
| kernal_interrupt void g() {
| asm { nop }
| }
| array hold = [ @word [f.addr, g.addr]]
| void main() {
| }
""".stripMargin){m => }
}
}
@@ -9,13 +9,18 @@ import millfork.output.MemoryBank

object EmuUnoptimizedCrossPlatformRun {
def apply(platforms: Cpu.Value*)(source: String)(verifier: MemoryBank => Unit): Unit = {
val (_, mm) = if (platforms.contains(Cpu.Mos)) EmuUnoptimizedRun.apply2(source) else Timings(-1, -1) -> null
val (_, mc) = if (platforms.contains(Cpu.Cmos)) EmuUnoptimizedCmosRun.apply2(source) else Timings(-1, -1) -> null
val (_, mz) = if (platforms.contains(Cpu.Z80)) EmuUnoptimizedZ80Run.apply2(source) else Timings(-1, -1) -> null
val (_, mi) = if (platforms.contains(Cpu.Intel8080)) EmuUnoptimizedIntel8080Run.apply2(source) else Timings(-1, -1) -> null
val (_, ms) = if (platforms.contains(Cpu.Sharp)) EmuUnoptimizedSharpRun.apply2(source) else Timings(-1, -1) -> null
if (platforms.contains(Cpu.Mos)) {
println(f"Running MOS")
println(f"Running 6502")
verifier(mm)
}
if (platforms.contains(Cpu.Cmos)) {
println(f"Running 65C02")
verifier(mc)
}
if (platforms.contains(Cpu.Z80)) {
println(f"Running Z80")
verifier(mz)
@@ -8,6 +8,8 @@ import millfork.Cpu

*/
object EmuUnoptimizedRun extends EmuRun(Cpu.StrictMos, Nil, Nil)

object EmuUnoptimizedCmosRun extends EmuRun(Cpu.Cmos, Nil, Nil)

object EmuUnoptimizedZ80Run extends EmuZ80Run(Cpu.Z80, Nil, Nil)

object EmuUnoptimizedIntel8080Run extends EmuZ80Run(Cpu.Intel8080, Nil, Nil)