Mirror of https://github.com/irmen/prog8.git (synced 2024-12-23 09:32:43 +00:00)
refactor IR codegen into separate module

parent d07d00fa41
commit b4352ad38b

.idea/modules.xml (generated, 1 line changed)
@@ -5,6 +5,7 @@
       <module fileurl="file://$PROJECT_DIR$/codeCore/codeCore.iml" filepath="$PROJECT_DIR$/codeCore/codeCore.iml" />
       <module fileurl="file://$PROJECT_DIR$/codeGenCpu6502/codeGenCpu6502.iml" filepath="$PROJECT_DIR$/codeGenCpu6502/codeGenCpu6502.iml" />
       <module fileurl="file://$PROJECT_DIR$/codeGenExperimental/codeGenExperimental.iml" filepath="$PROJECT_DIR$/codeGenExperimental/codeGenExperimental.iml" />
+      <module fileurl="file://$PROJECT_DIR$/codeGenIntermediate/codeGenIntermediate.iml" filepath="$PROJECT_DIR$/codeGenIntermediate/codeGenIntermediate.iml" />
       <module fileurl="file://$PROJECT_DIR$/codeGenVirtual/codeGenVirtual.iml" filepath="$PROJECT_DIR$/codeGenVirtual/codeGenVirtual.iml" />
       <module fileurl="file://$PROJECT_DIR$/codeOptimizers/codeOptimizers.iml" filepath="$PROJECT_DIR$/codeOptimizers/codeOptimizers.iml" />
       <module fileurl="file://$PROJECT_DIR$/compiler/compiler.iml" filepath="$PROJECT_DIR$/compiler/compiler.iml" />
@@ -26,7 +26,7 @@ compileTestKotlin {
dependencies {
    implementation project(':codeCore')
    implementation project(':intermediate')
    implementation project(':virtualmachine')
    implementation project(':codeGenIntermediate')
    implementation "org.jetbrains.kotlin:kotlin-stdlib-jdk8"
    // implementation "org.jetbrains.kotlin:kotlin-reflect"
    implementation "com.michael-bull.kotlin-result:kotlin-result-jvm:1.1.16"
@@ -10,8 +10,8 @@
    <orderEntry type="sourceFolder" forTests="false" />
    <orderEntry type="library" name="KotlinJavaRuntime" level="project" />
    <orderEntry type="library" name="michael.bull.kotlin.result.jvm" level="project" />
    <orderEntry type="module" module-name="codeCore" />
    <orderEntry type="module" module-name="virtualmachine" />
    <orderEntry type="module" module-name="codeGenIntermediate" />
    <orderEntry type="module" module-name="intermediate" />
    <orderEntry type="module" module-name="codeCore" />
  </component>
</module>
(File diff suppressed because it is too large.)
codeGenIntermediate/build.gradle (new file, 64 lines)
@@ -0,0 +1,64 @@
plugins {
    id 'java'
    id 'application'
    id "org.jetbrains.kotlin.jvm"
}

java {
    toolchain {
        languageVersion = JavaLanguageVersion.of(javaVersion)
    }
}

compileKotlin {
    kotlinOptions {
        jvmTarget = javaVersion
    }
}

compileTestKotlin {
    kotlinOptions {
        jvmTarget = javaVersion
    }
}

dependencies {
    implementation project(':codeCore')
    implementation project(':intermediate')
    implementation project(':virtualmachine')
    implementation "org.jetbrains.kotlin:kotlin-stdlib-jdk8"
    // implementation "org.jetbrains.kotlin:kotlin-reflect"
    implementation "com.michael-bull.kotlin-result:kotlin-result-jvm:1.1.16"

    testImplementation 'io.kotest:kotest-runner-junit5-jvm:5.3.2'
}

sourceSets {
    main {
        java {
            srcDirs = ["${project.projectDir}/src"]
        }
        resources {
            srcDirs = ["${project.projectDir}/res"]
        }
    }
    test {
        java {
            srcDir "${project.projectDir}/test"
        }
    }
}

test {
    // Enable JUnit 5 (Gradle 4.6+).
    useJUnitPlatform()

    // Always run tests, even when nothing changed.
    dependsOn 'cleanTest'

    // Show test results.
    testLogging {
        events "skipped", "failed"
    }
}
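Note that a new Gradle subproject normally also has to be registered in the root settings.gradle. That file is not part of the visible diff here, but the entry would presumably look like the following (hypothetical line, mirroring how the sibling modules are referenced via project(':...') above):

    // settings.gradle (root project), assumed registration of the new module
    include ':codeGenIntermediate'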
codeGenIntermediate/codeGenIntermediate.iml (new file, 19 lines)
@@ -0,0 +1,19 @@
<?xml version="1.0" encoding="UTF-8"?>
<module type="JAVA_MODULE" version="4">
  <component name="NewModuleRootManager" inherit-compiler-output="true">
    <exclude-output />
    <content url="file://$MODULE_DIR$">
      <sourceFolder url="file://$MODULE_DIR$/src" isTestSource="false" />
      <sourceFolder url="file://$MODULE_DIR$/test" isTestSource="true" />
      <excludeFolder url="file://$MODULE_DIR$/build" />
    </content>
    <orderEntry type="inheritedJdk" />
    <orderEntry type="sourceFolder" forTests="false" />
    <orderEntry type="library" name="KotlinJavaRuntime" level="project" />
    <orderEntry type="module" module-name="codeCore" />
    <orderEntry type="module" module-name="intermediate" />
    <orderEntry type="module" module-name="virtualmachine" />
    <orderEntry type="library" name="io.kotest.assertions.core.jvm" level="project" />
    <orderEntry type="library" name="io.kotest.runner.junit5.jvm" level="project" />
  </component>
</module>
@@ -1,4 +1,4 @@
-package prog8.codegen.experimental
+package prog8.codegen.intermediate

 import prog8.code.ast.*
 import prog8.code.core.AssemblyError
@@ -10,7 +10,7 @@ import prog8.intermediate.IRCodeInstruction
 import prog8.intermediate.Opcode
 import prog8.intermediate.VmDataType

-internal class AssignmentGen(private val codeGen: CodeGen, private val expressionEval: ExpressionGen) {
+internal class AssignmentGen(private val codeGen: IntermediateCodeGen, private val expressionEval: ExpressionGen) {

     internal fun translate(assignment: PtAssignment): IRCodeChunk {
         if(assignment.target.children.single() is PtMachineRegister)
@@ -1,4 +1,4 @@
-package prog8.codegen.experimental
+package prog8.codegen.intermediate

 import prog8.code.StStaticVariable
 import prog8.code.ast.*
@@ -9,7 +9,7 @@ import prog8.intermediate.*
 import prog8.vm.Syscall


-internal class BuiltinFuncGen(private val codeGen: CodeGen, private val exprGen: ExpressionGen) {
+internal class BuiltinFuncGen(private val codeGen: IntermediateCodeGen, private val exprGen: ExpressionGen) {

     fun translate(call: PtBuiltinFunctionCall, resultRegister: Int): IRCodeChunk {
         return when(call.name) {
@@ -1,4 +1,4 @@
-package prog8.codegen.experimental
+package prog8.codegen.intermediate

 import prog8.code.StRomSub
 import prog8.code.StStaticVariable
@@ -8,7 +8,7 @@ import prog8.code.core.*
 import prog8.intermediate.*


-internal class ExpressionGen(private val codeGen: CodeGen) {
+internal class ExpressionGen(private val codeGen: IntermediateCodeGen) {
     fun translateExpression(expr: PtExpression, resultRegister: Int, resultFpRegister: Int): IRCodeChunk {
         require(codeGen.vmRegisters.peekNext() > resultRegister)
(File diff suppressed because it is too large.)
@@ -0,0 +1,27 @@
package prog8.codegen.intermediate

import prog8.code.core.AssemblyError

internal class RegisterPool {
    private var firstFree: Int = 3        // integer registers 0,1,2 are reserved
    private var firstFreeFloat: Int = 0

    fun peekNext() = firstFree
    fun peekNextFloat() = firstFreeFloat

    fun nextFree(): Int {
        val result = firstFree
        firstFree++
        if(firstFree>65535)
            throw AssemblyError("out of virtual registers (int)")
        return result
    }

    fun nextFreeFloat(): Int {
        val result = firstFreeFloat
        firstFreeFloat++
        if(firstFreeFloat>65535)
            throw AssemblyError("out of virtual registers (fp)")
        return result
    }
}
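A minimal usage sketch of this register pool, assuming the same call pattern the generators in this diff use (codeGen.vmRegisters.nextFree(), peekNext(), and so on); the surrounding setup is hypothetical and not part of the commit:

    // hypothetical illustration of the pool's behaviour
    val vmRegisters = RegisterPool()
    val addressReg = vmRegisters.nextFree()       // first call returns 3, since registers 0,1,2 are reserved
    val valueReg = vmRegisters.nextFree()         // 4, and so on; numbers are never reused
    val fpReg = vmRegisters.nextFreeFloat()       // float registers are numbered independently, starting at 0
    require(vmRegisters.peekNext() > valueReg)    // peekNext() previews the next number without claiming it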
@@ -1,5 +1,3 @@
package prog8tests.vm.helpers

import prog8.code.core.DataType
import prog8.code.core.Encoding
import prog8.code.core.IMemSizer
codeGenIntermediate/test/TestIRPeepholeOpt.kt (new file, 178 lines)
@@ -0,0 +1,178 @@
import io.kotest.core.spec.style.FunSpec
import io.kotest.matchers.shouldBe
import prog8.code.SymbolTable
import prog8.code.core.*
import prog8.code.target.VMTarget
import prog8.intermediate.*

class TestIRPeepholeOpt: FunSpec({
    fun makeIRProgram(lines: List<IRCodeLine>): IRProgram {
        val block = IRBlock("main", null, IRBlock.BlockAlignment.NONE, Position.DUMMY)
        val sub = IRSubroutine("main.start", emptyList(), null, Position.DUMMY)
        val chunk = IRCodeChunk(Position.DUMMY)
        for(line in lines)
            chunk += line
        sub += chunk
        block += sub
        val st = SymbolTable()
        val target = VMTarget()
        val options = CompilationOptions(
            OutputType.RAW,
            CbmPrgLauncherType.NONE,
            ZeropageType.DONTUSE,
            emptyList(),
            floats = false,
            noSysInit = true,
            compTarget = target,
            loadAddress = target.machine.PROGRAM_LOAD_ADDRESS
        )
        val prog = IRProgram("test", st, options, target)
        prog.addBlock(block)
        return prog
    }

    fun IRProgram.lines(): List<IRCodeLine> = this.blocks.flatMap { it.subroutines }.flatMap { it.chunks }.flatMap { it.lines }

    test("remove nops") {
        val irProg = makeIRProgram(listOf(
            IRCodeInstruction(Opcode.JUMP, labelSymbol = "dummy"),
            IRCodeInstruction(Opcode.NOP),
            IRCodeInstruction(Opcode.NOP)
        ))
        irProg.lines().size shouldBe 3
        val opt = IRPeepholeOptimizer(irProg)
        opt.optimize()
        irProg.lines().size shouldBe 1
    }

    test("remove jmp to label below") {
        val irProg = makeIRProgram(listOf(
            IRCodeInstruction(Opcode.JUMP, labelSymbol = "label"),      // removed
            IRCodeLabel("label"),
            IRCodeInstruction(Opcode.JUMP, labelSymbol = "label2"),     // removed
            IRCodeInstruction(Opcode.NOP),                              // removed
            IRCodeLabel("label2"),
            IRCodeInstruction(Opcode.JUMP, labelSymbol = "label3"),
            IRCodeInstruction(Opcode.INC, VmDataType.BYTE, reg1=1),
            IRCodeLabel("label3")
        ))
        irProg.lines().size shouldBe 8
        val opt = IRPeepholeOptimizer(irProg)
        opt.optimize()
        val lines = irProg.lines()
        lines.size shouldBe 5
        (lines[0] as IRCodeLabel).name shouldBe "label"
        (lines[1] as IRCodeLabel).name shouldBe "label2"
        (lines[2] as IRCodeInstruction).ins.opcode shouldBe Opcode.JUMP
        (lines[3] as IRCodeInstruction).ins.opcode shouldBe Opcode.INC
        (lines[4] as IRCodeLabel).name shouldBe "label3"
    }

    test("remove double sec/clc") {
        val irProg = makeIRProgram(listOf(
            IRCodeInstruction(Opcode.SEC),
            IRCodeInstruction(Opcode.SEC),
            IRCodeInstruction(Opcode.SEC),
            IRCodeInstruction(Opcode.CLC),
            IRCodeInstruction(Opcode.CLC),
            IRCodeInstruction(Opcode.CLC)
        ))
        irProg.lines().size shouldBe 6
        val opt = IRPeepholeOptimizer(irProg)
        opt.optimize()
        val lines = irProg.lines()
        lines.size shouldBe 1
        (lines[0] as IRCodeInstruction).ins.opcode shouldBe Opcode.CLC
    }

    test("push followed by pop") {
        val irProg = makeIRProgram(listOf(
            IRCodeInstruction(Opcode.PUSH, VmDataType.BYTE, reg1=42),
            IRCodeInstruction(Opcode.POP, VmDataType.BYTE, reg1=42),
            IRCodeInstruction(Opcode.PUSH, VmDataType.BYTE, reg1=99),
            IRCodeInstruction(Opcode.POP, VmDataType.BYTE, reg1=222)
        ))
        irProg.lines().size shouldBe 4
        val opt = IRPeepholeOptimizer(irProg)
        opt.optimize()
        val lines = irProg.lines()
        lines.size shouldBe 1
        (lines[0] as IRCodeInstruction).ins.opcode shouldBe Opcode.LOADR
        (lines[0] as IRCodeInstruction).ins.reg1 shouldBe 222
        (lines[0] as IRCodeInstruction).ins.reg2 shouldBe 99
    }

    test("remove useless div/mul, add/sub") {
        val irProg = makeIRProgram(listOf(
            IRCodeInstruction(Opcode.DIV, VmDataType.BYTE, reg1=42, value = 1),
            IRCodeInstruction(Opcode.DIVS, VmDataType.BYTE, reg1=42, value = 1),
            IRCodeInstruction(Opcode.MUL, VmDataType.BYTE, reg1=42, value = 1),
            IRCodeInstruction(Opcode.MOD, VmDataType.BYTE, reg1=42, value = 1),
            IRCodeInstruction(Opcode.DIV, VmDataType.BYTE, reg1=42, value = 2),
            IRCodeInstruction(Opcode.DIVS, VmDataType.BYTE, reg1=42, value = 2),
            IRCodeInstruction(Opcode.MUL, VmDataType.BYTE, reg1=42, value = 2),
            IRCodeInstruction(Opcode.MOD, VmDataType.BYTE, reg1=42, value = 2),
            IRCodeInstruction(Opcode.ADD, VmDataType.BYTE, reg1=42, value = 0),
            IRCodeInstruction(Opcode.SUB, VmDataType.BYTE, reg1=42, value = 0)
        ))
        irProg.lines().size shouldBe 10
        val opt = IRPeepholeOptimizer(irProg)
        opt.optimize()
        val lines = irProg.lines()
        lines.size shouldBe 4
    }

    test("replace add/sub 1 by inc/dec") {
        val irProg = makeIRProgram(listOf(
            IRCodeInstruction(Opcode.ADD, VmDataType.BYTE, reg1=42, value = 1),
            IRCodeInstruction(Opcode.SUB, VmDataType.BYTE, reg1=42, value = 1)
        ))
        irProg.lines().size shouldBe 2
        val opt = IRPeepholeOptimizer(irProg)
        opt.optimize()
        val lines = irProg.lines()
        lines.size shouldBe 2
        (lines[0] as IRCodeInstruction).ins.opcode shouldBe Opcode.INC
        (lines[1] as IRCodeInstruction).ins.opcode shouldBe Opcode.DEC
    }

    test("remove useless and/or/xor") {
        val irProg = makeIRProgram(listOf(
            IRCodeInstruction(Opcode.AND, VmDataType.BYTE, reg1=42, value = 255),
            IRCodeInstruction(Opcode.AND, VmDataType.WORD, reg1=42, value = 65535),
            IRCodeInstruction(Opcode.OR, VmDataType.BYTE, reg1=42, value = 0),
            IRCodeInstruction(Opcode.XOR, VmDataType.BYTE, reg1=42, value = 0),
            IRCodeInstruction(Opcode.AND, VmDataType.BYTE, reg1=42, value = 200),
            IRCodeInstruction(Opcode.AND, VmDataType.WORD, reg1=42, value = 60000),
            IRCodeInstruction(Opcode.OR, VmDataType.BYTE, reg1=42, value = 1),
            IRCodeInstruction(Opcode.XOR, VmDataType.BYTE, reg1=42, value = 1)
        ))
        irProg.lines().size shouldBe 8
        val opt = IRPeepholeOptimizer(irProg)
        opt.optimize()
        val lines = irProg.lines()
        lines.size shouldBe 4
    }

    test("replace and/or/xor by constant number") {
        val irProg = makeIRProgram(listOf(
            IRCodeInstruction(Opcode.AND, VmDataType.BYTE, reg1=42, value = 0),
            IRCodeInstruction(Opcode.AND, VmDataType.WORD, reg1=42, value = 0),
            IRCodeInstruction(Opcode.OR, VmDataType.BYTE, reg1=42, value = 255),
            IRCodeInstruction(Opcode.OR, VmDataType.WORD, reg1=42, value = 65535)
        ))
        irProg.lines().size shouldBe 4
        val opt = IRPeepholeOptimizer(irProg)
        opt.optimize()
        val lines = irProg.lines()
        lines.size shouldBe 4
        (lines[0] as IRCodeInstruction).ins.opcode shouldBe Opcode.LOAD
        (lines[1] as IRCodeInstruction).ins.opcode shouldBe Opcode.LOAD
        (lines[2] as IRCodeInstruction).ins.opcode shouldBe Opcode.LOAD
        (lines[3] as IRCodeInstruction).ins.opcode shouldBe Opcode.LOAD
        (lines[0] as IRCodeInstruction).ins.value shouldBe 0
        (lines[1] as IRCodeInstruction).ins.value shouldBe 0
        (lines[2] as IRCodeInstruction).ins.value shouldBe 255
        (lines[3] as IRCodeInstruction).ins.value shouldBe 65535
    }
})
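The new module's build.gradle shown earlier wires these kotest specs to the JUnit 5 platform, so (assuming the usual Gradle wrapper that a multi-module Gradle project ships with; the wrapper itself is not part of this diff) the suite can presumably be run on its own with a command along the lines of:

    ./gradlew :codeGenIntermediate:test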
@@ -27,7 +27,7 @@ compileTestKotlin {
dependencies {
    implementation project(':codeCore')
    implementation project(':intermediate')
    implementation project(':virtualmachine')
    implementation project(':codeGenIntermediate')
    implementation "org.jetbrains.kotlin:kotlin-stdlib-jdk8"
    // implementation "org.jetbrains.kotlin:kotlin-reflect"
    implementation "com.michael-bull.kotlin-result:kotlin-result-jvm:1.1.16"
@@ -5,6 +5,7 @@
    <content url="file://$MODULE_DIR$">
      <sourceFolder url="file://$MODULE_DIR$/src" isTestSource="false" />
      <sourceFolder url="file://$MODULE_DIR$/test" isTestSource="true" />
      <excludeFolder url="file://$MODULE_DIR$/build" />
    </content>
    <orderEntry type="inheritedJdk" />
    <orderEntry type="sourceFolder" forTests="false" />
@@ -13,7 +14,7 @@
    <orderEntry type="library" name="io.kotest.assertions.core.jvm" level="project" />
    <orderEntry type="library" name="io.kotest.runner.junit5.jvm" level="project" />
    <orderEntry type="module" module-name="codeCore" />
    <orderEntry type="module" module-name="virtualmachine" />
    <orderEntry type="module" module-name="intermediate" />
    <orderEntry type="module" module-name="codeGenIntermediate" />
  </component>
</module>
@ -1,140 +0,0 @@
|
||||
package prog8.codegen.virtual
|
||||
|
||||
import prog8.code.core.AssemblyError
|
||||
import prog8.code.core.CompilationOptions
|
||||
import prog8.code.core.IAssemblyProgram
|
||||
import prog8.intermediate.Instruction
|
||||
import prog8.intermediate.Opcode
|
||||
import prog8.intermediate.OpcodesWithAddress
|
||||
import prog8.intermediate.VmDataType
|
||||
import java.io.BufferedWriter
|
||||
import java.nio.file.Path
|
||||
import kotlin.io.path.bufferedWriter
|
||||
import kotlin.io.path.div
|
||||
|
||||
|
||||
class AssemblyProgram(override val name: String, private val allocations: VariableAllocator
|
||||
) : IAssemblyProgram {
|
||||
|
||||
private val globalInits = mutableListOf<VmCodeLine>()
|
||||
private val blocks = mutableListOf<VmCodeChunk>()
|
||||
|
||||
override fun assemble(options: CompilationOptions): Boolean {
|
||||
val outfile = options.outputDir / ("$name.p8virt")
|
||||
println("write code to $outfile")
|
||||
outfile.bufferedWriter().use { out ->
|
||||
allocations.asVmMemory().forEach { (name, alloc) ->
|
||||
out.write("var ${name.joinToString(".")} $alloc\n")
|
||||
}
|
||||
out.write("------PROGRAM------\n")
|
||||
|
||||
if(!options.dontReinitGlobals) {
|
||||
out.write("; global var inits\n")
|
||||
globalInits.forEach { out.writeLine(it) }
|
||||
}
|
||||
|
||||
out.write("; actual program code\n")
|
||||
blocks.asSequence().flatMap { it.lines }.forEach { line->out.writeLine(line) }
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
private fun BufferedWriter.writeLine(line: VmCodeLine) {
|
||||
when(line) {
|
||||
is VmCodeComment -> write("; ${line.comment}\n")
|
||||
is VmCodeInstruction -> {
|
||||
write(line.ins.toString() + "\n")
|
||||
}
|
||||
is VmCodeLabel -> write("_" + line.name.joinToString(".") + ":\n")
|
||||
is VmCodeInlineAsm -> {
|
||||
// TODO do we have to replace variable names by their allocated address???
|
||||
write(line.assembly+"\n")
|
||||
}
|
||||
is VmCodeInlineBinary -> {
|
||||
write("incbin \"${line.file}\"")
|
||||
if(line.offset!=null)
|
||||
write(",${line.offset}")
|
||||
if(line.length!=null)
|
||||
write(",${line.length}")
|
||||
write("\n")
|
||||
}
|
||||
else -> throw AssemblyError("invalid vm code line")
|
||||
}
|
||||
}
|
||||
|
||||
fun addGlobalInits(chunk: VmCodeChunk) = globalInits.addAll(chunk.lines)
|
||||
fun addBlock(block: VmCodeChunk) = blocks.add(block)
|
||||
fun getBlocks(): List<VmCodeChunk> = blocks
|
||||
}
|
||||
|
||||
sealed class VmCodeLine
|
||||
|
||||
class VmCodeInstruction(
|
||||
opcode: Opcode,
|
||||
type: VmDataType?=null,
|
||||
reg1: Int?=null, // 0-$ffff
|
||||
reg2: Int?=null, // 0-$ffff
|
||||
fpReg1: Int?=null, // 0-$ffff
|
||||
fpReg2: Int?=null, // 0-$ffff
|
||||
value: Int?=null, // 0-$ffff
|
||||
fpValue: Float?=null,
|
||||
labelSymbol: List<String>?=null // alternative to value for branch/call/jump labels
|
||||
): VmCodeLine() {
|
||||
val ins = Instruction(opcode, type, reg1, reg2, fpReg1, fpReg2, value, fpValue, labelSymbol)
|
||||
|
||||
init {
|
||||
if(reg1!=null && (reg1<0 || reg1>65536))
|
||||
throw IllegalArgumentException("reg1 out of bounds")
|
||||
if(reg2!=null && (reg2<0 || reg2>65536))
|
||||
throw IllegalArgumentException("reg2 out of bounds")
|
||||
if(fpReg1!=null && (fpReg1<0 || fpReg1>65536))
|
||||
throw IllegalArgumentException("fpReg1 out of bounds")
|
||||
if(fpReg2!=null && (fpReg2<0 || fpReg2>65536))
|
||||
throw IllegalArgumentException("fpReg2 out of bounds")
|
||||
|
||||
if(value!=null && opcode !in OpcodesWithAddress) {
|
||||
when (type) {
|
||||
VmDataType.BYTE -> {
|
||||
if (value < -128 || value > 255)
|
||||
throw IllegalArgumentException("value out of range for byte: $value")
|
||||
}
|
||||
VmDataType.WORD -> {
|
||||
if (value < -32768 || value > 65535)
|
||||
throw IllegalArgumentException("value out of range for word: $value")
|
||||
}
|
||||
VmDataType.FLOAT, null -> {}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
class VmCodeLabel(val name: List<String>): VmCodeLine()
|
||||
internal class VmCodeComment(val comment: String): VmCodeLine()
|
||||
|
||||
class VmCodeChunk(initial: VmCodeLine? = null) {
|
||||
val lines = mutableListOf<VmCodeLine>()
|
||||
|
||||
init {
|
||||
if(initial!=null)
|
||||
lines.add(initial)
|
||||
}
|
||||
|
||||
operator fun plusAssign(line: VmCodeLine) {
|
||||
lines.add(line)
|
||||
}
|
||||
|
||||
operator fun plusAssign(chunk: VmCodeChunk) {
|
||||
lines.addAll(chunk.lines)
|
||||
}
|
||||
}
|
||||
|
||||
internal class VmCodeInlineAsm(asm: String): VmCodeLine() {
|
||||
val assembly: String = asm.trimIndent()
|
||||
}
|
||||
|
||||
internal class VmCodeInlineBinary(val file: Path, val offset: UInt?, val length: UInt?): VmCodeLine() {
|
||||
init {
|
||||
require(file.isAbsolute)
|
||||
require(file.toFile().isFile)
|
||||
}
|
||||
}
|
@ -1,249 +0,0 @@
|
||||
package prog8.codegen.virtual
|
||||
|
||||
import prog8.code.ast.*
|
||||
import prog8.code.core.AssemblyError
|
||||
import prog8.code.core.DataType
|
||||
import prog8.code.core.SignedDatatypes
|
||||
import prog8.intermediate.Opcode
|
||||
import prog8.intermediate.VmDataType
|
||||
|
||||
internal class AssignmentGen(private val codeGen: CodeGen, private val expressionEval: ExpressionGen) {
|
||||
|
||||
internal fun translate(assignment: PtAssignment): VmCodeChunk {
|
||||
if(assignment.target.children.single() is PtMachineRegister)
|
||||
throw AssemblyError("assigning to a register should be done by just evaluating the expression into resultregister")
|
||||
|
||||
return if (assignment.isInplaceAssign)
|
||||
translateInplaceAssign(assignment)
|
||||
else
|
||||
translateRegularAssign(assignment)
|
||||
}
|
||||
|
||||
private fun translateInplaceAssign(assignment: PtAssignment): VmCodeChunk {
|
||||
val ident = assignment.target.identifier
|
||||
val memory = assignment.target.memory
|
||||
val array = assignment.target.array
|
||||
|
||||
return if(ident!=null) {
|
||||
val address = codeGen.allocations.get(ident.targetName)
|
||||
assignSelfInMemory(address, assignment.value, assignment)
|
||||
} else if(memory != null) {
|
||||
if(memory.address is PtNumber)
|
||||
assignSelfInMemory((memory.address as PtNumber).number.toInt(), assignment.value, assignment)
|
||||
else
|
||||
fallbackAssign(assignment)
|
||||
} else if(array!=null) {
|
||||
// NOTE: naive fallback assignment here will sometimes generate code that loads the index value multiple times
|
||||
// in a register. It's way too much work to optimize that here - instead, we trust that the generated IL assembly
|
||||
// will be optimized later and have the double assignments removed.
|
||||
fallbackAssign(assignment)
|
||||
} else {
|
||||
fallbackAssign(assignment)
|
||||
}
|
||||
}
|
||||
|
||||
private fun assignSelfInMemory(
|
||||
address: Int,
|
||||
value: PtExpression,
|
||||
origAssign: PtAssignment
|
||||
): VmCodeChunk {
|
||||
val vmDt = codeGen.vmType(value.type)
|
||||
val code = VmCodeChunk()
|
||||
when(value) {
|
||||
is PtIdentifier -> return code // do nothing, x=x null assignment.
|
||||
is PtMachineRegister -> return code // do nothing, reg=reg null assignment
|
||||
is PtPrefix -> return inplacePrefix(value.operator, vmDt, address)
|
||||
is PtBinaryExpression -> return inplaceBinexpr(value.operator, value.right, vmDt, value.type in SignedDatatypes, address, origAssign)
|
||||
is PtMemoryByte -> {
|
||||
return if (!codeGen.options.compTarget.machine.isIOAddress(address.toUInt()))
|
||||
code // do nothing, mem=mem null assignment.
|
||||
else {
|
||||
// read and write a (i/o) memory location to itself.
|
||||
val tempReg = codeGen.vmRegisters.nextFree()
|
||||
code += VmCodeInstruction(Opcode.LOADM, vmDt, reg1 = tempReg, value = address)
|
||||
code += VmCodeInstruction(Opcode.STOREM, vmDt, reg1 = tempReg, value = address)
|
||||
code
|
||||
}
|
||||
}
|
||||
else -> return fallbackAssign(origAssign)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
private fun fallbackAssign(origAssign: PtAssignment): VmCodeChunk {
|
||||
if (codeGen.options.slowCodegenWarnings)
|
||||
codeGen.errors.warn("indirect code for in-place assignment", origAssign.position)
|
||||
return translateRegularAssign(origAssign)
|
||||
}
|
||||
|
||||
private fun inplaceBinexpr(
|
||||
operator: String,
|
||||
operand: PtExpression,
|
||||
vmDt: VmDataType,
|
||||
signed: Boolean,
|
||||
address: Int,
|
||||
origAssign: PtAssignment
|
||||
): VmCodeChunk {
|
||||
when(operator) {
|
||||
"+" -> return expressionEval.operatorPlusInplace(address, vmDt, operand)
|
||||
"-" -> return expressionEval.operatorMinusInplace(address, vmDt, operand)
|
||||
"*" -> return expressionEval.operatorMultiplyInplace(address, vmDt, operand)
|
||||
"/" -> return expressionEval.operatorDivideInplace(address, vmDt, signed, operand)
|
||||
"|" -> return expressionEval.operatorOrInplace(address, vmDt, operand)
|
||||
"&" -> return expressionEval.operatorAndInplace(address, vmDt, operand)
|
||||
"^" -> return expressionEval.operatorXorInplace(address, vmDt, operand)
|
||||
"<<" -> return expressionEval.operatorShiftLeftInplace(address, vmDt, operand)
|
||||
">>" -> return expressionEval.operatorShiftRightInplace(address, vmDt, signed, operand)
|
||||
else -> {}
|
||||
}
|
||||
return fallbackAssign(origAssign)
|
||||
}
|
||||
|
||||
private fun inplacePrefix(operator: String, vmDt: VmDataType, address: Int): VmCodeChunk {
|
||||
val code= VmCodeChunk()
|
||||
when(operator) {
|
||||
"+" -> { }
|
||||
"-" -> {
|
||||
code += VmCodeInstruction(Opcode.NEGM, vmDt, value = address)
|
||||
}
|
||||
"~" -> {
|
||||
val regMask = codeGen.vmRegisters.nextFree()
|
||||
val mask = if(vmDt==VmDataType.BYTE) 0x00ff else 0xffff
|
||||
code += VmCodeInstruction(Opcode.LOAD, vmDt, reg1=regMask, value = mask)
|
||||
code += VmCodeInstruction(Opcode.XORM, vmDt, reg1=regMask, value = address)
|
||||
}
|
||||
else -> throw AssemblyError("weird prefix operator")
|
||||
}
|
||||
return code
|
||||
}
|
||||
|
||||
private fun translateRegularAssign(assignment: PtAssignment): VmCodeChunk {
|
||||
// note: assigning array and string values is done via an explicit memcopy/stringcopy function call.
|
||||
val ident = assignment.target.identifier
|
||||
val memory = assignment.target.memory
|
||||
val array = assignment.target.array
|
||||
val vmDt = codeGen.vmType(assignment.value.type)
|
||||
|
||||
val code = VmCodeChunk()
|
||||
var resultRegister = -1
|
||||
var resultFpRegister = -1
|
||||
val zero = codeGen.isZero(assignment.value)
|
||||
if(!zero) {
|
||||
// calculate the assignment value
|
||||
if (vmDt == VmDataType.FLOAT) {
|
||||
resultFpRegister = codeGen.vmRegisters.nextFreeFloat()
|
||||
code += expressionEval.translateExpression(assignment.value, -1, resultFpRegister)
|
||||
} else {
|
||||
resultRegister = if (assignment.value is PtMachineRegister) {
|
||||
(assignment.value as PtMachineRegister).register
|
||||
} else {
|
||||
val reg = codeGen.vmRegisters.nextFree()
|
||||
code += expressionEval.translateExpression(assignment.value, reg, -1)
|
||||
reg
|
||||
}
|
||||
}
|
||||
}
|
||||
if(ident!=null) {
|
||||
val address = codeGen.allocations.get(ident.targetName)
|
||||
code += if(zero) {
|
||||
VmCodeInstruction(Opcode.STOREZM, vmDt, value = address)
|
||||
} else {
|
||||
if (vmDt == VmDataType.FLOAT)
|
||||
VmCodeInstruction(Opcode.STOREM, vmDt, fpReg1 = resultFpRegister, value = address)
|
||||
else
|
||||
VmCodeInstruction(Opcode.STOREM, vmDt, reg1 = resultRegister, value = address)
|
||||
}
|
||||
}
|
||||
else if(array!=null) {
|
||||
val variable = array.variable.targetName
|
||||
var variableAddr = codeGen.allocations.get(variable)
|
||||
val itemsize = codeGen.program.memsizer.memorySize(array.type)
|
||||
|
||||
if(array.variable.type==DataType.UWORD) {
|
||||
// indexing a pointer var instead of a real array or string
|
||||
if(itemsize!=1)
|
||||
throw AssemblyError("non-array var indexing requires bytes dt")
|
||||
if(array.index.type!=DataType.UBYTE)
|
||||
throw AssemblyError("non-array var indexing requires bytes index")
|
||||
val idxReg = codeGen.vmRegisters.nextFree()
|
||||
code += expressionEval.translateExpression(array.index, idxReg, -1)
|
||||
if(zero) {
|
||||
// there's no STOREZIX instruction
|
||||
resultRegister = codeGen.vmRegisters.nextFree()
|
||||
code += VmCodeInstruction(Opcode.LOAD, vmDt, reg1=resultRegister, value=0)
|
||||
}
|
||||
code += VmCodeInstruction(Opcode.STOREIX, vmDt, reg1=resultRegister, reg2=idxReg, value = variableAddr)
|
||||
return code
|
||||
}
|
||||
|
||||
val fixedIndex = constIntValue(array.index)
|
||||
if(zero) {
|
||||
if(fixedIndex!=null) {
|
||||
variableAddr += fixedIndex*itemsize
|
||||
code += VmCodeInstruction(Opcode.STOREZM, vmDt, value=variableAddr)
|
||||
} else {
|
||||
val indexReg = codeGen.vmRegisters.nextFree()
|
||||
code += loadIndexReg(array, itemsize, indexReg)
|
||||
code += VmCodeInstruction(Opcode.STOREZX, vmDt, reg1=indexReg, value=variableAddr)
|
||||
}
|
||||
} else {
|
||||
if(vmDt== VmDataType.FLOAT) {
|
||||
if(fixedIndex!=null) {
|
||||
variableAddr += fixedIndex*itemsize
|
||||
code += VmCodeInstruction(Opcode.STOREM, vmDt, fpReg1 = resultFpRegister, value=variableAddr)
|
||||
} else {
|
||||
val indexReg = codeGen.vmRegisters.nextFree()
|
||||
code += loadIndexReg(array, itemsize, indexReg)
|
||||
code += VmCodeInstruction(Opcode.STOREX, vmDt, reg1 = resultRegister, reg2=indexReg, value=variableAddr)
|
||||
}
|
||||
} else {
|
||||
if(fixedIndex!=null) {
|
||||
variableAddr += fixedIndex*itemsize
|
||||
code += VmCodeInstruction(Opcode.STOREM, vmDt, reg1 = resultRegister, value=variableAddr)
|
||||
} else {
|
||||
val indexReg = codeGen.vmRegisters.nextFree()
|
||||
code += loadIndexReg(array, itemsize, indexReg)
|
||||
code += VmCodeInstruction(Opcode.STOREX, vmDt, reg1 = resultRegister, reg2=indexReg, value=variableAddr)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
else if(memory!=null) {
|
||||
require(vmDt== VmDataType.BYTE)
|
||||
if(zero) {
|
||||
if(memory.address is PtNumber) {
|
||||
code += VmCodeInstruction(Opcode.STOREZM, vmDt, value=(memory.address as PtNumber).number.toInt())
|
||||
} else {
|
||||
val addressReg = codeGen.vmRegisters.nextFree()
|
||||
code += expressionEval.translateExpression(memory.address, addressReg, -1)
|
||||
code += VmCodeInstruction(Opcode.STOREZI, vmDt, reg1=addressReg)
|
||||
}
|
||||
} else {
|
||||
if(memory.address is PtNumber) {
|
||||
code += VmCodeInstruction(Opcode.STOREM, vmDt, reg1=resultRegister, value=(memory.address as PtNumber).number.toInt())
|
||||
} else {
|
||||
val addressReg = codeGen.vmRegisters.nextFree()
|
||||
code += expressionEval.translateExpression(memory.address, addressReg, -1)
|
||||
code += VmCodeInstruction(Opcode.STOREI, vmDt, reg1=resultRegister, reg2=addressReg)
|
||||
}
|
||||
}
|
||||
}
|
||||
else
|
||||
throw AssemblyError("weird assigntarget")
|
||||
return code
|
||||
}
|
||||
|
||||
private fun loadIndexReg(array: PtArrayIndexer, itemsize: Int, indexReg: Int): VmCodeChunk {
|
||||
val code = VmCodeChunk()
|
||||
if(itemsize==1) {
|
||||
code += expressionEval.translateExpression(array.index, indexReg, -1)
|
||||
}
|
||||
else {
|
||||
val mult = PtBinaryExpression("*", DataType.UBYTE, array.position)
|
||||
mult.children += array.index
|
||||
mult.children += PtNumber(DataType.UBYTE, itemsize.toDouble(), array.position)
|
||||
code += expressionEval.translateExpression(mult, indexReg, -1)
|
||||
}
|
||||
return code
|
||||
}
|
||||
}
|
@ -1,369 +0,0 @@
|
||||
package prog8.codegen.virtual
|
||||
|
||||
import prog8.code.StStaticVariable
|
||||
import prog8.code.ast.*
|
||||
import prog8.code.core.AssemblyError
|
||||
import prog8.code.core.DataType
|
||||
import prog8.intermediate.Opcode
|
||||
import prog8.intermediate.VmDataType
|
||||
import prog8.vm.Syscall
|
||||
|
||||
internal class BuiltinFuncGen(private val codeGen: CodeGen, private val exprGen: ExpressionGen) {
|
||||
|
||||
fun translate(call: PtBuiltinFunctionCall, resultRegister: Int): VmCodeChunk {
|
||||
return when(call.name) {
|
||||
"any" -> funcAny(call, resultRegister)
|
||||
"all" -> funcAll(call, resultRegister)
|
||||
"abs" -> funcAbs(call, resultRegister)
|
||||
"cmp" -> funcCmp(call)
|
||||
"sgn" -> funcSgn(call, resultRegister)
|
||||
"sqrt16" -> funcSqrt16(call, resultRegister)
|
||||
"pop" -> funcPop(call)
|
||||
"popw" -> funcPopw(call)
|
||||
"push" -> funcPush(call)
|
||||
"pushw" -> funcPushw(call)
|
||||
"rsave",
|
||||
"rsavex",
|
||||
"rrestore",
|
||||
"rrestorex" -> VmCodeChunk() // vm doesn't have registers to save/restore
|
||||
"rnd" -> funcRnd(resultRegister)
|
||||
"rndw" -> funcRndw(resultRegister)
|
||||
"callfar" -> throw AssemblyError("callfar() is for cx16 target only")
|
||||
"callrom" -> throw AssemblyError("callrom() is for cx16 target only")
|
||||
"msb" -> funcMsb(call, resultRegister)
|
||||
"lsb" -> funcLsb(call, resultRegister)
|
||||
"memory" -> funcMemory(call, resultRegister)
|
||||
"peek" -> funcPeek(call, resultRegister)
|
||||
"peekw" -> funcPeekW(call, resultRegister)
|
||||
"poke" -> funcPoke(call)
|
||||
"pokew" -> funcPokeW(call)
|
||||
"pokemon" -> VmCodeChunk()
|
||||
"mkword" -> funcMkword(call, resultRegister)
|
||||
"sort" -> funcSort(call)
|
||||
"reverse" -> funcReverse(call)
|
||||
"rol" -> funcRolRor(Opcode.ROXL, call, resultRegister)
|
||||
"ror" -> funcRolRor(Opcode.ROXR, call, resultRegister)
|
||||
"rol2" -> funcRolRor(Opcode.ROL, call, resultRegister)
|
||||
"ror2" -> funcRolRor(Opcode.ROR, call, resultRegister)
|
||||
else -> throw AssemblyError("missing builtinfunc for ${call.name}")
|
||||
}
|
||||
}
|
||||
|
||||
private fun funcCmp(call: PtBuiltinFunctionCall): VmCodeChunk {
|
||||
val code = VmCodeChunk()
|
||||
val leftRegister = codeGen.vmRegisters.nextFree()
|
||||
val rightRegister = codeGen.vmRegisters.nextFree()
|
||||
code += exprGen.translateExpression(call.args[0], leftRegister, -1)
|
||||
code += exprGen.translateExpression(call.args[1], rightRegister, -1)
|
||||
code += VmCodeInstruction(Opcode.CMP, codeGen.vmType(call.args[0].type), reg1=leftRegister, reg2=rightRegister)
|
||||
return code
|
||||
}
|
||||
|
||||
private fun funcAny(call: PtBuiltinFunctionCall, resultRegister: Int): VmCodeChunk {
|
||||
val arrayName = call.args[0] as PtIdentifier
|
||||
val array = codeGen.symbolTable.flat.getValue(arrayName.targetName) as StStaticVariable
|
||||
val code = VmCodeChunk()
|
||||
val syscall =
|
||||
when (array.dt) {
|
||||
DataType.ARRAY_UB,
|
||||
DataType.ARRAY_B -> Syscall.ANY_BYTE
|
||||
DataType.ARRAY_UW,
|
||||
DataType.ARRAY_W -> Syscall.ANY_WORD
|
||||
DataType.ARRAY_F -> Syscall.ANY_FLOAT
|
||||
else -> throw IllegalArgumentException("weird type")
|
||||
}
|
||||
code += exprGen.translateExpression(call.args[0], 0, -1)
|
||||
code += VmCodeInstruction(Opcode.LOAD, VmDataType.BYTE, reg1 = 1, value = array.length)
|
||||
code += VmCodeInstruction(Opcode.SYSCALL, value = syscall.ordinal)
|
||||
if (resultRegister != 0)
|
||||
code += VmCodeInstruction(Opcode.LOADR, VmDataType.BYTE, reg1 = resultRegister, reg2 = 0)
|
||||
return code
|
||||
}
|
||||
|
||||
private fun funcAll(call: PtBuiltinFunctionCall, resultRegister: Int): VmCodeChunk {
|
||||
val arrayName = call.args[0] as PtIdentifier
|
||||
val array = codeGen.symbolTable.flat.getValue(arrayName.targetName) as StStaticVariable
|
||||
val syscall =
|
||||
when(array.dt) {
|
||||
DataType.ARRAY_UB,
|
||||
DataType.ARRAY_B -> Syscall.ALL_BYTE
|
||||
DataType.ARRAY_UW,
|
||||
DataType.ARRAY_W -> Syscall.ALL_WORD
|
||||
DataType.ARRAY_F -> Syscall.ALL_FLOAT
|
||||
else -> throw IllegalArgumentException("weird type")
|
||||
}
|
||||
val code = VmCodeChunk()
|
||||
code += exprGen.translateExpression(call.args[0], 0, -1)
|
||||
code += VmCodeInstruction(Opcode.LOAD, VmDataType.BYTE, reg1=1, value=array.length)
|
||||
code += VmCodeInstruction(Opcode.SYSCALL, value=syscall.ordinal)
|
||||
if(resultRegister!=0)
|
||||
code += VmCodeInstruction(Opcode.LOADR, VmDataType.BYTE, reg1=resultRegister, reg2=0)
|
||||
return code
|
||||
}
|
||||
|
||||
private fun funcAbs(call: PtBuiltinFunctionCall, resultRegister: Int): VmCodeChunk {
|
||||
val code = VmCodeChunk()
|
||||
val sourceDt = call.args.single().type
|
||||
if(sourceDt!=DataType.UWORD) {
|
||||
code += exprGen.translateExpression(call.args[0], resultRegister, -1)
|
||||
when (sourceDt) {
|
||||
DataType.UBYTE -> {
|
||||
code += VmCodeInstruction(Opcode.EXT, VmDataType.BYTE, reg1=resultRegister)
|
||||
}
|
||||
DataType.BYTE -> {
|
||||
val notNegativeLabel = codeGen.createLabelName()
|
||||
val compareReg = codeGen.vmRegisters.nextFree()
|
||||
code += VmCodeInstruction(Opcode.LOADR, VmDataType.BYTE, reg1=compareReg, reg2=resultRegister)
|
||||
code += VmCodeInstruction(Opcode.AND, VmDataType.BYTE, reg1=compareReg, value=0x80)
|
||||
code += VmCodeInstruction(Opcode.BZ, VmDataType.BYTE, reg1=compareReg, labelSymbol = notNegativeLabel)
|
||||
code += VmCodeInstruction(Opcode.NEG, VmDataType.BYTE, reg1=resultRegister)
|
||||
code += VmCodeInstruction(Opcode.EXT, VmDataType.BYTE, reg1=resultRegister)
|
||||
code += VmCodeLabel(notNegativeLabel)
|
||||
}
|
||||
DataType.WORD -> {
|
||||
val notNegativeLabel = codeGen.createLabelName()
|
||||
val compareReg = codeGen.vmRegisters.nextFree()
|
||||
code += VmCodeInstruction(Opcode.LOADR, VmDataType.WORD, reg1=compareReg, reg2=resultRegister)
|
||||
code += VmCodeInstruction(Opcode.AND, VmDataType.WORD, reg1=compareReg, value=0x8000)
|
||||
code += VmCodeInstruction(Opcode.BZ, VmDataType.WORD, reg1=compareReg, labelSymbol = notNegativeLabel)
|
||||
code += VmCodeInstruction(Opcode.NEG, VmDataType.WORD, reg1=resultRegister)
|
||||
code += VmCodeLabel(notNegativeLabel)
|
||||
}
|
||||
else -> throw AssemblyError("weird type")
|
||||
}
|
||||
}
|
||||
return code
|
||||
}
|
||||
|
||||
private fun funcSgn(call: PtBuiltinFunctionCall, resultRegister: Int): VmCodeChunk {
|
||||
val code = VmCodeChunk()
|
||||
val reg = codeGen.vmRegisters.nextFree()
|
||||
code += exprGen.translateExpression(call.args.single(), reg, -1)
|
||||
code += VmCodeInstruction(Opcode.SGN, codeGen.vmType(call.type), reg1=resultRegister, reg2=reg)
|
||||
return code
|
||||
}
|
||||
|
||||
private fun funcSqrt16(call: PtBuiltinFunctionCall, resultRegister: Int): VmCodeChunk {
|
||||
val code = VmCodeChunk()
|
||||
val reg = codeGen.vmRegisters.nextFree()
|
||||
code += exprGen.translateExpression(call.args.single(), reg, -1)
|
||||
code += VmCodeInstruction(Opcode.SQRT, VmDataType.WORD, reg1=resultRegister, reg2=reg)
|
||||
return code
|
||||
}
|
||||
|
||||
private fun funcPop(call: PtBuiltinFunctionCall): VmCodeChunk {
|
||||
val code = VmCodeChunk()
|
||||
val reg = codeGen.vmRegisters.nextFree()
|
||||
code += VmCodeInstruction(Opcode.POP, VmDataType.BYTE, reg1=reg)
|
||||
code += assignRegisterTo(call.args.single(), reg)
|
||||
return code
|
||||
}
|
||||
|
||||
private fun funcPopw(call: PtBuiltinFunctionCall): VmCodeChunk {
|
||||
val code = VmCodeChunk()
|
||||
val reg = codeGen.vmRegisters.nextFree()
|
||||
code += VmCodeInstruction(Opcode.POP, VmDataType.WORD, reg1=reg)
|
||||
code += assignRegisterTo(call.args.single(), reg)
|
||||
return code
|
||||
}
|
||||
|
||||
private fun funcPush(call: PtBuiltinFunctionCall): VmCodeChunk {
|
||||
val code = VmCodeChunk()
|
||||
val reg = codeGen.vmRegisters.nextFree()
|
||||
code += exprGen.translateExpression(call.args.single(), reg, -1)
|
||||
code += VmCodeInstruction(Opcode.PUSH, VmDataType.BYTE, reg1=reg)
|
||||
return code
|
||||
}
|
||||
|
||||
private fun funcPushw(call: PtBuiltinFunctionCall): VmCodeChunk {
|
||||
val code = VmCodeChunk()
|
||||
val reg = codeGen.vmRegisters.nextFree()
|
||||
code += exprGen.translateExpression(call.args.single(), reg, -1)
|
||||
code += VmCodeInstruction(Opcode.PUSH, VmDataType.WORD, reg1=reg)
|
||||
return code
|
||||
}
|
||||
|
||||
private fun funcReverse(call: PtBuiltinFunctionCall): VmCodeChunk {
|
||||
val arrayName = call.args[0] as PtIdentifier
|
||||
val array = codeGen.symbolTable.flat.getValue(arrayName.targetName) as StStaticVariable
|
||||
val sortSyscall =
|
||||
when(array.dt) {
|
||||
DataType.ARRAY_UB, DataType.ARRAY_B, DataType.STR -> Syscall.REVERSE_BYTES
|
||||
DataType.ARRAY_UW, DataType.ARRAY_W -> Syscall.REVERSE_WORDS
|
||||
DataType.ARRAY_F -> Syscall.REVERSE_FLOATS
|
||||
else -> throw IllegalArgumentException("weird type to reverse")
|
||||
}
|
||||
val code = VmCodeChunk()
|
||||
code += exprGen.translateExpression(call.args[0], 0, -1)
|
||||
code += VmCodeInstruction(Opcode.LOAD, VmDataType.BYTE, reg1=1, value=array.length)
|
||||
code += VmCodeInstruction(Opcode.SYSCALL, value=sortSyscall.ordinal)
|
||||
return code
|
||||
}
|
||||
|
||||
private fun funcSort(call: PtBuiltinFunctionCall): VmCodeChunk {
|
||||
val arrayName = call.args[0] as PtIdentifier
|
||||
val array = codeGen.symbolTable.flat.getValue(arrayName.targetName) as StStaticVariable
|
||||
val sortSyscall =
|
||||
when(array.dt) {
|
||||
DataType.ARRAY_UB -> Syscall.SORT_UBYTE
|
||||
DataType.ARRAY_B -> Syscall.SORT_BYTE
|
||||
DataType.ARRAY_UW -> Syscall.SORT_UWORD
|
||||
DataType.ARRAY_W -> Syscall.SORT_WORD
|
||||
DataType.STR -> Syscall.SORT_UBYTE
|
||||
DataType.ARRAY_F -> throw IllegalArgumentException("sorting a floating point array is not supported")
|
||||
else -> throw IllegalArgumentException("weird type to sort")
|
||||
}
|
||||
val code = VmCodeChunk()
|
||||
code += exprGen.translateExpression(call.args[0], 0, -1)
|
||||
code += VmCodeInstruction(Opcode.LOAD, VmDataType.BYTE, reg1=1, value=array.length)
|
||||
code += VmCodeInstruction(Opcode.SYSCALL, value=sortSyscall.ordinal)
|
||||
return code
|
||||
}
|
||||
|
||||
private fun funcMkword(call: PtBuiltinFunctionCall, resultRegister: Int): VmCodeChunk {
|
||||
val msbReg = codeGen.vmRegisters.nextFree()
|
||||
val code = VmCodeChunk()
|
||||
code += exprGen.translateExpression(call.args[0], msbReg, -1)
|
||||
code += exprGen.translateExpression(call.args[1], resultRegister, -1)
|
||||
code += VmCodeInstruction(Opcode.CONCAT, VmDataType.BYTE, reg1=resultRegister, reg2=msbReg)
|
||||
return code
|
||||
}
|
||||
|
||||
private fun funcPokeW(call: PtBuiltinFunctionCall): VmCodeChunk {
|
||||
val code = VmCodeChunk()
|
||||
if(codeGen.isZero(call.args[1])) {
|
||||
if (call.args[0] is PtNumber) {
|
||||
val address = (call.args[0] as PtNumber).number.toInt()
|
||||
code += VmCodeInstruction(Opcode.STOREZM, VmDataType.WORD, value = address)
|
||||
} else {
|
||||
val addressReg = codeGen.vmRegisters.nextFree()
|
||||
code += exprGen.translateExpression(call.args[0], addressReg, -1)
|
||||
code += VmCodeInstruction(Opcode.STOREZI, VmDataType.WORD, reg2 = addressReg)
|
||||
}
|
||||
} else {
|
||||
val valueReg = codeGen.vmRegisters.nextFree()
|
||||
if (call.args[0] is PtNumber) {
|
||||
val address = (call.args[0] as PtNumber).number.toInt()
|
||||
code += exprGen.translateExpression(call.args[1], valueReg, -1)
|
||||
code += VmCodeInstruction(Opcode.STOREM, VmDataType.WORD, reg1 = valueReg, value = address)
|
||||
} else {
|
||||
val addressReg = codeGen.vmRegisters.nextFree()
|
||||
code += exprGen.translateExpression(call.args[0], addressReg, -1)
|
||||
code += exprGen.translateExpression(call.args[1], valueReg, -1)
|
||||
code += VmCodeInstruction(Opcode.STOREI, VmDataType.WORD, reg1 = valueReg, reg2 = addressReg)
|
||||
}
|
||||
}
|
||||
return code
|
||||
}
|
||||
|
||||
private fun funcPoke(call: PtBuiltinFunctionCall): VmCodeChunk {
|
||||
val code = VmCodeChunk()
|
||||
if(codeGen.isZero(call.args[1])) {
|
||||
if (call.args[0] is PtNumber) {
|
||||
val address = (call.args[0] as PtNumber).number.toInt()
|
||||
code += VmCodeInstruction(Opcode.STOREZM, VmDataType.BYTE, value = address)
|
||||
} else {
|
||||
val addressReg = codeGen.vmRegisters.nextFree()
|
||||
code += exprGen.translateExpression(call.args[0], addressReg, -1)
|
||||
code += VmCodeInstruction(Opcode.STOREZI, VmDataType.BYTE, reg2 = addressReg)
|
||||
}
|
||||
} else {
|
||||
val valueReg = codeGen.vmRegisters.nextFree()
|
||||
if (call.args[0] is PtNumber) {
|
||||
val address = (call.args[0] as PtNumber).number.toInt()
|
||||
code += exprGen.translateExpression(call.args[1], valueReg, -1)
|
||||
code += VmCodeInstruction(Opcode.STOREM, VmDataType.BYTE, reg1 = valueReg, value = address)
|
||||
} else {
|
||||
val addressReg = codeGen.vmRegisters.nextFree()
|
||||
code += exprGen.translateExpression(call.args[0], addressReg, -1)
|
||||
code += exprGen.translateExpression(call.args[1], valueReg, -1)
|
||||
code += VmCodeInstruction(Opcode.STOREI, VmDataType.BYTE, reg1 = valueReg, reg2 = addressReg)
|
||||
}
|
||||
}
|
||||
return code
|
||||
}
|
||||
|
||||
private fun funcPeekW(call: PtBuiltinFunctionCall, resultRegister: Int): VmCodeChunk {
|
||||
val code = VmCodeChunk()
|
||||
if(call.args[0] is PtNumber) {
|
||||
val address = (call.args[0] as PtNumber).number.toInt()
|
||||
code += VmCodeInstruction(Opcode.LOADM, VmDataType.WORD, reg1 = resultRegister, value = address)
|
||||
} else {
|
||||
val addressReg = codeGen.vmRegisters.nextFree()
|
||||
code += exprGen.translateExpression(call.args.single(), addressReg, -1)
|
||||
code += VmCodeInstruction(Opcode.LOADI, VmDataType.WORD, reg1 = resultRegister, reg2 = addressReg)
|
||||
}
|
||||
return code
|
||||
}
|
||||
|
||||
private fun funcPeek(call: PtBuiltinFunctionCall, resultRegister: Int): VmCodeChunk {
|
||||
val code = VmCodeChunk()
|
||||
if(call.args[0] is PtNumber) {
|
||||
val address = (call.args[0] as PtNumber).number.toInt()
|
||||
code += VmCodeInstruction(Opcode.LOADM, VmDataType.BYTE, reg1 = resultRegister, value = address)
|
||||
} else {
|
||||
val addressReg = codeGen.vmRegisters.nextFree()
|
||||
code += exprGen.translateExpression(call.args.single(), addressReg, -1)
|
||||
code += VmCodeInstruction(Opcode.LOADI, VmDataType.BYTE, reg1 = resultRegister, reg2 = addressReg)
|
||||
}
|
||||
return code
|
||||
}
|
||||
|
||||
private fun funcRnd(resultRegister: Int): VmCodeChunk {
|
||||
val code = VmCodeChunk()
|
||||
code += VmCodeInstruction(Opcode.RND, VmDataType.BYTE, reg1=resultRegister)
|
||||
return code
|
||||
}
|
||||
|
||||
private fun funcRndw(resultRegister: Int): VmCodeChunk {
|
||||
val code = VmCodeChunk()
|
||||
code += VmCodeInstruction(Opcode.RND, VmDataType.WORD, reg1=resultRegister)
|
||||
return code
|
||||
}
|
||||
|
||||
private fun funcMemory(call: PtBuiltinFunctionCall, resultRegister: Int): VmCodeChunk {
|
||||
val name = (call.args[0] as PtString).value
|
||||
val size = (call.args[1] as PtNumber).number.toUInt()
|
||||
val align = (call.args[2] as PtNumber).number.toUInt()
|
||||
val prefixedName = "prog8_memoryslab_$name"
|
||||
val memorySlab = codeGen.allocations.getMemorySlab(prefixedName) ?: codeGen.allocations.allocateMemorySlab(prefixedName, size, align, call.position)
|
||||
val code = VmCodeChunk()
|
||||
code += VmCodeInstruction(Opcode.LOAD, VmDataType.WORD, reg1=resultRegister, value=memorySlab.allocatedAddress!!.toInt())
|
||||
return code
|
||||
}
|
||||
|
||||
private fun funcLsb(call: PtBuiltinFunctionCall, resultRegister: Int): VmCodeChunk {
|
||||
val code = VmCodeChunk()
|
||||
code += exprGen.translateExpression(call.args.single(), resultRegister, -1)
|
||||
// note: if a word result is needed, the upper byte is cleared by the typecast that follows. No need to do it here.
|
||||
return code
|
||||
}
|
||||
|
||||
private fun funcMsb(call: PtBuiltinFunctionCall, resultRegister: Int): VmCodeChunk {
|
||||
val code = VmCodeChunk()
|
||||
code += exprGen.translateExpression(call.args.single(), resultRegister, -1)
|
||||
code += VmCodeInstruction(Opcode.MSIG, VmDataType.BYTE, reg1 = resultRegister, reg2=resultRegister)
|
||||
// note: if a word result is needed, the upper byte is cleared by the typecast that follows. No need to do it here.
|
||||
return code
|
||||
}
|
||||
|
||||
private fun funcRolRor(opcode: Opcode, call: PtBuiltinFunctionCall, resultRegister: Int): VmCodeChunk {
|
||||
val vmDt = codeGen.vmType(call.args[0].type)
|
||||
val code = VmCodeChunk()
|
||||
code += exprGen.translateExpression(call.args[0], resultRegister, -1)
|
||||
code += VmCodeInstruction(opcode, vmDt, reg1=resultRegister)
|
||||
code += assignRegisterTo(call.args[0], resultRegister)
|
||||
return code
|
||||
}
|
||||
|
||||
private fun assignRegisterTo(target: PtExpression, register: Int): VmCodeChunk {
|
||||
val code = VmCodeChunk()
|
||||
val assignment = PtAssignment(target.position)
|
||||
val assignTarget = PtAssignTarget(target.position)
|
||||
assignTarget.children.add(target)
|
||||
assignment.children.add(assignTarget)
|
||||
assignment.children.add(PtMachineRegister(register, target.type, target.position))
|
||||
code += codeGen.translateNode(assignment)
|
||||
return code
|
||||
}
|
||||
}
|
@ -1,814 +0,0 @@
|
||||
package prog8.codegen.virtual
|
||||
|
||||
import prog8.code.StStaticVariable
|
||||
import prog8.code.SymbolTable
|
||||
import prog8.code.ast.*
|
||||
import prog8.code.core.*
|
||||
import prog8.intermediate.Opcode
|
||||
import prog8.intermediate.VmDataType
|
||||
import kotlin.math.pow
|
||||
|
||||
|
||||
internal class VmRegisterPool {
|
||||
private var firstFree: Int=3 // integer registers 0,1,2 are reserved
|
||||
private var firstFreeFloat: Int=0
|
||||
|
||||
fun peekNext() = firstFree
|
||||
fun peekNextFloat() = firstFreeFloat
|
||||
|
||||
fun nextFree(): Int {
|
||||
val result = firstFree
|
||||
firstFree++
|
||||
if(firstFree>65535)
|
||||
throw AssemblyError("out of virtual registers (int)")
|
||||
return result
|
||||
}
|
||||
|
||||
fun nextFreeFloat(): Int {
|
||||
val result = firstFreeFloat
|
||||
firstFreeFloat++
|
||||
if(firstFreeFloat>65535)
|
||||
throw AssemblyError("out of virtual registers (fp)")
|
||||
return result
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
class CodeGen(internal val program: PtProgram,
|
||||
internal val symbolTable: SymbolTable,
|
||||
internal val options: CompilationOptions,
|
||||
internal val errors: IErrorReporter
|
||||
): IAssemblyGenerator {
|
||||
|
||||
internal val allocations = VariableAllocator(symbolTable, program)
|
||||
private val expressionEval = ExpressionGen(this)
|
||||
private val builtinFuncGen = BuiltinFuncGen(this, expressionEval)
|
||||
private val assignmentGen = AssignmentGen(this, expressionEval)
|
||||
internal val vmRegisters = VmRegisterPool()
|
||||
|
||||
override fun compileToAssembly(): IAssemblyProgram? {
|
||||
val vmprog = AssemblyProgram(program.name, allocations)
|
||||
|
||||
if(!options.dontReinitGlobals) {
|
||||
// collect global variables initializers
|
||||
program.allBlocks().forEach {
|
||||
val code = VmCodeChunk()
|
||||
it.children.filterIsInstance<PtAssignment>().forEach { assign -> code += assignmentGen.translate(assign) }
|
||||
vmprog.addGlobalInits(code)
|
||||
}
|
||||
}
|
||||
|
||||
if(options.symbolDefs.isNotEmpty())
|
||||
throw AssemblyError("virtual target doesn't support symbols defined on the commandline")
|
||||
if(options.evalStackBaseAddress!=null)
|
||||
throw AssemblyError("virtual target doesn't use eval-stack")
|
||||
|
||||
for (block in program.allBlocks()) {
|
||||
vmprog.addBlock(translate(block))
|
||||
}
|
||||
|
||||
if(options.optimize) {
|
||||
val optimizer = VmPeepholeOptimizer(vmprog)
|
||||
optimizer.optimize()
|
||||
}
|
||||
|
||||
println("Vm codegen: memory usage=${allocations.freeMem}")
|
||||
|
||||
return vmprog
|
||||
}
|
||||
|
||||
|
||||
internal fun translateNode(node: PtNode): VmCodeChunk {
|
||||
val code = when(node) {
|
||||
is PtBlock -> translate(node)
|
||||
is PtSub -> translate(node)
|
||||
is PtAsmSub -> translate(node)
|
||||
is PtScopeVarsDecls -> VmCodeChunk() // vars should be looked up via symbol table
|
||||
is PtVariable -> VmCodeChunk() // var should be looked up via symbol table
|
||||
is PtMemMapped -> VmCodeChunk() // memmapped var should be looked up via symbol table
|
||||
is PtConstant -> VmCodeChunk() // constants have all been folded into the code
|
||||
is PtAssignment -> assignmentGen.translate(node)
|
||||
is PtNodeGroup -> translateGroup(node.children)
|
||||
is PtBuiltinFunctionCall -> translateBuiltinFunc(node, 0)
|
||||
is PtFunctionCall -> expressionEval.translate(node, 0, 0)
|
||||
is PtNop -> VmCodeChunk()
|
||||
is PtReturn -> translate(node)
|
||||
is PtJump -> translate(node)
|
||||
is PtWhen -> translate(node)
|
||||
is PtForLoop -> translate(node)
|
||||
is PtIfElse -> translate(node)
|
||||
is PtPostIncrDecr -> translate(node)
|
||||
is PtRepeatLoop -> translate(node)
|
||||
is PtLabel -> VmCodeChunk(VmCodeLabel(node.scopedName))
|
||||
is PtBreakpoint -> VmCodeChunk(VmCodeInstruction(Opcode.BREAKPOINT))
|
||||
is PtConditionalBranch -> translate(node)
|
||||
is PtInlineAssembly -> VmCodeChunk(VmCodeInlineAsm(node.assembly))
|
||||
is PtIncludeBinary -> VmCodeChunk(VmCodeInlineBinary(node.file, node.offset, node.length))
|
||||
is PtAddressOf,
|
||||
is PtContainmentCheck,
|
||||
is PtMemoryByte,
|
||||
is PtProgram,
|
||||
is PtArrayIndexer,
|
||||
is PtBinaryExpression,
|
||||
is PtIdentifier,
|
||||
is PtWhenChoice,
|
||||
is PtPrefix,
|
||||
is PtRange,
|
||||
is PtAssignTarget,
|
||||
is PtTypeCast,
|
||||
is PtSubroutineParameter,
|
||||
is PtNumber,
|
||||
is PtArray,
|
||||
is PtString -> throw AssemblyError("should not occur as separate statement node ${node.position}")
|
||||
else -> TODO("missing codegen for $node")
|
||||
}
|
||||
if(code.lines.isNotEmpty() && node.position.line!=0)
|
||||
code.lines.add(0, VmCodeComment(node.position.toString()))
|
||||
return code
|
||||
}
|
||||
|
||||
private fun translate(branch: PtConditionalBranch): VmCodeChunk {
|
||||
val code = VmCodeChunk()
|
||||
val elseLabel = createLabelName()
|
||||
// note that the branch opcode used is the opposite as the branch condition, because the generated code jumps to the 'else' part
|
||||
code += when(branch.condition) {
|
||||
BranchCondition.CS -> VmCodeInstruction(Opcode.BSTCC, labelSymbol = elseLabel)
|
||||
BranchCondition.CC -> VmCodeInstruction(Opcode.BSTCS, labelSymbol = elseLabel)
|
||||
BranchCondition.EQ, BranchCondition.Z -> VmCodeInstruction(Opcode.BSTNE, labelSymbol = elseLabel)
|
||||
BranchCondition.NE, BranchCondition.NZ -> VmCodeInstruction(Opcode.BSTEQ, labelSymbol = elseLabel)
|
||||
BranchCondition.MI, BranchCondition.NEG -> VmCodeInstruction(Opcode.BSTPOS, labelSymbol = elseLabel)
|
||||
BranchCondition.PL, BranchCondition.POS -> VmCodeInstruction(Opcode.BSTNEG, labelSymbol = elseLabel)
|
||||
BranchCondition.VC,
|
||||
BranchCondition.VS -> throw AssemblyError("conditional branch ${branch.condition} not supported in vm target due to lack of cpu V flag ${branch.position}")
|
||||
}
|
||||
code += translateNode(branch.trueScope)
|
||||
if(branch.falseScope.children.isNotEmpty()) {
|
||||
val endLabel = createLabelName()
|
||||
code += VmCodeInstruction(Opcode.JUMP, labelSymbol = endLabel)
|
||||
code += VmCodeLabel(elseLabel)
|
||||
code += translateNode(branch.falseScope)
|
||||
code += VmCodeLabel(endLabel)
|
||||
} else {
|
||||
code += VmCodeLabel(elseLabel)
|
||||
}
|
||||
return code
|
||||
}
|
||||
|
||||
    private fun translate(whenStmt: PtWhen): VmCodeChunk {
        if(whenStmt.choices.children.isEmpty())
            return VmCodeChunk()
        val code = VmCodeChunk()
        val valueReg = vmRegisters.nextFree()
        val choiceReg = vmRegisters.nextFree()
        val valueDt = vmType(whenStmt.value.type)
        code += expressionEval.translateExpression(whenStmt.value, valueReg, -1)
        val choices = whenStmt.choices.children.map { it as PtWhenChoice }
        val endLabel = createLabelName()
        for (choice in choices) {
            if(choice.isElse) {
                code += translateNode(choice.statements)
            } else {
                val skipLabel = createLabelName()
                val values = choice.values.children.map { it as PtNumber }
                if(values.size==1) {
                    code += VmCodeInstruction(Opcode.LOAD, valueDt, reg1=choiceReg, value=values[0].number.toInt())
                    code += VmCodeInstruction(Opcode.BNE, valueDt, reg1=valueReg, reg2=choiceReg, labelSymbol = skipLabel)
                    code += translateNode(choice.statements)
                    if(choice.statements.children.last() !is PtReturn)
                        code += VmCodeInstruction(Opcode.JUMP, labelSymbol = endLabel)
                } else {
                    val matchLabel = createLabelName()
                    for (value in values) {
                        code += VmCodeInstruction(Opcode.LOAD, valueDt, reg1=choiceReg, value=value.number.toInt())
                        code += VmCodeInstruction(Opcode.BEQ, valueDt, reg1=valueReg, reg2=choiceReg, labelSymbol = matchLabel)
                    }
                    code += VmCodeInstruction(Opcode.JUMP, labelSymbol = skipLabel)
                    code += VmCodeLabel(matchLabel)
                    code += translateNode(choice.statements)
                    if(choice.statements.children.last() !is PtReturn)
                        code += VmCodeInstruction(Opcode.JUMP, labelSymbol = endLabel)
                }
                code += VmCodeLabel(skipLabel)
            }
        }
        code += VmCodeLabel(endLabel)
        return code
    }

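    // For loops come in three flavours here: over a constant range, over a non-constant range, and over a string or array variable.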
    private fun translate(forLoop: PtForLoop): VmCodeChunk {
        val loopvar = symbolTable.lookup(forLoop.variable.targetName) as StStaticVariable
        val iterable = forLoop.iterable
        val code = VmCodeChunk()
        when(iterable) {
            is PtRange -> {
                if(iterable.from is PtNumber && iterable.to is PtNumber)
                    code += translateForInConstantRange(forLoop, loopvar)
                else
                    code += translateForInNonConstantRange(forLoop, loopvar)
            }
            is PtIdentifier -> {
                val arrayAddress = allocations.get(iterable.targetName)
                val iterableVar = symbolTable.lookup(iterable.targetName) as StStaticVariable
                val loopvarAddress = allocations.get(loopvar.scopedName)
                val indexReg = vmRegisters.nextFree()
                val tmpReg = vmRegisters.nextFree()
                val loopLabel = createLabelName()
                val endLabel = createLabelName()
                if(iterableVar.dt==DataType.STR) {
                    // iterate over a zero-terminated string
                    code += VmCodeInstruction(Opcode.LOAD, VmDataType.BYTE, reg1=indexReg, value=0)
                    code += VmCodeLabel(loopLabel)
                    code += VmCodeInstruction(Opcode.LOADX, VmDataType.BYTE, reg1=tmpReg, reg2=indexReg, value = arrayAddress)
                    code += VmCodeInstruction(Opcode.BZ, VmDataType.BYTE, reg1=tmpReg, labelSymbol = endLabel)
                    code += VmCodeInstruction(Opcode.STOREM, VmDataType.BYTE, reg1=tmpReg, value = loopvarAddress)
                    code += translateNode(forLoop.statements)
                    code += VmCodeInstruction(Opcode.INC, VmDataType.BYTE, reg1=indexReg)
                    code += VmCodeInstruction(Opcode.JUMP, labelSymbol = loopLabel)
                    code += VmCodeLabel(endLabel)
                } else {
                    // iterate over array
                    val elementDt = ArrayToElementTypes.getValue(iterable.type)
                    val elementSize = program.memsizer.memorySize(elementDt)
                    val lengthBytes = iterableVar.length!! * elementSize
                    if(lengthBytes<256) {
                        val lengthReg = vmRegisters.nextFree()
                        code += VmCodeInstruction(Opcode.LOAD, VmDataType.BYTE, reg1=indexReg, value=0)
                        code += VmCodeInstruction(Opcode.LOAD, VmDataType.BYTE, reg1=lengthReg, value=lengthBytes)
                        code += VmCodeLabel(loopLabel)
                        code += VmCodeInstruction(Opcode.LOADX, vmType(elementDt), reg1=tmpReg, reg2=indexReg, value=arrayAddress)
                        code += VmCodeInstruction(Opcode.STOREM, vmType(elementDt), reg1=tmpReg, value = loopvarAddress)
                        code += translateNode(forLoop.statements)
                        code += addConstReg(VmDataType.BYTE, indexReg, elementSize)
                        code += VmCodeInstruction(Opcode.BNE, VmDataType.BYTE, reg1=indexReg, reg2=lengthReg, labelSymbol = loopLabel)
                    } else if(lengthBytes==256) {
                        code += VmCodeInstruction(Opcode.LOAD, VmDataType.BYTE, reg1=indexReg, value=0)
                        code += VmCodeLabel(loopLabel)
                        code += VmCodeInstruction(Opcode.LOADX, vmType(elementDt), reg1=tmpReg, reg2=indexReg, value=arrayAddress)
                        code += VmCodeInstruction(Opcode.STOREM, vmType(elementDt), reg1=tmpReg, value = loopvarAddress)
                        code += translateNode(forLoop.statements)
                        code += addConstReg(VmDataType.BYTE, indexReg, elementSize)
                        code += VmCodeInstruction(Opcode.BNZ, VmDataType.BYTE, reg1=indexReg, labelSymbol = loopLabel)
                    } else {
                        throw AssemblyError("iterator length should never exceed 256")
                    }
                }
            }
            else -> throw AssemblyError("weird for iterable")
        }
        return code
    }

    private fun translateForInNonConstantRange(forLoop: PtForLoop, loopvar: StStaticVariable): VmCodeChunk {
        val iterable = forLoop.iterable as PtRange
        val step = iterable.step.number.toInt()
        if (step==0)
            throw AssemblyError("step 0")
        val indexReg = vmRegisters.nextFree()
        val endvalueReg = vmRegisters.nextFree()
        val loopvarAddress = allocations.get(loopvar.scopedName)
        val loopvarDt = vmType(loopvar.dt)
        val loopLabel = createLabelName()
        val code = VmCodeChunk()

        code += expressionEval.translateExpression(iterable.to, endvalueReg, -1)
        code += expressionEval.translateExpression(iterable.from, indexReg, -1)
        code += VmCodeInstruction(Opcode.STOREM, loopvarDt, reg1=indexReg, value=loopvarAddress)
        code += VmCodeLabel(loopLabel)
        code += translateNode(forLoop.statements)
        code += addConstMem(loopvarDt, loopvarAddress.toUInt(), step)
        code += VmCodeInstruction(Opcode.LOADM, loopvarDt, reg1 = indexReg, value = loopvarAddress)
        val branchOpcode = if(loopvar.dt in SignedDatatypes) Opcode.BLES else Opcode.BLE
        code += VmCodeInstruction(branchOpcode, loopvarDt, reg1=indexReg, reg2=endvalueReg, labelSymbol=loopLabel)
        return code
    }

    private fun translateForInConstantRange(forLoop: PtForLoop, loopvar: StStaticVariable): VmCodeChunk {
        val loopLabel = createLabelName()
        val loopvarAddress = allocations.get(loopvar.scopedName)
        val indexReg = vmRegisters.nextFree()
        val loopvarDt = vmType(loopvar.dt)
        val iterable = forLoop.iterable as PtRange
        val step = iterable.step.number.toInt()
        val rangeStart = (iterable.from as PtNumber).number.toInt()
        val rangeEndUntyped = (iterable.to as PtNumber).number.toInt() + step
        if(step==0)
            throw AssemblyError("step 0")
        if(step>0 && rangeEndUntyped<rangeStart || step<0 && rangeEndUntyped>rangeStart)
            throw AssemblyError("empty range")
        val rangeEndWrapped = if(loopvarDt==VmDataType.BYTE) rangeEndUntyped and 255 else rangeEndUntyped and 65535
        val code = VmCodeChunk()
        val endvalueReg: Int
        if(rangeEndWrapped!=0) {
            endvalueReg = vmRegisters.nextFree()
            code += VmCodeInstruction(Opcode.LOAD, loopvarDt, reg1 = endvalueReg, value = rangeEndWrapped)
        } else {
            endvalueReg = -1 // not used
        }
        code += VmCodeInstruction(Opcode.LOAD, loopvarDt, reg1=indexReg, value=rangeStart)
        code += VmCodeInstruction(Opcode.STOREM, loopvarDt, reg1=indexReg, value=loopvarAddress)
        code += VmCodeLabel(loopLabel)
        code += translateNode(forLoop.statements)
        code += addConstMem(loopvarDt, loopvarAddress.toUInt(), step)
        code += VmCodeInstruction(Opcode.LOADM, loopvarDt, reg1 = indexReg, value = loopvarAddress)
        code += if(rangeEndWrapped==0) {
            VmCodeInstruction(Opcode.BNZ, loopvarDt, reg1 = indexReg, labelSymbol = loopLabel)
        } else {
            VmCodeInstruction(Opcode.BNE, loopvarDt, reg1 = indexReg, reg2 = endvalueReg, labelSymbol = loopLabel)
        }
        return code
    }

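    // Helpers that add a signed constant to a register or to a memory location; deltas in the range -2..2 become plain INC/DEC instructions.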
    private fun addConstReg(dt: VmDataType, reg: Int, value: Int): VmCodeChunk {
        val code = VmCodeChunk()
        when(value) {
            0 -> { /* do nothing */ }
            1 -> {
                code += VmCodeInstruction(Opcode.INC, dt, reg1=reg)
            }
            2 -> {
                code += VmCodeInstruction(Opcode.INC, dt, reg1=reg)
                code += VmCodeInstruction(Opcode.INC, dt, reg1=reg)
            }
            -1 -> {
                code += VmCodeInstruction(Opcode.DEC, dt, reg1=reg)
            }
            -2 -> {
                code += VmCodeInstruction(Opcode.DEC, dt, reg1=reg)
                code += VmCodeInstruction(Opcode.DEC, dt, reg1=reg)
            }
            else -> {
                code += if(value>0) {
                    VmCodeInstruction(Opcode.ADD, dt, reg1 = reg, value=value)
                } else {
                    VmCodeInstruction(Opcode.SUB, dt, reg1 = reg, value=-value)
                }
            }
        }
        return code
    }

    private fun addConstMem(dt: VmDataType, address: UInt, value: Int): VmCodeChunk {
        val code = VmCodeChunk()
        when(value) {
            0 -> { /* do nothing */ }
            1 -> {
                code += VmCodeInstruction(Opcode.INCM, dt, value=address.toInt())
            }
            2 -> {
                code += VmCodeInstruction(Opcode.INCM, dt, value=address.toInt())
                code += VmCodeInstruction(Opcode.INCM, dt, value=address.toInt())
            }
            -1 -> {
                code += VmCodeInstruction(Opcode.DECM, dt, value=address.toInt())
            }
            -2 -> {
                code += VmCodeInstruction(Opcode.DECM, dt, value=address.toInt())
                code += VmCodeInstruction(Opcode.DECM, dt, value=address.toInt())
            }
            else -> {
                val valueReg = vmRegisters.nextFree()
                if(value>0) {
                    code += VmCodeInstruction(Opcode.LOAD, dt, reg1=valueReg, value=value)
                    code += VmCodeInstruction(Opcode.ADDM, dt, reg1=valueReg, value=address.toInt())
                }
                else {
                    code += VmCodeInstruction(Opcode.LOAD, dt, reg1=valueReg, value=-value)
                    code += VmCodeInstruction(Opcode.SUBM, dt, reg1=valueReg, value=address.toInt())
                }
            }
        }
        return code
    }

    internal fun multiplyByConstFloat(fpReg: Int, factor: Float): VmCodeChunk {
        val code = VmCodeChunk()
        if(factor==1f)
            return code
        code += if(factor==0f) {
            VmCodeInstruction(Opcode.LOAD, VmDataType.FLOAT, fpReg1 = fpReg, fpValue = 0f)
        } else {
            VmCodeInstruction(Opcode.MUL, VmDataType.FLOAT, fpReg1 = fpReg, fpValue=factor)
        }
        return code
    }

    internal fun multiplyByConstFloatInplace(address: Int, factor: Float): VmCodeChunk {
        val code = VmCodeChunk()
        if(factor==1f)
            return code
        if(factor==0f) {
            code += VmCodeInstruction(Opcode.STOREZM, VmDataType.FLOAT, value = address)
        } else {
            val factorReg = vmRegisters.nextFreeFloat()
            code += VmCodeInstruction(Opcode.LOAD, VmDataType.FLOAT, fpReg1=factorReg, fpValue = factor)
            code += VmCodeInstruction(Opcode.MULM, VmDataType.FLOAT, fpReg1 = factorReg, value = address)
        }
        return code
    }

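    // Multiplication and (unsigned) division by a constant power of two are strength-reduced to shift instructions in the helpers below.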
    internal val powersOfTwo = (0..16).map { 2.0.pow(it.toDouble()).toInt() }

    internal fun multiplyByConst(dt: VmDataType, reg: Int, factor: Int): VmCodeChunk {
        val code = VmCodeChunk()
        if(factor==1)
            return code
        val pow2 = powersOfTwo.indexOf(factor)
        if(pow2==1) {
            // just shift 1 bit
            code += VmCodeInstruction(Opcode.LSL, dt, reg1=reg)
        }
        else if(pow2>=1) {
            // just shift multiple bits
            val pow2reg = vmRegisters.nextFree()
            code += VmCodeInstruction(Opcode.LOAD, dt, reg1=pow2reg, value=pow2)
            code += VmCodeInstruction(Opcode.LSLN, dt, reg1=reg, reg2=pow2reg)
        } else {
            code += if (factor == 0) {
                VmCodeInstruction(Opcode.LOAD, dt, reg1=reg, value=0)
            } else {
                VmCodeInstruction(Opcode.MUL, dt, reg1=reg, value=factor)
            }
        }
        return code
    }

    internal fun multiplyByConstInplace(dt: VmDataType, address: Int, factor: Int): VmCodeChunk {
        val code = VmCodeChunk()
        if(factor==1)
            return code
        val pow2 = powersOfTwo.indexOf(factor)
        if(pow2==1) {
            // just shift 1 bit
            code += VmCodeInstruction(Opcode.LSLM, dt, value = address)
        }
        else if(pow2>=1) {
            // just shift multiple bits
            val pow2reg = vmRegisters.nextFree()
            code += VmCodeInstruction(Opcode.LOAD, dt, reg1=pow2reg, value=pow2)
            code += VmCodeInstruction(Opcode.LSLNM, dt, reg1=pow2reg, value=address)
        } else {
            if (factor == 0) {
                code += VmCodeInstruction(Opcode.STOREZM, dt, value=address)
            }
            else {
                val factorReg = vmRegisters.nextFree()
                code += VmCodeInstruction(Opcode.LOAD, dt, reg1=factorReg, value = factor)
                code += VmCodeInstruction(Opcode.MULM, dt, reg1=factorReg, value = address)
            }
        }
        return code
    }

    internal fun divideByConstFloat(fpReg: Int, factor: Float): VmCodeChunk {
        val code = VmCodeChunk()
        if(factor==1f)
            return code
        code += if(factor==0f) {
            VmCodeInstruction(Opcode.LOAD, VmDataType.FLOAT, fpReg1 = fpReg, fpValue = Float.MAX_VALUE)
        } else {
            VmCodeInstruction(Opcode.DIVS, VmDataType.FLOAT, fpReg1 = fpReg, fpValue=factor)
        }
        return code
    }

    internal fun divideByConstFloatInplace(address: Int, factor: Float): VmCodeChunk {
        val code = VmCodeChunk()
        if(factor==1f)
            return code
        if(factor==0f) {
            val maxvalueReg = vmRegisters.nextFreeFloat()
            code += VmCodeInstruction(Opcode.LOAD, VmDataType.FLOAT, fpReg1 = maxvalueReg, fpValue = Float.MAX_VALUE)
            code += VmCodeInstruction(Opcode.STOREM, VmDataType.FLOAT, fpReg1 = maxvalueReg, value=address)
        } else {
            val factorReg = vmRegisters.nextFreeFloat()
            code += VmCodeInstruction(Opcode.LOAD, VmDataType.FLOAT, fpReg1=factorReg, fpValue = factor)
            code += VmCodeInstruction(Opcode.DIVSM, VmDataType.FLOAT, fpReg1 = factorReg, value=address)
        }
        return code
    }

    internal fun divideByConst(dt: VmDataType, reg: Int, factor: Int, signed: Boolean): VmCodeChunk {
        val code = VmCodeChunk()
        if(factor==1)
            return code
        val pow2 = powersOfTwo.indexOf(factor)
        if(pow2==1 && !signed) {
            code += VmCodeInstruction(Opcode.LSR, dt, reg1=reg) // simple single bit shift
        }
        else if(pow2>=1 && !signed) {
            // just shift multiple bits
            val pow2reg = vmRegisters.nextFree()
            code += VmCodeInstruction(Opcode.LOAD, dt, reg1=pow2reg, value=pow2)
            code += if(signed)
                VmCodeInstruction(Opcode.ASRN, dt, reg1=reg, reg2=pow2reg)
            else
                VmCodeInstruction(Opcode.LSRN, dt, reg1=reg, reg2=pow2reg)
        } else {
            code += if (factor == 0) {
                VmCodeInstruction(Opcode.LOAD, dt, reg1=reg, value=0xffff)
            } else {
                if(signed)
                    VmCodeInstruction(Opcode.DIVS, dt, reg1=reg, value=factor)
                else
                    VmCodeInstruction(Opcode.DIV, dt, reg1=reg, value=factor)
            }
        }
        return code
    }

    internal fun divideByConstInplace(dt: VmDataType, address: Int, factor: Int, signed: Boolean): VmCodeChunk {
        val code = VmCodeChunk()
        if(factor==1)
            return code
        val pow2 = powersOfTwo.indexOf(factor)
        if(pow2==1 && !signed) {
            code += VmCodeInstruction(Opcode.LSRM, dt, value=address) // just simple bit shift
        }
        else if(pow2>=1 && !signed) {
            // just shift multiple bits
            val pow2reg = vmRegisters.nextFree()
            code += VmCodeInstruction(Opcode.LOAD, dt, reg1=pow2reg, value=pow2)
            code += if(signed)
                VmCodeInstruction(Opcode.ASRNM, dt, reg1=pow2reg, value=address)
            else
                VmCodeInstruction(Opcode.LSRNM, dt, reg1=pow2reg, value=address)
        } else {
            if (factor == 0) {
                val reg = vmRegisters.nextFree()
                code += VmCodeInstruction(Opcode.LOAD, dt, reg1=reg, value=0xffff)
                code += VmCodeInstruction(Opcode.STOREM, dt, reg1=reg, value=address)
            }
            else {
                val factorReg = vmRegisters.nextFree()
                code += VmCodeInstruction(Opcode.LOAD, dt, reg1=factorReg, value = factor)
                code += if(signed)
                    VmCodeInstruction(Opcode.DIVSM, dt, reg1=factorReg, value=address)
                else
                    VmCodeInstruction(Opcode.DIVM, dt, reg1=factorReg, value=address)
            }
        }
        return code
    }

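    // If/else: evaluate the comparison and branch to the else part (or past the if part) on the inverted condition; comparisons against zero use shorter BZ/BNZ code.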
    private fun translate(ifElse: PtIfElse): VmCodeChunk {
        if(ifElse.condition.operator !in ComparisonOperators)
            throw AssemblyError("if condition should only be a binary comparison expression")

        val signed = ifElse.condition.left.type in arrayOf(DataType.BYTE, DataType.WORD, DataType.FLOAT)
        val vmDt = vmType(ifElse.condition.left.type)
        val code = VmCodeChunk()

        fun translateNonZeroComparison(): VmCodeChunk {
            val elseBranch = when(ifElse.condition.operator) {
                "==" -> Opcode.BNE
                "!=" -> Opcode.BEQ
                "<" -> if(signed) Opcode.BGES else Opcode.BGE
                ">" -> if(signed) Opcode.BLES else Opcode.BLE
                "<=" -> if(signed) Opcode.BGTS else Opcode.BGT
                ">=" -> if(signed) Opcode.BLTS else Opcode.BLT
                else -> throw AssemblyError("invalid comparison operator")
            }

            val leftReg = vmRegisters.nextFree()
            val rightReg = vmRegisters.nextFree()
            code += expressionEval.translateExpression(ifElse.condition.left, leftReg, -1)
            code += expressionEval.translateExpression(ifElse.condition.right, rightReg, -1)
            if(ifElse.elseScope.children.isNotEmpty()) {
                // if and else parts
                val elseLabel = createLabelName()
                val afterIfLabel = createLabelName()
                code += VmCodeInstruction(elseBranch, vmDt, reg1=leftReg, reg2=rightReg, labelSymbol = elseLabel)
                code += translateNode(ifElse.ifScope)
                code += VmCodeInstruction(Opcode.JUMP, labelSymbol = afterIfLabel)
                code += VmCodeLabel(elseLabel)
                code += translateNode(ifElse.elseScope)
                code += VmCodeLabel(afterIfLabel)
            } else {
                // only if part
                val afterIfLabel = createLabelName()
                code += VmCodeInstruction(elseBranch, vmDt, reg1=leftReg, reg2=rightReg, labelSymbol = afterIfLabel)
                code += translateNode(ifElse.ifScope)
                code += VmCodeLabel(afterIfLabel)
            }
            return code
        }

        fun translateZeroComparison(): VmCodeChunk {
            fun equalOrNotEqualZero(elseBranch: Opcode): VmCodeChunk {
                val leftReg = vmRegisters.nextFree()
                code += expressionEval.translateExpression(ifElse.condition.left, leftReg, -1)
                if(ifElse.elseScope.children.isNotEmpty()) {
                    // if and else parts
                    val elseLabel = createLabelName()
                    val afterIfLabel = createLabelName()
                    code += VmCodeInstruction(elseBranch, vmDt, reg1=leftReg, labelSymbol = elseLabel)
                    code += translateNode(ifElse.ifScope)
                    code += VmCodeInstruction(Opcode.JUMP, labelSymbol = afterIfLabel)
                    code += VmCodeLabel(elseLabel)
                    code += translateNode(ifElse.elseScope)
                    code += VmCodeLabel(afterIfLabel)
                } else {
                    // only if part
                    val afterIfLabel = createLabelName()
                    code += VmCodeInstruction(elseBranch, vmDt, reg1=leftReg, labelSymbol = afterIfLabel)
                    code += translateNode(ifElse.ifScope)
                    code += VmCodeLabel(afterIfLabel)
                }
                return code
            }

            return when (ifElse.condition.operator) {
                "==" -> {
                    // if x==0: skip the if-part when the left expression is not zero.
                    equalOrNotEqualZero(Opcode.BNZ)
                }
                "!=" -> {
                    // if x!=0: skip the if-part when the left expression is zero.
                    equalOrNotEqualZero(Opcode.BZ)
                }
                else -> {
                    // another comparison against 0, just use regular codegen for this.
                    translateNonZeroComparison()
                }
            }
        }

        return if(constValue(ifElse.condition.right)==0.0)
            translateZeroComparison()
        else
            translateNonZeroComparison()
    }

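    // Post-increment/decrement handles the three possible assignment targets: a plain variable, a memory location and an array element.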
    private fun translate(postIncrDecr: PtPostIncrDecr): VmCodeChunk {
        val code = VmCodeChunk()
        val operationMem: Opcode
        val operationRegister: Opcode
        when(postIncrDecr.operator) {
            "++" -> {
                operationMem = Opcode.INCM
                operationRegister = Opcode.INC
            }
            "--" -> {
                operationMem = Opcode.DECM
                operationRegister = Opcode.DEC
            }
            else -> throw AssemblyError("weird operator")
        }
        val ident = postIncrDecr.target.identifier
        val memory = postIncrDecr.target.memory
        val array = postIncrDecr.target.array
        val vmDt = vmType(postIncrDecr.target.type)
        if(ident!=null) {
            val address = allocations.get(ident.targetName)
            code += VmCodeInstruction(operationMem, vmDt, value = address)
        } else if(memory!=null) {
            if(memory.address is PtNumber) {
                val address = (memory.address as PtNumber).number.toInt()
                code += VmCodeInstruction(operationMem, vmDt, value = address)
            } else {
                val incReg = vmRegisters.nextFree()
                val addressReg = vmRegisters.nextFree()
                code += expressionEval.translateExpression(memory.address, addressReg, -1)
                code += VmCodeInstruction(Opcode.LOADI, vmDt, reg1 = incReg, reg2 = addressReg)
                code += VmCodeInstruction(operationRegister, vmDt, reg1 = incReg)
                code += VmCodeInstruction(Opcode.STOREI, vmDt, reg1 = incReg, reg2 = addressReg)
            }
        } else if (array!=null) {
            val variable = array.variable.targetName
            var variableAddr = allocations.get(variable)
            val itemsize = program.memsizer.memorySize(array.type)
            val fixedIndex = constIntValue(array.index)
            if(fixedIndex!=null) {
                variableAddr += fixedIndex*itemsize
                code += VmCodeInstruction(operationMem, vmDt, value=variableAddr)
            } else {
                val incReg = vmRegisters.nextFree()
                val indexReg = vmRegisters.nextFree()
                code += expressionEval.translateExpression(array.index, indexReg, -1)
                code += VmCodeInstruction(Opcode.LOADX, vmDt, reg1=incReg, reg2=indexReg, value=variableAddr)
                code += VmCodeInstruction(operationRegister, vmDt, reg1=incReg)
                code += VmCodeInstruction(Opcode.STOREX, vmDt, reg1=incReg, reg2=indexReg, value=variableAddr)
            }
        } else
            throw AssemblyError("weird assigntarget")

        return code
    }

    private fun translate(repeat: PtRepeatLoop): VmCodeChunk {
        when (constIntValue(repeat.count)) {
            0 -> return VmCodeChunk()
            1 -> return translateGroup(repeat.children)
            256 -> {
                // 256 iterations can still be done with just a byte counter if you set it to zero as starting value.
                repeat.children[0] = PtNumber(DataType.UBYTE, 0.0, repeat.count.position)
            }
        }

        val code = VmCodeChunk()
        val counterReg = vmRegisters.nextFree()
        val vmDt = vmType(repeat.count.type)
        code += expressionEval.translateExpression(repeat.count, counterReg, -1)
        val repeatLabel = createLabelName()
        code += VmCodeLabel(repeatLabel)
        code += translateNode(repeat.statements)
        code += VmCodeInstruction(Opcode.DEC, vmDt, reg1=counterReg)
        code += VmCodeInstruction(Opcode.BNZ, vmDt, reg1=counterReg, labelSymbol = repeatLabel)
        return code
    }

    private fun translate(jump: PtJump): VmCodeChunk {
        val code = VmCodeChunk()
        if(jump.address!=null)
            throw AssemblyError("cannot jump to memory location in the vm target")
        code += if(jump.generatedLabel!=null)
            VmCodeInstruction(Opcode.JUMP, labelSymbol = listOf(jump.generatedLabel!!))
        else if(jump.identifier!=null)
            VmCodeInstruction(Opcode.JUMP, labelSymbol = jump.identifier!!.targetName)
        else
            throw AssemblyError("weird jump")
        return code
    }

    private fun translateGroup(group: List<PtNode>): VmCodeChunk {
        val code = VmCodeChunk()
        group.forEach { code += translateNode(it) }
        return code
    }

    private fun translate(ret: PtReturn): VmCodeChunk {
        val code = VmCodeChunk()
        val value = ret.value
        if(value!=null) {
            // Call Convention: return value is always returned in r0 (or fr0 if float)
            code += if(value.type==DataType.FLOAT)
                expressionEval.translateExpression(value, -1, 0)
            else
                expressionEval.translateExpression(value, 0, -1)
        }
        code += VmCodeInstruction(Opcode.RETURN)
        return code
    }

    private fun translate(sub: PtSub): VmCodeChunk {
        val code = VmCodeChunk()
        code += VmCodeComment("SUB: ${sub.scopedName} -> ${sub.returntype}")
        code += VmCodeLabel(sub.scopedName)
        for (child in sub.children) {
            code += translateNode(child)
        }
        code += VmCodeComment("SUB-END '${sub.name}'")
        return code
    }

    private fun translate(sub: PtAsmSub): VmCodeChunk {
        val code = VmCodeChunk()
        code += VmCodeComment("ASMSUB: ${sub.scopedName}")
        code += VmCodeLabel(sub.scopedName)
        for (child in sub.children) {
            code += translateNode(child)
        }
        code += VmCodeComment("ASMSUB-END '${sub.name}'")
        return code
    }

    private fun translate(block: PtBlock): VmCodeChunk {
        val code = VmCodeChunk()
        code += VmCodeComment("BLOCK '${block.name}' addr=${block.address} lib=${block.library}")
        for (child in block.children) {
            if(child !is PtAssignment) // global variable initialization is done elsewhere
                code += translateNode(child)
        }
        code += VmCodeComment("BLOCK-END '${block.name}'")
        return code
    }

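    // Maps prog8 datatypes onto the virtual machine's value types; pass-by-reference values are represented by their word-sized address.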
    internal fun vmType(type: DataType): VmDataType {
        return when(type) {
            DataType.BOOL,
            DataType.UBYTE,
            DataType.BYTE -> VmDataType.BYTE
            DataType.UWORD,
            DataType.WORD -> VmDataType.WORD
            DataType.FLOAT -> VmDataType.FLOAT
            in PassByReferenceDatatypes -> VmDataType.WORD
            else -> throw AssemblyError("no vm datatype for $type")
        }
    }

    private var labelSequenceNumber = 0
    internal fun createLabelName(): List<String> {
        labelSequenceNumber++
        return listOf("prog8_label_gen_$labelSequenceNumber")
    }

    internal fun translateBuiltinFunc(call: PtBuiltinFunctionCall, resultRegister: Int): VmCodeChunk =
        builtinFuncGen.translate(call, resultRegister)

    internal fun isZero(expression: PtExpression): Boolean = expression is PtNumber && expression.number==0.0

    internal fun isOne(expression: PtExpression): Boolean = expression is PtNumber && expression.number==1.0
}

@ -1,864 +0,0 @@
package prog8.codegen.virtual

import prog8.code.StRomSub
import prog8.code.StStaticVariable
import prog8.code.StSub
import prog8.code.ast.*
import prog8.code.core.*
import prog8.intermediate.Opcode
import prog8.intermediate.VmDataType


internal class ExpressionGen(private val codeGen: CodeGen) {
fun translateExpression(expr: PtExpression, resultRegister: Int, resultFpRegister: Int): VmCodeChunk {
|
||||
require(codeGen.vmRegisters.peekNext() > resultRegister)
|
||||
|
||||
val code = VmCodeChunk()
|
||||
|
||||
when (expr) {
|
||||
is PtMachineRegister -> {
|
||||
if(resultRegister!=expr.register) {
|
||||
val vmDt = codeGen.vmType(expr.type)
|
||||
code += VmCodeInstruction(Opcode.LOADR, vmDt, reg1=resultRegister, reg2=expr.register)
|
||||
}
|
||||
}
|
||||
is PtNumber -> {
|
||||
val vmDt = codeGen.vmType(expr.type)
|
||||
code += if(vmDt==VmDataType.FLOAT)
|
||||
VmCodeInstruction(Opcode.LOAD, vmDt, fpReg1 = resultFpRegister, fpValue = expr.number.toFloat())
|
||||
else
|
||||
VmCodeInstruction(Opcode.LOAD, vmDt, reg1=resultRegister, value=expr.number.toInt())
|
||||
}
|
||||
is PtIdentifier -> {
|
||||
val vmDt = codeGen.vmType(expr.type)
|
||||
val mem = codeGen.allocations.get(expr.targetName)
|
||||
code += if (expr.type in PassByValueDatatypes) {
|
||||
if(vmDt==VmDataType.FLOAT)
|
||||
VmCodeInstruction(Opcode.LOADM, vmDt, fpReg1 = resultFpRegister, value = mem)
|
||||
else
|
||||
VmCodeInstruction(Opcode.LOADM, vmDt, reg1 = resultRegister, value = mem)
|
||||
} else {
|
||||
// for strings and arrays etc., load the *address* of the value instead
|
||||
VmCodeInstruction(Opcode.LOAD, vmDt, reg1 = resultRegister, value = mem)
|
||||
}
|
||||
}
|
||||
is PtAddressOf -> {
|
||||
val vmDt = codeGen.vmType(expr.type)
|
||||
val mem = codeGen.allocations.get(expr.identifier.targetName)
|
||||
code += VmCodeInstruction(Opcode.LOAD, vmDt, reg1=resultRegister, value=mem)
|
||||
}
|
||||
is PtMemoryByte -> {
|
||||
if(expr.address is PtNumber) {
|
||||
val address = (expr.address as PtNumber).number.toInt()
|
||||
code += VmCodeInstruction(Opcode.LOADM, VmDataType.BYTE, reg1=resultRegister, value = address)
|
||||
} else {
|
||||
val addressRegister = codeGen.vmRegisters.nextFree()
|
||||
code += translateExpression(expr.address, addressRegister, -1)
|
||||
code += VmCodeInstruction(Opcode.LOADI, VmDataType.BYTE, reg1=resultRegister, reg2=addressRegister)
|
||||
}
|
||||
}
|
||||
is PtTypeCast -> code += translate(expr, resultRegister, resultFpRegister)
|
||||
is PtPrefix -> code += translate(expr, resultRegister)
|
||||
is PtArrayIndexer -> code += translate(expr, resultRegister, resultFpRegister)
|
||||
is PtBinaryExpression -> code += translate(expr, resultRegister, resultFpRegister)
|
||||
is PtBuiltinFunctionCall -> code += codeGen.translateBuiltinFunc(expr, resultRegister)
|
||||
is PtFunctionCall -> code += translate(expr, resultRegister, resultFpRegister)
|
||||
is PtContainmentCheck -> code += translate(expr, resultRegister, resultFpRegister)
|
||||
is PtRange,
|
||||
is PtArray,
|
||||
is PtString -> throw AssemblyError("range/arrayliteral/string should no longer occur as expression")
|
||||
else -> throw AssemblyError("weird expression")
|
||||
}
|
||||
return code
|
||||
}
|
||||
|
||||
private fun translate(check: PtContainmentCheck, resultRegister: Int, resultFpRegister: Int): VmCodeChunk {
|
||||
val code = VmCodeChunk()
|
||||
code += translateExpression(check.element, resultRegister, -1) // load the element to check in resultRegister
|
||||
val iterable = codeGen.symbolTable.flat.getValue(check.iterable.targetName) as StStaticVariable
|
||||
when(iterable.dt) {
|
||||
DataType.STR -> {
|
||||
val call = PtFunctionCall(listOf("prog8_lib", "string_contains"), false, DataType.UBYTE, check.position)
|
||||
call.children.add(check.element)
|
||||
call.children.add(check.iterable)
|
||||
code += translate(call, resultRegister, resultFpRegister)
|
||||
}
|
||||
DataType.ARRAY_UB, DataType.ARRAY_B -> {
|
||||
val call = PtFunctionCall(listOf("prog8_lib", "bytearray_contains"), false, DataType.UBYTE, check.position)
|
||||
call.children.add(check.element)
|
||||
call.children.add(check.iterable)
|
||||
call.children.add(PtNumber(DataType.UBYTE, iterable.length!!.toDouble(), iterable.position))
|
||||
code += translate(call, resultRegister, resultFpRegister)
|
||||
}
|
||||
DataType.ARRAY_UW, DataType.ARRAY_W -> {
|
||||
val call = PtFunctionCall(listOf("prog8_lib", "wordarray_contains"), false, DataType.UBYTE, check.position)
|
||||
call.children.add(check.element)
|
||||
call.children.add(check.iterable)
|
||||
call.children.add(PtNumber(DataType.UBYTE, iterable.length!!.toDouble(), iterable.position))
|
||||
code += translate(call, resultRegister, resultFpRegister)
|
||||
}
|
||||
DataType.ARRAY_F -> throw AssemblyError("containment check in float-array not supported")
|
||||
else -> throw AssemblyError("weird iterable dt ${iterable.dt} for ${check.iterable.targetName}")
|
||||
}
|
||||
return code
|
||||
}
|
||||
|
||||
private fun translate(arrayIx: PtArrayIndexer, resultRegister: Int, resultFpRegister: Int): VmCodeChunk {
|
||||
val eltSize = codeGen.program.memsizer.memorySize(arrayIx.type)
|
||||
val vmDt = codeGen.vmType(arrayIx.type)
|
||||
val code = VmCodeChunk()
|
||||
val idxReg = codeGen.vmRegisters.nextFree()
|
||||
val arrayLocation = codeGen.allocations.get(arrayIx.variable.targetName)
|
||||
|
||||
if(arrayIx.variable.type==DataType.UWORD) {
|
||||
// indexing a pointer var instead of a real array or string
|
||||
if(eltSize!=1)
|
||||
throw AssemblyError("non-array var indexing requires bytes dt")
|
||||
if(arrayIx.index.type!=DataType.UBYTE)
|
||||
throw AssemblyError("non-array var indexing requires bytes index")
|
||||
code += translateExpression(arrayIx.index, idxReg, -1)
|
||||
code += VmCodeInstruction(Opcode.LOADIX, vmDt, reg1=resultRegister, reg2=idxReg, value = arrayLocation)
|
||||
return code
|
||||
}
|
||||
|
||||
if(arrayIx.index is PtNumber) {
|
||||
// optimized code when index is known - just calculate the memory address here
|
||||
val memOffset = (arrayIx.index as PtNumber).number.toInt() * eltSize
|
||||
if(vmDt==VmDataType.FLOAT)
|
||||
code += VmCodeInstruction(Opcode.LOADM, VmDataType.FLOAT, fpReg1=resultFpRegister, value=arrayLocation+memOffset)
|
||||
else
|
||||
code += VmCodeInstruction(Opcode.LOADM, vmDt, reg1=resultRegister, value=arrayLocation+memOffset)
|
||||
} else {
|
||||
code += translateExpression(arrayIx.index, idxReg, -1)
|
||||
if(eltSize>1)
|
||||
code += codeGen.multiplyByConst(VmDataType.BYTE, idxReg, eltSize)
|
||||
if(vmDt==VmDataType.FLOAT)
|
||||
code += VmCodeInstruction(Opcode.LOADX, VmDataType.FLOAT, fpReg1 = resultFpRegister, reg1=idxReg, value = arrayLocation)
|
||||
else
|
||||
code += VmCodeInstruction(Opcode.LOADX, vmDt, reg1=resultRegister, reg2=idxReg, value = arrayLocation)
|
||||
}
|
||||
return code
|
||||
}
|
||||
|
||||
private fun translate(expr: PtPrefix, resultRegister: Int): VmCodeChunk {
|
||||
val code = VmCodeChunk()
|
||||
code += translateExpression(expr.value, resultRegister, -1)
|
||||
val vmDt = codeGen.vmType(expr.type)
|
||||
when(expr.operator) {
|
||||
"+" -> { }
|
||||
"-" -> {
|
||||
code += VmCodeInstruction(Opcode.NEG, vmDt, reg1=resultRegister)
|
||||
}
|
||||
"~" -> {
|
||||
val mask = if(vmDt==VmDataType.BYTE) 0x00ff else 0xffff
|
||||
code += VmCodeInstruction(Opcode.XOR, vmDt, reg1=resultRegister, value=mask)
|
||||
}
|
||||
else -> throw AssemblyError("weird prefix operator")
|
||||
}
|
||||
return code
|
||||
}
|
||||
|
||||
private fun translate(cast: PtTypeCast, predefinedResultRegister: Int, predefinedResultFpRegister: Int): VmCodeChunk {
|
||||
val code = VmCodeChunk()
|
||||
if(cast.type==cast.value.type)
|
||||
return code
|
||||
val actualResultFpReg = if(predefinedResultFpRegister>=0) predefinedResultFpRegister else codeGen.vmRegisters.nextFreeFloat()
|
||||
val actualResultReg = if(predefinedResultRegister>=0) predefinedResultRegister else codeGen.vmRegisters.nextFree()
|
||||
if(cast.value.type==DataType.FLOAT) {
|
||||
// a cast from float to integer, so evaluate the value into a float register first
|
||||
code += translateExpression(cast.value, -1, actualResultFpReg)
|
||||
}
|
||||
else
|
||||
code += translateExpression(cast.value, actualResultReg, -1)
|
||||
when(cast.type) {
|
||||
DataType.UBYTE -> {
|
||||
when(cast.value.type) {
|
||||
DataType.BYTE, DataType.UWORD, DataType.WORD -> { /* just keep the LSB as it is */ }
|
||||
DataType.FLOAT -> code += VmCodeInstruction(Opcode.FTOUB, VmDataType.FLOAT, reg1=actualResultReg, fpReg1 = actualResultFpReg)
|
||||
else -> throw AssemblyError("weird cast value type")
|
||||
}
|
||||
}
|
||||
DataType.BYTE -> {
|
||||
when(cast.value.type) {
|
||||
DataType.UBYTE, DataType.UWORD, DataType.WORD -> { /* just keep the LSB as it is */ }
|
||||
DataType.FLOAT -> code += VmCodeInstruction(Opcode.FTOSB, VmDataType.FLOAT, reg1=actualResultReg, fpReg1 = actualResultFpReg)
|
||||
else -> throw AssemblyError("weird cast value type")
|
||||
}
|
||||
}
|
||||
DataType.UWORD -> {
|
||||
when(cast.value.type) {
|
||||
DataType.BYTE -> {
|
||||
// byte -> uword: sign extend
|
||||
code += VmCodeInstruction(Opcode.EXTS, type = VmDataType.BYTE, reg1 = actualResultReg)
|
||||
}
|
||||
DataType.UBYTE -> {
|
||||
// ubyte -> uword: zero extend
|
||||
code += VmCodeInstruction(Opcode.EXT, type = VmDataType.BYTE, reg1 = actualResultReg)
|
||||
}
|
||||
DataType.WORD -> { }
|
||||
DataType.FLOAT -> {
|
||||
code += VmCodeInstruction(Opcode.FTOUW, VmDataType.FLOAT, reg1=actualResultReg, fpReg1 = actualResultFpReg)
|
||||
}
|
||||
else -> throw AssemblyError("weird cast value type")
|
||||
}
|
||||
}
|
||||
DataType.WORD -> {
|
||||
when(cast.value.type) {
|
||||
DataType.BYTE -> {
|
||||
// byte -> word: sign extend
|
||||
code += VmCodeInstruction(Opcode.EXTS, type = VmDataType.BYTE, reg1 = actualResultReg)
|
||||
}
|
||||
DataType.UBYTE -> {
|
||||
// ubyte -> word: zero extend
|
||||
code += VmCodeInstruction(Opcode.EXT, type = VmDataType.BYTE, reg1 = actualResultReg)
|
||||
}
|
||||
DataType.UWORD -> { }
|
||||
DataType.FLOAT -> {
|
||||
code += VmCodeInstruction(Opcode.FTOSW, VmDataType.FLOAT, reg1=actualResultReg, fpReg1 = actualResultFpReg)
|
||||
}
|
||||
else -> throw AssemblyError("weird cast value type")
|
||||
}
|
||||
}
|
||||
DataType.FLOAT -> {
|
||||
code += when(cast.value.type) {
|
||||
DataType.UBYTE -> {
|
||||
VmCodeInstruction(Opcode.FFROMUB, VmDataType.FLOAT, reg1=actualResultReg, fpReg1 = actualResultFpReg)
|
||||
}
|
||||
DataType.BYTE -> {
|
||||
VmCodeInstruction(Opcode.FFROMSB, VmDataType.FLOAT, reg1=actualResultReg, fpReg1 = actualResultFpReg)
|
||||
}
|
||||
DataType.UWORD -> {
|
||||
VmCodeInstruction(Opcode.FFROMUW, VmDataType.FLOAT, reg1=actualResultReg, fpReg1 = actualResultFpReg)
|
||||
}
|
||||
DataType.WORD -> {
|
||||
VmCodeInstruction(Opcode.FFROMSW, VmDataType.FLOAT, reg1=actualResultReg, fpReg1 = actualResultFpReg)
|
||||
}
|
||||
else -> throw AssemblyError("weird cast value type")
|
||||
}
|
||||
}
|
||||
else -> throw AssemblyError("weird cast type")
|
||||
}
|
||||
return code
|
||||
}
|
||||
|
||||
private fun translate(binExpr: PtBinaryExpression, resultRegister: Int, resultFpRegister: Int): VmCodeChunk {
|
||||
val vmDt = codeGen.vmType(binExpr.left.type)
|
||||
val signed = binExpr.left.type in SignedDatatypes
|
||||
return when(binExpr.operator) {
|
||||
"+" -> operatorPlus(binExpr, vmDt, resultRegister, resultFpRegister)
|
||||
"-" -> operatorMinus(binExpr, vmDt, resultRegister, resultFpRegister)
|
||||
"*" -> operatorMultiply(binExpr, vmDt, resultRegister, resultFpRegister)
|
||||
"/" -> operatorDivide(binExpr, vmDt, resultRegister, resultFpRegister, signed)
|
||||
"%" -> operatorModulo(binExpr, vmDt, resultRegister)
|
||||
"|" -> operatorOr(binExpr, vmDt, resultRegister)
|
||||
"&" -> operatorAnd(binExpr, vmDt, resultRegister)
|
||||
"^" -> operatorXor(binExpr, vmDt, resultRegister)
|
||||
"<<" -> operatorShiftLeft(binExpr, vmDt, resultRegister)
|
||||
">>" -> operatorShiftRight(binExpr, vmDt, resultRegister, signed)
|
||||
"==" -> operatorEquals(binExpr, vmDt, resultRegister, false)
|
||||
"!=" -> operatorEquals(binExpr, vmDt, resultRegister, true)
|
||||
"<" -> operatorLessThan(binExpr, vmDt, resultRegister, signed, false)
|
||||
">" -> operatorGreaterThan(binExpr, vmDt, resultRegister, signed, false)
|
||||
"<=" -> operatorLessThan(binExpr, vmDt, resultRegister, signed, true)
|
||||
">=" -> operatorGreaterThan(binExpr, vmDt, resultRegister, signed, true)
|
||||
else -> throw AssemblyError("weird operator ${binExpr.operator}")
|
||||
}
|
||||
}
|
||||
|
||||
private fun operatorGreaterThan(
|
||||
binExpr: PtBinaryExpression,
|
||||
vmDt: VmDataType,
|
||||
resultRegister: Int,
|
||||
signed: Boolean,
|
||||
greaterEquals: Boolean
|
||||
): VmCodeChunk {
|
||||
val code = VmCodeChunk()
|
||||
if(vmDt==VmDataType.FLOAT) {
|
||||
val leftFpReg = codeGen.vmRegisters.nextFreeFloat()
|
||||
val rightFpReg = codeGen.vmRegisters.nextFreeFloat()
|
||||
val zeroRegister = codeGen.vmRegisters.nextFree()
|
||||
code += translateExpression(binExpr.left, -1, leftFpReg)
|
||||
code += translateExpression(binExpr.right, -1, rightFpReg)
|
||||
code += VmCodeInstruction(Opcode.FCOMP, VmDataType.FLOAT, reg1=resultRegister, fpReg1 = leftFpReg, fpReg2 = rightFpReg)
|
||||
code += VmCodeInstruction(Opcode.LOAD, VmDataType.BYTE, reg1=zeroRegister, value=0)
|
||||
val ins = if (signed) {
|
||||
if (greaterEquals) Opcode.SGES else Opcode.SGTS
|
||||
} else {
|
||||
if (greaterEquals) Opcode.SGE else Opcode.SGT
|
||||
}
|
||||
code += VmCodeInstruction(ins, VmDataType.BYTE, reg1 = resultRegister, reg2 = zeroRegister)
|
||||
} else {
|
||||
if(binExpr.left.type==DataType.STR && binExpr.right.type==DataType.STR) {
|
||||
val comparisonCall = PtFunctionCall(listOf("prog8_lib", "string_compare"), false, DataType.BYTE, Position.DUMMY)
|
||||
comparisonCall.children.add(binExpr.left)
|
||||
comparisonCall.children.add(binExpr.right)
|
||||
code += translate(comparisonCall, resultRegister, -1)
|
||||
val zeroRegister = codeGen.vmRegisters.nextFree()
|
||||
code += VmCodeInstruction(Opcode.LOAD, VmDataType.BYTE, reg1=zeroRegister, value=0)
|
||||
code += if(greaterEquals)
|
||||
VmCodeInstruction(Opcode.SGES, VmDataType.BYTE, reg1=resultRegister, reg2=zeroRegister)
|
||||
else
|
||||
VmCodeInstruction(Opcode.SGTS, VmDataType.BYTE, reg1=resultRegister, reg2=zeroRegister)
|
||||
} else {
|
||||
val rightResultReg = codeGen.vmRegisters.nextFree()
|
||||
code += translateExpression(binExpr.left, resultRegister, -1)
|
||||
code += translateExpression(binExpr.right, rightResultReg, -1)
|
||||
val ins = if (signed) {
|
||||
if (greaterEquals) Opcode.SGES else Opcode.SGTS
|
||||
} else {
|
||||
if (greaterEquals) Opcode.SGE else Opcode.SGT
|
||||
}
|
||||
code += VmCodeInstruction(ins, vmDt, reg1 = resultRegister, reg2 = rightResultReg)
|
||||
}
|
||||
}
|
||||
return code
|
||||
}
|
||||
|
||||
private fun operatorLessThan(
|
||||
binExpr: PtBinaryExpression,
|
||||
vmDt: VmDataType,
|
||||
resultRegister: Int,
|
||||
signed: Boolean,
|
||||
lessEquals: Boolean
|
||||
): VmCodeChunk {
|
||||
val code = VmCodeChunk()
|
||||
if(vmDt==VmDataType.FLOAT) {
|
||||
val leftFpReg = codeGen.vmRegisters.nextFreeFloat()
|
||||
val rightFpReg = codeGen.vmRegisters.nextFreeFloat()
|
||||
val zeroRegister = codeGen.vmRegisters.nextFree()
|
||||
code += translateExpression(binExpr.left, -1, leftFpReg)
|
||||
code += translateExpression(binExpr.right, -1, rightFpReg)
|
||||
code += VmCodeInstruction(Opcode.FCOMP, VmDataType.FLOAT, reg1=resultRegister, fpReg1 = leftFpReg, fpReg2 = rightFpReg)
|
||||
code += VmCodeInstruction(Opcode.LOAD, VmDataType.BYTE, reg1=zeroRegister, value=0)
|
||||
val ins = if (signed) {
|
||||
if (lessEquals) Opcode.SLES else Opcode.SLTS
|
||||
} else {
|
||||
if (lessEquals) Opcode.SLE else Opcode.SLT
|
||||
}
|
||||
code += VmCodeInstruction(ins, VmDataType.BYTE, reg1 = resultRegister, reg2 = zeroRegister)
|
||||
} else {
|
||||
if(binExpr.left.type==DataType.STR && binExpr.right.type==DataType.STR) {
|
||||
val comparisonCall = PtFunctionCall(listOf("prog8_lib", "string_compare"), false, DataType.BYTE, Position.DUMMY)
|
||||
comparisonCall.children.add(binExpr.left)
|
||||
comparisonCall.children.add(binExpr.right)
|
||||
code += translate(comparisonCall, resultRegister, -1)
|
||||
val zeroRegister = codeGen.vmRegisters.nextFree()
|
||||
code += VmCodeInstruction(Opcode.LOAD, VmDataType.BYTE, reg1=zeroRegister, value=0)
|
||||
code += if(lessEquals)
|
||||
VmCodeInstruction(Opcode.SLES, VmDataType.BYTE, reg1=resultRegister, reg2=zeroRegister)
|
||||
else
|
||||
VmCodeInstruction(Opcode.SLTS, VmDataType.BYTE, reg1=resultRegister, reg2=zeroRegister)
|
||||
} else {
|
||||
val rightResultReg = codeGen.vmRegisters.nextFree()
|
||||
code += translateExpression(binExpr.left, resultRegister, -1)
|
||||
code += translateExpression(binExpr.right, rightResultReg, -1)
|
||||
val ins = if (signed) {
|
||||
if (lessEquals) Opcode.SLES else Opcode.SLTS
|
||||
} else {
|
||||
if (lessEquals) Opcode.SLE else Opcode.SLT
|
||||
}
|
||||
code += VmCodeInstruction(ins, vmDt, reg1 = resultRegister, reg2 = rightResultReg)
|
||||
}
|
||||
}
|
||||
return code
|
||||
}
|
||||
|
||||
private fun operatorEquals(binExpr: PtBinaryExpression, vmDt: VmDataType, resultRegister: Int, notEquals: Boolean): VmCodeChunk {
|
||||
val code = VmCodeChunk()
|
||||
if(vmDt==VmDataType.FLOAT) {
|
||||
val leftFpReg = codeGen.vmRegisters.nextFreeFloat()
|
||||
val rightFpReg = codeGen.vmRegisters.nextFreeFloat()
|
||||
code += translateExpression(binExpr.left, -1, leftFpReg)
|
||||
code += translateExpression(binExpr.right, -1, rightFpReg)
|
||||
if (notEquals) {
|
||||
code += VmCodeInstruction(Opcode.FCOMP, VmDataType.FLOAT, reg1=resultRegister, fpReg1 = leftFpReg, fpReg2 = rightFpReg)
|
||||
} else {
|
||||
val label = codeGen.createLabelName()
|
||||
val valueReg = codeGen.vmRegisters.nextFree()
|
||||
code += VmCodeInstruction(Opcode.LOAD, VmDataType.BYTE, reg1=resultRegister, value=1)
|
||||
code += VmCodeInstruction(Opcode.FCOMP, VmDataType.FLOAT, reg1=valueReg, fpReg1 = leftFpReg, fpReg2 = rightFpReg)
|
||||
code += VmCodeInstruction(Opcode.BZ, VmDataType.BYTE, reg1=valueReg, labelSymbol = label)
|
||||
code += VmCodeInstruction(Opcode.LOAD, VmDataType.BYTE, reg1=resultRegister, value=0)
|
||||
code += VmCodeLabel(label)
|
||||
}
|
||||
} else {
|
||||
if(binExpr.left.type==DataType.STR && binExpr.right.type==DataType.STR) {
|
||||
val comparisonCall = PtFunctionCall(listOf("prog8_lib", "string_compare"), false, DataType.BYTE, Position.DUMMY)
|
||||
comparisonCall.children.add(binExpr.left)
|
||||
comparisonCall.children.add(binExpr.right)
|
||||
code += translate(comparisonCall, resultRegister, -1)
|
||||
if(!notEquals)
|
||||
code += VmCodeInstruction(Opcode.INV, vmDt, reg1=resultRegister)
|
||||
code += VmCodeInstruction(Opcode.AND, vmDt, reg1=resultRegister, value=1)
|
||||
} else {
|
||||
val rightResultReg = codeGen.vmRegisters.nextFree()
|
||||
code += translateExpression(binExpr.left, resultRegister, -1)
|
||||
code += translateExpression(binExpr.right, rightResultReg, -1)
|
||||
val opcode = if (notEquals) Opcode.SNE else Opcode.SEQ
|
||||
code += VmCodeInstruction(opcode, vmDt, reg1 = resultRegister, reg2 = rightResultReg)
|
||||
}
|
||||
}
|
||||
return code
|
||||
}
|
||||
|
||||
private fun operatorShiftRight(binExpr: PtBinaryExpression, vmDt: VmDataType, resultRegister: Int, signed: Boolean): VmCodeChunk {
|
||||
val code = VmCodeChunk()
|
||||
if(codeGen.isOne(binExpr.right)) {
|
||||
code += translateExpression(binExpr.left, resultRegister, -1)
|
||||
val opc = if (signed) Opcode.ASR else Opcode.LSR
|
||||
code += VmCodeInstruction(opc, vmDt, reg1 = resultRegister)
|
||||
} else {
|
||||
val rightResultReg = codeGen.vmRegisters.nextFree()
|
||||
code += translateExpression(binExpr.left, resultRegister, -1)
|
||||
code += translateExpression(binExpr.right, rightResultReg, -1)
|
||||
val opc = if (signed) Opcode.ASRN else Opcode.LSRN
|
||||
code += VmCodeInstruction(opc, vmDt, reg1 = resultRegister, reg2 = rightResultReg)
|
||||
}
|
||||
return code
|
||||
}
|
||||
|
||||
internal fun operatorShiftRightInplace(address: Int, vmDt: VmDataType, signed: Boolean, operand: PtExpression): VmCodeChunk {
|
||||
val code = VmCodeChunk()
|
||||
if(codeGen.isOne(operand)) {
|
||||
val opc = if (signed) Opcode.ASRM else Opcode.LSRM
|
||||
code += VmCodeInstruction(opc, vmDt, value=address)
|
||||
} else {
|
||||
val operandReg = codeGen.vmRegisters.nextFree()
|
||||
code += translateExpression(operand, operandReg, -1)
|
||||
val opc = if (signed) Opcode.ASRNM else Opcode.LSRNM
|
||||
code += VmCodeInstruction(opc, vmDt, reg1 = operandReg, value=address)
|
||||
}
|
||||
return code
|
||||
}
|
||||
|
||||
private fun operatorShiftLeft(binExpr: PtBinaryExpression, vmDt: VmDataType, resultRegister: Int): VmCodeChunk {
|
||||
val code = VmCodeChunk()
|
||||
if(codeGen.isOne(binExpr.right)){
|
||||
code += translateExpression(binExpr.left, resultRegister, -1)
|
||||
code += VmCodeInstruction(Opcode.LSL, vmDt, reg1=resultRegister)
|
||||
} else {
|
||||
val rightResultReg = codeGen.vmRegisters.nextFree()
|
||||
code += translateExpression(binExpr.left, resultRegister, -1)
|
||||
code += translateExpression(binExpr.right, rightResultReg, -1)
|
||||
code += VmCodeInstruction(Opcode.LSLN, vmDt, reg1=resultRegister, reg2=rightResultReg)
|
||||
}
|
||||
return code
|
||||
}
|
||||
|
||||
internal fun operatorShiftLeftInplace(address: Int, vmDt: VmDataType, operand: PtExpression): VmCodeChunk {
|
||||
val code = VmCodeChunk()
|
||||
if(codeGen.isOne(operand)){
|
||||
code += VmCodeInstruction(Opcode.LSLM, vmDt, value=address)
|
||||
} else {
|
||||
val operandReg = codeGen.vmRegisters.nextFree()
|
||||
code += translateExpression(operand, operandReg, -1)
|
||||
code += VmCodeInstruction(Opcode.LSLNM, vmDt, reg1=operandReg, value=address)
|
||||
}
|
||||
return code
|
||||
}
|
||||
|
||||
private fun operatorXor(binExpr: PtBinaryExpression, vmDt: VmDataType, resultRegister: Int): VmCodeChunk {
|
||||
val code = VmCodeChunk()
|
||||
if(binExpr.right is PtNumber) {
|
||||
code += translateExpression(binExpr.left, resultRegister, -1)
|
||||
code += VmCodeInstruction(Opcode.XOR, vmDt, reg1 = resultRegister, value=(binExpr.right as PtNumber).number.toInt())
|
||||
} else {
|
||||
val rightResultReg = codeGen.vmRegisters.nextFree()
|
||||
code += translateExpression(binExpr.left, resultRegister, -1)
|
||||
code += translateExpression(binExpr.right, rightResultReg, -1)
|
||||
code += VmCodeInstruction(Opcode.XORR, vmDt, reg1 = resultRegister, reg2 = rightResultReg)
|
||||
}
|
||||
return code
|
||||
}
|
||||
|
||||
internal fun operatorXorInplace(address: Int, vmDt: VmDataType, operand: PtExpression): VmCodeChunk {
|
||||
val code = VmCodeChunk()
|
||||
val operandReg = codeGen.vmRegisters.nextFree()
|
||||
code += translateExpression(operand, operandReg, -1)
|
||||
code += VmCodeInstruction(Opcode.XORM, vmDt, reg1=operandReg, value = address)
|
||||
return code
|
||||
}
|
||||
|
||||
private fun operatorAnd(binExpr: PtBinaryExpression, vmDt: VmDataType, resultRegister: Int): VmCodeChunk {
|
||||
val code = VmCodeChunk()
|
||||
if(binExpr.right is PtNumber) {
|
||||
code += translateExpression(binExpr.left, resultRegister, -1)
|
||||
code += VmCodeInstruction(Opcode.AND, vmDt, reg1 = resultRegister, value=(binExpr.right as PtNumber).number.toInt())
|
||||
} else {
|
||||
val rightResultReg = codeGen.vmRegisters.nextFree()
|
||||
code += translateExpression(binExpr.left, resultRegister, -1)
|
||||
code += translateExpression(binExpr.right, rightResultReg, -1)
|
||||
code += VmCodeInstruction(Opcode.ANDR, vmDt, reg1 = resultRegister, reg2 = rightResultReg)
|
||||
}
|
||||
return code
|
||||
}
|
||||
|
||||
internal fun operatorAndInplace(address: Int, vmDt: VmDataType, operand: PtExpression): VmCodeChunk {
|
||||
val code = VmCodeChunk()
|
||||
val operandReg = codeGen.vmRegisters.nextFree()
|
||||
code += translateExpression(operand, operandReg, -1)
|
||||
code += VmCodeInstruction(Opcode.ANDM, vmDt, reg1=operandReg, value=address)
|
||||
return code
|
||||
}
|
||||
|
||||
private fun operatorOr(binExpr: PtBinaryExpression, vmDt: VmDataType, resultRegister: Int): VmCodeChunk {
|
||||
val code = VmCodeChunk()
|
||||
if(binExpr.right is PtNumber) {
|
||||
code += translateExpression(binExpr.left, resultRegister, -1)
|
||||
code += VmCodeInstruction(Opcode.OR, vmDt, reg1 = resultRegister, value=(binExpr.right as PtNumber).number.toInt())
|
||||
} else {
|
||||
val rightResultReg = codeGen.vmRegisters.nextFree()
|
||||
code += translateExpression(binExpr.left, resultRegister, -1)
|
||||
code += translateExpression(binExpr.right, rightResultReg, -1)
|
||||
code += VmCodeInstruction(Opcode.ORR, vmDt, reg1 = resultRegister, reg2 = rightResultReg)
|
||||
}
|
||||
return code
|
||||
}
|
||||
|
||||
internal fun operatorOrInplace(address: Int, vmDt: VmDataType, operand: PtExpression): VmCodeChunk {
|
||||
val code = VmCodeChunk()
|
||||
val operandReg = codeGen.vmRegisters.nextFree()
|
||||
code += translateExpression(operand, operandReg, -1)
|
||||
code += VmCodeInstruction(Opcode.ORM, vmDt, reg1=operandReg, value = address)
|
||||
return code
|
||||
}
|
||||
|
||||
private fun operatorModulo(binExpr: PtBinaryExpression, vmDt: VmDataType, resultRegister: Int): VmCodeChunk {
|
||||
if(vmDt==VmDataType.FLOAT)
|
||||
throw IllegalArgumentException("floating-point modulo not supported")
|
||||
val code = VmCodeChunk()
|
||||
val rightResultReg = codeGen.vmRegisters.nextFree()
|
||||
if(binExpr.right is PtNumber) {
|
||||
code += translateExpression(binExpr.left, resultRegister, -1)
|
||||
code += VmCodeInstruction(Opcode.MOD, vmDt, reg1 = resultRegister, value=(binExpr.right as PtNumber).number.toInt())
|
||||
} else {
|
||||
code += translateExpression(binExpr.left, resultRegister, -1)
|
||||
            code += translateExpression(binExpr.right, rightResultReg, -1)
            code += VmCodeInstruction(Opcode.MODR, vmDt, reg1 = resultRegister, reg2 = rightResultReg)
        }
        return code
    }

    private fun operatorDivide(binExpr: PtBinaryExpression,
                               vmDt: VmDataType,
                               resultRegister: Int,
                               resultFpRegister: Int,
                               signed: Boolean): VmCodeChunk {
        val code = VmCodeChunk()
        val constFactorRight = binExpr.right as? PtNumber
        if(vmDt==VmDataType.FLOAT) {
            if(constFactorRight!=null && constFactorRight.type!=DataType.FLOAT) {
                code += translateExpression(binExpr.left, -1, resultFpRegister)
                val factor = constFactorRight.number.toFloat()
                code += codeGen.divideByConstFloat(resultFpRegister, factor)
            } else {
                val rightResultFpReg = codeGen.vmRegisters.nextFreeFloat()
                code += translateExpression(binExpr.left, -1, resultFpRegister)
                code += translateExpression(binExpr.right, -1, rightResultFpReg)
                code += if(signed)
                    VmCodeInstruction(Opcode.DIVSR, vmDt, fpReg1 = resultFpRegister, fpReg2=rightResultFpReg)
                else
                    VmCodeInstruction(Opcode.DIVR, vmDt, fpReg1 = resultFpRegister, fpReg2=rightResultFpReg)
            }
        } else {
            if(constFactorRight!=null && constFactorRight.type!=DataType.FLOAT) {
                code += translateExpression(binExpr.left, resultRegister, -1)
                val factor = constFactorRight.number.toInt()
                code += codeGen.divideByConst(vmDt, resultRegister, factor, signed)
            } else {
                val rightResultReg = codeGen.vmRegisters.nextFree()
                if(binExpr.right is PtNumber) {
                    code += translateExpression(binExpr.left, resultRegister, -1)
                    code += if (signed)
                        VmCodeInstruction(Opcode.DIVS, vmDt, reg1 = resultRegister, value=(binExpr.right as PtNumber).number.toInt())
                    else
                        VmCodeInstruction(Opcode.DIV, vmDt, reg1 = resultRegister, value=(binExpr.right as PtNumber).number.toInt())
                } else {
                    code += translateExpression(binExpr.left, resultRegister, -1)
                    code += translateExpression(binExpr.right, rightResultReg, -1)
                    code += if (signed)
                        VmCodeInstruction(Opcode.DIVSR, vmDt, reg1 = resultRegister, reg2 = rightResultReg)
                    else
                        VmCodeInstruction(Opcode.DIVR, vmDt, reg1 = resultRegister, reg2 = rightResultReg)
                }
            }
        }
        return code
    }

    internal fun operatorDivideInplace(address: Int, vmDt: VmDataType, signed: Boolean, operand: PtExpression): VmCodeChunk {
        val code = VmCodeChunk()
        val constFactorRight = operand as? PtNumber
        if(vmDt==VmDataType.FLOAT) {
            if(constFactorRight!=null && constFactorRight.type!=DataType.FLOAT) {
                val factor = constFactorRight.number.toFloat()
                code += codeGen.divideByConstFloatInplace(address, factor)
            } else {
                val operandFpReg = codeGen.vmRegisters.nextFreeFloat()
                code += translateExpression(operand, -1, operandFpReg)
                code += if(signed)
                    VmCodeInstruction(Opcode.DIVSM, vmDt, fpReg1 = operandFpReg, value=address)
                else
                    VmCodeInstruction(Opcode.DIVM, vmDt, fpReg1 = operandFpReg, value=address)
            }
        } else {
            if(constFactorRight!=null && constFactorRight.type!=DataType.FLOAT) {
                val factor = constFactorRight.number.toInt()
                code += codeGen.divideByConstInplace(vmDt, address, factor, signed)
            } else {
                val operandReg = codeGen.vmRegisters.nextFree()
                code += translateExpression(operand, operandReg, -1)
                code += if(signed)
                    VmCodeInstruction(Opcode.DIVSM, vmDt, reg1=operandReg, value = address)
                else
                    VmCodeInstruction(Opcode.DIVM, vmDt, reg1=operandReg, value = address)
            }
        }
        return code
    }

    private fun operatorMultiply(binExpr: PtBinaryExpression, vmDt: VmDataType, resultRegister: Int, resultFpRegister: Int): VmCodeChunk {
        val code = VmCodeChunk()
        val constFactorLeft = binExpr.left as? PtNumber
        val constFactorRight = binExpr.right as? PtNumber
        if(vmDt==VmDataType.FLOAT) {
            if(constFactorLeft!=null) {
                code += translateExpression(binExpr.right, -1, resultFpRegister)
                val factor = constFactorLeft.number.toFloat()
                code += codeGen.multiplyByConstFloat(resultFpRegister, factor)
            } else if(constFactorRight!=null) {
                code += translateExpression(binExpr.left, -1, resultFpRegister)
                val factor = constFactorRight.number.toFloat()
                code += codeGen.multiplyByConstFloat(resultFpRegister, factor)
            } else {
                val rightResultFpReg = codeGen.vmRegisters.nextFreeFloat()
                code += translateExpression(binExpr.left, -1, resultFpRegister)
                code += translateExpression(binExpr.right, -1, rightResultFpReg)
                code += VmCodeInstruction(Opcode.MULR, vmDt, fpReg1 = resultFpRegister, fpReg2 = rightResultFpReg)
            }
        } else {
            if(constFactorLeft!=null && constFactorLeft.type!=DataType.FLOAT) {
                code += translateExpression(binExpr.right, resultRegister, -1)
                val factor = constFactorLeft.number.toInt()
                code += codeGen.multiplyByConst(vmDt, resultRegister, factor)
            } else if(constFactorRight!=null && constFactorRight.type!=DataType.FLOAT) {
                code += translateExpression(binExpr.left, resultRegister, -1)
                val factor = constFactorRight.number.toInt()
                code += codeGen.multiplyByConst(vmDt, resultRegister, factor)
            } else {
                val rightResultReg = codeGen.vmRegisters.nextFree()
                code += translateExpression(binExpr.left, resultRegister, -1)
                code += translateExpression(binExpr.right, rightResultReg, -1)
                code += VmCodeInstruction(Opcode.MULR, vmDt, reg1 = resultRegister, reg2 = rightResultReg)
            }
        }
        return code
    }

    internal fun operatorMultiplyInplace(address: Int, vmDt: VmDataType, operand: PtExpression): VmCodeChunk {
        val code = VmCodeChunk()
        val constFactorRight = operand as? PtNumber
        if(vmDt==VmDataType.FLOAT) {
            if(constFactorRight!=null) {
                val factor = constFactorRight.number.toFloat()
                code += codeGen.multiplyByConstFloatInplace(address, factor)
            } else {
                val operandFpReg = codeGen.vmRegisters.nextFreeFloat()
                code += translateExpression(operand, -1, operandFpReg)
                code += VmCodeInstruction(Opcode.MULM, vmDt, fpReg1 = operandFpReg, value = address)
            }
        } else {
            if(constFactorRight!=null && constFactorRight.type!=DataType.FLOAT) {
                val factor = constFactorRight.number.toInt()
                code += codeGen.multiplyByConstInplace(vmDt, address, factor)
            } else {
                val operandReg = codeGen.vmRegisters.nextFree()
                code += translateExpression(operand, operandReg, -1)
                code += VmCodeInstruction(Opcode.MULM, vmDt, reg1=operandReg, value = address)
            }
        }
        return code
    }

    private fun operatorMinus(binExpr: PtBinaryExpression, vmDt: VmDataType, resultRegister: Int, resultFpRegister: Int): VmCodeChunk {
        val code = VmCodeChunk()
        if(vmDt==VmDataType.FLOAT) {
            if((binExpr.right as? PtNumber)?.number==1.0) {
                code += translateExpression(binExpr.left, -1, resultFpRegister)
                code += VmCodeInstruction(Opcode.DEC, vmDt, fpReg1 = resultFpRegister)
            }
            else {
                if(binExpr.right is PtNumber) {
                    code += translateExpression(binExpr.left, -1, resultFpRegister)
                    code += VmCodeInstruction(Opcode.SUB, vmDt, fpReg1 = resultFpRegister, fpValue = (binExpr.right as PtNumber).number.toFloat())
                } else {
                    val rightResultFpReg = codeGen.vmRegisters.nextFreeFloat()
                    code += translateExpression(binExpr.left, -1, resultFpRegister)
                    code += translateExpression(binExpr.right, -1, rightResultFpReg)
                    code += VmCodeInstruction(Opcode.SUBR, vmDt, fpReg1 = resultFpRegister, fpReg2 = rightResultFpReg)
                }
            }
        } else {
            if((binExpr.right as? PtNumber)?.number==1.0) {
                code += translateExpression(binExpr.left, resultRegister, -1)
                code += VmCodeInstruction(Opcode.DEC, vmDt, reg1=resultRegister)
            }
            else {
                if(binExpr.right is PtNumber) {
                    code += translateExpression(binExpr.left, resultRegister, -1)
                    code += VmCodeInstruction(Opcode.SUB, vmDt, reg1 = resultRegister, value = (binExpr.right as PtNumber).number.toInt())
                } else {
                    val rightResultReg = codeGen.vmRegisters.nextFree()
                    code += translateExpression(binExpr.left, resultRegister, -1)
                    code += translateExpression(binExpr.right, rightResultReg, -1)
                    code += VmCodeInstruction(Opcode.SUBR, vmDt, reg1 = resultRegister, reg2 = rightResultReg)
                }
            }
        }
        return code
    }

    internal fun operatorMinusInplace(address: Int, vmDt: VmDataType, operand: PtExpression): VmCodeChunk {
        val code = VmCodeChunk()
        if(vmDt==VmDataType.FLOAT) {
            if((operand as? PtNumber)?.number==1.0) {
                code += VmCodeInstruction(Opcode.DECM, vmDt, value=address)
            }
            else {
                val operandFpReg = codeGen.vmRegisters.nextFreeFloat()
                code += translateExpression(operand, -1, operandFpReg)
                code += VmCodeInstruction(Opcode.SUBM, vmDt, fpReg1=operandFpReg, value=address)
            }
        } else {
            if((operand as? PtNumber)?.number==1.0) {
                code += VmCodeInstruction(Opcode.DECM, vmDt, value=address)
            }
            else {
                val operandReg = codeGen.vmRegisters.nextFree()
                code += translateExpression(operand, operandReg, -1)
                code += VmCodeInstruction(Opcode.SUBM, vmDt, reg1=operandReg, value = address)
            }
        }
        return code
    }

    private fun operatorPlus(binExpr: PtBinaryExpression, vmDt: VmDataType, resultRegister: Int, resultFpRegister: Int): VmCodeChunk {
        val code = VmCodeChunk()
        if(vmDt==VmDataType.FLOAT) {
            if((binExpr.left as? PtNumber)?.number==1.0) {
                code += translateExpression(binExpr.right, -1, resultFpRegister)
                code += VmCodeInstruction(Opcode.INC, vmDt, fpReg1=resultFpRegister)
            }
            else if((binExpr.right as? PtNumber)?.number==1.0) {
                code += translateExpression(binExpr.left, -1, resultFpRegister)
                code += VmCodeInstruction(Opcode.INC, vmDt, fpReg1=resultFpRegister)
            }
            else {
                if(binExpr.right is PtNumber) {
                    code += translateExpression(binExpr.left, -1, resultFpRegister)
                    code += VmCodeInstruction(Opcode.ADD, vmDt, fpReg1 = resultFpRegister, fpValue = (binExpr.right as PtNumber).number.toFloat())
                } else {
                    val rightResultFpReg = codeGen.vmRegisters.nextFreeFloat()
                    code += translateExpression(binExpr.left, -1, resultFpRegister)
                    code += translateExpression(binExpr.right, -1, rightResultFpReg)
                    code += VmCodeInstruction(Opcode.ADDR, vmDt, fpReg1 = resultFpRegister, fpReg2 = rightResultFpReg)
                }
            }
        } else {
            if((binExpr.left as? PtNumber)?.number==1.0) {
                code += translateExpression(binExpr.right, resultRegister, -1)
                code += VmCodeInstruction(Opcode.INC, vmDt, reg1=resultRegister)
            }
            else if((binExpr.right as? PtNumber)?.number==1.0) {
                code += translateExpression(binExpr.left, resultRegister, -1)
                code += VmCodeInstruction(Opcode.INC, vmDt, reg1=resultRegister)
            }
            else {
                if(binExpr.right is PtNumber) {
                    code += translateExpression(binExpr.left, resultRegister, -1)
                    code += VmCodeInstruction(Opcode.ADD, vmDt, reg1 = resultRegister, value=(binExpr.right as PtNumber).number.toInt())
                } else {
                    val rightResultReg = codeGen.vmRegisters.nextFree()
                    code += translateExpression(binExpr.left, resultRegister, -1)
                    code += translateExpression(binExpr.right, rightResultReg, -1)
                    code += VmCodeInstruction(Opcode.ADDR, vmDt, reg1 = resultRegister, reg2 = rightResultReg)
                }
            }
        }
        return code
    }

    internal fun operatorPlusInplace(address: Int, vmDt: VmDataType, operand: PtExpression): VmCodeChunk {
        val code = VmCodeChunk()
        if(vmDt==VmDataType.FLOAT) {
            if((operand as? PtNumber)?.number==1.0) {
                code += VmCodeInstruction(Opcode.INCM, vmDt, value = address)
            }
            else {
                val operandFpReg = codeGen.vmRegisters.nextFreeFloat()
                code += translateExpression(operand, -1, operandFpReg)
                code += VmCodeInstruction(Opcode.ADDM, vmDt, fpReg1=operandFpReg, value=address)
            }
        } else {
            if((operand as? PtNumber)?.number==1.0) {
                code += VmCodeInstruction(Opcode.INCM, vmDt, value = address)
            }
            else {
                val operandReg = codeGen.vmRegisters.nextFree()
                code += translateExpression(operand, operandReg, -1)
                code += VmCodeInstruction(Opcode.ADDM, vmDt, reg1=operandReg, value=address)
            }
        }
        return code
    }

    fun translate(fcall: PtFunctionCall, resultRegister: Int, resultFpRegister: Int): VmCodeChunk {
        when (val callTarget = codeGen.symbolTable.flat.getValue(fcall.functionName)) {
            is StSub -> {
                val code = VmCodeChunk()
                for ((arg, parameter) in fcall.args.zip(callTarget.parameters)) {
                    val paramDt = codeGen.vmType(parameter.type)
                    if(codeGen.isZero(arg)) {
                        if (paramDt == VmDataType.FLOAT) {
                            val mem = codeGen.allocations.get(fcall.functionName + parameter.name)
                            code += VmCodeInstruction(Opcode.STOREZM, paramDt, value = mem)
                        } else {
                            val mem = codeGen.allocations.get(fcall.functionName + parameter.name)
                            code += VmCodeInstruction(Opcode.STOREZM, paramDt, value = mem)
                        }
                    } else {
                        if (paramDt == VmDataType.FLOAT) {
                            val argFpReg = codeGen.vmRegisters.nextFreeFloat()
                            code += translateExpression(arg, -1, argFpReg)
                            val mem = codeGen.allocations.get(fcall.functionName + parameter.name)
                            code += VmCodeInstruction(Opcode.STOREM, paramDt, fpReg1 = argFpReg, value = mem)
                        } else {
                            val argReg = codeGen.vmRegisters.nextFree()
                            code += translateExpression(arg, argReg, -1)
                            val mem = codeGen.allocations.get(fcall.functionName + parameter.name)
                            code += VmCodeInstruction(Opcode.STOREM, paramDt, reg1 = argReg, value = mem)
                        }
                    }
                }
                code += VmCodeInstruction(Opcode.CALL, labelSymbol=fcall.functionName)
                if(fcall.type==DataType.FLOAT) {
                    if (!fcall.void && resultFpRegister != 0) {
                        // Call convention: result value is in fr0, so put it in the required register instead.
                        code += VmCodeInstruction(Opcode.LOADR, VmDataType.FLOAT, fpReg1 = resultFpRegister, fpReg2 = 0)
                    }
                } else {
                    if (!fcall.void && resultRegister != 0) {
                        // Call convention: result value is in r0, so put it in the required register instead.
                        code += VmCodeInstruction(Opcode.LOADR, codeGen.vmType(fcall.type), reg1 = resultRegister, reg2 = 0)
                    }
                }
                return code
            }
            is StRomSub -> {
                throw AssemblyError("virtual machine doesn't yet support calling romsub $fcall")
            }
            else -> throw AssemblyError("invalid node type")
        }
    }

}
@ -1,128 +0,0 @@
package prog8.codegen.virtual

import prog8.code.StMemorySlab
import prog8.code.StNodeType
import prog8.code.SymbolTable
import prog8.code.ast.PtProgram
import prog8.code.core.*

class VariableAllocator(private val st: SymbolTable, private val program: PtProgram) {

    private val allocations = mutableMapOf<List<String>, Int>()
    private var freeMemoryStart: Int

    val freeMem: Int
        get() = freeMemoryStart

    init {
        var nextLocation = 0
        for (variable in st.allVariables) {
            val memsize =
                when (variable.dt) {
                    DataType.STR -> variable.onetimeInitializationStringValue!!.first.length + 1 // include the zero byte
                    in NumericDatatypes -> program.memsizer.memorySize(variable.dt)
                    in ArrayDatatypes -> program.memsizer.memorySize(variable.dt, variable.length!!)
                    else -> throw InternalCompilerException("weird dt")
                }

            allocations[variable.scopedName] = nextLocation
            nextLocation += memsize
        }
        for (memvar in st.allMemMappedVariables) {
            // TODO virtual machine doesn't have memory mapped variables, so treat them as regular allocated variables for now
            val memsize =
                when (memvar.dt) {
                    in NumericDatatypes -> program.memsizer.memorySize(memvar.dt)
                    in ArrayDatatypes -> program.memsizer.memorySize(memvar.dt, memvar.length!!)
                    else -> throw InternalCompilerException("weird dt")
                }

            allocations[memvar.scopedName] = nextLocation
            nextLocation += memsize
        }

        freeMemoryStart = nextLocation
    }

    fun get(name: List<String>) = allocations.getValue(name)

    fun asVmMemory(): List<Pair<List<String>, String>> {
        val mm = mutableListOf<Pair<List<String>, String>>()
        for (variable in st.allVariables) {
            val location = allocations.getValue(variable.scopedName)
            val typeStr = when(variable.dt) {
                DataType.UBYTE, DataType.ARRAY_UB, DataType.STR -> "ubyte"
                DataType.BYTE, DataType.ARRAY_B -> "byte"
                DataType.UWORD, DataType.ARRAY_UW -> "uword"
                DataType.WORD, DataType.ARRAY_W -> "word"
                DataType.FLOAT, DataType.ARRAY_F -> "float"
                else -> throw InternalCompilerException("weird dt")
            }
            val value = when(variable.dt) {
                DataType.FLOAT -> (variable.onetimeInitializationNumericValue ?: 0.0).toString()
                in NumericDatatypes -> (variable.onetimeInitializationNumericValue ?: 0).toHex()
                DataType.STR -> {
                    val encoded = program.encoding.encodeString(variable.onetimeInitializationStringValue!!.first, variable.onetimeInitializationStringValue!!.second) + listOf(0u)
                    encoded.joinToString(",") { it.toInt().toHex() }
                }
                DataType.ARRAY_F -> {
                    if(variable.onetimeInitializationArrayValue!=null) {
                        variable.onetimeInitializationArrayValue!!.joinToString(",") { it.number!!.toString() }
                    } else {
                        (1..variable.length!!).joinToString(",") { "0" }
                    }
                }
                in ArrayDatatypes -> {
                    if(variable.onetimeInitializationArrayValue!==null) {
                        variable.onetimeInitializationArrayValue!!.joinToString(",") { it.number!!.toHex() }
                    } else {
                        (1..variable.length!!).joinToString(",") { "0" }
                    }
                }
                else -> throw InternalCompilerException("weird dt")
            }
            mm.add(Pair(variable.scopedName, "@$location $typeStr $value"))
        }
        for (variable in st.allMemMappedVariables) {
            val location = allocations.getValue(variable.scopedName)
            val typeStr = when(variable.dt) {
                DataType.UBYTE, DataType.ARRAY_UB, DataType.STR -> "ubyte"
                DataType.BYTE, DataType.ARRAY_B -> "byte"
                DataType.UWORD, DataType.ARRAY_UW -> "uword"
                DataType.WORD, DataType.ARRAY_W -> "word"
                DataType.FLOAT, DataType.ARRAY_F -> "float"
                else -> throw InternalCompilerException("weird dt")
            }
            val value = when(variable.dt) {
                DataType.FLOAT -> "0.0"
                in NumericDatatypes -> "0"
                DataType.ARRAY_F -> (1..variable.length!!).joinToString(",") { "0.0" }
                in ArrayDatatypes -> (1..variable.length!!).joinToString(",") { "0" }
                else -> throw InternalCompilerException("weird dt for mem mapped var")
            }
            mm.add(Pair(variable.scopedName, "@$location $typeStr $value"))
        }
        return mm
    }

    fun allocateMemorySlab(name: String, size: UInt, align: UInt, position: Position): StMemorySlab {
        val address =
            if(align==0u || align==1u)
                freeMemoryStart.toUInt()
            else
                (freeMemoryStart.toUInt() + align-1u) and (0xffffffffu xor (align-1u))
        freeMemoryStart = (address + size).toInt()

        val slab = StMemorySlab(name, size, align, address, position)
        st.add(slab)
        return slab
    }

    fun getMemorySlab(name: String): StMemorySlab? {
        val existing = st.children[name]
        return if(existing==null || existing.type!=StNodeType.MEMORYSLAB)
            null
        else
            existing as StMemorySlab
    }
}
@ -1,4 +1,4 @@
package prog8.codegen.experimental
package prog8.codegen.virtual

import prog8.code.core.AssemblyError
import prog8.code.core.CompilationOptions
@ -8,11 +8,7 @@ import java.io.BufferedWriter
import kotlin.io.path.bufferedWriter
import kotlin.io.path.div

class VmAssemblyProgram(override val name: String, val irProgram: IRProgram): IAssemblyProgram {

// TODO once this is working, replace the codeGenVirtual by codeGenExperimental
// after that, add an option to the VmRunner to accept an IRProgram object directly and skip this intermediate .p8ir step when -emu is present

internal class VmAssemblyProgram(override val name: String, val irProgram: IRProgram): IAssemblyProgram {

    override fun assemble(options: CompilationOptions): Boolean {
        val outfile = options.outputDir / ("$name.p8virt")
29
codeGenVirtual/src/prog8/codegen/virtual/VmCodeGen.kt
Normal file
@ -0,0 +1,29 @@
package prog8.codegen.virtual

import prog8.code.SymbolTable
import prog8.code.ast.PtProgram
import prog8.code.core.CompilationOptions
import prog8.code.core.IAssemblyGenerator
import prog8.code.core.IAssemblyProgram
import prog8.code.core.IErrorReporter
import prog8.codegen.intermediate.IntermediateCodeGen
import prog8.intermediate.IRFileReader
import prog8.intermediate.IRFileWriter

class VmCodeGen(private val program: PtProgram,
                private val symbolTable: SymbolTable,
                private val options: CompilationOptions,
                private val errors: IErrorReporter
): IAssemblyGenerator {
    override fun compileToAssembly(): IAssemblyProgram? {

        val irCodeGen = IntermediateCodeGen(program, symbolTable, options, errors)
        val irProgram = irCodeGen.generate()

        // TODO only write IR file if option is set to do so
        // create IR file on disk and read it back.
        IRFileWriter(irProgram).writeFile()
        val irProgram2 = IRFileReader(options.outputDir, irProgram.name).readFile()
        return VmAssemblyProgram(irProgram2.name, irProgram2)
    }
}
@ -1,187 +0,0 @@
package prog8.codegen.virtual

import prog8.intermediate.Instruction
import prog8.intermediate.Opcode
import prog8.intermediate.VmDataType

class VmPeepholeOptimizer(private val vmprog: AssemblyProgram) {
    fun optimize() {
        vmprog.getBlocks().forEach { block ->
            do {
                val indexedInstructions = block.lines.withIndex()
                    .filter { it.value is VmCodeInstruction }
                    .map { IndexedValue(it.index, (it.value as VmCodeInstruction).ins) }
                val changed = removeNops(block, indexedInstructions)
                        || removeDoubleLoadsAndStores(block, indexedInstructions) // TODO not yet implemented
                        || removeUselessArithmetic(block, indexedInstructions)
                        || removeWeirdBranches(block, indexedInstructions)
                        || removeDoubleSecClc(block, indexedInstructions)
                        || cleanupPushPop(block, indexedInstructions)
                // TODO other optimizations:
                // more complex optimizations such as unused registers
            } while(changed)
        }
    }

    private fun cleanupPushPop(block: VmCodeChunk, indexedInstructions: List<IndexedValue<Instruction>>): Boolean {
        // push followed by pop to same target, or different target->replace with load
        var changed = false
        indexedInstructions.reversed().forEach { (idx, ins) ->
            if(ins.opcode==Opcode.PUSH) {
                if(idx < block.lines.size-1) {
                    val insAfter = block.lines[idx+1] as? VmCodeInstruction
                    if(insAfter!=null && insAfter.ins.opcode ==Opcode.POP) {
                        if(ins.reg1==insAfter.ins.reg1) {
                            block.lines.removeAt(idx)
                            block.lines.removeAt(idx)
                        } else {
                            block.lines[idx] = VmCodeInstruction(Opcode.LOADR, ins.type, reg1=insAfter.ins.reg1, reg2=ins.reg1)
                            block.lines.removeAt(idx+1)
                        }
                        changed = true
                    }
                }
            }
        }
        return changed
    }


    private fun removeDoubleSecClc(block: VmCodeChunk, indexedInstructions: List<IndexedValue<Instruction>>): Boolean {
        // double sec, clc
        // sec+clc or clc+sec
        var changed = false
        indexedInstructions.reversed().forEach { (idx, ins) ->
            if(ins.opcode==Opcode.SEC || ins.opcode==Opcode.CLC) {
                if(idx < block.lines.size-1) {
                    val insAfter = block.lines[idx+1] as? VmCodeInstruction
                    if(insAfter?.ins?.opcode == ins.opcode) {
                        block.lines.removeAt(idx)
                        changed = true
                    }
                    else if(ins.opcode==Opcode.SEC && insAfter?.ins?.opcode==Opcode.CLC) {
                        block.lines.removeAt(idx)
                        changed = true
                    }
                    else if(ins.opcode==Opcode.CLC && insAfter?.ins?.opcode==Opcode.SEC) {
                        block.lines.removeAt(idx)
                        changed = true
                    }
                }
            }
        }
        return changed
    }

    private fun removeWeirdBranches(block: VmCodeChunk, indexedInstructions: List<IndexedValue<Instruction>>): Boolean {
        // jump/branch to label immediately below
        var changed = false
        indexedInstructions.reversed().forEach { (idx, ins) ->
            if(ins.opcode==Opcode.JUMP && ins.labelSymbol!=null) {
                // if jumping to label immediately following this
                if(idx < block.lines.size-1) {
                    val label = block.lines[idx+1] as? VmCodeLabel
                    if(label?.name == ins.labelSymbol) {
                        block.lines.removeAt(idx)
                        changed = true
                    }
                }
            }
        }
        return changed
    }

    private fun removeUselessArithmetic(block: VmCodeChunk, indexedInstructions: List<IndexedValue<Instruction>>): Boolean {
        // note: this is hard to solve for the non-immediate instructions atm because the values are loaded into registers first
        var changed = false
        indexedInstructions.reversed().forEach { (idx, ins) ->
            when (ins.opcode) {
                Opcode.DIV, Opcode.DIVS, Opcode.MUL, Opcode.MOD -> {
                    if (ins.value == 1) {
                        block.lines.removeAt(idx)
                        changed = true
                    }
                }
                Opcode.ADD, Opcode.SUB -> {
                    if (ins.value == 1) {
                        block.lines[idx] = VmCodeInstruction(
                            if (ins.opcode == Opcode.ADD) Opcode.INC else Opcode.DEC,
                            ins.type,
                            ins.reg1
                        )
                        changed = true
                    } else if (ins.value == 0) {
                        block.lines.removeAt(idx)
                        changed = true
                    }
                }
                Opcode.AND -> {
                    if (ins.value == 0) {
                        block.lines[idx] = VmCodeInstruction(Opcode.LOAD, ins.type, reg1 = ins.reg1, value = 0)
                        changed = true
                    } else if (ins.value == 255 && ins.type == VmDataType.BYTE) {
                        block.lines.removeAt(idx)
                        changed = true
                    } else if (ins.value == 65535 && ins.type == VmDataType.WORD) {
                        block.lines.removeAt(idx)
                        changed = true
                    }
                }
                Opcode.OR -> {
                    if (ins.value == 0) {
                        block.lines.removeAt(idx)
                        changed = true
                    } else if ((ins.value == 255 && ins.type == VmDataType.BYTE) || (ins.value == 65535 && ins.type == VmDataType.WORD)) {
                        block.lines[idx] = VmCodeInstruction(Opcode.LOAD, ins.type, reg1 = ins.reg1, value = ins.value)
                        changed = true
                    }
                }
                Opcode.XOR -> {
                    if (ins.value == 0) {
                        block.lines.removeAt(idx)
                        changed = true
                    }
                }
                else -> {}
            }
        }
        return changed
    }

    private fun removeNops(block: VmCodeChunk, indexedInstructions: List<IndexedValue<Instruction>>): Boolean {
        var changed = false
        indexedInstructions.reversed().forEach { (idx, ins) ->
            if (ins.opcode == Opcode.NOP) {
                changed = true
                block.lines.removeAt(idx)
            }
        }
        return changed
    }

    private fun removeDoubleLoadsAndStores(block: VmCodeChunk, indexedInstructions: List<IndexedValue<Instruction>>): Boolean {
        var changed = false
        indexedInstructions.forEach { (idx, ins) ->

            // TODO: detect multiple loads to the same target registers, only keep first (if source is not I/O memory)
            // TODO: detect multiple stores to the same target, only keep first (if target is not I/O memory)
            // TODO: detect multiple float ffrom/fto to the same target, only keep first
            // TODO: detect multiple sequential rnd with same reg1, only keep one
            // TODO: detect subsequent same xors/nots/negs, remove the pairs completely as they cancel out
            // TODO: detect multiple same ands, ors; only keep first
            // TODO: (hard) detect multiple registers being assigned the same value (and not changed) - use only 1 of them
            // ...
        }
        return changed
    }
}

private interface ICodeChange { // TODO not used? remove?
    fun perform(block: VmCodeChunk)

    class Remove(val idx: Int): ICodeChange {
        override fun perform(block: VmCodeChunk) {
            block.lines.removeAt(idx)
        }
    }
}
@ -1,10 +1,10 @@
package prog8.codegen.experimental
package prog8.codegen.virtual

import prog8.code.SymbolTable
import prog8.code.core.*
import prog8.intermediate.getTypeString

class VmVariableAllocator(val st: SymbolTable, val encoding: IStringEncoding, memsizer: IMemSizer) {
internal class VmVariableAllocator(val st: SymbolTable, val encoding: IStringEncoding, memsizer: IMemSizer) {

    internal val allocations = mutableMapOf<List<String>, Int>()
    private var freeMemoryStart: Int
@ -1,170 +0,0 @@
package prog8tests.vm

import io.kotest.core.spec.style.FunSpec
import io.kotest.matchers.shouldBe
import prog8.code.SymbolTable
import prog8.code.ast.PtProgram
import prog8.codegen.virtual.*
import prog8.intermediate.Opcode
import prog8.intermediate.VmDataType
import prog8tests.vm.helpers.DummyMemsizer
import prog8tests.vm.helpers.DummyStringEncoder

class TestVmPeepholeOpt: FunSpec({
    fun makeVmProgram(lines: List<VmCodeLine>): Pair<AssemblyProgram, VariableAllocator> {
        val st = SymbolTable()
        val program = PtProgram("test", DummyMemsizer, DummyStringEncoder)
        val allocations = VariableAllocator(st, program)
        val asm = AssemblyProgram("test", allocations)
        val block = VmCodeChunk()
        for(line in lines)
            block += line
        asm.addBlock(block)
        return Pair(asm, allocations)
    }

    fun AssemblyProgram.lines(): List<VmCodeLine> = this.getBlocks().flatMap { it.lines }

    test("remove nops") {
        val(asm, _) = makeVmProgram(listOf(
            VmCodeInstruction(Opcode.JUMP, labelSymbol = listOf("dummy")),
            VmCodeInstruction(Opcode.NOP),
            VmCodeInstruction(Opcode.NOP)
        ))
        asm.lines().size shouldBe 3
        val opt = VmPeepholeOptimizer(asm)
        opt.optimize()
        asm.lines().size shouldBe 1
    }

    test("remove jmp to label below") {
        val(asm, _) = makeVmProgram(listOf(
            VmCodeInstruction(Opcode.JUMP, labelSymbol = listOf("label")), // removed
            VmCodeLabel(listOf("label")),
            VmCodeInstruction(Opcode.JUMP, labelSymbol = listOf("label2")), // removed
            VmCodeInstruction(Opcode.NOP), // removed
            VmCodeLabel(listOf("label2")),
            VmCodeInstruction(Opcode.JUMP, labelSymbol = listOf("label3")),
            VmCodeInstruction(Opcode.INC, VmDataType.BYTE, reg1=1),
            VmCodeLabel(listOf("label3"))
        ))
        asm.lines().size shouldBe 8
        val opt = VmPeepholeOptimizer(asm)
        opt.optimize()
        val lines = asm.lines()
        lines.size shouldBe 5
        (lines[0] as VmCodeLabel).name shouldBe listOf("label")
        (lines[1] as VmCodeLabel).name shouldBe listOf("label2")
        (lines[2] as VmCodeInstruction).ins.opcode shouldBe Opcode.JUMP
        (lines[3] as VmCodeInstruction).ins.opcode shouldBe Opcode.INC
        (lines[4] as VmCodeLabel).name shouldBe listOf("label3")
    }

    test("remove double sec/clc") {
        val(asm, _) = makeVmProgram(listOf(
            VmCodeInstruction(Opcode.SEC),
            VmCodeInstruction(Opcode.SEC),
            VmCodeInstruction(Opcode.SEC),
            VmCodeInstruction(Opcode.CLC),
            VmCodeInstruction(Opcode.CLC),
            VmCodeInstruction(Opcode.CLC)
        ))
        asm.lines().size shouldBe 6
        val opt = VmPeepholeOptimizer(asm)
        opt.optimize()
        val lines = asm.lines()
        lines.size shouldBe 1
        (lines[0] as VmCodeInstruction).ins.opcode shouldBe Opcode.CLC
    }

    test("push followed by pop") {
        val(asm, _) = makeVmProgram(listOf(
            VmCodeInstruction(Opcode.PUSH, VmDataType.BYTE, reg1=42),
            VmCodeInstruction(Opcode.POP, VmDataType.BYTE, reg1=42),
            VmCodeInstruction(Opcode.PUSH, VmDataType.BYTE, reg1=99),
            VmCodeInstruction(Opcode.POP, VmDataType.BYTE, reg1=222)
        ))
        asm.lines().size shouldBe 4
        val opt = VmPeepholeOptimizer(asm)
        opt.optimize()
        val lines = asm.lines()
        lines.size shouldBe 1
        (lines[0] as VmCodeInstruction).ins.opcode shouldBe Opcode.LOADR
        (lines[0] as VmCodeInstruction).ins.reg1 shouldBe 222
        (lines[0] as VmCodeInstruction).ins.reg2 shouldBe 99
    }

    test("remove useless div/mul, add/sub") {
        val(asm, _) = makeVmProgram(listOf(
            VmCodeInstruction(Opcode.DIV, VmDataType.BYTE, reg1=42, value = 1),
            VmCodeInstruction(Opcode.DIVS, VmDataType.BYTE, reg1=42, value = 1),
            VmCodeInstruction(Opcode.MUL, VmDataType.BYTE, reg1=42, value = 1),
            VmCodeInstruction(Opcode.MOD, VmDataType.BYTE, reg1=42, value = 1),
            VmCodeInstruction(Opcode.DIV, VmDataType.BYTE, reg1=42, value = 2),
            VmCodeInstruction(Opcode.DIVS, VmDataType.BYTE, reg1=42, value = 2),
            VmCodeInstruction(Opcode.MUL, VmDataType.BYTE, reg1=42, value = 2),
            VmCodeInstruction(Opcode.MOD, VmDataType.BYTE, reg1=42, value = 2),
            VmCodeInstruction(Opcode.ADD, VmDataType.BYTE, reg1=42, value = 0),
            VmCodeInstruction(Opcode.SUB, VmDataType.BYTE, reg1=42, value = 0)
        ))
        asm.lines().size shouldBe 10
        val opt = VmPeepholeOptimizer(asm)
        opt.optimize()
        val lines = asm.lines()
        lines.size shouldBe 4
    }

    test("replace add/sub 1 by inc/dec") {
        val(asm, _) = makeVmProgram(listOf(
            VmCodeInstruction(Opcode.ADD, VmDataType.BYTE, reg1=42, value = 1),
            VmCodeInstruction(Opcode.SUB, VmDataType.BYTE, reg1=42, value = 1)
        ))
        asm.lines().size shouldBe 2
        val opt = VmPeepholeOptimizer(asm)
        opt.optimize()
        val lines = asm.lines()
        lines.size shouldBe 2
        (lines[0] as VmCodeInstruction).ins.opcode shouldBe Opcode.INC
        (lines[1] as VmCodeInstruction).ins.opcode shouldBe Opcode.DEC
    }

    test("remove useless and/or/xor") {
        val(asm, _) = makeVmProgram(listOf(
            VmCodeInstruction(Opcode.AND, VmDataType.BYTE, reg1=42, value = 255),
            VmCodeInstruction(Opcode.AND, VmDataType.WORD, reg1=42, value = 65535),
            VmCodeInstruction(Opcode.OR, VmDataType.BYTE, reg1=42, value = 0),
            VmCodeInstruction(Opcode.XOR, VmDataType.BYTE, reg1=42, value = 0),
            VmCodeInstruction(Opcode.AND, VmDataType.BYTE, reg1=42, value = 200),
            VmCodeInstruction(Opcode.AND, VmDataType.WORD, reg1=42, value = 60000),
            VmCodeInstruction(Opcode.OR, VmDataType.BYTE, reg1=42, value = 1),
            VmCodeInstruction(Opcode.XOR, VmDataType.BYTE, reg1=42, value = 1)
        ))
        asm.lines().size shouldBe 8
        val opt = VmPeepholeOptimizer(asm)
        opt.optimize()
        val lines = asm.lines()
        lines.size shouldBe 4
    }

    test("replace and/or/xor by constant number") {
        val(asm, _) = makeVmProgram(listOf(
            VmCodeInstruction(Opcode.AND, VmDataType.BYTE, reg1=42, value = 0),
            VmCodeInstruction(Opcode.AND, VmDataType.WORD, reg1=42, value = 0),
            VmCodeInstruction(Opcode.OR, VmDataType.BYTE, reg1=42, value = 255),
            VmCodeInstruction(Opcode.OR, VmDataType.WORD, reg1=42, value = 65535)
        ))
        asm.lines().size shouldBe 4
        val opt = VmPeepholeOptimizer(asm)
        opt.optimize()
        val lines = asm.lines()
        lines.size shouldBe 4
        (lines[0] as VmCodeInstruction).ins.opcode shouldBe Opcode.LOAD
        (lines[1] as VmCodeInstruction).ins.opcode shouldBe Opcode.LOAD
        (lines[2] as VmCodeInstruction).ins.opcode shouldBe Opcode.LOAD
        (lines[3] as VmCodeInstruction).ins.opcode shouldBe Opcode.LOAD
        (lines[0] as VmCodeInstruction).ins.value shouldBe 0
        (lines[1] as VmCodeInstruction).ins.value shouldBe 0
        (lines[2] as VmCodeInstruction).ins.value shouldBe 255
        (lines[3] as VmCodeInstruction).ins.value shouldBe 65535
    }
})
@ -453,7 +453,7 @@ internal fun asmGeneratorFor(program: Program,
        return prog8.codegen.cpu6502.AsmGen(program, symbolTable, options, errors)
    if (options.compTarget.name == VMTarget.NAME) {
        val intermediateAst = IntermediateAstMaker(program).transform()
        return prog8.codegen.virtual.CodeGen(intermediateAst, symbolTable, options, errors)
        return prog8.codegen.virtual.VmCodeGen(intermediateAst, symbolTable, options, errors)
    }
}

@ -3,8 +3,6 @@ TODO

For next release
^^^^^^^^^^^^^^^^
- Replace existing vm codegen by expericodegen, expericodegen just stops at saving IR in file.
- AFTER THIS, MERGE TO MASTER.
- IR: option to save IR in file
- IR/VM: improve unit tests
- write some documentation about the compiler architecture and where to plug a code generator onto.
@ -23,6 +21,8 @@ Need help with
Future Things and Ideas
^^^^^^^^^^^^^^^^^^^^^^^
Compiler:

- vm/ir: all(), any(), reverse() and sort() still depend on a VM Syscall. Get rid of this. (maybe use a IR 'builtin' function?)
- vm: Jumps go to a code block rather than a specific address(label) -> also helps future dead code elimination?
- vm: the above means that every label introduces a new code block. This eliminates the use of actual labels altogether.
- vm: add more optimizations in IRPeepholeOptimizer
@ -1,8 +1,8 @@
package prog8.intermediate

class IRPeepholeOptimizer(private val vmprog: IRProgram) {
class IRPeepholeOptimizer(private val irprog: IRProgram) {
    fun optimize() {
        vmprog.blocks.asSequence().flatMap { it.subroutines }.forEach { sub ->
        irprog.blocks.asSequence().flatMap { it.subroutines }.forEach { sub ->
            sub.chunks.forEach { chunk ->
                // we don't optimize Inline Asm chunks here.
                if(chunk is IRCodeChunk) {
@ -5,6 +5,7 @@ include(
    ':compilerAst',
    ':codeOptimizers',
    ':virtualmachine',
    ':codeGenIntermediate',
    ':codeGenVirtual',
    ':codeGenCpu6502',
    ':codeGenExperimental',