vm: split off assignment codegen to its own file

Irmen de Jong 2022-05-17 22:38:31 +02:00
parent dc6475c91b
commit e52d9e3210
6 changed files with 180 additions and 222 deletions


@ -11,6 +11,25 @@ sealed class PtExpression(val type: DataType, position: Position) : PtNode(posit
    override fun printProperties() {
        print(type)
    }

    infix fun isSameTargetAs(other: PtExpression): Boolean = when(this) {
        is PtArrayIndexer -> {
            other is PtArrayIndexer && other.type==type &&
                other.variable isSameTargetAs variable &&
                other.index isSameTargetAs index
        }
        is PtIdentifier -> other is PtIdentifier && other.type==type && other.targetName==targetName
        is PtMachineRegister -> other is PtMachineRegister && other.register==register
        is PtMemoryByte -> other is PtMemoryByte && address isSameTargetAs other.address
        is PtNumber -> other is PtNumber && other.type==type && other.number==number
        is PtAddressOf -> other is PtAddressOf && other.identifier isSameTargetAs identifier
        is PtPrefix -> other is PtPrefix && other.operator==operator && other.value isSameTargetAs value
        is PtTypeCast -> other is PtTypeCast && other.type==type && other.value isSameTargetAs value
        else -> false
    }
}


@ -45,6 +45,14 @@ class PtAssignment(position: Position) : PtNode(position) {
        get() = children[1] as PtExpression
    override fun printProperties() { }

    val isInplaceAssign: Boolean by lazy {
        val target = target.children.single() as PtExpression
        if(value is PtBinaryExpression) {
            target isSameTargetAs (value as PtBinaryExpression).left
        } else
            target isSameTargetAs value
    }
}
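These two additions carry the logic behind the new assignment codegen: isSameTargetAs compares two expressions structurally, and isInplaceAssign uses it to flag assignments such as x = x + 1, where the binary expression on the right reads the same target that is being written. A minimal standalone Kotlin sketch of that classification idea, using simplified stand-in node types rather than the real prog8 AST classes:

// Simplified stand-in AST, for illustration only (not the real prog8 Pt* node classes).
sealed interface Expr
data class Ident(val name: String) : Expr
data class Num(val value: Double) : Expr
data class Binary(val operator: String, val left: Expr, val right: Expr) : Expr

// Structural "same target" check, analogous in spirit to isSameTargetAs.
fun sameTarget(a: Expr, b: Expr): Boolean = when (a) {
    is Ident -> b is Ident && a.name == b.name
    is Num -> b is Num && a.value == b.value
    is Binary -> false      // a binary expression is never an assignment target
}

// An assignment is "in place" when its value is a binary expression whose left
// operand is the same target being assigned, e.g. x = x + 1.
fun isInplace(target: Expr, value: Expr): Boolean =
    if (value is Binary) sameTarget(target, value.left) else sameTarget(target, value)

fun main() {
    val x = Ident("x")
    println(isInplace(x, Binary("+", Ident("x"), Num(1.0))))   // true:  x = x + 1
    println(isInplace(x, Binary("+", Ident("y"), Num(1.0))))   // false: x = y + 1
}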


@ -0,0 +1,136 @@
package prog8.codegen.virtual

import prog8.code.ast.*
import prog8.code.core.AssemblyError
import prog8.code.core.DataType
import prog8.vm.Opcode
import prog8.vm.VmDataType

internal class AssignmentGen(private val codeGen: CodeGen, private val expressionEval: ExpressionGen) {

    internal fun translate(assignment: PtAssignment): VmCodeChunk {
        return if (assignment.isInplaceAssign)
            translateInplaceAssign(assignment)
        else
            translateRegularAssign(assignment)
    }

    private fun translateInplaceAssign(assignment: PtAssignment): VmCodeChunk {
        // TODO can in-place assignments be optimized more? use special memory versions of instructions instead of register ones?
        return translateRegularAssign(assignment)
    }

    private fun translateRegularAssign(assignment: PtAssignment): VmCodeChunk {
        // note: assigning array and string values is done via an explicit memcopy/stringcopy function call.
        if(assignment.target.children.single() is PtMachineRegister)
            throw AssemblyError("assigning to a register should be done by just evaluating the expression into resultregister")

        val ident = assignment.target.identifier
        val memory = assignment.target.memory
        val array = assignment.target.array
        val vmDt = codeGen.vmType(assignment.value.type)
        val code = VmCodeChunk()
        var resultRegister = -1
        var resultFpRegister = -1
        val zero = codeGen.isZero(assignment.value)
        if(!zero) {
            // calculate the assignment value
            if (vmDt == VmDataType.FLOAT) {
                resultFpRegister = codeGen.vmRegisters.nextFreeFloat()
                code += expressionEval.translateExpression(assignment.value, -1, resultFpRegister)
            } else {
                resultRegister = if (assignment.value is PtMachineRegister) {
                    (assignment.value as PtMachineRegister).register
                } else {
                    val reg = codeGen.vmRegisters.nextFree()
                    code += expressionEval.translateExpression(assignment.value, reg, -1)
                    reg
                }
            }
        }
        if(ident!=null) {
            val address = codeGen.allocations.get(ident.targetName)
            code += if(zero) {
                VmCodeInstruction(Opcode.STOREZM, vmDt, value = address)
            } else {
                if (vmDt == VmDataType.FLOAT)
                    VmCodeInstruction(Opcode.STOREM, vmDt, fpReg1 = resultFpRegister, value = address)
                else
                    VmCodeInstruction(Opcode.STOREM, vmDt, reg1 = resultRegister, value = address)
            }
        }
        else if(array!=null) {
            val variable = array.variable.targetName
            var variableAddr = codeGen.allocations.get(variable)
            val itemsize = codeGen.program.memsizer.memorySize(array.type)
            val fixedIndex = constIntValue(array.index)
            if(zero) {
                if(fixedIndex!=null) {
                    variableAddr += fixedIndex*itemsize
                    code += VmCodeInstruction(Opcode.STOREZM, VmDataType.FLOAT, value=variableAddr)
                } else {
                    val indexReg = codeGen.vmRegisters.nextFree()
                    code += loadIndexReg(array, itemsize, indexReg)
                    code += VmCodeInstruction(Opcode.STOREZX, VmDataType.FLOAT, reg1=indexReg, value=variableAddr)
                }
            } else {
                if(vmDt==VmDataType.FLOAT) {
                    if(fixedIndex!=null) {
                        variableAddr += fixedIndex*itemsize
                        code += VmCodeInstruction(Opcode.STOREM, vmDt, fpReg1 = resultFpRegister, value=variableAddr)
                    } else {
                        val indexReg = codeGen.vmRegisters.nextFree()
                        code += loadIndexReg(array, itemsize, indexReg)
                        code += VmCodeInstruction(Opcode.STOREX, vmDt, reg1 = resultRegister, reg2=indexReg, value=variableAddr)
                    }
                } else {
                    if(fixedIndex!=null) {
                        variableAddr += fixedIndex*itemsize
                        code += VmCodeInstruction(Opcode.STOREM, vmDt, reg1 = resultRegister, value=variableAddr)
                    } else {
                        val indexReg = codeGen.vmRegisters.nextFree()
                        code += loadIndexReg(array, itemsize, indexReg)
                        code += VmCodeInstruction(Opcode.STOREX, vmDt, reg1 = resultRegister, reg2=indexReg, value=variableAddr)
                    }
                }
            }
        }
        else if(memory!=null) {
            require(vmDt==VmDataType.BYTE)
            if(zero) {
                if(memory.address is PtNumber) {
                    code += VmCodeInstruction(Opcode.STOREZM, vmDt, value=(memory.address as PtNumber).number.toInt())
                } else {
                    val addressReg = codeGen.vmRegisters.nextFree()
                    code += expressionEval.translateExpression(memory.address, addressReg, -1)
                    code += VmCodeInstruction(Opcode.STOREZI, vmDt, reg1=addressReg)
                }
            } else {
                if(memory.address is PtNumber) {
                    code += VmCodeInstruction(Opcode.STOREM, vmDt, reg1=resultRegister, value=(memory.address as PtNumber).number.toInt())
                } else {
                    val addressReg = codeGen.vmRegisters.nextFree()
                    code += expressionEval.translateExpression(memory.address, addressReg, -1)
                    code += VmCodeInstruction(Opcode.STOREI, vmDt, reg1=resultRegister, reg2=addressReg)
                }
            }
        }
        else
            throw AssemblyError("weird assigntarget")
        return code
    }

    private fun loadIndexReg(array: PtArrayIndexer, itemsize: Int, indexReg: Int): VmCodeChunk {
        val code = VmCodeChunk()
        if(itemsize==1) {
            code += expressionEval.translateExpression(array.index, indexReg, -1)
        }
        else {
            val mult = PtBinaryExpression("*", DataType.UBYTE, array.position)
            mult.children += array.index
            mult.children += PtNumber(DataType.UBYTE, itemsize.toDouble(), array.position)
            code += expressionEval.translateExpression(mult, indexReg, -1)
        }
        return code
    }
}
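A note on the addressing logic in translateRegularAssign and loadIndexReg above: for an array element store, a constant index is folded into the absolute address of a plain STOREM/STOREZM, while a runtime index is first scaled by the element size into a register and then combined with the base address by the indexed STOREX/STOREZX instructions. A standalone sketch of that address arithmetic, in plain Kotlin without the codegen classes:

// Constant index: fold index*itemSize into the operand so an absolute store suffices.
fun foldedAddress(baseAddress: Int, fixedIndex: Int, itemSize: Int): Int =
    baseAddress + fixedIndex * itemSize

// Runtime index: compute index*itemSize into a register (what loadIndexReg emits code for);
// the indexed store then adds that register to the base address at run time.
fun scaledIndex(index: Int, itemSize: Int): Int =
    if (itemSize == 1) index else index * itemSize

fun main() {
    // a uword (2-byte) array at address 0x2000: element [3] lives at 0x2006
    println(foldedAddress(0x2000, 3, 2).toString(16))   // "2006"
    // the same element addressed with a runtime index: offset 6 in the index register
    println(scaledIndex(3, 2))                           // 6
}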


@ -43,6 +43,7 @@ class CodeGen(internal val program: PtProgram,
    internal val allocations = VariableAllocator(symbolTable, program, errors)
    private val expressionEval = ExpressionGen(this)
    private val builtinFuncGen = BuiltinFuncGen(this, expressionEval)
    private val assignmentGen = AssignmentGen(this, expressionEval)
    internal val vmRegisters = VmRegisterPool()

    override fun compileToAssembly(): IAssemblyProgram? {
@ -52,7 +53,7 @@ class CodeGen(internal val program: PtProgram,
        // collect global variables initializers
        program.allBlocks().forEach {
            val code = VmCodeChunk()
-           it.children.filterIsInstance<PtAssignment>().forEach { assign -> code += translate(assign) }
+           it.children.filterIsInstance<PtAssignment>().forEach { assign -> code += assignmentGen.translate(assign) }
            vmprog.addGlobalInits(code)
        }
@ -75,7 +76,7 @@ class CodeGen(internal val program: PtProgram,
            is PtVariable -> VmCodeChunk() // var should be looked up via symbol table
            is PtMemMapped -> VmCodeChunk() // memmapped var should be looked up via symbol table
            is PtConstant -> VmCodeChunk() // constants have all been folded into the code
-           is PtAssignment -> translate(node)
+           is PtAssignment -> assignmentGen.translate(node)
            is PtNodeGroup -> translateGroup(node.children)
            is PtBuiltinFunctionCall -> translateBuiltinFunc(node, 0)
            is PtFunctionCall -> expressionEval.translate(node, 0, 0)
@ -651,120 +652,6 @@ class CodeGen(internal val program: PtProgram,
        return code
    }
    private fun translate(assignment: PtAssignment): VmCodeChunk {
        // TODO can in-place assignments be optimized more? use special memory versions of instructions instead of register ones?
        // note: assigning array and string values is done via an explicit memcopy/stringcopy function call.
        if(assignment.target.children.single() is PtMachineRegister)
            throw AssemblyError("assigning to a register should be done by just evaluating the expression into resultregister")

        val code = VmCodeChunk()
        val ident = assignment.target.identifier
        val memory = assignment.target.memory
        val array = assignment.target.array
        val vmDt = vmType(assignment.value.type)
        var resultRegister = -1
        var resultFpRegister = -1
        val zero = isZero(assignment.value)
        if(!zero) {
            // calculate the assignment value
            if (vmDt == VmDataType.FLOAT) {
                resultFpRegister = vmRegisters.nextFreeFloat()
                code += expressionEval.translateExpression(assignment.value, -1, resultFpRegister)
            } else {
                resultRegister = if (assignment.value is PtMachineRegister) {
                    (assignment.value as PtMachineRegister).register
                } else {
                    val reg = vmRegisters.nextFree()
                    code += expressionEval.translateExpression(assignment.value, reg, -1)
                    reg
                }
            }
        }
        if(ident!=null) {
            val address = allocations.get(ident.targetName)
            code += if(zero) {
                VmCodeInstruction(Opcode.STOREZM, vmDt, value = address)
            } else {
                if (vmDt == VmDataType.FLOAT)
                    VmCodeInstruction(Opcode.STOREM, vmDt, fpReg1 = resultFpRegister, value = address)
                else
                    VmCodeInstruction(Opcode.STOREM, vmDt, reg1 = resultRegister, value = address)
            }
        }
        else if(array!=null) {
            val variable = array.variable.targetName
            var variableAddr = allocations.get(variable)
            val itemsize = program.memsizer.memorySize(array.type)
            val fixedIndex = constIntValue(array.index)
            if(zero) {
                if(fixedIndex!=null) {
                    variableAddr += fixedIndex*itemsize
                    code += VmCodeInstruction(Opcode.STOREZM, VmDataType.FLOAT, value=variableAddr)
                } else {
                    val indexReg = vmRegisters.nextFree()
                    code += loadIndexReg(array, itemsize, indexReg)
                    code += VmCodeInstruction(Opcode.STOREZX, VmDataType.FLOAT, reg1=indexReg, value=variableAddr)
                }
            } else {
                if(vmDt==VmDataType.FLOAT) {
                    if(fixedIndex!=null) {
                        variableAddr += fixedIndex*itemsize
                        code += VmCodeInstruction(Opcode.STOREM, vmDt, fpReg1 = resultFpRegister, value=variableAddr)
                    } else {
                        val indexReg = vmRegisters.nextFree()
                        code += loadIndexReg(array, itemsize, indexReg)
                        code += VmCodeInstruction(Opcode.STOREX, vmDt, reg1 = resultRegister, reg2=indexReg, value=variableAddr)
                    }
                } else {
                    if(fixedIndex!=null) {
                        variableAddr += fixedIndex*itemsize
                        code += VmCodeInstruction(Opcode.STOREM, vmDt, reg1 = resultRegister, value=variableAddr)
                    } else {
                        val indexReg = vmRegisters.nextFree()
                        code += loadIndexReg(array, itemsize, indexReg)
                        code += VmCodeInstruction(Opcode.STOREX, vmDt, reg1 = resultRegister, reg2=indexReg, value=variableAddr)
                    }
                }
            }
        }
        else if(memory!=null) {
            require(vmDt==VmDataType.BYTE)
            if(zero) {
                if(memory.address is PtNumber) {
                    code += VmCodeInstruction(Opcode.STOREZM, vmDt, value=(memory.address as PtNumber).number.toInt())
                } else {
                    val addressReg = vmRegisters.nextFree()
                    code += expressionEval.translateExpression(memory.address, addressReg, -1)
                    code += VmCodeInstruction(Opcode.STOREZI, vmDt, reg1=addressReg)
                }
            } else {
                if(memory.address is PtNumber) {
                    code += VmCodeInstruction(Opcode.STOREM, vmDt, reg1=resultRegister, value=(memory.address as PtNumber).number.toInt())
                } else {
                    val addressReg = vmRegisters.nextFree()
                    code += expressionEval.translateExpression(memory.address, addressReg, -1)
                    code += VmCodeInstruction(Opcode.STOREI, vmDt, reg1=resultRegister, reg2=addressReg)
                }
            }
        }
        else
            throw AssemblyError("weird assigntarget")
        return code
    }

    private fun loadIndexReg(array: PtArrayIndexer, itemsize: Int, indexReg: Int): VmCodeChunk {
        val code = VmCodeChunk()
        if(itemsize==1) {
            code += expressionEval.translateExpression(array.index, indexReg, -1)
        }
        else {
            val mult = PtBinaryExpression("*", DataType.UBYTE, array.position)
            mult.children += array.index
            mult.children += PtNumber(DataType.UBYTE, itemsize.toDouble(), array.position)
            code += expressionEval.translateExpression(mult, indexReg, -1)
        }
        return code
    }
    private fun translate(ret: PtReturn): VmCodeChunk {
        val code = VmCodeChunk()
        val value = ret.value
@ -828,3 +715,4 @@ class CodeGen(internal val program: PtProgram,
    internal fun isOne(expression: PtExpression): Boolean = expression is PtNumber && expression.number==1.0
}


@ -3,7 +3,8 @@ TODO
For next release
^^^^^^^^^^^^^^^^
-- vm: add more instructions operating directly on memory instead of only registers? (translate assignment self-assigns)
+- vm: implement the sin/cos functions in math.p8 and make an example 'shader' that uses them
+- vm: add more instructions operating directly on memory instead of only registers? (translate assignment self-assigns in AssignmentGen)
- complete the Inliner
- add McCarthy evaluation to shortcircuit and/or expressions. First do ifs by splitting them up? Then do expressions that compute a value?
@ -23,8 +24,6 @@ Compiler:
- vm: codeGen: various TODOs to tweak code
- vm: somehow deal with asmsubs otherwise the vm IR can't fully encode all of prog8
-- vm: make registers typed? so that it's immediately obvious what type they represent. Much like regular variables in memory.
-  so we have a set of byte registers, a set of word registers, and other sets if we introduce other types.
- vm: don't store symbol names in instructions to make optimizing the IR easier? but what about jumps to labels. And it's no longer readable by humans.
- vm: how to remove all unused subroutines? (in the assembly codegen, we let 64tass solve this for us)
- vm: rather than being able to jump to any 'address' (IPTR), use 'blocks' that have entry and exit points -> even better dead code elimination possible too
@ -53,7 +52,7 @@ Libraries:
- optimize several inner loops in gfx2 even further?
- add modes 2 and 3 to gfx2 (lowres 4 color and 16 color)?
- add a flood fill routine to gfx2?
-- diskio: use cx16 MACPTR() to load stuff faster? (see its use in X16edit to fast load blocks)
+- diskio: use cx16 MACPTR() in f_read() to load stuff faster? (see its use in X16edit to fast load blocks)
  note that it might fail on non sdcard files so have to make graceful degradation

Expressions:


@ -1,114 +1,22 @@
-%import textio
%import math
-%import string
-%import floats
-
-%zeropage dontuse

; NOTE: meant to test to virtual machine output target (use -target vitual)

main {
-    ubyte value = 42
-
-    sub inline_candidate() -> ubyte {
-        return math.sin8u(value)
-    }
-
-    sub add(ubyte first, ubyte second) -> ubyte {
-        return first + second
-    }
-
-    sub mul(ubyte first, ubyte second) -> ubyte {
-        return first * second
-    }
-
-    ubyte ix

    sub start() {
-        uword[] array = [1111,2222,3333,4444]
-
-        sub pa() {
-            uword ww
-            for ww in array {
-                txt.print_uw(ww)
-                txt.spc()
-            }
-            txt.nl()
-        }
-
-; pa()
-; array[2] = 9999
-; pa()
-        ix=2
-        array[ix]= 8888     ; TODO fix indexing offset in vm
-        pa()
-        txt.print_uw(array[ix])
-        txt.spc()
-        txt.print_uw(array[ix+1])
-        txt.nl()
-
-; ubyte @shared value = inline_candidate()
-; txt.print_ub(inline_candidate())
-; txt.nl()
-; ubyte value = add(3,4) |> add(10) |> mul(2) |> math.sin8u()
-; txt.print_ub(value)
-; txt.nl()
-; uword wvalue = add(3,4) |> add($30) |> mkword($ea)
-; txt.print_uwhex(wvalue, true)
-; txt.nl()
-
-        ; expected output: aaabbb aaa bbb
-
-; float f1 = 1.555
-; floats.print_f(floats.sin(f1))
-; txt.nl()
-; floats.print_f(floats.cos(f1))
-; txt.nl()
-; floats.print_f(floats.tan(f1))
-; txt.nl()
-; floats.print_f(floats.atan(f1))
-; txt.nl()
-; floats.print_f(floats.ln(f1))
-; txt.nl()
-; floats.print_f(floats.log2(f1))
-; txt.nl()
-; floats.print_f(floats.sqrt(f1))
-; txt.nl()
-; floats.print_f(floats.rad(f1))
-; txt.nl()
-; floats.print_f(floats.deg(f1))
-; txt.nl()
-; floats.print_f(floats.round(f1))
-; txt.nl()
-; floats.print_f(floats.floor(f1))
-; txt.nl()
-; floats.print_f(floats.ceil(f1))
-; txt.nl()
-; floats.print_f(floats.rndf())
-; txt.nl()
-
-; "sin", "cos", "tan", "atan",
-; "ln", "log2", "sqrt", "rad",
-; "deg", "round", "floor", "ceil", "rndf"
-
-        ; a "pixelshader":
-; sys.gfx_enable(0)       ; enable lo res screen
-; ubyte shifter
-;
-; repeat {
-;     uword xx
-;     uword yy = 0
-;     repeat 240 {
-;         xx = 0
-;         repeat 320 {
-;             sys.gfx_plot(xx, yy, xx*yy + shifter as ubyte)
-;             xx++
-;         }
-;         yy++
-;     }
-;     shifter+=4
-; }
+        ; a "pixelshader":
+        sys.gfx_enable(0)       ; enable lo res screen
+        ubyte angle
+
+        for angle in 0 to 255 {
+            ubyte xx = math.sin8u(angle)
+            ubyte yy = math.cos8u(angle)
+            sys.gfx_plot(xx, yy, 255)
+        }
+
+        repeat {
+        }
    }
}
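The rewritten test program plots one pixel per 8-bit angle, so it traces a circle on the lo-res screen. A rough standalone sketch of the coordinates it produces, assuming sin8u and cos8u map an angle of 0..255 onto one full period scaled into the unsigned 0..255 range (the exact rounding of prog8's table-driven math routines may differ):

import kotlin.math.PI
import kotlin.math.cos
import kotlin.math.roundToInt
import kotlin.math.sin

// Assumed approximation of math.sin8u / math.cos8u: one full period over 256 angle
// steps, scaled and offset into 0..255. The real prog8 routines are table-driven.
fun sin8u(angle: Int): Int = (127.5 + 127.5 * sin(angle * 2.0 * PI / 256.0)).roundToInt()
fun cos8u(angle: Int): Int = (127.5 + 127.5 * cos(angle * 2.0 * PI / 256.0)).roundToInt()

fun main() {
    // Same loop shape as the prog8 example: one plotted pixel per angle step.
    for (angle in 0..255) {
        val xx = sin8u(angle)
        val yy = cos8u(angle)
        println("plot($xx, $yy)")   // points on a circle of radius ~127, roughly centred at (128, 128)
    }
}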