prog8 (mirror of https://github.com/irmen/prog8.git)

IR code blocks now better SSA basic blocks (ending with single branch instruction)
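In SSA terms, a basic block has a single entry (the top of the chunk, via its label) and a single exit: at most one branch or return instruction, and only as the very last instruction. The invariant this commit works towards for IRCodeChunk can be sketched like this (an illustrative helper built on the OpcodesThatEndSSAblock set introduced further down; it is not code from the commit):

    // A chunk qualifies as a proper SSA basic block when no branch/return occurs
    // before its final instruction. The final instruction itself may be a branch,
    // a return, or any other instruction, in which case control falls through
    // to chunk.next.
    fun isProperSSAblock(chunk: IRCodeChunk): Boolean =
        chunk.instructions.dropLast(1).none { it.opcode in OpcodesThatEndSSAblock }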
@@ -47,6 +47,7 @@ class IRCodeGen(
         ensureFirstChunkLabels(irProg)
         irProg.linkChunks()
         irProg.convertAsmChunks()
+        irProg.splitSSAchunks()

         // the optimizer also does 1 essential step regardless of optimizations: joining adjacent chunks.
         val optimizer = IRPeepholeOptimizer(irProg)

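Note the ordering: the new splitSSAchunks() pass runs after chunk linking and inline-asm conversion, but before IRPeepholeOptimizer, whose chunk-joining step (next hunk) is updated so it will not simply merge the freshly split chunks back together.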
@@ -161,10 +161,10 @@ class IRPeepholeOptimizer(private val irprog: IRProgram) {
         if(chunk.label!=null)
             return false
         if(previous is IRCodeChunk && chunk is IRCodeChunk) {
-            // if the previous chunk doesn't end in a jump or a return, flow continues into the next chunk
+            // if the previous chunk doesn't end in a SSA branching instruction, flow continues into the next chunk, so they may be joined
             val lastInstruction = previous.instructions.lastOrNull()
             if(lastInstruction!=null)
-                return lastInstruction.opcode !in OpcodesThatJump
+                return lastInstruction.opcode !in OpcodesThatEndSSAblock
             return true
         }
         return false

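The switch from OpcodesThatJump to OpcodesThatEndSSAblock matters here: a chunk ending in a conditional branch (e.g. BSTEQ) is now a complete basic block too, and joining it with its successor would put a branch back in the middle of the merged chunk. A minimal sketch of the resulting join rule (illustrative helper, not the actual method):

    // Two adjacent code chunks may be joined only when control simply falls
    // through: the second chunk has no label (so no jumps target it) and the
    // first chunk doesn't end in any block-ending branch/return instruction.
    fun mayJoin(previous: IRCodeChunk, chunk: IRCodeChunk): Boolean {
        if (chunk.label != null) return false
        val last = previous.instructions.lastOrNull() ?: return true
        return last.opcode !in OpcodesThatEndSSAblock
    }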
@@ -300,16 +300,16 @@ class IRPeepholeOptimizer(private val irprog: IRProgram) {
             // remove useless RETURN
             if(idx>0 && (ins.opcode == Opcode.RETURN || ins.opcode==Opcode.RETURNR || ins.opcode==Opcode.RETURNI)) {
                 val previous = chunk.instructions[idx-1]
-                if(previous.opcode in OpcodesThatJump) {
+                if(previous.opcode in OpcodesThatBranchUnconditionally) {
                     chunk.instructions.removeAt(idx)
                     changed = true
                 }
             }

             // replace subsequent opcodes that jump by just the first
-            if(idx>0 && (ins.opcode in OpcodesThatJump)) {
+            if(idx>0 && (ins.opcode in OpcodesThatBranchUnconditionally)) {
                 val previous = chunk.instructions[idx-1]
-                if(previous.opcode in OpcodesThatJump) {
+                if(previous.opcode in OpcodesThatBranchUnconditionally) {
                     chunk.instructions.removeAt(idx)
                     changed = true
                 }

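Both rules only remove instructions that directly follow an unconditional branch, i.e. code that can never be reached by fall-through. For example (p8_label_gen_2 comes from the next hunk's header; the second jump target is made up):

    jump p8_label_gen_2
    return              ; unreachable: removed by the "useless RETURN" rule
    jump some_label     ; a second unconditional branch in a row is removed as well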
@@ -362,7 +362,7 @@ jump p8_label_gen_2
         */
         var changed=false
         indexedInstructions.reversed().forEach { (idx, ins) ->
-            if(idx>=2 && ins.opcode in OpcodesThatJump) {
+            if(idx>=2 && ins.opcode in OpcodesThatBranchUnconditionally) {
                 val previous = indexedInstructions[idx-1].value
                 val previous2 = indexedInstructions[idx-2].value
                 if(previous.opcode==Opcode.LOADR && previous2.opcode in OpcodesThatLoad) {

@@ -341,7 +341,7 @@ class TestVmCodeGen: FunSpec({
         val errors = ErrorReporterForTests()
         val result = codegen.generate(program, st, options, errors) as VmAssemblyProgram
         val irChunks = (result.irProgram.blocks.first().children.single() as IRSubroutine).chunks
-        irChunks.size shouldBe 1
+        irChunks.size shouldBe 2
     }

     test("integer comparison expressions against zero") {

@@ -537,7 +537,7 @@ class TestVmCodeGen: FunSpec({
         val errors = ErrorReporterForTests()
         val result = codegen.generate(program, st, options, errors) as VmAssemblyProgram
         val irChunks = (result.irProgram.blocks.first().children.single() as IRSubroutine).chunks
-        irChunks.size shouldBe 1
+        irChunks.size shouldBe 2
     }

     test("extsub allowed in ir-codegen") {

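The expected chunk counts change from 1 to 2 because the subroutine bodies in these tests contain a branch before their final instruction; splitSSAchunks() now cuts them into two chunks at that point.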
@@ -763,7 +763,6 @@ main {
     test("address-of struct fields") {
         val src="""
 %import floats
-%import textio

 main {
     struct List {

@@ -1458,7 +1457,6 @@ other {
     test("passing arrays to subroutines via typed pointer parameters") {
         val src="""
 %import floats
-%import textio

 main {
     struct Node {

@@ -1,6 +1,9 @@
 TODO
 ====

+IR: rename <CODE> to <CHUNK>, put the code lines in a new sub node <CODE> for improved xml structure.
+TestVMCodegen: add tests for SSA block split/joins/check last instruction to be a valid SSA block ender
+

 STRUCTS and TYPED POINTERS
 --------------------------

@@ -80,7 +83,6 @@ IR/VM
 - implement more TODOs in AssignmentGen
 - do something with the 'split' tag on split word arrays
 - add more optimizations in IRPeepholeOptimizer
-- apparently for SSA form, the IRCodeChunk is not a proper "basic block" yet because the last operation should be a branch or return, and no other branches
 - reduce register usage via linear-scan algorithm (based on live intervals) https://anoopsarkar.github.io/compilers-class/assets/lectures/opt3-regalloc-linearscan.pdf
   don't forget to take into account the data type of the register when it's going to be reused!
 - idea: (but LLVM IR simply keeps the variables, so not a good idea then?...): replace all scalar variables by an allocated register. Keep a table of the variable to register mapping (including the datatype)

@@ -1,15 +1,30 @@
-main {
+%import textio
+%zeropage basicsafe
+
+main {
     sub start() {
-        uword @shared value
+        cx16.r0 = 200
+        cx16.r1 = 0
+        plot_particles()
+        txt.print_uw(cx16.r1)
+        txt.print(" expected 0\n")

-        if msb(value)>0
-            cx16.r0++
+        cx16.r0 = 500
+        cx16.r1 = 0
+        plot_particles()
+        txt.print_uw(cx16.r1)
+        txt.print(" expected 1\n")

-        if lsb(value)>0
-            cx16.r0++
+        cx16.r0 = 1
+        cx16.r1 = 0
+        plot_particles()
+        txt.print_uw(cx16.r1)
+        txt.print(" expected 1\n")
+    }

-        value = mkword(cx16.r0L, cx16.r1L)
-        if_z
-            cx16.r0++
+    sub plot_particles() {
+        if cx16.r0<10 or cx16.r0>319 {
+            cx16.r1++
+        }
     }
 }

@@ -427,7 +427,7 @@ enum class Opcode {
     ALIGN
 }

-val OpcodesThatJump = arrayOf(
+val OpcodesThatBranchUnconditionally = arrayOf(
     Opcode.JUMP,
     Opcode.JUMPI,
     Opcode.RETURN,

@@ -435,12 +435,7 @@ val OpcodesThatJump = arrayOf(
     Opcode.RETURNI
 )

-val OpcodesThatBranch = arrayOf(
-    Opcode.JUMP,
-    Opcode.JUMPI,
-    Opcode.RETURN,
-    Opcode.RETURNR,
-    Opcode.RETURNI,
+val OpcodesThatBranch = OpcodesThatBranchUnconditionally + arrayOf(
     Opcode.CALLI,
     Opcode.CALL,
     Opcode.CALLFAR,

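Deriving OpcodesThatBranch from OpcodesThatBranchUnconditionally removes the five duplicated jump/return entries and guarantees the unconditional subset cannot drift out of sync with the full set when opcodes are added later.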
@@ -468,6 +463,29 @@ val OpcodesThatBranch = arrayOf(
     Opcode.BLES
 )

+val OpcodesThatEndSSAblock = OpcodesThatBranchUnconditionally + arrayOf(
+    Opcode.BSTCC,
+    Opcode.BSTCS,
+    Opcode.BSTEQ,
+    Opcode.BSTNE,
+    Opcode.BSTNEG,
+    Opcode.BSTPOS,
+    Opcode.BSTVC,
+    Opcode.BSTVS,
+    Opcode.BGTR,
+    Opcode.BGT,
+    Opcode.BLT,
+    Opcode.BGTSR,
+    Opcode.BGTS,
+    Opcode.BLTS,
+    Opcode.BGER,
+    Opcode.BGE,
+    Opcode.BLE,
+    Opcode.BGESR,
+    Opcode.BGES,
+    Opcode.BLES
+)
+
 val OpcodesThatSetStatusbitsIncludingCarry = arrayOf(
     Opcode.BIT,
     Opcode.CMP,

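This leaves three related opcode sets. Their relationship, written out as illustrative assertions (not code from the commit):

    check(Opcode.JUMP in OpcodesThatBranchUnconditionally)    // jump/return: never falls through
    check(Opcode.BSTEQ in OpcodesThatEndSSAblock)             // conditional branch: ends a block...
    check(Opcode.BSTEQ !in OpcodesThatBranchUnconditionally)  // ...but may still fall through
    check(Opcode.CALL in OpcodesThatBranch)                   // calls transfer control...
    check(Opcode.CALL !in OpcodesThatEndSSAblock)             // ...but return here, so they don't end a block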
@@ -144,11 +144,11 @@ class IRProgram(val name: String,
     fun linkCodeChunk(chunk: IRCodeChunk, next: IRCodeChunkBase?) {
         // link sequential chunks
         val jump = chunk.instructions.lastOrNull()?.opcode
-        if (jump == null || jump !in OpcodesThatJump) {
+        if (jump == null || jump !in OpcodesThatBranchUnconditionally) {
             // no jump at the end, so link to next chunk (if it exists)
             if(next!=null) {
                 when (next) {
-                    is IRCodeChunk if chunk.instructions.lastOrNull()?.opcode !in OpcodesThatJump -> chunk.next = next
+                    is IRCodeChunk if chunk.instructions.lastOrNull()?.opcode !in OpcodesThatBranchUnconditionally -> chunk.next = next
                     is IRInlineAsmChunk -> chunk.next = next
                     is IRInlineBinaryChunk -> chunk.next =next
                     else -> throw AssemblyError("code chunk followed by invalid chunk type $next")

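The renames in this and the following linking hunks are not cosmetic: a chunk ending in a conditional branch must still be linked to its successor, because the not-taken path falls through. Only an unconditional jump or return severs the fall-through edge. Condensed (illustrative helper, not the actual code):

    // A chunk keeps a fall-through 'next' edge to its successor unless its
    // last instruction branches unconditionally (a jump or a return).
    fun hasFallThrough(chunk: IRCodeChunk): Boolean {
        val last = chunk.instructions.lastOrNull()?.opcode
        return last == null || last !in OpcodesThatBranchUnconditionally
    }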
@@ -181,7 +181,7 @@ class IRProgram(val name: String,
             is IRInlineAsmChunk -> {
                 if(next!=null) {
                     val lastInstr = chunk.instructions.lastOrNull()
-                    if(lastInstr==null || lastInstr.opcode !in OpcodesThatJump)
+                    if(lastInstr==null || lastInstr.opcode !in OpcodesThatBranchUnconditionally)
                         chunk.next = next
                 }
             }

@@ -210,7 +210,7 @@ class IRProgram(val name: String,
             if (chunk is IRCodeChunk) {
                 if(!emptyChunkIsAllowed)
                     require(chunk.instructions.isNotEmpty() || chunk.label != null)
-                if(chunk.instructions.lastOrNull()?.opcode in OpcodesThatJump)
+                if(chunk.instructions.lastOrNull()?.opcode in OpcodesThatBranchUnconditionally)
                     require(chunk.next == null) { "chunk ending with a jump or return shouldn't be linked to next" }
                 else if (sub!=null) {
                     // if chunk is NOT the last in the block, it needs to link to next.

@@ -316,7 +316,7 @@ class IRProgram(val name: String,
                     chunks += chunk
                     chunk = IRCodeChunk(label, null)
                     val lastInstr = lastChunk.instructions.lastOrNull()
-                    if(lastInstr==null || lastInstr.opcode !in OpcodesThatJump)
+                    if(lastInstr==null || lastInstr.opcode !in OpcodesThatBranchUnconditionally)
                         lastChunk.next = chunk
                 }
             )

@@ -325,7 +325,7 @@ class IRProgram(val name: String,
         chunks += chunk
         chunks.lastOrNull()?.let {
             val lastInstr = it.instructions.lastOrNull()
-            if(lastInstr==null || lastInstr.opcode !in OpcodesThatJump)
+            if(lastInstr==null || lastInstr.opcode !in OpcodesThatBranchUnconditionally)
                 it.next = asmChunk.next
         }
         return chunks

@@ -361,6 +361,78 @@ class IRProgram(val name: String,
             }
         }
     }

+    fun splitSSAchunks() {
+
+        class SplitInfo(val chunk: IRCodeChunkBase, val splitAt: Int, val blockParent: IRBlock?, val subParent: IRSubroutine?, val chunkIndex: Int)
+
+        val tosplit = mutableListOf<SplitInfo>()
+
+        fun split(chunk: IRCodeChunk, parent: IRBlock, chunkIndex: Int) {
+            chunk.instructions.withIndex().forEach { (index, instr) ->
+                if(instr.opcode in OpcodesThatEndSSAblock) {
+                    if(instr !== chunk.instructions.last()) {
+                        // to be a proper SSA basic block, this instruction has to be the last one in the block.
+                        // split the current chunk and link both halves together using the next pointer
+                        tosplit += SplitInfo(chunk, index, parent, null, chunkIndex)
+                    }
+                }
+            }
+        }
+
+        fun split(chunk: IRCodeChunk, parent: IRSubroutine, chunkIndex: Int) {
+            chunk.instructions.withIndex().forEach { (index, instr) ->
+                if(instr.opcode in OpcodesThatEndSSAblock) {
+                    if(instr !== chunk.instructions.last()) {
+                        // to be a proper SSA basic block, this instruction has to be the last one in the block.
+                        // split the current chunk and link both halves together using the next pointer
+                        tosplit += SplitInfo(chunk, index, null, parent, chunkIndex)
+                    }
+                }
+            }
+        }
+
+        this.blocks.forEach { block ->
+            block.children.withIndex().forEach { (index, child) ->
+                when(child) {
+                    is IRCodeChunk -> split(child, block, index)
+                    is IRSubroutine -> child.chunks.withIndex().forEach { (index2, chunk) ->
+                        if(chunk is IRCodeChunk)
+                            split(chunk, child, index2)
+                    }
+                    else -> {}
+                }
+            }
+        }
+
+        for(split in tosplit.reversed()) {
+            val chunk = split.chunk
+            if(split.blockParent!=null)
+                require(chunk===split.blockParent.children[split.chunkIndex])
+            else
+                require(chunk===split.subParent!!.chunks[split.chunkIndex])
+            val totalSize = chunk.instructions.size
+            val first = chunk.instructions.dropLast(totalSize-split.splitAt-1)
+            val second = chunk.instructions.drop(split.splitAt+1)
+            chunk.instructions.clear()
+            chunk.instructions.addAll(first)
+            val secondChunk = IRCodeChunk(null, chunk.next)
+            secondChunk.instructions.addAll(second)
+            require(chunk.instructions.last().opcode in OpcodesThatEndSSAblock)
+            require(chunk.instructions.size + secondChunk.instructions.size == totalSize)
+            if(chunk.instructions.last().opcode !in OpcodesThatBranchUnconditionally) {
+                chunk.next = secondChunk
+                if(split.blockParent!=null) split.blockParent.children.add(split.chunkIndex+1, secondChunk)
+                else split.subParent!!.chunks.add(split.chunkIndex+1, secondChunk)
+                // println("split chunk ${chunk.label} at ${split.splitAt}: ${chunk.instructions[split.splitAt]} ${totalSize} = ${chunk.instructions.size}+${secondChunk.instructions.size}")
+            } else {
+                // shouldn't occur , unreachable code in second chunk?
+                chunk.next = null
+                // println("REMOVED UNREACHABLE CODE")
+            }
+        }
+    }
+
+
     fun verifyRegisterTypes(registerTypes: Map<Int, IRDataType>) {
         for(block in blocks) {
             for(bc in block.children) {

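The effect of splitSSAchunks() on one chunk, sketched with made-up IR mnemonics and labels (the real instruction syntax may differ):

    ; before: a conditional branch sits mid-chunk, so this is not a basic block
    chunk "start":
        load.b r1,10
        bsteq  some_label     ; block-ending opcode, but not the last instruction
        incm.b counter
        return

    ; after: the chunk is cut right after the branch; because a conditional
    ; branch falls through when not taken, the first half keeps a 'next' link
    ; to the new unlabeled second half
    chunk "start":
        load.b r1,10
        bsteq  some_label
    chunk (no label, next of "start"):
        incm.b counter
        return

If the mid-chunk instruction had been an unconditional jump or return instead, the tail would be unreachable and the code takes the else branch above: next is set to null and the second half is dropped.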