mirror of
https://github.com/irmen/prog8.git
synced 2024-12-25 23:29:55 +00:00
added asm optimization for same pointer index
This commit is contained in:
parent
7ef4ddf0f3
commit
f249ccd414
@ -59,6 +59,13 @@ internal fun optimizeAssembly(lines: MutableList<String>, machine: IMachineDefin
|
|||||||
numberOfOptimizations++
|
numberOfOptimizations++
|
||||||
}
|
}
|
||||||
|
|
||||||
|
mods = optimizeSamePointerIndexing(linesByFourteen, machine, program)
|
||||||
|
if(mods.isNotEmpty()) {
|
||||||
|
apply(mods, lines)
|
||||||
|
linesByFourteen = getLinesBy(lines, 14)
|
||||||
|
numberOfOptimizations++
|
||||||
|
}
|
||||||
|
|
||||||
// TODO more assembly peephole optimizations
|
// TODO more assembly peephole optimizations
|
||||||
|
|
||||||
return numberOfOptimizations
|
return numberOfOptimizations
|
||||||
@ -320,6 +327,48 @@ private fun optimizeSameAssignments(linesByFourteen: List<List<IndexedValue<Stri
|
|||||||
return mods
|
return mods
|
||||||
}
|
}
|
||||||
|
|
||||||
|
private fun optimizeSamePointerIndexing(linesByFourteen: List<List<IndexedValue<String>>>, machine: IMachineDefinition, program: Program): List<Modification> {
    // Remove a redundant second LDY when a value is loaded and then stored back
    // through the same pointer with the same Y index, and the instruction(s) in
    // between don't touch the Y register:
    //        ldy  #0
    //        lda  (ptr),y
    //        ora  #3       ; <-- instruction(s) that don't modify Y
    //        ldy  #0       ; <-- redundant, can be removed
    //        sta  (ptr),y
    // NOTE(review): "letter 'y' not in the instruction text" is only a heuristic
    // for "doesn't modify Y" -- e.g. a jsr in between could clobber Y without
    // containing a 'y'; confirm the sequences fed here are simple instructions.
    val mods = mutableListOf<Modification>()

    for (window in linesByFourteen) {
        // Only the first six lines of each window participate in the patterns below.
        val ops = List(6) { window[it].value.trimStart() }

        // Pattern A: one intervening instruction  (ldy / lda / X / ldy / sta)
        if (ops[0].startsWith("ldy") && ops[1].startsWith("lda") && ops[3].startsWith("ldy") && ops[4].startsWith("sta")) {
            val ldy1 = ops[0].substring(4)
            val lda = ops[1].substring(4)
            val ldy2 = ops[3].substring(4)
            val sta = ops[4].substring(4)
            val matches = "y" !in ops[2] &&
                ldy1 == ldy2 &&
                lda == sta &&
                lda.endsWith(",y") &&
                sta.endsWith(",y")
            if (matches)
                mods.add(Modification(window[3].index, true, null))   // drop the second ldy
        }

        // Pattern B: two intervening instructions  (ldy / lda / X / X / ldy / sta)
        if (ops[0].startsWith("ldy") && ops[1].startsWith("lda") && ops[4].startsWith("ldy") && ops[5].startsWith("sta")) {
            val ldy1 = ops[0].substring(4)
            val lda = ops[1].substring(4)
            val ldy2 = ops[4].substring(4)
            val sta = ops[5].substring(4)
            val matches = "y" !in ops[2] &&
                "y" !in ops[3] &&
                ldy1 == ldy2 &&
                lda == sta &&
                lda.endsWith(",y") &&
                sta.endsWith(",y")
            if (matches)
                mods.add(Modification(window[4].index, true, null))   // drop the second ldy
        }
    }

    return mods
}
|
||||||
|
|
||||||
private fun optimizeStoreLoadSame(linesByFour: List<List<IndexedValue<String>>>, machine: IMachineDefinition, program: Program): List<Modification> {
|
private fun optimizeStoreLoadSame(linesByFour: List<List<IndexedValue<String>>>, machine: IMachineDefinition, program: Program): List<Modification> {
|
||||||
// sta X + lda X, sty X + ldy X, stx X + ldx X -> the second instruction can OFTEN be eliminated
|
// sta X + lda X, sty X + ldy X, stx X + ldx X -> the second instruction can OFTEN be eliminated
|
||||||
val mods = mutableListOf<Modification>()
|
val mods = mutableListOf<Modification>()
|
||||||
|
@ -3,12 +3,6 @@ TODO
|
|||||||
|
|
||||||
For next release
|
For next release
|
||||||
^^^^^^^^^^^^^^^^
|
^^^^^^^^^^^^^^^^
|
||||||
- @(ptr) |= 3 -> asm peephole optimization: remove the second ldy if the instruction(s) before it don't modify y
|
|
||||||
ldy #0
|
|
||||||
lda (starfieldPtr2),y
|
|
||||||
ora #3
|
|
||||||
ldy #0
|
|
||||||
sta (starfieldPtr2),y
|
|
||||||
- vm: intermediate code: don't flatten everything. Instead, as a new intermediary step,
|
- vm: intermediate code: don't flatten everything. Instead, as a new intermediary step,
|
||||||
convert the new Ast into *structured* intermediary code.
|
convert the new Ast into *structured* intermediary code.
|
||||||
Basically keep the blocks and subroutines structure, including full subroutine signature information,
|
Basically keep the blocks and subroutines structure, including full subroutine signature information,
|
||||||
|
Loading…
Reference in New Issue
Block a user