From ccf6e32bf92b4561d923d3990c268f8b10354bbf Mon Sep 17 00:00:00 2001 From: Irmen de Jong Date: Sun, 17 Sep 2023 03:48:16 +0200 Subject: [PATCH] adding setlsb() and setmsb() builtin functions to 6502 codegen --- .../codegen/cpu6502/BuiltinFunctionsAsmGen.kt | 77 ++++++--- .../cpu6502/assignment/AssignmentAsmGen.kt | 155 +++++++++--------- .../codegen/intermediate/BuiltinFuncGen.kt | 125 +++++++++----- docs/source/todo.rst | 2 +- examples/test.p8 | 10 +- syntax-files/NotepadPlusPlus/Prog8.xml | 2 +- syntax-files/Vim/prog8_builtins.vim | 2 +- 7 files changed, 227 insertions(+), 146 deletions(-) diff --git a/codeGenCpu6502/src/prog8/codegen/cpu6502/BuiltinFunctionsAsmGen.kt b/codeGenCpu6502/src/prog8/codegen/cpu6502/BuiltinFunctionsAsmGen.kt index d24eb4e5d..e08b7ca87 100644 --- a/codeGenCpu6502/src/prog8/codegen/cpu6502/BuiltinFunctionsAsmGen.kt +++ b/codeGenCpu6502/src/prog8/codegen/cpu6502/BuiltinFunctionsAsmGen.kt @@ -612,51 +612,82 @@ internal class BuiltinFunctionsAsmGen(private val program: PtProgram, } private fun funcSetLsbMsb(fcall: PtBuiltinFunctionCall, msb: Boolean) { - asmgen.assignExpressionToRegister(fcall.args[1], RegisterOrPair.A, false) - - val address: PtExpression + val target: AsmAssignTarget when(fcall.args[0]) { is PtIdentifier -> { - if(msb) { - address = PtBinaryExpression("+", DataType.UWORD, fcall.args[0].position) - val addressOf = PtAddressOf(fcall.position) - addressOf.add(fcall.args[0]) - address.add(addressOf) - address.add(PtNumber(address.type, 1.0, fcall.args[0].position)) - } else { - address = PtAddressOf(fcall.position) - address.add(fcall.args[0]) - } + val varname = asmgen.asmVariableName(fcall.args[0] as PtIdentifier) + if(msb) "+1" else "" + target = AsmAssignTarget(TargetStorageKind.VARIABLE, asmgen, DataType.UBYTE, fcall.definingSub(), fcall.position, variableAsmName = varname) } is PtNumber -> { val num = (fcall.args[0] as PtNumber).number + if(msb) 1 else 0 - address = PtNumber(fcall.args[0].type, num, fcall.args[0].position) + val mem = PtMemoryByte(fcall.position) + mem.add(PtNumber(DataType.UBYTE, num, fcall.position)) + target = AsmAssignTarget(TargetStorageKind.MEMORY, asmgen, DataType.UBYTE, fcall.definingSub(), fcall.position, memory = mem) } is PtAddressOf -> { + val mem = PtMemoryByte(fcall.position) if(msb) { - address = PtBinaryExpression("+", DataType.UWORD, fcall.args[0].position) + val address = PtBinaryExpression("+", DataType.UWORD, fcall.args[0].position) address.add(fcall.args[0]) address.add(PtNumber(address.type, 1.0, fcall.args[0].position)) + mem.add(address) } else { - address = fcall.args[0] + mem.add(fcall.args[0]) } + target = AsmAssignTarget(TargetStorageKind.MEMORY, asmgen, DataType.UBYTE, fcall.definingSub(), fcall.position, memory = mem) } is PtArrayIndexer -> { val indexer = fcall.args[0] as PtArrayIndexer require(!indexer.usesPointerVariable) + val elementSize: Int + val msbAdd: Int if(indexer.splitWords) { - // lsb/msb in split arrays, element index 'size' is always 1 - TODO("setlsb/setmsb on split array element ${fcall.position}") + val arrayVariable = indexer.variable + indexer.children[0] = PtIdentifier(arrayVariable.name + if(msb) "_msb" else "_lsb", DataType.ARRAY_UB, arrayVariable.position) + indexer.children[0].parent = indexer + elementSize = 1 + msbAdd = 0 } else { - TODO("setlsb/setmsb on array element ${fcall.position}") + elementSize = 2 + msbAdd = if(msb) 1 else 0 } + + // double the index because of word array (if not split), add one if msb (if not split) + val constIndexNum = (indexer.index as? 
PtNumber)?.number + if(constIndexNum!=null) { + indexer.children[1] = PtNumber(indexer.index.type, constIndexNum*elementSize + msbAdd, indexer.position) + indexer.children[1].parent = indexer + } else { + val multipliedIndex: PtExpression + if(elementSize==1) { + multipliedIndex = indexer.index + } else { + multipliedIndex = PtBinaryExpression("<<", indexer.index.type, indexer.position) + multipliedIndex.add(indexer.index) + multipliedIndex.add(PtNumber(DataType.UBYTE, 1.0, indexer.position)) + } + if(msbAdd>0) { + val msbIndex = PtBinaryExpression("+", indexer.index.type, indexer.position) + msbIndex.add(multipliedIndex) + msbIndex.add(PtNumber(DataType.UBYTE, msbAdd.toDouble(), indexer.position)) + indexer.children[1] = msbIndex + msbIndex.parent = indexer + } else { + indexer.children[1] = multipliedIndex + multipliedIndex.parent=indexer + } + } + target = AsmAssignTarget(TargetStorageKind.ARRAY, asmgen, DataType.UBYTE, fcall.definingSub(), fcall.position, array = indexer) } else -> throw AssemblyError("setlsb/setmsb on weird target ${fcall.args[0]}") } - val mem = PtMemoryByte(fcall.position) - mem.add(address) - mem.parent = fcall - assignAsmGen.storeRegisterAInMemoryAddress(mem) // TODO use assignRegisterByte()???, and assignConstantByte() if the value is contstant zero + + if(fcall.args[1].asConstInteger() == 0) { + assignAsmGen.assignConstantByte(target, 0) + } else { + asmgen.assignExpressionToRegister(fcall.args[1], RegisterOrPair.A, false) + assignAsmGen.assignRegisterByte(target, CpuRegister.A, false) + } } private fun funcSgn(fcall: PtBuiltinFunctionCall, resultRegister: RegisterOrPair?, scope: IPtSubroutine?) { diff --git a/codeGenCpu6502/src/prog8/codegen/cpu6502/assignment/AssignmentAsmGen.kt b/codeGenCpu6502/src/prog8/codegen/cpu6502/assignment/AssignmentAsmGen.kt index 5868a817a..37cd3780c 100644 --- a/codeGenCpu6502/src/prog8/codegen/cpu6502/assignment/AssignmentAsmGen.kt +++ b/codeGenCpu6502/src/prog8/codegen/cpu6502/assignment/AssignmentAsmGen.kt @@ -2717,68 +2717,8 @@ internal class AssignmentAsmGen(private val program: PtProgram, } TargetStorageKind.ARRAY -> { if(assignAsWord) - TODO("assign register as word into Array not yet supported") - if(target.array!!.splitWords) - TODO("assign register into split words ${target.position}") - if(assignsIndexedPointerVar(target)) { - if (target.constArrayIndexValue!=null) { - when (register) { - CpuRegister.A -> {} - CpuRegister.X -> asmgen.out(" txa") - CpuRegister.Y -> asmgen.out(" tya") - } - if(asmgen.isZpVar(target.origAstTarget!!.array!!.variable)) { - asmgen.out(" ldy #${target.constArrayIndexValue} | sta (${target.asmVarname}),y") - } else { - asmgen.out(""" - ldy ${target.asmVarname} - sty P8ZP_SCRATCH_W1 - ldy ${target.asmVarname}+1 - sty P8ZP_SCRATCH_W1+1 - ldy #${target.constArrayIndexValue} - sta (P8ZP_SCRATCH_W1),y""") - } - } - else { - when (register) { - CpuRegister.A -> {} - CpuRegister.X -> asmgen.out(" txa") - CpuRegister.Y -> asmgen.out(" tya") - } - val indexVar = target.array.index as PtIdentifier - if(asmgen.isZpVar(target.origAstTarget!!.array!!.variable)) { - asmgen.out(" ldy ${asmgen.asmVariableName(indexVar)} | sta (${target.asmVarname}),y") - } else { - asmgen.out(""" - ldy ${target.asmVarname} - sty P8ZP_SCRATCH_W1 - ldy ${target.asmVarname}+1 - sty P8ZP_SCRATCH_W1+1 - ldy ${asmgen.asmVariableName(indexVar)} - sta (P8ZP_SCRATCH_W1),y""") - } - } - return - } else { - // assign regular array indexing - if (target.constArrayIndexValue!=null) { - when (register) { - CpuRegister.A -> {} - CpuRegister.X 
-> asmgen.out(" txa") - CpuRegister.Y -> asmgen.out(" tya") - } - asmgen.out(" sta ${target.asmVarname}+${target.constArrayIndexValue}") - } - else { - when (register) { - CpuRegister.A -> {} - CpuRegister.X -> asmgen.out(" txa") - CpuRegister.Y -> asmgen.out(" tya") - } - val indexVar = target.array.index as PtIdentifier - asmgen.out(" ldy ${asmgen.asmVariableName(indexVar)} | sta ${target.asmVarname},y") - } - } + TODO("assign register byte as word into Array not yet supported") + assignRegisterByteToArray(target, register) } TargetStorageKind.REGISTER -> { when(register) { @@ -2926,6 +2866,79 @@ internal class AssignmentAsmGen(private val program: PtProgram, } } + private fun assignRegisterByteToArray(target: AsmAssignTarget, register: CpuRegister) { + if(target.array!!.splitWords) + throw AssemblyError("cannot assign byte to split word array here ${target.position}") + + if(assignsIndexedPointerVar(target)) { + if (target.constArrayIndexValue!=null) { + when (register) { + CpuRegister.A -> {} + CpuRegister.X -> asmgen.out(" txa") + CpuRegister.Y -> asmgen.out(" tya") + } + if(asmgen.isZpVar(target.origAstTarget!!.array!!.variable)) { + asmgen.out(" ldy #${target.constArrayIndexValue} | sta (${target.asmVarname}),y") + } else { + asmgen.out(""" + ldy ${target.asmVarname} + sty P8ZP_SCRATCH_W1 + ldy ${target.asmVarname}+1 + sty P8ZP_SCRATCH_W1+1 + ldy #${target.constArrayIndexValue} + sta (P8ZP_SCRATCH_W1),y""") + } + } + else { + when (register) { + CpuRegister.A -> {} + CpuRegister.X -> asmgen.out(" txa") + CpuRegister.Y -> asmgen.out(" tya") + } + val indexVar = target.array.index as PtIdentifier + if(asmgen.isZpVar(target.origAstTarget!!.array!!.variable)) { + asmgen.out(" ldy ${asmgen.asmVariableName(indexVar)} | sta (${target.asmVarname}),y") + } else { + asmgen.out(""" + ldy ${target.asmVarname} + sty P8ZP_SCRATCH_W1 + ldy ${target.asmVarname}+1 + sty P8ZP_SCRATCH_W1+1 + ldy ${asmgen.asmVariableName(indexVar)} + sta (P8ZP_SCRATCH_W1),y""") + } + } + return + } else { + // assign regular array indexing + if (target.constArrayIndexValue!=null) { + when (register) { + CpuRegister.A -> {} + CpuRegister.X -> asmgen.out(" txa") + CpuRegister.Y -> asmgen.out(" tya") + } + asmgen.out(" sta ${target.asmVarname}+${target.constArrayIndexValue}") + } + else { + when (register) { + CpuRegister.A -> {} + CpuRegister.X -> asmgen.out(" txa") + CpuRegister.Y -> asmgen.out(" tya") + } + val indexVar = target.array.index as? 
PtIdentifier + if(indexVar!=null) { + asmgen.out(" ldy ${asmgen.asmVariableName(indexVar)} | sta ${target.asmVarname},y") + } else { + require(target.array.index.type in ByteDatatypes) + asmgen.saveRegisterStack(register, false) + asmgen.assignExpressionToRegister(target.array.index, RegisterOrPair.Y, false) + asmgen.restoreRegisterStack(CpuRegister.A, false) + asmgen.out(" sta ${target.asmVarname},y") + } + } + } + } + internal fun assignRegisterpairWord(target: AsmAssignTarget, regs: RegisterOrPair) { require(target.datatype in NumericDatatypes || target.datatype in PassByReferenceDatatypes) { "assign target must be word type ${target.position}" @@ -3205,7 +3218,7 @@ internal class AssignmentAsmGen(private val program: PtProgram, } } - private fun assignConstantByte(target: AsmAssignTarget, byte: Int) { + internal fun assignConstantByte(target: AsmAssignTarget, byte: Int) { if(byte==0 && asmgen.isTargetCpu(CpuType.CPU65c02)) { // optimize setting zero value for this cpu when(target.kind) { @@ -3233,14 +3246,14 @@ internal class AssignmentAsmGen(private val program: PtProgram, return } if(target.array!!.splitWords) - TODO("assign into split words ${target.position}") + throw AssemblyError("cannot assign byte to split word array here ${target.position}") if (target.constArrayIndexValue!=null) { val indexValue = target.constArrayIndexValue!! asmgen.out(" stz ${target.asmVarname}+$indexValue") } else { - asmgen.loadScaledArrayIndexIntoRegister(target.array, DataType.UBYTE, CpuRegister.Y) - asmgen.out(" lda #0 | sta ${target.asmVarname},y") + asmgen.assignExpressionToRegister(target.array.index, RegisterOrPair.X, false) + asmgen.out(" stz ${target.asmVarname},x") } } TargetStorageKind.REGISTER -> when(target.register!!) { @@ -3306,17 +3319,11 @@ internal class AssignmentAsmGen(private val program: PtProgram, RegisterOrPair.XY -> asmgen.out(" ldy #0 | ldx #${byte.toHex()}") RegisterOrPair.FAC1, RegisterOrPair.FAC2 -> throw AssemblyError("expected typecasted byte to float") in Cx16VirtualRegisters -> { - asmgen.out( - " lda #${byte.toHex()} | sta cx16.${ - target.register.toString().lowercase() - }") + asmgen.out(" lda #${byte.toHex()} | sta cx16.${target.register.toString().lowercase()}") if(asmgen.isTargetCpu(CpuType.CPU65c02)) asmgen.out(" stz cx16.${target.register.toString().lowercase()}+1\n") else - asmgen.out( - " lda #0 | sta cx16.${ - target.register.toString().lowercase() - }+1\n") + asmgen.out(" lda #0 | sta cx16.${target.register.toString().lowercase()}+1\n") } else -> throw AssemblyError("weird register") } diff --git a/codeGenIntermediate/src/prog8/codegen/intermediate/BuiltinFuncGen.kt b/codeGenIntermediate/src/prog8/codegen/intermediate/BuiltinFuncGen.kt index e974b9848..76bcdbc8d 100644 --- a/codeGenIntermediate/src/prog8/codegen/intermediate/BuiltinFuncGen.kt +++ b/codeGenIntermediate/src/prog8/codegen/intermediate/BuiltinFuncGen.kt @@ -581,66 +581,109 @@ internal class BuiltinFuncGen(private val codeGen: IRCodeGen, private val exprGe private fun funcSetLsbMsb(call: PtBuiltinFunctionCall, msb: Boolean): ExpressionCodeResult { val result = mutableListOf() val target = call.args[0] + val isConstZeroValue = call.args[1].asConstInteger()==0 when(target) { is PtIdentifier -> { - val valueTr = exprGen.translateExpression(call.args[1]) - addToResult(result, valueTr, valueTr.resultReg, -1) - result += IRCodeChunk(null, null).also { - val pointerReg = codeGen.registers.nextFree() - it += IRInstruction(Opcode.LOAD, IRDataType.WORD, reg1=pointerReg, labelSymbol = target.name) - if(msb) - 
it += IRInstruction(Opcode.INC, IRDataType.WORD, reg1=pointerReg) - it += IRInstruction(Opcode.STOREI, IRDataType.BYTE, reg1=valueTr.resultReg, reg2=pointerReg) - // TODO use STOREZI if the value is zero + if(isConstZeroValue) { + result += IRCodeChunk(null, null).also { + val pointerReg = codeGen.registers.nextFree() + it += IRInstruction(Opcode.LOAD, IRDataType.WORD, reg1 = pointerReg, labelSymbol = target.name) + if (msb) + it += IRInstruction(Opcode.INC, IRDataType.WORD, reg1 = pointerReg) + it += IRInstruction(Opcode.STOREZI, IRDataType.BYTE, reg1 = pointerReg) + } + } else { + val valueTr = exprGen.translateExpression(call.args[1]) + addToResult(result, valueTr, valueTr.resultReg, -1) + result += IRCodeChunk(null, null).also { + val pointerReg = codeGen.registers.nextFree() + it += IRInstruction(Opcode.LOAD, IRDataType.WORD, reg1 = pointerReg, labelSymbol = target.name) + if (msb) + it += IRInstruction(Opcode.INC, IRDataType.WORD, reg1 = pointerReg) + it += IRInstruction(Opcode.STOREI, IRDataType.BYTE, reg1 = valueTr.resultReg, reg2 = pointerReg) + } } } is PtArrayIndexer -> { require(!target.usesPointerVariable) if(target.splitWords) { // lsb/msb in split arrays, element index 'size' is always 1 - val varName = target.variable.name + if(msb) "_msb" else "_lsb" - val valueTr = exprGen.translateExpression(call.args[1]) - addToResult(result, valueTr, valueTr.resultReg, -1) val constIndex = target.index.asConstInteger() - if(constIndex!=null) { - val offsetReg = codeGen.registers.nextFree() - result += IRCodeChunk(null, null).also { - it += IRInstruction(Opcode.LOAD, IRDataType.BYTE, reg1=offsetReg, immediate = constIndex) - it += IRInstruction(Opcode.STOREX, IRDataType.BYTE, reg1=valueTr.resultReg, reg2=offsetReg, labelSymbol = varName) - // TODO: use STOREZX if the value is zero + val varName = target.variable.name + if(msb) "_msb" else "_lsb" + if(isConstZeroValue) { + if(constIndex!=null) { + val offsetReg = codeGen.registers.nextFree() + result += IRCodeChunk(null, null).also { + it += IRInstruction(Opcode.LOAD, IRDataType.BYTE, reg1=offsetReg, immediate = constIndex) + it += IRInstruction(Opcode.STOREZX, IRDataType.BYTE, reg1=offsetReg, labelSymbol = varName) + } + } else { + val indexTr = exprGen.translateExpression(target.index) + addToResult(result, indexTr, indexTr.resultReg, -1) + result += IRCodeChunk(null, null).also { + it += IRInstruction(Opcode.STOREZX, IRDataType.BYTE, reg1=indexTr.resultReg, labelSymbol = varName) + } } } else { - val indexTr = exprGen.translateExpression(target.index) - addToResult(result, indexTr, indexTr.resultReg, -1) - result += IRCodeChunk(null, null).also { - it += IRInstruction(Opcode.STOREX, IRDataType.BYTE, reg1=valueTr.resultReg, reg2=indexTr.resultReg, labelSymbol = varName) - // TODO: use STOREZX if the value is zero + val valueTr = exprGen.translateExpression(call.args[1]) + addToResult(result, valueTr, valueTr.resultReg, -1) + if(constIndex!=null) { + val offsetReg = codeGen.registers.nextFree() + result += IRCodeChunk(null, null).also { + it += IRInstruction(Opcode.LOAD, IRDataType.BYTE, reg1=offsetReg, immediate = constIndex) + it += IRInstruction(Opcode.STOREX, IRDataType.BYTE, reg1=valueTr.resultReg, reg2=offsetReg, labelSymbol = varName) + } + } else { + val indexTr = exprGen.translateExpression(target.index) + addToResult(result, indexTr, indexTr.resultReg, -1) + result += IRCodeChunk(null, null).also { + it += IRInstruction(Opcode.STOREX, IRDataType.BYTE, reg1=valueTr.resultReg, reg2=indexTr.resultReg, labelSymbol = varName) + } } 
} } else { - val valueTr = exprGen.translateExpression(call.args[1]) - addToResult(result, valueTr, valueTr.resultReg, -1) val eltSize = codeGen.program.memsizer.memorySize(target.type) val constIndex = target.index.asConstInteger() - if(constIndex!=null) { - val offsetReg = codeGen.registers.nextFree() - val offset = eltSize*constIndex + if(msb) 1 else 0 - result += IRCodeChunk(null, null).also { - it += IRInstruction(Opcode.LOAD, IRDataType.BYTE, reg1=offsetReg, immediate = offset) - it += IRInstruction(Opcode.STOREX, IRDataType.BYTE, reg1=valueTr.resultReg, reg2=offsetReg, labelSymbol = target.variable.name) - // TODO: use STOREZX if the value is zero + if(isConstZeroValue) { + if(constIndex!=null) { + val offsetReg = codeGen.registers.nextFree() + val offset = eltSize*constIndex + if(msb) 1 else 0 + result += IRCodeChunk(null, null).also { + it += IRInstruction(Opcode.LOAD, IRDataType.BYTE, reg1=offsetReg, immediate = offset) + it += IRInstruction(Opcode.STOREZX, IRDataType.BYTE, reg1=offsetReg, labelSymbol = target.variable.name) + } + } else { + val indexTr = exprGen.translateExpression(target.index) + addToResult(result, indexTr, indexTr.resultReg, -1) + result += IRCodeChunk(null, null).also { + if(eltSize>1) + it += codeGen.multiplyByConst(IRDataType.BYTE, indexTr.resultReg, eltSize) + if(msb) + it += IRInstruction(Opcode.INC, IRDataType.BYTE, reg1=indexTr.resultReg) + it += IRInstruction(Opcode.STOREZX, IRDataType.BYTE, reg1=indexTr.resultReg, labelSymbol = target.variable.name) + } } } else { - val indexTr = exprGen.translateExpression(target.index) - addToResult(result, indexTr, indexTr.resultReg, -1) - result += IRCodeChunk(null, null).also { - if(eltSize>1) - it += codeGen.multiplyByConst(IRDataType.BYTE, indexTr.resultReg, eltSize) - if(msb) - it += IRInstruction(Opcode.INC, IRDataType.BYTE, reg1=indexTr.resultReg) - it += IRInstruction(Opcode.STOREX, IRDataType.BYTE, reg1=valueTr.resultReg, reg2=indexTr.resultReg, labelSymbol = target.variable.name) - // TODO: use STOREZX if the value is zero + val valueTr = exprGen.translateExpression(call.args[1]) + addToResult(result, valueTr, valueTr.resultReg, -1) + if(constIndex!=null) { + val offsetReg = codeGen.registers.nextFree() + val offset = eltSize*constIndex + if(msb) 1 else 0 + result += IRCodeChunk(null, null).also { + it += IRInstruction(Opcode.LOAD, IRDataType.BYTE, reg1=offsetReg, immediate = offset) + it += IRInstruction(Opcode.STOREX, IRDataType.BYTE, reg1=valueTr.resultReg, reg2=offsetReg, labelSymbol = target.variable.name) + } + } else { + val indexTr = exprGen.translateExpression(target.index) + addToResult(result, indexTr, indexTr.resultReg, -1) + result += IRCodeChunk(null, null).also { + if(eltSize>1) + it += codeGen.multiplyByConst(IRDataType.BYTE, indexTr.resultReg, eltSize) + if(msb) + it += IRInstruction(Opcode.INC, IRDataType.BYTE, reg1=indexTr.resultReg) + it += IRInstruction(Opcode.STOREX, IRDataType.BYTE, reg1=valueTr.resultReg, reg2=indexTr.resultReg, labelSymbol = target.variable.name) + } } } } diff --git a/docs/source/todo.rst b/docs/source/todo.rst index 4c5bb6325..31389d9a2 100644 --- a/docs/source/todo.rst +++ b/docs/source/todo.rst @@ -2,6 +2,7 @@ TODO ==== - allow taking address of array variable (now gives parser error) +- ir: the @split arrays are currently also split in _lsb/_msb arrays in the IR, and operations take multiple (byte) instructions that may lead to verbose and slow operation and machine code generation down the line. 
- [on branch: shortcircuit] investigate McCarthy evaluation again? this may also reduce code size perhaps for things like if a>4 or a<2 .... - IR: reduce the number of branch instructions such as BEQ, BEQR, etc (gradually), replace with CMP(I) + status branch instruction @@ -38,7 +39,6 @@ Compiler: global initialization values are simply a list of LOAD instructions. Variables replaced include all subroutine parameters! So the only variables that remain as variables are arrays and strings. - ir: add more optimizations in IRPeepholeOptimizer -- ir: the @split arrays are currently also split in _lsb/_msb arrays in the IR, and operations take multiple (byte) instructions that may lead to verbose and slow operation and machine code generation down the line. - ir: for expressions with array indexes that occur multiple times, can we avoid loading them into new virtualregs everytime and just reuse a single virtualreg as indexer? (simple form of common subexpression elimination) - PtAst/IR: more complex common subexpression eliminations - [problematic due to using 64tass:] better support for building library programs, where unused .proc shouldn't be deleted from the assembly? diff --git a/examples/test.p8 b/examples/test.p8 index e3740c8c3..0e710841b 100644 --- a/examples/test.p8 +++ b/examples/test.p8 @@ -8,16 +8,16 @@ main { txt.nl() ;@(&zz) = $11 - setlsb(zz, $11) + setlsb(zz, 0) txt.print_uwhex(zz, true) txt.nl() ;@(&zz+1) = $22 - setmsb(zz, $22) + setmsb(zz, 0) txt.print_uwhex(zz, true) txt.nl() txt.nl() - uword[] array = [$1234,$5678,$abcd] ; TODO also with @split + uword[] @split array = [$1234,$5678,$abcd] ; TODO also with @split ubyte one = 1 ubyte two = 2 @@ -27,8 +27,8 @@ main { txt.nl() ;@(&array+one*2) = $ff ;@(&array+two*2+1) = $ff - setlsb(array[one],$ff) - setmsb(array[two],$00) + setlsb(array[one],0) + setmsb(array[two],0) txt.print_uwhex(array[1], true) txt.nl() txt.print_uwhex(array[2], true) diff --git a/syntax-files/NotepadPlusPlus/Prog8.xml b/syntax-files/NotepadPlusPlus/Prog8.xml index 3d4291195..db34f3c14 100644 --- a/syntax-files/NotepadPlusPlus/Prog8.xml +++ b/syntax-files/NotepadPlusPlus/Prog8.xml @@ -27,7 +27,7 @@ void const str byte ubyte bool word uword float zp shared split requirezp %address %asm %ir %asmbinary %asminclude %breakpoint %import %launcher %option %output %zeropage %zpreserved inline sub asmsub romsub clobbers asm if when else if_cc if_cs if_eq if_mi if_neg if_nz if_pl if_pos if_vc if_vs if_z for in step do while repeat unroll break return goto - abs all any callfar clamp cmp divmod len lsb lsl lsr memory mkword min max msb peek peekw poke pokew push pushw pop popw rsave rsavex rrestore rrestorex reverse rnd rndw rol rol2 ror ror2 sgn sizeof sort sqrtw swap + abs all any callfar clamp cmp divmod len lsb lsl lsr memory mkword min max msb peek peekw poke pokew push pushw pop popw rsave rsavex rrestore rrestorex reverse rnd rndw rol rol2 ror ror2 setlsb setmsb sgn sizeof sort sqrtw swap true false not and or xor as to downto |> diff --git a/syntax-files/Vim/prog8_builtins.vim b/syntax-files/Vim/prog8_builtins.vim index ebc9c33c9..6055ddf92 100644 --- a/syntax-files/Vim/prog8_builtins.vim +++ b/syntax-files/Vim/prog8_builtins.vim @@ -14,7 +14,7 @@ syn keyword prog8BuiltInFunc any all len reverse sort " Miscellaneous functions syn keyword prog8BuiltInFunc cmp divmod lsb msb mkword min max peek peekw poke pokew push pushw pop popw rsave rsavex rrestore rrestorex -syn keyword prog8BuiltInFunc rol rol2 ror ror2 sizeof +syn keyword prog8BuiltInFunc rol rol2 ror 
ror2 sizeof setlsb setmsb syn keyword prog8BuiltInFunc swap memory callfar clamp
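
Note on the array-index arithmetic in funcSetLsbMsb() above (an illustrative sketch added here, not part of the patch): both code generators map a setlsb()/setmsb() call on a word-array element to a single byte store at a computed target. For a regular uword array the element is two bytes wide, so the byte offset is index*2, plus 1 when the msb is written; for a @split array the byte lives in one of the parallel "<name>_lsb"/"<name>_msb" byte arrays and the element index is used unchanged. The helper names below (byteOffsetInWordArray, splitArrayTarget) are hypothetical and only demonstrate that mapping:

    // Kotlin sketch of the offset/target selection that the 6502 and IR code generators
    // perform for setlsb()/setmsb() on array elements (assumed helper names, not real API).

    // Regular uword array: 2-byte elements, the msb is the second byte of the element.
    fun byteOffsetInWordArray(index: Int, msb: Boolean): Int =
        index * 2 + (if (msb) 1 else 0)

    // @split array: two parallel byte arrays "<name>_lsb"/"<name>_msb", same element index.
    fun splitArrayTarget(arrayName: String, index: Int, msb: Boolean): Pair<String, Int> =
        Pair(arrayName + (if (msb) "_msb" else "_lsb"), index)

    fun main() {
        println(byteOffsetInWordArray(2, msb = true))      // setmsb(array[2], v) -> byte offset 5 into 'array'
        println(splitArrayTarget("array", 2, msb = true))  // @split: element 2 of 'array_msb'
    }

Both generators also special-case a constant zero value: the 6502 path calls assignConstantByte() (which can use stz on the 65C02) and the IR path emits STOREZI/STOREZX, so the value never needs to be loaded into a register first.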