From e7fc0360ad29a42cdbfb0121ba62b3636bc83c2f Mon Sep 17 00:00:00 2001
From: Irmen de Jong
Date: Sat, 27 Sep 2025 23:29:10 +0200
Subject: [PATCH] long bitwise operator expressions

---
 .../cpu6502/assignment/AssignmentAsmGen.kt |  14 ++
 docs/source/todo.rst                       |   5 +-
 examples/test.p8                           | 154 +++---------------
 3 files changed, 37 insertions(+), 136 deletions(-)

diff --git a/codeGenCpu6502/src/prog8/codegen/cpu6502/assignment/AssignmentAsmGen.kt b/codeGenCpu6502/src/prog8/codegen/cpu6502/assignment/AssignmentAsmGen.kt
index 4855f3cc4..14393411c 100644
--- a/codeGenCpu6502/src/prog8/codegen/cpu6502/assignment/AssignmentAsmGen.kt
+++ b/codeGenCpu6502/src/prog8/codegen/cpu6502/assignment/AssignmentAsmGen.kt
@@ -1927,6 +1927,20 @@ internal class AssignmentAsmGen(
                 assignRegisterpairWord(target, RegisterOrPair.AY)
                 return true
             }
+            else if (expr.left.type.isLong && expr.right.type.isLong) {
+                // TODO optimize for when the left operand is const values or variables
+                asmgen.assignExpressionToRegister(expr.left, RegisterOrPair.R2R3_32, expr.left.type.isSigned)
+                val constval = expr.right.asConstInteger()
+                val varname = (expr.right as? PtIdentifier)?.name
+                if(constval!=null)
+                    augmentableAsmGen.inplacemodificationLongWithLiteralval("cx16.r2", expr.operator, constval)
+                else if(varname!=null)
+                    augmentableAsmGen.inplacemodificationLongWithVariable("cx16.r2", expr.operator, varname)
+                else
+                    augmentableAsmGen.inplacemodificationLongWithExpression("cx16.r2", expr.operator, expr.right)
+                assignRegisterLong(target, RegisterOrPair.R2R3_32)
+                return true
+            }
             return false
         }
 
diff --git a/docs/source/todo.rst b/docs/source/todo.rst
index f6ca6ed40..66496035a 100644
--- a/docs/source/todo.rst
+++ b/docs/source/todo.rst
@@ -1,9 +1,6 @@
 TODO
 ====
 
-implement the bitwise & | ^ operations as expressions on longs (all types args)
-
-
 LONG TYPE
 ---------
 - call convention: return long -> return it in R0+R1.... because AY is only 16 bits...
@@ -176,7 +173,7 @@ Optimizations
 - more optimized operator handling of different types, for example uword a ^ byte b now does a type cast of b to word first
 - optimize longEqualsValue() for const and variable operands to not assign needlessly to R0-R3.
-- optimize inplacemodificationLongWithLiteralval() for more shift values such as 8, 16, 24 etc but take sign bit into account!
+- optimize optimizedBitwiseExpr() for const and variable operands to not assign needlessly to R0-R3.
 - optimize inplacemodificationLongWithLiteralval() for more shift values such as 8, 16, 24 etc but take sign bit into account!
 - Port benchmarks from https://thred.github.io/c-bench-64/ to prog8 and see how it stacks up.
 - Since fixing the missing zp-var initialization, programs grew in size again because STZ's reappeared. Can we add more intelligent (and correct!) optimizations to remove those STZs that might be redundant again?
 
diff --git a/examples/test.p8 b/examples/test.p8
index 4ad625d01..6698b0c73 100644
--- a/examples/test.p8
+++ b/examples/test.p8
@@ -5,150 +5,40 @@ main {
     sub start() {
         long @shared lv1, lv2
-        bool b1,b2
         lv1 = $11223344
-        lv2 = $33883388
+        lv2 = $22ffff22
 
-        txt.print_bool(lv1==0)
+        txt.print_ulhex(lv1 | $8080, true)
         txt.spc()
-        txt.print_bool(lv1==1)
+        txt.print_ulhex(lv1 & $f0f0, true)
         txt.spc()
-        txt.print_bool(lv1==$11000000)
+        txt.print_ulhex(lv1 ^ $8f8f, true)
+        txt.nl()
+
+        cx16.r6 = $8080
+        cx16.r7 = $f0f0
+        cx16.r8 = $8f8f
+
+        txt.print_ulhex(lv1 | cx16.r6, true)
         txt.spc()
-        txt.print_bool(lv1==$11223344)
-        txt.nl()
-        txt.print_bool(lv1==lv2)
+        txt.print_ulhex(lv1 & cx16.r7, true)
         txt.spc()
-        lv2 = lv1
-        txt.print_bool(lv1==lv2)
+        txt.print_ulhex(lv1 ^ cx16.r8, true)
         txt.nl()
-
-        lv2 = $33883388
-        txt.print_bool(lv1!=0)
-        txt.spc()
-        txt.print_bool(lv1!=1)
-        txt.spc()
-        txt.print_bool(lv1!=$11000000)
-        txt.spc()
-        txt.print_bool(lv1!=$11223344)
-        txt.nl()
-        txt.print_bool(lv1!=lv2)
-        txt.spc()
-        lv2 = lv1
-        txt.print_bool(lv1!=lv2)
-        txt.nl()
-
-        txt.print_ulhex(lv1, false)
-        txt.nl()
-        lv1 = ~lv1
-        txt.print_ulhex(lv1, false)
-        txt.nl()
-
-        lv1 = 999999
-        txt.print_l(lv1<<3)
-        txt.nl()
-        cx16.r4sL = -55
-        lv1 = 999999
-        txt.print_l(lv1+cx16.r4sL)
-        txt.nl()
-        cx16.r4s = -5555
-        lv1 = 999999
-        txt.print_l(lv1+cx16.r4s)
-        txt.nl()
-
-        lv2 <<= cx16.r0L
-        lv2 >>= cx16.r0L
-
-        lv1 <<= 3
-        lv2 <<= cx16.r0L
-        lv1 >>= 3
-        lv2 >>= cx16.r0L
-
-        lv1 += cx16.r0L
-        lv1 += cx16.r0
-
-        lv1 |= cx16.r0L
-        lv1 |= cx16.r0
+        lv1 = $11223344
+        lv2 = $22ffff22
         lv1 |= lv2
-        lv1 &= cx16.r0L
-        lv1 &= cx16.r0
+        txt.print_ulhex(lv1, true)
+        txt.spc()
+        lv1 = $11223344
         lv1 &= lv2
-        lv1 ^= cx16.r0L
-        lv1 ^= cx16.r0
+        txt.print_ulhex(lv1, true)
+        txt.spc()
+        lv1 = $11223344
         lv1 ^= lv2
-
-        b1 = lv1 == cx16.r0L
-        b1 = lv2 == cx16.r0sL
-        b2 = lv1 == cx16.r0
-        b1 = lv2 == cx16.r0s
-        b2 = lv1 == lv2
-
-        b1 = lv1 != cx16.r0L
-        b2 = lv2 != cx16.r0sL
-        b1 = lv1 != cx16.r0
-        b2 = lv2 != cx16.r0s
-        b1 = lv1 != lv2
-
-        txt.print_l(mklong2($a000,$bbbb))
-        txt.spc()
-        txt.print_ulhex(mklong2($a000,$bbbb), true)
-        txt.spc()
-        txt.print_l(mklong(9,8,7,6))
-        txt.spc()
-        txt.print_ulhex(mklong(9,8,7,6), true)
+        txt.print_ulhex(lv1, true)
         txt.nl()
-        cx16.r8 = $a000
-        cx16.r9 = $bbbb
-        cx16.r2L = 9
-        cx16.r3L = 8
-        cx16.r4L = 7
-        cx16.r5L = 6
-        txt.print_l(mklong2(cx16.r8, cx16.r9))
-        txt.spc()
-        txt.print_ulhex(mklong2(cx16.r8, cx16.r9), true)
-        txt.spc()
-        txt.print_l(mklong(cx16.r2L,cx16.r3L,cx16.r4L,cx16.r5L))
-        txt.spc()
-        txt.print_ulhex(mklong(cx16.r2L,cx16.r3L,cx16.r4L,cx16.r5L), true)
-        txt.nl()
-
-        long @shared lv = 111111111
-        lv2 = 1000000
-        word @shared ww = 1000
-        byte @shared bb = 1
-        long @shared result = lv + lv2
-        txt.print_l(result)
-        txt.spc()
-        result = lv + ww
-        txt.print_l(result)
-        txt.spc()
-        result = lv + bb
-        txt.print_l(result)
-        txt.nl()
-        txt.print_l(lv)
-        txt.spc()
-        txt.print_l(lv + 1)
-        txt.spc()
-        txt.print_l(lv + 1000)
-        txt.spc()
-        txt.print_l(lv + 1000000)
-        txt.nl()
-
-        long[] array = [-1999888777, -999, 42, 0, 77, 123456, 999999999]
-        long xx
-        for xx in array {
-            txt.print_uw(msw(xx))
-            txt.spc()
-            txt.print_uw(lsw(xx))
-            txt.nl()
-        }
-        txt.nl()
-        array[2] = 0
-        array[3] = 222222222
-        array[4] = lv1
-        array[5]++
-        array[6]--
     }
 }
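
Note (not part of the patch itself): the new branch in AssignmentAsmGen.kt handles a long-typed bitwise expression by loading the left operand into the R2/R3 register pair and then dispatching on the shape of the right operand: a literal constant, a plain variable, or an arbitrary expression, each going to the corresponding in-place modification helper. The following is a minimal standalone Kotlin sketch of that three-way dispatch only; the Operand types and emitLongBitwise function are hypothetical stand-ins, not the compiler's real PtExpression/PtIdentifier API.

    // Hypothetical stand-in types for illustration; the real code generator
    // inspects PtExpression / PtIdentifier nodes instead.
    sealed interface Operand
    data class ConstOperand(val value: Int) : Operand
    data class VarOperand(val name: String) : Operand
    data class ExprOperand(val description: String) : Operand

    // Mirrors the dispatch in the patch: choose the cheapest in-place code path
    // depending on whether the right operand is a literal, a named variable,
    // or a general expression that must be evaluated first.
    fun emitLongBitwise(operator: String, right: Operand) = when (right) {
        is ConstOperand -> println("inplace $operator with literal ${right.value} on cx16.r2")
        is VarOperand   -> println("inplace $operator with variable ${right.name} on cx16.r2")
        is ExprOperand  -> println("inplace $operator with evaluated '${right.description}' on cx16.r2")
    }

    fun main() {
        emitLongBitwise("|", ConstOperand(0x8080))
        emitLongBitwise("&", VarOperand("cx16.r7"))
        emitLongBitwise("^", ExprOperand("lv1 ^ lv2"))
    }

The TODO comment in the patch and the new todo.rst entry point at the remaining optimization: the left operand is currently always evaluated into R2/R3, even when it is itself a constant or a variable and a cheaper code path would be possible.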