Mirror of https://github.com/c64scene-ar/llvm-6502.git, synced 2025-01-26 23:32:58 +00:00.
Teach the ARM peephole optimizer that RSB, RSC, ADC, and SBC can be used for folded comparisons, just like ADD and SUB.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@129038 91177308-0d34-0410-b5e6-96231b3b80d8
This commit is contained in:
parent
ef7fb17936
commit
df298c9ea6
@@ -1618,10 +1618,17 @@ OptimizeCompareInstr(MachineInstr *CmpInstr, unsigned SrcReg, int CmpMask,
   // Set the "zero" bit in CPSR.
   switch (MI->getOpcode()) {
   default: break;
+  case ARM::RSBri:
+  case ARM::RSCri:
   case ARM::ADDri:
+  case ARM::ADCri:
   case ARM::SUBri:
+  case ARM::SBCri:
+  case ARM::t2RSBri:
   case ARM::t2ADDri:
-  case ARM::t2SUBri: {
+  case ARM::t2ADCri:
+  case ARM::t2SUBri:
+  case ARM::t2SBCri: {
     // Scan forward for the use of CPSR, if it's a conditional code requires
     // checking of V bit, then this is not safe to do. If we can't find the
     // CPSR use (i.e. used in another block), then it's not safe to perform
@@ -27,3 +27,12 @@ define i64 @f3(i64 %a) {
   ret i64 %tmp
 }
+
+define i32 @f4(i32 %x) {
+entry:
+; CHECK: f4
+; CHECK: rsbs
+  %sub = sub i32 1, %x
+  %cmp = icmp ugt i32 %sub, 0
+  %sel = select i1 %cmp, i32 1, i32 %sub
+  ret i32 %sel
+}
Loading…
x
Reference in New Issue
Block a user