ARM64: add support for AArch64's addsub_ext.ll

There was one definite issue in ARM64 (the off-by-1 check for whether
a shift could be folded in) and one difference that is probably
correct: ARM64 didn't fold nodes with multiple uses into the
arithmetic operations unless optimising for code size.
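
To illustrate the second point (a hand-written sketch, not part of the patch; the function name and IR are invented): when the extend has other users it stays live anyway, so folding it into the add is not an obvious win, and ARM64 only does it when optimising for code size. The test functions are therefore marked minsize so that both backends generate the folded form.

    define i32 @multi_use_fold(i32 %lhs, i8 %rhs8, i32* %ptr) minsize {
      %rhs32 = zext i8 %rhs8 to i32
      store i32 %rhs32, i32* %ptr   ; second use keeps the zext alive
      %res = add i32 %lhs, %rhs32   ; may still select an extended-register add at minsize
      ret i32 %res
    }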

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@206168 91177308-0d34-0410-b5e6-96231b3b80d8
Author: Tim Northover
Date:   2014-04-14 12:50:50 +00:00
commit 3c68c5c55e
parent 41b47904ba
2 changed files with 5 additions and 4 deletions

lib/Target/ARM64/ARM64ISelDAGToDAG.cpp

@@ -532,7 +532,7 @@ bool ARM64DAGToDAGISel::SelectArithExtendedRegister(SDValue N, SDValue &Reg,
     if (!CSD)
       return false;
     ShiftVal = CSD->getZExtValue();
-    if ((ShiftVal & 0x3) != ShiftVal)
+    if (ShiftVal > 4)
       return false;
 
     Ext = getExtendTypeForNode(N.getOperand(0));
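
For reference, a minimal sketch of the case the old check rejected (hand-written, not taken from the test file): the arithmetic extended-register forms allow a left shift of 0-4, but "(ShiftVal & 0x3) != ShiftVal" only accepted 0-3, so a shift of exactly 4 fell back to separate instructions.

    define i32 @fold_shift_of_4(i32 %lhs, i8 %rhs8) {
      %rhs32 = zext i8 %rhs8 to i32
      %shifted = shl i32 %rhs32, 4   ; a shift of 4 is still legal for "add ..., uxtb #4"
      %res = add i32 %lhs, %shifted  ; with the fix this can select the extended-register add
      ret i32 %res
    }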

test/CodeGen/AArch64/addsub_ext.ll

@@ -1,11 +1,12 @@
 ; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu | FileCheck %s
+; RUN: llc -verify-machineinstrs %s -o - -mtriple=arm64 | FileCheck %s
 
 @var8 = global i8 0
 @var16 = global i16 0
 @var32 = global i32 0
 @var64 = global i64 0
 
-define void @addsub_i8rhs() {
+define void @addsub_i8rhs() minsize {
 ; CHECK-LABEL: addsub_i8rhs:
 %val8_tmp = load i8* @var8
 %lhs32 = load i32* @var32
@@ -80,7 +81,7 @@ end:
 ret void
 }
 
-define void @addsub_i16rhs() {
+define void @addsub_i16rhs() minsize {
 ; CHECK-LABEL: addsub_i16rhs:
 %val16_tmp = load i16* @var16
 %lhs32 = load i32* @var32
@@ -158,7 +159,7 @@ end:
 ; N.b. we could probably check more here ("add w2, w3, w1, uxtw" for
 ; example), but the remaining instructions are probably not idiomatic
 ; in the face of "add/sub (shifted register)" so I don't intend to.
-define void @addsub_i32rhs() {
+define void @addsub_i32rhs() minsize {
 ; CHECK-LABEL: addsub_i32rhs:
 %val32_tmp = load i32* @var32
 %lhs64 = load i64* @var64