From ccf53013cdc56b2c4ca9dd24688f71d29b007630 Mon Sep 17 00:00:00 2001 From: Juergen Ributzka Date: Wed, 27 Aug 2014 21:38:33 +0000 Subject: [PATCH] [FastISel][AArch64] Fix simplify address when the address comes from a shift. When the address comes directly from a shift instruction then the address computation cannot be folded into the memory instruction, because the zero register is not available as a base register. Simplify address needs to emit the shift instruction and use the result as base register. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@216621 91177308-0d34-0410-b5e6-96231b3b80d8 --- lib/Target/AArch64/AArch64FastISel.cpp | 4 ++++ .../AArch64/fast-isel-addressing-modes.ll | 21 +++++++++++++++++++ 2 files changed, 25 insertions(+) diff --git a/lib/Target/AArch64/AArch64FastISel.cpp b/lib/Target/AArch64/AArch64FastISel.cpp index a6e7daa0d9b..2faa1398064 100644 --- a/lib/Target/AArch64/AArch64FastISel.cpp +++ b/lib/Target/AArch64/AArch64FastISel.cpp @@ -708,6 +708,10 @@ bool AArch64FastISel::SimplifyAddress(Address &Addr, MVT VT) { Addr.getOffsetReg()) RegisterOffsetNeedsLowering = true; + // Cannot encode zero register as base. + if (Addr.isRegBase() && Addr.getOffsetReg() && !Addr.getReg()) + RegisterOffsetNeedsLowering = true; + // If this is a stack pointer and the offset needs to be simplified then put // the alloca address into a register, set the base type back to register and // continue. This should almost never happen. 
diff --git a/test/CodeGen/AArch64/fast-isel-addressing-modes.ll b/test/CodeGen/AArch64/fast-isel-addressing-modes.ll index 86ba400cff2..750e081d423 100644 --- a/test/CodeGen/AArch64/fast-isel-addressing-modes.ll +++ b/test/CodeGen/AArch64/fast-isel-addressing-modes.ll @@ -107,6 +107,16 @@ define void @store_breg_f64(double* %a) { ret void } +; Load Immediate +define i32 @load_immoff_1() { +; CHECK-LABEL: load_immoff_1 +; CHECK: orr {{w|x}}[[REG:[0-9]+]], {{wzr|xzr}}, #0x80 +; CHECK: ldr {{w[0-9]+}}, {{\[}}x[[REG]]{{\]}} + %1 = inttoptr i64 128 to i32* + %2 = load i32* %1 + ret i32 %2 +} + ; Load / Store Base Register + Immediate Offset ; Max supported negative offset define i32 @load_breg_immoff_1(i64 %a) { @@ -318,6 +328,17 @@ define i64 @load_breg_offreg_immoff_2(i64 %a, i64 %b) { ret i64 %4 } +; Load Scaled Register Offset +define i32 @load_shift_offreg_1(i64 %a) { +; CHECK-LABEL: load_shift_offreg_1 +; CHECK: lsl [[REG:x[0-9]+]], x0, #2 +; CHECK: ldr {{w[0-9]+}}, {{\[}}[[REG]]{{\]}} + %1 = shl i64 %a, 2 + %2 = inttoptr i64 %1 to i32* + %3 = load i32* %2 + ret i32 %3 +} + ; Load Base Register + Scaled Register Offset define i32 @load_breg_shift_offreg_1(i64 %a, i64 %b) { ; CHECK-LABEL: load_breg_shift_offreg_1