AArch64/ARM64: implement BFI optimisation

ARM64 was not producing pure BFI instructions for bitfield insertion
operations, unlike AArch64. The approach had to be a little different (in
ISelDAGToDAG rather than ISelLowering), and the outcomes aren't identical but
hopefully this gives it similar power.

This should address PR19424.
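Note that the two outputs below are equivalent instructions: BFI is an
alias of BFM, so the ARM64 backend is emitting the raw form. Per the ARMv8
encoding, BFI Wd, Wn, #lsb, #width is BFM Wd, Wn, #((-lsb) MOD 32),
#(width - 1) (MOD 64 for X registers). A worked example matching the
from_clang test in the diff:

    bfi w0, w1, #3, #4    // alias printed by the AArch64 backend
    bfm w0, w1, #29, #3   // raw form printed by ARM64: immr = (32 - 3) % 32 = 29, imms = 4 - 1 = 3

Both encode identically; only the assembly syntax differs.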

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@207102 91177308-0d34-0410-b5e6-96231b3b80d8
Author: Tim Northover
Date: 2014-04-24 12:11:53 +00:00
Parent: fe6f4e4d31
Commit: b62ba5eca0
3 changed files with 159 additions and 60 deletions


@@ -1,4 +1,5 @@
-; RUN: llc -mtriple=aarch64-none-linux-gnu < %s | FileCheck %s
+; RUN: llc -mtriple=aarch64-none-linux-gnu < %s | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-AARCH64
+; RUN: llc -mtriple=arm64-none-linux-gnu < %s | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-ARM64
 
 ; First, a simple example from Clang. The registers could plausibly be
 ; different, but probably won't be.
@@ -7,8 +8,10 @@
 define [1 x i64] @from_clang([1 x i64] %f.coerce, i32 %n) nounwind readnone {
 ; CHECK-LABEL: from_clang:
-; CHECK: bfi w0, w1, #3, #4
-; CHECK-NEXT: ret
+; CHECK-AARCH64: bfi w0, w1, #3, #4
+; CHECK-AARCH64-NEXT: ret
+; CHECK-ARM64: bfm {{w[0-9]+}}, {{w[0-9]+}}, #29, #3
 entry:
   %f.coerce.fca.0.extract = extractvalue [1 x i64] %f.coerce, 0
@@ -26,7 +29,9 @@ entry:
 define void @test_whole32(i32* %existing, i32* %new) {
 ; CHECK-LABEL: test_whole32:
-; CHECK: bfi {{w[0-9]+}}, {{w[0-9]+}}, #26, #5
+; CHECK-AARCH64: bfi {{w[0-9]+}}, {{w[0-9]+}}, #26, #5
+; CHECK-ARM64: bfm {{w[0-9]+}}, {{w[0-9]+}}, #6, #4
   %oldval = load volatile i32* %existing
   %oldval_keep = and i32 %oldval, 2214592511 ; =0x83ffffff
@@ -43,7 +48,8 @@ define void @test_whole32(i32* %existing, i32* %new) {
 define void @test_whole64(i64* %existing, i64* %new) {
 ; CHECK-LABEL: test_whole64:
-; CHECK: bfi {{x[0-9]+}}, {{x[0-9]+}}, #26, #14
+; CHECK-AARCH64: bfi {{x[0-9]+}}, {{x[0-9]+}}, #26, #14
+; CHECK-ARM64: bfm {{x[0-9]+}}, {{x[0-9]+}}, #38, #13
 ; CHECK-NOT: and
 ; CHECK: ret
@@ -62,8 +68,11 @@ define void @test_whole64(i64* %existing, i64* %new) {
 define void @test_whole32_from64(i64* %existing, i64* %new) {
 ; CHECK-LABEL: test_whole32_from64:
-; CHECK: bfi {{w[0-9]+}}, {{w[0-9]+}}, #{{0|16}}, #16
-; CHECK-NOT: and
+; CHECK-AARCH64: bfi {{w[0-9]+}}, {{w[0-9]+}}, #{{0|16}}, #16
+; CHECK-AARCH64-NOT: and
+; CHECK-ARM64: bfm {{x[0-9]+}}, {{x[0-9]+}}, #0, #15
 ; CHECK: ret
   %oldval = load volatile i64* %existing
@@ -80,8 +89,12 @@ define void @test_whole32_from64(i64* %existing, i64* %new) {
 define void @test_32bit_masked(i32 *%existing, i32 *%new) {
 ; CHECK-LABEL: test_32bit_masked:
-; CHECK: bfi [[INSERT:w[0-9]+]], {{w[0-9]+}}, #3, #4
-; CHECK: and {{w[0-9]+}}, [[INSERT]], #0xff
+; CHECK-AARCH64: bfi [[INSERT:w[0-9]+]], {{w[0-9]+}}, #3, #4
+; CHECK-AARCH64: and {{w[0-9]+}}, [[INSERT]], #0xff
+; CHECK-ARM64: and
+; CHECK-ARM64: bfm {{w[0-9]+}}, {{w[0-9]+}}, #29, #3
   %oldval = load volatile i32* %existing
   %oldval_keep = and i32 %oldval, 135 ; = 0x87
@@ -98,8 +111,11 @@ define void @test_32bit_masked(i32 *%existing, i32 *%new) {
 define void @test_64bit_masked(i64 *%existing, i64 *%new) {
 ; CHECK-LABEL: test_64bit_masked:
-; CHECK: bfi [[INSERT:x[0-9]+]], {{x[0-9]+}}, #40, #8
-; CHECK: and {{x[0-9]+}}, [[INSERT]], #0xffff00000000
+; CHECK-AARCH64: bfi [[INSERT:x[0-9]+]], {{x[0-9]+}}, #40, #8
+; CHECK-AARCH64: and {{x[0-9]+}}, [[INSERT]], #0xffff00000000
+; CHECK-ARM64: and
+; CHECK-ARM64: bfm {{x[0-9]+}}, {{x[0-9]+}}, #24, #7
   %oldval = load volatile i64* %existing
   %oldval_keep = and i64 %oldval, 1095216660480 ; = 0xff_0000_0000
@@ -117,8 +133,12 @@ define void @test_64bit_masked(i64 *%existing, i64 *%new) {
 ; Mask is too complicated for literal ANDwwi, make sure other avenues are tried.
 define void @test_32bit_complexmask(i32 *%existing, i32 *%new) {
 ; CHECK-LABEL: test_32bit_complexmask:
-; CHECK: bfi {{w[0-9]+}}, {{w[0-9]+}}, #3, #4
-; CHECK: and {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK-AARCH64: bfi {{w[0-9]+}}, {{w[0-9]+}}, #3, #4
+; CHECK-AARCH64: and {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK-ARM64: and
+; CHECK-ARM64: bfm {{w[0-9]+}}, {{w[0-9]+}}, #29, #3
   %oldval = load volatile i32* %existing
   %oldval_keep = and i32 %oldval, 647 ; = 0x287
@@ -137,6 +157,7 @@ define void @test_32bit_complexmask(i32 *%existing, i32 *%new) {
 define void @test_32bit_badmask(i32 *%existing, i32 *%new) {
 ; CHECK-LABEL: test_32bit_badmask:
 ; CHECK-NOT: bfi
+; CHECK-NOT: bfm
 ; CHECK: ret
   %oldval = load volatile i32* %existing
@@ -156,6 +177,7 @@ define void @test_32bit_badmask(i32 *%existing, i32 *%new) {
 define void @test_64bit_badmask(i64 *%existing, i64 *%new) {
 ; CHECK-LABEL: test_64bit_badmask:
 ; CHECK-NOT: bfi
+; CHECK-NOT: bfm
 ; CHECK: ret
   %oldval = load volatile i64* %existing
@@ -186,8 +208,8 @@ define void @test_32bit_with_shr(i32* %existing, i32* %new) {
   %combined = or i32 %oldval_keep, %newval_masked
   store volatile i32 %combined, i32* %existing
 ; CHECK: lsr [[BIT:w[0-9]+]], {{w[0-9]+}}, #14
-; CHECK: bfi {{w[0-9]}}, [[BIT]], #26, #5
+; CHECK-AARCH64: bfi {{w[0-9]+}}, [[BIT]], #26, #5
+; CHECK-ARM64: bfm {{w[0-9]+}}, [[BIT]], #6, #4
   ret void
 }
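
For a standalone picture of the DAG pattern these tests exercise, here is a
minimal sketch (not taken from the commit; the function and value names are
illustrative): a bitfield insert is an OR of two AND-masked values whose
masks are contiguous and disjoint.

    define i32 @insert_sketch(i32 %old, i32 %new) {
      %keep = and i32 %old, 4294967175   ; 0xffffff87: clear bits [6:3] of the old value
      %shifted = shl i32 %new, 3         ; move the new field into position
      %masked = and i32 %shifted, 120    ; 0x78: keep only bits [6:3]
      %combined = or i32 %keep, %masked  ; disjoint masks, so this is a bitfield insert
      ret i32 %combined
    }

With this patch both backends should collapse the or/and/shl chain: AArch64
to bfi w0, w1, #3, #4 and ARM64 to the equivalent bfm w0, w1, #29, #3.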