- Assign load / store with shifter op address modes the right itinerary classes.

- For now, loads with the [r, r] addressing mode are treated the same as the
  [r, r lsl/lsr/asr #] variants. ARMBaseInstrInfo::getOperandLatency() should
  identify the former case and reduce the output latency by 1.
- Also identify [r, r << 2] case. This special form of shifter addressing mode
  is "free".


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@117519 91177308-0d34-0410-b5e6-96231b3b80d8
This commit is contained in:
Evan Cheng
2010-10-28 01:49:06 +00:00
parent 7c88cdcc3b
commit 0104d9de04
11 changed files with 93 additions and 31 deletions
+1 -1
View File
@@ -24,4 +24,4 @@ declare float @fabsf(float)
 ; CORTEXA8: test:
 ; CORTEXA8: vabs.f32 d1, d1
 ; CORTEXA9: test:
-; CORTEXA9: vabs.f32 s0, s0
+; CORTEXA9: vabs.f32 s1, s1
+1 -1
View File
@@ -20,4 +20,4 @@ entry:
 ; CORTEXA8: test:
 ; CORTEXA8: vadd.f32 d0, d1, d0
 ; CORTEXA9: test:
-; CORTEXA9: vadd.f32 s0, s0, s1
+; CORTEXA9: vadd.f32 s0, s1, s0
+1 -1
View File
@@ -20,4 +20,4 @@ entry:
 ; CORTEXA8: test:
 ; CORTEXA8: vdiv.f32 s0, s1, s0
 ; CORTEXA9: test:
-; CORTEXA9: vdiv.f32 s0, s0, s1
+; CORTEXA9: vdiv.f32 s0, s1, s0
+1 -1
View File
@@ -21,4 +21,4 @@ entry:
 ; CORTEXA8: test:
 ; CORTEXA8: vmul.f32 d0, d1, d0
 ; CORTEXA9: test:
-; CORTEXA9: vmla.f32 s0, s1, s2
+; CORTEXA9: vmla.f32 s2, s1, s0
+2 -2
View File
@@ -19,6 +19,6 @@ entry:
 ; NFP0: vnmls.f32 s2, s1, s0
 ; CORTEXA8: test:
-; CORTEXA8: vnmls.f32 s1, s2, s0
+; CORTEXA8: vnmls.f32 s2, s1, s0
 ; CORTEXA9: test:
-; CORTEXA9: vnmls.f32 s0, s1, s2
+; CORTEXA9: vnmls.f32 s2, s1, s0
+1 -1
View File
@@ -20,4 +20,4 @@ entry:
 ; CORTEXA8: test:
 ; CORTEXA8: vmul.f32 d0, d1, d0
 ; CORTEXA9: test:
-; CORTEXA9: vmul.f32 s0, s0, s1
+; CORTEXA9: vmul.f32 s0, s1, s0
+1 -1
View File
@@ -36,8 +36,8 @@ entry:
 ; lsl #2 is free
 ; A9: test3:
-; A9: ldr r1, [r1, r2, lsl #2]
 ; A9: ldr r0, [r0, r2, lsl #2]
+; A9: ldr r1, [r1, r2, lsl #2]
%tmp1 = shl i32 %offset, 2
%tmp2 = add i32 %base, %tmp1
%tmp3 = inttoptr i32 %tmp2 to i32*