Allow MachineCSE to coalesce trivial subregister copies the same way
that it coalesces normal copies.

Without this, MachineCSE is powerless to handle redundant operations
with truncated source operands.
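
As an illustration only (this is not the code that was committed to
MachineCSE.cpp), the C++ sketch below shows the kind of check that has
to be loosened: a COPY whose source operand carries a subregister index
can still be treated as a trivial copy, provided both registers are
virtual and the source register class actually supports that
subregister. The helper name isTrivialSubregCopy is made up for this
note, and the include paths follow the in-tree layout of that era.

    #include "llvm/CodeGen/MachineInstr.h"
    #include "llvm/CodeGen/MachineRegisterInfo.h"
    #include "llvm/Target/TargetRegisterInfo.h"

    using namespace llvm;

    // Hypothetical helper: decide whether a COPY (possibly reading a
    // subregister of its source) is simple enough to coalesce away.
    static bool isTrivialSubregCopy(const MachineInstr *MI,
                                    const MachineRegisterInfo &MRI,
                                    const TargetRegisterInfo &TRI) {
      if (!MI->isCopy())
        return false;
      unsigned DstReg = MI->getOperand(0).getReg();
      unsigned SrcReg = MI->getOperand(1).getReg();
      unsigned SrcSub = MI->getOperand(1).getSubReg();

      // Only virtual-to-virtual copies are considered here.
      if (!TargetRegisterInfo::isVirtualRegister(DstReg) ||
          !TargetRegisterInfo::isVirtualRegister(SrcReg))
        return false;

      // A subregister *def* is a partial write and is left alone.
      if (MI->getOperand(0).getSubReg())
        return false;

      // Plain vreg-to-vreg copy: always trivial.
      if (!SrcSub)
        return true;

      // e.g. %vreg11:GR32 = COPY %vreg1:sub_32bit with %vreg1:GR64 is fine
      // as long as the source class really has that subregister index.
      return TRI.getSubClassWithSubReg(MRI.getRegClass(SrcReg), SrcSub) != 0;
    }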

This required fixing the 2-addr pass to handle tied subregisters. It
isn't clear what combinations of subregisters can legally be tied, but
the simple case of truncated source operands is now safely handled:

     %vreg11<def> = COPY %vreg1:sub_32bit; GR32:%vreg11 GR64:%vreg1
     %vreg12<def> = COPY %vreg2:sub_32bit; GR32:%vreg12 GR64:%vreg2
     %vreg13<def,tied1> = ADD32rr %vreg11<tied0>, %vreg12<kill>, %EFLAGS<imp-def>
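
As a hedged sketch only (not the actual TwoAddressInstructionPass
change), the code below illustrates why tied subregisters are delicate:
once the tied use has been rewritten to read %vreg1:sub_32bit, the copy
that the two-address pass materializes must carry that subregister index
rather than copy the whole 64-bit register. The helper name
untieSubregUse is hypothetical, and the include paths again follow the
tree layout of that era.

    #include "llvm/CodeGen/MachineBasicBlock.h"
    #include "llvm/CodeGen/MachineInstrBuilder.h"
    #include "llvm/CodeGen/MachineRegisterInfo.h"
    #include "llvm/Target/TargetInstrInfo.h"
    #include "llvm/Target/TargetOpcodes.h"

    using namespace llvm;

    // Hypothetical helper: break the tie between a def and a use that
    // carries a subregister index by copying the (sub)register into a
    // fresh vreg of the def's class, then rewriting the use to read the
    // full new register.
    static void untieSubregUse(MachineInstr *MI, unsigned UseIdx,
                               unsigned DefIdx, MachineRegisterInfo &MRI,
                               const TargetInstrInfo &TII) {
      MachineOperand &UseMO = MI->getOperand(UseIdx);
      const TargetRegisterClass *DstRC =
          MRI.getRegClass(MI->getOperand(DefIdx).getReg());
      unsigned NewReg = MRI.createVirtualRegister(DstRC);

      // %NewReg<def> = COPY %UseReg:subidx  (subidx may be 0 for a full copy)
      BuildMI(*MI->getParent(), MachineBasicBlock::iterator(MI),
              MI->getDebugLoc(), TII.get(TargetOpcode::COPY), NewReg)
          .addReg(UseMO.getReg(), 0, UseMO.getSubReg());

      // The tied use now reads the full-width new register, so the normal
      // two-address rewriting can proceed without a subregister index.
      UseMO.setReg(NewReg);
      UseMO.setSubReg(0);
    }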

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@197414 91177308-0d34-0410-b5e6-96231b3b80d8
Author: Andrew Trick
Date:   2013-12-16 19:36:21 +00:00
parent 86d28968fd
commit b961a26cec
4 changed files with 79 additions and 11 deletions

@@ -0,0 +1,42 @@
; RUN: llc < %s -mtriple=x86_64-darwin -mcpu=generic | FileCheck %s
; rdar:15661073 simple example of redundant adds
;
; MachineCSE should coalesce trivial subregister copies.
;
; The extra movl+addl should be removed during MachineCSE.
; CHECK-LABEL: redundantadd
; CHECK: cmpq
; CHECK: movq
; CHECK-NOT: movl
; CHECK: addl
; CHECK-NOT: addl
; CHECK: ret
define i64 @redundantadd(i64* %a0, i64* %a1) {
entry:
%tmp8 = load i64* %a0, align 8
%tmp12 = load i64* %a1, align 8
%tmp13 = icmp ult i64 %tmp12, -281474976710656
br i1 %tmp13, label %exit1, label %body
exit1:
unreachable
body:
%tmp14 = trunc i64 %tmp8 to i32
%tmp15 = trunc i64 %tmp12 to i32
%tmp16 = tail call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %tmp14, i32 %tmp15)
%tmp17 = extractvalue { i32, i1 } %tmp16, 1
br i1 %tmp17, label %exit2, label %return
exit2:
unreachable
return:
%tmp18 = add i64 %tmp12, %tmp8
%tmp19 = and i64 %tmp18, 4294967295
%tmp20 = or i64 %tmp19, -281474976710656
ret i64 %tmp20
}
declare { i32, i1 } @llvm.sadd.with.overflow.i32(i32, i32)