llvm-6502/test/Transforms/PhaseOrdering/basic.ll
Jakob Stoklund Olesen 72847f3057 Reapply r155136 after fixing PR12599.
Original commit message:

Defer some shl transforms to DAGCombine.

The shl instruction is used to represent multiplication by a constant
power of two as well as bitwise left shifts. Some InstCombine
transformations would turn an shl instruction into a bit mask operation,
making it difficult for later analysis passes to recognize the
constant multiplication.
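For example (an illustrative snippet, not part of this commit; %a is a
placeholder value), dividing by 4 and scaling back up is clearly a
multiple-of-4 computation in shift form, but not in masked form:

  ; before:
  %div = lshr i32 %a, 2      ; %a / 4
  %mul = shl i32 %div, 2     ; (%a >> 2) << 2, an obvious multiple of 4
  ; after the disabled transform:
  %mul = and i32 %a, -4      ; same value, but the multiply is hidden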

Disable those shl transformations, deferring them to DAGCombine time.
An 'shl X, C' instruction is now treated mostly the same way as 'mul X, C'.

These transformations are deferred:

  (X >>? C) << C   --> X & (-1 << C)  (When X >> C has multiple uses)
  (X >>? C1) << C2 --> X << (C2-C1) & (-1 << C2)   (When C2 > C1)
  (X >>? C1) << C2 --> X >>? (C1-C2) & (-1 << C2)  (When C1 > C2)
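
For example (illustrative IR, not from this commit; %x is a placeholder),
with C1 = 1 and C2 = 3 the second rule rewrites:

  %t = lshr i32 %x, 1
  %r = shl i32 %t, 3
  ; into:
  %s = shl i32 %x, 2         ; X << (C2-C1)
  %r = and i32 %s, -8        ; masked with -1 << C2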

The corresponding exact transformations are preserved, just like
div-exact + mul:

  (X >>?,exact C) << C   --> X
  (X >>?,exact C1) << C2 --> X << (C2-C1)
  (X >>?,exact C1) << C2 --> X >>?,exact (C1-C2)
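
For example (again illustrative), an exact shift pair collapses entirely,
since the exact flag guarantees that no set bits were shifted out:

  %t = lshr exact i32 %x, 2  ; low two bits of %x are known zero
  %r = shl i32 %t, 2         ; folds to plain %x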

The disabled transformations could also prevent the instruction selector
from recognizing rotate patterns in hash functions and cryptographic
primitives. I have a test case for that, but it is too fragile.
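For reference, a typical rotate idiom looks like this (a sketch with made-up
operand names, not the fragile test case mentioned above):

  %hi = shl i32 %x, 5
  %lo = lshr i32 %x, 27
  %rot = or i32 %hi, %lo     ; matched by isel as a rotate-left of %x by 5

If InstCombine had already rewritten one of the shifts into an and-mask
(e.g. because %x itself came from a shift), this pattern would no longer be
visible to the instruction selector.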

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@155362 91177308-0d34-0410-b5e6-96231b3b80d8
2012-04-23 17:39:52 +00:00

; RUN: opt -O3 -S %s | FileCheck %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
target triple = "x86_64-apple-macosx10.6.7"
declare i8* @malloc(i64)
declare void @free(i8*)

; PR2338
define void @test1() nounwind ssp {
  %retval = alloca i32, align 4
  %i = alloca i8*, align 8
  %call = call i8* @malloc(i64 1)
  store i8* %call, i8** %i, align 8
  %tmp = load i8** %i, align 8
  store i8 1, i8* %tmp
  %tmp1 = load i8** %i, align 8
  call void @free(i8* %tmp1)
  ret void
; CHECK: @test1
; CHECK-NEXT: ret void
}

; This function exposes a phase ordering problem: if InstCombine turns %add
; into a bitmask, it becomes difficult to spot the 0 return value.
;
; It is also important that %add is expressed as a multiple of %div so scalar
; evolution can recognize it.
define i32 @test2(i32 %a, i32* %p) nounwind uwtable ssp {
entry:
  %div = udiv i32 %a, 4
  %arrayidx = getelementptr inbounds i32* %p, i64 0
  store i32 %div, i32* %arrayidx, align 4
  %add = add i32 %div, %div
  %arrayidx1 = getelementptr inbounds i32* %p, i64 1
  store i32 %add, i32* %arrayidx1, align 4
  %arrayidx2 = getelementptr inbounds i32* %p, i64 1
  %0 = load i32* %arrayidx2, align 4
  %arrayidx3 = getelementptr inbounds i32* %p, i64 0
  %1 = load i32* %arrayidx3, align 4
  %mul = mul i32 2, %1
  %sub = sub i32 %0, %mul
  ret i32 %sub
; CHECK: @test2
; CHECK: %div = lshr i32 %a, 2
; CHECK: %add = shl nuw nsw i32 %div, 1
; CHECK: ret i32 0
}