Mirror of https://github.com/c64scene-ar/llvm-6502.git (synced 2025-01-22 10:33:23 +00:00)

Commit 79590b8edf
Instcombine was transforming:

  %shr = lshr i64 %key, 3
  %0 = load i64* %val, align 8
  %sub = add i64 %0, -1
  %and = and i64 %sub, %shr
  ret i64 %and

to:

  %shr = lshr i64 %key, 3
  %0 = load i64* %val, align 8
  %sub = add i64 %0, 2305843009213693951
  %and = and i64 %sub, %shr
  ret i64 %and

The demanded-bits optimization is actually a pessimization here: the add of -1 would be codegen'ed as a sub 1 (a cheap immediate), while the widened constant is much more expensive to materialize. Teach the demanded-constant shrinking optimization to also check the negated constant, to make sure the shrink actually reduces the width of the constant.

rdar://11793464

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@160101 91177308-0d34-0410-b5e6-96231b3b80d8
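To make the width argument concrete, here is a minimal, self-contained C++ sketch of the check the message describes. This is not LLVM's actual implementation: shouldShrinkAddConstant and minSignedBits are hypothetical names, plain 64-bit integers stand in for llvm::APInt, and the cost model is reduced to "fewer significant signed bits is cheaper".

  #include <algorithm>
  #include <cassert>
  #include <cstdint>
  #include <cstdio>

  // Bits needed to represent V in two's complement, including the sign bit
  // (the same idea as llvm::APInt::getMinSignedBits()).
  static unsigned minSignedBits(int64_t V) {
    uint64_t Mag = static_cast<uint64_t>(V < 0 ? ~V : V);
    unsigned Bits = 1; // sign bit
    while (Mag) {
      ++Bits;
      Mag >>= 1;
    }
    return Bits;
  }

  // Hypothetical stand-in for the shrinking decision on `add x, C` when only
  // the bits in Demanded are observed. Because codegen emits `add x, C` with
  // negative C as `sub x, -C`, the old width must be taken as the cheaper of
  // C and -C; otherwise turning -1 (signed width 1) into 2305843009213693951
  // (signed width 62) looks like progress when it is not.
  static bool shouldShrinkAddConstant(int64_t C, uint64_t Demanded) {
    int64_t Shrunk = static_cast<int64_t>(static_cast<uint64_t>(C) & Demanded);
    if (Shrunk == C)
      return false; // no undemanded bits are set; nothing to shrink
    unsigned OldWidth = minSignedBits(C);
    if (C < 0 && C != INT64_MIN) // negating INT64_MIN would overflow
      OldWidth = std::min(OldWidth, minSignedBits(-C)); // the sub form
    return minSignedBits(Shrunk) < OldWidth;
  }

  int main() {
    // In the test below, %shr = lshr i64 %key, 3 has its top 3 bits known
    // zero, so the `and` demands only the low 61 bits of the add:
    // (1 << 61) - 1 == 2305843009213693951.
    const uint64_t Low61 = (1ULL << 61) - 1;

    // Shrinking -1 would grow the constant; keep add -1 (i.e. sub 1).
    assert(!shouldShrinkAddConstant(-1, Low61));

    // A constant with stray undemanded high bits still shrinks when it helps.
    assert(shouldShrinkAddConstant(static_cast<int64_t>(1ULL << 62) | 3, 0xFF));

    printf("demanded-constant shrinking checks passed\n");
    return 0;
  }

In LLVM itself this decision sits in instcombine's demanded-bits simplification (the constant-shrinking helper the message refers to); the sketch only models the width comparison, not the APInt plumbing or the IR rewrite.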
19 lines · 538 B · LLVM
; RUN: opt < %s -instcombine -S | FileCheck %s

; When shrinking the demanded constant operand of an add instruction, keep in
; mind the opcode can be changed to sub and the constant negated. Make sure
; shrinking the constant would actually reduce the width.
; rdar://11793464

define i64 @t(i64 %key, i64* %val) nounwind {
entry:
; CHECK: @t
; CHECK-NOT: add i64 %0, 2305843009213693951
; CHECK: add i64 %0, -1
  %shr = lshr i64 %key, 3
  %0 = load i64* %val, align 8
  %sub = sub i64 %0, 1
  %and = and i64 %sub, %shr
  ret i64 %and
}