[Reassociate] Canonicalize the operands of all binary operators.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@222008 91177308-0d34-0410-b5e6-96231b3b80d8
commit 5c76b3d03e
parent e968287996
@@ -2078,19 +2078,19 @@ void Reassociate::OptimizeInst(Instruction *I) {
   if (Instruction *Res = canonicalizeNegConstExpr(I))
     I = Res;
 
-  // Commute floating point binary operators, to canonicalize the order of their
-  // operands. This can potentially expose more CSE opportunities, and makes
-  // writing other transformations simpler.
-  if (I->getType()->isFloatingPointTy() || I->getType()->isVectorTy()) {
-
-    if (I->isCommutative())
-      canonicalizeOperands(I);
-
-    // Don't try to optimize vector instructions or anything that doesn't have
-    // unsafe algebra.
-    if (I->getType()->isVectorTy() || !I->hasUnsafeAlgebra())
-      return;
-  }
+  // Commute binary operators, to canonicalize the order of their operands.
+  // This can potentially expose more CSE opportunities, and makes writing other
+  // transformations simpler.
+  if (I->isCommutative())
+    canonicalizeOperands(I);
+
+  // Don't optimize vector instructions.
+  if (I->getType()->isVectorTy())
+    return;
+
+  // Don't optimize floating point instructions that don't have unsafe algebra.
+  if (I->getType()->isFloatingPointTy() && !I->hasUnsafeAlgebra())
+    return;
 
   // Do not reassociate boolean (i1) expressions. We want to preserve the
   // original order of evaluation for short-circuited comparisons that
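Note the reordering of the early exits: every commutative binary operator is now commuted into canonical operand order first, and only afterwards do vector instructions and floating-point instructions without unsafe algebra return early, so even those get a canonical operand order before reassociation is skipped. A minimal sketch of which instructions proceed past this point (types and value names are illustrative only):

  %iv = add i32 %x, %y          ; canonicalized, then goes on to be reassociated
  %vv = add <4 x i32> %a, %b    ; canonicalized, then skipped (vector instruction)
  %fv = fadd float %p, %q       ; canonicalized, then skipped unless it has unsafe algebra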
test/Transforms/Reassociate/commute.ll (new file)
@@ -0,0 +1,19 @@
+; RUN: opt -reassociate -S < %s | FileCheck %s
+
+declare void @use(i32)
+
+define void @test1(i32 %x, i32 %y) {
+; CHECK-LABEL: test1
+; CHECK: mul i32 %y, %x
+; CHECK: mul i32 %y, %x
+; CHECK: sub i32 %1, %2
+; CHECK: call void @use(i32 %{{.*}})
+; CHECK: call void @use(i32 %{{.*}})
+
+  %1 = mul i32 %x, %y
+  %2 = mul i32 %y, %x
+  %3 = sub i32 %1, %2
+  call void @use(i32 %1)
+  call void @use(i32 %3)
+  ret void
+}
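For reference, the IR these CHECK lines describe after running opt -reassociate on test1 would look roughly like the following (a sketch assuming the original value numbering is preserved, as the sub i32 %1, %2 check implies; only the operand order of %1 changes):

  define void @test1(i32 %x, i32 %y) {
    %1 = mul i32 %y, %x
    %2 = mul i32 %y, %x
    %3 = sub i32 %1, %2
    call void @use(i32 %1)
    call void @use(i32 %3)
    ret void
  }

Since %1 and %2 are now textually identical, a later pass such as EarlyCSE or GVN can merge them; Reassociate itself only establishes the canonical order.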
@@ -9,7 +9,7 @@ define i64 @multistep1(i64 %a, i64 %b, i64 %c) {
   %t3 = mul i64 %a, %t2 ; a*(a*c)
   %t4 = add i64 %t1, %t3
 ; CHECK-NEXT: add i64 %c, %b
 ; CHECK-NEXT: mul i64 %tmp{{.*}}, %a
-; CHECK-NEXT: mul i64 %a, %tmp{{.*}}
+; CHECK-NEXT: mul i64 %tmp{{.*}}, %a
 ; CHECK-NEXT: ret
   ret i64 %t4
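The change to this hunk is only the operand order expected for the second multiply: with canonicalization now applied to integer operators, both multiplies by %a are expected to print the higher-ranked intermediate value first. The sequence the updated CHECK-NEXT lines describe is roughly the following (value names are illustrative, since the actual output uses %tmp-style names):

  %tmp  = add i64 %c, %b
  %tmp1 = mul i64 %tmp, %a
  %tmp2 = mul i64 %tmp1, %a
  ret i64 %tmp2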