Avoid illegal integer promotion in fastisel
Stop folding constant adds into GEP when the type size doesn't match.
Otherwise, the add's operands are effectively being promoted, changing the
conditions of an overflow. Results are different when:

    sext(a) + sext(b) != sext(a + b)

The problem was originally found on x86-64, but the same fix also applies to
ARM and PPC, which used similar code.

<rdar://problem/15292280>

Patch by Duncan Exon Smith!

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@194840 91177308-0d34-0410-b5e6-96231b3b80d8
This commit is contained in:
parent
5cddda6d13
commit
cc7052343e
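To make the inequality in the commit message concrete, here is a small, standalone C++ sketch. It is not part of the patch and the names are illustrative only; it reproduces the 0x40 + 0x40 case used by the new tests: sign-extending the 8-bit operands before adding yields 128, while adding in 8 bits first wraps before the sign extension.

#include <cstdint>
#include <cstdio>

int main() {
  int8_t a = 64, b = 64;  // 0x40 + 0x40, the values used in the new tests

  // Promote (sign-extend) each operand first, then add: 64 + 64 = 128.
  int64_t promote_then_add = static_cast<int64_t>(a) + static_cast<int64_t>(b);

  // Add in 8 bits first: 128 does not fit in i8 and wraps to -128 on
  // two's-complement targets; only then is the result sign-extended.
  int64_t add_then_promote = static_cast<int64_t>(static_cast<int8_t>(a + b));

  // Prints "128 vs -128": the folded and unfolded GEP offsets disagree.
  std::printf("%lld vs %lld\n", (long long)promote_then_add,
              (long long)add_then_promote);
  return 0;
}

This is the divergence that the type-size check in canFoldAddIntoGEP below guards against.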
@@ -358,6 +358,15 @@ protected:
     return 0;
   }
 
+  /// \brief Check if \c Add is an add that can be safely folded into \c GEP.
+  ///
+  /// \c Add can be folded into \c GEP if:
+  /// - \c Add is an add,
+  /// - \c Add's size matches \c GEP's,
+  /// - \c Add is in the same basic block as \c GEP, and
+  /// - \c Add has a constant operand.
+  bool canFoldAddIntoGEP(const User *GEP, const Value *Add);
+
 private:
   bool SelectBinaryOp(const User *I, unsigned ISDOpcode);
 
@@ -1571,4 +1571,19 @@ bool FastISel::tryToFoldLoad(const LoadInst *LI, const Instruction *FoldInst) {
 
   return tryToFoldLoadIntoMI(User, RI.getOperandNo(), LI);
 }
 
+bool FastISel::canFoldAddIntoGEP(const User *GEP, const Value *Add) {
+  // Must be an add.
+  if (!isa<AddOperator>(Add))
+    return false;
+  // Type size needs to match.
+  if (TD.getTypeSizeInBits(GEP->getType()) !=
+      TD.getTypeSizeInBits(Add->getType()))
+    return false;
+  // Must be in the same basic block.
+  if (isa<Instruction>(Add) &&
+      FuncInfo.MBBMap[cast<Instruction>(Add)->getParent()] != FuncInfo.MBB)
+    return false;
+  // Must have a constant operand.
+  return isa<ConstantInt>(cast<AddOperator>(Add)->getOperand(1));
+}
@@ -900,13 +900,8 @@ bool ARMFastISel::ARMComputeAddress(const Value *Obj, Address &Addr) {
         TmpOffset += CI->getSExtValue() * S;
         break;
       }
-      if (isa<AddOperator>(Op) &&
-          (!isa<Instruction>(Op) ||
-           FuncInfo.MBBMap[cast<Instruction>(Op)->getParent()]
-             == FuncInfo.MBB) &&
-          isa<ConstantInt>(cast<AddOperator>(Op)->getOperand(1))) {
-        // An add (in the same block) with a constant operand. Fold the
-        // constant.
+      if (canFoldAddIntoGEP(U, Op)) {
+        // A compatible add with a constant operand. Fold the constant.
         ConstantInt *CI =
           cast<ConstantInt>(cast<AddOperator>(Op)->getOperand(1));
         TmpOffset += CI->getSExtValue() * S;
@@ -336,13 +336,8 @@ bool PPCFastISel::PPCComputeAddress(const Value *Obj, Address &Addr) {
         TmpOffset += CI->getSExtValue() * S;
         break;
       }
-      if (isa<AddOperator>(Op) &&
-          (!isa<Instruction>(Op) ||
-           FuncInfo.MBBMap[cast<Instruction>(Op)->getParent()]
-             == FuncInfo.MBB) &&
-          isa<ConstantInt>(cast<AddOperator>(Op)->getOperand(1))) {
-        // An add (in the same block) with a constant operand. Fold the
-        // constant.
+      if (canFoldAddIntoGEP(U, Op)) {
+        // A compatible add with a constant operand. Fold the constant.
         ConstantInt *CI =
           cast<ConstantInt>(cast<AddOperator>(Op)->getOperand(1));
         TmpOffset += CI->getSExtValue() * S;
@@ -561,13 +561,8 @@ redo_gep:
         Disp += CI->getSExtValue() * S;
         break;
       }
-      if (isa<AddOperator>(Op) &&
-          (!isa<Instruction>(Op) ||
-           FuncInfo.MBBMap[cast<Instruction>(Op)->getParent()]
-             == FuncInfo.MBB) &&
-          isa<ConstantInt>(cast<AddOperator>(Op)->getOperand(1))) {
-        // An add (in the same block) with a constant operand. Fold the
-        // constant.
+      if (canFoldAddIntoGEP(U, Op)) {
+        // A compatible add with a constant operand. Fold the constant.
         ConstantInt *CI =
           cast<ConstantInt>(cast<AddOperator>(Op)->getOperand(1));
         Disp += CI->getSExtValue() * S;
test/CodeGen/ARM/fastisel-gep-promote-before-add.ll (new file, 18 lines)
@@ -0,0 +1,18 @@
+; fastisel should not fold add with non-pointer bitwidth
+; sext(a) + sext(b) != sext(a + b)
+; RUN: llc -mtriple=armv7-apple-ios %s -O0 -o - | FileCheck %s
+
+define zeroext i8 @gep_promotion(i8* %ptr) nounwind uwtable ssp {
+entry:
+  %ptr.addr = alloca i8*, align 8
+  %add = add i8 64, 64 ; 0x40 + 0x40
+  %0 = load i8** %ptr.addr, align 8
+
+  ; CHECK-LABEL: _gep_promotion:
+  ; CHECK: ldrb {{r[0-9]+}}, {{\[r[0-9]+\]}}
+  %arrayidx = getelementptr inbounds i8* %0, i8 %add
+
+  %1 = load i8* %arrayidx, align 1
+  ret i8 %1
+}
+
test/CodeGen/PowerPC/fastisel-gep-promote-before-add.ll (new file, 17 lines)
@@ -0,0 +1,17 @@
+; fastisel should not fold add with non-pointer bitwidth
+; sext(a) + sext(b) != sext(a + b)
+; RUN: llc -mtriple=powerpc64-unknown-freebsd10.0 %s -O0 -o - | FileCheck %s
+
+define zeroext i8 @gep_promotion(i8* %ptr) nounwind uwtable ssp {
+entry:
+  %ptr.addr = alloca i8*, align 8
+  %add = add i8 64, 64 ; 0x40 + 0x40
+  %0 = load i8** %ptr.addr, align 8
+
+  ; CHECK-LABEL: gep_promotion:
+  ; CHECK: lbz {{[0-9]+}}, 0({{.*}})
+  %arrayidx = getelementptr inbounds i8* %0, i8 %add
+
+  %1 = load i8* %arrayidx, align 1
+  ret i8 %1
+}
test/CodeGen/X86/fastisel-gep-promote-before-add.ll (new file, 37 lines)
@@ -0,0 +1,37 @@
+; fastisel should not fold add with non-pointer bitwidth
+; sext(a) + sext(b) != sext(a + b)
+; RUN: llc -mtriple=x86_64-apple-darwin %s -O0 -o - | FileCheck %s
+
+define zeroext i8 @gep_promotion(i8* %ptr) nounwind uwtable ssp {
+entry:
+  %ptr.addr = alloca i8*, align 8
+  %add = add i8 64, 64 ; 0x40 + 0x40
+  %0 = load i8** %ptr.addr, align 8
+
+  ; CHECK-LABEL: _gep_promotion:
+  ; CHECK: movzbl ({{.*}})
+  %arrayidx = getelementptr inbounds i8* %0, i8 %add
+
+  %1 = load i8* %arrayidx, align 1
+  ret i8 %1
+}
+
+define zeroext i8 @gep_promotion_nonconst(i8 %i, i8* %ptr) nounwind uwtable ssp {
+entry:
+  %i.addr = alloca i8, align 4
+  %ptr.addr = alloca i8*, align 8
+  store i8 %i, i8* %i.addr, align 4
+  store i8* %ptr, i8** %ptr.addr, align 8
+  %0 = load i8* %i.addr, align 4
+  ; CHECK-LABEL: _gep_promotion_nonconst:
+  ; CHECK: movzbl ({{.*}})
+  %xor = xor i8 %0, -128 ; %0 ^ 0x80
+  %add = add i8 %xor, -127 ; %xor + 0x81
+  %1 = load i8** %ptr.addr, align 8
+
+  %arrayidx = getelementptr inbounds i8* %1, i8 %add
+
+  %2 = load i8* %arrayidx, align 1
+  ret i8 %2
+}
+
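As a side note, not part of the commit: the second X86 test uses a non-constant input but still has a constant add operand, so only the new type-size check stops the fold. A minimal C++ sketch, assuming an input byte of 0, shows the same divergence for (i ^ 0x80) + 0x81; the variable names here are illustrative only.

#include <cstdint>
#include <cstdio>

int main() {
  int8_t i = 0;
  int8_t x = static_cast<int8_t>(i ^ 0x80);  // %xor: 0 ^ 0x80 == -128 in i8

  // 8-bit add wraps on two's-complement targets: -128 + (-127) == -255,
  // which is stored as 1 in i8.
  int8_t narrow = static_cast<int8_t>(x + (-127));

  // Sign-extend first, then add in 64 bits: the result stays -255.
  int64_t wide = static_cast<int64_t>(x) + (-127);

  // Prints "narrow=1 wide=-255": folding the constant into the 64-bit
  // GEP offset would compute the wrong address.
  std::printf("narrow=%d wide=%lld\n", (int)narrow, (long long)wide);
  return 0;
}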