If a GEP index simply advances by multiples of a type of zero size, then replace the index with zero.


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@119974 91177308-0d34-0410-b5e6-96231b3b80d8
Duncan Sands
2010-11-22 16:32:50 +00:00
parent d87e571e62
commit a63395a30f
3 changed files with 35 additions and 16 deletions
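
For illustration (a minimal sketch, not part of the patch; %p, %q and %n are made-up names): the byte offset contributed by a GEP index is the index multiplied by the allocation size of the type it steps over, so an index over a zero-size type contributes N * 0 = 0 bytes and can be replaced with zero without changing the computed address. On a 64-bit datalayout like the one in the test below,

    %q = getelementptr [0 x {}]* %p, i64 0, i64 %n

can be rewritten as

    %q = getelementptr [0 x {}]* %p, i64 0, i64 0

because the empty struct {} has an alloc size of zero.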


@@ -714,7 +714,7 @@ Value *llvm::SimplifyGEPInst(Value *const *Ops, unsigned NumOps,
     // getelementptr P, N -> P if P points to a type of zero size.
     if (TD) {
       const Type *Ty = PtrTy->getElementType();
-      if (Ty->isSized() && !TD->getTypeAllocSize(Ty))
+      if (Ty->isSized() && TD->getTypeAllocSize(Ty) == 0)
         return Ops[0];
     }
   }
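
In this SimplifyGEPInst case, getelementptr P, N folds to P outright when P points to a zero-size type: whatever N is, the byte offset is N * 0 = 0, so the result is P itself. The @foo function in the test file below is an instance of this fold:

    %p = getelementptr {}* %x, i32 %n   ; pointee {} has zero size
    ; simplifies to just %x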


@@ -523,26 +523,36 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
   Value *PtrOp = GEP.getOperand(0);
 
-  // Eliminate unneeded casts for indices.
+  // Eliminate unneeded casts for indices, and replace indices which displace
+  // by multiples of a zero size type with zero.
   if (TD) {
     bool MadeChange = false;
-    unsigned PtrSize = TD->getPointerSizeInBits();
+    const Type *IntPtrTy = TD->getIntPtrType(GEP.getContext());
 
     gep_type_iterator GTI = gep_type_begin(GEP);
     for (User::op_iterator I = GEP.op_begin() + 1, E = GEP.op_end();
          I != E; ++I, ++GTI) {
-      if (!isa<SequentialType>(*GTI)) continue;
+      // Skip indices into struct types.
+      const SequentialType *SeqTy = dyn_cast<SequentialType>(*GTI);
+      if (!SeqTy) continue;
 
-      // If we are using a wider index than needed for this platform, shrink it
-      // to what we need.  If narrower, sign-extend it to what we need.  This
-      // explicit cast can make subsequent optimizations more obvious.
-      unsigned OpBits = cast<IntegerType>((*I)->getType())->getBitWidth();
-      if (OpBits == PtrSize)
-        continue;
-
-      *I = Builder->CreateIntCast(*I, TD->getIntPtrType(GEP.getContext()),true);
-      MadeChange = true;
+      // If the element type has zero size then any index over it is equivalent
+      // to an index of zero, so replace it with zero if it is not zero already.
+      if (SeqTy->getElementType()->isSized() &&
+          TD->getTypeAllocSize(SeqTy->getElementType()) == 0)
+        if (!isa<Constant>(*I) || !cast<Constant>(*I)->isNullValue()) {
+          *I = Constant::getNullValue(IntPtrTy);
+          MadeChange = true;
+        }
+
+      if ((*I)->getType() != IntPtrTy) {
+        // If we are using a wider index than needed for this platform, shrink
+        // it to what we need.  If narrower, sign-extend it to what we need.
+        // This explicit cast can make subsequent optimizations more obvious.
+        *I = Builder->CreateIntCast(*I, IntPtrTy, true);
+        MadeChange = true;
+      }
     }
     if (MadeChange) return &GEP;
   }
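
The second half of the rewritten loop is the pre-existing index-width canonicalization, now keyed off IntPtrTy: any index whose type differs from the target's pointer-sized integer is sign-extended or truncated to it. A hedged sketch of the effect on the 64-bit datalayout used in the test (%idx is an illustrative name, not from the patch):

    %q = getelementptr i8* %p, i32 %n

would become

    %idx = sext i32 %n to i64
    %q = getelementptr i8* %p, i64 %idx

since i8 has a nonzero size, only the cast applies here.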


@@ -1,8 +1,17 @@
-; RUN: opt < %s -instcombine -S | not grep getelementptr
+; RUN: opt < %s -instcombine -S | FileCheck %s
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
 
 define {}* @foo({}* %x, i32 %n) {
+; CHECK: @foo
+; CHECK-NOT: getelementptr
   %p = getelementptr {}* %x, i32 %n
   ret {}* %p
 }
+
+define i8* @bar(i64 %n, {{}, [0 x {[0 x i8]}]}* %p) {
+; CHECK: @bar
+  %g = getelementptr {{}, [0 x {[0 x i8]}]}* %p, i64 %n, i32 1, i64 %n, i32 0, i64 %n
+; CHECK: %p, i64 0, i32 1, i64 0, i32 0, i64 %n
+  ret i8* %g
+}
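
Reading the new @bar test: the first i64 %n steps over the pointee struct {{}, [0 x {[0 x i8]}]}, whose alloc size is zero, so it is replaced with i64 0; i32 1 and i32 0 are struct field selectors and are skipped; the next i64 %n steps over {[0 x i8]} (also zero size) and becomes i64 0; the final i64 %n steps over i8 (size 1) and is kept. The CHECK line therefore matches a GEP of roughly the form

    %g = getelementptr {{}, [0 x {[0 x i8]}]}* %p, i64 0, i32 1, i64 0, i32 0, i64 %n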