diff --git a/lib/Analysis/InstructionSimplify.cpp b/lib/Analysis/InstructionSimplify.cpp
index e617ff2980d..4b6320bb46a 100644
--- a/lib/Analysis/InstructionSimplify.cpp
+++ b/lib/Analysis/InstructionSimplify.cpp
@@ -714,7 +714,7 @@ Value *llvm::SimplifyGEPInst(Value *const *Ops, unsigned NumOps,
     // getelementptr P, N -> P if P points to a type of zero size.
     if (TD) {
       const Type *Ty = PtrTy->getElementType();
-      if (Ty->isSized() && !TD->getTypeAllocSize(Ty))
+      if (Ty->isSized() && TD->getTypeAllocSize(Ty) == 0)
         return Ops[0];
     }
   }
diff --git a/lib/Transforms/InstCombine/InstructionCombining.cpp b/lib/Transforms/InstCombine/InstructionCombining.cpp
index 89d5bfbb6fc..cfc418256aa 100644
--- a/lib/Transforms/InstCombine/InstructionCombining.cpp
+++ b/lib/Transforms/InstCombine/InstructionCombining.cpp
@@ -523,25 +523,35 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
 
   Value *PtrOp = GEP.getOperand(0);
 
-  // Eliminate unneeded casts for indices.
+  // Eliminate unneeded casts for indices, and replace indices which displace
+  // by multiples of a zero size type with zero.
   if (TD) {
     bool MadeChange = false;
-    unsigned PtrSize = TD->getPointerSizeInBits();
-
+    const Type *IntPtrTy = TD->getIntPtrType(GEP.getContext());
+
     gep_type_iterator GTI = gep_type_begin(GEP);
     for (User::op_iterator I = GEP.op_begin() + 1, E = GEP.op_end();
          I != E; ++I, ++GTI) {
-      if (!isa<SequentialType>(*GTI)) continue;
-
-      // If we are using a wider index than needed for this platform, shrink it
-      // to what we need.  If narrower, sign-extend it to what we need.  This
-      // explicit cast can make subsequent optimizations more obvious.
-      unsigned OpBits = cast<IntegerType>((*I)->getType())->getBitWidth();
-      if (OpBits == PtrSize)
-        continue;
-
-      *I = Builder->CreateIntCast(*I, TD->getIntPtrType(GEP.getContext()),true);
-      MadeChange = true;
+      // Skip indices into struct types.
+      const SequentialType *SeqTy = dyn_cast<SequentialType>(*GTI);
+      if (!SeqTy) continue;
+
+      // If the element type has zero size then any index over it is equivalent
+      // to an index of zero, so replace it with zero if it is not zero already.
+      if (SeqTy->getElementType()->isSized() &&
+          TD->getTypeAllocSize(SeqTy->getElementType()) == 0)
+        if (!isa<Constant>(*I) || !cast<Constant>(*I)->isNullValue()) {
+          *I = Constant::getNullValue(IntPtrTy);
+          MadeChange = true;
+        }
+
+      if ((*I)->getType() != IntPtrTy) {
+        // If we are using a wider index than needed for this platform, shrink
+        // it to what we need.  If narrower, sign-extend it to what we need.
+        // This explicit cast can make subsequent optimizations more obvious.
+        *I = Builder->CreateIntCast(*I, IntPtrTy, true);
+        MadeChange = true;
+      }
     }
     if (MadeChange) return &GEP;
   }
diff --git a/test/Transforms/InstCombine/2010-11-21-SizeZeroTypeGEP.ll b/test/Transforms/InstCombine/2010-11-21-SizeZeroTypeGEP.ll
index 24da5bb3df2..720365c4d6b 100644
--- a/test/Transforms/InstCombine/2010-11-21-SizeZeroTypeGEP.ll
+++ b/test/Transforms/InstCombine/2010-11-21-SizeZeroTypeGEP.ll
@@ -1,8 +1,17 @@
-; RUN: opt < %s -instcombine -S | not grep getelementptr
+; RUN: opt < %s -instcombine -S | FileCheck %s
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
 
 define {}* @foo({}* %x, i32 %n) {
+; CHECK: @foo
+; CHECK-NOT: getelementptr
   %p = getelementptr {}* %x, i32 %n
   ret {}* %p
 }
+
+define i8* @bar(i64 %n, {{}, [0 x {[0 x i8]}]}* %p) {
+; CHECK: @bar
+  %g = getelementptr {{}, [0 x {[0 x i8]}]}* %p, i64 %n, i32 1, i64 %n, i32 0, i64 %n
+; CHECK: %p, i64 0, i32 1, i64 0, i32 0, i64 %n
+  ret i8* %g
+}
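
For illustration (not part of the patch): the InstCombine change rewrites any index that steps over a zero-size element type to zero, since such an index displaces the pointer by index * 0 = 0 bytes. Below is a minimal sketch in the same pre-opaque-pointer IR as the tests above, assuming the same datalayout; the function name @zero_size_index and the expected output line are hypothetical, written by analogy with the @bar test.

; The pointee {[0 x i8]} has alloc size 0, so the leading i64 %n index moves
; the pointer by %n * 0 bytes and is equivalent to an index of 0. The trailing
; i64 %n indexes i8 elements (alloc size 1), so it must be kept.
define i8* @zero_size_index({[0 x i8]}* %p, i64 %n) {
  %g = getelementptr {[0 x i8]}* %p, i64 %n, i32 0, i64 %n
  ret i8* %g
}
; Expected after "opt -instcombine -S" (an assumption, mirroring @bar):
;   %g = getelementptr {[0 x i8]}* %p, i64 0, i32 0, i64 %n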