diff --git a/lib/Target/X86/X86FastISel.cpp b/lib/Target/X86/X86FastISel.cpp
index 6fa928462b2..88744861e86 100644
--- a/lib/Target/X86/X86FastISel.cpp
+++ b/lib/Target/X86/X86FastISel.cpp
@@ -399,33 +399,39 @@ bool X86FastISel::X86SelectAddress(const Value *V, X86AddressMode &AM) {
         Disp += SL->getElementOffset(Idx);
       } else {
         uint64_t S = TD.getTypeAllocSize(GTI.getIndexedType());
-        SmallVector<const Value *, 4> Worklist;
-        Worklist.push_back(Op);
-        do {
-          Op = Worklist.pop_back_val();
+        for (;;) {
           if (const ConstantInt *CI = dyn_cast<ConstantInt>(Op)) {
             // Constant-offset addressing.
             Disp += CI->getSExtValue() * S;
-          } else if (isa<AddOperator>(Op) &&
-                     isa<ConstantInt>(cast<AddOperator>(Op)->getOperand(1))) {
-            // An add with a constant operand. Fold the constant.
+            break;
+          }
+          if (isa<AddOperator>(Op) &&
+              (!isa<Instruction>(Op) ||
+               FuncInfo.MBBMap[cast<Instruction>(Op)->getParent()]
+                 == FuncInfo.MBB) &&
+              isa<ConstantInt>(cast<AddOperator>(Op)->getOperand(1))) {
+            // An add (in the same block) with a constant operand. Fold the
+            // constant.
             ConstantInt *CI =
               cast<ConstantInt>(cast<AddOperator>(Op)->getOperand(1));
             Disp += CI->getSExtValue() * S;
-            // Add the other operand back to the work list.
-            Worklist.push_back(cast<AddOperator>(Op)->getOperand(0));
-          } else if (IndexReg == 0 &&
-                     (!AM.GV || !Subtarget->isPICStyleRIPRel()) &&
-                     (S == 1 || S == 2 || S == 4 || S == 8)) {
+            // Iterate on the other operand.
+            Op = cast<AddOperator>(Op)->getOperand(0);
+            continue;
+          }
+          if (IndexReg == 0 &&
+              (!AM.GV || !Subtarget->isPICStyleRIPRel()) &&
+              (S == 1 || S == 2 || S == 4 || S == 8)) {
             // Scaled-index addressing.
             Scale = S;
             IndexReg = getRegForGEPIndex(Op).first;
             if (IndexReg == 0)
               return false;
-          } else
-            // Unsupported.
-            goto unsupported_gep;
-        } while (!Worklist.empty());
+            break;
+          }
+          // Unsupported.
+          goto unsupported_gep;
+        }
       }
     }
     // Check for displacement overflow.
diff --git a/test/CodeGen/X86/fast-isel-gep.ll b/test/CodeGen/X86/fast-isel-gep.ll
index fbe0243716b..48abfd0f26e 100644
--- a/test/CodeGen/X86/fast-isel-gep.ll
+++ b/test/CodeGen/X86/fast-isel-gep.ll
@@ -87,4 +87,23 @@ define i64 @test5(i8* %A, i32 %I, i64 %B) nounwind {
 ; X64-NEXT: ret
 }
 
+; PR9500, rdar://9156159 - Don't do non-local address mode folding,
+; because it may require values which wouldn't otherwise be live out
+; of their blocks.
+define void @test6() {
+if.end:                                           ; preds = %if.then, %invoke.cont
+  %tmp15 = load i64* undef
+  %dec = add i64 %tmp15, 13
+  store i64 %dec, i64* undef
+  %call17 = invoke i8* @_ZNK18G__FastAllocString4dataEv()
+          to label %invoke.cont16 unwind label %lpad
+invoke.cont16:                                    ; preds = %if.then14
+  %arrayidx18 = getelementptr inbounds i8* %call17, i64 %dec
+  store i8 0, i8* %arrayidx18
+  unreachable
+
+lpad:                                             ; preds = %if.end19, %if.then14, %if.end, %entry
+  unreachable
+}
+declare i8* @_ZNK18G__FastAllocString4dataEv() nounwind
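
Note (illustration, not part of the patch): the change replaces the old
worklist-driven fold with a single for (;;) walk over one GEP index and adds
a same-block test, FuncInfo.MBBMap[cast<Instruction>(Op)->getParent()] ==
FuncInfo.MBB, so FastISel no longer folds an `add x, C` defined in another
basic block into the address mode; as the test comment says, doing so may
require values which wouldn't otherwise be live out of their blocks (PR9500).
The standalone C++ sketch below models just that control flow. Node,
foldGEPIndex, and inCurrentBlock are toy stand-ins invented here, not LLVM's
classes.

#include <cstdint>
#include <iostream>

// Toy stand-in for an IR value: a constant, an add of (lhs + rhsConst),
// or an opaque leaf (e.g. a load result).
struct Node {
  bool isConstant;       // models isa<ConstantInt>
  int64_t constVal;      // the constant's value, when isConstant
  bool isAdd;            // models isa<AddOperator>
  bool inCurrentBlock;   // models the FuncInfo.MBBMap check in the patch
  const Node *lhs;       // add operand 0
  const Node *rhsConst;  // add operand 1 (a constant, when isAdd)
};

// Mirrors the patched for (;;) loop: fold constants and same-block adds
// into Disp; anything else must supply the scaled-index register (returned
// here as the node itself; nullptr means no index register is needed).
const Node *foldGEPIndex(const Node *Op, int64_t S, int64_t &Disp) {
  for (;;) {
    if (Op->isConstant) {
      Disp += Op->constVal * S;            // constant-offset addressing
      return nullptr;
    }
    if (Op->isAdd && Op->inCurrentBlock && Op->rhsConst->isConstant) {
      Disp += Op->rhsConst->constVal * S;  // fold the constant...
      Op = Op->lhs;                        // ...and iterate on the other operand
      continue;
    }
    return Op;                             // scaled-index addressing
  }
}

int main() {
  Node c13{true, 13, false, true, nullptr, nullptr};
  Node leaf{false, 0, false, true, nullptr, nullptr};
  // The same "add leaf, 13", once defined in the current block and once not.
  Node addLocal{false, 0, true, true, &leaf, &c13};
  Node addRemote{false, 0, true, false, &leaf, &c13};

  int64_t disp = 0;
  const Node *idx = foldGEPIndex(&addLocal, 8, disp);
  std::cout << "local:  disp=" << disp << " idx==leaf: " << (idx == &leaf) << "\n";
  // local:  disp=104 idx==leaf: 1  -- the +13 is folded into the displacement.

  disp = 0;
  idx = foldGEPIndex(&addRemote, 8, disp);
  std::cout << "remote: disp=" << disp << " idx==add: " << (idx == &addRemote) << "\n";
  // remote: disp=0 idx==add: 1    -- nothing folded across the block boundary.
}

In the real patch the fall-through case additionally requires IndexReg == 0,
no RIP-relative global, and S in {1, 2, 4, 8} before choosing scaled-index
addressing, and otherwise bails out via goto unsupported_gep; the sketch
elides those checks.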