diff --git a/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp b/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
index 87bb296b8c7..ec8f014f651 100644
--- a/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
+++ b/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
@@ -754,6 +754,11 @@ bool SelectionDAGISel::TryToFoldFastISelLoad(const LoadInst *LI,
     TheUser = TheUser->use_back();
   }
 
+  // If we didn't find the fold instruction, then we failed to collapse the
+  // sequence.
+  if (TheUser != FoldInst)
+    return false;
+
   // Don't try to fold volatile loads.  Target has to deal with alignment
   // constraints.
   if (LI->isVolatile()) return false;
diff --git a/test/CodeGen/X86/fast-isel-gep.ll b/test/CodeGen/X86/fast-isel-gep.ll
index 1a2e34ec7f5..d141487c25f 100644
--- a/test/CodeGen/X86/fast-isel-gep.ll
+++ b/test/CodeGen/X86/fast-isel-gep.ll
@@ -107,3 +107,30 @@ lpad:                                             ; preds = %if.end19, %if.then1
   unreachable
 }
 declare i8* @_ZNK18G__FastAllocString4dataEv() nounwind
+
+
+; PR10605 / rdar://9930964 - Don't fold loads incorrectly.  The load should
+; happen before the store.
+define i32 @test7({i32,i32,i32}* %tmp1, i32 %tmp71, i32 %tmp63) nounwind {
+; X64: test7:
+; X64: movl 8({{%rdi|%rcx}}), %eax
+; X64: movl $4, 8({{%rdi|%rcx}})
+
+
+  %tmp29 = getelementptr inbounds {i32,i32,i32}* %tmp1, i32 0, i32 2
+  %tmp30 = load i32* %tmp29, align 4
+
+  %p2 = getelementptr inbounds {i32,i32,i32}* %tmp1, i32 0, i32 2
+  store i32 4, i32* %p2
+
+  %tmp72 = or i32 %tmp71, %tmp30
+  %tmp73 = icmp ne i32 %tmp63, 32
+  br i1 %tmp73, label %T, label %F
+
+T:
+  ret i32 %tmp72
+
+F:
+  ret i32 4
+}
+