diff --git a/lib/Transforms/Instrumentation/MemorySanitizer.cpp b/lib/Transforms/Instrumentation/MemorySanitizer.cpp index 8cc0084db59..30652378bf7 100644 --- a/lib/Transforms/Instrumentation/MemorySanitizer.cpp +++ b/lib/Transforms/Instrumentation/MemorySanitizer.cpp @@ -710,13 +710,13 @@ struct MemorySanitizerVisitor : public InstVisitor { Type *ShadowTy = getShadowTy(&I); Value *Addr = I.getPointerOperand(); Value *ShadowPtr = getShadowPtr(Addr, ShadowTy, IRB); - setShadow(&I, IRB.CreateLoad(ShadowPtr, "_msld")); + setShadow(&I, IRB.CreateAlignedLoad(ShadowPtr, I.getAlignment(), "_msld")); if (ClCheckAccessAddress) insertCheck(I.getPointerOperand(), &I); if (ClTrackOrigins) - setOrigin(&I, IRB.CreateLoad(getOriginPtr(Addr, IRB))); + setOrigin(&I, IRB.CreateAlignedLoad(getOriginPtr(Addr, IRB), I.getAlignment())); } /// \brief Instrument StoreInst @@ -731,7 +731,7 @@ struct MemorySanitizerVisitor : public InstVisitor { Value *Shadow = getShadow(Val); Value *ShadowPtr = getShadowPtr(Addr, Shadow->getType(), IRB); - StoreInst *NewSI = IRB.CreateStore(Shadow, ShadowPtr); + StoreInst *NewSI = IRB.CreateAlignedStore(Shadow, ShadowPtr, I.getAlignment()); DEBUG(dbgs() << " STORE: " << *NewSI << "\n"); // If the store is volatile, add a check. if (I.isVolatile()) @@ -740,7 +740,7 @@ struct MemorySanitizerVisitor : public InstVisitor { insertCheck(Addr, &I); if (ClTrackOrigins) - IRB.CreateStore(getOrigin(Val), getOriginPtr(Addr, IRB)); + IRB.CreateAlignedStore(getOrigin(Val), getOriginPtr(Addr, IRB), I.getAlignment()); } // Casts. diff --git a/test/Instrumentation/MemorySanitizer/msan_basic.ll b/test/Instrumentation/MemorySanitizer/msan_basic.ll index 37d4abc0860..2346d75f8e9 100644 --- a/test/Instrumentation/MemorySanitizer/msan_basic.ll +++ b/test/Instrumentation/MemorySanitizer/msan_basic.ll @@ -233,3 +233,28 @@ entry: ; CHECK: udiv ; CHECK-NOT: icmp ; CHECK: } + + +; Check that loads from shadow have the same alignment as the original loads. 
+ +define i32 @ShadowLoadAlignmentLarge() nounwind uwtable { + %y = alloca i32, align 64 + %1 = load volatile i32* %y, align 64 + ret i32 %1 +} + +; CHECK: define i32 @ShadowLoadAlignmentLarge +; CHECK: load i32* {{.*}} align 64 +; CHECK: load volatile i32* {{.*}} align 64 +; CHECK: } + +define i32 @ShadowLoadAlignmentSmall() nounwind uwtable { + %y = alloca i32, align 2 + %1 = load volatile i32* %y, align 2 + ret i32 %1 +} + +; CHECK: define i32 @ShadowLoadAlignmentSmall +; CHECK: load i32* {{.*}} align 2 +; CHECK: load volatile i32* {{.*}} align 2 +; CHECK: }