[msan] Handle X86 SIMD bitshift intrinsics.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@202712 91177308-0d34-0410-b5e6-96231b3b80d8
This commit is contained in:
Evgeniy Stepanov
2014-03-03 13:47:42 +00:00
parent afa6150f04
commit 8b77d958fd
2 changed files with 219 additions and 0 deletions
@@ -1827,6 +1827,48 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
}
}
// Given a scalar or vector, extract lower 64 bits (or less), and return all
// zeroes if it is zero, and all ones otherwise.
Value *Lower64ShadowExtend(IRBuilder<> &IRB, Value *S, Type *T) {
  // Collapse a vector shadow down to a single i64 first; a scalar shadow
  // passes through unchanged.
  if (S->getType()->isVectorTy())
    S = CreateShadowCast(IRB, S, IRB.getInt64Ty(), /* Signed */ true);
  assert(S->getType()->getPrimitiveSizeInBits() <= 64);
  // Any set shadow bit means "poisoned": compare against the clean (zero)
  // shadow, then sign-extend the resulting i1 into all-ones of type T.
  Value *IsPoisoned = IRB.CreateICmpNE(S, getCleanShadow(S));
  return CreateShadowCast(IRB, IsPoisoned, T, /* Signed */ true);
}
// Element-wise variant: map each non-zero shadow element to an all-ones
// element of the same vector type (zero elements stay zero).
Value *VariableShadowExtend(IRBuilder<> &IRB, Value *S) {
  Type *VecTy = S->getType();
  assert(VecTy->isVectorTy());
  Value *IsPoisoned = IRB.CreateICmpNE(S, getCleanShadow(S));
  return IRB.CreateSExt(IsPoisoned, VecTy);
}
// \brief Instrument vector shift intrinsic.
//
// This function instruments intrinsics like int_x86_avx2_psll_w.
// Intrinsic shifts %In by %ShiftSize bits.
// %ShiftSize may be a vector. In that case the lower 64 bits determine shift
// size, and the rest is ignored. Behavior is defined even if shift size is
// greater than register (or field) width.
void handleVectorShiftIntrinsic(IntrinsicInst &I, bool Variable) {
  assert(I.getNumArgOperands() == 2);
  IRBuilder<> IRB(&I);
  // If any of the S2 bits are poisoned, the whole thing is poisoned.
  // Otherwise perform the same shift on S1.
  Value *S1 = getShadow(&I, 0);
  Value *S2 = getShadow(&I, 1);
  // S2Conv is all-ones iff the relevant part of the shift-amount shadow is
  // non-zero: per-element for variable shifts, otherwise derived from the
  // low 64 bits only (matching the intrinsic's own semantics).
  Value *S2Conv = Variable ? VariableShadowExtend(IRB, S2)
  : Lower64ShadowExtend(IRB, S2, getShadowTy(&I));
  Value *V1 = I.getOperand(0);
  Value *V2 = I.getOperand(1);
  // Propagate the data shadow by performing the very same shift on it,
  // reusing the original intrinsic with the original shift amount %V2.
  Value *Shift = IRB.CreateCall2(I.getCalledValue(),
  IRB.CreateBitCast(S1, V1->getType()), V2);
  Shift = IRB.CreateBitCast(Shift, getShadowTy(&I));
  // OR in S2Conv so that a poisoned shift amount poisons the entire result.
  setShadow(&I, IRB.CreateOr(Shift, S2Conv));
  setOriginForNaryOp(I);
}
void visitIntrinsicInst(IntrinsicInst &I) {
switch (I.getIntrinsicID()) {
case llvm::Intrinsic::bswap:
@@ -1866,6 +1908,83 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
case llvm::Intrinsic::x86_sse_cvttps2pi:
handleVectorConvertIntrinsic(I, 2);
break;
case llvm::Intrinsic::x86_avx512_psll_dq:
case llvm::Intrinsic::x86_avx512_psrl_dq:
case llvm::Intrinsic::x86_avx2_psll_w:
case llvm::Intrinsic::x86_avx2_psll_d:
case llvm::Intrinsic::x86_avx2_psll_q:
case llvm::Intrinsic::x86_avx2_pslli_w:
case llvm::Intrinsic::x86_avx2_pslli_d:
case llvm::Intrinsic::x86_avx2_pslli_q:
case llvm::Intrinsic::x86_avx2_psll_dq:
case llvm::Intrinsic::x86_avx2_psrl_w:
case llvm::Intrinsic::x86_avx2_psrl_d:
case llvm::Intrinsic::x86_avx2_psrl_q:
case llvm::Intrinsic::x86_avx2_psra_w:
case llvm::Intrinsic::x86_avx2_psra_d:
case llvm::Intrinsic::x86_avx2_psrli_w:
case llvm::Intrinsic::x86_avx2_psrli_d:
case llvm::Intrinsic::x86_avx2_psrli_q:
case llvm::Intrinsic::x86_avx2_psrai_w:
case llvm::Intrinsic::x86_avx2_psrai_d:
case llvm::Intrinsic::x86_avx2_psrl_dq:
case llvm::Intrinsic::x86_sse2_psll_w:
case llvm::Intrinsic::x86_sse2_psll_d:
case llvm::Intrinsic::x86_sse2_psll_q:
case llvm::Intrinsic::x86_sse2_pslli_w:
case llvm::Intrinsic::x86_sse2_pslli_d:
case llvm::Intrinsic::x86_sse2_pslli_q:
case llvm::Intrinsic::x86_sse2_psll_dq:
case llvm::Intrinsic::x86_sse2_psrl_w:
case llvm::Intrinsic::x86_sse2_psrl_d:
case llvm::Intrinsic::x86_sse2_psrl_q:
case llvm::Intrinsic::x86_sse2_psra_w:
case llvm::Intrinsic::x86_sse2_psra_d:
case llvm::Intrinsic::x86_sse2_psrli_w:
case llvm::Intrinsic::x86_sse2_psrli_d:
case llvm::Intrinsic::x86_sse2_psrli_q:
case llvm::Intrinsic::x86_sse2_psrai_w:
case llvm::Intrinsic::x86_sse2_psrai_d:
case llvm::Intrinsic::x86_sse2_psrl_dq:
case llvm::Intrinsic::x86_mmx_psll_w:
case llvm::Intrinsic::x86_mmx_psll_d:
case llvm::Intrinsic::x86_mmx_psll_q:
case llvm::Intrinsic::x86_mmx_pslli_w:
case llvm::Intrinsic::x86_mmx_pslli_d:
case llvm::Intrinsic::x86_mmx_pslli_q:
case llvm::Intrinsic::x86_mmx_psrl_w:
case llvm::Intrinsic::x86_mmx_psrl_d:
case llvm::Intrinsic::x86_mmx_psrl_q:
case llvm::Intrinsic::x86_mmx_psra_w:
case llvm::Intrinsic::x86_mmx_psra_d:
case llvm::Intrinsic::x86_mmx_psrli_w:
case llvm::Intrinsic::x86_mmx_psrli_d:
case llvm::Intrinsic::x86_mmx_psrli_q:
case llvm::Intrinsic::x86_mmx_psrai_w:
case llvm::Intrinsic::x86_mmx_psrai_d:
handleVectorShiftIntrinsic(I, /* Variable */ false);
break;
case llvm::Intrinsic::x86_avx2_psllv_d:
case llvm::Intrinsic::x86_avx2_psllv_d_256:
case llvm::Intrinsic::x86_avx2_psllv_q:
case llvm::Intrinsic::x86_avx2_psllv_q_256:
case llvm::Intrinsic::x86_avx2_psrlv_d:
case llvm::Intrinsic::x86_avx2_psrlv_d_256:
case llvm::Intrinsic::x86_avx2_psrlv_q:
case llvm::Intrinsic::x86_avx2_psrlv_q_256:
case llvm::Intrinsic::x86_avx2_psrav_d:
case llvm::Intrinsic::x86_avx2_psrav_d_256:
handleVectorShiftIntrinsic(I, /* Variable */ true);
break;
// Byte shifts are not implemented.
// case llvm::Intrinsic::x86_avx512_psll_dq_bs:
// case llvm::Intrinsic::x86_avx512_psrl_dq_bs:
// case llvm::Intrinsic::x86_avx2_psll_dq_bs:
// case llvm::Intrinsic::x86_avx2_psrl_dq_bs:
// case llvm::Intrinsic::x86_sse2_psll_dq_bs:
// case llvm::Intrinsic::x86_sse2_psrl_dq_bs:
default:
if (!handleUnknownIntrinsic(I))
visitInstruction(I);