Masked Vector Load and Store Intrinsics.
Introduced new target-independent intrinsics in order to support masked vector loads and stores. The loop vectorizer optimizes loops containing conditional memory accesses by generating these intrinsics for the existing targets AVX2 and AVX-512. The vectorizer asks the target about the availability of masked vector loads and stores.

Added SDNodes for masked operations and lowering patterns for the X86 code generator.

Examples:
declare <16 x i32> @llvm.masked.load.v16i32(i8* %addr, <16 x i32> %passthru, i32 4 /* align */, <16 x i1> %mask)
declare void @llvm.masked.store.v8f64(i8* %addr, <8 x double> %value, i32 4, <8 x i1> %mask)

A scalarizer for other targets (not AVX2/AVX-512) will be done in a separate patch.

http://reviews.llvm.org/D6191

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@222632 91177308-0d34-0410-b5e6-96231b3b80d8
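For illustration only, here is a minimal sketch of the kind of vector body the loop vectorizer could emit for a conditional copy (roughly "if (mask[i]) dst[i] = src[i]") using these intrinsics. The function name, operand names, and the v16i32 store overload are assumptions made for this example, not part of the patch itself:

declare <16 x i32> @llvm.masked.load.v16i32(i8*, <16 x i32>, i32, <16 x i1>)
declare void @llvm.masked.store.v16i32(i8*, <16 x i32>, i32, <16 x i1>)

define void @cond_copy_block(i8* %src, i8* %dst, <16 x i1> %mask) {
entry:
  ; Lanes with a 0 mask bit are not read from memory; they take the
  ; pass-through value (undef here) in the result vector.
  %v = call <16 x i32> @llvm.masked.load.v16i32(i8* %src, <16 x i32> undef,
                                                i32 4, <16 x i1> %mask)
  ; Only lanes with a 1 mask bit are written to %dst.
  call void @llvm.masked.store.v16i32(i8* %dst, <16 x i32> %v,
                                      i32 4, <16 x i1> %mask)
  ret void
}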
@@ -2405,6 +2405,19 @@ bool Verifier::VerifyIntrinsicType(Type *Ty,
            !isa<VectorType>(ArgTys[D.getArgumentNumber()]) ||
            VectorType::getHalfElementsVectorType(
                          cast<VectorType>(ArgTys[D.getArgumentNumber()])) != Ty;
+  case IITDescriptor::SameVecWidthArgument: {
+    if (D.getArgumentNumber() >= ArgTys.size())
+      return true;
+    VectorType * ReferenceType =
+      dyn_cast<VectorType>(ArgTys[D.getArgumentNumber()]);
+    VectorType *ThisArgType = dyn_cast<VectorType>(Ty);
+    if (!ThisArgType || !ReferenceType ||
+        (ReferenceType->getVectorNumElements() !=
+         ThisArgType->getVectorNumElements()))
+      return true;
+    return VerifyIntrinsicType(ThisArgType->getVectorElementType(),
+                               Infos, ArgTys);
+  }
   }
   llvm_unreachable("unhandled");
 }
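As a hedged illustration of what the new SameVecWidthArgument case above enforces (the intrinsic overload and operand names below are assumptions for the example): the mask operand must have exactly as many elements as the data vector it refers to, and its element type is then verified recursively.

declare <8 x double> @llvm.masked.load.v8f64(i8*, <8 x double>, i32, <8 x i1>)
; Valid: the <8 x i1> mask has the same 8-lane width as the <8 x double> result.
; A mask of a different width (e.g. <16 x i1>) would make VerifyIntrinsicType
; return true, and the intrinsic signature would be rejected by the verifier.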