Remove a lot of the fancy scalar replacement code for dealing with llvm-gcc's
lowering of NEON code. It provides little-to-no benefit now and only introduces
additional complexity.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@141646 91177308-0d34-0410-b5e6-96231b3b80d8
Cameron Zwarich 2011-10-11 06:10:30 +00:00
parent 3606f75f35
commit 446d95224b
2 changed files with 16 additions and 372 deletions

@@ -298,8 +298,6 @@ AllocaInst *ConvertToScalarInfo::TryConvert(AllocaInst *AI) {
if (ScalarKind == Unknown)
ScalarKind = Integer;
// FIXME: It should be possible to promote the vector type up to the alloca's
// size.
if (ScalarKind == Vector && VectorTy->getBitWidth() != AllocaSize * 8)
ScalarKind = Integer;
@@ -334,16 +332,12 @@ AllocaInst *ConvertToScalarInfo::TryConvert(AllocaInst *AI) {
/// (VectorTy) so far at the offset specified by Offset (which is specified in
/// bytes).
///
/// There are three cases we handle here:
/// There are two cases we handle here:
/// 1) A union of vector types of the same size and potentially its elements.
/// Here we turn element accesses into insert/extract element operations.
/// This promotes a <4 x float> with a store of float to the third element
/// into a <4 x float> that uses insert element.
/// 2) A union of vector types with power-of-2 size differences, e.g. a float,
/// <2 x float> and <4 x float>. Here we turn element accesses into insert
/// and extract element operations, and <2 x float> accesses into a cast to
/// <2 x double>, an extract, and a cast back to <2 x float>.
/// 3) A fully general blob of memory, which we turn into some (potentially
/// 2) A fully general blob of memory, which we turn into some (potentially
/// large) integer type with extract and insert operations where the loads
/// and stores would mutate the memory. We mark this by setting VectorTy
/// to VoidTy.
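
As a concrete illustration of case 1, the kind of IR this path is meant to promote looks
roughly like the following minimal sketch, written in the same pre-opaque-pointer syntax as
the tests touched by this commit (the function name @promote_elt is made up for the example).
Scalar replacement should eliminate the alloca and rewrite the scalar store of %f as an
insertelement on the <4 x float> value:

define <4 x float> @promote_elt(<4 x float> %v, float %f) {
  %a = alloca <4 x float>
  store <4 x float> %v, <4 x float>* %a
  %p = getelementptr <4 x float>* %a, i32 0, i32 2
  store float %f, float* %p
  %r = load <4 x float>* %a
  ret <4 x float> %r
}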
@@ -374,20 +368,13 @@ void ConvertToScalarInfo::MergeInTypeForLoadOrStore(Type *In,
// if the implied vector agrees with what we already have and if Offset is
// compatible with it.
if (Offset % EltSize == 0 && AllocaSize % EltSize == 0 &&
(!VectorTy || Offset * 8 < VectorTy->getPrimitiveSizeInBits())) {
(!VectorTy || EltSize == VectorTy->getElementType()
->getPrimitiveSizeInBits()/8)) {
if (!VectorTy) {
ScalarKind = ImplicitVector;
VectorTy = VectorType::get(In, AllocaSize/EltSize);
return;
}
unsigned CurrentEltSize = VectorTy->getElementType()
->getPrimitiveSizeInBits()/8;
if (EltSize == CurrentEltSize)
return;
if (In->isIntegerTy() && isPowerOf2_32(AllocaSize / EltSize))
return;
return;
}
}
@@ -400,78 +387,19 @@ void ConvertToScalarInfo::MergeInTypeForLoadOrStore(Type *In,
/// returning true if the type was successfully merged and false otherwise.
bool ConvertToScalarInfo::MergeInVectorType(VectorType *VInTy,
uint64_t Offset) {
// TODO: Support nonzero offsets?
if (Offset != 0)
return false;
// Only allow vectors that are a power-of-2 away from the size of the alloca.
if (!isPowerOf2_64(AllocaSize / (VInTy->getBitWidth() / 8)))
return false;
// If this is the first vector we see, remember the type so that we know the
// element size.
if (!VectorTy) {
ScalarKind = Vector;
VectorTy = VInTy;
return true;
}
unsigned BitWidth = VectorTy->getBitWidth();
unsigned InBitWidth = VInTy->getBitWidth();
// Vectors of the same size can be converted using a simple bitcast.
if (InBitWidth == BitWidth && AllocaSize == (InBitWidth / 8)) {
if (VInTy->getBitWidth()/8 == AllocaSize && Offset == 0) {
// If we're storing/loading a vector of the right size, allow it as a
// vector. If this is the first vector we see, remember the type so that
// we know the element size. If this is a subsequent access, ignore it
// even if it is a differing type but the same size. Worst case we can
// bitcast the resultant vectors.
if (!VectorTy)
VectorTy = VInTy;
ScalarKind = Vector;
return true;
}
Type *ElementTy = VectorTy->getElementType();
Type *InElementTy = VInTy->getElementType();
// If they're the same alloc size, we'll be attempting to convert between
// them with a vector shuffle, which requires the element types to match.
if (TD.getTypeAllocSize(VectorTy) == TD.getTypeAllocSize(VInTy) &&
ElementTy != InElementTy)
return false;
// Do not allow mixed integer and floating-point accesses from vectors of
// different sizes.
if (ElementTy->isFloatingPointTy() != InElementTy->isFloatingPointTy())
return false;
if (ElementTy->isFloatingPointTy()) {
// Only allow floating-point vectors of different sizes if they have the
// same element type.
// TODO: This could be loosened a bit, but would anything benefit?
if (ElementTy != InElementTy)
return false;
// There are no arbitrary-precision floating-point types, which limits the
// number of legal vector types with larger element types that we can form
// to bitcast and extract a subvector.
// TODO: We could support some more cases with mixed fp128 and double here.
if (!(BitWidth == 64 || BitWidth == 128) ||
!(InBitWidth == 64 || InBitWidth == 128))
return false;
} else {
assert(ElementTy->isIntegerTy() && "Vector elements must be either integer "
"or floating-point.");
unsigned BitWidth = ElementTy->getPrimitiveSizeInBits();
unsigned InBitWidth = InElementTy->getPrimitiveSizeInBits();
// Do not allow integer types smaller than a byte or types whose widths are
// not a multiple of a byte.
if (BitWidth < 8 || InBitWidth < 8 ||
BitWidth % 8 != 0 || InBitWidth % 8 != 0)
return false;
}
// Pick the largest of the two vector types.
ScalarKind = Vector;
if (InBitWidth > BitWidth)
VectorTy = VInTy;
return true;
return false;
}
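
For reference, after this simplification the only vector case MergeInVectorType still accepts
is a load or store whose vector type spans the whole alloca at offset 0; a differing element
type is tolerated because the resulting vectors can simply be bitcast. A minimal IR sketch of
that pattern follows (the function name @whole_alloca_access is hypothetical; the shape is
essentially that of test11 further below). Accesses that cover only part of the alloca, such
as the <2 x float> load in the deleted test7, no longer get a vector form from this function:

define <4 x float> @whole_alloca_access(<2 x i64> %x) {
  %a = alloca <2 x i64>
  store <2 x i64> %x, <2 x i64>* %a
  %p = bitcast <2 x i64>* %a to <4 x float>*
  %r = load <4 x float>* %p
  ret <4 x float> %r
}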
/// CanConvertToScalar - V is a pointer. If we can convert the pointee and all
@@ -735,63 +663,6 @@ void ConvertToScalarInfo::ConvertUsesToScalar(Value *Ptr, AllocaInst *NewAI,
}
}
/// getScaledElementType - Gets a scaled element type for a partial vector
/// access of an alloca. The input types must be integer or floating-point
/// scalar or vector types, and the resulting type is an integer, float or
/// double.
static Type *getScaledElementType(Type *Ty1, Type *Ty2,
unsigned NewBitWidth) {
bool IsFP1 = Ty1->isFloatingPointTy() ||
(Ty1->isVectorTy() &&
cast<VectorType>(Ty1)->getElementType()->isFloatingPointTy());
bool IsFP2 = Ty2->isFloatingPointTy() ||
(Ty2->isVectorTy() &&
cast<VectorType>(Ty2)->getElementType()->isFloatingPointTy());
LLVMContext &Context = Ty1->getContext();
// Prefer floating-point types over integer types, as integer types may have
// been created by earlier scalar replacement.
if (IsFP1 || IsFP2) {
if (NewBitWidth == 32)
return Type::getFloatTy(Context);
if (NewBitWidth == 64)
return Type::getDoubleTy(Context);
}
return Type::getIntNTy(Context, NewBitWidth);
}
/// CreateShuffleVectorCast - Creates a shuffle vector to convert one vector
/// to another vector of the same element type which has the same allocation
/// size but different primitive sizes (e.g. <3 x i32> and <4 x i32>).
static Value *CreateShuffleVectorCast(Value *FromVal, Type *ToType,
IRBuilder<> &Builder) {
Type *FromType = FromVal->getType();
VectorType *FromVTy = cast<VectorType>(FromType);
VectorType *ToVTy = cast<VectorType>(ToType);
assert((ToVTy->getElementType() == FromVTy->getElementType()) &&
"Vectors must have the same element type");
Value *UnV = UndefValue::get(FromType);
unsigned numEltsFrom = FromVTy->getNumElements();
unsigned numEltsTo = ToVTy->getNumElements();
SmallVector<Constant*, 3> Args;
Type* Int32Ty = Builder.getInt32Ty();
unsigned minNumElts = std::min(numEltsFrom, numEltsTo);
unsigned i;
for (i=0; i != minNumElts; ++i)
Args.push_back(ConstantInt::get(Int32Ty, i));
if (i < numEltsTo) {
Constant* UnC = UndefValue::get(Int32Ty);
for (; i != numEltsTo; ++i)
Args.push_back(UnC);
}
Constant *Mask = ConstantVector::get(Args);
return Builder.CreateShuffleVector(FromVal, UnV, Mask, "tmpV");
}
/// ConvertScalar_ExtractValue - Extract a value of type ToType from an integer
/// or vector value FromVal, extracting the bits from the offset specified by
/// Offset. This returns the value, which is of type ToType.
@@ -815,38 +686,8 @@ ConvertScalar_ExtractValue(Value *FromVal, Type *ToType,
if (VectorType *VTy = dyn_cast<VectorType>(FromType)) {
unsigned FromTypeSize = TD.getTypeAllocSize(FromType);
unsigned ToTypeSize = TD.getTypeAllocSize(ToType);
if (FromTypeSize == ToTypeSize) {
// If the two types have the same primitive size, use a bit cast.
// Otherwise, it is two vectors with the same element type that has
// the same allocation size but different number of elements so use
// a shuffle vector.
if (FromType->getPrimitiveSizeInBits() ==
ToType->getPrimitiveSizeInBits())
if (FromTypeSize == ToTypeSize)
return Builder.CreateBitCast(FromVal, ToType);
else
return CreateShuffleVectorCast(FromVal, ToType, Builder);
}
if (isPowerOf2_64(FromTypeSize / ToTypeSize)) {
assert(!(ToType->isVectorTy() && Offset != 0) && "Can't extract a value "
"of a smaller vector type at a nonzero offset.");
Type *CastElementTy = getScaledElementType(FromType, ToType,
ToTypeSize * 8);
unsigned NumCastVectorElements = FromTypeSize / ToTypeSize;
LLVMContext &Context = FromVal->getContext();
Type *CastTy = VectorType::get(CastElementTy,
NumCastVectorElements);
Value *Cast = Builder.CreateBitCast(FromVal, CastTy);
unsigned EltSize = TD.getTypeAllocSizeInBits(CastElementTy);
unsigned Elt = Offset/EltSize;
assert(EltSize*Elt == Offset && "Invalid modulus in validity checking");
Value *Extract = Builder.CreateExtractElement(Cast, ConstantInt::get(
Type::getInt32Ty(Context), Elt));
return Builder.CreateBitCast(Extract, ToType);
}
// Otherwise it must be an element access.
unsigned Elt = 0;
@@ -961,38 +802,8 @@ ConvertScalar_InsertValue(Value *SV, Value *Old,
// Changing the whole vector with memset or with an access of a different
// vector type?
if (ValSize == VecSize) {
// If the two types have the same primitive size, use a bit cast.
// Otherwise, it is two vectors with the same element type that has
// the same allocation size but different number of elements so use
// a shuffle vector.
if (VTy->getPrimitiveSizeInBits() ==
SV->getType()->getPrimitiveSizeInBits())
if (ValSize == VecSize)
return Builder.CreateBitCast(SV, AllocaType);
else
return CreateShuffleVectorCast(SV, VTy, Builder);
}
if (isPowerOf2_64(VecSize / ValSize)) {
assert(!(SV->getType()->isVectorTy() && Offset != 0) && "Can't insert a "
"value of a smaller vector type at a nonzero offset.");
Type *CastElementTy = getScaledElementType(VTy, SV->getType(),
ValSize);
unsigned NumCastVectorElements = VecSize / ValSize;
Type *OldCastTy = VectorType::get(CastElementTy, NumCastVectorElements);
Value *OldCast = Builder.CreateBitCast(Old, OldCastTy);
Value *SVCast = Builder.CreateBitCast(SV, CastElementTy);
unsigned EltSize = TD.getTypeAllocSizeInBits(CastElementTy);
unsigned Elt = Offset/EltSize;
assert(EltSize*Elt == Offset && "Invalid modulus in validity checking");
Value *Insert =
Builder.CreateInsertElement(OldCast, SVCast, Builder.getInt32(Elt));
return Builder.CreateBitCast(Insert, AllocaType);
}
// Must be an element insertion.
assert(SV->getType() == VTy->getElementType());

@@ -86,7 +86,6 @@ define i32 @test5(float %X) { ;; should turn into bitcast.
; CHECK-NEXT: ret i32
}
define i64 @test6(<2 x float> %X) {
%X_addr = alloca <2 x float>
store <2 x float> %X, <2 x float>* %X_addr
@@ -97,169 +96,3 @@ define i64 @test6(<2 x float> %X) {
; CHECK: bitcast <2 x float> %X to i64
; CHECK: ret i64
}
define float @test7(<4 x float> %x) {
%a = alloca <4 x float>
store <4 x float> %x, <4 x float>* %a
%p = bitcast <4 x float>* %a to <2 x float>*
%b = load <2 x float>* %p
%q = getelementptr <4 x float>* %a, i32 0, i32 2
%c = load float* %q
ret float %c
; CHECK: @test7
; CHECK-NOT: alloca
; CHECK: bitcast <4 x float> %x to <2 x double>
; CHECK-NEXT: extractelement <2 x double>
; CHECK-NEXT: bitcast double %2 to <2 x float>
; CHECK-NEXT: extractelement <4 x float>
}
define void @test8(<4 x float> %x, <2 x float> %y) {
%a = alloca <4 x float>
store <4 x float> %x, <4 x float>* %a
%p = bitcast <4 x float>* %a to <2 x float>*
store <2 x float> %y, <2 x float>* %p
ret void
; CHECK: @test8
; CHECK-NOT: alloca
; CHECK: bitcast <4 x float> %x to <2 x double>
; CHECK-NEXT: bitcast <2 x float> %y to double
; CHECK-NEXT: insertelement <2 x double>
; CHECK-NEXT: bitcast <2 x double> %3 to <4 x float>
}
define i256 @test9(<4 x i256> %x) {
%a = alloca <4 x i256>
store <4 x i256> %x, <4 x i256>* %a
%p = bitcast <4 x i256>* %a to <2 x i256>*
%b = load <2 x i256>* %p
%q = getelementptr <4 x i256>* %a, i32 0, i32 2
%c = load i256* %q
ret i256 %c
; CHECK: @test9
; CHECK-NOT: alloca
; CHECK: bitcast <4 x i256> %x to <2 x i512>
; CHECK-NEXT: extractelement <2 x i512>
; CHECK-NEXT: bitcast i512 %2 to <2 x i256>
; CHECK-NEXT: extractelement <4 x i256>
}
define void @test10(<4 x i256> %x, <2 x i256> %y) {
%a = alloca <4 x i256>
store <4 x i256> %x, <4 x i256>* %a
%p = bitcast <4 x i256>* %a to <2 x i256>*
store <2 x i256> %y, <2 x i256>* %p
ret void
; CHECK: @test10
; CHECK-NOT: alloca
; CHECK: bitcast <4 x i256> %x to <2 x i512>
; CHECK-NEXT: bitcast <2 x i256> %y to i512
; CHECK-NEXT: insertelement <2 x i512>
; CHECK-NEXT: bitcast <2 x i512> %3 to <4 x i256>
}
%union.v = type { <2 x i64> }
define void @test11(<2 x i64> %x) {
%a = alloca %union.v
%p = getelementptr inbounds %union.v* %a, i32 0, i32 0
store <2 x i64> %x, <2 x i64>* %p, align 16
%q = getelementptr inbounds %union.v* %a, i32 0, i32 0
%r = bitcast <2 x i64>* %q to <4 x float>*
%b = load <4 x float>* %r, align 16
ret void
; CHECK: @test11
; CHECK-NOT: alloca
}
define void @test12() {
entry:
%a = alloca <64 x i8>, align 64
store <64 x i8> undef, <64 x i8>* %a, align 64
%p = bitcast <64 x i8>* %a to <16 x i8>*
%0 = load <16 x i8>* %p, align 64
store <16 x i8> undef, <16 x i8>* %p, align 64
%q = bitcast <16 x i8>* %p to <64 x i8>*
%1 = load <64 x i8>* %q, align 64
ret void
; CHECK: @test12
; CHECK-NOT: alloca
; CHECK: extractelement <4 x i128>
; CHECK: insertelement <4 x i128>
}
define float @test13(<4 x float> %x, <2 x i32> %y) {
%a = alloca <4 x float>
store <4 x float> %x, <4 x float>* %a
%p = bitcast <4 x float>* %a to <2 x float>*
%b = load <2 x float>* %p
%q = getelementptr <4 x float>* %a, i32 0, i32 2
%c = load float* %q
%r = bitcast <4 x float>* %a to <2 x i32>*
store <2 x i32> %y, <2 x i32>* %r
ret float %c
; CHECK: @test13
; CHECK-NOT: alloca
; CHECK: bitcast <4 x float> %x to i128
}
define <3 x float> @test14(<3 x float> %x) {
entry:
%x.addr = alloca <3 x float>, align 16
%r = alloca <3 x i32>, align 16
%extractVec = shufflevector <3 x float> %x, <3 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 undef>
%storetmp = bitcast <3 x float>* %x.addr to <4 x float>*
store <4 x float> %extractVec, <4 x float>* %storetmp, align 16
%tmp = load <3 x float>* %x.addr, align 16
%cmp = fcmp une <3 x float> %tmp, zeroinitializer
%sext = sext <3 x i1> %cmp to <3 x i32>
%and = and <3 x i32> <i32 1065353216, i32 1065353216, i32 1065353216>, %sext
%extractVec1 = shufflevector <3 x i32> %and, <3 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 undef>
%storetmp2 = bitcast <3 x i32>* %r to <4 x i32>*
store <4 x i32> %extractVec1, <4 x i32>* %storetmp2, align 16
%tmp3 = load <3 x i32>* %r, align 16
%0 = bitcast <3 x i32> %tmp3 to <3 x float>
%tmp4 = load <3 x float>* %x.addr, align 16
ret <3 x float> %tmp4
; CHECK: @test14
; CHECK-NOT: alloca
; CHECK: shufflevector <4 x i32> %extractVec1, <4 x i32> undef, <3 x i32> <i32 0, i32 1, i32 2>
}
define void @test15(<3 x i64>* sret %agg.result, <3 x i64> %x, <3 x i64> %min) {
entry:
%x.addr = alloca <3 x i64>, align 32
%min.addr = alloca <3 x i64>, align 32
%extractVec = shufflevector <3 x i64> %x, <3 x i64> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 undef>
%storetmp = bitcast <3 x i64>* %x.addr to <4 x i64>*
store <4 x i64> %extractVec, <4 x i64>* %storetmp, align 32
%extractVec1 = shufflevector <3 x i64> %min, <3 x i64> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 undef>
%storetmp2 = bitcast <3 x i64>* %min.addr to <4 x i64>*
store <4 x i64> %extractVec1, <4 x i64>* %storetmp2, align 32
%tmp = load <3 x i64>* %x.addr
%tmp5 = extractelement <3 x i64> %tmp, i32 0
%tmp11 = insertelement <3 x i64> %tmp, i64 %tmp5, i32 0
store <3 x i64> %tmp11, <3 x i64>* %x.addr
%tmp30 = load <3 x i64>* %x.addr, align 32
store <3 x i64> %tmp30, <3 x i64>* %agg.result
ret void
; CHECK: @test15
; CHECK-NOT: alloca
; CHECK: shufflevector <4 x i64> %tmpV1, <4 x i64> undef, <3 x i32> <i32 0, i32 1, i32 2>
}
define <4 x float> @test16(<4 x float> %x, i64 %y0, i64 %y1) {
entry:
%tmp8 = bitcast <4 x float> undef to <2 x double>
%tmp9 = bitcast i64 %y0 to double
%tmp10 = insertelement <2 x double> %tmp8, double %tmp9, i32 0
%tmp11 = bitcast <2 x double> %tmp10 to <4 x float>
%tmp3 = bitcast <4 x float> %tmp11 to <2 x double>
%tmp4 = bitcast i64 %y1 to double
%tmp5 = insertelement <2 x double> %tmp3, double %tmp4, i32 1
%tmp6 = bitcast <2 x double> %tmp5 to <4 x float>
ret <4 x float> %tmp6
; CHECK: @test16
; CHECK-NOT: alloca
; CHECK: bitcast <4 x float> %tmp11 to <2 x double>
}