mirror of
https://github.com/c64scene-ar/llvm-6502.git
synced 2025-07-25 13:24:46 +00:00
Loop Vectorizer minor changes in the code -
some comments, function names, indentation. Reviewed here: http://reviews.llvm.org/D6527 git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@224218 91177308-0d34-0410-b5e6-96231b3b80d8
This commit is contained in:
@@ -274,8 +274,8 @@ public:
|
|||||||
/// AVX2 allows masks for consecutive load and store for i32 and i64 elements.
|
/// AVX2 allows masks for consecutive load and store for i32 and i64 elements.
|
||||||
/// AVX-512 architecture will also allow masks for non-consecutive memory
|
/// AVX-512 architecture will also allow masks for non-consecutive memory
|
||||||
/// accesses.
|
/// accesses.
|
||||||
virtual bool isLegalPredicatedStore(Type *DataType, int Consecutive) const;
|
virtual bool isLegalMaskedStore(Type *DataType, int Consecutive) const;
|
||||||
virtual bool isLegalPredicatedLoad (Type *DataType, int Consecutive) const;
|
virtual bool isLegalMaskedLoad (Type *DataType, int Consecutive) const;
|
||||||
|
|
||||||
/// \brief Return the cost of the scaling factor used in the addressing
|
/// \brief Return the cost of the scaling factor used in the addressing
|
||||||
/// mode represented by AM for this target, for a load/store
|
/// mode represented by AM for this target, for a load/store
|
||||||
|
@@ -101,13 +101,13 @@ bool TargetTransformInfo::isLegalICmpImmediate(int64_t Imm) const {
|
|||||||
return PrevTTI->isLegalICmpImmediate(Imm);
|
return PrevTTI->isLegalICmpImmediate(Imm);
|
||||||
}
|
}
|
||||||
|
|
||||||
bool TargetTransformInfo::isLegalPredicatedLoad(Type *DataType,
|
bool TargetTransformInfo::isLegalMaskedLoad(Type *DataType,
|
||||||
int Consecutive) const {
|
int Consecutive) const {
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
|
|
||||||
bool TargetTransformInfo::isLegalPredicatedStore(Type *DataType,
|
bool TargetTransformInfo::isLegalMaskedStore(Type *DataType,
|
||||||
int Consecutive) const {
|
int Consecutive) const {
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@@ -111,8 +111,8 @@ public:
|
|||||||
Type *Ty) const override;
|
Type *Ty) const override;
|
||||||
unsigned getIntImmCost(Intrinsic::ID IID, unsigned Idx, const APInt &Imm,
|
unsigned getIntImmCost(Intrinsic::ID IID, unsigned Idx, const APInt &Imm,
|
||||||
Type *Ty) const override;
|
Type *Ty) const override;
|
||||||
bool isLegalPredicatedLoad (Type *DataType, int Consecutive) const override;
|
bool isLegalMaskedLoad (Type *DataType, int Consecutive) const override;
|
||||||
bool isLegalPredicatedStore(Type *DataType, int Consecutive) const override;
|
bool isLegalMaskedStore(Type *DataType, int Consecutive) const override;
|
||||||
|
|
||||||
/// @}
|
/// @}
|
||||||
};
|
};
|
||||||
@@ -1159,7 +1159,7 @@ unsigned X86TTI::getIntImmCost(Intrinsic::ID IID, unsigned Idx,
|
|||||||
return X86TTI::getIntImmCost(Imm, Ty);
|
return X86TTI::getIntImmCost(Imm, Ty);
|
||||||
}
|
}
|
||||||
|
|
||||||
bool X86TTI::isLegalPredicatedLoad(Type *DataType, int Consecutive) const {
|
bool X86TTI::isLegalMaskedLoad(Type *DataType, int Consecutive) const {
|
||||||
int ScalarWidth = DataType->getScalarSizeInBits();
|
int ScalarWidth = DataType->getScalarSizeInBits();
|
||||||
|
|
||||||
// Todo: AVX512 allows gather/scatter, works with strided and random as well
|
// Todo: AVX512 allows gather/scatter, works with strided and random as well
|
||||||
@@ -1170,7 +1170,7 @@ bool X86TTI::isLegalPredicatedLoad(Type *DataType, int Consecutive) const {
|
|||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
|
|
||||||
bool X86TTI::isLegalPredicatedStore(Type *DataType, int Consecutive) const {
|
bool X86TTI::isLegalMaskedStore(Type *DataType, int Consecutive) const {
|
||||||
return isLegalPredicatedLoad(DataType, Consecutive);
|
return isLegalMaskedLoad(DataType, Consecutive);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@@ -1871,7 +1871,7 @@ void InnerLoopVectorizer::vectorizeMemoryInstruction(Instruction *Instr) {
|
|||||||
|
|
||||||
if (Reverse) {
|
if (Reverse) {
|
||||||
// If the address is consecutive but reversed, then the
|
// If the address is consecutive but reversed, then the
|
||||||
// wide store needs to start at the last vector element.
|
// wide load needs to start at the last vector element.
|
||||||
PartPtr = Builder.CreateGEP(Ptr, Builder.getInt32(-Part * VF));
|
PartPtr = Builder.CreateGEP(Ptr, Builder.getInt32(-Part * VF));
|
||||||
PartPtr = Builder.CreateGEP(PartPtr, Builder.getInt32(1 - VF));
|
PartPtr = Builder.CreateGEP(PartPtr, Builder.getInt32(1 - VF));
|
||||||
}
|
}
|
||||||
@@ -5341,7 +5341,7 @@ bool LoopVectorizationLegality::blockCanBePredicated(BasicBlock *BB,
|
|||||||
case Instruction::SDiv:
|
case Instruction::SDiv:
|
||||||
case Instruction::URem:
|
case Instruction::URem:
|
||||||
case Instruction::SRem:
|
case Instruction::SRem:
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -5385,7 +5385,7 @@ LoopVectorizationCostModel::selectVectorizationFactor(bool OptForSize) {
|
|||||||
MaxVectorSize = 1;
|
MaxVectorSize = 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
assert(MaxVectorSize <= 32 && "Did not expect to pack so many elements"
|
assert(MaxVectorSize <= 64 && "Did not expect to pack so many elements"
|
||||||
" into one vector!");
|
" into one vector!");
|
||||||
|
|
||||||
unsigned VF = MaxVectorSize;
|
unsigned VF = MaxVectorSize;
|
||||||
|
Reference in New Issue
Block a user