Add address space argument to allowsUnalignedMemoryAccesses.

On R600, some address spaces have stricter alignment requirements than others.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@200887 91177308-0d34-0410-b5e6-96231b3b80d8
commit bb7bf85f3c
parent df7da79db6
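As a minimal sketch of what the new parameter enables (not part of this commit; the target name, address-space number, and policy below are hypothetical), a backend with one strictly aligned address space could now answer the query per address space:

bool MyGPUTargetLowering::allowsUnalignedMemoryAccesses(EVT VT,
                                                        unsigned AddrSpace,
                                                        bool *Fast) const {
  // Hypothetical: address space 3 is local memory that traps on
  // unaligned accesses, so reject those outright.
  if (AddrSpace == 3)
    return false;
  // Elsewhere, unaligned accesses are legal; report them as fast so
  // memcpy/memset lowering may merge narrow stores into wide ones.
  if (Fast)
    *Fast = true;
  return true;
}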
include/llvm/Target/TargetLowering.h
@@ -713,14 +713,16 @@ public:
   /// \brief Determine if the target supports unaligned memory accesses.
   ///
-  /// This function returns true if the target allows unaligned memory accesses.
-  /// of the specified type. If true, it also returns whether the unaligned
-  /// memory access is "fast" in the second argument by reference. This is used,
-  /// for example, in situations where an array copy/move/set is converted to a
-  /// sequence of store operations. It's use helps to ensure that such
-  /// replacements don't generate code that causes an alignment error (trap) on
-  /// the target machine.
-  virtual bool allowsUnalignedMemoryAccesses(EVT, bool * /*Fast*/ = 0) const {
+  /// This function returns true if the target allows unaligned memory accesses
+  /// of the specified type in the given address space. If true, it also returns
+  /// whether the unaligned memory access is "fast" in the third argument by
+  /// reference. This is used, for example, in situations where an array
+  /// copy/move/set is converted to a sequence of store operations. Its use
+  /// helps to ensure that such replacements don't generate code that causes an
+  /// alignment error (trap) on the target machine.
+  virtual bool allowsUnalignedMemoryAccesses(EVT,
+                                             unsigned AddrSpace = 0,
+                                             bool * /*Fast*/ = 0) const {
     return false;
   }
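Callers now pass the address space alongside the type. For illustration (not from the patch; the chosen type and address space are arbitrary), a query against the default address space looks like:

bool Fast = false;
EVT VT = MVT::v2f64;
// Ask the target whether an unaligned access of this type is legal in
// address space 0, and whether it is fast enough to be profitable.
if (TLI.allowsUnalignedMemoryAccesses(VT, /*AddrSpace=*/0, &Fast) && Fast) {
  // Safe and profitable to emit one unaligned wide store here.
}

Address-space-agnostic code such as the generic memop lowering below simply passes 0: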
lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -3693,7 +3693,7 @@ static bool FindOptimalMemOpLowering(std::vector<EVT> &MemOps,
       bool Fast;
       if (NumMemOps && AllowOverlap &&
           VTSize >= 8 && NewVTSize < Size &&
-          TLI.allowsUnalignedMemoryAccesses(VT, &Fast) && Fast)
+          TLI.allowsUnalignedMemoryAccesses(VT, 0, &Fast) && Fast)
         VTSize = Size;
       else {
         VT = NewVT;
lib/Target/ARM/ARMISelLowering.cpp
@@ -10167,7 +10167,8 @@ bool ARMTargetLowering::isDesirableToTransformToIntegerOp(unsigned Opc,
   return (VT == MVT::f32) && (Opc == ISD::LOAD || Opc == ISD::STORE);
 }

-bool ARMTargetLowering::allowsUnalignedMemoryAccesses(EVT VT, bool *Fast) const {
+bool ARMTargetLowering::allowsUnalignedMemoryAccesses(EVT VT, unsigned,
+                                                      bool *Fast) const {
   // The AllowsUnaliged flag models the SCTLR.A setting in ARM cpus
   bool AllowsUnaligned = Subtarget->allowsUnalignedMem();
@@ -10221,11 +10222,11 @@ EVT ARMTargetLowering::getOptimalMemOpType(uint64_t Size,
     bool Fast;
     if (Size >= 16 &&
         (memOpAlign(SrcAlign, DstAlign, 16) ||
-         (allowsUnalignedMemoryAccesses(MVT::v2f64, &Fast) && Fast))) {
+         (allowsUnalignedMemoryAccesses(MVT::v2f64, 0, &Fast) && Fast))) {
       return MVT::v2f64;
     } else if (Size >= 8 &&
                (memOpAlign(SrcAlign, DstAlign, 8) ||
-                (allowsUnalignedMemoryAccesses(MVT::f64, &Fast) && Fast))) {
+                (allowsUnalignedMemoryAccesses(MVT::f64, 0, &Fast) && Fast))) {
       return MVT::f64;
     }
   }
lib/Target/ARM/ARMISelLowering.h
@@ -273,7 +273,8 @@ namespace llvm {
    /// allowsUnalignedMemoryAccesses - Returns true if the target allows
    /// unaligned memory accesses of the specified type. Returns whether it
    /// is "fast" by reference in the second argument.
-    virtual bool allowsUnalignedMemoryAccesses(EVT VT, bool *Fast) const;
+    virtual bool allowsUnalignedMemoryAccesses(EVT VT, unsigned AddrSpace,
+                                               bool *Fast) const;

     virtual EVT getOptimalMemOpType(uint64_t Size,
                                     unsigned DstAlign, unsigned SrcAlign,
lib/Target/Mips/Mips16ISelLowering.cpp
@@ -159,7 +159,9 @@ llvm::createMips16TargetLowering(MipsTargetMachine &TM) {
 }

 bool
-Mips16TargetLowering::allowsUnalignedMemoryAccesses(EVT VT, bool *Fast) const {
+Mips16TargetLowering::allowsUnalignedMemoryAccesses(EVT VT,
+                                                    unsigned,
+                                                    bool *Fast) const {
   return false;
 }
lib/Target/Mips/Mips16ISelLowering.h
@@ -21,7 +21,8 @@ namespace llvm {
   public:
     explicit Mips16TargetLowering(MipsTargetMachine &TM);

-    virtual bool allowsUnalignedMemoryAccesses(EVT VT, bool *Fast) const;
+    virtual bool allowsUnalignedMemoryAccesses(EVT VT, unsigned AddrSpace,
+                                               bool *Fast) const;

     virtual MachineBasicBlock *
     EmitInstrWithCustomInserter(MachineInstr *MI, MachineBasicBlock *MBB) const;
lib/Target/Mips/MipsSEISelLowering.cpp
@@ -244,7 +244,9 @@ addMSAFloatType(MVT::SimpleValueType Ty, const TargetRegisterClass *RC) {
 }

 bool
-MipsSETargetLowering::allowsUnalignedMemoryAccesses(EVT VT, bool *Fast) const {
+MipsSETargetLowering::allowsUnalignedMemoryAccesses(EVT VT,
+                                                    unsigned,
+                                                    bool *Fast) const {
   MVT::SimpleValueType SVT = VT.getSimpleVT().SimpleTy;

   switch (SVT) {
lib/Target/Mips/MipsSEISelLowering.h
@@ -30,7 +30,9 @@ namespace llvm {
     void addMSAFloatType(MVT::SimpleValueType Ty,
                          const TargetRegisterClass *RC);

-    virtual bool allowsUnalignedMemoryAccesses(EVT VT, bool *Fast) const;
+    virtual bool allowsUnalignedMemoryAccesses(
+      EVT VT, unsigned AS = 0,
+      bool *Fast = 0) const LLVM_OVERRIDE;

     virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const;
lib/Target/PowerPC/PPCISelLowering.cpp
@@ -7885,6 +7885,7 @@ EVT PPCTargetLowering::getOptimalMemOpType(uint64_t Size,
 }

 bool PPCTargetLowering::allowsUnalignedMemoryAccesses(EVT VT,
+                                                      unsigned,
                                                       bool *Fast) const {
   if (DisablePPCUnaligned)
     return false;
lib/Target/PowerPC/PPCISelLowering.h
@@ -461,7 +461,9 @@ namespace llvm {

     /// Is unaligned memory access allowed for the given type, and is it fast
     /// relative to software emulation.
-    virtual bool allowsUnalignedMemoryAccesses(EVT VT, bool *Fast = 0) const;
+    virtual bool allowsUnalignedMemoryAccesses(EVT VT,
+                                               unsigned AddrSpace,
+                                               bool *Fast = 0) const;

     /// isFMAFasterThanFMulAndFAdd - Return true if an FMA operation is faster
     /// than a pair of fmul and fadd instructions. fmuladd intrinsics will be
lib/Target/R600/SIISelLowering.cpp
@@ -159,6 +159,7 @@ SITargetLowering::SITargetLowering(TargetMachine &TM) :
 //===----------------------------------------------------------------------===//

 bool SITargetLowering::allowsUnalignedMemoryAccesses(EVT VT,
+                                                     unsigned AddrSpace,
                                                      bool *IsFast) const {
   // XXX: This depends on the address space and also we may want to revist
   // the alignment values we specify in the DataLayout.
lib/Target/R600/SIISelLowering.h
@@ -50,7 +50,7 @@ class SITargetLowering : public AMDGPUTargetLowering {

 public:
   SITargetLowering(TargetMachine &tm);
-  bool allowsUnalignedMemoryAccesses(EVT VT, bool *IsFast) const;
+  bool allowsUnalignedMemoryAccesses(EVT VT, unsigned AS, bool *IsFast) const;
   virtual bool shouldSplitVectorElementType(EVT VT) const;

   SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv,
lib/Target/SystemZ/SystemZISelLowering.cpp
@@ -337,6 +337,7 @@ bool SystemZTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
 }

 bool SystemZTargetLowering::allowsUnalignedMemoryAccesses(EVT VT,
+                                                          unsigned,
                                                           bool *Fast) const {
   // Unaligned accesses should never be slower than the expanded version.
   // We check specifically for aligned accesses in the few cases where
lib/Target/SystemZ/SystemZISelLowering.h
@@ -209,8 +209,8 @@ public:
   virtual bool isFPImmLegal(const APFloat &Imm, EVT VT) const LLVM_OVERRIDE;
   virtual bool isLegalAddressingMode(const AddrMode &AM, Type *Ty) const
     LLVM_OVERRIDE;
-  virtual bool allowsUnalignedMemoryAccesses(EVT VT, bool *Fast) const
-    LLVM_OVERRIDE;
+  virtual bool allowsUnalignedMemoryAccesses(EVT VT, unsigned AS,
+                                             bool *Fast) const LLVM_OVERRIDE;
   virtual bool isTruncateFree(Type *, Type *) const LLVM_OVERRIDE;
   virtual bool isTruncateFree(EVT, EVT) const LLVM_OVERRIDE;
   virtual const char *getTargetNodeName(unsigned Opcode) const LLVM_OVERRIDE;
lib/Target/X86/X86ISelLowering.cpp
@@ -1671,7 +1671,9 @@ bool X86TargetLowering::isSafeMemOpType(MVT VT) const {
 }

 bool
-X86TargetLowering::allowsUnalignedMemoryAccesses(EVT VT, bool *Fast) const {
+X86TargetLowering::allowsUnalignedMemoryAccesses(EVT VT,
+                                                 unsigned,
+                                                 bool *Fast) const {
   if (Fast)
     *Fast = Subtarget->isUnalignedMemAccessFast();
   return true;
lib/Target/X86/X86ISelLowering.h
@@ -577,7 +577,8 @@ namespace llvm {
    /// allowsUnalignedMemoryAccesses - Returns true if the target allows
    /// unaligned memory accesses. of the specified type. Returns whether it
    /// is "fast" by reference in the second argument.
-    virtual bool allowsUnalignedMemoryAccesses(EVT VT, bool *Fast) const;
+    virtual bool allowsUnalignedMemoryAccesses(EVT VT, unsigned AS,
+                                               bool *Fast) const;

     /// LowerOperation - Provide custom lowering hooks for some operations.
     ///