Use vld1 / vst2 for unaligned v2f64 load / store. e.g. Use vld1.16 for 2-byte
aligned address. Based on patch by David Peixotto. Also use vld1.64 / vst1.64
with 128-bit alignment to take advantage of alignment hints.

rdar://12090772, rdar://12238782

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@164089 91177308-0d34-0410-b5e6-96231b3b80d8
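The "vld1.16 for 2-byte aligned address" and "vld1.64 / vst1.64 with 128-bit alignment" parts boil down to choosing the widest vld1/vst1 element size that the known alignment of the v2f64 access supports. The sketch below is a hypothetical, standalone illustration of that selection (elementSizeForAlignment is not an LLVM function); the real lowering happens in the ARM backend when it expands the unaligned 128-bit load/store.

#include <cstdio>

// Hypothetical helper (not LLVM code): pick the vld1/vst1 element size used
// for an unaligned v2f64 (128-bit) load or store from the known alignment.
static unsigned elementSizeForAlignment(unsigned AlignBytes) {
  if (AlignBytes >= 8)
    return 64; // vld1.64 / vst1.64; with 16-byte alignment a [rN:128]
               // alignment hint can be emitted as well.
  if (AlignBytes >= 4)
    return 32; // vld1.32 / vst1.32
  if (AlignBytes >= 2)
    return 16; // vld1.16 / vst1.16, e.g. for a 2-byte aligned address
  return 8;    // vld1.8 / vst1.8 covers a fully unaligned pointer
}

int main() {
  const unsigned Aligns[] = {1, 2, 4, 8, 16};
  for (unsigned Align : Aligns)
    std::printf("align %2u -> vld1.%u / vst1.%u\n", Align,
                elementSizeForAlignment(Align),
                elementSizeForAlignment(Align));
  return 0;
}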
lib/Target/ARM/ARMISelLowering.cpp

@@ -9025,8 +9025,8 @@ bool ARMTargetLowering::isDesirableToTransformToIntegerOp(unsigned Opc,
 }
 
 bool ARMTargetLowering::allowsUnalignedMemoryAccesses(EVT VT) const {
-  if (!Subtarget->allowsUnalignedMem())
-    return false;
+  // The AllowsUnaliged flag models the SCTLR.A setting in ARM cpus
+  bool AllowsUnaligned = Subtarget->allowsUnalignedMem();
 
   switch (VT.getSimpleVT().SimpleTy) {
   default:
@@ -9034,10 +9034,14 @@ bool ARMTargetLowering::allowsUnalignedMemoryAccesses(EVT VT) const {
   case MVT::i8:
   case MVT::i16:
   case MVT::i32:
-    return true;
+    // Unaligned access can use (for example) LRDB, LRDH, LDR
+    return AllowsUnaligned;
   case MVT::f64:
-    return Subtarget->hasNEON();
-  // FIXME: VLD1 etc with standard alignment is legal.
+  case MVT::v2f64:
+    // For any little-endian targets with neon, we can support unaligned ld/st
+    // of D and Q (e.g. {D0,D1}) registers by using vld1.i8/vst1.i8.
+    // A big-endian target may also explictly support unaligned accesses
+    return Subtarget->hasNEON() && (AllowsUnaligned || isLittleEndian());
   }
 }
 
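For f64 and v2f64 the updated hook now requires NEON plus either hardware tolerance of unaligned accesses (SCTLR.A clear, as modeled by allowsUnalignedMem()) or a little-endian target, where vld1.8 / vst1.8 can always be used. Below is a minimal standalone sketch of that predicate, with a hypothetical TargetInfo struct standing in for the subtarget queries; it is an illustration, not the LLVM hook itself.

#include <cstdio>

// Hypothetical stand-in for the subtarget queries used by the hook.
struct TargetInfo {
  bool HasNEON;
  bool AllowsUnalignedMem; // models the SCTLR.A setting
  bool IsLittleEndian;
};

// Sketch of the f64 / v2f64 decision in the updated hook.
static bool allowsUnalignedF64(const TargetInfo &T) {
  return T.HasNEON && (T.AllowsUnalignedMem || T.IsLittleEndian);
}

int main() {
  const TargetInfo Cases[] = {
      {true, true, true},   // typical little-endian NEON core
      {true, false, true},  // strict alignment, but LE: vld1.8/vst1.8 still ok
      {true, false, false}, // big-endian with strict alignment: not allowed
      {false, true, true},  // no NEON: only the integer cases apply
  };
  for (const TargetInfo &T : Cases)
    std::printf("NEON=%d unaligned-ok=%d LE=%d -> %s\n", (int)T.HasNEON,
                (int)T.AllowsUnalignedMem, (int)T.IsLittleEndian,
                allowsUnalignedF64(T) ? "unaligned ld/st" : "aligned only");
  return 0;
}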