mirror of
https://github.com/c64scene-ar/llvm-6502.git
synced 2025-01-19 04:32:19 +00:00
[PowerPC] Don't use a vector preferred memory type at -O0
Even at -O0, we fall back to SDAG when we hit intrinsics, and if the intrinsic is a memset/memcpy/etc., we might normally use vector types. At -O0, this is probably not a good idea (because, if there were a bug in the lowering code, there would be no good way to turn it off). At -O0, only use scalar preferred types. Related to PR22754. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@233755 91177308-0d34-0410-b5e6-96231b3b80d8
This commit is contained in:
parent
6aebd393f0
commit
72cce21049
@ -11023,21 +11023,23 @@ EVT PPCTargetLowering::getOptimalMemOpType(uint64_t Size,
|
||||
bool IsMemset, bool ZeroMemset,
|
||||
bool MemcpyStrSrc,
|
||||
MachineFunction &MF) const {
|
||||
const Function *F = MF.getFunction();
|
||||
// When expanding a memset, require at least two QPX instructions to cover
|
||||
// the cost of loading the value to be stored from the constant pool.
|
||||
if (Subtarget.hasQPX() && Size >= 32 && (!IsMemset || Size >= 64) &&
|
||||
(!SrcAlign || SrcAlign >= 32) && (!DstAlign || DstAlign >= 32) &&
|
||||
!F->hasFnAttribute(Attribute::NoImplicitFloat)) {
|
||||
return MVT::v4f64;
|
||||
}
|
||||
if (getTargetMachine().getOptLevel() != CodeGenOpt::None) {
|
||||
const Function *F = MF.getFunction();
|
||||
// When expanding a memset, require at least two QPX instructions to cover
|
||||
// the cost of loading the value to be stored from the constant pool.
|
||||
if (Subtarget.hasQPX() && Size >= 32 && (!IsMemset || Size >= 64) &&
|
||||
(!SrcAlign || SrcAlign >= 32) && (!DstAlign || DstAlign >= 32) &&
|
||||
!F->hasFnAttribute(Attribute::NoImplicitFloat)) {
|
||||
return MVT::v4f64;
|
||||
}
|
||||
|
||||
// We should use Altivec/VSX loads and stores when available. For unaligned
|
||||
// addresses, unaligned VSX loads are only fast starting with the P8.
|
||||
if (Subtarget.hasAltivec() && Size >= 16 &&
|
||||
(((!SrcAlign || SrcAlign >= 16) && (!DstAlign || DstAlign >= 16)) ||
|
||||
((IsMemset && Subtarget.hasVSX()) || Subtarget.hasP8Vector())))
|
||||
return MVT::v4i32;
|
||||
// We should use Altivec/VSX loads and stores when available. For unaligned
|
||||
// addresses, unaligned VSX loads are only fast starting with the P8.
|
||||
if (Subtarget.hasAltivec() && Size >= 16 &&
|
||||
(((!SrcAlign || SrcAlign >= 16) && (!DstAlign || DstAlign >= 16)) ||
|
||||
((IsMemset && Subtarget.hasVSX()) || Subtarget.hasP8Vector())))
|
||||
return MVT::v4i32;
|
||||
}
|
||||
|
||||
if (Subtarget.isPPC64()) {
|
||||
return MVT::i64;
|
||||
|
@ -1,4 +1,5 @@
|
||||
; RUN: llc < %s | FileCheck %s
|
||||
; RUN: llc -O0 < %s | FileCheck %s -check-prefix=CHECK-O0
|
||||
target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-f128:128:128-v128:128:128-n32:64"
|
||||
target triple = "powerpc64-bgq-linux"
|
||||
|
||||
@ -14,6 +15,10 @@ entry:
|
||||
; CHECK: qvstfdx
|
||||
; CHECK: qvstfdx
|
||||
; CHECK: blr
|
||||
|
||||
; CHECK-O0-LABEL: @test_qpx
|
||||
; CHECK-O0-NOT: qvstfdx
|
||||
; CHECK-O0: blr
|
||||
}
|
||||
|
||||
; Function Attrs: nounwind
|
||||
@ -31,6 +36,10 @@ entry:
|
||||
; CHECK: stxvw4x
|
||||
; CHECK: stxvw4x
|
||||
; CHECK: blr
|
||||
|
||||
; CHECK-O0-LABEL: @test_vsx
|
||||
; CHECK-O0-NOT: stxvw4x
|
||||
; CHECK-O0: blr
|
||||
}
|
||||
|
||||
attributes #0 = { nounwind "target-cpu"="a2q" }
|
||||
|
Loading…
x
Reference in New Issue
Block a user