Fix type of LUXC1 and SUXC1. These instructions were incorrectly defined as
single-precision load and store.

Also avoid selecting LUXC1 and SUXC1 instructions during isel. It is incorrect
to map unaligned floating point load/store nodes to these instructions.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@161063 91177308-0d34-0410-b5e6-96231b3b80d8
This commit is contained in:
parent 53b59d1d97
commit 36bcc11236
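For context: per the MIPS architecture manuals, luxc1 and suxc1 access the aligned doubleword containing the effective address (the low address bits are ignored), so they cannot implement an ordinary unaligned f32 access; that is why the SelectAddr special case and the load_u/store_u patterns are removed below. The following is a minimal IR sketch distilled from the updated tests (the global and function names are illustrative, not part of the patch): with this change, llc targeting MIPS32r2 is expected to expand an align-1 f32 load instead of emitting luxc1 for it.

; Illustrative only -- @g and @load_unaligned_f32 do not appear in this patch.
@g = external global float, align 1

define float @load_unaligned_f32() nounwind readonly {
entry:
  ; After this change the unaligned access is expanded; no luxc1 is emitted.
  %0 = load float* @g, align 1
  ret float %0
}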
@@ -289,21 +289,6 @@ bool MipsDAGToDAGISel::
 SelectAddr(SDNode *Parent, SDValue Addr, SDValue &Base, SDValue &Offset) {
   EVT ValTy = Addr.getValueType();

-  // If Parent is an unaligned f32 load or store, select a (base + index)
-  // floating point load/store instruction (luxc1 or suxc1).
-  const LSBaseSDNode *LS = 0;
-
-  if (Parent && (LS = dyn_cast<LSBaseSDNode>(Parent))) {
-    EVT VT = LS->getMemoryVT();
-
-    if (VT.getSizeInBits() / 8 > LS->getAlignment()) {
-      assert(TLI.allowsUnalignedMemoryAccesses(VT) &&
-             "Unaligned loads/stores not supported for this type.");
-      if (VT == MVT::f32)
-        return false;
-    }
-  }
-
   // if Address is FI, get the TargetFrameIndex.
   if (FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
     Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), ValTy);
@@ -362,6 +347,8 @@ SelectAddr(SDNode *Parent, SDValue Addr, SDValue &Base, SDValue &Offset) {
   }

   // If an indexed floating point load/store can be emitted, return false.
+  const LSBaseSDNode *LS = dyn_cast<LSBaseSDNode>(Parent);
+
   if (LS &&
       (LS->getMemoryVT() == MVT::f32 || LS->getMemoryVT() == MVT::f64) &&
       Subtarget.hasMips32r2Or64())
@@ -314,8 +314,6 @@ bool MipsTargetLowering::allowsUnalignedMemoryAccesses(EVT VT) const {
   case MVT::i64:
   case MVT::i32:
     return true;
-  case MVT::f32:
-    return Subtarget->hasMips32r2Or64();
   default:
     return false;
   }
@@ -101,7 +101,7 @@ class FPStore<bits<6> op, string opstr, RegisterClass RC, Operand MemOpnd>:
 }
 // FP indexed load.
 class FPIdxLoad<bits<6> funct, string opstr, RegisterClass DRC,
-                RegisterClass PRC, PatFrag FOp>:
+                RegisterClass PRC, SDPatternOperator FOp = null_frag>:
   FFMemIdx<funct, (outs DRC:$fd), (ins PRC:$base, PRC:$index),
            !strconcat(opstr, "\t$fd, $index($base)"),
            [(set DRC:$fd, (FOp (add PRC:$base, PRC:$index)))]> {

@@ -110,7 +110,7 @@ class FPIdxLoad<bits<6> funct, string opstr, RegisterClass DRC,

 // FP indexed store.
 class FPIdxStore<bits<6> funct, string opstr, RegisterClass DRC,
-                 RegisterClass PRC, PatFrag FOp>:
+                 RegisterClass PRC, SDPatternOperator FOp= null_frag>:
   FFMemIdx<funct, (outs), (ins DRC:$fs, PRC:$base, PRC:$index),
            !strconcat(opstr, "\t$fs, $index($base)"),
            [(FOp DRC:$fs, (add PRC:$base, PRC:$index))]> {
@@ -270,7 +270,7 @@ let Predicates = [NotN64, HasStandardEncoding] in {
 }

 let Predicates = [NotN64, HasMips64, HasStandardEncoding],
-    DecoderNamespace = "Mips64" in {
+    DecoderNamespace = "Mips64" in {
   def LDC164 : FPLoad<0x35, "ldc1", FGR64, mem>;
   def SDC164 : FPStore<0x3d, "sdc1", FGR64, mem>;
 }
@@ -283,9 +283,7 @@ let Predicates = [NotN64, NotMips64, HasStandardEncoding] in {
 // Indexed loads and stores.
 let Predicates = [HasMips32r2Or64, HasStandardEncoding] in {
   def LWXC1 : FPIdxLoad<0x0, "lwxc1", FGR32, CPURegs, load_a>;
-  def LUXC1 : FPIdxLoad<0x5, "luxc1", FGR32, CPURegs, load_u>;
   def SWXC1 : FPIdxStore<0x8, "swxc1", FGR32, CPURegs, store_a>;
-  def SUXC1 : FPIdxStore<0xd, "suxc1", FGR32, CPURegs, store_u>;
 }

 let Predicates = [HasMips32r2, NotMips64, HasStandardEncoding] in {

@@ -301,13 +299,23 @@ let Predicates = [HasMips64, NotN64, HasStandardEncoding], DecoderNamespace="Mip
 // n64
 let Predicates = [IsN64, HasStandardEncoding], isCodeGenOnly=1 in {
   def LWXC1_P8 : FPIdxLoad<0x0, "lwxc1", FGR32, CPU64Regs, load_a>;
-  def LUXC1_P8 : FPIdxLoad<0x5, "luxc1", FGR32, CPU64Regs, load_u>;
   def LDXC164_P8 : FPIdxLoad<0x1, "ldxc1", FGR64, CPU64Regs, load_a>;
   def SWXC1_P8 : FPIdxStore<0x8, "swxc1", FGR32, CPU64Regs, store_a>;
-  def SUXC1_P8 : FPIdxStore<0xd, "suxc1", FGR32, CPU64Regs, store_u>;
   def SDXC164_P8 : FPIdxStore<0x9, "sdxc1", FGR64, CPU64Regs, store_a>;
 }

+// Load/store doubleword indexed unaligned.
+let Predicates = [NotMips64, HasStandardEncoding] in {
+  def LUXC1 : FPIdxLoad<0x5, "luxc1", AFGR64, CPURegs>;
+  def SUXC1 : FPIdxStore<0xd, "suxc1", AFGR64, CPURegs>;
+}
+
+let Predicates = [HasMips64, HasStandardEncoding],
+    DecoderNamespace="Mips64" in {
+  def LUXC164 : FPIdxLoad<0x5, "luxc1", FGR64, CPURegs>;
+  def SUXC164 : FPIdxStore<0xd, "suxc1", FGR64, CPURegs>;
+}
+
 /// Floating-point Aritmetic
 defm FADD : FFR2P_M<0x00, "add", fadd, 1>;
 defm FDIV : FFR2P_M<0x03, "div", fdiv>;

@@ -466,17 +474,3 @@ let Predicates = [IsFP64bit, HasStandardEncoding] in {
   def : MipsPat<(f32 (fround FGR64:$src)), (CVT_S_D64 FGR64:$src)>;
   def : MipsPat<(f64 (fextend FGR32:$src)), (CVT_D64_S FGR32:$src)>;
 }
-
-// Patterns for unaligned floating point loads and stores.
-let Predicates = [HasMips32r2Or64, NotN64, HasStandardEncoding] in {
-  def : MipsPat<(f32 (load_u CPURegs:$addr)), (LUXC1 CPURegs:$addr, ZERO)>;
-  def : MipsPat<(store_u FGR32:$src, CPURegs:$addr),
-                (SUXC1 FGR32:$src, CPURegs:$addr, ZERO)>;
-}
-
-let Predicates = [IsN64, HasStandardEncoding] in {
-  def : MipsPat<(f32 (load_u CPU64Regs:$addr)),
-                (LUXC1_P8 CPU64Regs:$addr, ZERO_64)>;
-  def : MipsPat<(store_u FGR32:$src, CPU64Regs:$addr),
-                (SUXC1_P8 FGR32:$src, CPU64Regs:$addr, ZERO_64)>;
-}
@@ -28,7 +28,7 @@ entry:

 define float @foo2(i32 %b, i32 %c) nounwind readonly {
 entry:
-; CHECK: luxc1
+; CHECK-NOT: luxc1
   %arrayidx1 = getelementptr inbounds [4 x %struct.S]* @s, i32 0, i32 %b, i32 0, i32 %c
   %0 = load float* %arrayidx1, align 1
   ret float %0

@@ -54,7 +54,7 @@ entry:

 define void @foo5(i32 %b, i32 %c) nounwind {
 entry:
-; CHECK: suxc1
+; CHECK-NOT: suxc1
   %0 = load float* @gf, align 4
   %arrayidx1 = getelementptr inbounds [4 x %struct.S]* @s, i32 0, i32 %b, i32 0, i32 %c
   store float %0, float* %arrayidx1, align 1
@@ -64,7 +64,7 @@ entry:
 define double @foo6(i32 %b, i32 %c) nounwind readonly {
 entry:
 ; CHECK: foo6
-; CHECK-NOT: ldxc1
+; CHECK-NOT: luxc1
   %arrayidx1 = getelementptr inbounds [4 x %struct.S2]* @s2, i32 0, i32 %b, i32 0, i32 %c
   %0 = load double* %arrayidx1, align 1
   ret double %0

@@ -73,7 +73,7 @@ entry:
 define void @foo7(i32 %b, i32 %c) nounwind {
 entry:
 ; CHECK: foo7
-; CHECK-NOT: sdxc1
+; CHECK-NOT: suxc1
   %0 = load double* @gd, align 8
   %arrayidx1 = getelementptr inbounds [4 x %struct.S2]* @s2, i32 0, i32 %b, i32 0, i32 %c
   store double %0, double* %arrayidx1, align 1
@@ -83,7 +83,7 @@ entry:
 define float @foo8() nounwind readonly {
 entry:
 ; CHECK: foo8
-; CHECK: luxc1
+; CHECK-NOT: luxc1
   %0 = load float* getelementptr inbounds (%struct.S3* @s3, i32 0, i32 1), align 1
   ret float %0
 }

@@ -91,7 +91,7 @@ entry:
 define void @foo9(float %f) nounwind {
 entry:
 ; CHECK: foo9
-; CHECK: suxc1
+; CHECK-NOT: suxc1
   store float %f, float* getelementptr inbounds (%struct.S3* @s3, i32 0, i32 1), align 1
   ret void
 }
@@ -30,7 +30,7 @@ entry:

 define float @foo2(i32 %b, i32 %c) nounwind readonly {
 entry:
-; CHECK: luxc1
+; CHECK-NOT: luxc1
   %idxprom = zext i32 %c to i64
   %idxprom1 = zext i32 %b to i64
   %arrayidx2 = getelementptr inbounds [4 x %struct.S]* @s, i64 0, i64 %idxprom1, i32 0, i64 %idxprom

@@ -60,7 +60,7 @@ entry:

 define void @foo5(i32 %b, i32 %c) nounwind {
 entry:
-; CHECK: suxc1
+; CHECK-NOT: suxc1
   %0 = load float* @gf, align 4
   %idxprom = zext i32 %c to i64
   %idxprom1 = zext i32 %b to i64
@@ -72,7 +72,7 @@ entry:
 define double @foo6(i32 %b, i32 %c) nounwind readonly {
 entry:
 ; CHECK: foo6
-; CHECK-NOT: ldxc1
+; CHECK-NOT: luxc1
   %idxprom = zext i32 %c to i64
   %idxprom1 = zext i32 %b to i64
   %arrayidx2 = getelementptr inbounds [4 x %struct.S2]* @s2, i64 0, i64 %idxprom1, i32 0, i64 %idxprom

@@ -83,7 +83,7 @@ entry:
 define void @foo7(i32 %b, i32 %c) nounwind {
 entry:
 ; CHECK: foo7
-; CHECK-NOT: sdxc1
+; CHECK-NOT: suxc1
   %0 = load double* @gd, align 8
   %idxprom = zext i32 %c to i64
   %idxprom1 = zext i32 %b to i64
@@ -95,7 +95,7 @@ entry:
 define float @foo8() nounwind readonly {
 entry:
 ; CHECK: foo8
-; CHECK: luxc1
+; CHECK-NOT: luxc1
   %0 = load float* getelementptr inbounds (%struct.S3* @s3, i64 0, i32 1), align 1
   ret float %0
 }

@@ -103,7 +103,7 @@ entry:
 define void @foo9(float %f) nounwind {
 entry:
 ; CHECK: foo9
-; CHECK: suxc1
+; CHECK-NOT: suxc1
   store float %f, float* getelementptr inbounds (%struct.S3* @s3, i64 0, i32 1), align 1
   ret void
 }