R600/SI: Match read2/write2 stride 64 versions

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@219536 91177308-0d34-0410-b5e6-96231b3b80d8
Matt Arsenault 2014-10-10 22:12:32 +00:00
parent fcc00a10e3
commit 9bd1daf4b9
6 changed files with 480 additions and 48 deletions

@@ -76,14 +76,12 @@ private:
MachineBasicBlock::iterator mergeRead2Pair(
MachineBasicBlock::iterator I,
MachineBasicBlock::iterator Paired,
- unsigned EltSize,
- const MCInstrDesc &Read2InstDesc);
+ unsigned EltSize);
MachineBasicBlock::iterator mergeWrite2Pair(
MachineBasicBlock::iterator I,
MachineBasicBlock::iterator Paired,
- unsigned EltSize,
- const MCInstrDesc &Write2InstDesc);
+ unsigned EltSize);
public:
static char ID;
@@ -144,12 +142,29 @@ FunctionPass *llvm::createSILoadStoreOptimizerPass(TargetMachine &TM) {
bool SILoadStoreOptimizer::offsetsCanBeCombined(unsigned Offset0,
unsigned Offset1,
- unsigned EltSize) {
+ unsigned Size) {
// XXX - Would the same offset be OK? Is there any reason this would happen or
// be useful?
- return (Offset0 != Offset1) &&
- isUInt<8>(Offset0 / EltSize) &&
- isUInt<8>(Offset1 / EltSize);
+ if (Offset0 == Offset1)
+ return false;
+ // This won't be valid if the offset isn't aligned.
+ if ((Offset0 % Size != 0) || (Offset1 % Size != 0))
+ return false;
+ unsigned EltOffset0 = Offset0 / Size;
+ unsigned EltOffset1 = Offset1 / Size;
+ // Check if the new offsets fit in the reduced 8-bit range.
+ if (isUInt<8>(EltOffset0) && isUInt<8>(EltOffset1))
+ return true;
+ // If the offset in elements doesn't fit in 8-bits, we might be able to use
+ // the stride 64 versions.
+ if ((EltOffset0 % 64 != 0) || (EltOffset1 % 64) != 0)
+ return false;
+ return isUInt<8>(EltOffset0 / 64) && isUInt<8>(EltOffset1 / 64);
}
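// Worked example (illustrative, with Size = 4): byte offsets 0x1000 and
// 0x4000 give element offsets 1024 and 4096. Neither fits in 8 bits, but
// both are multiples of 64, and 1024 / 64 = 16 and 4096 / 64 = 64 do fit,
// so the st64 forms can encode the pair.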
MachineBasicBlock::iterator
@@ -176,8 +191,8 @@ SILoadStoreOptimizer::findMatchingDSInst(MachineBasicBlock::iterator I,
AddrReg0.getSubReg() == AddrReg1.getSubReg()) {
int OffsetIdx = AMDGPU::getNamedOperandIdx(I->getOpcode(),
AMDGPU::OpName::offset);
- unsigned Offset0 = I->getOperand(OffsetIdx).getImm();
- unsigned Offset1 = MBBI->getOperand(OffsetIdx).getImm();
+ unsigned Offset0 = I->getOperand(OffsetIdx).getImm() & 0xffff;
+ unsigned Offset1 = MBBI->getOperand(OffsetIdx).getImm() & 0xffff;
// Check both offsets fit in the reduced range.
if (offsetsCanBeCombined(Offset0, Offset1, EltSize))
@@ -201,8 +216,7 @@ void SILoadStoreOptimizer::updateRegDefsUses(unsigned SrcReg,
MachineBasicBlock::iterator SILoadStoreOptimizer::mergeRead2Pair(
MachineBasicBlock::iterator I,
MachineBasicBlock::iterator Paired,
- unsigned EltSize,
- const MCInstrDesc &Read2InstDesc) {
+ unsigned EltSize) {
MachineBasicBlock *MBB = I->getParent();
// Be careful, since the addresses could be subregisters themselves in weird
@@ -213,9 +227,29 @@ MachineBasicBlock::iterator SILoadStoreOptimizer::mergeRead2Pair(
unsigned DestReg1
= TII->getNamedOperand(*Paired, AMDGPU::OpName::vdst)->getReg();
- unsigned Offset0 = TII->getNamedOperand(*I, AMDGPU::OpName::offset)->getImm();
+ unsigned Offset0
+ = TII->getNamedOperand(*I, AMDGPU::OpName::offset)->getImm() & 0xffff;
unsigned Offset1
- = TII->getNamedOperand(*Paired, AMDGPU::OpName::offset)->getImm();
+ = TII->getNamedOperand(*Paired, AMDGPU::OpName::offset)->getImm() & 0xffff;
+ unsigned NewOffset0 = Offset0 / EltSize;
+ unsigned NewOffset1 = Offset1 / EltSize;
+ unsigned Opc = (EltSize == 4) ? AMDGPU::DS_READ2_B32 : AMDGPU::DS_READ2_B64;
+ // Prefer the st64 form if we can use it, even if we can fit the offset in the
+ // non st64 version. I'm not sure if there's any real reason to do this.
+ bool UseST64 = (NewOffset0 % 64 == 0) && (NewOffset1 % 64 == 0);
+ if (UseST64) {
+ NewOffset0 /= 64;
+ NewOffset1 /= 64;
+ Opc = (EltSize == 4) ? AMDGPU::DS_READ2ST64_B32 : AMDGPU::DS_READ2ST64_B64;
+ }
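// Worked example (illustrative): byte offsets 0x0 and 0x4000 with EltSize == 4
// give NewOffset0 = 0 and NewOffset1 = 4096; both are multiples of 64, so this
// pair is emitted as DS_READ2ST64_B32 with offsets 0 and 64.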
+ assert((isUInt<8>(NewOffset0) && isUInt<8>(NewOffset1)) &&
+ (NewOffset0 != NewOffset1) &&
+ "Computed offset doesn't fit");
+ const MCInstrDesc &Read2Desc = TII->get(Opc);
const TargetRegisterClass *SuperRC
= (EltSize == 4) ? &AMDGPU::VReg_64RegClass : &AMDGPU::VReg_128RegClass;
@@ -223,11 +257,11 @@ MachineBasicBlock::iterator SILoadStoreOptimizer::mergeRead2Pair(
DebugLoc DL = I->getDebugLoc();
MachineInstrBuilder Read2
- = BuildMI(*MBB, I, DL, Read2InstDesc, DestReg)
+ = BuildMI(*MBB, I, DL, Read2Desc, DestReg)
.addImm(0) // gds
.addOperand(*AddrReg) // addr
- .addImm(Offset0 / EltSize) // offset0
- .addImm(Offset1 / EltSize) // offset1
+ .addImm(NewOffset0) // offset0
+ .addImm(NewOffset1) // offset1
.addMemOperand(*I->memoperands_begin())
.addMemOperand(*Paired->memoperands_begin());
@@ -255,8 +289,7 @@ MachineBasicBlock::iterator SILoadStoreOptimizer::mergeRead2Pair(
MachineBasicBlock::iterator SILoadStoreOptimizer::mergeWrite2Pair(
MachineBasicBlock::iterator I,
MachineBasicBlock::iterator Paired,
- unsigned EltSize,
- const MCInstrDesc &Write2InstDesc) {
+ unsigned EltSize) {
MachineBasicBlock *MBB = I->getParent();
// Be sure to use .addOperand(), and not .addReg() with these. We want to be
@@ -266,19 +299,40 @@ MachineBasicBlock::iterator SILoadStoreOptimizer::mergeWrite2Pair(
const MachineOperand *Data1
= TII->getNamedOperand(*Paired, AMDGPU::OpName::data0);
- unsigned Offset0 = TII->getNamedOperand(*I, AMDGPU::OpName::offset)->getImm();
- unsigned Offset1
- = TII->getNamedOperand(*Paired, AMDGPU::OpName::offset)->getImm();
+ unsigned Offset0
+ = TII->getNamedOperand(*I, AMDGPU::OpName::offset)->getImm() & 0xffff;
+ unsigned Offset1
+ = TII->getNamedOperand(*Paired, AMDGPU::OpName::offset)->getImm() & 0xffff;
+ unsigned NewOffset0 = Offset0 / EltSize;
+ unsigned NewOffset1 = Offset1 / EltSize;
+ unsigned Opc = (EltSize == 4) ? AMDGPU::DS_WRITE2_B32 : AMDGPU::DS_WRITE2_B64;
+ // Prefer the st64 form if we can use it, even if we can fit the offset in the
+ // non st64 version. I'm not sure if there's any real reason to do this.
+ bool UseST64 = (NewOffset0 % 64 == 0) && (NewOffset1 % 64 == 0);
+ if (UseST64) {
+ NewOffset0 /= 64;
+ NewOffset1 /= 64;
+ Opc = (EltSize == 4) ? AMDGPU::DS_WRITE2ST64_B32 : AMDGPU::DS_WRITE2ST64_B64;
+ }
+ assert((isUInt<8>(NewOffset0) && isUInt<8>(NewOffset1)) &&
+ (NewOffset0 != NewOffset1) &&
+ "Computed offset doesn't fit");
+ const MCInstrDesc &Write2Desc = TII->get(Opc);
DebugLoc DL = I->getDebugLoc();
MachineInstrBuilder Write2
- = BuildMI(*MBB, I, DL, Write2InstDesc)
+ = BuildMI(*MBB, I, DL, Write2Desc)
.addImm(0) // gds
.addOperand(*Addr) // addr
.addOperand(*Data0) // data0
.addOperand(*Data1) // data1
- .addImm(Offset0 / EltSize) // offset0
- .addImm(Offset1 / EltSize) // offset1
+ .addImm(NewOffset0) // offset0
+ .addImm(NewOffset1) // offset1
.addMemOperand(*I->memoperands_begin())
.addMemOperand(*Paired->memoperands_begin());
@@ -300,11 +354,6 @@ MachineBasicBlock::iterator SILoadStoreOptimizer::mergeWrite2Pair(
// the same base register. We rely on the scheduler to do the hard work of
// clustering nearby loads, and assume these are all adjacent.
bool SILoadStoreOptimizer::optimizeBlock(MachineBasicBlock &MBB) {
- const MCInstrDesc &Read2B32Desc = TII->get(AMDGPU::DS_READ2_B32);
- const MCInstrDesc &Read2B64Desc = TII->get(AMDGPU::DS_READ2_B64);
- const MCInstrDesc &Write2B32Desc = TII->get(AMDGPU::DS_WRITE2_B32);
- const MCInstrDesc &Write2B64Desc = TII->get(AMDGPU::DS_WRITE2_B64);
bool Modified = false;
for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end(); I != E;) {
@@ -322,10 +371,7 @@ bool SILoadStoreOptimizer::optimizeBlock(MachineBasicBlock &MBB) {
MachineBasicBlock::iterator Match = findMatchingDSInst(I, Size);
if (Match != E) {
Modified = true;
- const MCInstrDesc &Read2Desc
- = (Opc == AMDGPU::DS_READ_B64) ? Read2B64Desc : Read2B32Desc;
- I = mergeRead2Pair(I, Match, Size, Read2Desc);
+ I = mergeRead2Pair(I, Match, Size);
} else {
++I;
}
@@ -336,11 +382,7 @@ bool SILoadStoreOptimizer::optimizeBlock(MachineBasicBlock &MBB) {
MachineBasicBlock::iterator Match = findMatchingDSInst(I, Size);
if (Match != E) {
Modified = true;
- const MCInstrDesc &Write2Desc
- = (Opc == AMDGPU::DS_WRITE_B64) ? Write2B64Desc : Write2B32Desc;
- I = mergeWrite2Pair(I, Match, Size, Write2Desc);
+ I = mergeWrite2Pair(I, Match, Size);
} else {
++I;
}

@@ -47,13 +47,13 @@ define void @simple_read2_f32_max_offset(float addrspace(1)* %out) #0 {
; SI-LABEL: @simple_read2_f32_too_far
; SI-NOT: DS_READ2_B32
; SI: DS_READ_B32 v{{[0-9]+}}, v{{[0-9]+}}, 0x0
- ; SI: DS_READ_B32 v{{[0-9]+}}, v{{[0-9]+}}, 0x400
+ ; SI: DS_READ_B32 v{{[0-9]+}}, v{{[0-9]+}}, 0x404
; SI: S_ENDPGM
define void @simple_read2_f32_too_far(float addrspace(1)* %out) #0 {
%x.i = tail call i32 @llvm.r600.read.tidig.x() #1
%arrayidx0 = getelementptr inbounds [512 x float] addrspace(3)* @lds, i32 0, i32 %x.i
%val0 = load float addrspace(3)* %arrayidx0, align 4
- %add.x = add nsw i32 %x.i, 256
+ %add.x = add nsw i32 %x.i, 257
%arrayidx1 = getelementptr inbounds [512 x float] addrspace(3)* @lds, i32 0, i32 %add.x
%val1 = load float addrspace(3)* %arrayidx1, align 4
%sum = fadd float %val0, %val1
@@ -349,13 +349,13 @@ define void @simple_read2_f64_max_offset(double addrspace(1)* %out) #0 {
; SI-LABEL: @simple_read2_f64_too_far
; SI-NOT: DS_READ2_B64
; SI: DS_READ_B64 {{v\[[0-9]+:[0-9]+\]}}, v{{[0-9]+}}, 0x0
- ; SI: DS_READ_B64 {{v\[[0-9]+:[0-9]+\]}}, v{{[0-9]+}}, 0x800
+ ; SI: DS_READ_B64 {{v\[[0-9]+:[0-9]+\]}}, v{{[0-9]+}}, 0x808
; SI: S_ENDPGM
define void @simple_read2_f64_too_far(double addrspace(1)* %out) #0 {
%x.i = tail call i32 @llvm.r600.read.tidig.x() #1
%arrayidx0 = getelementptr inbounds [512 x double] addrspace(3)* @lds.f64, i32 0, i32 %x.i
%val0 = load double addrspace(3)* %arrayidx0, align 8
- %add.x = add nsw i32 %x.i, 256
+ %add.x = add nsw i32 %x.i, 257
%arrayidx1 = getelementptr inbounds [512 x double] addrspace(3)* @lds.f64, i32 0, i32 %add.x
%val1 = load double addrspace(3)* %arrayidx1, align 8
%sum = fadd double %val0, %val1

@@ -0,0 +1,272 @@
; RUN: llc -march=r600 -mcpu=bonaire -verify-machineinstrs -mattr=+load-store-opt -enable-misched < %s | FileCheck -check-prefix=SI %s
@lds = addrspace(3) global [512 x float] zeroinitializer, align 4
@lds.f64 = addrspace(3) global [512 x double] zeroinitializer, align 8
; SI-LABEL: @simple_read2st64_f32_0_1
; SI: DS_READ2ST64_B32 v{{\[}}[[LO_VREG:[0-9]+]]:[[HI_VREG:[0-9]+]]{{\]}}, v{{[0-9]+}}, 0x0, 0x1
; SI: S_WAITCNT lgkmcnt(0)
; SI: V_ADD_F32_e32 [[RESULT:v[0-9]+]], v[[HI_VREG]], v[[LO_VREG]]
; SI: BUFFER_STORE_DWORD [[RESULT]]
; SI: S_ENDPGM
define void @simple_read2st64_f32_0_1(float addrspace(1)* %out) #0 {
%x.i = tail call i32 @llvm.r600.read.tidig.x() #1
%arrayidx0 = getelementptr inbounds [512 x float] addrspace(3)* @lds, i32 0, i32 %x.i
%val0 = load float addrspace(3)* %arrayidx0, align 4
%add.x = add nsw i32 %x.i, 64
%arrayidx1 = getelementptr inbounds [512 x float] addrspace(3)* @lds, i32 0, i32 %add.x
%val1 = load float addrspace(3)* %arrayidx1, align 4
%sum = fadd float %val0, %val1
%out.gep = getelementptr inbounds float addrspace(1)* %out, i32 %x.i
store float %sum, float addrspace(1)* %out.gep, align 4
ret void
}
; SI-LABEL: @simple_read2st64_f32_1_2
; SI: DS_READ2ST64_B32 v{{\[}}[[LO_VREG:[0-9]+]]:[[HI_VREG:[0-9]+]]{{\]}}, v{{[0-9]+}}, 0x1, 0x2
; SI: S_WAITCNT lgkmcnt(0)
; SI: V_ADD_F32_e32 [[RESULT:v[0-9]+]], v[[HI_VREG]], v[[LO_VREG]]
; SI: BUFFER_STORE_DWORD [[RESULT]]
; SI: S_ENDPGM
define void @simple_read2st64_f32_1_2(float addrspace(1)* %out, float addrspace(3)* %lds) #0 {
%x.i = tail call i32 @llvm.r600.read.tidig.x() #1
%add.x.0 = add nsw i32 %x.i, 64
%arrayidx0 = getelementptr inbounds float addrspace(3)* %lds, i32 %add.x.0
%val0 = load float addrspace(3)* %arrayidx0, align 4
%add.x.1 = add nsw i32 %x.i, 128
%arrayidx1 = getelementptr inbounds float addrspace(3)* %lds, i32 %add.x.1
%val1 = load float addrspace(3)* %arrayidx1, align 4
%sum = fadd float %val0, %val1
%out.gep = getelementptr inbounds float addrspace(1)* %out, i32 %x.i
store float %sum, float addrspace(1)* %out.gep, align 4
ret void
}
; SI-LABEL: @simple_read2st64_f32_max_offset
; SI: DS_READ2ST64_B32 v{{\[}}[[LO_VREG:[0-9]+]]:[[HI_VREG:[0-9]+]]{{\]}}, v{{[0-9]+}}, 0x1, 0xff
; SI: S_WAITCNT lgkmcnt(0)
; SI: V_ADD_F32_e32 [[RESULT:v[0-9]+]], v[[HI_VREG]], v[[LO_VREG]]
; SI: BUFFER_STORE_DWORD [[RESULT]]
; SI: S_ENDPGM
define void @simple_read2st64_f32_max_offset(float addrspace(1)* %out, float addrspace(3)* %lds) #0 {
%x.i = tail call i32 @llvm.r600.read.tidig.x() #1
%add.x.0 = add nsw i32 %x.i, 64
%arrayidx0 = getelementptr inbounds float addrspace(3)* %lds, i32 %add.x.0
%val0 = load float addrspace(3)* %arrayidx0, align 4
%add.x.1 = add nsw i32 %x.i, 16320
%arrayidx1 = getelementptr inbounds float addrspace(3)* %lds, i32 %add.x.1
%val1 = load float addrspace(3)* %arrayidx1, align 4
%sum = fadd float %val0, %val1
%out.gep = getelementptr inbounds float addrspace(1)* %out, i32 %x.i
store float %sum, float addrspace(1)* %out.gep, align 4
ret void
}
; SI-LABEL: @simple_read2st64_f32_over_max_offset
; SI-NOT: DS_READ2ST64_B32
; SI: DS_READ_B32 {{v[0-9]+}}, {{v[0-9]+}}, 0x100,
; SI: V_ADD_I32_e32 [[BIGADD:v[0-9]+]], 0x10000, {{v[0-9]+}}
; SI: DS_READ_B32 {{v[0-9]+}}, [[BIGADD]], 0x0
; SI: S_ENDPGM
define void @simple_read2st64_f32_over_max_offset(float addrspace(1)* %out, float addrspace(3)* %lds) #0 {
%x.i = tail call i32 @llvm.r600.read.tidig.x() #1
%add.x.0 = add nsw i32 %x.i, 64
%arrayidx0 = getelementptr inbounds float addrspace(3)* %lds, i32 %add.x.0
%val0 = load float addrspace(3)* %arrayidx0, align 4
%add.x.1 = add nsw i32 %x.i, 16384
%arrayidx1 = getelementptr inbounds float addrspace(3)* %lds, i32 %add.x.1
%val1 = load float addrspace(3)* %arrayidx1, align 4
%sum = fadd float %val0, %val1
%out.gep = getelementptr inbounds float addrspace(1)* %out, i32 %x.i
store float %sum, float addrspace(1)* %out.gep, align 4
ret void
}
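; An offset of 63 elements is not a multiple of 64, so the st64 form cannot
; encode this pair (the plain DS_READ2_B32 form still could).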
; SI-LABEL: @odd_invalid_read2st64_f32_0
; SI-NOT: DS_READ2ST64_B32
; SI: S_ENDPGM
define void @odd_invalid_read2st64_f32_0(float addrspace(1)* %out) #0 {
%x.i = tail call i32 @llvm.r600.read.tidig.x() #1
%arrayidx0 = getelementptr inbounds [512 x float] addrspace(3)* @lds, i32 0, i32 %x.i
%val0 = load float addrspace(3)* %arrayidx0, align 4
%add.x = add nsw i32 %x.i, 63
%arrayidx1 = getelementptr inbounds [512 x float] addrspace(3)* @lds, i32 0, i32 %add.x
%val1 = load float addrspace(3)* %arrayidx1, align 4
%sum = fadd float %val0, %val1
%out.gep = getelementptr inbounds float addrspace(1)* %out, i32 %x.i
store float %sum, float addrspace(1)* %out.gep, align 4
ret void
}
; SI-LABEL: @odd_invalid_read2st64_f32_1
; SI-NOT: DS_READ2ST64_B32
; SI: S_ENDPGM
define void @odd_invalid_read2st64_f32_1(float addrspace(1)* %out) #0 {
%x.i = tail call i32 @llvm.r600.read.tidig.x() #1
%add.x.0 = add nsw i32 %x.i, 64
%arrayidx0 = getelementptr inbounds [512 x float] addrspace(3)* @lds, i32 0, i32 %add.x.0
%val0 = load float addrspace(3)* %arrayidx0, align 4
%add.x.1 = add nsw i32 %x.i, 127
%arrayidx1 = getelementptr inbounds [512 x float] addrspace(3)* @lds, i32 0, i32 %add.x.1
%val1 = load float addrspace(3)* %arrayidx1, align 4
%sum = fadd float %val0, %val1
%out.gep = getelementptr inbounds float addrspace(1)* %out, i32 %x.i
store float %sum, float addrspace(1)* %out.gep, align 4
ret void
}
; SI-LABEL: @simple_read2st64_f64_0_1
; SI: DS_READ2ST64_B64 v{{\[}}[[LO_VREG:[0-9]+]]:[[HI_VREG:[0-9]+]]{{\]}}, v{{[0-9]+}}, 0x0, 0x1
; SI: S_WAITCNT lgkmcnt(0)
; SI: V_ADD_F64 [[RESULT:v\[[0-9]+:[0-9]+\]]], v{{\[}}[[LO_VREG]]:{{[0-9]+\]}}, v{{\[[0-9]+}}:[[HI_VREG]]{{\]}}
; SI: BUFFER_STORE_DWORDX2 [[RESULT]]
; SI: S_ENDPGM
define void @simple_read2st64_f64_0_1(double addrspace(1)* %out) #0 {
%x.i = tail call i32 @llvm.r600.read.tidig.x() #1
%arrayidx0 = getelementptr inbounds [512 x double] addrspace(3)* @lds.f64, i32 0, i32 %x.i
%val0 = load double addrspace(3)* %arrayidx0, align 8
%add.x = add nsw i32 %x.i, 64
%arrayidx1 = getelementptr inbounds [512 x double] addrspace(3)* @lds.f64, i32 0, i32 %add.x
%val1 = load double addrspace(3)* %arrayidx1, align 8
%sum = fadd double %val0, %val1
%out.gep = getelementptr inbounds double addrspace(1)* %out, i32 %x.i
store double %sum, double addrspace(1)* %out.gep, align 8
ret void
}
; SI-LABEL: @simple_read2st64_f64_1_2
; SI: DS_READ2ST64_B64 v{{\[}}[[LO_VREG:[0-9]+]]:[[HI_VREG:[0-9]+]]{{\]}}, v{{[0-9]+}}, 0x1, 0x2
; SI: S_WAITCNT lgkmcnt(0)
; SI: V_ADD_F64 [[RESULT:v\[[0-9]+:[0-9]+\]]], v{{\[}}[[LO_VREG]]:{{[0-9]+\]}}, v{{\[[0-9]+}}:[[HI_VREG]]{{\]}}
; SI: BUFFER_STORE_DWORDX2 [[RESULT]]
; SI: S_ENDPGM
define void @simple_read2st64_f64_1_2(double addrspace(1)* %out, double addrspace(3)* %lds) #0 {
%x.i = tail call i32 @llvm.r600.read.tidig.x() #1
%add.x.0 = add nsw i32 %x.i, 64
%arrayidx0 = getelementptr inbounds double addrspace(3)* %lds, i32 %add.x.0
%val0 = load double addrspace(3)* %arrayidx0, align 8
%add.x.1 = add nsw i32 %x.i, 128
%arrayidx1 = getelementptr inbounds double addrspace(3)* %lds, i32 %add.x.1
%val1 = load double addrspace(3)* %arrayidx1, align 8
%sum = fadd double %val0, %val1
%out.gep = getelementptr inbounds double addrspace(1)* %out, i32 %x.i
store double %sum, double addrspace(1)* %out.gep, align 8
ret void
}
; Alignment only
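; The f64 loads are only 4-byte aligned, so they are split into dword accesses,
; which are then merged with the plain (non-st64) DS_READ2_B32.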
; SI-LABEL: @misaligned_read2st64_f64
; SI: DS_READ2_B32 v{{\[[0-9]+:[0-9]+\]}}, {{v[0-9]+}}, 0x0, 0x1
; SI: DS_READ2_B32 v{{\[[0-9]+:[0-9]+\]}}, {{v[0-9]+}}, 0x80, 0x81
; SI: S_ENDPGM
define void @misaligned_read2st64_f64(double addrspace(1)* %out, double addrspace(3)* %lds) #0 {
%x.i = tail call i32 @llvm.r600.read.tidig.x() #1
%arrayidx0 = getelementptr inbounds double addrspace(3)* %lds, i32 %x.i
%val0 = load double addrspace(3)* %arrayidx0, align 4
%add.x = add nsw i32 %x.i, 64
%arrayidx1 = getelementptr inbounds double addrspace(3)* %lds, i32 %add.x
%val1 = load double addrspace(3)* %arrayidx1, align 4
%sum = fadd double %val0, %val1
%out.gep = getelementptr inbounds double addrspace(1)* %out, i32 %x.i
store double %sum, double addrspace(1)* %out.gep, align 4
ret void
}
; The maximum is not the usual 0xff because 0xff * 8 * 64 > 0xffff
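; (0xff * 8 * 64 = 130560 overflows the 16-bit byte offset of a single DS
; instruction, while 0x7f * 8 * 64 = 65024 still fits.)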
; SI-LABEL: @simple_read2st64_f64_max_offset
; SI: DS_READ2ST64_B64 v{{\[}}[[LO_VREG:[0-9]+]]:[[HI_VREG:[0-9]+]]{{\]}}, v{{[0-9]+}}, 0x4, 0x7f
; SI: S_WAITCNT lgkmcnt(0)
; SI: V_ADD_F64 [[RESULT:v\[[0-9]+:[0-9]+\]]], v{{\[}}[[LO_VREG]]:{{[0-9]+\]}}, v{{\[[0-9]+}}:[[HI_VREG]]{{\]}}
; SI: BUFFER_STORE_DWORDX2 [[RESULT]]
; SI: S_ENDPGM
define void @simple_read2st64_f64_max_offset(double addrspace(1)* %out, double addrspace(3)* %lds) #0 {
%x.i = tail call i32 @llvm.r600.read.tidig.x() #1
%add.x.0 = add nsw i32 %x.i, 256
%arrayidx0 = getelementptr inbounds double addrspace(3)* %lds, i32 %add.x.0
%val0 = load double addrspace(3)* %arrayidx0, align 8
%add.x.1 = add nsw i32 %x.i, 8128
%arrayidx1 = getelementptr inbounds double addrspace(3)* %lds, i32 %add.x.1
%val1 = load double addrspace(3)* %arrayidx1, align 8
%sum = fadd double %val0, %val1
%out.gep = getelementptr inbounds double addrspace(1)* %out, i32 %x.i
store double %sum, double addrspace(1)* %out.gep, align 8
ret void
}
; SI-LABEL: @simple_read2st64_f64_over_max_offset
; SI-NOT: DS_READ2ST64_B64
; SI: DS_READ_B64 {{v\[[0-9]+:[0-9]+\]}}, {{v[0-9]+}}, 0x200,
; SI: V_ADD_I32_e32 [[BIGADD:v[0-9]+]], 0x10000, {{v[0-9]+}}
; SI: DS_READ_B64 {{v\[[0-9]+:[0-9]+\]}}, [[BIGADD]], 0x0
; SI: S_ENDPGM
define void @simple_read2st64_f64_over_max_offset(double addrspace(1)* %out, double addrspace(3)* %lds) #0 {
%x.i = tail call i32 @llvm.r600.read.tidig.x() #1
%add.x.0 = add nsw i32 %x.i, 64
%arrayidx0 = getelementptr inbounds double addrspace(3)* %lds, i32 %add.x.0
%val0 = load double addrspace(3)* %arrayidx0, align 8
%add.x.1 = add nsw i32 %x.i, 8192
%arrayidx1 = getelementptr inbounds double addrspace(3)* %lds, i32 %add.x.1
%val1 = load double addrspace(3)* %arrayidx1, align 8
%sum = fadd double %val0, %val1
%out.gep = getelementptr inbounds double addrspace(1)* %out, i32 %x.i
store double %sum, double addrspace(1)* %out.gep, align 8
ret void
}
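; 8129 is neither a multiple of 64 elements nor small enough for the plain
; 8-bit offset, so the two loads are not merged at all.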
; SI-LABEL: @invalid_read2st64_f64_odd_offset
; SI-NOT: DS_READ2ST64_B64
; SI: S_ENDPGM
define void @invalid_read2st64_f64_odd_offset(double addrspace(1)* %out, double addrspace(3)* %lds) #0 {
%x.i = tail call i32 @llvm.r600.read.tidig.x() #1
%add.x.0 = add nsw i32 %x.i, 64
%arrayidx0 = getelementptr inbounds double addrspace(3)* %lds, i32 %add.x.0
%val0 = load double addrspace(3)* %arrayidx0, align 8
%add.x.1 = add nsw i32 %x.i, 8129
%arrayidx1 = getelementptr inbounds double addrspace(3)* %lds, i32 %add.x.1
%val1 = load double addrspace(3)* %arrayidx1, align 8
%sum = fadd double %val0, %val1
%out.gep = getelementptr inbounds double addrspace(1)* %out, i32 %x.i
store double %sum, double addrspace(1)* %out.gep, align 8
ret void
}
; The stride of 8 elements is 8 * 8 bytes. We need to make sure the
; stride in elements, not bytes, is a multiple of 64.
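; Here the accesses are 8 doubles (64 bytes) apart: divisible by 64 in bytes,
; but the element stride of 8 is not, so the plain DS_READ2_B64 with offsets
; 0 and 8 is used instead.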
; SI-LABEL: @byte_size_only_divisible_64_read2_f64
; SI-NOT: DS_READ2ST64_B64
; SI: DS_READ2_B64 v{{\[[0-9]+:[0-9]+\]}}, v{{[0-9]+}}, 0x0, 0x8
; SI: S_ENDPGM
define void @byte_size_only_divisible_64_read2_f64(double addrspace(1)* %out, double addrspace(3)* %lds) #0 {
%x.i = tail call i32 @llvm.r600.read.tidig.x() #1
%arrayidx0 = getelementptr inbounds double addrspace(3)* %lds, i32 %x.i
%val0 = load double addrspace(3)* %arrayidx0, align 8
%add.x = add nsw i32 %x.i, 8
%arrayidx1 = getelementptr inbounds double addrspace(3)* %lds, i32 %add.x
%val1 = load double addrspace(3)* %arrayidx1, align 8
%sum = fadd double %val0, %val1
%out.gep = getelementptr inbounds double addrspace(1)* %out, i32 %x.i
store double %sum, double addrspace(1)* %out.gep, align 4
ret void
}
; Function Attrs: nounwind readnone
declare i32 @llvm.r600.read.tgid.x() #1
; Function Attrs: nounwind readnone
declare i32 @llvm.r600.read.tgid.y() #1
; Function Attrs: nounwind readnone
declare i32 @llvm.r600.read.tidig.x() #1
; Function Attrs: nounwind readnone
declare i32 @llvm.r600.read.tidig.y() #1
; Function Attrs: noduplicate nounwind
declare void @llvm.AMDGPU.barrier.local() #2
attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-realign-stack" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
attributes #1 = { nounwind readnone }
attributes #2 = { noduplicate nounwind }

@@ -162,7 +162,7 @@ define void @simple_write2_two_val_max_offset_f32(float addrspace(1)* %C, float
; SI-LABEL: @simple_write2_two_val_too_far_f32
; SI: DS_WRITE_B32 v{{[0-9]+}}, v{{[0-9]+}}, 0x0
- ; SI: DS_WRITE_B32 v{{[0-9]+}}, v{{[0-9]+}}, 0x400
+ ; SI: DS_WRITE_B32 v{{[0-9]+}}, v{{[0-9]+}}, 0x404
; SI: S_ENDPGM
define void @simple_write2_two_val_too_far_f32(float addrspace(1)* %C, float addrspace(1)* %in0, float addrspace(1)* %in1) #0 {
%x.i = tail call i32 @llvm.r600.read.tidig.x() #1
@@ -172,7 +172,7 @@ define void @simple_write2_two_val_too_far_f32(float addrspace(1)* %C, float add
%val1 = load float addrspace(1)* %in1.gep, align 4
%arrayidx0 = getelementptr inbounds [512 x float] addrspace(3)* @lds, i32 0, i32 %x.i
store float %val0, float addrspace(3)* %arrayidx0, align 4
- %add.x = add nsw i32 %x.i, 256
+ %add.x = add nsw i32 %x.i, 257
%arrayidx1 = getelementptr inbounds [512 x float] addrspace(3)* @lds, i32 0, i32 %add.x
store float %val1, float addrspace(3)* %arrayidx1, align 4
ret void

@@ -0,0 +1,119 @@
; RUN: llc -march=r600 -mcpu=bonaire -verify-machineinstrs -mattr=+load-store-opt -enable-misched < %s | FileCheck -check-prefix=SI %s
@lds = addrspace(3) global [512 x float] zeroinitializer, align 4
; SI-LABEL: @simple_write2st64_one_val_f32_0_1
; SI-DAG: BUFFER_LOAD_DWORD [[VAL:v[0-9]+]]
; SI-DAG: V_LSHLREV_B32_e32 [[VPTR:v[0-9]+]], 2, v{{[0-9]+}}
; SI: DS_WRITE2ST64_B32 [[VPTR]], [[VAL]], [[VAL]], 0x0, 0x1 [M0]
; SI: S_ENDPGM
define void @simple_write2st64_one_val_f32_0_1(float addrspace(1)* %C, float addrspace(1)* %in) #0 {
%x.i = tail call i32 @llvm.r600.read.tidig.x() #1
%in.gep = getelementptr float addrspace(1)* %in, i32 %x.i
%val = load float addrspace(1)* %in.gep, align 4
%arrayidx0 = getelementptr inbounds [512 x float] addrspace(3)* @lds, i32 0, i32 %x.i
store float %val, float addrspace(3)* %arrayidx0, align 4
%add.x = add nsw i32 %x.i, 64
%arrayidx1 = getelementptr inbounds [512 x float] addrspace(3)* @lds, i32 0, i32 %add.x
store float %val, float addrspace(3)* %arrayidx1, align 4
ret void
}
; SI-LABEL: @simple_write2st64_two_val_f32_2_5
; SI-DAG: BUFFER_LOAD_DWORD [[VAL0:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
; SI-DAG: BUFFER_LOAD_DWORD [[VAL1:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:0x4
; SI-DAG: V_LSHLREV_B32_e32 [[VPTR:v[0-9]+]], 2, v{{[0-9]+}}
; SI: DS_WRITE2ST64_B32 [[VPTR]], [[VAL0]], [[VAL1]], 0x2, 0x5 [M0]
; SI: S_ENDPGM
define void @simple_write2st64_two_val_f32_2_5(float addrspace(1)* %C, float addrspace(1)* %in) #0 {
%x.i = tail call i32 @llvm.r600.read.tidig.x() #1
%in.gep.0 = getelementptr float addrspace(1)* %in, i32 %x.i
%in.gep.1 = getelementptr float addrspace(1)* %in.gep.0, i32 1
%val0 = load float addrspace(1)* %in.gep.0, align 4
%val1 = load float addrspace(1)* %in.gep.1, align 4
%add.x.0 = add nsw i32 %x.i, 128
%arrayidx0 = getelementptr inbounds [512 x float] addrspace(3)* @lds, i32 0, i32 %add.x.0
store float %val0, float addrspace(3)* %arrayidx0, align 4
%add.x.1 = add nsw i32 %x.i, 320
%arrayidx1 = getelementptr inbounds [512 x float] addrspace(3)* @lds, i32 0, i32 %add.x.1
store float %val1, float addrspace(3)* %arrayidx1, align 4
ret void
}
; SI-LABEL: @simple_write2st64_two_val_max_offset_f32
; SI-DAG: BUFFER_LOAD_DWORD [[VAL0:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
; SI-DAG: BUFFER_LOAD_DWORD [[VAL1:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:0x4
; SI-DAG: V_LSHLREV_B32_e32 [[VPTR:v[0-9]+]], 2, v{{[0-9]+}}
; SI: DS_WRITE2ST64_B32 [[VPTR]], [[VAL0]], [[VAL1]], 0x0, 0xff [M0]
; SI: S_ENDPGM
define void @simple_write2st64_two_val_max_offset_f32(float addrspace(1)* %C, float addrspace(1)* %in, float addrspace(3)* %lds) #0 {
%x.i = tail call i32 @llvm.r600.read.tidig.x() #1
%in.gep.0 = getelementptr float addrspace(1)* %in, i32 %x.i
%in.gep.1 = getelementptr float addrspace(1)* %in.gep.0, i32 1
%val0 = load float addrspace(1)* %in.gep.0, align 4
%val1 = load float addrspace(1)* %in.gep.1, align 4
%arrayidx0 = getelementptr inbounds float addrspace(3)* %lds, i32 %x.i
store float %val0, float addrspace(3)* %arrayidx0, align 4
%add.x = add nsw i32 %x.i, 16320
%arrayidx1 = getelementptr inbounds float addrspace(3)* %lds, i32 %add.x
store float %val1, float addrspace(3)* %arrayidx1, align 4
ret void
}
; SI-LABEL: @simple_write2st64_two_val_max_offset_f64
; SI-DAG: BUFFER_LOAD_DWORDX2 [[VAL0:v\[[0-9]+:[0-9]+\]]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
; SI-DAG: BUFFER_LOAD_DWORDX2 [[VAL1:v\[[0-9]+:[0-9]+\]]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:0x8
; SI-DAG: V_ADD_I32_e32 [[VPTR:v[0-9]+]],
; SI: DS_WRITE2ST64_B64 [[VPTR]], [[VAL0]], [[VAL1]], 0x4, 0x7f [M0]
; SI: S_ENDPGM
define void @simple_write2st64_two_val_max_offset_f64(double addrspace(1)* %C, double addrspace(1)* %in, double addrspace(3)* %lds) #0 {
%x.i = tail call i32 @llvm.r600.read.tidig.x() #1
%in.gep.0 = getelementptr double addrspace(1)* %in, i32 %x.i
%in.gep.1 = getelementptr double addrspace(1)* %in.gep.0, i32 1
%val0 = load double addrspace(1)* %in.gep.0, align 8
%val1 = load double addrspace(1)* %in.gep.1, align 8
%add.x.0 = add nsw i32 %x.i, 256
%arrayidx0 = getelementptr inbounds double addrspace(3)* %lds, i32 %add.x.0
store double %val0, double addrspace(3)* %arrayidx0, align 8
%add.x.1 = add nsw i32 %x.i, 8128
%arrayidx1 = getelementptr inbounds double addrspace(3)* %lds, i32 %add.x.1
store double %val1, double addrspace(3)* %arrayidx1, align 8
ret void
}
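; As in the read2 case, the stride of 8 doubles is 64 bytes; 64 bytes is a
; multiple of 64, but 8 elements is not, so the st64 form does not apply.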
; SI-LABEL: @byte_size_only_divisible_64_write2st64_f64
; SI-NOT: DS_WRITE2ST64_B64
; SI: DS_WRITE2_B64 {{v[0-9]+}}, {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, 0x0, 0x8
; SI: S_ENDPGM
define void @byte_size_only_divisible_64_write2st64_f64(double addrspace(1)* %C, double addrspace(1)* %in, double addrspace(3)* %lds) #0 {
%x.i = tail call i32 @llvm.r600.read.tidig.x() #1
%in.gep = getelementptr double addrspace(1)* %in, i32 %x.i
%val = load double addrspace(1)* %in.gep, align 8
%arrayidx0 = getelementptr inbounds double addrspace(3)* %lds, i32 %x.i
store double %val, double addrspace(3)* %arrayidx0, align 8
%add.x = add nsw i32 %x.i, 8
%arrayidx1 = getelementptr inbounds double addrspace(3)* %lds, i32 %add.x
store double %val, double addrspace(3)* %arrayidx1, align 8
ret void
}
; Function Attrs: nounwind readnone
declare i32 @llvm.r600.read.tgid.x() #1
; Function Attrs: nounwind readnone
declare i32 @llvm.r600.read.tgid.y() #1
; Function Attrs: nounwind readnone
declare i32 @llvm.r600.read.tidig.x() #1
; Function Attrs: nounwind readnone
declare i32 @llvm.r600.read.tidig.y() #1
; Function Attrs: noduplicate nounwind
declare void @llvm.AMDGPU.barrier.local() #2
attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-realign-stack" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
attributes #1 = { nounwind readnone }
attributes #2 = { noduplicate nounwind }

@@ -1,4 +1,4 @@
- ; RUN: llc -march=r600 -mcpu=bonaire -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
+ ; RUN: llc -march=r600 -mcpu=bonaire -verify-machineinstrs -mattr=+load-store-opt -enable-misched < %s | FileCheck -check-prefix=SI %s
; Test that doing a shift of a pointer with a constant add will be
; folded into the constant offset addressing mode even if the add has
@@ -69,8 +69,7 @@ define void @load_shl_base_lds_max_offset(i8 addrspace(1)* %out, i8 addrspace(3)
; SI-LABEL: {{^}}load_shl_base_lds_2:
; SI: V_LSHLREV_B32_e32 [[PTR:v[0-9]+]], 2, {{v[0-9]+}}
- ; SI-NEXT: DS_READ_B32 {{v[0-9]+}}, [[PTR]], 0x100, [M0]
- ; SI-NEXT: DS_READ_B32 {{v[0-9]+}}, [[PTR]], 0x900, [M0]
+ ; SI-NEXT: DS_READ2ST64_B32 {{v\[[0-9]+:[0-9]+\]}}, [[PTR]], 0x1, 0x9, [M0]
; SI: S_ENDPGM
define void @load_shl_base_lds_2(float addrspace(1)* %out) #0 {
%tid.x = tail call i32 @llvm.r600.read.tidig.x() #1