mirror of https://github.com/c64scene-ar/llvm-6502.git
synced 2024-12-14 11:32:34 +00:00

commit 4d0e8a8a3e
Depending on the number of bits set in the writemask.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Michel Dänzer <michel.daenzer@amd.com>
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@179166 91177308-0d34-0410-b5e6-96231b3b80d8

274 lines · 8.3 KiB · C++
//===-- SIInstrInfo.cpp - SI Instruction Information ---------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief SI Implementation of TargetInstrInfo.
//
//===----------------------------------------------------------------------===//

#include "SIInstrInfo.h"
#include "AMDGPUTargetMachine.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/MC/MCInstrDesc.h"
#include <stdio.h>

using namespace llvm;

SIInstrInfo::SIInstrInfo(AMDGPUTargetMachine &tm)
  : AMDGPUInstrInfo(tm),
    RI(tm, *this)
    { }

const SIRegisterInfo &SIInstrInfo::getRegisterInfo() const {
  return RI;
}

void
SIInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                         MachineBasicBlock::iterator MI, DebugLoc DL,
                         unsigned DestReg, unsigned SrcReg,
                         bool KillSrc) const {

  // If we are trying to copy to or from SCC, there is a bug somewhere else in
  // the backend. While it may be theoretically possible to do this, it should
  // never be necessary.
  assert(DestReg != AMDGPU::SCC && SrcReg != AMDGPU::SCC);
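
  // Zero-terminated lists of subregister indices, used below to expand wide
  // copies into a sequence of 32-bit moves.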
  const int16_t Sub0_15[] = {
    AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, AMDGPU::sub3,
    AMDGPU::sub4, AMDGPU::sub5, AMDGPU::sub6, AMDGPU::sub7,
    AMDGPU::sub8, AMDGPU::sub9, AMDGPU::sub10, AMDGPU::sub11,
    AMDGPU::sub12, AMDGPU::sub13, AMDGPU::sub14, AMDGPU::sub15, 0
  };

  const int16_t Sub0_7[] = {
    AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, AMDGPU::sub3,
    AMDGPU::sub4, AMDGPU::sub5, AMDGPU::sub6, AMDGPU::sub7, 0
  };

  const int16_t Sub0_3[] = {
    AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, AMDGPU::sub3, 0
  };

  const int16_t Sub0_2[] = {
    AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, 0
  };

  const int16_t Sub0_1[] = {
    AMDGPU::sub0, AMDGPU::sub1, 0
  };

  unsigned Opcode;
  const int16_t *SubIndices;
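
  // Before emitting a copy into M0, scan backwards for the most recent
  // definition of M0 in this block; if it was already a COPY or S_MOV_B32
  // from the same source register, the copy is redundant.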
  if (AMDGPU::M0 == DestReg) {
    // Check if M0 isn't already set to this value
    for (MachineBasicBlock::reverse_iterator E = MBB.rend(),
         I = MachineBasicBlock::reverse_iterator(MI); I != E; ++I) {

      if (!I->definesRegister(AMDGPU::M0))
        continue;

      unsigned Opc = I->getOpcode();
      if (Opc != TargetOpcode::COPY && Opc != AMDGPU::S_MOV_B32)
        break;

      if (!I->readsRegister(SrcReg))
        break;

      // The copy isn't necessary
      return;
    }
  }
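
  // Dispatch on the destination register class: 32- and 64-bit SGPR copies
  // and 32-bit VGPR copies are single moves; anything wider is expanded into
  // per-subregister moves below.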
  if (AMDGPU::SReg_32RegClass.contains(DestReg)) {
    assert(AMDGPU::SReg_32RegClass.contains(SrcReg));
    BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B32), DestReg)
            .addReg(SrcReg, getKillRegState(KillSrc));
    return;

  } else if (AMDGPU::SReg_64RegClass.contains(DestReg)) {
    assert(AMDGPU::SReg_64RegClass.contains(SrcReg));
    BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B64), DestReg)
            .addReg(SrcReg, getKillRegState(KillSrc));
    return;

  } else if (AMDGPU::SReg_128RegClass.contains(DestReg)) {
    assert(AMDGPU::SReg_128RegClass.contains(SrcReg));
    Opcode = AMDGPU::S_MOV_B32;
    SubIndices = Sub0_3;

  } else if (AMDGPU::SReg_256RegClass.contains(DestReg)) {
    assert(AMDGPU::SReg_256RegClass.contains(SrcReg));
    Opcode = AMDGPU::S_MOV_B32;
    SubIndices = Sub0_7;

  } else if (AMDGPU::SReg_512RegClass.contains(DestReg)) {
    assert(AMDGPU::SReg_512RegClass.contains(SrcReg));
    Opcode = AMDGPU::S_MOV_B32;
    SubIndices = Sub0_15;

  } else if (AMDGPU::VReg_32RegClass.contains(DestReg)) {
    assert(AMDGPU::VReg_32RegClass.contains(SrcReg) ||
           AMDGPU::SReg_32RegClass.contains(SrcReg));
    BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DestReg)
            .addReg(SrcReg, getKillRegState(KillSrc));
    return;

  } else if (AMDGPU::VReg_64RegClass.contains(DestReg)) {
    assert(AMDGPU::VReg_64RegClass.contains(SrcReg) ||
           AMDGPU::SReg_64RegClass.contains(SrcReg));
    Opcode = AMDGPU::V_MOV_B32_e32;
    SubIndices = Sub0_1;

  } else if (AMDGPU::VReg_96RegClass.contains(DestReg)) {
    assert(AMDGPU::VReg_96RegClass.contains(SrcReg));
    Opcode = AMDGPU::V_MOV_B32_e32;
    SubIndices = Sub0_2;

  } else if (AMDGPU::VReg_128RegClass.contains(DestReg)) {
    assert(AMDGPU::VReg_128RegClass.contains(SrcReg) ||
           AMDGPU::SReg_128RegClass.contains(SrcReg));
    Opcode = AMDGPU::V_MOV_B32_e32;
    SubIndices = Sub0_3;

  } else if (AMDGPU::VReg_256RegClass.contains(DestReg)) {
    assert(AMDGPU::VReg_256RegClass.contains(SrcReg) ||
           AMDGPU::SReg_256RegClass.contains(SrcReg));
    Opcode = AMDGPU::V_MOV_B32_e32;
    SubIndices = Sub0_7;

  } else if (AMDGPU::VReg_512RegClass.contains(DestReg)) {
    assert(AMDGPU::VReg_512RegClass.contains(SrcReg) ||
           AMDGPU::SReg_512RegClass.contains(SrcReg));
    Opcode = AMDGPU::V_MOV_B32_e32;
    SubIndices = Sub0_15;

  } else {
    llvm_unreachable("Can't copy register!");
  }
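
  // Expand the wide copy one 32-bit subregister at a time. Every move except
  // the last also carries an implicit def of the full destination register so
  // the super-register is treated as defined by the whole sequence.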
  while (unsigned SubIdx = *SubIndices++) {
    MachineInstrBuilder Builder = BuildMI(MBB, MI, DL,
                                          get(Opcode), RI.getSubReg(DestReg, SubIdx));

    Builder.addReg(RI.getSubReg(SrcReg, SubIdx), getKillRegState(KillSrc));

    if (*SubIndices)
      Builder.addReg(DestReg, RegState::Define | RegState::Implicit);
  }
}
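
// Return the commuted (operand-swapped) form of \p Opcode if one is defined
// in the commutation tables, or \p Opcode itself when no commuted variant
// exists.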
unsigned SIInstrInfo::commuteOpcode(unsigned Opcode) const {

  int NewOpc;

  // Try to map original to commuted opcode
  if ((NewOpc = AMDGPU::getCommuteRev(Opcode)) != -1)
    return NewOpc;

  // Try to map commuted to original opcode
  if ((NewOpc = AMDGPU::getCommuteOrig(Opcode)) != -1)
    return NewOpc;

  return Opcode;
}
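
// Commute the source operands of \p MI. Commutation is only attempted when
// both of the first two source operands are registers; otherwise null is
// returned.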
MachineInstr *SIInstrInfo::commuteInstruction(MachineInstr *MI,
                                              bool NewMI) const {

  if (MI->getNumOperands() < 3 || !MI->getOperand(1).isReg() ||
      !MI->getOperand(2).isReg())
    return 0;

  MI = TargetInstrInfo::commuteInstruction(MI, NewMI);

  if (MI)
    MI->setDesc(get(commuteOpcode(MI->getOpcode())));

  return MI;
}
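
// Create (but do not insert into any basic block) a V_MOV_B32_e32 that
// materializes the immediate \p Imm into \p DstReg.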
MachineInstr *SIInstrInfo::getMovImmInstr(MachineFunction *MF, unsigned DstReg,
                                          int64_t Imm) const {
  MachineInstr *MI = MF->CreateMachineInstr(get(AMDGPU::V_MOV_B32_e32), DebugLoc());
  MachineInstrBuilder MIB(*MF, MI);
  MIB.addReg(DstReg, RegState::Define);
  MIB.addImm(Imm);

  return MI;
}
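
// Return true if \p Opcode is one of the plain 32- or 64-bit scalar or
// vector register moves.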
bool SIInstrInfo::isMov(unsigned Opcode) const {
  switch(Opcode) {
  default: return false;
  case AMDGPU::S_MOV_B32:
  case AMDGPU::S_MOV_B64:
  case AMDGPU::V_MOV_B32_e32:
  case AMDGPU::V_MOV_B32_e64:
    return true;
  }
}
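
// Definitions of EXEC change which lanes are active, so instructions that
// define it must never be moved; defs of any other register class are fine.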
bool
SIInstrInfo::isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const {
  return RC != &AMDGPU::EXECRegRegClass;
}

//===----------------------------------------------------------------------===//
// Indirect addressing callbacks
//===----------------------------------------------------------------------===//

unsigned SIInstrInfo::calculateIndirectAddress(unsigned RegIndex,
                                               unsigned Channel) const {
  assert(Channel == 0);
  return RegIndex;
}
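
// Indirect addressing is not implemented for SI yet; the remaining hooks
// abort if they are ever called.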

int SIInstrInfo::getIndirectIndexBegin(const MachineFunction &MF) const {
  llvm_unreachable("Unimplemented");
}

int SIInstrInfo::getIndirectIndexEnd(const MachineFunction &MF) const {
  llvm_unreachable("Unimplemented");
}

const TargetRegisterClass *SIInstrInfo::getIndirectAddrStoreRegClass(
                                                     unsigned SourceReg) const {
  llvm_unreachable("Unimplemented");
}

const TargetRegisterClass *SIInstrInfo::getIndirectAddrLoadRegClass() const {
  llvm_unreachable("Unimplemented");
}

MachineInstrBuilder SIInstrInfo::buildIndirectWrite(
                                   MachineBasicBlock *MBB,
                                   MachineBasicBlock::iterator I,
                                   unsigned ValueReg,
                                   unsigned Address, unsigned OffsetReg) const {
  llvm_unreachable("Unimplemented");
}

MachineInstrBuilder SIInstrInfo::buildIndirectRead(
                                   MachineBasicBlock *MBB,
                                   MachineBasicBlock::iterator I,
                                   unsigned ValueReg,
                                   unsigned Address, unsigned OffsetReg) const {
  llvm_unreachable("Unimplemented");
}

const TargetRegisterClass *SIInstrInfo::getSuperIndirectRegClass() const {
  llvm_unreachable("Unimplemented");
}