Migrate NVPTXISelLowering to take the subtarget that it's dependent
upon as an argument, and store/use that throughout the class.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@227541 91177308-0d34-0410-b5e6-96231b3b80d8
Eric Christopher, 2015-01-30 01:50:07 +00:00
commit b42dc65111 (parent f66d626182)
3 changed files with 19 additions and 18 deletions
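In outline, the change replaces on-the-fly subtarget lookups (TM.getSubtarget<NVPTXSubtarget>(), DAG.getSubtarget()) with a single reference that is passed to the constructor and cached. Below is a minimal, self-contained sketch of that pattern; the Demo* names are hypothetical stand-ins, not the real LLVM classes.

    #include <cassert>

    // Hypothetical stand-in for NVPTXSubtarget: holds per-subtarget facts.
    struct DemoSubtarget {
      unsigned SmVersion;
      explicit DemoSubtarget(unsigned SM) : SmVersion(SM) {}
      unsigned getSmVersion() const { return SmVersion; }
    };

    // Hypothetical stand-in for NVPTXTargetMachine.
    struct DemoTargetMachine {};

    // The lowering object is handed the subtarget it depends on at
    // construction and caches the reference, instead of re-querying it
    // through the target machine on every use.
    class DemoTargetLowering {
    public:
      DemoTargetLowering(const DemoTargetMachine &TM, const DemoSubtarget &STI)
          : STI(STI) { (void)TM; }

      // Subtarget-dependent decisions read the cached reference directly.
      bool useFunnelShift(unsigned VTBits) const {
        return VTBits == 32 && STI.getSmVersion() >= 35;
      }

    private:
      const DemoSubtarget &STI; // cache the subtarget here
    };

    int main() {
      DemoTargetMachine TM;
      DemoSubtarget STI(35); // e.g. sm_35
      DemoTargetLowering TLI(TM, STI);
      assert(TLI.useFunnelShift(32) && "sm_35 has the 32-bit funnel shift");
      return 0;
    }

Beyond removing repeated getSubtarget<> calls, threading the subtarget through the constructor means the lowering no longer assumes a single subtarget per TargetMachine, which is what later per-function subtarget selection relies on.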

--- a/lib/Target/NVPTX/NVPTXISelLowering.cpp
+++ b/lib/Target/NVPTX/NVPTXISelLowering.cpp

@@ -106,9 +106,9 @@ static void ComputePTXValueVTs(const TargetLowering &TLI, Type *Ty,
 }
 
 // NVPTXTargetLowering Constructor.
-NVPTXTargetLowering::NVPTXTargetLowering(const NVPTXTargetMachine &TM)
-    : TargetLowering(TM), nvTM(&TM),
-      nvptxSubtarget(TM.getSubtarget<NVPTXSubtarget>()) {
+NVPTXTargetLowering::NVPTXTargetLowering(const NVPTXTargetMachine &TM,
+                                         const NVPTXSubtarget &STI)
+    : TargetLowering(TM), nvTM(&TM), STI(STI) {
 
   // always lower memset, memcpy, and memmove intrinsics to load/store
   // instructions, rather
@@ -167,14 +167,14 @@ NVPTXTargetLowering::NVPTXTargetLowering(const NVPTXTargetMachine &TM)
   setOperationAction(ISD::SRA_PARTS, MVT::i64 , Custom);
   setOperationAction(ISD::SRL_PARTS, MVT::i64 , Custom);
 
-  if (nvptxSubtarget.hasROT64()) {
+  if (STI.hasROT64()) {
     setOperationAction(ISD::ROTL, MVT::i64, Legal);
     setOperationAction(ISD::ROTR, MVT::i64, Legal);
   } else {
     setOperationAction(ISD::ROTL, MVT::i64, Expand);
     setOperationAction(ISD::ROTR, MVT::i64, Expand);
   }
-  if (nvptxSubtarget.hasROT32()) {
+  if (STI.hasROT32()) {
     setOperationAction(ISD::ROTL, MVT::i32, Legal);
     setOperationAction(ISD::ROTR, MVT::i32, Legal);
   } else {
@@ -879,7 +879,7 @@ NVPTXTargetLowering::getPrototype(Type *retTy, const ArgListTy &Args,
                                   unsigned retAlignment,
                                   const ImmutableCallSite *CS) const {
 
-  bool isABI = (nvptxSubtarget.getSmVersion() >= 20);
+  bool isABI = (STI.getSmVersion() >= 20);
   assert(isABI && "Non-ABI compilation is not supported");
   if (!isABI)
     return "";
@@ -1044,7 +1044,7 @@ SDValue NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
   Type *retTy = CLI.RetTy;
   ImmutableCallSite *CS = CLI.CS;
 
-  bool isABI = (nvptxSubtarget.getSmVersion() >= 20);
+  bool isABI = (STI.getSmVersion() >= 20);
   assert(isABI && "Non-ABI compilation is not supported");
   if (!isABI)
     return Chain;
@@ -1455,8 +1455,8 @@ SDValue NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
       EVT ObjectVT = getValueType(retTy);
       unsigned NumElts = ObjectVT.getVectorNumElements();
       EVT EltVT = ObjectVT.getVectorElementType();
-      assert(nvTM->getSubtargetImpl()->getTargetLowering()->getNumRegisters(
-                 F->getContext(), ObjectVT) == NumElts &&
+      assert(STI.getTargetLowering()->getNumRegisters(F->getContext(),
+                                                      ObjectVT) == NumElts &&
              "Vector was not scalarized");
       unsigned sz = EltVT.getSizeInBits();
       bool needTruncate = sz < 8 ? true : false;
@@ -1678,7 +1678,7 @@ SDValue NVPTXTargetLowering::LowerShiftRightParts(SDValue Op,
   SDValue ShAmt = Op.getOperand(2);
   unsigned Opc = (Op.getOpcode() == ISD::SRA_PARTS) ? ISD::SRA : ISD::SRL;
 
-  if (VTBits == 32 && nvptxSubtarget.getSmVersion() >= 35) {
+  if (VTBits == 32 && STI.getSmVersion() >= 35) {
 
     // For 32bit and sm35, we can use the funnel shift 'shf' instruction.
     // {dHi, dLo} = {aHi, aLo} >> Amt
@@ -1738,7 +1738,7 @@ SDValue NVPTXTargetLowering::LowerShiftLeftParts(SDValue Op,
   SDValue ShOpHi = Op.getOperand(1);
   SDValue ShAmt = Op.getOperand(2);
 
-  if (VTBits == 32 && nvptxSubtarget.getSmVersion() >= 35) {
+  if (VTBits == 32 && STI.getSmVersion() >= 35) {
 
     // For 32bit and sm35, we can use the funnel shift 'shf' instruction.
     // {dHi, dLo} = {aHi, aLo} << Amt
@@ -2050,13 +2050,13 @@ SDValue NVPTXTargetLowering::LowerFormalArguments(
   const Function *F = MF.getFunction();
   const AttributeSet &PAL = F->getAttributes();
-  const TargetLowering *TLI = DAG.getSubtarget().getTargetLowering();
+  const TargetLowering *TLI = STI.getTargetLowering();
 
   SDValue Root = DAG.getRoot();
   std::vector<SDValue> OutChains;
 
   bool isKernel = llvm::isKernelFunction(*F);
-  bool isABI = (nvptxSubtarget.getSmVersion() >= 20);
+  bool isABI = (STI.getSmVersion() >= 20);
   assert(isABI && "Non-ABI compilation is not supported");
   if (!isABI)
     return Chain;
@@ -2354,7 +2354,7 @@ NVPTXTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
   Type *RetTy = F->getReturnType();
   const DataLayout *TD = getDataLayout();
 
-  bool isABI = (nvptxSubtarget.getSmVersion() >= 20);
+  bool isABI = (STI.getSmVersion() >= 20);
   assert(isABI && "Non-ABI compilation is not supported");
   if (!isABI)
     return Chain;
@@ -4217,7 +4217,7 @@ SDValue NVPTXTargetLowering::PerformDAGCombine(SDNode *N,
     default: break;
     case ISD::ADD:
     case ISD::FADD:
-      return PerformADDCombine(N, DCI, nvptxSubtarget, OptLevel);
+      return PerformADDCombine(N, DCI, STI, OptLevel);
     case ISD::MUL:
       return PerformMULCombine(N, DCI, OptLevel);
     case ISD::SHL:

--- a/lib/Target/NVPTX/NVPTXISelLowering.h
+++ b/lib/Target/NVPTX/NVPTXISelLowering.h

@@ -436,7 +436,8 @@ class NVPTXSubtarget;
 //===--------------------------------------------------------------------===//
 
 class NVPTXTargetLowering : public TargetLowering {
 public:
-  explicit NVPTXTargetLowering(const NVPTXTargetMachine &TM);
+  explicit NVPTXTargetLowering(const NVPTXTargetMachine &TM,
+                               const NVPTXSubtarget &STI);
   SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;
   SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
@@ -510,7 +511,7 @@ public:
   bool enableAggressiveFMAFusion(EVT VT) const override { return true; }
 
 private:
-  const NVPTXSubtarget &nvptxSubtarget; // cache the subtarget here
+  const NVPTXSubtarget &STI; // cache the subtarget here
 
   SDValue getExtSymb(SelectionDAG &DAG, const char *name, int idx,
                      EVT = MVT::i32) const;

--- a/lib/Target/NVPTX/NVPTXSubtarget.cpp
+++ b/lib/Target/NVPTX/NVPTXSubtarget.cpp

@@ -47,7 +47,7 @@ NVPTXSubtarget::NVPTXSubtarget(const std::string &TT, const std::string &CPU,
                                bool is64Bit)
     : NVPTXGenSubtargetInfo(TT, CPU, FS), Is64Bit(is64Bit), PTXVersion(0),
       SmVersion(20), InstrInfo(initializeSubtargetDependencies(CPU, FS)),
-      TLInfo((const NVPTXTargetMachine &)TM), TSInfo(TM.getDataLayout()),
+      TLInfo((const NVPTXTargetMachine &)TM, *this), TSInfo(TM.getDataLayout()),
       FrameLowering(*this) {
 
   Triple T(TT);