Mirror of https://github.com/c64scene-ar/llvm-6502.git (synced 2025-01-31 10:34:17 +00:00)
Redirect DataLayout from TargetMachine to Module in SelectionDAG
Summary: SelectionDAG itself no longer accesses the DataLayout through the TargetMachine directly, but the TargetLowering class still does; I'll address that in a follow-up commit. This change is part of a series of commits dedicated to having a single DataLayout during compilation, always using the one owned by the Module.

Reviewers: echristo

Subscribers: llvm-commits

Differential Revision: http://reviews.llvm.org/D11000

From: Mehdi Amini <mehdi.amini@apple.com>

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@241618 91177308-0d34-0410-b5e6-96231b3b80d8
This commit is contained in:
Parent: 51f5a1a8fe
Commit: 298a718c94
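The hunks below apply one mechanical rewrite across the SelectionDAG code: queries that used to reach the DataLayout through TargetLowering (or the TargetMachine) now go through SelectionDAG::getDataLayout(), which forwards to the DataLayout owned by the Module via MachineFunction::getDataLayout(). As a minimal sketch of the before/after pattern (illustrative only, not code from this commit; the helper names getABIAlignForVT and storesLowByteFirst are hypothetical):

// Illustrative sketch only -- not part of the patch.
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/DataLayout.h"

using namespace llvm;

// ABI alignment for a value type: previously written as
//   TLI.getDataLayout()->getABITypeAlignment(Ty)
// After this patch the DAG hands back the Module's DataLayout directly.
static unsigned getABIAlignForVT(SelectionDAG &DAG, EVT VT) {
  Type *Ty = VT.getTypeForEVT(*DAG.getContext());
  return DAG.getDataLayout().getABITypeAlignment(Ty);
}

// Endianness checks follow the same redirection:
//   TLI.isLittleEndian()  ->  DAG.getDataLayout().isLittleEndian()
static bool storesLowByteFirst(const SelectionDAG &DAG) {
  return DAG.getDataLayout().isLittleEndian();
}

The design point, per the summary above, is that there is exactly one DataLayout per compilation — the Module's — so TargetMachine/TargetLowering stop being an alternate source for it.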
@@ -281,6 +281,7 @@ public:
   void clear();
 
   MachineFunction &getMachineFunction() const { return *MF; }
+  const DataLayout &getDataLayout() const { return MF->getDataLayout(); }
   const TargetMachine &getTarget() const { return TM; }
   const TargetSubtargetInfo &getSubtarget() const { return MF->getSubtarget(); }
   const TargetLowering &getTargetLoweringInfo() const { return *TLI; }
@@ -2414,6 +2414,7 @@ public:
     ArgListTy &getArgs() {
       return Args;
     }
+
   };
 
   /// This function lowers an abstract call to a function into an actual call.

@@ -2657,7 +2658,8 @@ public:
   /// specific constraints and their prefixes, and also tie in the associated
   /// operand values. If this returns an empty vector, and if the constraint
   /// string itself isn't empty, there was an error parsing.
-  virtual AsmOperandInfoVector ParseConstraints(const TargetRegisterInfo *TRI,
+  virtual AsmOperandInfoVector ParseConstraints(const DataLayout &DL,
+                                                const TargetRegisterInfo *TRI,
                                                 ImmutableCallSite CS) const;
 
   /// Examine constraint type and operand type and determine a weight value.
@@ -2986,7 +2986,8 @@ static bool IsOperandAMemoryOperand(CallInst *CI, InlineAsm *IA, Value *OpVal,
   const TargetLowering *TLI = TM.getSubtargetImpl(*F)->getTargetLowering();
   const TargetRegisterInfo *TRI = TM.getSubtargetImpl(*F)->getRegisterInfo();
   TargetLowering::AsmOperandInfoVector TargetConstraints =
-      TLI->ParseConstraints(TRI, ImmutableCallSite(CI));
+      TLI->ParseConstraints(F->getParent()->getDataLayout(), TRI,
+                            ImmutableCallSite(CI));
   for (unsigned i = 0, e = TargetConstraints.size(); i != e; ++i) {
     TargetLowering::AsmOperandInfo &OpInfo = TargetConstraints[i];
 

@@ -3547,8 +3548,8 @@ bool CodeGenPrepare::OptimizeInlineAsmInst(CallInst *CS) {
 
   const TargetRegisterInfo *TRI =
       TM->getSubtargetImpl(*CS->getParent()->getParent())->getRegisterInfo();
-  TargetLowering::AsmOperandInfoVector
-      TargetConstraints = TLI->ParseConstraints(TRI, CS);
+  TargetLowering::AsmOperandInfoVector TargetConstraints =
+      TLI->ParseConstraints(*DL, TRI, CS);
   unsigned ArgNo = 0;
   for (unsigned i = 0, e = TargetConstraints.size(); i != e; ++i) {
     TargetLowering::AsmOperandInfo &OpInfo = TargetConstraints[i];
@@ -3111,7 +3111,7 @@ SDValue DAGCombiner::visitAND(SDNode *N) {
   // For big endian targets, we need to add an offset to the pointer
   // to load the correct bytes. For little endian systems, we merely
   // need to read fewer bytes from the same pointer.
-  if (TLI.isBigEndian()) {
+  if (DAG.getDataLayout().isBigEndian()) {
     unsigned LVTStoreBytes = LoadedVT.getStoreSize();
     unsigned EVTStoreBytes = ExtVT.getStoreSize();
     unsigned PtrOff = LVTStoreBytes - EVTStoreBytes;

@@ -6675,7 +6675,7 @@ SDValue DAGCombiner::ReduceLoadWidth(SDNode *N) {
 
   // For big endian targets, we need to adjust the offset to the pointer to
   // load the correct bytes.
-  if (TLI.isBigEndian()) {
+  if (DAG.getDataLayout().isBigEndian()) {
     unsigned LVTStoreBits = LN0->getMemoryVT().getStoreSizeInBits();
     unsigned EVTStoreBits = ExtVT.getStoreSizeInBits();
     ShAmt = LVTStoreBits - EVTStoreBits - ShAmt;

@@ -6873,7 +6873,7 @@ SDValue DAGCombiner::visitSIGN_EXTEND_VECTOR_INREG(SDNode *N) {
 SDValue DAGCombiner::visitTRUNCATE(SDNode *N) {
   SDValue N0 = N->getOperand(0);
   EVT VT = N->getValueType(0);
-  bool isLE = TLI.isLittleEndian();
+  bool isLE = DAG.getDataLayout().isLittleEndian();
 
   // noop truncate
   if (N0.getValueType() == N->getValueType(0))

@@ -7093,8 +7093,8 @@ SDValue DAGCombiner::CombineConsecutiveLoads(SDNode *N, EVT VT) {
       !LD2->isVolatile() &&
       DAG.isConsecutiveLoad(LD2, LD1, LD1VT.getSizeInBits()/8, 1)) {
     unsigned Align = LD1->getAlignment();
-    unsigned NewAlign = TLI.getDataLayout()->
-      getABITypeAlignment(VT.getTypeForEVT(*DAG.getContext()));
+    unsigned NewAlign = DAG.getDataLayout().getABITypeAlignment(
+        VT.getTypeForEVT(*DAG.getContext()));
 
     if (NewAlign <= Align &&
         (!LegalOperations || TLI.isOperationLegal(ISD::LOAD, VT)))

@@ -7155,8 +7155,8 @@ SDValue DAGCombiner::visitBITCAST(SDNode *N) {
       (!LegalOperations || TLI.isOperationLegal(ISD::LOAD, VT)) &&
       TLI.isLoadBitCastBeneficial(N0.getValueType(), VT)) {
     LoadSDNode *LN0 = cast<LoadSDNode>(N0);
-    unsigned Align = TLI.getDataLayout()->
-      getABITypeAlignment(VT.getTypeForEVT(*DAG.getContext()));
+    unsigned Align = DAG.getDataLayout().getABITypeAlignment(
+        VT.getTypeForEVT(*DAG.getContext()));
     unsigned OrigAlign = LN0->getAlignment();
 
     if (Align <= OrigAlign) {

@@ -7368,7 +7368,7 @@ ConstantFoldBITCASTofBUILD_VECTOR(SDNode *BV, EVT DstEltVT) {
     SmallVector<SDValue, 8> Ops;
     for (unsigned i = 0, e = BV->getNumOperands(); i != e;
          i += NumInputsPerOutput) {
-      bool isLE = TLI.isLittleEndian();
+      bool isLE = DAG.getDataLayout().isLittleEndian();
       APInt NewBits = APInt(DstBitSize, 0);
       bool EltIsUndef = true;
       for (unsigned j = 0; j != NumInputsPerOutput; ++j) {

@@ -7415,7 +7415,7 @@ ConstantFoldBITCASTofBUILD_VECTOR(SDNode *BV, EVT DstEltVT) {
     }
 
     // For big endian targets, swap the order of the pieces of each element.
-    if (TLI.isBigEndian())
+    if (DAG.getDataLayout().isBigEndian())
       std::reverse(Ops.end()-NumOutputsPerInput, Ops.end());
   }
 

@@ -9869,8 +9869,7 @@ struct LoadedSlice {
   /// \pre DAG != nullptr.
   uint64_t getOffsetFromBase() const {
     assert(DAG && "Missing context.");
-    bool IsBigEndian =
-        DAG->getTargetLoweringInfo().getDataLayout()->isBigEndian();
+    bool IsBigEndian = DAG->getDataLayout().isBigEndian();
     assert(!(Shift & 0x7) && "Shifts not aligned on Bytes are not supported.");
     uint64_t Offset = Shift / 8;
     unsigned TySizeInBytes = Origin->getValueSizeInBits(0) / 8;

@@ -9953,7 +9952,7 @@ struct LoadedSlice {
 
     // Check if it will be merged with the load.
     // 1. Check the alignment constraint.
-    unsigned RequiredAlignment = TLI.getDataLayout()->getABITypeAlignment(
+    unsigned RequiredAlignment = DAG->getDataLayout().getABITypeAlignment(
         ResVT.getTypeForEVT(*DAG->getContext()));
 
     if (RequiredAlignment > getAlignment())

@@ -10321,7 +10320,7 @@ ShrinkLoadReplaceStoreWithStore(const std::pair<unsigned, unsigned> &MaskInfo,
   unsigned StOffset;
   unsigned NewAlign = St->getAlignment();
 
-  if (DAG.getTargetLoweringInfo().isLittleEndian())
+  if (DAG.getDataLayout().isLittleEndian())
     StOffset = ByteShift;
   else
     StOffset = IVal.getValueType().getStoreSize() - ByteShift - NumBytes;

@@ -10434,12 +10433,12 @@ SDValue DAGCombiner::ReduceLoadOpStoreWidth(SDNode *N) {
     uint64_t PtrOff = ShAmt / 8;
     // For big endian targets, we need to adjust the offset to the pointer to
     // load the correct bytes.
-    if (TLI.isBigEndian())
+    if (DAG.getDataLayout().isBigEndian())
       PtrOff = (BitWidth + 7 - NewBW) / 8 - PtrOff;
 
     unsigned NewAlign = MinAlign(LD->getAlignment(), PtrOff);
     Type *NewVTTy = NewVT.getTypeForEVT(*DAG.getContext());
-    if (NewAlign < TLI.getDataLayout()->getABITypeAlignment(NewVTTy))
+    if (NewAlign < DAG.getDataLayout().getABITypeAlignment(NewVTTy))
       return SDValue();
 
     SDValue NewPtr = DAG.getNode(ISD::ADD, SDLoc(LD),

@@ -10503,7 +10502,7 @@ SDValue DAGCombiner::TransformFPLoadStorePair(SDNode *N) {
     unsigned LDAlign = LD->getAlignment();
     unsigned STAlign = ST->getAlignment();
     Type *IntVTTy = IntVT.getTypeForEVT(*DAG.getContext());
-    unsigned ABIAlign = TLI.getDataLayout()->getABITypeAlignment(IntVTTy);
+    unsigned ABIAlign = DAG.getDataLayout().getABITypeAlignment(IntVTTy);
     if (LDAlign < ABIAlign || STAlign < ABIAlign)
       return SDValue();
 

@@ -10685,7 +10684,7 @@ bool DAGCombiner::MergeStoresOfConstantsOrVecElts(
 
     // Construct a single integer constant which is made of the smaller
    // constant inputs.
-    bool IsLE = TLI.isLittleEndian();
+    bool IsLE = DAG.getDataLayout().isLittleEndian();
     for (unsigned i = 0; i < NumElem ; ++i) {
       unsigned Idx = IsLE ? (NumElem - 1 - i) : i;
       StoreSDNode *St = cast<StoreSDNode>(StoreNodes[Idx].MemNode);

@@ -10743,7 +10742,7 @@ static bool allowableAlignment(const SelectionDAG &DAG,
     return true;
 
   Type *Ty = EVTTy.getTypeForEVT(*DAG.getContext());
-  unsigned ABIAlignment = TLI.getDataLayout()->getPrefTypeAlignment(Ty);
+  unsigned ABIAlignment = DAG.getDataLayout().getPrefTypeAlignment(Ty);
   return (Align >= ABIAlignment);
 }
 

@@ -11205,8 +11204,8 @@ SDValue DAGCombiner::visitSTORE(SDNode *N) {
       ST->isUnindexed()) {
     unsigned OrigAlign = ST->getAlignment();
     EVT SVT = Value.getOperand(0).getValueType();
-    unsigned Align = TLI.getDataLayout()->
-      getABITypeAlignment(SVT.getTypeForEVT(*DAG.getContext()));
+    unsigned Align = DAG.getDataLayout().getABITypeAlignment(
+        SVT.getTypeForEVT(*DAG.getContext()));
     if (Align <= OrigAlign &&
         ((!LegalOperations && !ST->isVolatile()) ||
          TLI.isOperationLegalOrCustom(ISD::STORE, SVT)))

@@ -11265,7 +11264,8 @@ SDValue DAGCombiner::visitSTORE(SDNode *N) {
       uint64_t Val = CFP->getValueAPF().bitcastToAPInt().getZExtValue();
       SDValue Lo = DAG.getConstant(Val & 0xFFFFFFFF, SDLoc(CFP), MVT::i32);
       SDValue Hi = DAG.getConstant(Val >> 32, SDLoc(CFP), MVT::i32);
-      if (TLI.isBigEndian()) std::swap(Lo, Hi);
+      if (DAG.getDataLayout().isBigEndian())
+        std::swap(Lo, Hi);
 
       unsigned Alignment = ST->getAlignment();
       bool isVolatile = ST->isVolatile();

@@ -11514,7 +11514,7 @@ SDValue DAGCombiner::ReplaceExtractVectorEltOfLoadWithNarrowedLoad(
   EVT ResultVT = EVE->getValueType(0);
   EVT VecEltVT = InVecVT.getVectorElementType();
   unsigned Align = OriginalLoad->getAlignment();
-  unsigned NewAlign = TLI.getDataLayout()->getABITypeAlignment(
+  unsigned NewAlign = DAG.getDataLayout().getABITypeAlignment(
       VecEltVT.getTypeForEVT(*DAG.getContext()));
 
   if (NewAlign > Align || !TLI.isOperationLegalOrCustom(ISD::LOAD, VecEltVT))

@@ -11825,7 +11825,7 @@ SDValue DAGCombiner::reduceBuildVecExtToExtBuildVec(SDNode *N) {
   if (!ValidTypes)
     return SDValue();
 
-  bool isLE = TLI.isLittleEndian();
+  bool isLE = DAG.getDataLayout().isLittleEndian();
   unsigned ElemRatio = OutScalarTy.getSizeInBits()/SourceType.getSizeInBits();
   assert(ElemRatio > 1 && "Invalid element size ratio");
   SDValue Filler = AllAnyExt ? DAG.getUNDEF(SourceType):

@@ -13354,7 +13354,7 @@ SDValue DAGCombiner::SimplifySelectCC(SDLoc DL, SDValue N0, SDValue N1,
       const_cast<ConstantFP*>(TV->getConstantFPValue())
     };
     Type *FPTy = Elts[0]->getType();
-    const DataLayout &TD = *TLI.getDataLayout();
+    const DataLayout &TD = DAG.getDataLayout();
 
     // Create a ConstantArray of the two constants.
     Constant *CA = ConstantArray::get(ArrayType::get(FPTy, 2), Elts);
@@ -106,9 +106,9 @@ void FunctionLoweringInfo::set(const Function &fn, MachineFunction &mf,
       if (AI->isStaticAlloca()) {
         const ConstantInt *CUI = cast<ConstantInt>(AI->getArraySize());
         Type *Ty = AI->getAllocatedType();
-        uint64_t TySize = TLI->getDataLayout()->getTypeAllocSize(Ty);
+        uint64_t TySize = MF->getDataLayout().getTypeAllocSize(Ty);
         unsigned Align =
-            std::max((unsigned)TLI->getDataLayout()->getPrefTypeAlignment(Ty),
+            std::max((unsigned)MF->getDataLayout().getPrefTypeAlignment(Ty),
                      AI->getAlignment());
 
         TySize *= CUI->getZExtValue(); // Get total allocated size.

@@ -118,10 +118,10 @@ void FunctionLoweringInfo::set(const Function &fn, MachineFunction &mf,
           MF->getFrameInfo()->CreateStackObject(TySize, Align, false, AI);
 
       } else {
-        unsigned Align = std::max(
-            (unsigned)TLI->getDataLayout()->getPrefTypeAlignment(
-                AI->getAllocatedType()),
-            AI->getAlignment());
+        unsigned Align =
+            std::max((unsigned)MF->getDataLayout().getPrefTypeAlignment(
+                         AI->getAllocatedType()),
+                     AI->getAlignment());
         unsigned StackAlign =
             MF->getSubtarget().getFrameLowering()->getStackAlignment();
         if (Align <= StackAlign)

@@ -138,7 +138,7 @@ void FunctionLoweringInfo::set(const Function &fn, MachineFunction &mf,
       unsigned SP = TLI->getStackPointerRegisterToSaveRestore();
       const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
       std::vector<TargetLowering::AsmOperandInfo> Ops =
-          TLI->ParseConstraints(TRI, CS);
+          TLI->ParseConstraints(Fn->getParent()->getDataLayout(), TRI, CS);
       for (size_t I = 0, E = Ops.size(); I != E; ++I) {
         TargetLowering::AsmOperandInfo &Op = Ops[I];
         if (Op.Type == InlineAsm::isClobber) {
@@ -406,10 +406,10 @@ void InstrEmitter::AddOperand(MachineInstrBuilder &MIB,
     Type *Type = CP->getType();
     // MachineConstantPool wants an explicit alignment.
     if (Align == 0) {
-      Align = MF->getTarget().getDataLayout()->getPrefTypeAlignment(Type);
+      Align = MF->getDataLayout().getPrefTypeAlignment(Type);
       if (Align == 0) {
         // Alignment of vector types. FIXME!
-        Align = MF->getTarget().getDataLayout()->getTypeAllocSize(Type);
+        Align = MF->getDataLayout().getTypeAllocSize(Type);
       }
     }
 
@@ -392,17 +392,18 @@ static void ExpandUnalignedStore(StoreSDNode *ST, SelectionDAG &DAG,
 
   // Store the two parts
   SDValue Store1, Store2;
-  Store1 = DAG.getTruncStore(Chain, dl, TLI.isLittleEndian()?Lo:Hi, Ptr,
-                             ST->getPointerInfo(), NewStoredVT,
+  Store1 = DAG.getTruncStore(Chain, dl,
+                             DAG.getDataLayout().isLittleEndian() ? Lo : Hi,
+                             Ptr, ST->getPointerInfo(), NewStoredVT,
                              ST->isVolatile(), ST->isNonTemporal(), Alignment);
 
   Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr,
                     DAG.getConstant(IncrementSize, dl, TLI.getPointerTy(AS)));
   Alignment = MinAlign(Alignment, IncrementSize);
-  Store2 = DAG.getTruncStore(Chain, dl, TLI.isLittleEndian()?Hi:Lo, Ptr,
-                             ST->getPointerInfo().getWithOffset(IncrementSize),
-                             NewStoredVT, ST->isVolatile(), ST->isNonTemporal(),
-                             Alignment, ST->getAAInfo());
+  Store2 = DAG.getTruncStore(
+      Chain, dl, DAG.getDataLayout().isLittleEndian() ? Hi : Lo, Ptr,
+      ST->getPointerInfo().getWithOffset(IncrementSize), NewStoredVT,
+      ST->isVolatile(), ST->isNonTemporal(), Alignment, ST->getAAInfo());
 
   SDValue Result =
     DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Store1, Store2);

@@ -522,7 +523,7 @@ ExpandUnalignedLoad(LoadSDNode *LD, SelectionDAG &DAG,
 
   // Load the value in two parts
   SDValue Lo, Hi;
-  if (TLI.isLittleEndian()) {
+  if (DAG.getDataLayout().isLittleEndian()) {
     Lo = DAG.getExtLoad(ISD::ZEXTLOAD, dl, VT, Chain, Ptr, LD->getPointerInfo(),
                         NewLoadedVT, LD->isVolatile(),
                         LD->isNonTemporal(), LD->isInvariant(), Alignment,

@@ -677,7 +678,8 @@ SDValue SelectionDAGLegalize::OptimizeFloatStore(StoreSDNode* ST) {
       const APInt &IntVal = CFP->getValueAPF().bitcastToAPInt();
       SDValue Lo = DAG.getConstant(IntVal.trunc(32), dl, MVT::i32);
       SDValue Hi = DAG.getConstant(IntVal.lshr(32).trunc(32), dl, MVT::i32);
-      if (TLI.isBigEndian()) std::swap(Lo, Hi);
+      if (DAG.getDataLayout().isBigEndian())
+        std::swap(Lo, Hi);
 
       Lo = DAG.getStore(Chain, dl, Lo, Ptr, ST->getPointerInfo(), isVolatile,
                         isNonTemporal, Alignment, AAInfo);

@@ -724,7 +726,7 @@ void SelectionDAGLegalize::LegalizeStoreOps(SDNode *Node) {
         unsigned Align = ST->getAlignment();
         if (!TLI.allowsMisalignedMemoryAccesses(ST->getMemoryVT(), AS, Align)) {
           Type *Ty = ST->getMemoryVT().getTypeForEVT(*DAG.getContext());
-          unsigned ABIAlignment= TLI.getDataLayout()->getABITypeAlignment(Ty);
+          unsigned ABIAlignment = DAG.getDataLayout().getABITypeAlignment(Ty);
           if (Align < ABIAlignment)
             ExpandUnalignedStore(cast<StoreSDNode>(Node), DAG, TLI, this);
         }

@@ -756,6 +758,7 @@ void SelectionDAGLegalize::LegalizeStoreOps(SDNode *Node) {
 
     EVT StVT = ST->getMemoryVT();
     unsigned StWidth = StVT.getSizeInBits();
+    auto &DL = DAG.getDataLayout();
 
     if (StWidth != StVT.getStoreSizeInBits()) {
       // Promote to a byte-sized store with upper bits zero if not

@@ -782,7 +785,7 @@ void SelectionDAGLegalize::LegalizeStoreOps(SDNode *Node) {
       SDValue Lo, Hi;
       unsigned IncrementSize;
 
-      if (TLI.isLittleEndian()) {
+      if (DL.isLittleEndian()) {
         // TRUNCSTORE:i24 X -> TRUNCSTORE:i16 X, TRUNCSTORE@+2:i8 (srl X, 16)
         // Store the bottom RoundWidth bits.
         Lo = DAG.getTruncStore(Chain, dl, Value, Ptr, ST->getPointerInfo(),

@@ -838,7 +841,7 @@ void SelectionDAGLegalize::LegalizeStoreOps(SDNode *Node) {
         // expand it.
         if (!TLI.allowsMisalignedMemoryAccesses(ST->getMemoryVT(), AS, Align)) {
           Type *Ty = ST->getMemoryVT().getTypeForEVT(*DAG.getContext());
-          unsigned ABIAlignment= TLI.getDataLayout()->getABITypeAlignment(Ty);
+          unsigned ABIAlignment = DL.getABITypeAlignment(Ty);
           if (Align < ABIAlignment)
             ExpandUnalignedStore(cast<StoreSDNode>(Node), DAG, TLI, this);
         }

@@ -890,8 +893,7 @@ void SelectionDAGLegalize::LegalizeLoadOps(SDNode *Node) {
       // expand it.
      if (!TLI.allowsMisalignedMemoryAccesses(LD->getMemoryVT(), AS, Align)) {
         Type *Ty = LD->getMemoryVT().getTypeForEVT(*DAG.getContext());
-        unsigned ABIAlignment =
-          TLI.getDataLayout()->getABITypeAlignment(Ty);
+        unsigned ABIAlignment = DAG.getDataLayout().getABITypeAlignment(Ty);
         if (Align < ABIAlignment){
           ExpandUnalignedLoad(cast<LoadSDNode>(Node), DAG, TLI, RVal, RChain);
         }

@@ -995,8 +997,9 @@ void SelectionDAGLegalize::LegalizeLoadOps(SDNode *Node) {
       EVT ExtraVT = EVT::getIntegerVT(*DAG.getContext(), ExtraWidth);
       SDValue Lo, Hi, Ch;
       unsigned IncrementSize;
+      auto &DL = DAG.getDataLayout();
 
-      if (TLI.isLittleEndian()) {
+      if (DL.isLittleEndian()) {
         // EXTLOAD:i24 -> ZEXTLOAD:i16 | (shl EXTLOAD@+2:i8, 16)
         // Load the bottom RoundWidth bits.
         Lo = DAG.getExtLoad(ISD::ZEXTLOAD, dl, Node->getValueType(0),

@@ -1086,7 +1089,7 @@ void SelectionDAGLegalize::LegalizeLoadOps(SDNode *Node) {
         unsigned Align = LD->getAlignment();
         if (!TLI.allowsMisalignedMemoryAccesses(MemVT, AS, Align)) {
           Type *Ty = LD->getMemoryVT().getTypeForEVT(*DAG.getContext());
-          unsigned ABIAlignment = TLI.getDataLayout()->getABITypeAlignment(Ty);
+          unsigned ABIAlignment = DAG.getDataLayout().getABITypeAlignment(Ty);
           if (Align < ABIAlignment){
             ExpandUnalignedLoad(cast<LoadSDNode>(Node), DAG, TLI, Value, Chain);
           }

@@ -1569,6 +1572,7 @@ SDValue SelectionDAGLegalize::ExpandFCOPYSIGN(SDNode* Node) {
     // Convert to an integer with the same sign bit.
     SignBit = DAG.getNode(ISD::BITCAST, dl, IVT, Tmp2);
   } else {
+    auto &DL = DAG.getDataLayout();
     // Store the float to memory, then load the sign part out as an integer.
     MVT LoadTy = TLI.getPointerTy();
     // First create a temporary that is aligned for both the load and store.

@@ -1577,7 +1581,7 @@ SDValue SelectionDAGLegalize::ExpandFCOPYSIGN(SDNode* Node) {
     SDValue Ch =
       DAG.getStore(DAG.getEntryNode(), dl, Tmp2, StackPtr, MachinePointerInfo(),
                    false, false, 0);
-    if (TLI.isBigEndian()) {
+    if (DL.isBigEndian()) {
       assert(FloatVT.isByteSized() && "Unsupported floating point type!");
       // Load out a legal integer with the same sign bit as the float.
       SignBit = DAG.getLoad(LoadTy, dl, Ch, StackPtr, MachinePointerInfo(),

@@ -1777,9 +1781,8 @@ SDValue SelectionDAGLegalize::EmitStackConvert(SDValue SrcOp,
                                                EVT DestVT,
                                                SDLoc dl) {
   // Create the stack frame object.
-  unsigned SrcAlign =
-    TLI.getDataLayout()->getPrefTypeAlignment(SrcOp.getValueType().
-                                              getTypeForEVT(*DAG.getContext()));
+  unsigned SrcAlign = DAG.getDataLayout().getPrefTypeAlignment(
+      SrcOp.getValueType().getTypeForEVT(*DAG.getContext()));
   SDValue FIPtr = DAG.CreateStackTemporary(SlotVT, SrcAlign);
 
   FrameIndexSDNode *StackPtrFI = cast<FrameIndexSDNode>(FIPtr);

@@ -1790,7 +1793,7 @@ SDValue SelectionDAGLegalize::EmitStackConvert(SDValue SrcOp,
   unsigned SlotSize = SlotVT.getSizeInBits();
   unsigned DestSize = DestVT.getSizeInBits();
   Type *DestType = DestVT.getTypeForEVT(*DAG.getContext());
-  unsigned DestAlign = TLI.getDataLayout()->getPrefTypeAlignment(DestType);
+  unsigned DestAlign = DAG.getDataLayout().getPrefTypeAlignment(DestType);
 
   // Emit a store to the stack slot. Use a truncstore if the input value is
   // later than DestVT.

@@ -2426,7 +2429,7 @@ SDValue SelectionDAGLegalize::ExpandLegalINT_TO_FP(bool isSigned,
     SDValue Hi = StackSlot;
     SDValue Lo = DAG.getNode(ISD::ADD, dl, StackSlot.getValueType(),
                              StackSlot, WordOff);
-    if (TLI.isLittleEndian())
+    if (DAG.getDataLayout().isLittleEndian())
       std::swap(Hi, Lo);
 
     // if signed map to unsigned space

@@ -2584,7 +2587,8 @@ SDValue SelectionDAGLegalize::ExpandLegalINT_TO_FP(bool isSigned,
   case MVT::i32: FF = 0x4F800000ULL; break; // 2^32 (as a float)
   case MVT::i64: FF = 0x5F800000ULL; break; // 2^64 (as a float)
   }
-  if (TLI.isLittleEndian()) FF <<= 32;
+  if (DAG.getDataLayout().isLittleEndian())
+    FF <<= 32;
   Constant *FudgeFactor = ConstantInt::get(
                                        Type::getInt64Ty(*DAG.getContext()), FF);
 

@@ -3111,10 +3115,9 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node) {
 
     // Increment the pointer, VAList, to the next vaarg
     Tmp3 = DAG.getNode(ISD::ADD, dl, VAList.getValueType(), VAList,
-                       DAG.getConstant(TLI.getDataLayout()->
-                          getTypeAllocSize(VT.getTypeForEVT(*DAG.getContext())),
-                                       dl,
-                                       VAList.getValueType()));
+                       DAG.getConstant(DAG.getDataLayout().getTypeAllocSize(
+                                           VT.getTypeForEVT(*DAG.getContext())),
+                                       dl, VAList.getValueType()));
     // Store the incremented VAList to the legalized pointer
     Tmp3 = DAG.getStore(VAListLoad.getValue(1), dl, Tmp3, Tmp2,
                         MachinePointerInfo(V), false, false, 0);

@@ -3830,7 +3833,7 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node) {
 
     EVT PTy = TLI.getPointerTy();
 
-    const DataLayout &TD = *TLI.getDataLayout();
+    const DataLayout &TD = DAG.getDataLayout();
     unsigned EntrySize =
       DAG.getMachineFunction().getJumpTableInfo()->getEntrySize(TD);
 
@@ -282,7 +282,7 @@ SDValue DAGTypeLegalizer::PromoteIntRes_BITCAST(SDNode *N) {
       Lo = BitConvertToInteger(Lo);
       Hi = BitConvertToInteger(Hi);
 
-      if (TLI.isBigEndian())
+      if (DAG.getDataLayout().isBigEndian())
         std::swap(Lo, Hi);
 
       InOp = DAG.getNode(ISD::ANY_EXTEND, dl,

@@ -799,7 +799,7 @@ SDValue DAGTypeLegalizer::PromoteIntRes_VAARG(SDNode *N) {
   }
 
   // Handle endianness of the load.
-  if (TLI.isBigEndian())
+  if (DAG.getDataLayout().isBigEndian())
     std::reverse(Parts.begin(), Parts.end());
 
   // Assemble the parts in the promoted type.

@@ -1984,7 +1984,7 @@ void DAGTypeLegalizer::ExpandIntRes_LOAD(LoadSDNode *N,
       // The high part is undefined.
       Hi = DAG.getUNDEF(NVT);
     }
-  } else if (TLI.isLittleEndian()) {
+  } else if (DAG.getDataLayout().isLittleEndian()) {
     // Little-endian - low bits are at low addresses.
     Lo = DAG.getLoad(NVT, dl, Ch, Ptr, N->getPointerInfo(),
                      isVolatile, isNonTemporal, isInvariant, Alignment,

@@ -2845,7 +2845,7 @@ SDValue DAGTypeLegalizer::ExpandIntOp_STORE(StoreSDNode *N, unsigned OpNo) {
                              Alignment, AAInfo);
   }
 
-  if (TLI.isLittleEndian()) {
+  if (DAG.getDataLayout().isLittleEndian()) {
     // Little-endian - low bits are at low addresses.
     GetExpandedInteger(N->getValue(), Lo, Hi);
 

@@ -2963,7 +2963,8 @@ SDValue DAGTypeLegalizer::ExpandIntOp_UINT_TO_FP(SDNode *N) {
   // Get a pointer to FF if the sign bit was set, or to 0 otherwise.
   SDValue Zero = DAG.getIntPtrConstant(0, dl);
   SDValue Four = DAG.getIntPtrConstant(4, dl);
-  if (TLI.isBigEndian()) std::swap(Zero, Four);
+  if (DAG.getDataLayout().isBigEndian())
+    std::swap(Zero, Four);
   SDValue Offset = DAG.getSelect(dl, Zero.getValueType(), SignSet,
                                  Zero, Four);
   unsigned Alignment = cast<ConstantPoolSDNode>(FudgePtr)->getAlignment();
@@ -131,7 +131,7 @@ void DAGTypeLegalizer::ExpandRes_BITCAST(SDNode *N, SDValue &Lo, SDValue &Hi) {
       SDValue LHS = Vals[Slot];
       SDValue RHS = Vals[Slot + 1];
 
-      if (TLI.isBigEndian())
+      if (DAG.getDataLayout().isBigEndian())
         std::swap(LHS, RHS);
 
       Vals.push_back(DAG.getNode(ISD::BUILD_PAIR, dl,

@@ -143,7 +143,7 @@ void DAGTypeLegalizer::ExpandRes_BITCAST(SDNode *N, SDValue &Lo, SDValue &Hi) {
     Lo = Vals[Slot++];
     Hi = Vals[Slot++];
 
-    if (TLI.isBigEndian())
+    if (DAG.getDataLayout().isBigEndian())
       std::swap(Lo, Hi);
 
     return;

@@ -155,9 +155,8 @@ void DAGTypeLegalizer::ExpandRes_BITCAST(SDNode *N, SDValue &Lo, SDValue &Hi) {
 
   // Create the stack frame object. Make sure it is aligned for both
   // the source and expanded destination types.
-  unsigned Alignment =
-    TLI.getDataLayout()->getPrefTypeAlignment(NOutVT.
-                                              getTypeForEVT(*DAG.getContext()));
+  unsigned Alignment = DAG.getDataLayout().getPrefTypeAlignment(
+      NOutVT.getTypeForEVT(*DAG.getContext()));
   SDValue StackPtr = DAG.CreateStackTemporary(InVT, Alignment);
   int SPFI = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex();
   MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(SPFI);

@@ -241,7 +240,7 @@ void DAGTypeLegalizer::ExpandRes_EXTRACT_VECTOR_ELT(SDNode *N, SDValue &Lo,
                     DAG.getConstant(1, dl, Idx.getValueType()));
   Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, NewVT, NewVec, Idx);
 
-  if (TLI.isBigEndian())
+  if (DAG.getDataLayout().isBigEndian())
     std::swap(Lo, Hi);
 }
 

@@ -325,7 +324,7 @@ void DAGTypeLegalizer::IntegerToVector(SDValue Op, unsigned NumElements,
   if (NumElements > 1) {
     NumElements >>= 1;
     SplitInteger(Op, Parts[0], Parts[1]);
-    if (TLI.isBigEndian())
+    if (DAG.getDataLayout().isBigEndian())
      std::swap(Parts[0], Parts[1]);
     IntegerToVector(Parts[0], NumElements, Ops, EltVT);
     IntegerToVector(Parts[1], NumElements, Ops, EltVT);

@@ -389,7 +388,7 @@ SDValue DAGTypeLegalizer::ExpandOp_BUILD_VECTOR(SDNode *N) {
   for (unsigned i = 0; i < NumElts; ++i) {
     SDValue Lo, Hi;
     GetExpandedOp(N->getOperand(i), Lo, Hi);
-    if (TLI.isBigEndian())
+    if (DAG.getDataLayout().isBigEndian())
       std::swap(Lo, Hi);
     NewElts.push_back(Lo);
     NewElts.push_back(Hi);

@@ -431,7 +430,7 @@ SDValue DAGTypeLegalizer::ExpandOp_INSERT_VECTOR_ELT(SDNode *N) {
 
   SDValue Lo, Hi;
   GetExpandedOp(Val, Lo, Hi);
-  if (TLI.isBigEndian())
+  if (DAG.getDataLayout().isBigEndian())
     std::swap(Lo, Hi);
 
   SDValue Idx = N->getOperand(2);
@@ -803,7 +803,7 @@ SDValue VectorLegalizer::ExpandANY_EXTEND_VECTOR_INREG(SDValue Op) {
 
   // Place the extended lanes into the correct locations.
   int ExtLaneScale = NumSrcElements / NumElements;
-  int EndianOffset = TLI.isBigEndian() ? ExtLaneScale - 1 : 0;
+  int EndianOffset = DAG.getDataLayout().isBigEndian() ? ExtLaneScale - 1 : 0;
   for (int i = 0; i < NumElements; ++i)
     ShuffleMask[i * ExtLaneScale + EndianOffset] = i;
 

@@ -858,7 +858,7 @@ SDValue VectorLegalizer::ExpandZERO_EXTEND_VECTOR_INREG(SDValue Op) {
     ShuffleMask.push_back(i);
 
   int ExtLaneScale = NumSrcElements / NumElements;
-  int EndianOffset = TLI.isBigEndian() ? ExtLaneScale - 1 : 0;
+  int EndianOffset = DAG.getDataLayout().isBigEndian() ? ExtLaneScale - 1 : 0;
   for (int i = 0; i < NumElements; ++i)
     ShuffleMask[i * ExtLaneScale + EndianOffset] = NumSrcElements + i;
 
@@ -742,7 +742,7 @@ void DAGTypeLegalizer::SplitVecRes_BITCAST(SDNode *N, SDValue &Lo,
     // expanded pieces.
     if (LoVT == HiVT) {
       GetExpandedOp(InOp, Lo, Hi);
-      if (TLI.isBigEndian())
+      if (DAG.getDataLayout().isBigEndian())
         std::swap(Lo, Hi);
       Lo = DAG.getNode(ISD::BITCAST, dl, LoVT, Lo);
       Hi = DAG.getNode(ISD::BITCAST, dl, HiVT, Hi);

@@ -761,12 +761,12 @@ void DAGTypeLegalizer::SplitVecRes_BITCAST(SDNode *N, SDValue &Lo,
   // In the general case, convert the input to an integer and split it by hand.
   EVT LoIntVT = EVT::getIntegerVT(*DAG.getContext(), LoVT.getSizeInBits());
   EVT HiIntVT = EVT::getIntegerVT(*DAG.getContext(), HiVT.getSizeInBits());
-  if (TLI.isBigEndian())
+  if (DAG.getDataLayout().isBigEndian())
     std::swap(LoIntVT, HiIntVT);
 
   SplitInteger(BitConvertToInteger(InOp), LoIntVT, HiIntVT, Lo, Hi);
 
-  if (TLI.isBigEndian())
+  if (DAG.getDataLayout().isBigEndian())
     std::swap(Lo, Hi);
   Lo = DAG.getNode(ISD::BITCAST, dl, LoVT, Lo);
   Hi = DAG.getNode(ISD::BITCAST, dl, HiVT, Hi);

@@ -840,7 +840,7 @@ void DAGTypeLegalizer::SplitVecRes_INSERT_SUBVECTOR(SDNode *N, SDValue &Lo,
   // Store the new subvector into the specified index.
   SDValue SubVecPtr = GetVectorElementPointer(StackPtr, SubVecVT, Idx);
   Type *VecType = VecVT.getTypeForEVT(*DAG.getContext());
-  unsigned Alignment = TLI.getDataLayout()->getPrefTypeAlignment(VecType);
+  unsigned Alignment = DAG.getDataLayout().getPrefTypeAlignment(VecType);
   Store = DAG.getStore(Store, dl, SubVec, SubVecPtr, MachinePointerInfo(),
                        false, false, 0);
 

@@ -919,8 +919,7 @@ void DAGTypeLegalizer::SplitVecRes_INSERT_VECTOR_ELT(SDNode *N, SDValue &Lo,
   // so use a truncating store.
   SDValue EltPtr = GetVectorElementPointer(StackPtr, EltVT, Idx);
   Type *VecType = VecVT.getTypeForEVT(*DAG.getContext());
-  unsigned Alignment =
-    TLI.getDataLayout()->getPrefTypeAlignment(VecType);
+  unsigned Alignment = DAG.getDataLayout().getPrefTypeAlignment(VecType);
   Store = DAG.getTruncStore(Store, dl, Elt, EltPtr, MachinePointerInfo(), EltVT,
                             false, false, 0);
 

@@ -1472,7 +1471,7 @@ SDValue DAGTypeLegalizer::SplitVecOp_BITCAST(SDNode *N) {
   Lo = BitConvertToInteger(Lo);
   Hi = BitConvertToInteger(Hi);
 
-  if (TLI.isBigEndian())
+  if (DAG.getDataLayout().isBigEndian())
     std::swap(Lo, Hi);
 
   return DAG.getNode(ISD::BITCAST, SDLoc(N), N->getValueType(0),
@@ -921,7 +921,7 @@ unsigned SelectionDAG::getEVTAlignment(EVT VT) const {
     PointerType::get(Type::getInt8Ty(*getContext()), 0) :
     VT.getTypeForEVT(*getContext());
 
-  return TLI->getDataLayout()->getABITypeAlignment(Ty);
+  return getDataLayout().getABITypeAlignment(Ty);
 }
 
 // EntryNode could meaningfully have debug info if we can find it...

@@ -1184,7 +1184,7 @@ SDValue SelectionDAG::getConstant(const ConstantInt &Val, SDLoc DL, EVT VT,
 
     // EltParts is currently in little endian order. If we actually want
     // big-endian order then reverse it now.
-    if (TLI->isBigEndian())
+    if (getDataLayout().isBigEndian())
       std::reverse(EltParts.begin(), EltParts.end());
 
     // The elements must be reversed when the element order is different

@@ -1303,7 +1303,7 @@ SDValue SelectionDAG::getGlobalAddress(const GlobalValue *GV, SDLoc DL,
          "Cannot set target flags on target-independent globals");
 
   // Truncate (with sign-extension) the offset value to the pointer size.
-  unsigned BitWidth = TLI->getPointerTypeSizeInBits(GV->getType());
+  unsigned BitWidth = getDataLayout().getPointerTypeSizeInBits(GV->getType());
   if (BitWidth < 64)
     Offset = SignExtend64(Offset, BitWidth);
 

@@ -1373,7 +1373,7 @@ SDValue SelectionDAG::getConstantPool(const Constant *C, EVT VT,
   assert((TargetFlags == 0 || isTarget) &&
          "Cannot set target flags on target-independent globals");
   if (Alignment == 0)
-    Alignment = TLI->getDataLayout()->getPrefTypeAlignment(C->getType());
+    Alignment = getDataLayout().getPrefTypeAlignment(C->getType());
   unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool;
   FoldingSetNodeID ID;
   AddNodeIDNode(ID, Opc, getVTList(VT), None);

@@ -1400,7 +1400,7 @@ SDValue SelectionDAG::getConstantPool(MachineConstantPoolValue *C, EVT VT,
   assert((TargetFlags == 0 || isTarget) &&
          "Cannot set target flags on target-independent globals");
   if (Alignment == 0)
-    Alignment = TLI->getDataLayout()->getPrefTypeAlignment(C->getType());
+    Alignment = getDataLayout().getPrefTypeAlignment(C->getType());
   unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool;
   FoldingSetNodeID ID;
   AddNodeIDNode(ID, Opc, getVTList(VT), None);

@@ -1864,7 +1864,7 @@ SDValue SelectionDAG::CreateStackTemporary(EVT VT, unsigned minAlign) {
   unsigned ByteSize = VT.getStoreSize();
   Type *Ty = VT.getTypeForEVT(*getContext());
   unsigned StackAlign =
-  std::max((unsigned)TLI->getDataLayout()->getPrefTypeAlignment(Ty), minAlign);
+      std::max((unsigned)getDataLayout().getPrefTypeAlignment(Ty), minAlign);
 
   int FrameIdx = FrameInfo->CreateStackObject(ByteSize, StackAlign, false);
   return getFrameIndex(FrameIdx, TLI->getPointerTy());

@@ -1877,9 +1877,9 @@ SDValue SelectionDAG::CreateStackTemporary(EVT VT1, EVT VT2) {
                             VT2.getStoreSizeInBits())/8;
   Type *Ty1 = VT1.getTypeForEVT(*getContext());
   Type *Ty2 = VT2.getTypeForEVT(*getContext());
-  const DataLayout *TD = TLI->getDataLayout();
-  unsigned Align = std::max(TD->getPrefTypeAlignment(Ty1),
-                            TD->getPrefTypeAlignment(Ty2));
+  const DataLayout &DL = getDataLayout();
+  unsigned Align =
+      std::max(DL.getPrefTypeAlignment(Ty1), DL.getPrefTypeAlignment(Ty2));
 
   MachineFrameInfo *FrameInfo = getMachineFunction().getFrameInfo();
   int FrameIdx = FrameInfo->CreateStackObject(Bytes, Align, false);

@@ -3994,7 +3994,7 @@ static SDValue getMemsetStringVal(EVT VT, SDLoc dl, SelectionDAG &DAG,
   unsigned NumBytes = std::min(NumVTBytes, unsigned(Str.size()));
 
   APInt Val(NumVTBits, 0);
-  if (TLI.isLittleEndian()) {
+  if (DAG.getDataLayout().isLittleEndian()) {
     for (unsigned i = 0; i != NumBytes; ++i)
       Val |= (uint64_t)(unsigned char)Str[i] << i*8;
   } else {

@@ -4066,7 +4066,7 @@ static bool FindOptimalMemOpLowering(std::vector<EVT> &MemOps,
 
   if (VT == MVT::Other) {
     unsigned AS = 0;
-    if (DstAlign >= TLI.getDataLayout()->getPointerPrefAlignment(AS) ||
+    if (DstAlign >= DAG.getDataLayout().getPointerPrefAlignment(AS) ||
        TLI.allowsMisalignedMemoryAccesses(VT, AS, DstAlign)) {
      VT = TLI.getPointerTy();
     } else {

@@ -4185,14 +4185,14 @@ static SDValue getMemcpyLoadsAndStores(SelectionDAG &DAG, SDLoc dl,
 
   if (DstAlignCanChange) {
     Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext());
-    unsigned NewAlign = (unsigned) TLI.getDataLayout()->getABITypeAlignment(Ty);
+    unsigned NewAlign = (unsigned)DAG.getDataLayout().getABITypeAlignment(Ty);
 
     // Don't promote to an alignment that would require dynamic stack
     // realignment.
     const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
     if (!TRI->needsStackRealignment(MF))
-      while (NewAlign > Align &&
-             TLI.getDataLayout()->exceedsNaturalStackAlignment(NewAlign))
+      while (NewAlign > Align &&
+             DAG.getDataLayout().exceedsNaturalStackAlignment(NewAlign))
         NewAlign /= 2;
 
     if (NewAlign > Align) {

@@ -4294,7 +4294,7 @@ static SDValue getMemmoveLoadsAndStores(SelectionDAG &DAG, SDLoc dl,
 
   if (DstAlignCanChange) {
     Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext());
-    unsigned NewAlign = (unsigned) TLI.getDataLayout()->getABITypeAlignment(Ty);
+    unsigned NewAlign = (unsigned)DAG.getDataLayout().getABITypeAlignment(Ty);
     if (NewAlign > Align) {
       // Give the stack frame object a larger alignment if needed.
       if (MFI->getObjectAlignment(FI->getIndex()) < NewAlign)

@@ -4385,7 +4385,7 @@ static SDValue getMemsetStores(SelectionDAG &DAG, SDLoc dl,
 
   if (DstAlignCanChange) {
     Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext());
-    unsigned NewAlign = (unsigned) TLI.getDataLayout()->getABITypeAlignment(Ty);
+    unsigned NewAlign = (unsigned)DAG.getDataLayout().getABITypeAlignment(Ty);
     if (NewAlign > Align) {
       // Give the stack frame object a larger alignment if needed.
       if (MFI->getObjectAlignment(FI->getIndex()) < NewAlign)

@@ -4488,7 +4488,7 @@ SDValue SelectionDAG::getMemcpy(SDValue Chain, SDLoc dl, SDValue Dst,
   // Emit a library call.
   TargetLowering::ArgListTy Args;
   TargetLowering::ArgListEntry Entry;
-  Entry.Ty = TLI->getDataLayout()->getIntPtrType(*getContext());
+  Entry.Ty = getDataLayout().getIntPtrType(*getContext());
   Entry.Node = Dst; Args.push_back(Entry);
   Entry.Node = Src; Args.push_back(Entry);
   Entry.Node = Size; Args.push_back(Entry);

@@ -4594,7 +4594,7 @@ SDValue SelectionDAG::getMemset(SDValue Chain, SDLoc dl, SDValue Dst,
   }
 
   // Emit a library call.
-  Type *IntPtrTy = TLI->getDataLayout()->getIntPtrType(*getContext());
+  Type *IntPtrTy = getDataLayout().getIntPtrType(*getContext());
   TargetLowering::ArgListTy Args;
   TargetLowering::ArgListEntry Entry;
   Entry.Node = Dst; Entry.Ty = IntPtrTy;

@@ -6891,10 +6891,10 @@ unsigned SelectionDAG::InferPtrAlignment(SDValue Ptr) const {
   const GlobalValue *GV;
   int64_t GVOffset = 0;
   if (TLI->isGAPlusOffset(Ptr.getNode(), GV, GVOffset)) {
-    unsigned PtrWidth = TLI->getPointerTypeSizeInBits(GV->getType());
+    unsigned PtrWidth = getDataLayout().getPointerTypeSizeInBits(GV->getType());
     APInt KnownZero(PtrWidth, 0), KnownOne(PtrWidth, 0);
     llvm::computeKnownBits(const_cast<GlobalValue *>(GV), KnownZero, KnownOne,
-                           *TLI->getDataLayout());
+                           getDataLayout());
     unsigned AlignBits = KnownZero.countTrailingOnes();
     unsigned Align = AlignBits ? 1 << std::min(31U, AlignBits) : 0;
     if (Align)
@@ -146,7 +146,7 @@ static SDValue getCopyFromParts(SelectionDAG &DAG, SDLoc DL,
         Hi = DAG.getNode(ISD::BITCAST, DL, HalfVT, Parts[1]);
       }
 
-      if (TLI.isBigEndian())
+      if (DAG.getDataLayout().isBigEndian())
         std::swap(Lo, Hi);
 
       Val = DAG.getNode(ISD::BUILD_PAIR, DL, RoundVT, Lo, Hi);

@@ -160,7 +160,7 @@ static SDValue getCopyFromParts(SelectionDAG &DAG, SDLoc DL,
 
       // Combine the round and odd parts.
       Lo = Val;
-      if (TLI.isBigEndian())
+      if (DAG.getDataLayout().isBigEndian())
        std::swap(Lo, Hi);
       EVT TotalVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
       Hi = DAG.getNode(ISD::ANY_EXTEND, DL, TotalVT, Hi);

@@ -362,10 +362,10 @@ static void getCopyToParts(SelectionDAG &DAG, SDLoc DL,
   if (ValueVT.isVector())
     return getCopyToPartsVector(DAG, DL, Val, Parts, NumParts, PartVT, V);
 
-  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
   unsigned PartBits = PartVT.getSizeInBits();
   unsigned OrigNumParts = NumParts;
-  assert(TLI.isTypeLegal(PartVT) && "Copying to an illegal type!");
+  assert(DAG.getTargetLoweringInfo().isTypeLegal(PartVT) &&
+         "Copying to an illegal type!");
 
   if (NumParts == 0)
     return;

@@ -433,7 +433,7 @@ static void getCopyToParts(SelectionDAG &DAG, SDLoc DL,
                          DAG.getIntPtrConstant(RoundBits, DL));
     getCopyToParts(DAG, DL, OddVal, Parts + RoundParts, OddParts, PartVT, V);
 
-    if (TLI.isBigEndian())
+    if (DAG.getDataLayout().isBigEndian())
       // The odd parts were reversed by getCopyToParts - unreverse them.
       std::reverse(Parts + RoundParts, Parts + NumParts);
 

@@ -468,7 +468,7 @@ static void getCopyToParts(SelectionDAG &DAG, SDLoc DL,
     }
   }
 
-  if (TLI.isBigEndian())
+  if (DAG.getDataLayout().isBigEndian())
     std::reverse(Parts, Parts + OrigNumParts);
 }
 

@@ -807,7 +807,7 @@ void SelectionDAGBuilder::init(GCFunctionInfo *gfi, AliasAnalysis &aa,
   AA = &aa;
   GFI = gfi;
   LibInfo = li;
-  DL = DAG.getTarget().getDataLayout();
+  DL = &DAG.getDataLayout();
   Context = DAG.getContext();
   LPadToCallSiteMap.clear();
 }

@@ -1771,8 +1771,7 @@ void SelectionDAGBuilder::visitSPDescriptorParent(StackProtectorDescriptor &SPD,
   SDValue GuardPtr = getValue(IRGuard);
   SDValue StackSlotPtr = DAG.getFrameIndex(FI, PtrTy);
 
-  unsigned Align =
-    TLI.getDataLayout()->getPrefTypeAlignment(IRGuard->getType());
+  unsigned Align = DL->getPrefTypeAlignment(IRGuard->getType());
 
   SDValue Guard;
   SDLoc dl = getCurSDLoc();

@@ -2823,10 +2822,10 @@ void SelectionDAGBuilder::visitAlloca(const AllocaInst &I) {
   SDLoc dl = getCurSDLoc();
   Type *Ty = I.getAllocatedType();
   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
-  uint64_t TySize = TLI.getDataLayout()->getTypeAllocSize(Ty);
+  auto &DL = DAG.getDataLayout();
+  uint64_t TySize = DL.getTypeAllocSize(Ty);
   unsigned Align =
-      std::max((unsigned)TLI.getDataLayout()->getPrefTypeAlignment(Ty),
-               I.getAlignment());
+      std::max((unsigned)DL.getPrefTypeAlignment(Ty), I.getAlignment());
 
   SDValue AllocSize = getValue(I.getArraySize());
 

@@ -5670,9 +5669,8 @@ public:
   /// getCallOperandValEVT - Return the EVT of the Value* that this operand
   /// corresponds to. If there is no Value* for this operand, it returns
   /// MVT::Other.
-  EVT getCallOperandValEVT(LLVMContext &Context,
-                           const TargetLowering &TLI,
-                           const DataLayout *DL) const {
+  EVT getCallOperandValEVT(LLVMContext &Context, const TargetLowering &TLI,
+                           const DataLayout &DL) const {
     if (!CallOperandVal) return MVT::Other;
 
     if (isa<BasicBlock>(CallOperandVal))

@@ -5698,7 +5696,7 @@ public:
     // If OpTy is not a single value, it may be a struct/union that we
     // can tile with integers.
     if (!OpTy->isSingleValueType() && OpTy->isSized()) {
-      unsigned BitSize = DL->getTypeSizeInBits(OpTy);
+      unsigned BitSize = DL.getTypeSizeInBits(OpTy);
       switch (BitSize) {
       default: break;
       case 1:

@@ -5838,8 +5836,8 @@ void SelectionDAGBuilder::visitInlineAsm(ImmutableCallSite CS) {
   SDISelAsmOperandInfoVector ConstraintOperands;
 
   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
-  TargetLowering::AsmOperandInfoVector TargetConstraints =
-      TLI.ParseConstraints(DAG.getSubtarget().getRegisterInfo(), CS);
+  TargetLowering::AsmOperandInfoVector TargetConstraints = TLI.ParseConstraints(
+      DAG.getDataLayout(), DAG.getSubtarget().getRegisterInfo(), CS);
 
   bool hasMemory = false;
 

@@ -5888,8 +5886,8 @@ void SelectionDAGBuilder::visitInlineAsm(ImmutableCallSite CS) {
         OpInfo.CallOperand = getValue(OpInfo.CallOperandVal);
       }
 
-      OpVT =
-          OpInfo.getCallOperandValEVT(*DAG.getContext(), TLI, DL).getSimpleVT();
+      OpVT = OpInfo.getCallOperandValEVT(*DAG.getContext(), TLI,
+                                         DAG.getDataLayout()).getSimpleVT();
     }
 
     OpInfo.ConstraintVT = OpVT;

@@ -5983,8 +5981,9 @@ void SelectionDAGBuilder::visitInlineAsm(ImmutableCallSite CS) {
         // Otherwise, create a stack slot and emit a store to it before the
         // asm.
         Type *Ty = OpVal->getType();
-        uint64_t TySize = TLI.getDataLayout()->getTypeAllocSize(Ty);
-        unsigned Align = TLI.getDataLayout()->getPrefTypeAlignment(Ty);
+        auto &DL = DAG.getDataLayout();
+        uint64_t TySize = DL.getTypeAllocSize(Ty);
+        unsigned Align = DL.getPrefTypeAlignment(Ty);
         MachineFunction &MF = DAG.getMachineFunction();
         int SSFI = MF.getFrameInfo()->CreateStackObject(TySize, Align, false);
         SDValue StackSlot = DAG.getFrameIndex(SSFI, TLI.getPointerTy());

@@ -6380,7 +6379,7 @@ void SelectionDAGBuilder::visitVAStart(const CallInst &I) {
 
 void SelectionDAGBuilder::visitVAArg(const VAArgInst &I) {
   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
-  const DataLayout &DL = *TLI.getDataLayout();
+  const DataLayout &DL = DAG.getDataLayout();
   SDValue V = DAG.getVAArg(TLI.getValueType(I.getType()), getCurSDLoc(),
                            getRoot(), getValue(I.getOperand(0)),
                            DAG.getSrcValue(I.getOperand(0)),

@@ -6718,6 +6717,7 @@ TargetLowering::LowerCallTo(TargetLowering::CallLoweringInfo &CLI) const {
   Type *OrigRetTy = CLI.RetTy;
   SmallVector<EVT, 4> RetTys;
   SmallVector<uint64_t, 4> Offsets;
+  auto &DL = CLI.DAG.getDataLayout();
   ComputeValueVTs(*this, CLI.RetTy, RetTys, &Offsets);
 
   SmallVector<ISD::OutputArg, 4> Outs;

@@ -6733,8 +6733,8 @@ TargetLowering::LowerCallTo(TargetLowering::CallLoweringInfo &CLI) const {
     // FIXME: equivalent assert?
     // assert(!CS.hasInAllocaArgument() &&
    //        "sret demotion is incompatible with inalloca");
-    uint64_t TySize = getDataLayout()->getTypeAllocSize(CLI.RetTy);
-    unsigned Align = getDataLayout()->getPrefTypeAlignment(CLI.RetTy);
+    uint64_t TySize = DL.getTypeAllocSize(CLI.RetTy);
+    unsigned Align = DL.getPrefTypeAlignment(CLI.RetTy);
     MachineFunction &MF = CLI.DAG.getMachineFunction();
     DemoteStackIdx = MF.getFrameInfo()->CreateStackObject(TySize, Align, false);
     Type *StackSlotPtrType = PointerType::getUnqual(CLI.RetTy);

@@ -6797,7 +6797,7 @@ TargetLowering::LowerCallTo(TargetLowering::CallLoweringInfo &CLI) const {
       SDValue Op = SDValue(Args[i].Node.getNode(),
                            Args[i].Node.getResNo() + Value);
       ISD::ArgFlagsTy Flags;
-      unsigned OriginalAlignment = getDataLayout()->getABITypeAlignment(ArgTy);
+      unsigned OriginalAlignment = DL.getABITypeAlignment(ArgTy);
 
       if (Args[i].isZExt)
         Flags.setZExt();

@@ -6821,7 +6821,7 @@ TargetLowering::LowerCallTo(TargetLowering::CallLoweringInfo &CLI) const {
      if (Args[i].isByVal || Args[i].isInAlloca) {
         PointerType *Ty = cast<PointerType>(Args[i].Ty);
         Type *ElementTy = Ty->getElementType();
-        Flags.setByValSize(getDataLayout()->getTypeAllocSize(ElementTy));
+        Flags.setByValSize(DL.getTypeAllocSize(ElementTy));
         // For ByVal, alignment should come from FE. BE will guess if this
         // info is not there but there are cases it cannot get right.
         unsigned FrameAlign;

@@ -7030,7 +7030,7 @@ static bool isOnlyUsedInEntryBlock(const Argument *A, bool FastISel) {
 void SelectionDAGISel::LowerArguments(const Function &F) {
   SelectionDAG &DAG = SDB->DAG;
   SDLoc dl = SDB->getCurSDLoc();
-  const DataLayout *DL = TLI->getDataLayout();
+  const DataLayout &DL = DAG.getDataLayout();
   SmallVector<ISD::InputArg, 16> Ins;
 
   if (!FuncInfo->CanLowerReturn) {

@@ -7066,7 +7066,7 @@ void SelectionDAGISel::LowerArguments(const Function &F) {
       EVT VT = ValueVTs[Value];
       Type *ArgTy = VT.getTypeForEVT(*DAG.getContext());
       ISD::ArgFlagsTy Flags;
-      unsigned OriginalAlignment = DL->getABITypeAlignment(ArgTy);
+      unsigned OriginalAlignment = DL.getABITypeAlignment(ArgTy);
 
       if (F.getAttributes().hasAttribute(Idx, Attribute::ZExt))
         Flags.setZExt();

@@ -7090,7 +7090,7 @@ void SelectionDAGISel::LowerArguments(const Function &F) {
      if (Flags.isByVal() || Flags.isInAlloca()) {
         PointerType *Ty = cast<PointerType>(I->getType());
         Type *ElementTy = Ty->getElementType();
-        Flags.setByValSize(DL->getTypeAllocSize(ElementTy));
+        Flags.setByValSize(DL.getTypeAllocSize(ElementTy));
         // For ByVal, alignment should be passed from FE. BE will guess if
         // this info is not there but there are cases it cannot get right.
         unsigned FrameAlign;

@@ -7595,7 +7595,7 @@ void SelectionDAGBuilder::findJumpTables(CaseClusterVector &Clusters,
 
 bool SelectionDAGBuilder::rangeFitsInWord(const APInt &Low, const APInt &High) {
   // FIXME: Using the pointer type doesn't seem ideal.
-  uint64_t BW = DAG.getTargetLoweringInfo().getPointerTy().getSizeInBits();
+  uint64_t BW = DAG.getDataLayout().getPointerSizeInBits();
   uint64_t Range = (High - Low).getLimitedValue(UINT64_MAX - 1) + 1;
   return Range <= BW;
 }
@@ -1398,7 +1398,7 @@ TargetLowering::SimplifySetCC(EVT VT, SDValue N0, SDValue N1,
         APInt newMask = APInt::getLowBitsSet(maskWidth, width);
         for (unsigned offset=0; offset<origWidth/width; offset++) {
           if ((newMask & Mask) == Mask) {
-            if (!getDataLayout()->isLittleEndian())
+            if (!DAG.getDataLayout().isLittleEndian())
               bestOffset = (origWidth/width - offset - 1) * (width/8);
             else
               bestOffset = (uint64_t)offset * (width/8);

@@ -2290,7 +2290,8 @@ unsigned TargetLowering::AsmOperandInfo::getMatchedOperand() const {
 /// If this returns an empty vector, and if the constraint string itself
 /// isn't empty, there was an error parsing.
 TargetLowering::AsmOperandInfoVector
-TargetLowering::ParseConstraints(const TargetRegisterInfo *TRI,
+TargetLowering::ParseConstraints(const DataLayout &DL,
+                                 const TargetRegisterInfo *TRI,
                                  ImmutableCallSite CS) const {
   /// ConstraintOperands - Information about all of the constraints.
   AsmOperandInfoVector ConstraintOperands;

@@ -2358,7 +2359,7 @@ TargetLowering::ParseConstraints(const TargetRegisterInfo *TRI,
       // If OpTy is not a single value, it may be a struct/union that we
      // can tile with integers.
       if (!OpTy->isSingleValueType() && OpTy->isSized()) {
-        unsigned BitSize = getDataLayout()->getTypeSizeInBits(OpTy);
+        unsigned BitSize = DL.getTypeSizeInBits(OpTy);
         switch (BitSize) {
         default: break;
         case 1:

@@ -2372,8 +2373,7 @@ TargetLowering::ParseConstraints(const TargetRegisterInfo *TRI,
           break;
         }
       } else if (PointerType *PT = dyn_cast<PointerType>(OpTy)) {
-        unsigned PtrSize
-          = getDataLayout()->getPointerSizeInBits(PT->getAddressSpace());
+        unsigned PtrSize = DL.getPointerSizeInBits(PT->getAddressSpace());
         OpInfo.ConstraintVT = MVT::getIntegerVT(PtrSize);
       } else {
         OpInfo.ConstraintVT = MVT::getVT(OpTy, true);
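Beyond the getDataLayout() redirection, the other recurring change above is that TargetLowering::ParseConstraints now takes the DataLayout explicitly. A hedged sketch of an updated call site, modeled on the CodeGenPrepare hunk near the top; the wrapper name parseAsmConstraintsFor is hypothetical and not part of the patch:

// Illustrative sketch only -- mirrors the new ParseConstraints signature
// (const DataLayout &, const TargetRegisterInfo *, ImmutableCallSite).
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Module.h"
#include "llvm/Target/TargetLowering.h"

using namespace llvm;

static TargetLowering::AsmOperandInfoVector
parseAsmConstraintsFor(const TargetLowering &TLI, const TargetRegisterInfo *TRI,
                       CallInst *CI) {
  const Function *F = CI->getParent()->getParent();
  // The DataLayout is fetched from the enclosing Module, not from TargetLowering.
  return TLI.ParseConstraints(F->getParent()->getDataLayout(), TRI,
                              ImmutableCallSite(CI));
}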