Improve MachineMemOperand handling.

 - Allocate MachineMemOperands and MachineMemOperand lists in MachineFunctions.
   This eliminates MachineInstr's std::list member and allows the data to be
   created by isel and to live for the remainder of codegen, avoiding a lot of
   copying and unnecessary translation. This also shrinks MemSDNode. (A sketch
   of the new allocation pattern follows this list.)
 - Delete MemOperandSDNode. Introduce MachineSDNode, which has dedicated
   fields for MachineMemOperands.
 - Change MemSDNode to have a MachineMemOperand member instead of its own
   fields with the same information. This introduces some redundancy, but
   it's more consistent with what MachineInstr will eventually want.
 - Ignore alignment when searching for redundant loads for CSE, but remember
   the greatest alignment.
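A minimal sketch of the new allocation pattern (not code from this patch; attachFrameLoadMemRef is a hypothetical helper and the frame-index load is only an example):

#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
using namespace llvm;

// Attach a frame-slot load's memory reference to an already-built
// MachineInstr. The MachineFunction owns both the MachineMemOperand and
// the MemRefs array, so nothing here needs to be deallocated explicitly.
static void attachFrameLoadMemRef(MachineFunction &MF, MachineInstr *MI,
                                  int FrameIdx, uint64_t Size,
                                  unsigned Align) {
  MachineMemOperand *MMO =
    MF.getMachineMemOperand(PseudoSourceValue::getFixedStack(FrameIdx),
                            MachineMemOperand::MOLoad, /*Offset=*/0,
                            Size, Align);
  // setMemRefs is the primary way to install the list; it does not
  // transfer ownership.
  MachineInstr::mmo_iterator MemRefs = MF.allocateMemRefsArray(1);
  MemRefs[0] = MMO;
  MI->setMemRefs(MemRefs, MemRefs + 1);
}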

Target-specific code that previously used MemOperandSDNodes with generic
SDNodes now uses MemIntrinsicSDNodes, with opcodes in a designated range
so that the SelectionDAG framework knows that MachineMemOperand information
is available.
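For illustration, a rough sketch of that pattern on the SelectionDAG side (lowerSpecialLoad is a made-up helper, and TargetOpc is assumed to be a target opcode defined at or above ISD::FIRST_TARGET_MEMORY_OPCODE; the calls themselves are the interfaces this patch adds):

#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/SelectionDAG.h"
using namespace llvm;

static SDValue lowerSpecialLoad(SelectionDAG &DAG, DebugLoc dl,
                                SDValue Chain, SDValue Ptr,
                                const Value *SrcVal, EVT MemVT,
                                unsigned Align, unsigned TargetOpc) {
  // Build the MachineMemOperand up front; it lives in the MachineFunction.
  MachineMemOperand *MMO =
    DAG.getMachineFunction().getMachineMemOperand(
      SrcVal, MachineMemOperand::MOLoad, /*Offset=*/0,
      MemVT.getStoreSize(), Align);
  SDValue Ops[] = { Chain, Ptr };
  SDVTList VTs = DAG.getVTList(MemVT, MVT::Other);
  // Because TargetOpc is in the memory-opcode range, this produces a
  // MemIntrinsicSDNode and the framework can see the memory reference.
  return DAG.getMemIntrinsicNode(TargetOpc, dl, VTs, Ops, 2, MemVT, MMO);
}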


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@82794 91177308-0d34-0410-b5e6-96231b3b80d8
Dan Gohman 2009-09-25 20:36:54 +00:00
parent 602b0c8c17
commit c76909abfe
30 changed files with 744 additions and 544 deletions

View File

@ -27,6 +27,7 @@
namespace llvm {
class Value;
class Function;
class MachineRegisterInfo;
class MachineFrameInfo;
@ -320,6 +321,24 @@ public:
///
void DeleteMachineBasicBlock(MachineBasicBlock *MBB);
/// getMachineMemOperand - Allocate a new MachineMemOperand.
/// MachineMemOperands are owned by the MachineFunction and need not be
/// explicitly deallocated.
MachineMemOperand *getMachineMemOperand(const Value *v, unsigned f,
int64_t o, uint64_t s,
unsigned base_alignment);
/// getMachineMemOperand - Allocate a new MachineMemOperand by copying
/// an existing one, adjusting by an offset and using the given EVT.
/// MachineMemOperands are owned by the MachineFunction and need not be
/// explicitly deallocated.
MachineMemOperand *getMachineMemOperand(const MachineMemOperand *MMO,
int64_t Offset, uint64_t Size);
/// allocateMemRefsArray - Allocate an array to hold MachineMemOperand
/// pointers. This array is owned by the MachineFunction.
MachineInstr::mmo_iterator allocateMemRefsArray(unsigned long Num);
//===--------------------------------------------------------------------===//
// Debug location.
//
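As a small illustration of the copying overload above (a hypothetical helper, not part of the patch): derive a memory reference for the upper eight bytes of a sixteen-byte access that an existing operand already describes.

#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineMemOperand.h"
using namespace llvm;

// The copy keeps Orig's Value, flags, and base alignment; only the offset
// and size change, so the effective alignment is recomputed from the base
// alignment and the combined offset.
static MachineMemOperand *getHighHalfMMO(MachineFunction &MF,
                                         const MachineMemOperand *Orig) {
  return MF.getMachineMemOperand(Orig, /*Offset=*/8, /*Size=*/8);
}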

View File

@ -20,10 +20,8 @@
#include "llvm/ADT/ilist_node.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/Target/TargetInstrDesc.h"
#include "llvm/Support/DebugLoc.h"
#include <list>
#include <vector>
namespace llvm {
@ -32,17 +30,23 @@ class TargetInstrDesc;
class TargetInstrInfo;
class TargetRegisterInfo;
class MachineFunction;
class MachineMemOperand;
//===----------------------------------------------------------------------===//
/// MachineInstr - Representation of each machine instruction.
///
class MachineInstr : public ilist_node<MachineInstr> {
public:
typedef MachineMemOperand **mmo_iterator;
private:
const TargetInstrDesc *TID; // Instruction descriptor.
unsigned short NumImplicitOps; // Number of implicit operands (which
// are determined at construction time).
std::vector<MachineOperand> Operands; // the operands
std::list<MachineMemOperand> MemOperands; // information on memory references
mmo_iterator MemRefs; // information on memory references
mmo_iterator MemRefsEnd;
MachineBasicBlock *Parent; // Pointer to the owning basic block.
DebugLoc debugLoc; // Source line information.
@ -132,21 +136,14 @@ public:
unsigned getNumExplicitOperands() const;
/// Access to memory operands of the instruction
std::list<MachineMemOperand>::iterator memoperands_begin()
{ return MemOperands.begin(); }
std::list<MachineMemOperand>::iterator memoperands_end()
{ return MemOperands.end(); }
std::list<MachineMemOperand>::const_iterator memoperands_begin() const
{ return MemOperands.begin(); }
std::list<MachineMemOperand>::const_iterator memoperands_end() const
{ return MemOperands.end(); }
bool memoperands_empty() const { return MemOperands.empty(); }
mmo_iterator memoperands_begin() const { return MemRefs; }
mmo_iterator memoperands_end() const { return MemRefsEnd; }
bool memoperands_empty() const { return MemRefsEnd == MemRefs; }
/// hasOneMemOperand - Return true if this instruction has exactly one
/// MachineMemOperand.
bool hasOneMemOperand() const {
return !memoperands_empty() &&
next(memoperands_begin()) == memoperands_end();
return MemRefsEnd - MemRefs == 1;
}
/// isIdenticalTo - Return true if this instruction is identical to (same
@ -319,13 +316,17 @@ public:
///
void RemoveOperand(unsigned i);
/// addMemOperand - Add a MachineMemOperand to the machine instruction,
/// referencing arbitrary storage.
void addMemOperand(MachineFunction &MF,
const MachineMemOperand &MO);
/// addMemOperand - Add a MachineMemOperand to the machine instruction.
/// This function should be used only occasionally. The setMemRefs function
/// is the primary method for setting up a MachineInstr's MemRefs list.
void addMemOperand(MachineFunction &MF, MachineMemOperand *MO);
/// clearMemOperands - Erase all of this MachineInstr's MachineMemOperands.
void clearMemOperands(MachineFunction &MF);
/// setMemRefs - Assign this MachineInstr's memory reference descriptor
/// list. This does not transfer ownership.
void setMemRefs(mmo_iterator NewMemRefs, mmo_iterator NewMemRefsEnd) {
MemRefs = NewMemRefs;
MemRefsEnd = NewMemRefsEnd;
}
private:
/// getRegInfo - If this instruction is embedded into a MachineFunction,

View File

@ -121,7 +121,7 @@ public:
return *this;
}
const MachineInstrBuilder &addMemOperand(const MachineMemOperand &MMO) const {
const MachineInstrBuilder &addMemOperand(MachineMemOperand *MMO) const {
MI->addMemOperand(*MI->getParent()->getParent(), MMO);
return *this;
}
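A sketch of how the updated builder interface is meant to be used (the instruction, registers, and operands here are illustrative; only the addMemOperand(MachineMemOperand*) call reflects the changed API):

#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
using namespace llvm;

// Emit a hypothetical frame-slot load and attach its memory reference
// through the builder. addMemOperand is the occasional-use path; code that
// already has the full list should prefer MachineInstr::setMemRefs.
static void emitFrameLoad(MachineBasicBlock &MBB,
                          MachineBasicBlock::iterator InsertPos, DebugLoc DL,
                          const TargetInstrDesc &LoadDesc, unsigned DestReg,
                          int FrameIdx, uint64_t Size, unsigned Align) {
  MachineFunction &MF = *MBB.getParent();
  MachineMemOperand *MMO =
    MF.getMachineMemOperand(PseudoSourceValue::getFixedStack(FrameIdx),
                            MachineMemOperand::MOLoad, /*Offset=*/0,
                            Size, Align);
  BuildMI(MBB, InsertPos, DL, LoadDesc, DestReg)
    .addFrameIndex(FrameIdx).addImm(0)
    .addMemOperand(MMO);
}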

View File

@ -88,6 +88,16 @@ public:
bool isStore() const { return Flags & MOStore; }
bool isVolatile() const { return Flags & MOVolatile; }
/// refineAlignment - Update this MachineMemOperand to reflect the alignment
/// of MMO, if it has a greater alignment. This must only be used when the
/// new alignment applies to all users of this MachineMemOperand.
void refineAlignment(const MachineMemOperand *MMO);
/// setValue - Change the SourceValue for this MachineMemOperand. This
/// should only be used when an object is being relocated and all references
/// to it are being updated.
void setValue(const Value *NewSV) { V = NewSV; }
/// Profile - Gather unique data for the object.
///
void Profile(FoldingSetNodeID &ID) const;

View File

@ -516,8 +516,6 @@ namespace llvm {
///
void EmitNoop();
void AddMemOperand(MachineInstr *MI, const MachineMemOperand &MO);
void EmitPhysRegCopy(SUnit *SU, DenseMap<SUnit*, unsigned> &VRBaseMap);
private:

View File

@ -19,6 +19,7 @@
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/Support/RecyclingAllocator.h"
#include "llvm/Target/TargetMachine.h"
#include <cassert>
#include <vector>
@ -514,15 +515,23 @@ public:
SDValue getAtomic(unsigned Opcode, DebugLoc dl, EVT MemVT, SDValue Chain,
SDValue Ptr, SDValue Cmp, SDValue Swp, const Value* PtrVal,
unsigned Alignment=0);
SDValue getAtomic(unsigned Opcode, DebugLoc dl, EVT MemVT, SDValue Chain,
SDValue Ptr, SDValue Cmp, SDValue Swp,
MachineMemOperand *MMO);
/// getAtomic - Gets a node for an atomic op, produces result and chain and
/// takes 2 operands.
SDValue getAtomic(unsigned Opcode, DebugLoc dl, EVT MemVT, SDValue Chain,
SDValue Ptr, SDValue Val, const Value* PtrVal,
unsigned Alignment = 0);
SDValue getAtomic(unsigned Opcode, DebugLoc dl, EVT MemVT, SDValue Chain,
SDValue Ptr, SDValue Val,
MachineMemOperand *MMO);
/// getMemIntrinsicNode - Creates a MemIntrinsicNode that may produce a
/// result and takes a list of operands.
/// result and takes a list of operands. Opcode may be INTRINSIC_VOID,
/// INTRINSIC_W_CHAIN, or a target-specific opcode with a value not
/// less than FIRST_TARGET_MEMORY_OPCODE.
SDValue getMemIntrinsicNode(unsigned Opcode, DebugLoc dl,
const EVT *VTs, unsigned NumVTs,
const SDValue *Ops, unsigned NumOps,
@ -536,6 +545,10 @@ public:
unsigned Align = 0, bool Vol = false,
bool ReadMem = true, bool WriteMem = true);
SDValue getMemIntrinsicNode(unsigned Opcode, DebugLoc dl, SDVTList VTList,
const SDValue *Ops, unsigned NumOps,
EVT MemVT, MachineMemOperand *MMO);
/// getMergeValues - Create a MERGE_VALUES node from the given operands.
SDValue getMergeValues(const SDValue *Ops, unsigned NumOps, DebugLoc dl);
@ -555,25 +568,28 @@ public:
EVT VT, SDValue Chain, SDValue Ptr, SDValue Offset,
const Value *SV, int SVOffset, EVT MemVT,
bool isVolatile=false, unsigned Alignment=0);
SDValue getLoad(ISD::MemIndexedMode AM, DebugLoc dl, ISD::LoadExtType ExtType,
EVT VT, SDValue Chain, SDValue Ptr, SDValue Offset,
EVT MemVT, MachineMemOperand *MMO);
/// getStore - Helper function to build ISD::STORE nodes.
///
SDValue getStore(SDValue Chain, DebugLoc dl, SDValue Val, SDValue Ptr,
const Value *SV, int SVOffset, bool isVolatile=false,
unsigned Alignment=0);
SDValue getStore(SDValue Chain, DebugLoc dl, SDValue Val, SDValue Ptr,
MachineMemOperand *MMO);
SDValue getTruncStore(SDValue Chain, DebugLoc dl, SDValue Val, SDValue Ptr,
const Value *SV, int SVOffset, EVT TVT,
bool isVolatile=false, unsigned Alignment=0);
SDValue getTruncStore(SDValue Chain, DebugLoc dl, SDValue Val, SDValue Ptr,
EVT TVT, MachineMemOperand *MMO);
SDValue getIndexedStore(SDValue OrigStoe, DebugLoc dl, SDValue Base,
SDValue Offset, ISD::MemIndexedMode AM);
/// getSrcValue - Construct a node to track a Value* through the backend.
SDValue getSrcValue(const Value *v);
/// getMemOperand - Construct a node to track a memory reference
/// through the backend.
SDValue getMemOperand(const MachineMemOperand &MO);
/// getShiftAmountOperand - Return the specified value casted to
/// the target's desired shift amount type.
SDValue getShiftAmountOperand(SDValue Op);
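For the overloads that accept a MachineMemOperand directly, one intended use is reusing an existing node's memory reference when rebuilding it, instead of re-deriving the SrcValue, offset, alignment, and volatility by hand. A hedged sketch (hypothetical helper; it assumes an unindexed, non-truncating store and that NewVal has the original stored value type):

#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
using namespace llvm;

// Rebuild a store with a replacement value but the same chain, address,
// and MachineMemOperand as the original node.
static SDValue replaceStoredValue(SelectionDAG &DAG, StoreSDNode *St,
                                  SDValue NewVal) {
  return DAG.getStore(St->getChain(), St->getDebugLoc(), NewVal,
                      St->getBasePtr(), St->getMemOperand());
}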
@ -682,8 +698,10 @@ public:
SDNode *getMachineNode(unsigned Opcode, DebugLoc dl, EVT VT1, EVT VT2,
EVT VT3, EVT VT4, const SDValue *Ops, unsigned NumOps);
SDNode *getMachineNode(unsigned Opcode, DebugLoc dl,
const std::vector<EVT> &ResultTys, const SDValue *Ops,
unsigned NumOps);
const std::vector<EVT> &ResultTys, const SDValue *Ops,
unsigned NumOps);
SDNode *getMachineNode(unsigned Opcode, DebugLoc dl, SDVTList VTs,
const SDValue *Ops, unsigned NumOps);
/// getTargetExtractSubreg - A convenience function for creating
/// TargetInstrInfo::EXTRACT_SUBREG nodes.

View File

@ -27,13 +27,10 @@
#include "llvm/ADT/STLExtras.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/RecyclingAllocator.h"
#include "llvm/Support/DataTypes.h"
#include "llvm/Support/DebugLoc.h"
#include <cassert>
#include <climits>
namespace llvm {
@ -536,11 +533,6 @@ namespace ISD {
// make reference to a value in the LLVM IR.
SRCVALUE,
// MEMOPERAND - This is a node that contains a MachineMemOperand which
// records information about a memory reference. This is used to make
// AliasAnalysis queries from the backend.
MEMOPERAND,
// PCMARKER - This corresponds to the pcmarker intrinsic.
PCMARKER,
@ -617,10 +609,17 @@ namespace ISD {
ATOMIC_LOAD_UMIN,
ATOMIC_LOAD_UMAX,
// BUILTIN_OP_END - This must be the last enum value in this list.
/// BUILTIN_OP_END - This must be the last enum value in this list.
/// The target-specific pre-isel opcode values start here.
BUILTIN_OP_END
};
/// FIRST_TARGET_MEMORY_OPCODE - Target-specific pre-isel operations
/// which do not reference a specific memory location should be less than
/// this value. Those that do must not be less than this value, and can
/// be used with SelectionDAG::getMemIntrinsicNode.
static const int FIRST_TARGET_MEMORY_OPCODE = 1 << 14;
/// Node predicates
/// isBuildVectorAllOnes - Return true if the specified node is a
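A sketch of how a target's pre-isel opcode enum is expected to be split around this boundary (MYTGTISD and the opcode names are invented for illustration):

#include "llvm/CodeGen/SelectionDAGNodes.h"

namespace MYTGTISD {
  enum NodeType {
    // Nodes with no memory reference stay in the ordinary target range.
    FIRST_NUMBER = llvm::ISD::BUILTIN_OP_END,
    DOT_PRODUCT,
    // Nodes that carry a MachineMemOperand must start at or above the
    // memory boundary so SelectionDAG::getMemIntrinsicNode accepts them
    // and the framework knows the information is available.
    VLD_SPECIAL = llvm::ISD::FIRST_TARGET_MEMORY_OPCODE,
    VST_SPECIAL
  };
}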
@ -867,6 +866,7 @@ public:
inline unsigned getNumOperands() const;
inline const SDValue &getOperand(unsigned i) const;
inline uint64_t getConstantOperandVal(unsigned i) const;
inline bool isTargetMemoryOpcode() const;
inline bool isTargetOpcode() const;
inline bool isMachineOpcode() const;
inline unsigned getMachineOpcode() const;
@ -1031,17 +1031,17 @@ class SDNode : public FoldingSetNode, public ilist_node<SDNode> {
private:
/// NodeType - The operation that this node performs.
///
short NodeType;
int16_t NodeType;
/// OperandsNeedDelete - This is true if OperandList was new[]'d. If true,
/// then they will be delete[]'d when the node is destroyed.
unsigned short OperandsNeedDelete : 1;
uint16_t OperandsNeedDelete : 1;
protected:
/// SubclassData - This member is defined by this class, but is not used for
/// anything. Subclasses can use it to hold whatever state they find useful.
/// This field is initialized to zero by the ctor.
unsigned short SubclassData : 15;
uint16_t SubclassData : 15;
private:
/// NodeId - Unique id per SDNode in the DAG.
@ -1085,6 +1085,13 @@ public:
/// \<target\>ISD namespace).
bool isTargetOpcode() const { return NodeType >= ISD::BUILTIN_OP_END; }
/// isTargetMemoryOpcode - Test if this node has a target-specific
/// memory-referencing opcode (in the \<target\>ISD namespace and
/// greater than FIRST_TARGET_MEMORY_OPCODE).
bool isTargetMemoryOpcode() const {
return NodeType >= ISD::FIRST_TARGET_MEMORY_OPCODE;
}
/// isMachineOpcode - Test if this node has a post-isel opcode, directly
/// corresponding to a MachineInstr opcode.
bool isMachineOpcode() const { return NodeType < 0; }
@ -1417,6 +1424,9 @@ inline uint64_t SDValue::getConstantOperandVal(unsigned i) const {
inline bool SDValue::isTargetOpcode() const {
return Node->isTargetOpcode();
}
inline bool SDValue::isTargetMemoryOpcode() const {
return Node->isTargetMemoryOpcode();
}
inline bool SDValue::isMachineOpcode() const {
return Node->isMachineOpcode();
}
@ -1515,47 +1525,55 @@ private:
// MemoryVT - VT of in-memory value.
EVT MemoryVT;
//! SrcValue - Memory location for alias analysis.
const Value *SrcValue;
protected:
/// MMO - Memory reference information.
MachineMemOperand *MMO;
//! SVOffset - Memory location offset. Note that base is defined in MemSDNode
int SVOffset;
public:
MemSDNode(unsigned Opc, DebugLoc dl, SDVTList VTs, EVT MemoryVT,
const Value *srcValue, int SVOff, unsigned alignment,
bool isvolatile);
MachineMemOperand *MMO);
MemSDNode(unsigned Opc, DebugLoc dl, SDVTList VTs, const SDValue *Ops,
unsigned NumOps, EVT MemoryVT, const Value *srcValue, int SVOff,
unsigned alignment, bool isvolatile);
unsigned NumOps, EVT MemoryVT, MachineMemOperand *MMO);
bool readMem() const { return MMO->isLoad(); }
bool writeMem() const { return MMO->isStore(); }
/// Returns alignment and volatility of the memory access
unsigned getOriginalAlignment() const {
return (1u << (SubclassData >> 6)) >> 1;
return MMO->getBaseAlignment();
}
unsigned getAlignment() const {
return MinAlign(getOriginalAlignment(), SVOffset);
return MMO->getAlignment();
}
bool isVolatile() const { return (SubclassData >> 5) & 1; }
/// getRawSubclassData - Return the SubclassData value, which contains an
/// encoding of the alignment and volatile information, as well as bits
/// used by subclasses. This function should only be used to compute a
/// FoldingSetNodeID value.
/// encoding of the volatile flag, as well as bits used by subclasses. This
/// function should only be used to compute a FoldingSetNodeID value.
unsigned getRawSubclassData() const {
return SubclassData;
}
bool isVolatile() const { return (SubclassData >> 5) & 1; }
/// Returns the SrcValue and offset that describes the location of the access
const Value *getSrcValue() const { return SrcValue; }
int getSrcValueOffset() const { return SVOffset; }
const Value *getSrcValue() const { return MMO->getValue(); }
int64_t getSrcValueOffset() const { return MMO->getOffset(); }
/// getMemoryVT - Return the type of the in-memory value.
EVT getMemoryVT() const { return MemoryVT; }
/// getMemOperand - Return a MachineMemOperand object describing the memory
/// reference performed by operation.
MachineMemOperand getMemOperand() const;
MachineMemOperand *getMemOperand() const { return MMO; }
/// refineAlignment - Update this MemSDNode's MachineMemOperand information
/// to reflect the alignment of NewMMO, if it has a greater alignment.
/// This must only be used when the new alignment applies to all users of
/// this MachineMemOperand.
void refineAlignment(const MachineMemOperand *NewMMO) {
MMO->refineAlignment(NewMMO);
}
const SDValue &getChain() const { return getOperand(0); }
const SDValue &getBasePtr() const {
@ -1583,7 +1601,7 @@ public:
N->getOpcode() == ISD::ATOMIC_LOAD_UMAX ||
N->getOpcode() == ISD::INTRINSIC_W_CHAIN ||
N->getOpcode() == ISD::INTRINSIC_VOID ||
N->isTargetOpcode();
N->isTargetMemoryOpcode();
}
};
@ -1603,17 +1621,18 @@ public:
// Align: alignment of memory
AtomicSDNode(unsigned Opc, DebugLoc dl, SDVTList VTL, EVT MemVT,
SDValue Chain, SDValue Ptr,
SDValue Cmp, SDValue Swp, const Value* SrcVal,
unsigned Align=0)
: MemSDNode(Opc, dl, VTL, MemVT, SrcVal, /*SVOffset=*/0,
Align, /*isVolatile=*/true) {
SDValue Cmp, SDValue Swp, MachineMemOperand *MMO)
: MemSDNode(Opc, dl, VTL, MemVT, MMO) {
assert(readMem() && "Atomic MachineMemOperand is not a load!");
assert(writeMem() && "Atomic MachineMemOperand is not a store!");
InitOperands(Ops, Chain, Ptr, Cmp, Swp);
}
AtomicSDNode(unsigned Opc, DebugLoc dl, SDVTList VTL, EVT MemVT,
SDValue Chain, SDValue Ptr,
SDValue Val, const Value* SrcVal, unsigned Align=0)
: MemSDNode(Opc, dl, VTL, MemVT, SrcVal, /*SVOffset=*/0,
Align, /*isVolatile=*/true) {
SDValue Val, MachineMemOperand *MMO)
: MemSDNode(Opc, dl, VTL, MemVT, MMO) {
assert(readMem() && "Atomic MachineMemOperand is not a load!");
assert(writeMem() && "Atomic MachineMemOperand is not a store!");
InitOperands(Ops, Chain, Ptr, Val);
}
@ -1643,24 +1662,18 @@ public:
}
};
/// MemIntrinsicSDNode - This SDNode is used for target intrinsic that touches
/// memory and need an associated memory operand.
///
/// MemIntrinsicSDNode - This SDNode is used for target intrinsics that touch
/// memory and need an associated MachineMemOperand. Its opcode may be
/// INTRINSIC_VOID, INTRINSIC_W_CHAIN, or a target-specific opcode with a
/// value not less than FIRST_TARGET_MEMORY_OPCODE.
class MemIntrinsicSDNode : public MemSDNode {
bool ReadMem; // Intrinsic reads memory
bool WriteMem; // Intrinsic writes memory
public:
MemIntrinsicSDNode(unsigned Opc, DebugLoc dl, SDVTList VTs,
const SDValue *Ops, unsigned NumOps,
EVT MemoryVT, const Value *srcValue, int SVO,
unsigned Align, bool Vol, bool ReadMem, bool WriteMem)
: MemSDNode(Opc, dl, VTs, Ops, NumOps, MemoryVT, srcValue, SVO, Align, Vol),
ReadMem(ReadMem), WriteMem(WriteMem) {
EVT MemoryVT, MachineMemOperand *MMO)
: MemSDNode(Opc, dl, VTs, Ops, NumOps, MemoryVT, MMO) {
}
bool readMem() const { return ReadMem; }
bool writeMem() const { return WriteMem; }
// Methods to support isa and dyn_cast
static bool classof(const MemIntrinsicSDNode *) { return true; }
static bool classof(const SDNode *N) {
@ -1668,7 +1681,7 @@ public:
// early a node with a target opcode can be of this class
return N->getOpcode() == ISD::INTRINSIC_W_CHAIN ||
N->getOpcode() == ISD::INTRINSIC_VOID ||
N->isTargetOpcode();
N->isTargetMemoryOpcode();
}
};
@ -1956,10 +1969,6 @@ public:
/// used when the SelectionDAG needs to make a simple reference to something
/// in the LLVM IR representation.
///
/// Note that this is not used for carrying alias information; that is done
/// with MemOperandSDNode, which includes a Value which is required to be a
/// pointer, and several other fields specific to memory references.
///
class SrcValueSDNode : public SDNode {
const Value *V;
friend class SelectionDAG;
@ -1979,28 +1988,6 @@ public:
};
/// MemOperandSDNode - An SDNode that holds a MachineMemOperand. This is
/// used to represent a reference to memory after ISD::LOAD
/// and ISD::STORE have been lowered.
///
class MemOperandSDNode : public SDNode {
friend class SelectionDAG;
/// Create a MachineMemOperand node
explicit MemOperandSDNode(const MachineMemOperand &mo)
: SDNode(ISD::MEMOPERAND, DebugLoc::getUnknownLoc(),
getSDVTList(MVT::Other)), MO(mo) {}
public:
/// MO - The contained MachineMemOperand.
const MachineMemOperand MO;
static bool classof(const MemOperandSDNode *) { return true; }
static bool classof(const SDNode *N) {
return N->getOpcode() == ISD::MEMOPERAND;
}
};
class RegisterSDNode : public SDNode {
unsigned Reg;
friend class SelectionDAG;
@ -2269,9 +2256,8 @@ class LSBaseSDNode : public MemSDNode {
public:
LSBaseSDNode(ISD::NodeType NodeTy, DebugLoc dl, SDValue *Operands,
unsigned numOperands, SDVTList VTs, ISD::MemIndexedMode AM,
EVT VT, const Value *SV, int SVO, unsigned Align, bool Vol)
: MemSDNode(NodeTy, dl, VTs, VT, SV, SVO, Align, Vol) {
assert(Align != 0 && "Loads and stores should have non-zero aligment");
EVT MemVT, MachineMemOperand *MMO)
: MemSDNode(NodeTy, dl, VTs, MemVT, MMO) {
SubclassData |= AM << 2;
assert(getAddressingMode() == AM && "MemIndexedMode encoding error!");
InitOperands(Ops, Operands, numOperands);
@ -2307,12 +2293,14 @@ public:
class LoadSDNode : public LSBaseSDNode {
friend class SelectionDAG;
LoadSDNode(SDValue *ChainPtrOff, DebugLoc dl, SDVTList VTs,
ISD::MemIndexedMode AM, ISD::LoadExtType ETy, EVT LVT,
const Value *SV, int O=0, unsigned Align=0, bool Vol=false)
ISD::MemIndexedMode AM, ISD::LoadExtType ETy, EVT MemVT,
MachineMemOperand *MMO)
: LSBaseSDNode(ISD::LOAD, dl, ChainPtrOff, 3,
VTs, AM, LVT, SV, O, Align, Vol) {
VTs, AM, MemVT, MMO) {
SubclassData |= (unsigned short)ETy;
assert(getExtensionType() == ETy && "LoadExtType encoding error!");
assert(readMem() && "Load MachineMemOperand is not a load!");
assert(!writeMem() && "Load MachineMemOperand is a store!");
}
public:
@ -2336,12 +2324,14 @@ public:
class StoreSDNode : public LSBaseSDNode {
friend class SelectionDAG;
StoreSDNode(SDValue *ChainValuePtrOff, DebugLoc dl, SDVTList VTs,
ISD::MemIndexedMode AM, bool isTrunc, EVT SVT,
const Value *SV, int O=0, unsigned Align=0, bool Vol=false)
ISD::MemIndexedMode AM, bool isTrunc, EVT MemVT,
MachineMemOperand *MMO)
: LSBaseSDNode(ISD::STORE, dl, ChainValuePtrOff, 4,
VTs, AM, SVT, SV, O, Align, Vol) {
VTs, AM, MemVT, MMO) {
SubclassData |= (unsigned short)isTrunc;
assert(isTruncatingStore() == isTrunc && "isTrunc encoding error!");
assert(!readMem() && "Store MachineMemOperand is a load!");
assert(writeMem() && "Store MachineMemOperand is not a store!");
}
public:
@ -2360,6 +2350,44 @@ public:
}
};
/// MachineSDNode - An SDNode that represents everything that will be needed
/// to construct a MachineInstr. These nodes are created during the
/// instruction selection proper phase.
///
class MachineSDNode : public SDNode {
public:
typedef MachineMemOperand **mmo_iterator;
private:
friend class SelectionDAG;
MachineSDNode(unsigned Opc, const DebugLoc DL, SDVTList VTs)
: SDNode(Opc, DL, VTs), MemRefs(0), MemRefsEnd(0) {}
/// LocalOperands - Operands for this instruction, if they fit here. If
/// they don't, this field is unused.
SDUse LocalOperands[4];
/// MemRefs - Memory reference descriptions for this instruction.
mmo_iterator MemRefs;
mmo_iterator MemRefsEnd;
public:
mmo_iterator memoperands_begin() const { return MemRefs; }
mmo_iterator memoperands_end() const { return MemRefsEnd; }
bool memoperands_empty() const { return MemRefsEnd == MemRefs; }
/// setMemRefs - Assign this MachineSDNodes's memory reference descriptor
/// list. This does not transfer ownership.
void setMemRefs(mmo_iterator NewMemRefs, mmo_iterator NewMemRefsEnd) {
MemRefs = NewMemRefs;
MemRefsEnd = NewMemRefsEnd;
}
static bool classof(const MachineSDNode *) { return true; }
static bool classof(const SDNode *N) {
return N->isMachineOpcode();
}
};
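A sketch of the intended instruction-selection usage of MachineSDNode (SelectMySpecialLoad and MachineOpc are hypothetical; the allocateMemRefsArray/setMemRefs calls are the interfaces this patch adds, and ScheduleDAGSDNodes::EmitNode later transfers the list onto the MachineInstr):

#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
using namespace llvm;

// Select a memory-touching node to a machine node and hand the original
// MachineMemOperand to it, so the reference survives into the MachineInstr.
static SDNode *SelectMySpecialLoad(SelectionDAG &DAG, unsigned MachineOpc,
                                   MemSDNode *N, SDValue Chain, SDValue Ptr) {
  SDVTList VTs = DAG.getVTList(N->getValueType(0), MVT::Other);
  SDValue Ops[] = { Ptr, Chain };
  SDNode *ResNode = DAG.getMachineNode(MachineOpc, N->getDebugLoc(),
                                       VTs, Ops, 2);
  // One-element MemRefs list, owned by the MachineFunction.
  MachineSDNode::mmo_iterator MemRefs =
    DAG.getMachineFunction().allocateMemRefsArray(1);
  MemRefs[0] = N->getMemOperand();
  cast<MachineSDNode>(ResNode)->setMemRefs(MemRefs, MemRefs + 1);
  return ResNode;
}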
class SDNodeIterator : public std::iterator<std::forward_iterator_tag,
SDNode, ptrdiff_t> {

View File

@ -25,6 +25,7 @@
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
@ -1443,12 +1444,12 @@ bool LiveIntervals::isReMaterializable(const LiveInterval &li,
// If the instruction accesses memory and the memory could be non-constant,
// assume the instruction is not rematerializable.
for (std::list<MachineMemOperand>::const_iterator
I = MI->memoperands_begin(), E = MI->memoperands_end(); I != E; ++I){
const MachineMemOperand &MMO = *I;
if (MMO.isVolatile() || MMO.isStore())
for (MachineInstr::mmo_iterator I = MI->memoperands_begin(),
E = MI->memoperands_end(); I != E; ++I){
const MachineMemOperand *MMO = *I;
if (MMO->isVolatile() || MMO->isStore())
return false;
const Value *V = MMO.getValue();
const Value *V = MMO->getValue();
if (!V)
return false;
if (const PseudoSourceValue *PSV = dyn_cast<PseudoSourceValue>(V)) {

View File

@ -190,11 +190,6 @@ MachineFunction::CloneMachineInstr(const MachineInstr *Orig) {
///
void
MachineFunction::DeleteMachineInstr(MachineInstr *MI) {
// Clear the instructions memoperands. This must be done manually because
// the instruction's parent pointer is now null, so it can't properly
// deallocate them on its own.
MI->clearMemOperands(*this);
MI->~MachineInstr();
InstructionRecycler.Deallocate(Allocator, MI);
}
@ -217,6 +212,29 @@ MachineFunction::DeleteMachineBasicBlock(MachineBasicBlock *MBB) {
BasicBlockRecycler.Deallocate(Allocator, MBB);
}
MachineMemOperand *
MachineFunction::getMachineMemOperand(const Value *v, unsigned f,
int64_t o, uint64_t s,
unsigned base_alignment) {
return new (Allocator.Allocate<MachineMemOperand>())
MachineMemOperand(v, f, o, s, base_alignment);
}
MachineMemOperand *
MachineFunction::getMachineMemOperand(const MachineMemOperand *MMO,
int64_t Offset, uint64_t Size) {
return new (Allocator.Allocate<MachineMemOperand>())
MachineMemOperand(MMO->getValue(), MMO->getFlags(),
int64_t(uint64_t(MMO->getOffset()) +
uint64_t(Offset)),
Size, MMO->getBaseAlignment());
}
MachineInstr::mmo_iterator
MachineFunction::allocateMemRefsArray(unsigned long Num) {
return Allocator.Allocate<MachineMemOperand *>(Num);
}
void MachineFunction::dump() const {
print(errs());
}

View File

@ -17,6 +17,7 @@
#include "llvm/Value.h"
#include "llvm/Assembly/Writer.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/Target/TargetMachine.h"
@ -298,40 +299,56 @@ void MachineMemOperand::Profile(FoldingSetNodeID &ID) const {
ID.AddInteger(Flags);
}
raw_ostream &llvm::operator<<(raw_ostream &OS, const MachineMemOperand &MRO) {
assert((MRO.isLoad() || MRO.isStore()) &&
void MachineMemOperand::refineAlignment(const MachineMemOperand *MMO) {
// The Value and Offset may differ due to CSE. But the flags and size
// should be the same.
assert(MMO->getFlags() == getFlags() && "Flags mismatch!");
assert(MMO->getSize() == getSize() && "Size mismatch!");
if (MMO->getBaseAlignment() >= getBaseAlignment()) {
// Update the alignment value.
Flags = (Flags & 7) | ((Log2_32(MMO->getBaseAlignment()) + 1) << 3);
// Also update the base and offset, because the new alignment may
// not be applicable with the old ones.
V = MMO->getValue();
Offset = MMO->getOffset();
}
}
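A standalone illustration of the flag encoding used above (an assumption about the internal layout, consistent with the shifts in refineAlignment: the low three bits hold the load/store/volatile flags, and the remaining bits hold log2 of the base alignment plus one, so a stored zero means no alignment recorded):

#include "llvm/Support/MathExtras.h"

static unsigned packAlignment(unsigned LSVBits, unsigned BaseAlign) {
  return (LSVBits & 7) | ((llvm::Log2_32(BaseAlign) + 1) << 3);
}
static unsigned unpackBaseAlignment(unsigned Flags) {
  return (1u << (Flags >> 3)) >> 1;  // e.g. a stored field of 5 decodes to 16
}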
raw_ostream &llvm::operator<<(raw_ostream &OS, const MachineMemOperand &MMO) {
assert((MMO.isLoad() || MMO.isStore()) &&
"SV has to be a load, store or both.");
if (MRO.isVolatile())
if (MMO.isVolatile())
OS << "Volatile ";
if (MRO.isLoad())
if (MMO.isLoad())
OS << "LD";
if (MRO.isStore())
if (MMO.isStore())
OS << "ST";
OS << MRO.getSize();
OS << MMO.getSize();
// Print the address information.
OS << "[";
if (!MRO.getValue())
if (!MMO.getValue())
OS << "<unknown>";
else
WriteAsOperand(OS, MRO.getValue(), /*PrintType=*/false);
WriteAsOperand(OS, MMO.getValue(), /*PrintType=*/false);
// If the alignment of the memory reference itself differs from the alignment
// of the base pointer, print the base alignment explicitly, next to the base
// pointer.
if (MRO.getBaseAlignment() != MRO.getAlignment())
OS << "(align=" << MRO.getBaseAlignment() << ")";
if (MMO.getBaseAlignment() != MMO.getAlignment())
OS << "(align=" << MMO.getBaseAlignment() << ")";
if (MRO.getOffset() != 0)
OS << "+" << MRO.getOffset();
if (MMO.getOffset() != 0)
OS << "+" << MMO.getOffset();
OS << "]";
// Print the alignment of the reference.
if (MRO.getBaseAlignment() != MRO.getAlignment() ||
MRO.getBaseAlignment() != MRO.getSize())
OS << "(align=" << MRO.getAlignment() << ")";
if (MMO.getBaseAlignment() != MMO.getAlignment() ||
MMO.getBaseAlignment() != MMO.getSize())
OS << "(align=" << MMO.getAlignment() << ")";
return OS;
}
@ -343,7 +360,8 @@ raw_ostream &llvm::operator<<(raw_ostream &OS, const MachineMemOperand &MRO) {
/// MachineInstr ctor - This constructor creates a dummy MachineInstr with
/// TID NULL and no operands.
MachineInstr::MachineInstr()
: TID(0), NumImplicitOps(0), Parent(0), debugLoc(DebugLoc::getUnknownLoc()) {
: TID(0), NumImplicitOps(0), MemRefs(0), MemRefsEnd(0),
Parent(0), debugLoc(DebugLoc::getUnknownLoc()) {
// Make sure that we get added to a machine basicblock
LeakDetector::addGarbageObject(this);
}
@ -362,7 +380,7 @@ void MachineInstr::addImplicitDefUseOperands() {
/// TargetInstrDesc or the numOperands if it is not zero. (for
/// instructions with variable number of operands).
MachineInstr::MachineInstr(const TargetInstrDesc &tid, bool NoImp)
: TID(&tid), NumImplicitOps(0), Parent(0),
: TID(&tid), NumImplicitOps(0), MemRefs(0), MemRefsEnd(0), Parent(0),
debugLoc(DebugLoc::getUnknownLoc()) {
if (!NoImp && TID->getImplicitDefs())
for (const unsigned *ImpDefs = TID->getImplicitDefs(); *ImpDefs; ++ImpDefs)
@ -380,7 +398,8 @@ MachineInstr::MachineInstr(const TargetInstrDesc &tid, bool NoImp)
/// MachineInstr ctor - As above, but with a DebugLoc.
MachineInstr::MachineInstr(const TargetInstrDesc &tid, const DebugLoc dl,
bool NoImp)
: TID(&tid), NumImplicitOps(0), Parent(0), debugLoc(dl) {
: TID(&tid), NumImplicitOps(0), MemRefs(0), MemRefsEnd(0),
Parent(0), debugLoc(dl) {
if (!NoImp && TID->getImplicitDefs())
for (const unsigned *ImpDefs = TID->getImplicitDefs(); *ImpDefs; ++ImpDefs)
NumImplicitOps++;
@ -399,7 +418,7 @@ MachineInstr::MachineInstr(const TargetInstrDesc &tid, const DebugLoc dl,
/// basic block.
///
MachineInstr::MachineInstr(MachineBasicBlock *MBB, const TargetInstrDesc &tid)
: TID(&tid), NumImplicitOps(0), Parent(0),
: TID(&tid), NumImplicitOps(0), MemRefs(0), MemRefsEnd(0), Parent(0),
debugLoc(DebugLoc::getUnknownLoc()) {
assert(MBB && "Cannot use inserting ctor with null basic block!");
if (TID->ImplicitDefs)
@ -419,7 +438,8 @@ MachineInstr::MachineInstr(MachineBasicBlock *MBB, const TargetInstrDesc &tid)
///
MachineInstr::MachineInstr(MachineBasicBlock *MBB, const DebugLoc dl,
const TargetInstrDesc &tid)
: TID(&tid), NumImplicitOps(0), Parent(0), debugLoc(dl) {
: TID(&tid), NumImplicitOps(0), MemRefs(0), MemRefsEnd(0),
Parent(0), debugLoc(dl) {
assert(MBB && "Cannot use inserting ctor with null basic block!");
if (TID->ImplicitDefs)
for (const unsigned *ImpDefs = TID->getImplicitDefs(); *ImpDefs; ++ImpDefs)
@ -437,8 +457,9 @@ MachineInstr::MachineInstr(MachineBasicBlock *MBB, const DebugLoc dl,
/// MachineInstr ctor - Copies MachineInstr arg exactly
///
MachineInstr::MachineInstr(MachineFunction &MF, const MachineInstr &MI)
: TID(&MI.getDesc()), NumImplicitOps(0), Parent(0),
debugLoc(MI.getDebugLoc()) {
: TID(&MI.getDesc()), NumImplicitOps(0),
MemRefs(MI.MemRefs), MemRefsEnd(MI.MemRefsEnd),
Parent(0), debugLoc(MI.getDebugLoc()) {
Operands.reserve(MI.getNumOperands());
// Add operands
@ -446,11 +467,6 @@ MachineInstr::MachineInstr(MachineFunction &MF, const MachineInstr &MI)
addOperand(MI.getOperand(i));
NumImplicitOps = MI.NumImplicitOps;
// Add memory operands.
for (std::list<MachineMemOperand>::const_iterator i = MI.memoperands_begin(),
j = MI.memoperands_end(); i != j; ++i)
addMemOperand(MF, *i);
// Set parent to null.
Parent = 0;
@ -459,8 +475,6 @@ MachineInstr::MachineInstr(MachineFunction &MF, const MachineInstr &MI)
MachineInstr::~MachineInstr() {
LeakDetector::removeGarbageObject(this);
assert(MemOperands.empty() &&
"MachineInstr being deleted with live memoperands!");
#ifndef NDEBUG
for (unsigned i = 0, e = Operands.size(); i != e; ++i) {
assert(Operands[i].ParentMI == this && "ParentMI mismatch!");
@ -621,18 +635,24 @@ void MachineInstr::RemoveOperand(unsigned OpNo) {
}
}
/// addMemOperand - Add a MachineMemOperand to the machine instruction,
/// referencing arbitrary storage.
/// addMemOperand - Add a MachineMemOperand to the machine instruction.
/// This function should be used only occasionally. The setMemRefs function
/// is the primary method for setting up a MachineInstr's MemRefs list.
void MachineInstr::addMemOperand(MachineFunction &MF,
const MachineMemOperand &MO) {
MemOperands.push_back(MO);
}
MachineMemOperand *MO) {
mmo_iterator OldMemRefs = MemRefs;
mmo_iterator OldMemRefsEnd = MemRefsEnd;
/// clearMemOperands - Erase all of this MachineInstr's MachineMemOperands.
void MachineInstr::clearMemOperands(MachineFunction &MF) {
MemOperands.clear();
}
size_t NewNum = (MemRefsEnd - MemRefs) + 1;
mmo_iterator NewMemRefs = MF.allocateMemRefsArray(NewNum);
mmo_iterator NewMemRefsEnd = NewMemRefs + NewNum;
std::copy(OldMemRefs, OldMemRefsEnd, NewMemRefs);
NewMemRefs[NewNum - 1] = MO;
MemRefs = NewMemRefs;
MemRefsEnd = NewMemRefsEnd;
}
/// removeFromParent - This method unlinks 'this' from the containing basic
/// block, and returns it, but does not delete it.
@ -972,9 +992,8 @@ bool MachineInstr::hasVolatileMemoryRef() const {
return true;
// Check the memory reference information for volatile references.
for (std::list<MachineMemOperand>::const_iterator I = memoperands_begin(),
E = memoperands_end(); I != E; ++I)
if (I->isVolatile())
for (mmo_iterator I = memoperands_begin(), E = memoperands_end(); I != E; ++I)
if ((*I)->isVolatile())
return true;
return false;
@ -1004,9 +1023,9 @@ void MachineInstr::print(raw_ostream &OS, const TargetMachine *TM) const {
if (!memoperands_empty()) {
OS << ", Mem:";
for (std::list<MachineMemOperand>::const_iterator i = memoperands_begin(),
e = memoperands_end(); i != e; ++i) {
OS << *i;
for (mmo_iterator i = memoperands_begin(), e = memoperands_end();
i != e; ++i) {
OS << **i;
if (next(i) != e)
OS << " ";
}

View File

@ -28,10 +28,6 @@
#include "llvm/Support/MathExtras.h"
using namespace llvm;
void ScheduleDAG::AddMemOperand(MachineInstr *MI, const MachineMemOperand &MO) {
MI->addMemOperand(MF, MO);
}
void ScheduleDAG::EmitNoop() {
TII->insertNoop(*BB, InsertPos);
}

View File

@ -17,6 +17,7 @@
#include "llvm/Operator.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/Target/TargetMachine.h"
@ -96,11 +97,11 @@ static const Value *getUnderlyingObject(const Value *V) {
/// object, return the Value for that object. Otherwise return null.
static const Value *getUnderlyingObjectForInstr(const MachineInstr *MI) {
if (!MI->hasOneMemOperand() ||
!MI->memoperands_begin()->getValue() ||
MI->memoperands_begin()->isVolatile())
!(*MI->memoperands_begin())->getValue() ||
(*MI->memoperands_begin())->isVolatile())
return 0;
const Value *V = MI->memoperands_begin()->getValue();
const Value *V = (*MI->memoperands_begin())->getValue();
if (!V)
return 0;
@ -335,10 +336,10 @@ void ScheduleDAGInstrs::BuildSchedGraph() {
if (!ChainTID.isCall() &&
!ChainTID.hasUnmodeledSideEffects() &&
ChainMI->hasOneMemOperand() &&
!ChainMI->memoperands_begin()->isVolatile() &&
ChainMI->memoperands_begin()->getValue())
!(*ChainMI->memoperands_begin())->isVolatile() &&
(*ChainMI->memoperands_begin())->getValue())
// We know that the Chain accesses one specific memory location.
ChainMMO = &*ChainMI->memoperands_begin();
ChainMMO = *ChainMI->memoperands_begin();
else
// Unknown memory accesses. Assume the worst.
ChainMMO = 0;

View File

@ -263,19 +263,10 @@ unsigned ScheduleDAGSDNodes::CountResults(SDNode *Node) {
}
/// CountOperands - The inputs to target nodes have any actual inputs first,
/// followed by special operands that describe memory references, then an
/// optional chain operand, then an optional flag operand. Compute the number
/// of actual operands that will go into the resulting MachineInstr.
/// followed by an optional chain operand, then an optional flag operand.
/// Compute the number of actual operands that will go into the resulting
/// MachineInstr.
unsigned ScheduleDAGSDNodes::CountOperands(SDNode *Node) {
unsigned N = ComputeMemOperandsEnd(Node);
while (N && isa<MemOperandSDNode>(Node->getOperand(N - 1).getNode()))
--N; // Ignore MEMOPERAND nodes
return N;
}
/// ComputeMemOperandsEnd - Find the index one past the last MemOperandSDNode
/// operand
unsigned ScheduleDAGSDNodes::ComputeMemOperandsEnd(SDNode *Node) {
unsigned N = Node->getNumOperands();
while (N && Node->getOperand(N - 1).getValueType() == MVT::Flag)
--N;
@ -284,7 +275,6 @@ unsigned ScheduleDAGSDNodes::ComputeMemOperandsEnd(SDNode *Node) {
return N;
}
void ScheduleDAGSDNodes::dumpNode(const SUnit *SU) const {
if (!SU->getNode()) {
errs() << "PHYS REG COPY\n";

View File

@ -58,7 +58,6 @@ namespace llvm {
if (isa<ConstantPoolSDNode>(Node)) return true;
if (isa<JumpTableSDNode>(Node)) return true;
if (isa<ExternalSymbolSDNode>(Node)) return true;
if (isa<MemOperandSDNode>(Node)) return true;
if (Node->getOpcode() == ISD::EntryToken) return true;
return false;
}
@ -99,15 +98,11 @@ namespace llvm {
static unsigned CountResults(SDNode *Node);
/// CountOperands - The inputs to target nodes have any actual inputs first,
/// followed by special operands that describe memory references, then an
/// optional chain operand, then flag operands. Compute the number of
/// actual operands that will go into the resulting MachineInstr.
/// followed by an optional chain operand, then flag operands. Compute
/// the number of actual operands that will go into the resulting
/// MachineInstr.
static unsigned CountOperands(SDNode *Node);
/// ComputeMemOperandsEnd - Find the index one past the last
/// MemOperandSDNode operand
static unsigned ComputeMemOperandsEnd(SDNode *Node);
/// EmitNode - Generate machine code for an node and needed dependencies.
/// VRBaseMap contains, for each already emitted node, the first virtual
/// register number for the results of the node.

View File

@ -497,7 +497,6 @@ void ScheduleDAGSDNodes::EmitNode(SDNode *Node, bool IsClone, bool IsCloned,
const TargetInstrDesc &II = TII->get(Opc);
unsigned NumResults = CountResults(Node);
unsigned NodeOperands = CountOperands(Node);
unsigned MemOperandsEnd = ComputeMemOperandsEnd(Node);
bool HasPhysRegOuts = (NumResults > II.getNumDefs()) &&
II.getImplicitDefs() != 0;
#ifndef NDEBUG
@ -525,9 +524,9 @@ void ScheduleDAGSDNodes::EmitNode(SDNode *Node, bool IsClone, bool IsCloned,
AddOperand(MI, Node->getOperand(i), i-NumSkip+II.getNumDefs(), &II,
VRBaseMap);
// Emit all of the memory operands of this instruction
for (unsigned i = NodeOperands; i != MemOperandsEnd; ++i)
AddMemOperand(MI,cast<MemOperandSDNode>(Node->getOperand(i+NumSkip))->MO);
// Transfer all of the memory reference descriptions of this instruction.
MI->setMemRefs(cast<MachineSDNode>(Node)->memoperands_begin(),
cast<MachineSDNode>(Node)->memoperands_end());
if (II.usesCustomDAGSchedInsertionHook()) {
// Insert this instruction into the basic block using a target

View File

@ -402,11 +402,6 @@ static void AddNodeIDCustom(FoldingSetNodeID &ID, const SDNode *N) {
case ISD::SRCVALUE:
ID.AddPointer(cast<SrcValueSDNode>(N)->getValue());
break;
case ISD::MEMOPERAND: {
const MachineMemOperand &MO = cast<MemOperandSDNode>(N)->MO;
MO.Profile(ID);
break;
}
case ISD::FrameIndex:
case ISD::TargetFrameIndex:
ID.AddInteger(cast<FrameIndexSDNode>(N)->getIndex());
@ -481,20 +476,18 @@ static void AddNodeIDNode(FoldingSetNodeID &ID, const SDNode *N) {
}
/// encodeMemSDNodeFlags - Generic routine for computing a value for use in
/// the CSE map that carries alignment, volatility, indexing mode, and
/// the CSE map that carries volatility, indexing mode, and
/// extension/truncation information.
///
static inline unsigned
encodeMemSDNodeFlags(int ConvType, ISD::MemIndexedMode AM,
bool isVolatile, unsigned Alignment) {
encodeMemSDNodeFlags(int ConvType, ISD::MemIndexedMode AM, bool isVolatile) {
assert((ConvType & 3) == ConvType &&
"ConvType may not require more than 2 bits!");
assert((AM & 7) == AM &&
"AM may not require more than 3 bits!");
return ConvType |
(AM << 2) |
(isVolatile << 5) |
((Log2_32(Alignment) + 1) << 6);
(isVolatile << 5);
}
//===----------------------------------------------------------------------===//
@ -1330,28 +1323,6 @@ SDValue SelectionDAG::getSrcValue(const Value *V) {
return SDValue(N, 0);
}
SDValue SelectionDAG::getMemOperand(const MachineMemOperand &MO) {
#ifndef NDEBUG
const Value *v = MO.getValue();
assert((!v || isa<PointerType>(v->getType())) &&
"SrcValue is not a pointer?");
#endif
FoldingSetNodeID ID;
AddNodeIDNode(ID, ISD::MEMOPERAND, getVTList(MVT::Other), 0, 0);
MO.Profile(ID);
void *IP = 0;
if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
return SDValue(E, 0);
SDNode *N = NodeAllocator.Allocate<MemOperandSDNode>();
new (N) MemOperandSDNode(MO);
CSEMap.InsertNode(N, IP);
AllNodes.push_back(N);
return SDValue(N, 0);
}
/// getShiftAmountOperand - Return the specified value casted to
/// the target's desired shift amount type.
SDValue SelectionDAG::getShiftAmountOperand(SDValue Op) {
@ -3523,25 +3494,49 @@ SDValue SelectionDAG::getAtomic(unsigned Opcode, DebugLoc dl, EVT MemVT,
SDValue Ptr, SDValue Cmp,
SDValue Swp, const Value* PtrVal,
unsigned Alignment) {
if (Alignment == 0) // Ensure that codegen never sees alignment 0
Alignment = getEVTAlignment(MemVT);
// Check if the memory reference references a frame index
if (!PtrVal)
if (const FrameIndexSDNode *FI =
dyn_cast<const FrameIndexSDNode>(Ptr.getNode()))
PtrVal = PseudoSourceValue::getFixedStack(FI->getIndex());
MachineFunction &MF = getMachineFunction();
unsigned Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore;
// For now, atomics are considered to be volatile always.
Flags |= MachineMemOperand::MOVolatile;
MachineMemOperand *MMO =
MF.getMachineMemOperand(PtrVal, Flags, 0,
MemVT.getStoreSize(), Alignment);
return getAtomic(Opcode, dl, MemVT, Chain, Ptr, Cmp, Swp, MMO);
}
SDValue SelectionDAG::getAtomic(unsigned Opcode, DebugLoc dl, EVT MemVT,
SDValue Chain,
SDValue Ptr, SDValue Cmp,
SDValue Swp, MachineMemOperand *MMO) {
assert(Opcode == ISD::ATOMIC_CMP_SWAP && "Invalid Atomic Op");
assert(Cmp.getValueType() == Swp.getValueType() && "Invalid Atomic Op Types");
EVT VT = Cmp.getValueType();
if (Alignment == 0) // Ensure that codegen never sees alignment 0
Alignment = getEVTAlignment(MemVT);
SDVTList VTs = getVTList(VT, MVT::Other);
FoldingSetNodeID ID;
ID.AddInteger(MemVT.getRawBits());
SDValue Ops[] = {Chain, Ptr, Cmp, Swp};
AddNodeIDNode(ID, Opcode, VTs, Ops, 4);
void* IP = 0;
if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) {
cast<AtomicSDNode>(E)->refineAlignment(MMO);
return SDValue(E, 0);
}
SDNode* N = NodeAllocator.Allocate<AtomicSDNode>();
new (N) AtomicSDNode(Opcode, dl, VTs, MemVT,
Chain, Ptr, Cmp, Swp, PtrVal, Alignment);
new (N) AtomicSDNode(Opcode, dl, VTs, MemVT, Chain, Ptr, Cmp, Swp, MMO);
CSEMap.InsertNode(N, IP);
AllNodes.push_back(N);
return SDValue(N, 0);
@ -3552,6 +3547,32 @@ SDValue SelectionDAG::getAtomic(unsigned Opcode, DebugLoc dl, EVT MemVT,
SDValue Ptr, SDValue Val,
const Value* PtrVal,
unsigned Alignment) {
if (Alignment == 0) // Ensure that codegen never sees alignment 0
Alignment = getEVTAlignment(MemVT);
// Check if the memory reference references a frame index
if (!PtrVal)
if (const FrameIndexSDNode *FI =
dyn_cast<const FrameIndexSDNode>(Ptr.getNode()))
PtrVal = PseudoSourceValue::getFixedStack(FI->getIndex());
MachineFunction &MF = getMachineFunction();
unsigned Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore;
// For now, atomics are considered to be volatile always.
Flags |= MachineMemOperand::MOVolatile;
MachineMemOperand *MMO =
MF.getMachineMemOperand(PtrVal, Flags, 0,
MemVT.getStoreSize(), Alignment);
return getAtomic(Opcode, dl, MemVT, Chain, Ptr, Val, MMO);
}
SDValue SelectionDAG::getAtomic(unsigned Opcode, DebugLoc dl, EVT MemVT,
SDValue Chain,
SDValue Ptr, SDValue Val,
MachineMemOperand *MMO) {
assert((Opcode == ISD::ATOMIC_LOAD_ADD ||
Opcode == ISD::ATOMIC_LOAD_SUB ||
Opcode == ISD::ATOMIC_LOAD_AND ||
@ -3567,20 +3588,18 @@ SDValue SelectionDAG::getAtomic(unsigned Opcode, DebugLoc dl, EVT MemVT,
EVT VT = Val.getValueType();
if (Alignment == 0) // Ensure that codegen never sees alignment 0
Alignment = getEVTAlignment(MemVT);
SDVTList VTs = getVTList(VT, MVT::Other);
FoldingSetNodeID ID;
ID.AddInteger(MemVT.getRawBits());
SDValue Ops[] = {Chain, Ptr, Val};
AddNodeIDNode(ID, Opcode, VTs, Ops, 3);
void* IP = 0;
if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) {
cast<AtomicSDNode>(E)->refineAlignment(MMO);
return SDValue(E, 0);
}
SDNode* N = NodeAllocator.Allocate<AtomicSDNode>();
new (N) AtomicSDNode(Opcode, dl, VTs, MemVT,
Chain, Ptr, Val, PtrVal, Alignment);
new (N) AtomicSDNode(Opcode, dl, VTs, MemVT, Chain, Ptr, Val, MMO);
CSEMap.InsertNode(N, IP);
AllNodes.push_back(N);
return SDValue(N, 0);
@ -3619,23 +3638,51 @@ SelectionDAG::getMemIntrinsicNode(unsigned Opcode, DebugLoc dl, SDVTList VTList,
EVT MemVT, const Value *srcValue, int SVOff,
unsigned Align, bool Vol,
bool ReadMem, bool WriteMem) {
if (Align == 0) // Ensure that codegen never sees alignment 0
Align = getEVTAlignment(MemVT);
MachineFunction &MF = getMachineFunction();
unsigned Flags = 0;
if (WriteMem)
Flags |= MachineMemOperand::MOStore;
if (ReadMem)
Flags |= MachineMemOperand::MOLoad;
if (Vol)
Flags |= MachineMemOperand::MOVolatile;
MachineMemOperand *MMO =
MF.getMachineMemOperand(srcValue, Flags, SVOff,
MemVT.getStoreSize(), Align);
return getMemIntrinsicNode(Opcode, dl, VTList, Ops, NumOps, MemVT, MMO);
}
SDValue
SelectionDAG::getMemIntrinsicNode(unsigned Opcode, DebugLoc dl, SDVTList VTList,
const SDValue *Ops, unsigned NumOps,
EVT MemVT, MachineMemOperand *MMO) {
assert((Opcode == ISD::INTRINSIC_VOID ||
Opcode == ISD::INTRINSIC_W_CHAIN ||
(Opcode <= INT_MAX &&
(int)Opcode >= ISD::FIRST_TARGET_MEMORY_OPCODE)) &&
"Opcode is not a memory-accessing opcode!");
// Memoize the node unless it returns a flag.
MemIntrinsicSDNode *N;
if (VTList.VTs[VTList.NumVTs-1] != MVT::Flag) {
FoldingSetNodeID ID;
AddNodeIDNode(ID, Opcode, VTList, Ops, NumOps);
void *IP = 0;
if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) {
cast<MemIntrinsicSDNode>(E)->refineAlignment(MMO);
return SDValue(E, 0);
}
N = NodeAllocator.Allocate<MemIntrinsicSDNode>();
new (N) MemIntrinsicSDNode(Opcode, dl, VTList, Ops, NumOps, MemVT,
srcValue, SVOff, Align, Vol, ReadMem, WriteMem);
new (N) MemIntrinsicSDNode(Opcode, dl, VTList, Ops, NumOps, MemVT, MMO);
CSEMap.InsertNode(N, IP);
} else {
N = NodeAllocator.Allocate<MemIntrinsicSDNode>();
new (N) MemIntrinsicSDNode(Opcode, dl, VTList, Ops, NumOps, MemVT,
srcValue, SVOff, Align, Vol, ReadMem, WriteMem);
new (N) MemIntrinsicSDNode(Opcode, dl, VTList, Ops, NumOps, MemVT, MMO);
}
AllNodes.push_back(N);
return SDValue(N, 0);
@ -3650,6 +3697,27 @@ SelectionDAG::getLoad(ISD::MemIndexedMode AM, DebugLoc dl,
if (Alignment == 0) // Ensure that codegen never sees alignment 0
Alignment = getEVTAlignment(VT);
// Check if the memory reference references a frame index
if (!SV)
if (const FrameIndexSDNode *FI =
dyn_cast<const FrameIndexSDNode>(Ptr.getNode()))
SV = PseudoSourceValue::getFixedStack(FI->getIndex());
MachineFunction &MF = getMachineFunction();
unsigned Flags = MachineMemOperand::MOLoad;
if (isVolatile)
Flags |= MachineMemOperand::MOVolatile;
MachineMemOperand *MMO =
MF.getMachineMemOperand(SV, Flags, SVOffset,
MemVT.getStoreSize(), Alignment);
return getLoad(AM, dl, ExtType, VT, Chain, Ptr, Offset, MemVT, MMO);
}
SDValue
SelectionDAG::getLoad(ISD::MemIndexedMode AM, DebugLoc dl,
ISD::LoadExtType ExtType, EVT VT, SDValue Chain,
SDValue Ptr, SDValue Offset, EVT MemVT,
MachineMemOperand *MMO) {
if (VT == MemVT) {
ExtType = ISD::NON_EXTLOAD;
} else if (ExtType == ISD::NON_EXTLOAD) {
@ -3678,13 +3746,14 @@ SelectionDAG::getLoad(ISD::MemIndexedMode AM, DebugLoc dl,
FoldingSetNodeID ID;
AddNodeIDNode(ID, ISD::LOAD, VTs, Ops, 3);
ID.AddInteger(MemVT.getRawBits());
ID.AddInteger(encodeMemSDNodeFlags(ExtType, AM, isVolatile, Alignment));
ID.AddInteger(encodeMemSDNodeFlags(ExtType, AM, MMO->isVolatile()));
void *IP = 0;
if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) {
cast<LoadSDNode>(E)->refineAlignment(MMO);
return SDValue(E, 0);
}
SDNode *N = NodeAllocator.Allocate<LoadSDNode>();
new (N) LoadSDNode(Ops, dl, VTs, AM, ExtType, MemVT, SV, SVOffset,
Alignment, isVolatile);
new (N) LoadSDNode(Ops, dl, VTs, AM, ExtType, MemVT, MMO);
CSEMap.InsertNode(N, IP);
AllNodes.push_back(N);
return SDValue(N, 0);
@ -3724,25 +3793,43 @@ SelectionDAG::getIndexedLoad(SDValue OrigLoad, DebugLoc dl, SDValue Base,
SDValue SelectionDAG::getStore(SDValue Chain, DebugLoc dl, SDValue Val,
SDValue Ptr, const Value *SV, int SVOffset,
bool isVolatile, unsigned Alignment) {
EVT VT = Val.getValueType();
if (Alignment == 0) // Ensure that codegen never sees alignment 0
Alignment = getEVTAlignment(VT);
Alignment = getEVTAlignment(Val.getValueType());
// Check if the memory reference references a frame index
if (!SV)
if (const FrameIndexSDNode *FI =
dyn_cast<const FrameIndexSDNode>(Ptr.getNode()))
SV = PseudoSourceValue::getFixedStack(FI->getIndex());
MachineFunction &MF = getMachineFunction();
unsigned Flags = MachineMemOperand::MOStore;
if (isVolatile)
Flags |= MachineMemOperand::MOVolatile;
MachineMemOperand *MMO =
MF.getMachineMemOperand(SV, Flags, SVOffset,
Val.getValueType().getStoreSize(), Alignment);
return getStore(Chain, dl, Val, Ptr, MMO);
}
SDValue SelectionDAG::getStore(SDValue Chain, DebugLoc dl, SDValue Val,
SDValue Ptr, MachineMemOperand *MMO) {
EVT VT = Val.getValueType();
SDVTList VTs = getVTList(MVT::Other);
SDValue Undef = getUNDEF(Ptr.getValueType());
SDValue Ops[] = { Chain, Val, Ptr, Undef };
FoldingSetNodeID ID;
AddNodeIDNode(ID, ISD::STORE, VTs, Ops, 4);
ID.AddInteger(VT.getRawBits());
ID.AddInteger(encodeMemSDNodeFlags(false, ISD::UNINDEXED,
isVolatile, Alignment));
ID.AddInteger(encodeMemSDNodeFlags(false, ISD::UNINDEXED, MMO->isVolatile()));
void *IP = 0;
if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) {
cast<StoreSDNode>(E)->refineAlignment(MMO);
return SDValue(E, 0);
}
SDNode *N = NodeAllocator.Allocate<StoreSDNode>();
new (N) StoreSDNode(Ops, dl, VTs, ISD::UNINDEXED, false,
VT, SV, SVOffset, Alignment, isVolatile);
new (N) StoreSDNode(Ops, dl, VTs, ISD::UNINDEXED, false, VT, MMO);
CSEMap.InsertNode(N, IP);
AllNodes.push_back(N);
return SDValue(N, 0);
@ -3752,17 +3839,37 @@ SDValue SelectionDAG::getTruncStore(SDValue Chain, DebugLoc dl, SDValue Val,
SDValue Ptr, const Value *SV,
int SVOffset, EVT SVT,
bool isVolatile, unsigned Alignment) {
if (Alignment == 0) // Ensure that codegen never sees alignment 0
Alignment = getEVTAlignment(SVT);
// Check if the memory reference references a frame index
if (!SV)
if (const FrameIndexSDNode *FI =
dyn_cast<const FrameIndexSDNode>(Ptr.getNode()))
SV = PseudoSourceValue::getFixedStack(FI->getIndex());
MachineFunction &MF = getMachineFunction();
unsigned Flags = MachineMemOperand::MOStore;
if (isVolatile)
Flags |= MachineMemOperand::MOVolatile;
MachineMemOperand *MMO =
MF.getMachineMemOperand(SV, Flags, SVOffset, SVT.getStoreSize(), Alignment);
return getTruncStore(Chain, dl, Val, Ptr, SVT, MMO);
}
SDValue SelectionDAG::getTruncStore(SDValue Chain, DebugLoc dl, SDValue Val,
SDValue Ptr, EVT SVT,
MachineMemOperand *MMO) {
EVT VT = Val.getValueType();
if (VT == SVT)
return getStore(Chain, dl, Val, Ptr, SV, SVOffset, isVolatile, Alignment);
return getStore(Chain, dl, Val, Ptr, MMO);
assert(VT.bitsGT(SVT) && "Not a truncation?");
assert(VT.isInteger() == SVT.isInteger() &&
"Can't do FP-INT conversion!");
if (Alignment == 0) // Ensure that codegen never sees alignment 0
Alignment = getEVTAlignment(VT);
SDVTList VTs = getVTList(MVT::Other);
SDValue Undef = getUNDEF(Ptr.getValueType());
@ -3770,14 +3877,14 @@ SDValue SelectionDAG::getTruncStore(SDValue Chain, DebugLoc dl, SDValue Val,
FoldingSetNodeID ID;
AddNodeIDNode(ID, ISD::STORE, VTs, Ops, 4);
ID.AddInteger(SVT.getRawBits());
ID.AddInteger(encodeMemSDNodeFlags(true, ISD::UNINDEXED,
isVolatile, Alignment));
ID.AddInteger(encodeMemSDNodeFlags(true, ISD::UNINDEXED, MMO->isVolatile()));
void *IP = 0;
if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) {
cast<StoreSDNode>(E)->refineAlignment(MMO);
return SDValue(E, 0);
}
SDNode *N = NodeAllocator.Allocate<StoreSDNode>();
new (N) StoreSDNode(Ops, dl, VTs, ISD::UNINDEXED, true,
SVT, SV, SVOffset, Alignment, isVolatile);
new (N) StoreSDNode(Ops, dl, VTs, ISD::UNINDEXED, true, SVT, MMO);
CSEMap.InsertNode(N, IP);
AllNodes.push_back(N);
return SDValue(N, 0);
@ -3801,8 +3908,7 @@ SelectionDAG::getIndexedStore(SDValue OrigStore, DebugLoc dl, SDValue Base,
SDNode *N = NodeAllocator.Allocate<StoreSDNode>();
new (N) StoreSDNode(Ops, dl, VTs, AM,
ST->isTruncatingStore(), ST->getMemoryVT(),
ST->getSrcValue(), ST->getSrcValueOffset(),
ST->getAlignment(), ST->isVolatile());
ST->getMemOperand());
CSEMap.InsertNode(N, IP);
AllNodes.push_back(N);
return SDValue(N, 0);
@ -4454,29 +4560,35 @@ SDNode *SelectionDAG::MorphNodeTo(SDNode *N, unsigned Opc,
DeadNodeSet.insert(Used);
}
// If NumOps is larger than the # of operands we currently have, reallocate
// the operand list.
if (NumOps > N->NumOperands) {
if (N->OperandsNeedDelete)
delete[] N->OperandList;
if (N->isMachineOpcode()) {
// We're creating a final node that will live unmorphed for the
// remainder of the current SelectionDAG iteration, so we can allocate
// the operands directly out of a pool with no recycling metadata.
N->OperandList = OperandAllocator.Allocate<SDUse>(NumOps);
N->OperandsNeedDelete = false;
} else {
N->OperandList = new SDUse[NumOps];
if (MachineSDNode *MN = dyn_cast<MachineSDNode>(N)) {
// Initialize the memory references information.
MN->setMemRefs(0, 0);
// If NumOps is larger than the # of operands we can have in a
// MachineSDNode, reallocate the operand list.
if (NumOps > MN->NumOperands || !MN->OperandsNeedDelete) {
if (MN->OperandsNeedDelete)
delete[] MN->OperandList;
if (NumOps > array_lengthof(MN->LocalOperands))
// We're creating a final node that will live unmorphed for the
// remainder of the current SelectionDAG iteration, so we can allocate
// the operands directly out of a pool with no recycling metadata.
MN->InitOperands(OperandAllocator.Allocate<SDUse>(NumOps),
Ops, NumOps);
else
MN->InitOperands(MN->LocalOperands, Ops, NumOps);
MN->OperandsNeedDelete = false;
} else
MN->InitOperands(MN->OperandList, Ops, NumOps);
} else {
// If NumOps is larger than the # of operands we currently have, reallocate
// the operand list.
if (NumOps > N->NumOperands) {
if (N->OperandsNeedDelete)
delete[] N->OperandList;
N->InitOperands(new SDUse[NumOps], Ops, NumOps);
N->OperandsNeedDelete = true;
}
}
// Assign the new operands.
N->NumOperands = NumOps;
for (unsigned i = 0, e = NumOps; i != e; ++i) {
N->OperandList[i].setUser(N);
N->OperandList[i].setInitial(Ops[i]);
} else
MN->InitOperands(MN->OperandList, Ops, NumOps);
}
// Delete any nodes that are still dead after adding the uses for the
@ -4501,41 +4613,49 @@ SDNode *SelectionDAG::MorphNodeTo(SDNode *N, unsigned Opc,
/// node of the specified opcode and operands, it returns that node instead of
/// the current one.
SDNode *SelectionDAG::getMachineNode(unsigned Opcode, DebugLoc dl, EVT VT) {
return getNode(~Opcode, dl, VT).getNode();
SDVTList VTs = getVTList(VT);
return getMachineNode(Opcode, dl, VTs, 0, 0);
}
SDNode *SelectionDAG::getMachineNode(unsigned Opcode, DebugLoc dl, EVT VT,
SDValue Op1) {
return getNode(~Opcode, dl, VT, Op1).getNode();
SDVTList VTs = getVTList(VT);
SDValue Ops[] = { Op1 };
return getMachineNode(Opcode, dl, VTs, Ops, array_lengthof(Ops));
}
SDNode *SelectionDAG::getMachineNode(unsigned Opcode, DebugLoc dl, EVT VT,
SDValue Op1, SDValue Op2) {
return getNode(~Opcode, dl, VT, Op1, Op2).getNode();
SDVTList VTs = getVTList(VT);
SDValue Ops[] = { Op1, Op2 };
return getMachineNode(Opcode, dl, VTs, Ops, array_lengthof(Ops));
}
SDNode *SelectionDAG::getMachineNode(unsigned Opcode, DebugLoc dl, EVT VT,
SDValue Op1, SDValue Op2,
SDValue Op3) {
return getNode(~Opcode, dl, VT, Op1, Op2, Op3).getNode();
SDVTList VTs = getVTList(VT);
SDValue Ops[] = { Op1, Op2, Op3 };
return getMachineNode(Opcode, dl, VTs, Ops, array_lengthof(Ops));
}
SDNode *SelectionDAG::getMachineNode(unsigned Opcode, DebugLoc dl, EVT VT,
const SDValue *Ops, unsigned NumOps) {
return getNode(~Opcode, dl, VT, Ops, NumOps).getNode();
SDVTList VTs = getVTList(VT);
return getMachineNode(Opcode, dl, VTs, Ops, NumOps);
}
SDNode *SelectionDAG::getMachineNode(unsigned Opcode, DebugLoc dl,
EVT VT1, EVT VT2) {
SDVTList VTs = getVTList(VT1, VT2);
SDValue Op;
return getNode(~Opcode, dl, VTs, &Op, 0).getNode();
return getMachineNode(Opcode, dl, VTs, 0, 0);
}
SDNode *SelectionDAG::getMachineNode(unsigned Opcode, DebugLoc dl, EVT VT1,
EVT VT2, SDValue Op1) {
SDVTList VTs = getVTList(VT1, VT2);
return getNode(~Opcode, dl, VTs, &Op1, 1).getNode();
SDValue Ops[] = { Op1 };
return getMachineNode(Opcode, dl, VTs, Ops, array_lengthof(Ops));
}
SDNode *SelectionDAG::getMachineNode(unsigned Opcode, DebugLoc dl, EVT VT1,
@ -4543,7 +4663,7 @@ SDNode *SelectionDAG::getMachineNode(unsigned Opcode, DebugLoc dl, EVT VT1,
SDValue Op2) {
SDVTList VTs = getVTList(VT1, VT2);
SDValue Ops[] = { Op1, Op2 };
return getNode(~Opcode, dl, VTs, Ops, 2).getNode();
return getMachineNode(Opcode, dl, VTs, Ops, array_lengthof(Ops));
}
SDNode *SelectionDAG::getMachineNode(unsigned Opcode, DebugLoc dl, EVT VT1,
@ -4551,14 +4671,14 @@ SDNode *SelectionDAG::getMachineNode(unsigned Opcode, DebugLoc dl, EVT VT1,
SDValue Op2, SDValue Op3) {
SDVTList VTs = getVTList(VT1, VT2);
SDValue Ops[] = { Op1, Op2, Op3 };
return getNode(~Opcode, dl, VTs, Ops, 3).getNode();
return getMachineNode(Opcode, dl, VTs, Ops, array_lengthof(Ops));
}
SDNode *SelectionDAG::getMachineNode(unsigned Opcode, DebugLoc dl,
EVT VT1, EVT VT2,
const SDValue *Ops, unsigned NumOps) {
SDVTList VTs = getVTList(VT1, VT2);
return getNode(~Opcode, dl, VTs, Ops, NumOps).getNode();
return getMachineNode(Opcode, dl, VTs, Ops, NumOps);
}
SDNode *SelectionDAG::getMachineNode(unsigned Opcode, DebugLoc dl,
@ -4566,7 +4686,7 @@ SDNode *SelectionDAG::getMachineNode(unsigned Opcode, DebugLoc dl,
SDValue Op1, SDValue Op2) {
SDVTList VTs = getVTList(VT1, VT2, VT3);
SDValue Ops[] = { Op1, Op2 };
return getNode(~Opcode, dl, VTs, Ops, 2).getNode();
return getMachineNode(Opcode, dl, VTs, Ops, array_lengthof(Ops));
}
SDNode *SelectionDAG::getMachineNode(unsigned Opcode, DebugLoc dl,
@ -4575,27 +4695,67 @@ SDNode *SelectionDAG::getMachineNode(unsigned Opcode, DebugLoc dl,
SDValue Op3) {
SDVTList VTs = getVTList(VT1, VT2, VT3);
SDValue Ops[] = { Op1, Op2, Op3 };
return getNode(~Opcode, dl, VTs, Ops, 3).getNode();
return getMachineNode(Opcode, dl, VTs, Ops, array_lengthof(Ops));
}
SDNode *SelectionDAG::getMachineNode(unsigned Opcode, DebugLoc dl,
EVT VT1, EVT VT2, EVT VT3,
const SDValue *Ops, unsigned NumOps) {
SDVTList VTs = getVTList(VT1, VT2, VT3);
return getNode(~Opcode, dl, VTs, Ops, NumOps).getNode();
return getMachineNode(Opcode, dl, VTs, Ops, NumOps);
}
SDNode *SelectionDAG::getMachineNode(unsigned Opcode, DebugLoc dl, EVT VT1,
EVT VT2, EVT VT3, EVT VT4,
const SDValue *Ops, unsigned NumOps) {
SDVTList VTs = getVTList(VT1, VT2, VT3, VT4);
return getNode(~Opcode, dl, VTs, Ops, NumOps).getNode();
return getMachineNode(Opcode, dl, VTs, Ops, NumOps);
}
SDNode *SelectionDAG::getMachineNode(unsigned Opcode, DebugLoc dl,
const std::vector<EVT> &ResultTys,
const SDValue *Ops, unsigned NumOps) {
return getNode(~Opcode, dl, ResultTys, Ops, NumOps).getNode();
SDVTList VTs = getVTList(&ResultTys[0], ResultTys.size());
return getMachineNode(Opcode, dl, VTs, Ops, NumOps);
}
SDNode *SelectionDAG::getMachineNode(unsigned Opcode, DebugLoc DL, SDVTList VTs,
const SDValue *Ops, unsigned NumOps) {
bool DoCSE = VTs.VTs[VTs.NumVTs-1] != MVT::Flag;
MachineSDNode *N;
void *IP;
if (DoCSE) {
FoldingSetNodeID ID;
AddNodeIDNode(ID, ~Opcode, VTs, Ops, NumOps);
IP = 0;
if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
return E;
}
// Allocate a new MachineSDNode.
N = NodeAllocator.Allocate<MachineSDNode>();
new (N) MachineSDNode(~Opcode, DL, VTs);
// Initialize the operands list.
if (NumOps > array_lengthof(N->LocalOperands))
// We're creating a final node that will live unmorphed for the
// remainder of the current SelectionDAG iteration, so we can allocate
// the operands directly out of a pool with no recycling metadata.
N->InitOperands(OperandAllocator.Allocate<SDUse>(NumOps),
Ops, NumOps);
else
N->InitOperands(N->LocalOperands, Ops, NumOps);
N->OperandsNeedDelete = false;
if (DoCSE)
CSEMap.InsertNode(N, IP);
AllNodes.push_back(N);
#ifndef NDEBUG
VerifyNode(N);
#endif
return N;
}
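For reference, a hedged sketch of how a target selector might call the new SDNode*-returning getMachineNode overloads. TargetOpc and the operand list are placeholders, not a real instruction.

#include "llvm/ADT/STLExtras.h"
#include "llvm/CodeGen/SelectionDAG.h"
using namespace llvm;

static SDNode *SelectHypotheticalOp(SelectionDAG &DAG, unsigned TargetOpc,
                                    DebugLoc dl, SDValue Chain, SDValue Val) {
  SDValue Ops[] = { Val, Chain };
  // The last value type is not MVT::Flag, so the node is CSE'd through the
  // FoldingSet path shown above and comes back as a MachineSDNode.
  return DAG.getMachineNode(TargetOpc, dl, MVT::i32, MVT::Other,
                            Ops, array_lengthof(Ops));
}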
/// getTargetExtractSubreg - A convenience function for creating
@ -4968,57 +5128,21 @@ GlobalAddressSDNode::GlobalAddressSDNode(unsigned Opc, const GlobalValue *GA,
}
MemSDNode::MemSDNode(unsigned Opc, DebugLoc dl, SDVTList VTs, EVT memvt,
const Value *srcValue, int SVO, unsigned alignment,
bool vol)
: SDNode(Opc, dl, VTs), MemoryVT(memvt), SrcValue(srcValue), SVOffset(SVO) {
SubclassData = encodeMemSDNodeFlags(0, ISD::UNINDEXED, vol, alignment);
assert(isPowerOf2_32(alignment) && "Alignment is not a power of 2!");
assert(getOriginalAlignment() == alignment && "Alignment encoding error!");
assert(isVolatile() == vol && "Volatile encoding error!");
MachineMemOperand *mmo)
: SDNode(Opc, dl, VTs), MemoryVT(memvt), MMO(mmo) {
SubclassData = encodeMemSDNodeFlags(0, ISD::UNINDEXED, MMO->isVolatile());
assert(isVolatile() == MMO->isVolatile() && "Volatile encoding error!");
assert(memvt.getStoreSize() == MMO->getSize() && "Size mismatch!");
}
MemSDNode::MemSDNode(unsigned Opc, DebugLoc dl, SDVTList VTs,
const SDValue *Ops, unsigned NumOps, EVT memvt,
const Value *srcValue, int SVO, unsigned alignment,
bool vol)
MachineMemOperand *mmo)
: SDNode(Opc, dl, VTs, Ops, NumOps),
MemoryVT(memvt), SrcValue(srcValue), SVOffset(SVO) {
SubclassData = encodeMemSDNodeFlags(0, ISD::UNINDEXED, vol, alignment);
assert(isPowerOf2_32(alignment) && "Alignment is not a power of 2!");
assert(getOriginalAlignment() == alignment && "Alignment encoding error!");
assert(isVolatile() == vol && "Volatile encoding error!");
}
/// getMemOperand - Return a MachineMemOperand object describing the memory
/// reference performed by this memory reference.
MachineMemOperand MemSDNode::getMemOperand() const {
int Flags = 0;
if (isa<LoadSDNode>(this))
Flags = MachineMemOperand::MOLoad;
else if (isa<StoreSDNode>(this))
Flags = MachineMemOperand::MOStore;
else if (isa<AtomicSDNode>(this)) {
Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore;
}
else {
const MemIntrinsicSDNode* MemIntrinNode = dyn_cast<MemIntrinsicSDNode>(this);
assert(MemIntrinNode && "Unknown MemSDNode opcode!");
if (MemIntrinNode->readMem()) Flags |= MachineMemOperand::MOLoad;
if (MemIntrinNode->writeMem()) Flags |= MachineMemOperand::MOStore;
}
int Size = (getMemoryVT().getSizeInBits() + 7) >> 3;
if (isVolatile()) Flags |= MachineMemOperand::MOVolatile;
// Check if the memory reference references a frame index
const FrameIndexSDNode *FI =
dyn_cast<const FrameIndexSDNode>(getBasePtr().getNode());
if (!getSrcValue() && FI)
return MachineMemOperand(PseudoSourceValue::getFixedStack(FI->getIndex()),
Flags, 0, Size, getOriginalAlignment());
else
return MachineMemOperand(getSrcValue(), Flags, getSrcValueOffset(),
Size, getOriginalAlignment());
MemoryVT(memvt), MMO(mmo) {
SubclassData = encodeMemSDNodeFlags(0, ISD::UNINDEXED, MMO->isVolatile());
assert(isVolatile() == MMO->isVolatile() && "Volatile encoding error!");
assert(memvt.getStoreSize() == MMO->getSize() && "Size mismatch!");
}
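A brief sketch of how the MachineMemOperand handed to these constructors is typically obtained; the frame index, size, and alignment are illustrative values mirroring the PowerPC lowering change later in this patch, and the helper name is invented.

#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
using namespace llvm;

// The operand is allocated in the MachineFunction, so it needs no explicit
// deallocation and survives from isel through the rest of codegen.
static MachineMemOperand *makeFixedStackStoreMMO(MachineFunction &MF,
                                                 int FrameIdx) {
  return MF.getMachineMemOperand(PseudoSourceValue::getFixedStack(FrameIdx),
                                 MachineMemOperand::MOStore,
                                 /*Offset=*/0, /*Size=*/8, /*Alignment=*/8);
}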
/// Profile - Gather unique data for the node.
@ -5221,7 +5345,6 @@ std::string SDNode::getOperationName(const SelectionDAG *G) const {
case ISD::PCMARKER: return "PCMarker";
case ISD::READCYCLECOUNTER: return "ReadCycleCounter";
case ISD::SRCVALUE: return "SrcValue";
case ISD::MEMOPERAND: return "MemOperand";
case ISD::EntryToken: return "EntryToken";
case ISD::TokenFactor: return "TokenFactor";
case ISD::AssertSext: return "AssertSext";
@ -5500,8 +5623,20 @@ void SDNode::print_types(raw_ostream &OS, const SelectionDAG *G) const {
}
void SDNode::print_details(raw_ostream &OS, const SelectionDAG *G) const {
if (!isTargetOpcode() && getOpcode() == ISD::VECTOR_SHUFFLE) {
const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(this);
if (const MachineSDNode *MN = dyn_cast<MachineSDNode>(this)) {
if (!MN->memoperands_empty()) {
OS << "<";
OS << "Mem:";
for (MachineSDNode::mmo_iterator i = MN->memoperands_begin(),
e = MN->memoperands_end(); i != e; ++i) {
OS << **i;
if (next(i) != e)
OS << " ";
}
OS << ">";
}
} else if (const ShuffleVectorSDNode *SVN =
dyn_cast<ShuffleVectorSDNode>(this)) {
OS << "<";
for (unsigned i = 0, e = ValueList[0].getVectorNumElements(); i != e; ++i) {
int Idx = SVN->getMaskElt(i);
@ -5512,9 +5647,7 @@ void SDNode::print_details(raw_ostream &OS, const SelectionDAG *G) const {
OS << Idx;
}
OS << ">";
}
if (const ConstantSDNode *CSDN = dyn_cast<ConstantSDNode>(this)) {
} else if (const ConstantSDNode *CSDN = dyn_cast<ConstantSDNode>(this)) {
OS << '<' << CSDN->getAPIntValue() << '>';
} else if (const ConstantFPSDNode *CSDN = dyn_cast<ConstantFPSDNode>(this)) {
if (&CSDN->getValueAPF().getSemantics()==&APFloat::IEEEsingle)
@ -5579,68 +5712,40 @@ void SDNode::print_details(raw_ostream &OS, const SelectionDAG *G) const {
OS << "<" << M->getValue() << ">";
else
OS << "<null>";
} else if (const MemOperandSDNode *M = dyn_cast<MemOperandSDNode>(this)) {
OS << ": " << M->MO;
} else if (const VTSDNode *N = dyn_cast<VTSDNode>(this)) {
OS << ":" << N->getVT().getEVTString();
}
else if (const LoadSDNode *LD = dyn_cast<LoadSDNode>(this)) {
const Value *SrcValue = LD->getSrcValue();
int SrcOffset = LD->getSrcValueOffset();
OS << " <";
if (SrcValue)
OS << SrcValue;
else
OS << "null";
OS << ":" << SrcOffset << ">";
OS << " <" << *LD->getMemOperand();
bool doExt = true;
switch (LD->getExtensionType()) {
default: doExt = false; break;
case ISD::EXTLOAD: OS << " <anyext "; break;
case ISD::SEXTLOAD: OS << " <sext "; break;
case ISD::ZEXTLOAD: OS << " <zext "; break;
case ISD::EXTLOAD: OS << ", anyext"; break;
case ISD::SEXTLOAD: OS << ", sext"; break;
case ISD::ZEXTLOAD: OS << ", zext"; break;
}
if (doExt)
OS << LD->getMemoryVT().getEVTString() << ">";
OS << " from " << LD->getMemoryVT().getEVTString();
const char *AM = getIndexedModeName(LD->getAddressingMode());
if (*AM)
OS << " " << AM;
if (LD->isVolatile())
OS << " <volatile>";
OS << " alignment=" << LD->getAlignment();
OS << ", " << AM;
OS << ">";
} else if (const StoreSDNode *ST = dyn_cast<StoreSDNode>(this)) {
const Value *SrcValue = ST->getSrcValue();
int SrcOffset = ST->getSrcValueOffset();
OS << " <";
if (SrcValue)
OS << SrcValue;
else
OS << "null";
OS << ":" << SrcOffset << ">";
OS << " <" << *ST->getMemOperand();
if (ST->isTruncatingStore())
OS << " <trunc " << ST->getMemoryVT().getEVTString() << ">";
OS << ", trunc to " << ST->getMemoryVT().getEVTString();
const char *AM = getIndexedModeName(ST->getAddressingMode());
if (*AM)
OS << " " << AM;
if (ST->isVolatile())
OS << " <volatile>";
OS << " alignment=" << ST->getAlignment();
} else if (const AtomicSDNode* AT = dyn_cast<AtomicSDNode>(this)) {
const Value *SrcValue = AT->getSrcValue();
int SrcOffset = AT->getSrcValueOffset();
OS << " <";
if (SrcValue)
OS << SrcValue;
else
OS << "null";
OS << ":" << SrcOffset << ">";
if (AT->isVolatile())
OS << " <volatile>";
OS << " alignment=" << AT->getAlignment();
OS << ", " << AM;
OS << ">";
} else if (const MemSDNode* M = dyn_cast<MemSDNode>(this)) {
OS << " <" << *M->getMemOperand() << ">";
}
}

View File

@ -18,6 +18,7 @@
#include "llvm/CodeGen/LiveStackAnalysis.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/Support/CommandLine.h"
@ -451,6 +452,7 @@ bool StackSlotColoring::AllMemRefsCanBeUnfolded(int SS) {
/// to old frame index with new one.
void StackSlotColoring::RewriteInstruction(MachineInstr *MI, int OldFI,
int NewFI, MachineFunction &MF) {
// Update the operands.
for (unsigned i = 0, ee = MI->getNumOperands(); i != ee; ++i) {
MachineOperand &MO = MI->getOperand(i);
if (!MO.isFI())
@ -461,22 +463,15 @@ void StackSlotColoring::RewriteInstruction(MachineInstr *MI, int OldFI,
MO.setIndex(NewFI);
}
// Update the MachineMemOperand for the new memory location.
// FIXME: We need a better method of managing these too.
SmallVector<MachineMemOperand, 2> MMOs(MI->memoperands_begin(),
MI->memoperands_end());
MI->clearMemOperands(MF);
// Update the memory references. This changes the MachineMemOperands
// directly. They may be in use by multiple instructions; however, all
// instructions using OldFI are being rewritten to use NewFI.
const Value *OldSV = PseudoSourceValue::getFixedStack(OldFI);
for (unsigned i = 0, ee = MMOs.size(); i != ee; ++i) {
if (MMOs[i].getValue() != OldSV)
MI->addMemOperand(MF, MMOs[i]);
else {
MachineMemOperand MMO(PseudoSourceValue::getFixedStack(NewFI),
MMOs[i].getFlags(), MMOs[i].getOffset(),
MMOs[i].getSize(), MMOs[i].getBaseAlignment());
MI->addMemOperand(MF, MMO);
}
}
const Value *NewSV = PseudoSourceValue::getFixedStack(NewFI);
for (MachineInstr::mmo_iterator I = MI->memoperands_begin(),
E = MI->memoperands_end(); I != E; ++I)
if ((*I)->getValue() == OldSV)
(*I)->setValue(NewSV);
}
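The same pointer-based iteration generalizes to any pass that inspects memory references. A hedged sketch (the helper name is invented; only accessors used elsewhere in this patch appear):

#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
using namespace llvm;

// Returns true if any memory reference of MI is attributed to fixed stack
// slot FI. mmo_iterator is a MachineMemOperand**, hence the double deref.
static bool mayTouchFixedStackSlot(MachineInstr *MI, int FI) {
  const Value *SV = PseudoSourceValue::getFixedStack(FI);
  for (MachineInstr::mmo_iterator I = MI->memoperands_begin(),
         E = MI->memoperands_end(); I != E; ++I)
    if ((*I)->getValue() == SV)
      return true;
  return false;
}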
/// PropagateBackward - Traverse backward and look for the definition of

View File

@ -17,6 +17,7 @@
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
@ -203,11 +204,11 @@ TargetInstrInfo::foldMemoryOperand(MachineFunction &MF,
"Folded a use to a non-load!");
const MachineFrameInfo &MFI = *MF.getFrameInfo();
assert(MFI.getObjectOffset(FrameIndex) != -1);
MachineMemOperand MMO(PseudoSourceValue::getFixedStack(FrameIndex),
Flags,
/*Offset=*/0,
MFI.getObjectSize(FrameIndex),
MFI.getObjectAlignment(FrameIndex));
MachineMemOperand *MMO =
MF.getMachineMemOperand(PseudoSourceValue::getFixedStack(FrameIndex),
Flags, /*Offset=*/0,
MFI.getObjectSize(FrameIndex),
MFI.getObjectAlignment(FrameIndex));
NewMI->addMemOperand(MF, MMO);
return NewMI;
@ -232,9 +233,8 @@ TargetInstrInfo::foldMemoryOperand(MachineFunction &MF,
if (!NewMI) return 0;
// Copy the memoperands from the load to the folded instruction.
for (std::list<MachineMemOperand>::iterator I = LoadMI->memoperands_begin(),
E = LoadMI->memoperands_end(); I != E; ++I)
NewMI->addMemOperand(MF, *I);
NewMI->setMemRefs(LoadMI->memoperands_begin(),
LoadMI->memoperands_end());
return NewMI;
}
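Because the MachineMemOperand list is owned by the MachineFunction, an instruction produced from another one can adopt the original list by pointer range rather than copying element by element. A minimal sketch of that idiom (helper name invented):

#include "llvm/CodeGen/MachineInstr.h"
using namespace llvm;

// NewMI simply records OldMI's memref pointer range; both arrays live in
// the MachineFunction, so no ownership changes hands.
static void transferMemRefs(MachineInstr *OldMI, MachineInstr *NewMI) {
  NewMI->setMemRefs(OldMI->memoperands_begin(), OldMI->memoperands_end());
}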

View File

@ -1174,11 +1174,11 @@ ARMPreAllocLoadStoreOpt::CanFormLdStDWord(MachineInstr *Op0, MachineInstr *Op1,
// Make sure the base address satisfies the i64 ld / st alignment requirement.
if (!Op0->hasOneMemOperand() ||
!Op0->memoperands_begin()->getValue() ||
Op0->memoperands_begin()->isVolatile())
!(*Op0->memoperands_begin())->getValue() ||
(*Op0->memoperands_begin())->isVolatile())
return false;
unsigned Align = Op0->memoperands_begin()->getAlignment();
unsigned Align = (*Op0->memoperands_begin())->getAlignment();
unsigned ReqAlign = STI->hasV6Ops()
? TD->getPrefTypeAlignment(
Type::getInt64Ty(Op0->getParent()->getParent()->getFunction()->getContext()))

View File

@ -3377,7 +3377,8 @@ SDValue PPCTargetLowering::LowerSINT_TO_FP(SDValue Op, SelectionDAG &DAG) {
// 64-bit registers. In particular, sign extend the input value into the
// 64-bit register with extsw, store the WHOLE 64-bit value into the stack
// then lfd it and fcfid it.
MachineFrameInfo *FrameInfo = DAG.getMachineFunction().getFrameInfo();
MachineFunction &MF = DAG.getMachineFunction();
MachineFrameInfo *FrameInfo = MF.getFrameInfo();
int FrameIdx = FrameInfo->CreateStackObject(8, 8);
EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
@ -3386,11 +3387,13 @@ SDValue PPCTargetLowering::LowerSINT_TO_FP(SDValue Op, SelectionDAG &DAG) {
Op.getOperand(0));
// STD the extended value into the stack slot.
MachineMemOperand MO(PseudoSourceValue::getFixedStack(FrameIdx),
MachineMemOperand::MOStore, 0, 8, 8);
SDValue Store = DAG.getNode(PPCISD::STD_32, dl, MVT::Other,
DAG.getEntryNode(), Ext64, FIdx,
DAG.getMemOperand(MO));
MachineMemOperand *MMO =
MF.getMachineMemOperand(PseudoSourceValue::getFixedStack(FrameIdx),
MachineMemOperand::MOStore, 0, 8, 8);
SDValue Ops[] = { DAG.getEntryNode(), Ext64, FIdx };
SDValue Store =
DAG.getMemIntrinsicNode(PPCISD::STD_32, dl, DAG.getVTList(MVT::Other),
Ops, array_lengthof(Ops), MVT::i64, MMO);
// Load the value as a double.
SDValue Ld = DAG.getLoad(MVT::f64, dl, Store, FIdx, NULL, 0);
@ -4931,9 +4934,15 @@ SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N,
if (BSwapOp.getValueType() == MVT::i16)
BSwapOp = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, BSwapOp);
return DAG.getNode(PPCISD::STBRX, dl, MVT::Other, N->getOperand(0),
BSwapOp, N->getOperand(2), N->getOperand(3),
DAG.getValueType(N->getOperand(1).getValueType()));
SDValue Ops[] = {
N->getOperand(0), BSwapOp, N->getOperand(2),
DAG.getValueType(N->getOperand(1).getValueType())
};
return
DAG.getMemIntrinsicNode(PPCISD::STBRX, dl, DAG.getVTList(MVT::Other),
Ops, array_lengthof(Ops),
cast<StoreSDNode>(N)->getMemoryVT(),
cast<StoreSDNode>(N)->getMemOperand());
}
break;
case ISD::BSWAP:
@ -4944,17 +4953,15 @@ SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N,
SDValue Load = N->getOperand(0);
LoadSDNode *LD = cast<LoadSDNode>(Load);
// Create the byte-swapping load.
std::vector<EVT> VTs;
VTs.push_back(MVT::i32);
VTs.push_back(MVT::Other);
SDValue MO = DAG.getMemOperand(LD->getMemOperand());
SDValue Ops[] = {
LD->getChain(), // Chain
LD->getBasePtr(), // Ptr
MO, // MemOperand
DAG.getValueType(N->getValueType(0)) // VT
};
SDValue BSLoad = DAG.getNode(PPCISD::LBRX, dl, VTs, Ops, 4);
SDValue BSLoad =
DAG.getMemIntrinsicNode(PPCISD::LBRX, dl,
DAG.getVTList(MVT::i32, MVT::Other), Ops, 3,
LD->getMemoryVT(), LD->getMemOperand());
// If this is an i16 load, insert the truncate.
SDValue ResVal = BSLoad;
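A hedged sketch of the general getMemIntrinsicNode pattern used above; HypoOpc is a placeholder for a target opcode placed at or above ISD::FIRST_TARGET_MEMORY_OPCODE, and the function name is invented.

#include "llvm/ADT/STLExtras.h"
#include "llvm/CodeGen/SelectionDAG.h"
using namespace llvm;

// Rebuilds a load as a target memory node while preserving its
// MachineMemOperand, so the framework keeps the memory information.
static SDValue buildByteSwappedLoad(SelectionDAG &DAG, LoadSDNode *LD,
                                    unsigned HypoOpc, DebugLoc dl) {
  SDValue Ops[] = { LD->getChain(), LD->getBasePtr() };
  return DAG.getMemIntrinsicNode(HypoOpc, dl,
                                 DAG.getVTList(MVT::i32, MVT::Other),
                                 Ops, array_lengthof(Ops),
                                 LD->getMemoryVT(), LD->getMemOperand());
}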

View File

@ -41,8 +41,7 @@ namespace llvm {
FCTIDZ, FCTIWZ,
/// STFIWX - The STFIWX instruction. The first operand is an input token
/// chain, then an f64 value to store, then an address to store it to,
/// then a SRCVALUE for the address.
/// chain, then an f64 value to store, then an address to store it to.
STFIWX,
// VMADDFP, VNMSUBFP - The VMADDFP and VNMSUBFP instructions, taking
@ -80,9 +79,6 @@ namespace llvm {
/// registers.
EXTSW_32,
/// STD_32 - This is the STD instruction for use with "32-bit" registers.
STD_32,
/// CALL - A direct function call.
CALL_Darwin, CALL_SVR4,
@ -124,18 +120,6 @@ namespace llvm {
/// an optional input flag argument.
COND_BRANCH,
/// CHAIN = STBRX CHAIN, GPRC, Ptr, SRCVALUE, Type - This is a
/// byte-swapping store instruction. It byte-swaps the low "Type" bits of
/// the GPRC input, then stores it through Ptr. Type can be either i16 or
/// i32.
STBRX,
/// GPRC, CHAIN = LBRX CHAIN, Ptr, SRCVALUE, Type - This is a
/// byte-swapping load instruction. It loads "Type" bits, byte swaps it,
/// then puts it in the bottom bits of the GPRC. TYPE can be either i16
/// or i32.
LBRX,
// The following 5 instructions are used only as part of the
// long double-to-int conversion sequence.
@ -170,7 +154,22 @@ namespace llvm {
/// operand #1 callee (register or absolute)
/// operand #2 stack adjustment
/// operand #3 optional in flag
TC_RETURN
TC_RETURN,
/// STD_32 - This is the STD instruction for use with "32-bit" registers.
STD_32 = ISD::FIRST_TARGET_MEMORY_OPCODE,
/// CHAIN = STBRX CHAIN, GPRC, Ptr, Type - This is a
/// byte-swapping store instruction. It byte-swaps the low "Type" bits of
/// the GPRC input, then stores it through Ptr. Type can be either i16 or
/// i32.
STBRX,
/// GPRC, CHAIN = LBRX CHAIN, Ptr, Type - This is a
/// byte-swapping load instruction. It loads "Type" bits, byte-swaps them,
/// then puts them in the bottom bits of the GPRC. Type can be either i16
/// or i32.
LBRX
};
}
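The convention sketched for a hypothetical target (HypoISD and the opcode names are invented): opcodes whose nodes reference memory are numbered from ISD::FIRST_TARGET_MEMORY_OPCODE upward, while ordinary target nodes keep starting at ISD::BUILTIN_OP_END.

#include "llvm/CodeGen/SelectionDAGNodes.h"

namespace HypoISD {
  enum NodeType {
    // Ordinary target-specific nodes start after the generic opcodes.
    FIRST_NUMBER = llvm::ISD::BUILTIN_OP_END,
    SOME_ARITH_OP,

    // Nodes that reference memory start here, so the SelectionDAG framework
    // knows MachineMemOperand information is available for them.
    SOME_LOAD_OP = llvm::ISD::FIRST_TARGET_MEMORY_OPCODE,
    SOME_STORE_OP
  };
}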

View File

@ -35,11 +35,11 @@ def SDT_PPCcondbr : SDTypeProfile<0, 3, [
SDTCisVT<0, i32>, SDTCisVT<2, OtherVT>
]>;
def SDT_PPClbrx : SDTypeProfile<1, 3, [
SDTCisVT<0, i32>, SDTCisPtrTy<1>, SDTCisVT<2, OtherVT>, SDTCisVT<3, OtherVT>
def SDT_PPClbrx : SDTypeProfile<1, 2, [
SDTCisVT<0, i32>, SDTCisPtrTy<1>, SDTCisVT<2, OtherVT>
]>;
def SDT_PPCstbrx : SDTypeProfile<0, 4, [
SDTCisVT<0, i32>, SDTCisPtrTy<1>, SDTCisVT<2, OtherVT>, SDTCisVT<3, OtherVT>
def SDT_PPCstbrx : SDTypeProfile<0, 3, [
SDTCisVT<0, i32>, SDTCisPtrTy<1>, SDTCisVT<2, OtherVT>
]>;
def SDT_PPClarx : SDTypeProfile<1, 1, [
@ -741,10 +741,10 @@ def LWZX : XForm_1<31, 23, (outs GPRC:$rD), (ins memrr:$src),
def LHBRX : XForm_1<31, 790, (outs GPRC:$rD), (ins memrr:$src),
"lhbrx $rD, $src", LdStGeneral,
[(set GPRC:$rD, (PPClbrx xoaddr:$src, srcvalue:$sv, i16))]>;
[(set GPRC:$rD, (PPClbrx xoaddr:$src, i16))]>;
def LWBRX : XForm_1<31, 534, (outs GPRC:$rD), (ins memrr:$src),
"lwbrx $rD, $src", LdStGeneral,
[(set GPRC:$rD, (PPClbrx xoaddr:$src, srcvalue:$sv, i32))]>;
[(set GPRC:$rD, (PPClbrx xoaddr:$src, i32))]>;
def LFSX : XForm_25<31, 535, (outs F4RC:$frD), (ins memrr:$src),
"lfsx $frD, $src", LdStLFDU,
@ -837,11 +837,11 @@ def STWUX : XForm_8<31, 183, (outs), (ins GPRC:$rS, GPRC:$rA, GPRC:$rB),
}
def STHBRX: XForm_8<31, 918, (outs), (ins GPRC:$rS, memrr:$dst),
"sthbrx $rS, $dst", LdStGeneral,
[(PPCstbrx GPRC:$rS, xoaddr:$dst, srcvalue:$dummy, i16)]>,
[(PPCstbrx GPRC:$rS, xoaddr:$dst, i16)]>,
PPC970_DGroup_Cracked;
def STWBRX: XForm_8<31, 662, (outs), (ins GPRC:$rS, memrr:$dst),
"stwbrx $rS, $dst", LdStGeneral,
[(PPCstbrx GPRC:$rS, xoaddr:$dst, srcvalue:$dummy, i32)]>,
[(PPCstbrx GPRC:$rS, xoaddr:$dst, i32)]>,
PPC970_DGroup_Cracked;
def STFIWX: XForm_28<31, 983, (outs), (ins F8RC:$frS, memrr:$dst),

View File

@ -23,6 +23,7 @@
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
namespace llvm {
@ -113,11 +114,11 @@ addFrameReference(const MachineInstrBuilder &MIB, int FI, int Offset = 0) {
Flags |= MachineMemOperand::MOLoad;
if (TID.mayStore())
Flags |= MachineMemOperand::MOStore;
MachineMemOperand MMO(PseudoSourceValue::getFixedStack(FI),
Flags,
Offset,
MFI.getObjectSize(FI),
MFI.getObjectAlignment(FI));
MachineMemOperand *MMO =
MF.getMachineMemOperand(PseudoSourceValue::getFixedStack(FI),
Flags, Offset,
MFI.getObjectSize(FI),
MFI.getObjectAlignment(FI));
return addOffset(MIB.addFrameIndex(FI), Offset)
.addMemOperand(MMO);
}

View File

@ -1465,11 +1465,14 @@ SDNode *X86DAGToDAGISel::SelectAtomic64(SDNode *Node, unsigned Opc) {
SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
if (!SelectAddr(In1, In1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4))
return NULL;
SDValue LSI = Node->getOperand(4); // MemOperand
const SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, In2L, In2H, LSI, Chain};
return CurDAG->getMachineNode(Opc, Node->getDebugLoc(),
MVT::i32, MVT::i32, MVT::Other, Ops,
array_lengthof(Ops));
MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
MemOp[0] = cast<MemSDNode>(Node)->getMemOperand();
const SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, In2L, In2H, Chain};
SDNode *ResNode = CurDAG->getMachineNode(Opc, Node->getDebugLoc(),
MVT::i32, MVT::i32, MVT::Other, Ops,
array_lengthof(Ops));
cast<MachineSDNode>(ResNode)->setMemRefs(MemOp, MemOp + 1);
return ResNode;
}
SDNode *X86DAGToDAGISel::SelectAtomicLoadAdd(SDNode *Node, EVT NVT) {
@ -1605,15 +1608,18 @@ SDNode *X86DAGToDAGISel::SelectAtomicLoadAdd(SDNode *Node, EVT NVT) {
DebugLoc dl = Node->getDebugLoc();
SDValue Undef = SDValue(CurDAG->getMachineNode(TargetInstrInfo::IMPLICIT_DEF,
dl, NVT), 0);
SDValue MemOp = CurDAG->getMemOperand(cast<MemSDNode>(Node)->getMemOperand());
MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
MemOp[0] = cast<MemSDNode>(Node)->getMemOperand();
if (isInc || isDec) {
SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, MemOp, Chain };
SDValue Ret = SDValue(CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops, 7), 0);
SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Chain };
SDValue Ret = SDValue(CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops, 6), 0);
cast<MachineSDNode>(Ret)->setMemRefs(MemOp, MemOp + 1);
SDValue RetVals[] = { Undef, Ret };
return CurDAG->getMergeValues(RetVals, 2, dl).getNode();
} else {
SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Val, MemOp, Chain };
SDValue Ret = SDValue(CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops, 8), 0);
SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Val, Chain };
SDValue Ret = SDValue(CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops, 7), 0);
cast<MachineSDNode>(Ret)->setMemRefs(MemOp, MemOp + 1);
SDValue RetVals[] = { Undef, Ret };
return CurDAG->getMergeValues(RetVals, 2, dl).getNode();
}
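The pattern above — allocate a one-element memref array from the MachineFunction, fill it from the MemSDNode, and hand it to the selected MachineSDNode — distilled into an isolated sketch (the helper name is illustrative):

#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/SelectionDAG.h"
using namespace llvm;

// Copies the single memory reference of a MemSDNode onto a freshly
// selected MachineSDNode. The array lives in the MachineFunction.
static void attachMemRef(MachineFunction &MF, MemSDNode *From,
                         MachineSDNode *To) {
  MachineSDNode::mmo_iterator MemOp = MF.allocateMemRefsArray(1);
  MemOp[0] = From->getMemOperand();
  To->setMemRefs(MemOp, MemOp + 1);
}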

View File

@ -6983,12 +6983,11 @@ ReplaceATOMIC_BINARY_64(SDNode *Node, SmallVectorImpl<SDValue>&Results,
Node->getOperand(2), DAG.getIntPtrConstant(0));
SDValue In2H = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
Node->getOperand(2), DAG.getIntPtrConstant(1));
// This is a generalized SDNode, not an AtomicSDNode, so it doesn't
// have a MemOperand. Pass the info through as a normal operand.
SDValue LSI = DAG.getMemOperand(cast<MemSDNode>(Node)->getMemOperand());
SDValue Ops[] = { Chain, In1, In2L, In2H, LSI };
SDValue Ops[] = { Chain, In1, In2L, In2H };
SDVTList Tys = DAG.getVTList(MVT::i32, MVT::i32, MVT::Other);
SDValue Result = DAG.getNode(NewOp, dl, Tys, Ops, 5);
SDValue Result =
DAG.getMemIntrinsicNode(NewOp, dl, Tys, Ops, 4, MVT::i64,
cast<MemSDNode>(Node)->getMemOperand());
SDValue OpsF[] = { Result.getValue(0), Result.getValue(1)};
Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, OpsF, 2));
Results.push_back(Result.getValue(2));
@ -7396,7 +7395,8 @@ X86TargetLowering::EmitAtomicBitwiseWithCustomInserter(MachineInstr *bInstr,
(*MIB).addOperand(*argOpers[i]);
MIB.addReg(t2);
assert(bInstr->hasOneMemOperand() && "Unexpected number of memoperand");
(*MIB).addMemOperand(*F, *bInstr->memoperands_begin());
(*MIB).setMemRefs(bInstr->memoperands_begin(),
bInstr->memoperands_end());
MIB = BuildMI(newMBB, dl, TII->get(copyOpc), destOper.getReg());
MIB.addReg(EAXreg);
@ -7548,7 +7548,8 @@ X86TargetLowering::EmitAtomicBit6432WithCustomInserter(MachineInstr *bInstr,
(*MIB).addOperand(*argOpers[i]);
assert(bInstr->hasOneMemOperand() && "Unexpected number of memoperand");
(*MIB).addMemOperand(*F, *bInstr->memoperands_begin());
(*MIB).setMemRefs(bInstr->memoperands_begin(),
bInstr->memoperands_end());
MIB = BuildMI(newMBB, dl, TII->get(copyOpc), t3);
MIB.addReg(X86::EAX);
@ -7652,7 +7653,8 @@ X86TargetLowering::EmitAtomicMinMaxWithCustomInserter(MachineInstr *mInstr,
(*MIB).addOperand(*argOpers[i]);
MIB.addReg(t3);
assert(mInstr->hasOneMemOperand() && "Unexpected number of memoperand");
(*MIB).addMemOperand(*F, *mInstr->memoperands_begin());
(*MIB).setMemRefs(mInstr->memoperands_begin(),
mInstr->memoperands_end());
MIB = BuildMI(newMBB, dl, TII->get(X86::MOV32rr), destOper.getReg());
MIB.addReg(X86::EAX);
@ -7747,6 +7749,11 @@ X86TargetLowering::EmitVAStartSaveXMMRegsWithCustomInserter(
// In the XMM save block, save all the XMM argument registers.
for (int i = 3, e = MI->getNumOperands(); i != e; ++i) {
int64_t Offset = (i - 3) * 16 + VarArgsFPOffset;
MachineMemOperand *MMO =
F->getMachineMemOperand(
PseudoSourceValue::getFixedStack(RegSaveFrameIndex),
MachineMemOperand::MOStore, Offset,
/*Size=*/16, /*Align=*/16);
BuildMI(XMMSaveMBB, DL, TII->get(X86::MOVAPSmr))
.addFrameIndex(RegSaveFrameIndex)
.addImm(/*Scale=*/1)
@ -7754,10 +7761,7 @@ X86TargetLowering::EmitVAStartSaveXMMRegsWithCustomInserter(
.addImm(/*Disp=*/Offset)
.addReg(/*Segment=*/0)
.addReg(MI->getOperand(i).getReg())
.addMemOperand(MachineMemOperand(
PseudoSourceValue::getFixedStack(RegSaveFrameIndex),
MachineMemOperand::MOStore, Offset,
/*Size=*/16, /*Align=*/16));
.addMemOperand(MMO);
}
F->DeleteMachineInstr(MI); // The pseudo instruction is gone now.
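On the MachineInstr side, the same MachineFunction-allocated operand is attached through the instruction builder, as in the XMM-save loop above. A hedged sketch with a placeholder opcode and operand layout (the real instruction may need additional addressing operands):

#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/Target/TargetInstrInfo.h"
using namespace llvm;

// Builds a hypothetical frame-index store and records its memory reference.
static void emitFrameStore(MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator InsertPt, DebugLoc DL,
                           const TargetInstrInfo *TII, unsigned StoreOpc,
                           unsigned SrcReg, int FI) {
  MachineFunction &MF = *MBB.getParent();
  const MachineFrameInfo &MFI = *MF.getFrameInfo();
  MachineMemOperand *MMO =
    MF.getMachineMemOperand(PseudoSourceValue::getFixedStack(FI),
                            MachineMemOperand::MOStore, /*Offset=*/0,
                            MFI.getObjectSize(FI),
                            MFI.getObjectAlignment(FI));
  BuildMI(MBB, InsertPt, DL, TII->get(StoreOpc))
    .addFrameIndex(FI)
    .addReg(SrcReg)
    .addMemOperand(MMO);
}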

View File

@ -204,17 +204,6 @@ namespace llvm {
LCMPXCHG_DAG,
LCMPXCHG8_DAG,
// ATOMADD64_DAG, ATOMSUB64_DAG, ATOMOR64_DAG, ATOMAND64_DAG,
// ATOMXOR64_DAG, ATOMNAND64_DAG, ATOMSWAP64_DAG -
// Atomic 64-bit binary operations.
ATOMADD64_DAG,
ATOMSUB64_DAG,
ATOMOR64_DAG,
ATOMXOR64_DAG,
ATOMAND64_DAG,
ATOMNAND64_DAG,
ATOMSWAP64_DAG,
// FNSTCW16m - Store FP control world into i16 memory.
FNSTCW16m,
@ -248,7 +237,18 @@ namespace llvm {
// VASTART_SAVE_XMM_REGS - Save xmm argument registers to the stack,
// according to %al. An operator is needed so that this can be expanded
// with control flow.
VASTART_SAVE_XMM_REGS
VASTART_SAVE_XMM_REGS,
// ATOMADD64_DAG, ATOMSUB64_DAG, ATOMOR64_DAG, ATOMAND64_DAG,
// ATOMXOR64_DAG, ATOMNAND64_DAG, ATOMSWAP64_DAG -
// Atomic 64-bit binary operations.
ATOMADD64_DAG = ISD::FIRST_TARGET_MEMORY_OPCODE,
ATOMSUB64_DAG,
ATOMOR64_DAG,
ATOMXOR64_DAG,
ATOMAND64_DAG,
ATOMNAND64_DAG,
ATOMSWAP64_DAG
};
}

View File

@ -26,6 +26,7 @@
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
namespace llvm {
@ -142,11 +143,11 @@ addFrameReference(const MachineInstrBuilder &MIB, int FI, int Offset = 0) {
Flags |= MachineMemOperand::MOLoad;
if (TID.mayStore())
Flags |= MachineMemOperand::MOStore;
MachineMemOperand MMO(PseudoSourceValue::getFixedStack(FI),
Flags,
Offset,
MFI.getObjectSize(FI),
MFI.getObjectAlignment(FI));
MachineMemOperand *MMO =
MF.getMachineMemOperand(PseudoSourceValue::getFixedStack(FI),
Flags, Offset,
MFI.getObjectSize(FI),
MFI.getObjectAlignment(FI));
return addOffset(MIB.addFrameIndex(FI), Offset)
.addMemOperand(MMO);
}

View File

@ -2296,7 +2296,7 @@ MachineInstr* X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
// Determine the alignment of the load.
unsigned Alignment = 0;
if (LoadMI->hasOneMemOperand())
Alignment = LoadMI->memoperands_begin()->getAlignment();
Alignment = (*LoadMI->memoperands_begin())->getAlignment();
else
switch (LoadMI->getOpcode()) {
case X86::V_SET0:
@ -2567,7 +2567,7 @@ X86InstrInfo::unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N,
std::vector<SDValue> AfterOps;
DebugLoc dl = N->getDebugLoc();
unsigned NumOps = N->getNumOperands();
for (unsigned i = 0; i != NumOps-2; ++i) {
for (unsigned i = 0; i != NumOps-1; ++i) {
SDValue Op = N->getOperand(i);
if (i >= Index-NumDefs && i < Index-NumDefs + X86AddrNumOperands)
AddrOps.push_back(Op);
@ -2576,8 +2576,6 @@ X86InstrInfo::unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N,
else if (i > Index-NumDefs)
AfterOps.push_back(Op);
}
SDValue MemOp = N->getOperand(NumOps-2);
AddrOps.push_back(MemOp);
SDValue Chain = N->getOperand(NumOps-1);
AddrOps.push_back(Chain);
@ -2614,10 +2612,8 @@ X86InstrInfo::unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N,
// Emit the store instruction.
if (FoldedStore) {
AddrOps.pop_back();
AddrOps.pop_back();
AddrOps.push_back(SDValue(NewNode, 0));
AddrOps.push_back(MemOp);
AddrOps.push_back(Chain);
bool isAligned = (RI.getStackAlignment() >= 16) ||
RI.needsStackRealignment(MF);
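The alignment check at the top of this file's foldMemoryOperandImpl change now reads the sole memory reference through a double dereference; isolated, the idiom looks like this hedged helper (name invented):

#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineMemOperand.h"
using namespace llvm;

// Returns the alignment of the sole memory reference, or 0 if the
// instruction does not have exactly one MachineMemOperand.
static unsigned getSoleMemRefAlignment(MachineInstr *MI) {
  if (!MI->hasOneMemOperand())
    return 0;
  return (*MI->memoperands_begin())->getAlignment();
}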

View File

@ -1,4 +1,4 @@
; RUN: llc < %s -mtriple=arm-apple-darwin9 -stats |& grep asm-printer | grep 162
; RUN: llc < %s -mtriple=arm-apple-darwin9 -stats |& grep asm-printer | grep 159
%"struct.Adv5::Ekin<3>" = type <{ i8 }>
%"struct.Adv5::X::Energyflux<3>" = type { double }

View File

@ -1135,24 +1135,18 @@ public:
emitCode("}");
}
// Generate MemOperandSDNode nodes for each memory access covered by
// Populate MemRefs with entries for each memory access covered by
// this pattern.
if (II.mayLoad | II.mayStore) {
std::vector<std::string>::const_iterator mi, mie;
for (mi = LSI.begin(), mie = LSI.end(); mi != mie; ++mi) {
std::string LSIName = "LSI_" + *mi;
emitCode("SDValue " + LSIName + " = "
"CurDAG->getMemOperand(cast<MemSDNode>(" +
*mi + ")->getMemOperand());");
if (GenDebug) {
emitCode("CurDAG->setSubgraphColor(" + LSIName +".getNode(), \"yellow\");");
emitCode("CurDAG->setSubgraphColor(" + LSIName +".getNode(), \"black\");");
}
if (IsVariadic)
emitCode("Ops" + utostr(OpsNo) + ".push_back(" + LSIName + ");");
else
AllOps.push_back(LSIName);
}
if (isRoot && !LSI.empty()) {
std::string MemRefs = "MemRefs" + utostr(OpsNo);
emitCode("MachineSDNode::mmo_iterator " + MemRefs + " = "
"MF->allocateMemRefsArray(" + utostr(LSI.size()) + ");");
for (unsigned i = 0, e = LSI.size(); i != e; ++i)
emitCode(MemRefs + "[" + utostr(i) + "] = "
"cast<MemSDNode>(" + LSI[i] + ")->getMemOperand();");
After.push_back("cast<MachineSDNode>(ResNode)->setMemRefs(" +
MemRefs + ", " + MemRefs + " + " + utostr(LSI.size()) +
");");
}
if (NodeHasChain) {
@ -1965,7 +1959,6 @@ void DAGISelEmitter::EmitInstructionSelector(raw_ostream &OS) {
<< " assert(!N.isMachineOpcode() && \"Node already selected!\");\n"
<< " break;\n"
<< " case ISD::EntryToken: // These nodes remain the same.\n"
<< " case ISD::MEMOPERAND:\n"
<< " case ISD::BasicBlock:\n"
<< " case ISD::Register:\n"
<< " case ISD::HANDLENODE:\n"