Mirror of https://github.com/c64scene-ar/llvm-6502.git (synced 2025-09-24 23:28:41 +00:00)
Revert r164910 because it causes failures to several phase2 builds.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@164911 91177308-0d34-0410-b5e6-96231b3b80d8

@@ -301,10 +301,6 @@ namespace {
    /// looking for a better chain (aliasing node.)
    SDValue FindBetterChain(SDNode *N, SDValue Chain);

    /// Merge consecutive store operations into a wide store.
    /// \return True if some memory operations were changed.
    bool MergeConsecutiveStores(StoreSDNode *N);

  public:
    DAGCombiner(SelectionDAG &D, AliasAnalysis &A, CodeGenOpt::Level OL)
      : DAG(D), TLI(D.getTargetLoweringInfo()), Level(BeforeLegalizeTypes),

@@ -7427,250 +7423,6 @@ SDValue DAGCombiner::TransformFPLoadStorePair(SDNode *N) {
  return SDValue();
}

/// Returns the base pointer and an integer offset from that object.
static std::pair<SDValue, int64_t> GetPointerBaseAndOffset(SDValue Ptr) {
  if (Ptr->getOpcode() == ISD::ADD && isa<ConstantSDNode>(Ptr->getOperand(1))) {
    int64_t Offset = cast<ConstantSDNode>(Ptr->getOperand(1))->getSExtValue();
    SDValue Base = Ptr->getOperand(0);
    return std::make_pair(Base, Offset);
  }

  return std::make_pair(Ptr, 0);
}
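
In isolation, the helper above decomposes a pointer expression of the form (add Base, Const) into a (Base, Offset) pair. A minimal standalone sketch of the same idea, using an invented ToyExpr type instead of the real SDValue/SDNode classes (all Toy* names and getBaseAndOffset are illustrative only, not part of the SelectionDAG API):

#include <cstdint>
#include <utility>

struct ToyExpr {
  enum KindTy { Add, Const, Leaf };
  KindTy Kind = Leaf;
  ToyExpr *LHS = nullptr;  // left operand, used when Kind == Add
  ToyExpr *RHS = nullptr;  // right operand, used when Kind == Add
  int64_t Value = 0;       // constant value, used when Kind == Const
};

// (add Base, c) -> (Base, c); any other expression is its own base, offset 0.
static std::pair<ToyExpr *, int64_t> getBaseAndOffset(ToyExpr *Ptr) {
  if (Ptr->Kind == ToyExpr::Add && Ptr->RHS && Ptr->RHS->Kind == ToyExpr::Const)
    return {Ptr->LHS, Ptr->RHS->Value};
  return {Ptr, 0};
}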

struct ConsecutiveMemoryChainSorter {
  typedef std::pair<LSBaseSDNode*, int64_t> MemLink;
  bool operator()(MemLink LHS, MemLink RHS) {
    return LHS.second < RHS.second;
  }
};
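
The comparator above only orders (memory node, byte offset) pairs by ascending offset, so that stores to adjacent addresses end up next to each other before the consecutive-run scan. A minimal sketch of the equivalent ordering with std::sort and a lambda, using plain void* in place of LSBaseSDNode* (sortByOffset is an invented helper name):

#include <algorithm>
#include <cstdint>
#include <utility>
#include <vector>

using MemLink = std::pair<void *, int64_t>;

// Sort candidate memory operations by their byte offset from the common base
// pointer; the same ordering ConsecutiveMemoryChainSorter provides.
static void sortByOffset(std::vector<MemLink> &Links) {
  std::sort(Links.begin(), Links.end(),
            [](const MemLink &L, const MemLink &R) { return L.second < R.second; });
}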

bool DAGCombiner::MergeConsecutiveStores(StoreSDNode* St) {
  EVT MemVT = St->getMemoryVT();
  int64_t ElementSizeBytes = MemVT.getSizeInBits()/8;

  // Don't handle vectors.
  if (MemVT.isVector() || !MemVT.isSimple())
    return false;

  // Perform an early exit check. Do not bother looking at stored values that
  // are not constants or loads.
  SDValue StoredVal = St->getValue();
  if (!isa<ConstantSDNode>(StoredVal) && !isa<ConstantFPSDNode>(StoredVal) &&
      !isa<LoadSDNode>(StoredVal))
    return false;

  // Is this a load-to-store or a const-store.
  bool IsLoadSrc = isa<LoadSDNode>(StoredVal);

  // Only look at ends of store chains.
  SDValue Chain = SDValue(St, 1);
  if (Chain->hasOneUse() && Chain->use_begin()->getOpcode() == ISD::STORE)
    return false;

  // This holds the base pointer and the offset in bytes from the base pointer.
  std::pair<SDValue, int64_t> BasePtr =
      GetPointerBaseAndOffset(St->getBasePtr());

  // We must have a base and an offset.
  if (!BasePtr.first.getNode())
    return false;

  // Do not handle stores to undef base pointers.
  if (BasePtr.first.getOpcode() == ISD::UNDEF)
    return false;

  SmallVector<std::pair<StoreSDNode*, int64_t>, 8> StoreNodes;
  // Walk up the chain and look for nodes with offsets from the same
  // base pointer. Stop when reaching an instruction with a different kind
  // or instruction which has a different base pointer.
  StoreSDNode *Index = St;
  while (Index) {
    // If the chain has more than one use, then we can't reorder the mem ops.
    if (Index != St && !SDValue(Index, 1)->hasOneUse())
      break;

    // Find the base pointer and offset for this memory node.
    std::pair<SDValue, int64_t> Ptr =
        GetPointerBaseAndOffset(Index->getBasePtr());

    // Check that the base pointer is the same as the original one.
    if (Ptr.first.getNode() != BasePtr.first.getNode())
      break;

    // Check that the alignment is the same.
    if (Index->getAlignment() != St->getAlignment())
      break;

    // The memory operands must not be volatile.
    if (Index->isVolatile() || Index->isIndexed())
      break;

    // No truncation.
    if (StoreSDNode *St = dyn_cast<StoreSDNode>(Index))
      if (St->isTruncatingStore())
        break;

    // The stored memory type must be the same.
    if (Index->getMemoryVT() != MemVT)
      break;

    // We found a potential memory operand to merge.
    StoreNodes.push_back(std::make_pair(Index, Ptr.second));

    // Move up the chain to the next memory operation.
    Index = dyn_cast<StoreSDNode>(Index->getChain().getNode());
  }
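
  // Worked example (for illustration): given a chain of four non-volatile i8
  // stores  st p -> st p+1 -> st p+2 -> st p+3  that all share the base
  // pointer p, the alignment and the memory type, the walk above starts at the
  // chain's final store (offset 3) and collects all four candidates with
  // offsets 3, 2, 1 and 0; it stops once Index->getChain() is no longer a
  // StoreSDNode.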

  // Check if there is anything to merge.
  if (StoreNodes.size() < 2)
    return false;

  // Remember which node is the earliest node in the chain.
  LSBaseSDNode *EarliestOp = StoreNodes.back().first;

  // Sort the memory operands according to their distance from the base pointer.
  std::sort(StoreNodes.begin(), StoreNodes.end(),
            ConsecutiveMemoryChainSorter());

  LSBaseSDNode *FirstInChain = StoreNodes[0].first;

  // Scan the memory operations on the chain and find the first non-consecutive
  // store memory address.
  unsigned LastConsecutiveStore = 0;
  int64_t StartAddress = StoreNodes[0].second;
  for (unsigned i=1; i<StoreNodes.size(); ++i) {
    int64_t CurrAddress = StoreNodes[i].second;
    if (CurrAddress - StartAddress != (ElementSizeBytes * i))
      break;
    LastConsecutiveStore = i;
  }
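
  // Worked example (for illustration): with ElementSizeBytes == 4 and sorted
  // offsets {0, 4, 8, 20}, the scan above accepts offsets 0, 4 and 8 and stops
  // at 20 because 20 - 0 != 4 * 3, leaving LastConsecutiveStore == 2, i.e. a
  // run of three consecutive stores.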

  // Store the constants into memory as one consecutive store.
  if (!IsLoadSrc) {
    unsigned LastConst = 0;
    for (unsigned i=0; i<LastConsecutiveStore+1; ++i) {
      SDValue StoredVal = StoreNodes[i].first->getValue();
      bool IsConst = (isa<ConstantSDNode>(StoredVal) ||
                      isa<ConstantFPSDNode>(StoredVal));
      if (!IsConst)
        break;
      LastConst = i;
    }
    unsigned NumElem = std::min(LastConsecutiveStore + 1, LastConst + 1);
    if (NumElem < 2)
      return false;

    EVT JointMemOpVT = EVT::getVectorVT(*DAG.getContext(), MemVT, NumElem);
    DebugLoc DL = StoreNodes[0].first->getDebugLoc();
    SmallVector<SDValue, 8> Ops;

    for (unsigned i = 0; i < NumElem ; ++i) {
      StoreSDNode *St = cast<StoreSDNode>(StoreNodes[i].first);
      Ops.push_back(St->getValue());
    }

    SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, DL,
                             JointMemOpVT, &Ops[0], Ops.size());

    SDValue NewStore = DAG.getStore(EarliestOp->getChain(), DL, BV,
                                    FirstInChain->getBasePtr(),
                                    FirstInChain->getPointerInfo(), false, false,
                                    FirstInChain->getAlignment());

    for (unsigned i = 0; i < NumElem ; ++i) {
      StoreSDNode *St = cast<StoreSDNode>(StoreNodes[i].first);
      CombineTo(St, NewStore);
    }
    return true;
  }
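
  // Worked example (for illustration): four consecutive i8 stores of the
  // constants 1, 2, 3 and 4 at offsets 0..3 from the same base are rewritten
  // above as a single store of the v4i8 BUILD_VECTOR <1, 2, 3, 4> at the first
  // address; the four original stores are then replaced via CombineTo.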

  // Look for load nodes which are used by the stored values.
  SmallVector<std::pair<LoadSDNode*, int64_t>, 8> LoadNodes;

  // Find acceptable loads. Loads need to have the same chain (token factor),
  // must not be zext, volatile, indexed, and they must be consecutive.
  SDValue LdBasePtr;
  for (unsigned i=0; i<LastConsecutiveStore+1; ++i) {
    LoadSDNode *Ld = dyn_cast<LoadSDNode>(StoreNodes[i].first->getValue());
    if (!Ld) break;

    // Loads must only have one use.
    if (!Ld->hasNUsesOfValue(1, 0))
      break;

    // Check that the alignment is the same as the stores.
    if (Ld->getAlignment() != St->getAlignment())
      break;

    // The memory operands must not be volatile.
    if (Ld->isVolatile() || Ld->isIndexed())
      break;

    // The loads must not be extending loads.
    if (Ld->getExtensionType() != ISD::NON_EXTLOAD)
      break;

    // The loaded memory type must match the stored type.
    if (Ld->getMemoryVT() != MemVT)
      break;

    std::pair<SDValue, int64_t> LdPtr =
        GetPointerBaseAndOffset(Ld->getBasePtr());

    // If this is not the first ptr that we check.
    if (LdBasePtr.getNode()) {
      // The base ptr must be the same.
      if (LdPtr.first != LdBasePtr)
        break;
    } else {
      LdBasePtr = LdPtr.first;
    }

    // We found a potential memory operand to merge.
    LoadNodes.push_back(std::make_pair(Ld, LdPtr.second));
  }

  if (LoadNodes.size() < 2)
    return false;

  // Scan the memory operations on the chain and find the first non-consecutive
  // load memory address.
  unsigned LastConsecutiveLoad = 0;
  StartAddress = LoadNodes[0].second;
  for (unsigned i=1; i<LoadNodes.size(); ++i) {
    int64_t CurrAddress = LoadNodes[i].second;
    if (CurrAddress - StartAddress != (ElementSizeBytes * i))
      break;
    LastConsecutiveLoad = i;
  }

  unsigned NumElem =
      std::min(LastConsecutiveStore + 1, LastConsecutiveLoad + 1);

  EVT JointMemOpVT = EVT::getVectorVT(*DAG.getContext(), MemVT, NumElem);
  DebugLoc LoadDL = LoadNodes[0].first->getDebugLoc();
  DebugLoc StoreDL = StoreNodes[0].first->getDebugLoc();

  LoadSDNode *FirstLoad = LoadNodes[0].first;
  SDValue NewLoad = DAG.getLoad(JointMemOpVT, LoadDL,
                                FirstLoad->getChain(),
                                FirstLoad->getBasePtr(),
                                FirstLoad->getPointerInfo(),
                                false, false, false,
                                FirstLoad->getAlignment());

  SDValue NewStore = DAG.getStore(EarliestOp->getChain(), StoreDL, NewLoad,
                                  FirstInChain->getBasePtr(),
                                  FirstInChain->getPointerInfo(), false, false,
                                  FirstInChain->getAlignment());

  for (unsigned i = 0; i < NumElem ; ++i) {
    StoreSDNode *St = cast<StoreSDNode>(StoreNodes[i].first);
    CombineTo(St, NewStore);
  }

  return true;
}
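
At the source level, the load-to-store half of the merge above corresponds to collapsing a run of adjacent element copies into one wide copy. A minimal C++ sketch of that equivalence with ordinary pointers, assuming the regions do not overlap and ignoring the alignment, volatility and type-legality checks the reverted code performs (copySeparately and copyMerged are invented names):

#include <cstdint>
#include <cstring>

// Four adjacent i8 loads feeding four adjacent i8 stores...
void copySeparately(uint8_t *Dst, const uint8_t *Src) {
  Dst[0] = Src[0];
  Dst[1] = Src[1];
  Dst[2] = Src[2];
  Dst[3] = Src[3];
}

// ...behave like one wide load/store pair when Dst and Src do not overlap,
// which is what the merged NewLoad/NewStore above model.
void copyMerged(uint8_t *Dst, const uint8_t *Src) {
  std::memcpy(Dst, Src, 4);
}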

SDValue DAGCombiner::visitSTORE(SDNode *N) {
  StoreSDNode *ST = cast<StoreSDNode>(N);
  SDValue Chain = ST->getChain();

@@ -7872,12 +7624,6 @@ SDValue DAGCombiner::visitSTORE(SDNode *N) {
                             ST->getAlignment());
  }

  // Only perform this optimization before the types are legal, because we
  // don't want to generate illegal types in this optimization.
  if (!LegalTypes && MergeConsecutiveStores(ST))
    return SDValue(N, 0);

  return ReduceLoadOpStoreWidth(N);
}