Pass address space to allowsUnalignedMemoryAccesses
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@200888 91177308-0d34-0410-b5e6-96231b3b80d8
parent bb7bf85f3c
commit a9ff3fd942
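
For context, the call sites below imply the hook's new shape: an address-space parameter with a default value, so existing one-argument callers keep compiling, alongside the pre-existing optional out-flag for reporting whether the access is fast. A sketch of the updated TargetLowering declaration, inferred from this diff rather than copied from TargetLowering.h:

// Inferred sketch; the base implementation is conservative, and targets
// override it per value type and, after this commit, per address space.
virtual bool allowsUnalignedMemoryAccesses(EVT VT,
                                           unsigned AddrSpace = 0,
                                           bool *Fast = 0) const {
  return false;  // default: only naturally aligned accesses are allowed
}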
lib/CodeGen/SelectionDAG/LegalizeDAG.cpp

@@ -729,10 +729,11 @@ void SelectionDAGLegalize::LegalizeStoreOps(SDNode *Node) {
     MVT VT = Value.getSimpleValueType();
     switch (TLI.getOperationAction(ISD::STORE, VT)) {
     default: llvm_unreachable("This action is not supported yet!");
-    case TargetLowering::Legal:
+    case TargetLowering::Legal: {
       // If this is an unaligned store and the target doesn't support it,
       // expand it.
-      if (!TLI.allowsUnalignedMemoryAccesses(ST->getMemoryVT())) {
+      unsigned AS = ST->getAddressSpace();
+      if (!TLI.allowsUnalignedMemoryAccesses(ST->getMemoryVT(), AS)) {
         Type *Ty = ST->getMemoryVT().getTypeForEVT(*DAG.getContext());
         unsigned ABIAlignment= TLI.getDataLayout()->getABITypeAlignment(Ty);
         if (ST->getAlignment() < ABIAlignment)
@@ -740,6 +741,7 @@ void SelectionDAGLegalize::LegalizeStoreOps(SDNode *Node) {
                                DAG, TLI, this);
       }
       break;
+    }
     case TargetLowering::Custom: {
       SDValue Res = TLI.LowerOperation(SDValue(Node, 0), DAG);
       if (Res.getNode())
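
Threading the address space through matters because a target can legitimately answer differently per memory region. A minimal sketch of an override in a hypothetical target (the class name and address-space numbering are illustrative, not part of this commit):

// Hypothetical target override: unaligned access is fine in generic
// memory but forbidden in a constrained on-chip address space.
bool MyTargetLowering::allowsUnalignedMemoryAccesses(EVT VT,
                                                     unsigned AddrSpace,
                                                     bool *Fast) const {
  if (AddrSpace == 3)          // hypothetical on-chip address space
    return false;              // require naturally aligned accesses there
  if (Fast)
    *Fast = (VT == MVT::i32);  // pretend only 32-bit unaligned ops are cheap
  return VT == MVT::i32 || VT == MVT::i64;
}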
@@ -840,16 +842,18 @@ void SelectionDAGLegalize::LegalizeStoreOps(SDNode *Node) {
     switch (TLI.getTruncStoreAction(ST->getValue().getSimpleValueType(),
                                     StVT.getSimpleVT())) {
     default: llvm_unreachable("This action is not supported yet!");
-    case TargetLowering::Legal:
+    case TargetLowering::Legal: {
+      unsigned AS = ST->getAddressSpace();
       // If this is an unaligned store and the target doesn't support it,
       // expand it.
-      if (!TLI.allowsUnalignedMemoryAccesses(ST->getMemoryVT())) {
+      if (!TLI.allowsUnalignedMemoryAccesses(ST->getMemoryVT(), AS)) {
         Type *Ty = ST->getMemoryVT().getTypeForEVT(*DAG.getContext());
         unsigned ABIAlignment= TLI.getDataLayout()->getABITypeAlignment(Ty);
         if (ST->getAlignment() < ABIAlignment)
           ExpandUnalignedStore(cast<StoreSDNode>(Node), DAG, TLI, this);
       }
       break;
+    }
     case TargetLowering::Custom: {
       SDValue Res = TLI.LowerOperation(SDValue(Node, 0), DAG);
       if (Res.getNode())
@@ -889,10 +893,11 @@ void SelectionDAGLegalize::LegalizeLoadOps(SDNode *Node) {

     switch (TLI.getOperationAction(Node->getOpcode(), VT)) {
     default: llvm_unreachable("This action is not supported yet!");
-    case TargetLowering::Legal:
+    case TargetLowering::Legal: {
+      unsigned AS = LD->getAddressSpace();
       // If this is an unaligned load and the target doesn't support it,
       // expand it.
-      if (!TLI.allowsUnalignedMemoryAccesses(LD->getMemoryVT())) {
+      if (!TLI.allowsUnalignedMemoryAccesses(LD->getMemoryVT(), AS)) {
         Type *Ty = LD->getMemoryVT().getTypeForEVT(*DAG.getContext());
         unsigned ABIAlignment =
           TLI.getDataLayout()->getABITypeAlignment(Ty);
@@ -901,6 +906,7 @@ void SelectionDAGLegalize::LegalizeLoadOps(SDNode *Node) {
         }
       }
       break;
+    }
     case TargetLowering::Custom: {
       SDValue Res = TLI.LowerOperation(RVal, DAG);
       if (Res.getNode()) {
@@ -1074,7 +1080,9 @@ void SelectionDAGLegalize::LegalizeLoadOps(SDNode *Node) {
       } else {
         // If this is an unaligned load and the target doesn't support
         // it, expand it.
-        if (!TLI.allowsUnalignedMemoryAccesses(LD->getMemoryVT())) {
+        EVT MemVT = LD->getMemoryVT();
+        unsigned AS = LD->getAddressSpace();
+        if (!TLI.allowsUnalignedMemoryAccesses(MemVT, AS)) {
           Type *Ty =
             LD->getMemoryVT().getTypeForEVT(*DAG.getContext());
           unsigned ABIAlignment =
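
For readers unfamiliar with the fallback these comments refer to: when the target rejects an unaligned access, the legalizer expands it into accesses the target does allow. A conceptual illustration of the idea in plain C++ (the real ExpandUnalignedLoad builds SDNodes and also handles floating-point and vector cases):

#include <cstdint>

// Conceptual expansion of an unaligned little-endian 32-bit load into
// four aligned byte loads combined with shifts and ors.
uint32_t loadUnaligned32(const uint8_t *P) {
  return  (uint32_t)P[0]
       | ((uint32_t)P[1] << 8)
       | ((uint32_t)P[2] << 16)
       | ((uint32_t)P[3] << 24);
}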
lib/CodeGen/SelectionDAG/SelectionDAG.cpp

@@ -3633,8 +3633,9 @@ static bool FindOptimalMemOpLowering(std::vector<EVT> &MemOps,
                                      DAG.getMachineFunction());

   if (VT == MVT::Other) {
-    if (DstAlign >= TLI.getDataLayout()->getPointerPrefAlignment() ||
-        TLI.allowsUnalignedMemoryAccesses(VT)) {
+    unsigned AS = 0;
+    if (DstAlign >= TLI.getDataLayout()->getPointerPrefAlignment(AS) ||
+        TLI.allowsUnalignedMemoryAccesses(VT, AS)) {
       VT = TLI.getPointerTy();
     } else {
       switch (DstAlign & 7) {
@@ -3691,9 +3692,10 @@ static bool FindOptimalMemOpLowering(std::vector<EVT> &MemOps,
       // FIXME: Only does this for 64-bit or more since we don't have proper
       // cost model for unaligned load / store.
       bool Fast;
+      unsigned AS = 0;
       if (NumMemOps && AllowOverlap &&
           VTSize >= 8 && NewVTSize < Size &&
-          TLI.allowsUnalignedMemoryAccesses(VT, 0, &Fast) && Fast)
+          TLI.allowsUnalignedMemoryAccesses(VT, AS, &Fast) && Fast)
         VTSize = Size;
       else {
         VT = NewVT;
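
Note the hard-coded AS = 0 in both hunks: memcpy/memset lowering presumably has no address space plumbed through at this point, so the default address space is queried. The Fast out-parameter lets a caller distinguish "legal" from "legal and cheap", as in this condensed restatement of the guard above:

// Only keep the wide, possibly overlapping memory op when the target says
// the unaligned access is not just allowed but also fast.
bool Fast = false;
if (TLI.allowsUnalignedMemoryAccesses(VT, /*AddrSpace=*/0, &Fast) && Fast)
  VTSize = Size;  // keep the wide type and overlap the tail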
lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp

@@ -5723,9 +5723,13 @@ bool SelectionDAGBuilder::visitMemCmpCall(const CallInst &I) {
     // bloat the code.
     const TargetLowering *TLI = TM.getTargetLowering();
     if (ActuallyDoIt && CSize->getZExtValue() > 4) {
+      unsigned DstAS = LHS->getType()->getPointerAddressSpace();
+      unsigned SrcAS = RHS->getType()->getPointerAddressSpace();
       // TODO: Handle 5 byte compare as 4-byte + 1 byte.
       // TODO: Handle 8 byte compare on x86-32 as two 32-bit loads.
-      if (!TLI->isTypeLegal(LoadVT) ||!TLI->allowsUnalignedMemoryAccesses(LoadVT))
+      if (!TLI->isTypeLegal(LoadVT) ||
+          !TLI->allowsUnalignedMemoryAccesses(LoadVT, SrcAS) ||
+          !TLI->allowsUnalignedMemoryAccesses(LoadVT, DstAS))
         ActuallyDoIt = false;
     }
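
Since the memcmp fast path loads from both operands, the new guard checks both pointers' address spaces; either one refusing unaligned loads of LoadVT disables the wide-load expansion. Restated as a standalone predicate (illustrative only):

// Wide loads are profitable only if the type is legal and unaligned loads
// of it are permitted in both operands' address spaces.
bool UseWideLoads = TLI->isTypeLegal(LoadVT) &&
                    TLI->allowsUnalignedMemoryAccesses(LoadVT, SrcAS) &&
                    TLI->allowsUnalignedMemoryAccesses(LoadVT, DstAS);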