Mirror of https://github.com/c64scene-ar/llvm-6502.git (synced 2025-02-06 06:33:24 +00:00)
Mutate TargetLowering::shouldExpandAtomicRMWInIR to specifically dictate how AtomicRMWInsts are expanded.
Summary:
In PNaCl, most atomic instructions have their own @llvm.nacl.atomic.* function, and each one, with a few exceptions, represents consistent behaviour across all NaCl-supported targets. Unfortunately, the atomic RMW operations nand, [u]min, and [u]max aren't directly represented by any such @llvm.nacl.atomic.* function. This patch refines shouldExpandAtomicRMWInIR in TargetLowering so that a future `Le32TargetLowering` class can selectively inform the caller how the target wants the atomic RMW instruction expanded, if at all (i.e. via load-linked/store-conditional for ARM/AArch64, via cmpxchg for X86/others?, or not at all for Mips). This is not a behavioural change, so no tests were added.

Patch by: Richard Diamond.

Reviewers: jfb

Reviewed By: jfb

Subscribers: jfb, aemerson, t.p.northover, llvm-commits

Differential Revision: http://reviews.llvm.org/D7713

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@231250 91177308-0d34-0410-b5e6-96231b3b80d8
This commit is contained in:
parent 2e37a6f306 · commit 81338a4890
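To make the intent concrete, here is a minimal sketch of how the future `Le32TargetLowering` mentioned above might override the new hook. The class does not exist in this patch, and its choice of CmpXChg for the unrepresentable operations is an assumption for illustration only:

TargetLoweringBase::AtomicRMWExpansionKind
Le32TargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
  // Hypothetical: nand/[u]min/[u]max have no @llvm.nacl.atomic.* intrinsic,
  // so ask the AtomicExpand pass to rewrite them into a cmpxchg loop, which
  // the intrinsics can express.
  switch (AI->getOperation()) {
  case AtomicRMWInst::Nand:
  case AtomicRMWInst::Max:
  case AtomicRMWInst::Min:
  case AtomicRMWInst::UMax:
  case AtomicRMWInst::UMin:
    return AtomicRMWExpansionKind::CmpXChg;
  default:
    return AtomicRMWExpansionKind::None; // covered by @llvm.nacl.atomic.rmw
  }
}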
include/llvm/Target/TargetLowering.h

@@ -123,6 +123,18 @@ public:
     // mask (ex: x86 blends).
   };
 
+  /// Enum that specifies what an AtomicRMWInst is expanded to, if at all.
+  /// Exists because different targets have different levels of support for
+  /// these atomic RMW instructions, and also have different options w.r.t.
+  /// what they should expand to.
+  enum class AtomicRMWExpansionKind {
+    None,    // Don't expand the instruction.
+    LLSC,    // Expand the instruction into load-linked/store-conditional;
+             // used by ARM/AArch64. Implies `hasLoadLinkedStoreConditional`
+             // returns true.
+    CmpXChg, // Expand the instruction into cmpxchg; used by at least X86.
+  };
+
   static ISD::NodeType getExtendForContent(BooleanContent Content) {
     switch (Content) {
     case UndefinedBooleanContent:
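For reference, the instructions this enum governs are created along these lines. A minimal sketch against the IRBuilder API of this era, where buildNand is an illustrative helper rather than anything in the patch:

#include "llvm/IR/IRBuilder.h"
using namespace llvm;

// Builds "atomicrmw nand i32* %Ptr, i32 %Val seq_cst" -- nand being one of
// the operations (with [u]min and [u]max) that motivated this patch.
Value *buildNand(Instruction *InsertBefore, Value *Ptr, Value *Val) {
  IRBuilder<> Builder(InsertBefore);
  // CreateAtomicRMW returns the instruction, whose result is the value the
  // memory location held before the update.
  return Builder.CreateAtomicRMW(AtomicRMWInst::Nand, Ptr, Val,
                                 SequentiallyConsistent);
}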
@@ -1064,10 +1076,11 @@ public:
   /// (through emitLoadLinked()).
   virtual bool shouldExpandAtomicLoadInIR(LoadInst *LI) const { return false; }
 
-  /// Returns true if the given AtomicRMW should be expanded by the
-  /// IR-level AtomicExpand pass into a loop using LoadLinked/StoreConditional.
-  virtual bool shouldExpandAtomicRMWInIR(AtomicRMWInst *RMWI) const {
-    return false;
+  /// Returns how the IR-level AtomicExpand pass should expand the given
+  /// AtomicRMW, if at all. Default is to never expand.
+  virtual AtomicRMWExpansionKind
+  shouldExpandAtomicRMWInIR(AtomicRMWInst *) const {
+    return AtomicRMWExpansionKind::None;
   }
 
   /// On some platforms, an AtomicRMW that never actually modifies the value
lib/CodeGen/AtomicExpandPass.cpp

@@ -48,7 +48,7 @@ namespace {
     bool expandAtomicLoadToLL(LoadInst *LI);
     bool expandAtomicLoadToCmpXchg(LoadInst *LI);
     bool expandAtomicStore(StoreInst *SI);
-    bool expandAtomicRMW(AtomicRMWInst *AI);
+    bool tryExpandAtomicRMW(AtomicRMWInst *AI);
     bool expandAtomicRMWToLLSC(AtomicRMWInst *AI);
     bool expandAtomicRMWToCmpXchg(AtomicRMWInst *AI);
     bool expandAtomicCmpXchg(AtomicCmpXchgInst *CI);
@@ -135,9 +135,12 @@ bool AtomicExpand::runOnFunction(Function &F) {
       // - into a load if it is idempotent
       // - into a Cmpxchg/LL-SC loop otherwise
       // we try them in that order.
-      MadeChange |=
-          (isIdempotentRMW(RMWI) && simplifyIdempotentRMW(RMWI)) ||
-          (TLI->shouldExpandAtomicRMWInIR(RMWI) && expandAtomicRMW(RMWI));
+      if (isIdempotentRMW(RMWI) && simplifyIdempotentRMW(RMWI)) {
+        MadeChange = true;
+      } else {
+        MadeChange |= tryExpandAtomicRMW(RMWI);
+      }
     } else if (CASI && TLI->hasLoadLinkedStoreConditional()) {
       MadeChange |= expandAtomicCmpXchg(CASI);
     }
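An idempotent RMW is one whose operation can never change the stored value (add 0, or 0, and -1, and so on); simplifyIdempotentRMW turns it into a suitably fenced load. A source-level C++ sketch of the same observation, assuming nothing beyond the standard library:

#include <atomic>

// x.fetch_add(0) is an idempotent RMW: it can never modify memory, so it
// only observes the value, and the IR-level analogue can be lowered to a
// fence plus an ordinary load instead of a full read-modify-write.
int observe(std::atomic<int> &x) {
  return x.fetch_add(0);
}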
@@ -211,7 +214,7 @@ bool AtomicExpand::expandAtomicStore(StoreInst *SI) {
   // atomic if implemented as a native store. So we replace them by an
   // atomic swap, that can be implemented for example as a ldrex/strex on ARM
   // or lock cmpxchg8/16b on X86, as these are atomic for larger sizes.
-  // It is the responsibility of the target to only return true in
+  // It is the responsibility of the target to only signal expansion via
   // shouldExpandAtomicRMW in cases where this is required and possible.
   IRBuilder<> Builder(SI);
   AtomicRMWInst *AI =
@@ -220,15 +223,27 @@ bool AtomicExpand::expandAtomicStore(StoreInst *SI) {
   SI->eraseFromParent();
 
   // Now we have an appropriate swap instruction, lower it as usual.
-  return expandAtomicRMW(AI);
+  return tryExpandAtomicRMW(AI);
 }
 
-bool AtomicExpand::expandAtomicRMW(AtomicRMWInst *AI) {
-  if (TLI->hasLoadLinkedStoreConditional())
+bool AtomicExpand::tryExpandAtomicRMW(AtomicRMWInst *AI) {
+  switch (TLI->shouldExpandAtomicRMWInIR(AI)) {
+  case TargetLoweringBase::AtomicRMWExpansionKind::None:
+    return false;
+  case TargetLoweringBase::AtomicRMWExpansionKind::LLSC: {
+    assert(TLI->hasLoadLinkedStoreConditional() &&
+           "TargetLowering requested we expand AtomicRMW instruction into "
+           "load-linked/store-conditional combos, but such instructions "
+           "aren't supported");
     return expandAtomicRMWToLLSC(AI);
-  else
+  }
+  case TargetLoweringBase::AtomicRMWExpansionKind::CmpXChg: {
     return expandAtomicRMWToCmpXchg(AI);
+  }
+  }
+  llvm_unreachable("Unhandled case in tryExpandAtomicRMW");
 }
 
 /// Emit IR to implement the given atomicrmw operation on values in registers,
 /// returning the new value.
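What expandAtomicRMWToCmpXchg builds is, morally, the classic compare-exchange retry loop. A source-level C++ sketch of the same computation for nand (the pass emits equivalent IR, not this code):

#include <atomic>

// Compare-exchange loop equivalent to "atomicrmw nand": keep recomputing
// until no other thread has raced in between the load and the exchange.
int atomic_nand(std::atomic<int> &Addr, int Val) {
  int Old = Addr.load();
  int New;
  do {
    New = ~(Old & Val); // the nand operation itself
    // On failure, compare_exchange_weak reloads Old and we retry.
  } while (!Addr.compare_exchange_weak(Old, New));
  return Old; // like atomicrmw, yield the pre-update value
}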
lib/Target/AArch64/AArch64ISelLowering.cpp

@@ -8755,9 +8755,11 @@ bool AArch64TargetLowering::shouldExpandAtomicLoadInIR(LoadInst *LI) const {
 }
 
 // For the real atomic operations, we have ldxr/stxr up to 128 bits,
-bool AArch64TargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
+TargetLoweringBase::AtomicRMWExpansionKind
+AArch64TargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
   unsigned Size = AI->getType()->getPrimitiveSizeInBits();
-  return Size <= 128;
+  return Size <= 128 ? AtomicRMWExpansionKind::LLSC
+                     : AtomicRMWExpansionKind::None;
 }
 
 bool AArch64TargetLowering::hasLoadLinkedStoreConditional() const {
lib/Target/AArch64/AArch64ISelLowering.h

@@ -335,7 +335,8 @@ public:
 
   bool shouldExpandAtomicLoadInIR(LoadInst *LI) const override;
   bool shouldExpandAtomicStoreInIR(StoreInst *SI) const override;
-  bool shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override;
+  TargetLoweringBase::AtomicRMWExpansionKind
+  shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override;
 
   bool useLoadStackGuardNode() const override;
   TargetLoweringBase::LegalizeTypeAction
lib/Target/ARM/ARMISelLowering.cpp

@@ -11199,9 +11199,12 @@ bool ARMTargetLowering::shouldExpandAtomicLoadInIR(LoadInst *LI) const {
 
 // For the real atomic operations, we have ldrex/strex up to 32 bits,
 // and up to 64 bits on the non-M profiles
-bool ARMTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
+TargetLoweringBase::AtomicRMWExpansionKind
+ARMTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
   unsigned Size = AI->getType()->getPrimitiveSizeInBits();
-  return Size <= (Subtarget->isMClass() ? 32U : 64U);
+  return (Size <= (Subtarget->isMClass() ? 32U : 64U))
+             ? AtomicRMWExpansionKind::LLSC
+             : AtomicRMWExpansionKind::None;
 }
 
 // This has so far only been implemented for MachO.
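For readers unfamiliar with LL/SC, the loop that expandAtomicRMWToLLSC builds corresponds to the following sketch written with clang's ARM exclusive-access builtins (illustrative only; the pass emits the equivalent IR via emitLoadLinked/emitStoreConditional, and memory barriers are omitted here):

// ldrex/strex retry loop equivalent to "atomicrmw add" on ARM.
int atomic_add_llsc(int *Ptr, int Val) {
  int Old;
  do {
    Old = __builtin_arm_ldrex(Ptr); // load-linked (LDREX)
    // __builtin_arm_strex returns 0 on success and 1 if the exclusive
    // monitor was lost, i.e. the store-conditional failed.
  } while (__builtin_arm_strex(Old + Val, Ptr)); // store-conditional (STREX)
  return Old;
}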
lib/Target/ARM/ARMISelLowering.h

@@ -404,7 +404,8 @@ namespace llvm {
 
     bool shouldExpandAtomicLoadInIR(LoadInst *LI) const override;
     bool shouldExpandAtomicStoreInIR(StoreInst *SI) const override;
-    bool shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override;
+    TargetLoweringBase::AtomicRMWExpansionKind
+    shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override;
 
     bool useLoadStackGuardNode() const override;
 
lib/Target/X86/X86ISelLowering.cpp

@@ -16398,14 +16398,17 @@ bool X86TargetLowering::shouldExpandAtomicLoadInIR(LoadInst *LI) const {
   return needsCmpXchgNb(PTy->getElementType());
 }
 
-bool X86TargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
+TargetLoweringBase::AtomicRMWExpansionKind
+X86TargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
   unsigned NativeWidth = Subtarget->is64Bit() ? 64 : 32;
   const Type *MemType = AI->getType();
 
   // If the operand is too big, we must see if cmpxchg8/16b is available
   // and default to library calls otherwise.
-  if (MemType->getPrimitiveSizeInBits() > NativeWidth)
-    return needsCmpXchgNb(MemType);
+  if (MemType->getPrimitiveSizeInBits() > NativeWidth) {
+    return needsCmpXchgNb(MemType) ? AtomicRMWExpansionKind::CmpXChg
+                                   : AtomicRMWExpansionKind::None;
+  }
 
   AtomicRMWInst::BinOp Op = AI->getOperation();
   switch (Op) {
@@ -16415,13 +16418,14 @@ bool X86TargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
   case AtomicRMWInst::Add:
   case AtomicRMWInst::Sub:
     // It's better to use xadd, xsub or xchg for these in all cases.
-    return false;
+    return AtomicRMWExpansionKind::None;
   case AtomicRMWInst::Or:
   case AtomicRMWInst::And:
   case AtomicRMWInst::Xor:
     // If the atomicrmw's result isn't actually used, we can just add a "lock"
     // prefix to a normal instruction for these operations.
-    return !AI->use_empty();
+    return !AI->use_empty() ? AtomicRMWExpansionKind::CmpXChg
+                            : AtomicRMWExpansionKind::None;
   case AtomicRMWInst::Nand:
   case AtomicRMWInst::Max:
   case AtomicRMWInst::Min:
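The Or/And/Xor case is worth spelling out: when the atomicrmw's result is unused, X86 can lower the instruction directly to a lock-prefixed memory operation, so no IR expansion is requested. A standard-library illustration of the two situations:

#include <atomic>

std::atomic<int> Flags;

void set_bit() {
  // Result discarded: on x86 this can become a single "lock or" instruction,
  // which is why shouldExpandAtomicRMWInIR returns None for this shape.
  Flags.fetch_or(0x1);
}

int set_bit_and_get() {
  // Result used: no plain lock-prefixed "or" produces the old value (only
  // xadd/xchg do), so the target requests a cmpxchg-loop expansion instead.
  return Flags.fetch_or(0x1);
}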
@@ -16429,7 +16433,7 @@ bool X86TargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
   case AtomicRMWInst::UMin:
     // These always require a non-trivial set of data operations on x86. We must
     // use a cmpxchg loop.
-    return true;
+    return AtomicRMWExpansionKind::CmpXChg;
   }
 }
 
lib/Target/X86/X86ISelLowering.h

@@ -991,7 +991,8 @@ namespace llvm {
 
     bool shouldExpandAtomicLoadInIR(LoadInst *SI) const override;
     bool shouldExpandAtomicStoreInIR(StoreInst *SI) const override;
-    bool shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override;
+    TargetLoweringBase::AtomicRMWExpansionKind
+    shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override;
 
     LoadInst *
     lowerIdempotentRMWIntoFencedLoad(AtomicRMWInst *AI) const override;