mirror of
https://github.com/c64scene-ar/llvm-6502.git
synced 2025-07-30 02:25:19 +00:00
[X86] Make wide loads be managed by AtomicExpand
Summary: AtomicExpand already had logic for expanding wide loads and stores on LL/SC architectures, and for expanding wide stores on CmpXchg architectures, but not for wide loads on CmpXchg architectures. This patch fills this hole, and makes use of this new feature in the X86 backend. Only one functional change: we now lose the SynchScope attribute. It is regrettable, but I have another patch that I will submit soon that will solve this for all of AtomicExpand (it seemed better to split it apart as it is a different concern). Test Plan: make check-all (lots of tests for this functionality already exist) Reviewers: jfb Subscribers: llvm-commits Differential Revision: http://reviews.llvm.org/D5404 git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@218332 91177308-0d34-0410-b5e6-96231b3b80d8
This commit is contained in:
@@ -44,6 +44,8 @@ namespace {
|
||||
bool bracketInstWithFences(Instruction *I, AtomicOrdering Order,
|
||||
bool IsStore, bool IsLoad);
|
||||
bool expandAtomicLoad(LoadInst *LI);
|
||||
bool expandAtomicLoadToLL(LoadInst *LI);
|
||||
bool expandAtomicLoadToCmpXchg(LoadInst *LI);
|
||||
bool expandAtomicStore(StoreInst *SI);
|
||||
bool expandAtomicRMW(AtomicRMWInst *AI);
|
||||
bool expandAtomicRMWToLLSC(AtomicRMWInst *AI);
|
||||
@@ -160,6 +162,15 @@ bool AtomicExpand::bracketInstWithFences(Instruction *I, AtomicOrdering Order,
|
||||
}
|
||||
|
||||
bool AtomicExpand::expandAtomicLoad(LoadInst *LI) {
  // Dispatch on the target's preferred expansion strategy: targets with
  // load-linked/store-conditional get an LL-based sequence; all others
  // emulate the wide atomic load with a compare-and-swap.
  auto TLI = TM->getSubtargetImpl()->getTargetLowering();
  if (TLI->hasLoadLinkedStoreConditional())
    return expandAtomicLoadToLL(LI);
  return expandAtomicLoadToCmpXchg(LI);
}
|
||||
|
||||
bool AtomicExpand::expandAtomicLoadToLL(LoadInst *LI) {
|
||||
auto TLI = TM->getSubtargetImpl()->getTargetLowering();
|
||||
IRBuilder<> Builder(LI);
|
||||
|
||||
@@ -175,6 +186,24 @@ bool AtomicExpand::expandAtomicLoad(LoadInst *LI) {
|
||||
return true;
|
||||
}
|
||||
|
||||
bool AtomicExpand::expandAtomicLoadToCmpXchg(LoadInst *LI) {
  // Emulate an oversized atomic load with a cmpxchg of a placeholder value
  // against itself: whether the exchange "succeeds" or not, the extracted
  // loaded element is the current memory contents, read atomically.
  IRBuilder<> Builder(LI);
  Value *Addr = LI->getPointerOperand();
  Type *ValTy = cast<PointerType>(Addr->getType())->getElementType();
  AtomicOrdering Order = LI->getOrdering();
  Constant *Placeholder = Constant::getNullValue(ValTy);

  // cmpxchg returns a {value, success-flag} pair; we only want the value.
  Value *Result = Builder.CreateAtomicCmpXchg(
      Addr, Placeholder, Placeholder, Order,
      AtomicCmpXchgInst::getStrongestFailureOrdering(Order));
  Value *Loaded = Builder.CreateExtractValue(Result, 0, "loaded");

  // Splice the emulated value in for the original load and drop it.
  LI->replaceAllUsesWith(Loaded);
  LI->eraseFromParent();

  return true;
}
|
||||
|
||||
bool AtomicExpand::expandAtomicStore(StoreInst *SI) {
|
||||
// This function is only called on atomic stores that are too large to be
|
||||
// atomic if implemented as a native store. So we replace them by an
|
||||
|
Reference in New Issue
Block a user