Rename AtomicExpandLoadLinked to AtomicExpand

AtomicExpandLoadLinked is currently rather ARM-specific. This patch is the first of
a group that aims to make it more target-independent. See
http://lists.cs.uiuc.edu/pipermail/llvmdev/2014-August/075873.html
for details.

The command-line option is "atomic-expand".
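For example, the pass can now be run standalone through opt under its new name, mirroring the updated RUN lines in the tests below ("input.ll" here is just a placeholder file name):

    opt -S -mtriple=armv7-apple-ios7.0 -atomic-expand input.ll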

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@216231 91177308-0d34-0410-b5e6-96231b3b80d8
Robin Morisset 2014-08-21 21:50:01 +00:00
parent 4921d1af7d
commit cf165c36ee
17 changed files with 41 additions and 40 deletions


@@ -345,7 +345,7 @@ protected:
/// List of target independent CodeGen pass IDs.
namespace llvm {
-FunctionPass *createAtomicExpandLoadLinkedPass(const TargetMachine *TM);
+FunctionPass *createAtomicExpandPass(const TargetMachine *TM);
/// \brief Create a basic TargetTransformInfo analysis pass.
///
@@ -372,8 +372,9 @@ namespace llvm {
/// matching during instruction selection.
FunctionPass *createCodeGenPreparePass(const TargetMachine *TM = nullptr);
-/// AtomicExpandLoadLinkedID -- FIXME
-extern char &AtomicExpandLoadLinkedID;
+/// AtomicExpandID -- Lowers atomic operations in terms of either cmpxchg or
+/// load-linked/store-conditional loops.
+extern char &AtomicExpandID;
/// MachineLoopInfo - This pass is a loop analysis pass.
extern char &MachineLoopInfoID;


@@ -71,7 +71,7 @@ void initializeAliasDebuggerPass(PassRegistry&);
void initializeAliasSetPrinterPass(PassRegistry&);
void initializeAlwaysInlinerPass(PassRegistry&);
void initializeArgPromotionPass(PassRegistry&);
-void initializeAtomicExpandLoadLinkedPass(PassRegistry&);
+void initializeAtomicExpandPass(PassRegistry&);
void initializeSampleProfileLoaderPass(PassRegistry&);
void initializeBarrierNoopPass(PassRegistry&);
void initializeBasicAliasAnalysisPass(PassRegistry&);


@@ -937,7 +937,7 @@ public:
/// @}
//===--------------------------------------------------------------------===//
-/// \name Helpers for load-linked/store-conditional atomic expansion.
+/// \name Helpers for atomic expansion.
/// @{
/// Perform a load-linked operation on Addr, returning a "Value *" with the
@@ -957,7 +957,7 @@
}
/// Return true if the given (atomic) instruction should be expanded by the
-/// IR-level AtomicExpandLoadLinked pass into a loop involving
+/// IR-level AtomicExpand pass into a loop involving
/// load-linked/store-conditional pairs. Atomic stores will be expanded in the
/// same way as "atomic xchg" operations which ignore their output if needed.
virtual bool shouldExpandAtomicInIR(Instruction *Inst) const {
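As a minimal sketch of how a backend uses this hook (not part of this diff; the class name is hypothetical and the snippet is only illustrative), a target that wants every atomic instruction rewritten into an LL/SC loop could override it roughly like this:

    #include "llvm/Target/TargetLowering.h"
    using namespace llvm;

    // Hypothetical lowering class, shown only to illustrate the hook above.
    class MyTargetLowering : public TargetLowering {
    public:
      // Ask the AtomicExpand pass to expand every atomic load, store, RMW and
      // cmpxchg it visits into a load-linked/store-conditional loop.
      bool shouldExpandAtomicInIR(Instruction *Inst) const override {
        return true;
      }
    };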


@@ -118,7 +118,7 @@ public:
virtual bool enablePostMachineScheduler() const;
/// \brief True if the subtarget should run the atomic expansion pass.
-virtual bool enableAtomicExpandLoadLinked() const;
+virtual bool enableAtomicExpand() const;
/// \brief Override generic scheduling policy within a region.
///


@@ -1,4 +1,4 @@
-//===-- AtomicExpandLoadLinkedPass.cpp - Expand atomic instructions -------===//
+//===-- AtomicExpandPass.cpp - Expand atomic instructions -------===//
//
// The LLVM Compiler Infrastructure
//
@@ -25,16 +25,16 @@
using namespace llvm;
#define DEBUG_TYPE "arm-atomic-expand"
#define DEBUG_TYPE "atomic-expand"
namespace {
-class AtomicExpandLoadLinked : public FunctionPass {
+class AtomicExpand: public FunctionPass {
const TargetMachine *TM;
public:
static char ID; // Pass identification, replacement for typeid
-explicit AtomicExpandLoadLinked(const TargetMachine *TM = nullptr)
+explicit AtomicExpand(const TargetMachine *TM = nullptr)
: FunctionPass(ID), TM(TM) {
-initializeAtomicExpandLoadLinkedPass(*PassRegistry::getPassRegistry());
+initializeAtomicExpandPass(*PassRegistry::getPassRegistry());
}
bool runOnFunction(Function &F) override;
@@ -50,18 +50,18 @@ namespace {
};
}
-char AtomicExpandLoadLinked::ID = 0;
-char &llvm::AtomicExpandLoadLinkedID = AtomicExpandLoadLinked::ID;
-INITIALIZE_TM_PASS(AtomicExpandLoadLinked, "atomic-ll-sc",
-"Expand Atomic calls in terms of load-linked & store-conditional",
+char AtomicExpand::ID = 0;
+char &llvm::AtomicExpandID = AtomicExpand::ID;
+INITIALIZE_TM_PASS(AtomicExpand, "atomic-expand",
+"Expand Atomic calls in terms of either load-linked & store-conditional or cmpxchg",
false, false)
-FunctionPass *llvm::createAtomicExpandLoadLinkedPass(const TargetMachine *TM) {
-return new AtomicExpandLoadLinked(TM);
+FunctionPass *llvm::createAtomicExpandPass(const TargetMachine *TM) {
+return new AtomicExpand(TM);
}
-bool AtomicExpandLoadLinked::runOnFunction(Function &F) {
-if (!TM || !TM->getSubtargetImpl()->enableAtomicExpandLoadLinked())
+bool AtomicExpand::runOnFunction(Function &F) {
+if (!TM || !TM->getSubtargetImpl()->enableAtomicExpand())
return false;
SmallVector<Instruction *, 1> AtomicInsts;
@@ -97,7 +97,7 @@ bool AtomicExpandLoadLinked::runOnFunction(Function &F) {
return MadeChange;
}
-bool AtomicExpandLoadLinked::expandAtomicLoad(LoadInst *LI) {
+bool AtomicExpand::expandAtomicLoad(LoadInst *LI) {
// Load instructions don't actually need a leading fence, even in the
// SequentiallyConsistent case.
AtomicOrdering MemOpOrder =
@@ -119,7 +119,7 @@ bool AtomicExpandLoadLinked::expandAtomicLoad(LoadInst *LI) {
return true;
}
-bool AtomicExpandLoadLinked::expandAtomicStore(StoreInst *SI) {
+bool AtomicExpand::expandAtomicStore(StoreInst *SI) {
// The only atomic 64-bit store on ARM is an strexd that succeeds, which means
// we need a loop and the entire instruction is essentially an "atomicrmw
// xchg" that ignores the value loaded.
@@ -133,7 +133,7 @@ bool AtomicExpandLoadLinked::expandAtomicStore(StoreInst *SI) {
return expandAtomicRMW(AI);
}
-bool AtomicExpandLoadLinked::expandAtomicRMW(AtomicRMWInst *AI) {
+bool AtomicExpand::expandAtomicRMW(AtomicRMWInst *AI) {
AtomicOrdering Order = AI->getOrdering();
Value *Addr = AI->getPointerOperand();
BasicBlock *BB = AI->getParent();
@@ -233,7 +233,7 @@ bool AtomicExpandLoadLinked::expandAtomicRMW(AtomicRMWInst *AI) {
return true;
}
-bool AtomicExpandLoadLinked::expandAtomicCmpXchg(AtomicCmpXchgInst *CI) {
+bool AtomicExpand::expandAtomicCmpXchg(AtomicCmpXchgInst *CI) {
AtomicOrdering SuccessOrder = CI->getSuccessOrdering();
AtomicOrdering FailureOrder = CI->getFailureOrdering();
Value *Addr = CI->getPointerOperand();
@@ -359,7 +359,7 @@ bool AtomicExpandLoadLinked::expandAtomicCmpXchg(AtomicCmpXchgInst *CI) {
return true;
}
-AtomicOrdering AtomicExpandLoadLinked::insertLeadingFence(IRBuilder<> &Builder,
+AtomicOrdering AtomicExpand::insertLeadingFence(IRBuilder<> &Builder,
AtomicOrdering Ord) {
if (!TM->getSubtargetImpl()->getTargetLowering()->getInsertFencesForAtomic())
return Ord;
@@ -372,7 +372,7 @@ AtomicOrdering AtomicExpandLoadLinked::insertLeadingFence(IRBuilder<> &Builder,
return Monotonic;
}
-void AtomicExpandLoadLinked::insertTrailingFence(IRBuilder<> &Builder,
+void AtomicExpand::insertTrailingFence(IRBuilder<> &Builder,
AtomicOrdering Ord) {
if (!TM->getSubtargetImpl()->getTargetLowering()->getInsertFencesForAtomic())
return;


@@ -2,7 +2,7 @@ add_llvm_library(LLVMCodeGen
AggressiveAntiDepBreaker.cpp
AllocationOrder.cpp
Analysis.cpp
-AtomicExpandLoadLinkedPass.cpp
+AtomicExpandPass.cpp
BasicTargetTransformInfo.cpp
BranchFolding.cpp
CalcSpillWeights.cpp


@@ -20,7 +20,7 @@ using namespace llvm;
/// initializeCodeGen - Initialize all passes linked into the CodeGen library.
void llvm::initializeCodeGen(PassRegistry &Registry) {
-initializeAtomicExpandLoadLinkedPass(Registry);
+initializeAtomicExpandPass(Registry);
initializeBasicTTIPass(Registry);
initializeBranchFolderPassPass(Registry);
initializeCodeGenPreparePass(Registry);


@@ -144,7 +144,7 @@ TargetPassConfig *AArch64TargetMachine::createPassConfig(PassManagerBase &PM) {
void AArch64PassConfig::addIRPasses() {
// Always expand atomic operations, we don't deal with atomicrmw or cmpxchg
// ourselves.
-addPass(createAtomicExpandLoadLinkedPass(TM));
+addPass(createAtomicExpandPass(TM));
// Cmpxchg instructions are often used with a subsequent comparison to
// determine whether it succeeded. We can exploit existing control-flow in


@@ -428,7 +428,7 @@ bool ARMSubtarget::enablePostMachineScheduler() const {
return (!isThumb() || hasThumb2());
}
-bool ARMSubtarget::enableAtomicExpandLoadLinked() const {
+bool ARMSubtarget::enableAtomicExpand() const {
return hasAnyDataBarrier() && !isThumb1Only();
}


@@ -436,8 +436,8 @@ public:
/// True for some subtargets at > -O0.
bool enablePostMachineScheduler() const override;
-// enableAtomicExpandLoadLinked - True if we need to expand our atomics.
-bool enableAtomicExpandLoadLinked() const override;
+// enableAtomicExpand - True if we need to expand our atomics.
+bool enableAtomicExpand() const override;
/// getInstrItins - Return the instruction itineraries based on subtarget
/// selection.


@@ -161,7 +161,7 @@ void ARMPassConfig::addIRPasses() {
if (TM->Options.ThreadModel == ThreadModel::Single)
addPass(createLowerAtomicPass());
else
-addPass(createAtomicExpandLoadLinkedPass(TM));
+addPass(createAtomicExpandPass(TM));
// Cmpxchg instructions are often used with a subsequent comparison to
// determine whether it succeeded. We can exploit existing control-flow in


@@ -39,7 +39,7 @@ bool TargetSubtargetInfo::useMachineScheduler() const {
return enableMachineScheduler();
}
-bool TargetSubtargetInfo::enableAtomicExpandLoadLinked() const {
+bool TargetSubtargetInfo::enableAtomicExpand() const {
return true;
}


@@ -1,4 +1,4 @@
-; RUN: opt -S -o - -mtriple=armv7-apple-ios7.0 -atomic-ll-sc %s | FileCheck %s
+; RUN: opt -S -o - -mtriple=armv7-apple-ios7.0 -atomic-expand %s | FileCheck %s
define i8 @test_atomic_xchg_i8(i8* %ptr, i8 %xchgend) {
; CHECK-LABEL: @test_atomic_xchg_i8
@@ -361,4 +361,4 @@ define i64 @test_cmpxchg_i64_monotonic_monotonic(i64* %ptr, i64 %desired, i64 %n
%pairold = cmpxchg i64* %ptr, i64 %desired, i64 %newval monotonic monotonic
%old = extractvalue { i64, i1 } %pairold, 0
ret i64 %old
-}
+}


@@ -1,4 +1,4 @@
-; RUN: opt -S -o - -mtriple=armv8-linux-gnueabihf -atomic-ll-sc %s | FileCheck %s
+; RUN: opt -S -o - -mtriple=armv8-linux-gnueabihf -atomic-expand %s | FileCheck %s
define i8 @test_atomic_xchg_i8(i8* %ptr, i8 %xchgend) {
; CHECK-LABEL: @test_atomic_xchg_i8
@@ -223,4 +223,4 @@ define i64 @test_cmpxchg_i64_monotonic_monotonic(i64* %ptr, i64 %desired, i64 %n
%pairold = cmpxchg i64* %ptr, i64 %desired, i64 %newval monotonic monotonic
%old = extractvalue { i64, i1 } %pairold, 0
ret i64 %old
-}
+}


@@ -1,4 +1,4 @@
-; RUN: opt -atomic-ll-sc -S -mtriple=thumbv7s-apple-ios7.0 %s | FileCheck %s
+; RUN: opt -atomic-expand -S -mtriple=thumbv7s-apple-ios7.0 %s | FileCheck %s
define i32 @test_cmpxchg_seq_cst(i32* %addr, i32 %desired, i32 %new) {
; CHECK-LABEL: @test_cmpxchg_seq_cst


@@ -345,7 +345,7 @@ int main(int argc, char **argv) {
// For codegen passes, only passes that do IR to IR transformation are
// supported.
initializeCodeGenPreparePass(Registry);
-initializeAtomicExpandLoadLinkedPass(Registry);
+initializeAtomicExpandPass(Registry);
#ifdef LINK_POLLY_INTO_TOOLS
polly::initializePollyPasses(Registry);