Rename AtomicExpandLoadLinked to AtomicExpand

AtomicExpandLoadLinked is currently rather ARM-specific. This patch is the
first of a group that aims to make it more target-independent. See
http://lists.cs.uiuc.edu/pipermail/llvmdev/2014-August/075873.html
for details.

The command line option is "atomic-expand".

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@216231 91177308-0d34-0410-b5e6-96231b3b80d8
Robin Morisset
2014-08-21 21:50:01 +00:00
parent 4921d1af7d
commit cf165c36ee
17 changed files with 41 additions and 40 deletions
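The new option name can be exercised directly through opt, exactly as the updated RUN lines below do. A minimal sketch (the input function and file name are hypothetical):

; Hypothetical input (example.ll); run it through the renamed pass with:
;   opt -S -o - -mtriple=armv7-apple-ios7.0 -atomic-expand example.ll
define i32 @example_xchg(i32* %ptr, i32 %val) {
  %old = atomicrmw xchg i32* %ptr, i32 %val seq_cst
  ret i32 %old
}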


@@ -345,7 +345,7 @@ protected:
 /// List of target independent CodeGen pass IDs.
 namespace llvm {
-  FunctionPass *createAtomicExpandLoadLinkedPass(const TargetMachine *TM);
+  FunctionPass *createAtomicExpandPass(const TargetMachine *TM);

   /// \brief Create a basic TargetTransformInfo analysis pass.
   ///
@@ -372,8 +372,9 @@ namespace llvm {
   /// matching during instruction selection.
   FunctionPass *createCodeGenPreparePass(const TargetMachine *TM = nullptr);

-  /// AtomicExpandLoadLinkedID -- FIXME
-  extern char &AtomicExpandLoadLinkedID;
+  /// AtomicExpandID -- Lowers atomic operations in terms of either cmpxchg
+  /// load-linked/store-conditional loops.
+  extern char &AtomicExpandID;

   /// MachineLoopInfo - This pass is a loop analysis pass.
   extern char &MachineLoopInfoID;


@@ -71,7 +71,7 @@ void initializeAliasDebuggerPass(PassRegistry&);
 void initializeAliasSetPrinterPass(PassRegistry&);
 void initializeAlwaysInlinerPass(PassRegistry&);
 void initializeArgPromotionPass(PassRegistry&);
-void initializeAtomicExpandLoadLinkedPass(PassRegistry&);
+void initializeAtomicExpandPass(PassRegistry&);
 void initializeSampleProfileLoaderPass(PassRegistry&);
 void initializeBarrierNoopPass(PassRegistry&);
 void initializeBasicAliasAnalysisPass(PassRegistry&);


@@ -937,7 +937,7 @@ public:
   /// @}

   //===--------------------------------------------------------------------===//
-  /// \name Helpers for load-linked/store-conditional atomic expansion.
+  /// \name Helpers for atomic expansion.
   /// @{

   /// Perform a load-linked operation on Addr, returning a "Value *" with the
@@ -957,7 +957,7 @@ public:
   }

   /// Return true if the given (atomic) instruction should be expanded by the
-  /// IR-level AtomicExpandLoadLinked pass into a loop involving
+  /// IR-level AtomicExpand pass into a loop involving
   /// load-linked/store-conditional pairs. Atomic stores will be expanded in the
   /// same way as "atomic xchg" operations which ignore their output if needed.
   virtual bool shouldExpandAtomicInIR(Instruction *Inst) const {
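For illustration, these are the four atomic instruction kinds the hook can be asked about, each paired with the pass function (from the file below) that expands it. A sketch in the IR syntax of the time; function and value names are hypothetical:

define void @atomic_kinds(i32* %p, i32 %v, i32 %d) {
  %l = load atomic i32* %p seq_cst, align 4             ; handled by expandAtomicLoad
  store atomic i32 %v, i32* %p seq_cst, align 4         ; handled by expandAtomicStore
  %r = atomicrmw add i32* %p, i32 %v seq_cst            ; handled by expandAtomicRMW
  %c = cmpxchg i32* %p, i32 %d, i32 %v seq_cst seq_cst  ; handled by expandAtomicCmpXchg
  ret void
}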


@@ -118,7 +118,7 @@ public:
   virtual bool enablePostMachineScheduler() const;

   /// \brief True if the subtarget should run the atomic expansion pass.
-  virtual bool enableAtomicExpandLoadLinked() const;
+  virtual bool enableAtomicExpand() const;

   /// \brief Override generic scheduling policy within a region.
   ///


@@ -1,4 +1,4 @@
-//===-- AtomicExpandLoadLinkedPass.cpp - Expand atomic instructions -------===//
+//===-- AtomicExpandPass.cpp - Expand atomic instructions -------===//
 //
 //                     The LLVM Compiler Infrastructure
 //
@@ -25,16 +25,16 @@
 using namespace llvm;

-#define DEBUG_TYPE "arm-atomic-expand"
+#define DEBUG_TYPE "atomic-expand"

 namespace {
-  class AtomicExpandLoadLinked : public FunctionPass {
+  class AtomicExpand: public FunctionPass {
     const TargetMachine *TM;
   public:
     static char ID; // Pass identification, replacement for typeid
-    explicit AtomicExpandLoadLinked(const TargetMachine *TM = nullptr)
+    explicit AtomicExpand(const TargetMachine *TM = nullptr)
       : FunctionPass(ID), TM(TM) {
-      initializeAtomicExpandLoadLinkedPass(*PassRegistry::getPassRegistry());
+      initializeAtomicExpandPass(*PassRegistry::getPassRegistry());
     }
     bool runOnFunction(Function &F) override;
@@ -50,18 +50,18 @@ namespace {
   };
 }

-char AtomicExpandLoadLinked::ID = 0;
-char &llvm::AtomicExpandLoadLinkedID = AtomicExpandLoadLinked::ID;
+char AtomicExpand::ID = 0;
+char &llvm::AtomicExpandID = AtomicExpand::ID;

-INITIALIZE_TM_PASS(AtomicExpandLoadLinked, "atomic-ll-sc",
-                   "Expand Atomic calls in terms of load-linked & store-conditional",
+INITIALIZE_TM_PASS(AtomicExpand, "atomic-expand",
+                   "Expand Atomic calls in terms of either load-linked & store-conditional or cmpxchg",
                    false, false)

-FunctionPass *llvm::createAtomicExpandLoadLinkedPass(const TargetMachine *TM) {
-  return new AtomicExpandLoadLinked(TM);
+FunctionPass *llvm::createAtomicExpandPass(const TargetMachine *TM) {
+  return new AtomicExpand(TM);
 }

-bool AtomicExpandLoadLinked::runOnFunction(Function &F) {
-  if (!TM || !TM->getSubtargetImpl()->enableAtomicExpandLoadLinked())
+bool AtomicExpand::runOnFunction(Function &F) {
+  if (!TM || !TM->getSubtargetImpl()->enableAtomicExpand())
     return false;

   SmallVector<Instruction *, 1> AtomicInsts;
@@ -97,7 +97,7 @@ bool AtomicExpandLoadLinked::runOnFunction(Function &F) {
   return MadeChange;
 }

-bool AtomicExpandLoadLinked::expandAtomicLoad(LoadInst *LI) {
+bool AtomicExpand::expandAtomicLoad(LoadInst *LI) {
   // Load instructions don't actually need a leading fence, even in the
   // SequentiallyConsistent case.
   AtomicOrdering MemOpOrder =
@@ -119,7 +119,7 @@ bool AtomicExpandLoadLinked::expandAtomicLoad(LoadInst *LI) {
   return true;
 }

-bool AtomicExpandLoadLinked::expandAtomicStore(StoreInst *SI) {
+bool AtomicExpand::expandAtomicStore(StoreInst *SI) {
   // The only atomic 64-bit store on ARM is an strexd that succeeds, which means
   // we need a loop and the entire instruction is essentially an "atomicrmw
   // xchg" that ignores the value loaded.
@@ -133,7 +133,7 @@ bool AtomicExpandLoadLinked::expandAtomicStore(StoreInst *SI) {
   return expandAtomicRMW(AI);
 }

-bool AtomicExpandLoadLinked::expandAtomicRMW(AtomicRMWInst *AI) {
+bool AtomicExpand::expandAtomicRMW(AtomicRMWInst *AI) {
   AtomicOrdering Order = AI->getOrdering();
   Value *Addr = AI->getPointerOperand();
   BasicBlock *BB = AI->getParent();
@@ -233,7 +233,7 @@ bool AtomicExpandLoadLinked::expandAtomicRMW(AtomicRMWInst *AI) {
   return true;
 }

-bool AtomicExpandLoadLinked::expandAtomicCmpXchg(AtomicCmpXchgInst *CI) {
+bool AtomicExpand::expandAtomicCmpXchg(AtomicCmpXchgInst *CI) {
   AtomicOrdering SuccessOrder = CI->getSuccessOrdering();
   AtomicOrdering FailureOrder = CI->getFailureOrdering();
   Value *Addr = CI->getPointerOperand();
@@ -359,7 +359,7 @@ bool AtomicExpandLoadLinked::expandAtomicCmpXchg(AtomicCmpXchgInst *CI) {
   return true;
 }

-AtomicOrdering AtomicExpandLoadLinked::insertLeadingFence(IRBuilder<> &Builder,
-                                                          AtomicOrdering Ord) {
+AtomicOrdering AtomicExpand::insertLeadingFence(IRBuilder<> &Builder,
+                                                AtomicOrdering Ord) {
   if (!TM->getSubtargetImpl()->getTargetLowering()->getInsertFencesForAtomic())
     return Ord;
@@ -372,7 +372,7 @@ AtomicOrdering AtomicExpandLoadLinked::insertLeadingFence(IRBuilder<> &Builder,
   return Monotonic;
 }

-void AtomicExpandLoadLinked::insertTrailingFence(IRBuilder<> &Builder,
-                                                 AtomicOrdering Ord) {
+void AtomicExpand::insertTrailingFence(IRBuilder<> &Builder,
+                                       AtomicOrdering Ord) {
   if (!TM->getSubtargetImpl()->getTargetLowering()->getInsertFencesForAtomic())
     return;
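To make the expansion concrete: on an ARMv7-class subtarget (where getInsertFencesForAtomic() is true), expandAtomicRMW rewrites a seq_cst xchg into a load-linked/store-conditional loop bracketed by the fences inserted above. A rough sketch of the resulting IR for the example from the commit message (block and value names are illustrative; the exact output depends on the target's emitLoadLinked/emitStoreConditional hooks):

define i32 @example_xchg(i32* %ptr, i32 %val) {
entry:
  fence release                                         ; insertLeadingFence: seq_cst -> release
  br label %atomicrmw.start
atomicrmw.start:
  %loaded = call i32 @llvm.arm.ldrex.p0i32(i32* %ptr)   ; load-linked
  %stat = call i32 @llvm.arm.strex.p0i32(i32 %val, i32* %ptr) ; store-conditional, 0 = success
  %tryagain = icmp ne i32 %stat, 0
  br i1 %tryagain, label %atomicrmw.start, label %atomicrmw.end
atomicrmw.end:
  fence acquire                                         ; insertTrailingFence: seq_cst -> acquire
  ret i32 %loaded
}

declare i32 @llvm.arm.ldrex.p0i32(i32*)
declare i32 @llvm.arm.strex.p0i32(i32, i32*)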


@@ -2,7 +2,7 @@ add_llvm_library(LLVMCodeGen
   AggressiveAntiDepBreaker.cpp
   AllocationOrder.cpp
   Analysis.cpp
-  AtomicExpandLoadLinkedPass.cpp
+  AtomicExpandPass.cpp
   BasicTargetTransformInfo.cpp
   BranchFolding.cpp
   CalcSpillWeights.cpp


@@ -20,7 +20,7 @@ using namespace llvm;
 /// initializeCodeGen - Initialize all passes linked into the CodeGen library.
 void llvm::initializeCodeGen(PassRegistry &Registry) {
-  initializeAtomicExpandLoadLinkedPass(Registry);
+  initializeAtomicExpandPass(Registry);
   initializeBasicTTIPass(Registry);
   initializeBranchFolderPassPass(Registry);
   initializeCodeGenPreparePass(Registry);


@@ -144,7 +144,7 @@ TargetPassConfig *AArch64TargetMachine::createPassConfig(PassManagerBase &PM) {
 void AArch64PassConfig::addIRPasses() {
   // Always expand atomic operations, we don't deal with atomicrmw or cmpxchg
   // ourselves.
-  addPass(createAtomicExpandLoadLinkedPass(TM));
+  addPass(createAtomicExpandPass(TM));

   // Cmpxchg instructions are often used with a subsequent comparison to
   // determine whether it succeeded. We can exploit existing control-flow in


@@ -428,7 +428,7 @@ bool ARMSubtarget::enablePostMachineScheduler() const {
   return (!isThumb() || hasThumb2());
 }

-bool ARMSubtarget::enableAtomicExpandLoadLinked() const {
+bool ARMSubtarget::enableAtomicExpand() const {
   return hasAnyDataBarrier() && !isThumb1Only();
 }


@@ -436,8 +436,8 @@ public:
   /// True for some subtargets at > -O0.
   bool enablePostMachineScheduler() const override;

-  // enableAtomicExpandLoadLinked - True if we need to expand our atomics.
-  bool enableAtomicExpandLoadLinked() const override;
+  // enableAtomicExpand- True if we need to expand our atomics.
+  bool enableAtomicExpand() const override;

   /// getInstrItins - Return the instruction itineraries based on subtarget
   /// selection.


@@ -161,7 +161,7 @@ void ARMPassConfig::addIRPasses() {
   if (TM->Options.ThreadModel == ThreadModel::Single)
     addPass(createLowerAtomicPass());
   else
-    addPass(createAtomicExpandLoadLinkedPass(TM));
+    addPass(createAtomicExpandPass(TM));

   // Cmpxchg instructions are often used with a subsequent comparison to
   // determine whether it succeeded. We can exploit existing control-flow in


@@ -39,7 +39,7 @@ bool TargetSubtargetInfo::useMachineScheduler() const {
   return enableMachineScheduler();
 }

-bool TargetSubtargetInfo::enableAtomicExpandLoadLinked() const {
+bool TargetSubtargetInfo::enableAtomicExpand() const {
   return true;
 }


@@ -1,4 +1,4 @@
-; RUN: opt -S -o - -mtriple=armv7-apple-ios7.0 -atomic-ll-sc %s | FileCheck %s
+; RUN: opt -S -o - -mtriple=armv7-apple-ios7.0 -atomic-expand %s | FileCheck %s

 define i8 @test_atomic_xchg_i8(i8* %ptr, i8 %xchgend) {
 ; CHECK-LABEL: @test_atomic_xchg_i8
@@ -361,4 +361,4 @@ define i64 @test_cmpxchg_i64_monotonic_monotonic(i64* %ptr, i64 %desired, i64 %n
   %pairold = cmpxchg i64* %ptr, i64 %desired, i64 %newval monotonic monotonic
   %old = extractvalue { i64, i1 } %pairold, 0
   ret i64 %old
-}
+}


@@ -1,4 +1,4 @@
-; RUN: opt -S -o - -mtriple=armv8-linux-gnueabihf -atomic-ll-sc %s | FileCheck %s
+; RUN: opt -S -o - -mtriple=armv8-linux-gnueabihf -atomic-expand %s | FileCheck %s

 define i8 @test_atomic_xchg_i8(i8* %ptr, i8 %xchgend) {
 ; CHECK-LABEL: @test_atomic_xchg_i8
@@ -223,4 +223,4 @@ define i64 @test_cmpxchg_i64_monotonic_monotonic(i64* %ptr, i64 %desired, i64 %n
   %pairold = cmpxchg i64* %ptr, i64 %desired, i64 %newval monotonic monotonic
   %old = extractvalue { i64, i1 } %pairold, 0
   ret i64 %old
-}
+}


@@ -1,4 +1,4 @@
-; RUN: opt -atomic-ll-sc -S -mtriple=thumbv7s-apple-ios7.0 %s | FileCheck %s
+; RUN: opt -atomic-expand -S -mtriple=thumbv7s-apple-ios7.0 %s | FileCheck %s

 define i32 @test_cmpxchg_seq_cst(i32* %addr, i32 %desired, i32 %new) {
 ; CHECK-LABEL: @test_cmpxchg_seq_cst


@@ -345,7 +345,7 @@ int main(int argc, char **argv) {
   // For codegen passes, only passes that do IR to IR transformation are
   // supported.
   initializeCodeGenPreparePass(Registry);
-  initializeAtomicExpandLoadLinkedPass(Registry);
+  initializeAtomicExpandPass(Registry);
 #ifdef LINK_POLLY_INTO_TOOLS
   polly::initializePollyPasses(Registry);