Atomics: promote ARM's IR-based atomics pass to CodeGen.

Still only 32-bit ARM using it at this stage, but the promotion allows
direct testing via opt and is a reasonably self-contained patch on the
way to switching ARM64.

At this point, other targets should be able to make use of it without
too much difficulty if they want. (See ARM64 commit coming soon for an
example).

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@206485 91177308-0d34-0410-b5e6-96231b3b80d8
Author: Tim Northover
Date:   2014-04-17 18:22:47 +00:00
Parent: dd0f0d6a9d
Commit: 09da6b5540
15 changed files with 722 additions and 122 deletions
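
Concretely, "making use of it" for another target means two things, both visible in the diff below: implement the new TargetLowering hooks and schedule the pass from the target's pass configuration. A rough sketch for a hypothetical target (the Foo* names are placeholders invented for illustration; the hook signatures and createAtomicExpandLoadLinkedPass are the ones this patch adds):

// Illustrative sketch only, not part of the commit. "Foo" is a hypothetical
// target; the signatures match the hooks added to TargetLowering.h and the
// factory function added to CodeGen/Passes.h below.
class FooTargetLowering : public TargetLowering {
public:
  // Choose which atomic instructions the IR-level pass rewrites into
  // load-linked/store-conditional loops.
  bool shouldExpandAtomicInIR(Instruction *Inst) const override;

  // Emit the target's LL/SC primitives; the pass stitches them into loops.
  Value *emitLoadLinked(IRBuilder<> &Builder, Value *Addr,
                        AtomicOrdering Ord) const override;
  Value *emitStoreConditional(IRBuilder<> &Builder, Value *Val, Value *Addr,
                              AtomicOrdering Ord) const override;
};

// In the target's TargetPassConfig, run the expansion before instruction
// selection, as the ARMTargetMachine.cpp change below does for ARM.
bool FooPassConfig::addPreISel() {
  addPass(createAtomicExpandLoadLinkedPass(TM));
  return false;
}

Because the pass is now registered with the CodeGen library and with opt, it can also be exercised directly with opt -atomic-ll-sc, which is how the new tests below drive it.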

include/llvm/CodeGen/Passes.h

@@ -349,6 +349,8 @@ protected:
   /// List of target independent CodeGen pass IDs.
 namespace llvm {
+  FunctionPass *createAtomicExpandLoadLinkedPass(const TargetMachine *TM);
+
   /// \brief Create a basic TargetTransformInfo analysis pass.
   ///
   /// This pass implements the target transform info analysis using the target
@@ -374,6 +376,9 @@ namespace llvm {
   /// matching during instruction selection.
   FunctionPass *createCodeGenPreparePass(const TargetMachine *TM = nullptr);

+  /// AtomicExpandLoadLinkedID -- FIXME
+  extern char &AtomicExpandLoadLinkedID;
+
   /// MachineLoopInfo - This pass is a loop analysis pass.
   extern char &MachineLoopInfoID;

include/llvm/InitializePasses.h

@@ -71,6 +71,7 @@ void initializeAliasDebuggerPass(PassRegistry&);
 void initializeAliasSetPrinterPass(PassRegistry&);
 void initializeAlwaysInlinerPass(PassRegistry&);
 void initializeArgPromotionPass(PassRegistry&);
+void initializeAtomicExpandLoadLinkedPass(PassRegistry&);
 void initializeSampleProfileLoaderPass(PassRegistry&);
 void initializeBarrierNoopPass(PassRegistry&);
 void initializeBasicAliasAnalysisPass(PassRegistry&);

include/llvm/Target/TargetLowering.h

@@ -31,6 +31,7 @@
 #include "llvm/IR/CallSite.h"
 #include "llvm/IR/CallingConv.h"
 #include "llvm/IR/InlineAsm.h"
+#include "llvm/IR/IRBuilder.h"
 #include "llvm/MC/MCRegisterInfo.h"
 #include "llvm/Target/TargetCallingConv.h"
 #include "llvm/Target/TargetMachine.h"
@@ -896,6 +897,35 @@ public:
   /// @}

+  //===--------------------------------------------------------------------===//
+  /// \name Helpers for load-linked/store-conditional atomic expansion.
+  /// @{
+
+  /// Perform a load-linked operation on Addr, returning a "Value *" with the
+  /// corresponding pointee type. This may entail some non-trivial operations to
+  /// truncate or reconstruct types that will be illegal in the backend. See
+  /// ARMISelLowering for an example implementation.
+  virtual Value *emitLoadLinked(IRBuilder<> &Builder, Value *Addr,
+                                AtomicOrdering Ord) const {
+    llvm_unreachable("Load linked unimplemented on this target");
+  }
+
+  /// Perform a store-conditional operation to Addr. Return the status of the
+  /// store. This should be 0 if the store succeeded, non-zero otherwise.
+  virtual Value *emitStoreConditional(IRBuilder<> &Builder, Value *Val,
+                                      Value *Addr, AtomicOrdering Ord) const {
+    llvm_unreachable("Store conditional unimplemented on this target");
+  }
+
+  /// Return true if the given (atomic) instruction should be expanded by the
+  /// IR-level AtomicExpandLoadLinked pass into a loop involving
+  /// load-linked/store-conditional pairs. Atomic stores will be expanded in the
+  /// same way as "atomic xchg" operations which ignore their output if needed.
+  virtual bool shouldExpandAtomicInIR(Instruction *Inst) const {
+    return false;
+  }
+
+  /// @}
+
   //===--------------------------------------------------------------------===//
   // TargetLowering Configuration Methods - These methods should be invoked by
   // the derived class constructor to configure this object for the target.
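
Seen from the caller's side, the contract in these comments composes into a simple retry loop. A sketch of how the AtomicExpandLoadLinked pass combines the two hooks (this mirrors expandAtomicRMW later in the diff; computeNewValue is a placeholder for the operation-specific arithmetic, not a real function):

  // Builder is positioned inside the loop block created by the pass.
  Value *Loaded = TLI->emitLoadLinked(Builder, Addr, MemOpOrder);
  Value *NewVal = computeNewValue(Builder, Loaded); // op-specific: add, xchg, ...
  Value *Status = TLI->emitStoreConditional(Builder, NewVal, Addr, MemOpOrder);
  // A zero status means the store-conditional succeeded; otherwise loop again.
  Value *Retry = Builder.CreateICmpNE(Status, Builder.getInt32(0), "tryagain");
  Builder.CreateCondBr(Retry, LoopBB, ExitBB);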

lib/CodeGen/AtomicExpandLoadLinkedPass.cpp (renamed from lib/Target/ARM/ARMAtomicExpandPass.cpp)

@@ -1,4 +1,4 @@
-//===-- ARMAtomicExpandPass.cpp - Expand atomic instructions --------------===//
+//===-- AtomicExpandLoadLinkedPass.cpp - Expand atomic instructions -------===//
 //
 //                     The LLVM Compiler Infrastructure
 //
@@ -13,7 +13,6 @@
 //===----------------------------------------------------------------------===//

 #define DEBUG_TYPE "arm-atomic-expand"
-#include "ARM.h"
 #include "llvm/CodeGen/Passes.h"
 #include "llvm/IR/Function.h"
 #include "llvm/IR/IRBuilder.h"
@@ -26,12 +25,14 @@
 using namespace llvm;

 namespace {
-  class ARMAtomicExpandPass : public FunctionPass {
+  class AtomicExpandLoadLinked : public FunctionPass {
     const TargetLowering *TLI;
   public:
     static char ID; // Pass identification, replacement for typeid
-    explicit ARMAtomicExpandPass(const TargetMachine *TM = 0)
-      : FunctionPass(ID), TLI(TM->getTargetLowering()) {}
+    explicit AtomicExpandLoadLinked(const TargetMachine *TM = 0)
+      : FunctionPass(ID), TLI(TM ? TM->getTargetLowering() : 0) {
+      initializeAtomicExpandLoadLinkedPass(*PassRegistry::getPassRegistry());
+    }

     bool runOnFunction(Function &F) override;
     bool expandAtomicInsts(Function &F);
@@ -43,30 +44,36 @@ namespace {
     AtomicOrdering insertLeadingFence(IRBuilder<> &Builder, AtomicOrdering Ord);
     void insertTrailingFence(IRBuilder<> &Builder, AtomicOrdering Ord);
-
-    /// Perform a load-linked operation on Addr, returning a "Value *" with the
-    /// corresponding pointee type. This may entail some non-trivial operations
-    /// to truncate or reconstruct illegal types since intrinsics must be legal
-    Value *loadLinked(IRBuilder<> &Builder, Value *Addr, AtomicOrdering Ord);
-
-    /// Perform a store-conditional operation to Addr. Return the status of the
-    /// store: 0 if the it succeeded, non-zero otherwise.
-    Value *storeConditional(IRBuilder<> &Builder, Value *Val, Value *Addr,
-                            AtomicOrdering Ord);
-
-    /// Return true if the given (atomic) instruction should be expanded by this
-    /// pass.
-    bool shouldExpandAtomic(Instruction *Inst);
   };
 }

-char ARMAtomicExpandPass::ID = 0;
+char AtomicExpandLoadLinked::ID = 0;
+char &llvm::AtomicExpandLoadLinkedID = AtomicExpandLoadLinked::ID;

-FunctionPass *llvm::createARMAtomicExpandPass(const TargetMachine *TM) {
-  return new ARMAtomicExpandPass(TM);
+static void *initializeAtomicExpandLoadLinkedPassOnce(PassRegistry &Registry) {
+  PassInfo *PI = new PassInfo(
+      "Expand Atomic calls in terms of load-linked & store-conditional",
+      "atomic-ll-sc", &AtomicExpandLoadLinked::ID,
+      PassInfo::NormalCtor_t(callDefaultCtor<AtomicExpandLoadLinked>), false,
+      false, PassInfo::TargetMachineCtor_t(
+                 callTargetMachineCtor<AtomicExpandLoadLinked>));
+  Registry.registerPass(*PI, true);
+  return PI;
 }

-bool ARMAtomicExpandPass::runOnFunction(Function &F) {
+void llvm::initializeAtomicExpandLoadLinkedPass(PassRegistry &Registry) {
+  CALL_ONCE_INITIALIZATION(initializeAtomicExpandLoadLinkedPassOnce)
+}
+
+FunctionPass *llvm::createAtomicExpandLoadLinkedPass(const TargetMachine *TM) {
+  return new AtomicExpandLoadLinked(TM);
+}
+
+bool AtomicExpandLoadLinked::runOnFunction(Function &F) {
+  if (!TLI)
+    return false;
+
   SmallVector<Instruction *, 1> AtomicInsts;

   // Changing control-flow while iterating through it is a bad idea, so gather a
@@ -81,7 +88,7 @@ bool ARMAtomicExpandPass::runOnFunction(Function &F) {
   bool MadeChange = false;
   for (Instruction *Inst : AtomicInsts) {
-    if (!shouldExpandAtomic(Inst))
+    if (!TLI->shouldExpandAtomicInIR(Inst))
       continue;

     if (AtomicRMWInst *AI = dyn_cast<AtomicRMWInst>(Inst))
@@ -99,7 +106,7 @@ bool ARMAtomicExpandPass::runOnFunction(Function &F) {
   return MadeChange;
 }

-bool ARMAtomicExpandPass::expandAtomicLoad(LoadInst *LI) {
+bool AtomicExpandLoadLinked::expandAtomicLoad(LoadInst *LI) {
   // Load instructions don't actually need a leading fence, even in the
   // SequentiallyConsistent case.
   AtomicOrdering MemOpOrder =
@@ -108,7 +115,8 @@ bool ARMAtomicExpandPass::expandAtomicLoad(LoadInst *LI) {
   // The only 64-bit load guaranteed to be single-copy atomic by the ARM ARM is
   // an ldrexd (A3.5.3).
   IRBuilder<> Builder(LI);
-  Value *Val = loadLinked(Builder, LI->getPointerOperand(), MemOpOrder);
+  Value *Val =
+      TLI->emitLoadLinked(Builder, LI->getPointerOperand(), MemOpOrder);

   insertTrailingFence(Builder, LI->getOrdering());
@@ -118,7 +126,7 @@ bool ARMAtomicExpandPass::expandAtomicLoad(LoadInst *LI) {
   return true;
 }

-bool ARMAtomicExpandPass::expandAtomicStore(StoreInst *SI) {
+bool AtomicExpandLoadLinked::expandAtomicStore(StoreInst *SI) {
   // The only atomic 64-bit store on ARM is an strexd that succeeds, which means
   // we need a loop and the entire instruction is essentially an "atomicrmw
   // xchg" that ignores the value loaded.
@@ -132,7 +140,7 @@ bool ARMAtomicExpandPass::expandAtomicStore(StoreInst *SI) {
   return expandAtomicRMW(AI);
 }

-bool ARMAtomicExpandPass::expandAtomicRMW(AtomicRMWInst *AI) {
+bool AtomicExpandLoadLinked::expandAtomicRMW(AtomicRMWInst *AI) {
   AtomicOrdering Order = AI->getOrdering();
   Value *Addr = AI->getPointerOperand();
   BasicBlock *BB = AI->getParent();
@@ -169,7 +177,7 @@ bool ARMAtomicExpandPass::expandAtomicRMW(AtomicRMWInst *AI) {
   // Start the main loop block now that we've taken care of the preliminaries.
   Builder.SetInsertPoint(LoopBB);
-  Value *Loaded = loadLinked(Builder, Addr, MemOpOrder);
+  Value *Loaded = TLI->emitLoadLinked(Builder, Addr, MemOpOrder);

   Value *NewVal;
   switch (AI->getOperation()) {
@@ -215,7 +223,8 @@ bool ARMAtomicExpandPass::expandAtomicRMW(AtomicRMWInst *AI) {
     llvm_unreachable("Unknown atomic op");
   }

-  Value *StoreSuccess = storeConditional(Builder, NewVal, Addr, MemOpOrder);
+  Value *StoreSuccess =
+      TLI->emitStoreConditional(Builder, NewVal, Addr, MemOpOrder);
   Value *TryAgain = Builder.CreateICmpNE(
       StoreSuccess, ConstantInt::get(IntegerType::get(Ctx, 32), 0), "tryagain");
   Builder.CreateCondBr(TryAgain, LoopBB, ExitBB);
@@ -229,7 +238,7 @@ bool ARMAtomicExpandPass::expandAtomicRMW(AtomicRMWInst *AI) {
   return true;
 }

-bool ARMAtomicExpandPass::expandAtomicCmpXchg(AtomicCmpXchgInst *CI) {
+bool AtomicExpandLoadLinked::expandAtomicCmpXchg(AtomicCmpXchgInst *CI) {
   AtomicOrdering SuccessOrder = CI->getSuccessOrdering();
   AtomicOrdering FailureOrder = CI->getFailureOrdering();
   Value *Addr = CI->getPointerOperand();
@@ -257,8 +266,8 @@ bool ARMAtomicExpandPass::expandAtomicCmpXchg(AtomicCmpXchgInst *CI) {
   // cmpxchg.end:
   //     [...]
   BasicBlock *ExitBB = BB->splitBasicBlock(CI, "cmpxchg.end");
-  auto BarrierBB = BasicBlock::Create(Ctx, "cmpxchg.trystore", F, ExitBB);
-  auto TryStoreBB = BasicBlock::Create(Ctx, "cmpxchg.barrier", F, BarrierBB);
+  auto BarrierBB = BasicBlock::Create(Ctx, "cmpxchg.barrier", F, ExitBB);
+  auto TryStoreBB = BasicBlock::Create(Ctx, "cmpxchg.trystore", F, BarrierBB);
   auto LoopBB = BasicBlock::Create(Ctx, "cmpxchg.start", F, TryStoreBB);

   // This grabs the DebugLoc from CI
@@ -274,7 +283,7 @@ bool ARMAtomicExpandPass::expandAtomicCmpXchg(AtomicCmpXchgInst *CI) {
   // Start the main loop block now that we've taken care of the preliminaries.
   Builder.SetInsertPoint(LoopBB);
-  Value *Loaded = loadLinked(Builder, Addr, MemOpOrder);
+  Value *Loaded = TLI->emitLoadLinked(Builder, Addr, MemOpOrder);
   Value *ShouldStore =
       Builder.CreateICmpEQ(Loaded, CI->getCompareOperand(), "should_store");
@@ -284,8 +293,8 @@ bool ARMAtomicExpandPass::expandAtomicCmpXchg(AtomicCmpXchgInst *CI) {
   Builder.CreateCondBr(ShouldStore, TryStoreBB, FailureBB);

   Builder.SetInsertPoint(TryStoreBB);
-  Value *StoreSuccess =
-      storeConditional(Builder, CI->getNewValOperand(), Addr, MemOpOrder);
+  Value *StoreSuccess = TLI->emitStoreConditional(
+      Builder, CI->getNewValOperand(), Addr, MemOpOrder);
   Value *TryAgain = Builder.CreateICmpNE(
       StoreSuccess, ConstantInt::get(Type::getInt32Ty(Ctx), 0), "success");
   Builder.CreateCondBr(TryAgain, LoopBB, BarrierBB);
@@ -302,73 +311,7 @@ bool ARMAtomicExpandPass::expandAtomicCmpXchg(AtomicCmpXchgInst *CI) {
   return true;
 }

-Value *ARMAtomicExpandPass::loadLinked(IRBuilder<> &Builder, Value *Addr,
-                                       AtomicOrdering Ord) {
-  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
-  Type *ValTy = cast<PointerType>(Addr->getType())->getElementType();
-  bool IsAcquire =
-      Ord == Acquire || Ord == AcquireRelease || Ord == SequentiallyConsistent;
-
-  // Since i64 isn't legal and intrinsics don't get type-lowered, the ldrexd
-  // intrinsic must return {i32, i32} and we have to recombine them into a
-  // single i64 here.
-  if (ValTy->getPrimitiveSizeInBits() == 64) {
-    Intrinsic::ID Int =
-        IsAcquire ? Intrinsic::arm_ldaexd : Intrinsic::arm_ldrexd;
-    Function *Ldrex = llvm::Intrinsic::getDeclaration(M, Int);
-
-    Addr = Builder.CreateBitCast(Addr, Type::getInt8PtrTy(M->getContext()));
-    Value *LoHi = Builder.CreateCall(Ldrex, Addr, "lohi");
-
-    Value *Lo = Builder.CreateExtractValue(LoHi, 0, "lo");
-    Value *Hi = Builder.CreateExtractValue(LoHi, 1, "hi");
-    Lo = Builder.CreateZExt(Lo, ValTy, "lo64");
-    Hi = Builder.CreateZExt(Hi, ValTy, "hi64");
-    return Builder.CreateOr(
-        Lo, Builder.CreateShl(Hi, ConstantInt::get(ValTy, 32)), "val64");
-  }
-
-  Type *Tys[] = { Addr->getType() };
-  Intrinsic::ID Int = IsAcquire ? Intrinsic::arm_ldaex : Intrinsic::arm_ldrex;
-  Function *Ldrex = llvm::Intrinsic::getDeclaration(M, Int, Tys);
-
-  return Builder.CreateTruncOrBitCast(
-      Builder.CreateCall(Ldrex, Addr),
-      cast<PointerType>(Addr->getType())->getElementType());
-}
-
-Value *ARMAtomicExpandPass::storeConditional(IRBuilder<> &Builder, Value *Val,
-                                             Value *Addr, AtomicOrdering Ord) {
-  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
-  bool IsRelease =
-      Ord == Release || Ord == AcquireRelease || Ord == SequentiallyConsistent;
-
-  // Since the intrinsics must have legal type, the i64 intrinsics take two
-  // parameters: "i32, i32". We must marshal Val into the appropriate form
-  // before the call.
-  if (Val->getType()->getPrimitiveSizeInBits() == 64) {
-    Intrinsic::ID Int =
-        IsRelease ? Intrinsic::arm_stlexd : Intrinsic::arm_strexd;
-    Function *Strex = Intrinsic::getDeclaration(M, Int);
-    Type *Int32Ty = Type::getInt32Ty(M->getContext());
-
-    Value *Lo = Builder.CreateTrunc(Val, Int32Ty, "lo");
-    Value *Hi = Builder.CreateTrunc(Builder.CreateLShr(Val, 32), Int32Ty, "hi");
-    Addr = Builder.CreateBitCast(Addr, Type::getInt8PtrTy(M->getContext()));
-    return Builder.CreateCall3(Strex, Lo, Hi, Addr);
-  }
-
-  Intrinsic::ID Int = IsRelease ? Intrinsic::arm_stlex : Intrinsic::arm_strex;
-  Type *Tys[] = { Addr->getType() };
-  Function *Strex = Intrinsic::getDeclaration(M, Int, Tys);
-  return Builder.CreateCall2(
-      Strex, Builder.CreateZExtOrBitCast(
-                 Val, Strex->getFunctionType()->getParamType(0)),
-      Addr);
-}
-
-AtomicOrdering ARMAtomicExpandPass::insertLeadingFence(IRBuilder<> &Builder,
-                                                        AtomicOrdering Ord) {
+AtomicOrdering AtomicExpandLoadLinked::insertLeadingFence(IRBuilder<> &Builder,
+                                                           AtomicOrdering Ord) {
   if (!TLI->getInsertFencesForAtomic())
     return Ord;
@@ -381,7 +324,7 @@ AtomicOrdering ARMAtomicExpandPass::insertLeadingFence(IRBuilder<> &Builder,
   return Monotonic;
 }

-void ARMAtomicExpandPass::insertTrailingFence(IRBuilder<> &Builder,
-                                              AtomicOrdering Ord) {
+void AtomicExpandLoadLinked::insertTrailingFence(IRBuilder<> &Builder,
+                                                 AtomicOrdering Ord) {
   if (!TLI->getInsertFencesForAtomic())
     return;
@@ -391,16 +334,3 @@ void ARMAtomicExpandPass::insertTrailingFence(IRBuilder<> &Builder,
   else if (Ord == SequentiallyConsistent)
     Builder.CreateFence(SequentiallyConsistent);
 }
-
-bool ARMAtomicExpandPass::shouldExpandAtomic(Instruction *Inst) {
-  // Loads and stores less than 64-bits are already atomic; ones above that
-  // are doomed anyway, so defer to the default libcall and blame the OS when
-  // things go wrong:
-  if (StoreInst *SI = dyn_cast<StoreInst>(Inst))
-    return SI->getValueOperand()->getType()->getPrimitiveSizeInBits() == 64;
-  else if (LoadInst *LI = dyn_cast<LoadInst>(Inst))
-    return LI->getType()->getPrimitiveSizeInBits() == 64;
-
-  // For the real atomic operations, we have ldrex/strex up to 64 bits.
-  return Inst->getType()->getPrimitiveSizeInBits() <= 64;
-}

lib/CodeGen/CMakeLists.txt

@@ -2,6 +2,7 @@ add_llvm_library(LLVMCodeGen
   AggressiveAntiDepBreaker.cpp
   AllocationOrder.cpp
   Analysis.cpp
+  AtomicExpandLoadLinkedPass.cpp
   BasicTargetTransformInfo.cpp
   BranchFolding.cpp
   CalcSpillWeights.cpp

lib/CodeGen/CodeGen.cpp

@@ -20,6 +20,7 @@ using namespace llvm;
 /// initializeCodeGen - Initialize all passes linked into the CodeGen library.
 void llvm::initializeCodeGen(PassRegistry &Registry) {
+  initializeAtomicExpandLoadLinkedPass(Registry);
   initializeBasicTTIPass(Registry);
   initializeBranchFolderPassPass(Registry);
   initializeCodeGenPreparePass(Registry);

lib/Target/ARM/ARM.h

@@ -49,8 +49,6 @@ FunctionPass *createThumb2SizeReductionPass();
 /// \brief Creates an ARM-specific Target Transformation Info pass.
 ImmutablePass *createARMTargetTransformInfoPass(const ARMBaseTargetMachine *TM);

-FunctionPass *createARMAtomicExpandPass(const TargetMachine *TM);
-
 void LowerARMMachineInstrToMCInst(const MachineInstr *MI, MCInst &OutMI,
                                   ARMAsmPrinter &AP);

lib/Target/ARM/ARMISelLowering.cpp

@@ -37,6 +37,7 @@
 #include "llvm/IR/Constants.h"
 #include "llvm/IR/Function.h"
 #include "llvm/IR/GlobalValue.h"
+#include "llvm/IR/IRBuilder.h"
 #include "llvm/IR/Instruction.h"
 #include "llvm/IR/Instructions.h"
 #include "llvm/IR/Intrinsics.h"
@@ -10494,3 +10495,83 @@ bool ARMTargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
     return false;
   return true;
 }
+
+bool ARMTargetLowering::shouldExpandAtomicInIR(Instruction *Inst) const {
+  // Loads and stores less than 64-bits are already atomic; ones above that
+  // are doomed anyway, so defer to the default libcall and blame the OS when
+  // things go wrong:
+  if (StoreInst *SI = dyn_cast<StoreInst>(Inst))
+    return SI->getValueOperand()->getType()->getPrimitiveSizeInBits() == 64;
+  else if (LoadInst *LI = dyn_cast<LoadInst>(Inst))
+    return LI->getType()->getPrimitiveSizeInBits() == 64;
+
+  // For the real atomic operations, we have ldrex/strex up to 64 bits.
+  return Inst->getType()->getPrimitiveSizeInBits() <= 64;
+}
+
+Value *ARMTargetLowering::emitLoadLinked(IRBuilder<> &Builder, Value *Addr,
+                                         AtomicOrdering Ord) const {
+  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
+  Type *ValTy = cast<PointerType>(Addr->getType())->getElementType();
+  bool IsAcquire =
+      Ord == Acquire || Ord == AcquireRelease || Ord == SequentiallyConsistent;
+
+  // Since i64 isn't legal and intrinsics don't get type-lowered, the ldrexd
+  // intrinsic must return {i32, i32} and we have to recombine them into a
+  // single i64 here.
+  if (ValTy->getPrimitiveSizeInBits() == 64) {
+    Intrinsic::ID Int =
+        IsAcquire ? Intrinsic::arm_ldaexd : Intrinsic::arm_ldrexd;
+    Function *Ldrex = llvm::Intrinsic::getDeclaration(M, Int);
+
+    Addr = Builder.CreateBitCast(Addr, Type::getInt8PtrTy(M->getContext()));
+    Value *LoHi = Builder.CreateCall(Ldrex, Addr, "lohi");
+
+    Value *Lo = Builder.CreateExtractValue(LoHi, 0, "lo");
+    Value *Hi = Builder.CreateExtractValue(LoHi, 1, "hi");
+    Lo = Builder.CreateZExt(Lo, ValTy, "lo64");
+    Hi = Builder.CreateZExt(Hi, ValTy, "hi64");
+    return Builder.CreateOr(
+        Lo, Builder.CreateShl(Hi, ConstantInt::get(ValTy, 32)), "val64");
+  }
+
+  Type *Tys[] = { Addr->getType() };
+  Intrinsic::ID Int = IsAcquire ? Intrinsic::arm_ldaex : Intrinsic::arm_ldrex;
+  Function *Ldrex = llvm::Intrinsic::getDeclaration(M, Int, Tys);
+
+  return Builder.CreateTruncOrBitCast(
+      Builder.CreateCall(Ldrex, Addr),
+      cast<PointerType>(Addr->getType())->getElementType());
+}
+
+Value *ARMTargetLowering::emitStoreConditional(IRBuilder<> &Builder, Value *Val,
+                                               Value *Addr,
+                                               AtomicOrdering Ord) const {
+  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
+  bool IsRelease =
+      Ord == Release || Ord == AcquireRelease || Ord == SequentiallyConsistent;
+
+  // Since the intrinsics must have legal type, the i64 intrinsics take two
+  // parameters: "i32, i32". We must marshal Val into the appropriate form
+  // before the call.
+  if (Val->getType()->getPrimitiveSizeInBits() == 64) {
+    Intrinsic::ID Int =
+        IsRelease ? Intrinsic::arm_stlexd : Intrinsic::arm_strexd;
+    Function *Strex = Intrinsic::getDeclaration(M, Int);
+    Type *Int32Ty = Type::getInt32Ty(M->getContext());
+
+    Value *Lo = Builder.CreateTrunc(Val, Int32Ty, "lo");
+    Value *Hi = Builder.CreateTrunc(Builder.CreateLShr(Val, 32), Int32Ty, "hi");
+    Addr = Builder.CreateBitCast(Addr, Type::getInt8PtrTy(M->getContext()));
+    return Builder.CreateCall3(Strex, Lo, Hi, Addr);
+  }
+
+  Intrinsic::ID Int = IsRelease ? Intrinsic::arm_stlex : Intrinsic::arm_strex;
+  Type *Tys[] = { Addr->getType() };
+  Function *Strex = Intrinsic::getDeclaration(M, Int, Tys);
+  return Builder.CreateCall2(
+      Strex, Builder.CreateZExtOrBitCast(
+                 Val, Strex->getFunctionType()->getParamType(0)),
+      Addr);
+}

lib/Target/ARM/ARMISelLowering.h

@@ -384,6 +384,13 @@ namespace llvm {
     bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
                                            Type *Ty) const override;

+    Value *emitLoadLinked(IRBuilder<> &Builder, Value *Addr,
+                          AtomicOrdering Ord) const override;
+    Value *emitStoreConditional(IRBuilder<> &Builder, Value *Val,
+                                Value *Addr, AtomicOrdering Ord) const override;
+
+    bool shouldExpandAtomicInIR(Instruction *Inst) const override;
+
   protected:
     std::pair<const TargetRegisterClass*, uint8_t>
     findRepresentativeClass(MVT VT) const override;

lib/Target/ARM/ARMTargetMachine.cpp

@@ -228,7 +228,7 @@ TargetPassConfig *ARMBaseTargetMachine::createPassConfig(PassManagerBase &PM) {
 bool ARMPassConfig::addPreISel() {
   const ARMSubtarget *Subtarget = &getARMSubtarget();
   if (Subtarget->hasAnyDataBarrier() && !Subtarget->isThumb1Only())
-    addPass(createARMAtomicExpandPass(TM));
+    addPass(createAtomicExpandLoadLinkedPass(TM));

   if (TM->getOptLevel() != CodeGenOpt::None)
     addPass(createGlobalMergePass(TM));

lib/Target/ARM/CMakeLists.txt

@@ -17,7 +17,6 @@ add_public_tablegen_target(ARMCommonTableGen)
 add_llvm_target(ARMCodeGen
   A15SDOptimizer.cpp
   ARMAsmPrinter.cpp
-  ARMAtomicExpandPass.cpp
   ARMBaseInstrInfo.cpp
   ARMBaseRegisterInfo.cpp
   ARMCodeEmitter.cpp

test/Transforms/AtomicExpandLoadLinked/ARM/atomic-expansion-v7.ll (new)

@@ -0,0 +1,340 @@
; RUN: opt -S -o - -mtriple=armv7-apple-ios7.0 -atomic-ll-sc %s | FileCheck %s
define i8 @test_atomic_xchg_i8(i8* %ptr, i8 %xchgend) {
; CHECK-LABEL: @test_atomic_xchg_i8
; CHECK-NOT: fence
; CHECK: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
; CHECK: [[OLDVAL32:%.*]] = call i32 @llvm.arm.ldrex.p0i8(i8* %ptr)
; CHECK: [[OLDVAL:%.*]] = trunc i32 [[OLDVAL32]] to i8
; CHECK: [[NEWVAL32:%.*]] = zext i8 %xchgend to i32
; CHECK: [[TRYAGAIN:%.*]] = call i32 @llvm.arm.strex.p0i8(i32 [[NEWVAL32]], i8* %ptr)
; CHECK: [[TST:%.*]] = icmp ne i32 [[TRYAGAIN]], 0
; CHECK: br i1 [[TST]], label %[[LOOP]], label %[[END:.*]]
; CHECK: [[END]]:
; CHECK-NOT: fence
; CHECK: ret i8 [[OLDVAL]]
%res = atomicrmw xchg i8* %ptr, i8 %xchgend monotonic
ret i8 %res
}
define i16 @test_atomic_add_i16(i16* %ptr, i16 %addend) {
; CHECK-LABEL: @test_atomic_add_i16
; CHECK: fence release
; CHECK: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
; CHECK: [[OLDVAL32:%.*]] = call i32 @llvm.arm.ldrex.p0i16(i16* %ptr)
; CHECK: [[OLDVAL:%.*]] = trunc i32 [[OLDVAL32]] to i16
; CHECK: [[NEWVAL:%.*]] = add i16 [[OLDVAL]], %addend
; CHECK: [[NEWVAL32:%.*]] = zext i16 [[NEWVAL]] to i32
; CHECK: [[TRYAGAIN:%.*]] = call i32 @llvm.arm.strex.p0i16(i32 [[NEWVAL32]], i16* %ptr)
; CHECK: [[TST:%.*]] = icmp ne i32 [[TRYAGAIN]], 0
; CHECK: br i1 [[TST]], label %[[LOOP]], label %[[END:.*]]
; CHECK: [[END]]:
; CHECK: fence seq_cst
; CHECK: ret i16 [[OLDVAL]]
%res = atomicrmw add i16* %ptr, i16 %addend seq_cst
ret i16 %res
}
define i32 @test_atomic_sub_i32(i32* %ptr, i32 %subend) {
; CHECK-LABEL: @test_atomic_sub_i32
; CHECK-NOT: fence
; CHECK: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
; CHECK: [[OLDVAL:%.*]] = call i32 @llvm.arm.ldrex.p0i32(i32* %ptr)
; CHECK: [[NEWVAL:%.*]] = sub i32 [[OLDVAL]], %subend
; CHECK: [[TRYAGAIN:%.*]] = call i32 @llvm.arm.strex.p0i32(i32 [[NEWVAL]], i32* %ptr)
; CHECK: [[TST:%.*]] = icmp ne i32 [[TRYAGAIN]], 0
; CHECK: br i1 [[TST]], label %[[LOOP]], label %[[END:.*]]
; CHECK: [[END]]:
; CHECK: fence acquire
; CHECK: ret i32 [[OLDVAL]]
%res = atomicrmw sub i32* %ptr, i32 %subend acquire
ret i32 %res
}
define i8 @test_atomic_and_i8(i8* %ptr, i8 %andend) {
; CHECK-LABEL: @test_atomic_and_i8
; CHECK: fence release
; CHECK: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
; CHECK: [[OLDVAL32:%.*]] = call i32 @llvm.arm.ldrex.p0i8(i8* %ptr)
; CHECK: [[OLDVAL:%.*]] = trunc i32 [[OLDVAL32]] to i8
; CHECK: [[NEWVAL:%.*]] = and i8 [[OLDVAL]], %andend
; CHECK: [[NEWVAL32:%.*]] = zext i8 [[NEWVAL]] to i32
; CHECK: [[TRYAGAIN:%.*]] = call i32 @llvm.arm.strex.p0i8(i32 [[NEWVAL32]], i8* %ptr)
; CHECK: [[TST:%.*]] = icmp ne i32 [[TRYAGAIN]], 0
; CHECK: br i1 [[TST]], label %[[LOOP]], label %[[END:.*]]
; CHECK: [[END]]:
; CHECK-NOT: fence
; CHECK: ret i8 [[OLDVAL]]
%res = atomicrmw and i8* %ptr, i8 %andend release
ret i8 %res
}
define i16 @test_atomic_nand_i16(i16* %ptr, i16 %nandend) {
; CHECK-LABEL: @test_atomic_nand_i16
; CHECK: fence release
; CHECK: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
; CHECK: [[OLDVAL32:%.*]] = call i32 @llvm.arm.ldrex.p0i16(i16* %ptr)
; CHECK: [[OLDVAL:%.*]] = trunc i32 [[OLDVAL32]] to i16
; CHECK: [[NEWVAL_TMP:%.*]] = xor i16 %nandend, -1
; CHECK: [[NEWVAL:%.*]] = and i16 [[OLDVAL]], [[NEWVAL_TMP]]
; CHECK: [[NEWVAL32:%.*]] = zext i16 [[NEWVAL]] to i32
; CHECK: [[TRYAGAIN:%.*]] = call i32 @llvm.arm.strex.p0i16(i32 [[NEWVAL32]], i16* %ptr)
; CHECK: [[TST:%.*]] = icmp ne i32 [[TRYAGAIN]], 0
; CHECK: br i1 [[TST]], label %[[LOOP]], label %[[END:.*]]
; CHECK: [[END]]:
; CHECK: fence seq_cst
; CHECK: ret i16 [[OLDVAL]]
%res = atomicrmw nand i16* %ptr, i16 %nandend seq_cst
ret i16 %res
}
define i64 @test_atomic_or_i64(i64* %ptr, i64 %orend) {
; CHECK-LABEL: @test_atomic_or_i64
; CHECK: fence release
; CHECK: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
; CHECK: [[PTR8:%.*]] = bitcast i64* %ptr to i8*
; CHECK: [[LOHI:%.*]] = call { i32, i32 } @llvm.arm.ldrexd(i8* [[PTR8]])
; CHECK: [[LO:%.*]] = extractvalue { i32, i32 } [[LOHI]], 0
; CHECK: [[HI:%.*]] = extractvalue { i32, i32 } [[LOHI]], 1
; CHECK: [[LO64:%.*]] = zext i32 [[LO]] to i64
; CHECK: [[HI64_TMP:%.*]] = zext i32 [[HI]] to i64
; CHECK: [[HI64:%.*]] = shl i64 [[HI64_TMP]], 32
; CHECK: [[OLDVAL:%.*]] = or i64 [[LO64]], [[HI64]]
; CHECK: [[NEWVAL:%.*]] = or i64 [[OLDVAL]], %orend
; CHECK: [[NEWLO:%.*]] = trunc i64 [[NEWVAL]] to i32
; CHECK: [[NEWHI_TMP:%.*]] = lshr i64 [[NEWVAL]], 32
; CHECK: [[NEWHI:%.*]] = trunc i64 [[NEWHI_TMP]] to i32
; CHECK: [[PTR8:%.*]] = bitcast i64* %ptr to i8*
; CHECK: [[TRYAGAIN:%.*]] = call i32 @llvm.arm.strexd(i32 [[NEWLO]], i32 [[NEWHI]], i8* [[PTR8]])
; CHECK: [[TST:%.*]] = icmp ne i32 [[TRYAGAIN]], 0
; CHECK: br i1 [[TST]], label %[[LOOP]], label %[[END:.*]]
; CHECK: [[END]]:
; CHECK: fence seq_cst
; CHECK: ret i64 [[OLDVAL]]
%res = atomicrmw or i64* %ptr, i64 %orend seq_cst
ret i64 %res
}
define i8 @test_atomic_xor_i8(i8* %ptr, i8 %xorend) {
; CHECK-LABEL: @test_atomic_xor_i8
; CHECK: fence release
; CHECK: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
; CHECK: [[OLDVAL32:%.*]] = call i32 @llvm.arm.ldrex.p0i8(i8* %ptr)
; CHECK: [[OLDVAL:%.*]] = trunc i32 [[OLDVAL32]] to i8
; CHECK: [[NEWVAL:%.*]] = xor i8 [[OLDVAL]], %xorend
; CHECK: [[NEWVAL32:%.*]] = zext i8 [[NEWVAL]] to i32
; CHECK: [[TRYAGAIN:%.*]] = call i32 @llvm.arm.strex.p0i8(i32 [[NEWVAL32]], i8* %ptr)
; CHECK: [[TST:%.*]] = icmp ne i32 [[TRYAGAIN]], 0
; CHECK: br i1 [[TST]], label %[[LOOP]], label %[[END:.*]]
; CHECK: [[END]]:
; CHECK: fence seq_cst
; CHECK: ret i8 [[OLDVAL]]
%res = atomicrmw xor i8* %ptr, i8 %xorend seq_cst
ret i8 %res
}
define i8 @test_atomic_max_i8(i8* %ptr, i8 %maxend) {
; CHECK-LABEL: @test_atomic_max_i8
; CHECK: fence release
; CHECK: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
; CHECK: [[OLDVAL32:%.*]] = call i32 @llvm.arm.ldrex.p0i8(i8* %ptr)
; CHECK: [[OLDVAL:%.*]] = trunc i32 [[OLDVAL32]] to i8
; CHECK: [[WANT_OLD:%.*]] = icmp sgt i8 [[OLDVAL]], %maxend
; CHECK: [[NEWVAL:%.*]] = select i1 [[WANT_OLD]], i8 [[OLDVAL]], i8 %maxend
; CHECK: [[NEWVAL32:%.*]] = zext i8 [[NEWVAL]] to i32
; CHECK: [[TRYAGAIN:%.*]] = call i32 @llvm.arm.strex.p0i8(i32 [[NEWVAL32]], i8* %ptr)
; CHECK: [[TST:%.*]] = icmp ne i32 [[TRYAGAIN]], 0
; CHECK: br i1 [[TST]], label %[[LOOP]], label %[[END:.*]]
; CHECK: [[END]]:
; CHECK: fence seq_cst
; CHECK: ret i8 [[OLDVAL]]
%res = atomicrmw max i8* %ptr, i8 %maxend seq_cst
ret i8 %res
}
define i8 @test_atomic_min_i8(i8* %ptr, i8 %minend) {
; CHECK-LABEL: @test_atomic_min_i8
; CHECK: fence release
; CHECK: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
; CHECK: [[OLDVAL32:%.*]] = call i32 @llvm.arm.ldrex.p0i8(i8* %ptr)
; CHECK: [[OLDVAL:%.*]] = trunc i32 [[OLDVAL32]] to i8
; CHECK: [[WANT_OLD:%.*]] = icmp sle i8 [[OLDVAL]], %minend
; CHECK: [[NEWVAL:%.*]] = select i1 [[WANT_OLD]], i8 [[OLDVAL]], i8 %minend
; CHECK: [[NEWVAL32:%.*]] = zext i8 [[NEWVAL]] to i32
; CHECK: [[TRYAGAIN:%.*]] = call i32 @llvm.arm.strex.p0i8(i32 [[NEWVAL32]], i8* %ptr)
; CHECK: [[TST:%.*]] = icmp ne i32 [[TRYAGAIN]], 0
; CHECK: br i1 [[TST]], label %[[LOOP]], label %[[END:.*]]
; CHECK: [[END]]:
; CHECK: fence seq_cst
; CHECK: ret i8 [[OLDVAL]]
%res = atomicrmw min i8* %ptr, i8 %minend seq_cst
ret i8 %res
}
define i8 @test_atomic_umax_i8(i8* %ptr, i8 %umaxend) {
; CHECK-LABEL: @test_atomic_umax_i8
; CHECK: fence release
; CHECK: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
; CHECK: [[OLDVAL32:%.*]] = call i32 @llvm.arm.ldrex.p0i8(i8* %ptr)
; CHECK: [[OLDVAL:%.*]] = trunc i32 [[OLDVAL32]] to i8
; CHECK: [[WANT_OLD:%.*]] = icmp ugt i8 [[OLDVAL]], %umaxend
; CHECK: [[NEWVAL:%.*]] = select i1 [[WANT_OLD]], i8 [[OLDVAL]], i8 %umaxend
; CHECK: [[NEWVAL32:%.*]] = zext i8 [[NEWVAL]] to i32
; CHECK: [[TRYAGAIN:%.*]] = call i32 @llvm.arm.strex.p0i8(i32 [[NEWVAL32]], i8* %ptr)
; CHECK: [[TST:%.*]] = icmp ne i32 [[TRYAGAIN]], 0
; CHECK: br i1 [[TST]], label %[[LOOP]], label %[[END:.*]]
; CHECK: [[END]]:
; CHECK: fence seq_cst
; CHECK: ret i8 [[OLDVAL]]
%res = atomicrmw umax i8* %ptr, i8 %umaxend seq_cst
ret i8 %res
}
define i8 @test_atomic_umin_i8(i8* %ptr, i8 %uminend) {
; CHECK-LABEL: @test_atomic_umin_i8
; CHECK: fence release
; CHECK: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
; CHECK: [[OLDVAL32:%.*]] = call i32 @llvm.arm.ldrex.p0i8(i8* %ptr)
; CHECK: [[OLDVAL:%.*]] = trunc i32 [[OLDVAL32]] to i8
; CHECK: [[WANT_OLD:%.*]] = icmp ule i8 [[OLDVAL]], %uminend
; CHECK: [[NEWVAL:%.*]] = select i1 [[WANT_OLD]], i8 [[OLDVAL]], i8 %uminend
; CHECK: [[NEWVAL32:%.*]] = zext i8 [[NEWVAL]] to i32
; CHECK: [[TRYAGAIN:%.*]] = call i32 @llvm.arm.strex.p0i8(i32 [[NEWVAL32]], i8* %ptr)
; CHECK: [[TST:%.*]] = icmp ne i32 [[TRYAGAIN]], 0
; CHECK: br i1 [[TST]], label %[[LOOP]], label %[[END:.*]]
; CHECK: [[END]]:
; CHECK: fence seq_cst
; CHECK: ret i8 [[OLDVAL]]
%res = atomicrmw umin i8* %ptr, i8 %uminend seq_cst
ret i8 %res
}
define i8 @test_cmpxchg_i8_seqcst_seqcst(i8* %ptr, i8 %desired, i8 %newval) {
; CHECK-LABEL: @test_cmpxchg_i8_seqcst_seqcst
; CHECK: fence release
; CHECK: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
; CHECK: [[OLDVAL32:%.*]] = call i32 @llvm.arm.ldrex.p0i8(i8* %ptr)
; CHECK: [[OLDVAL:%.*]] = trunc i32 %1 to i8
; CHECK: [[SHOULD_STORE:%.*]] = icmp eq i8 [[OLDVAL]], %desired
; CHECK: br i1 [[SHOULD_STORE]], label %[[TRY_STORE:.*]], label %[[BARRIER:.*]]
; CHECK: [[TRY_STORE]]:
; CHECK: [[NEWVAL32:%.*]] = zext i8 %newval to i32
; CHECK: [[TRYAGAIN:%.*]] = call i32 @llvm.arm.strex.p0i8(i32 [[NEWVAL32]], i8* %ptr)
; CHECK: [[TST:%.*]] = icmp ne i32 [[TRYAGAIN]], 0
; CHECK: br i1 [[TST]], label %[[LOOP]], label %[[BARRIER:.*]]
; CHECK: [[BARRIER]]:
; CHECK: fence seq_cst
; CHECK: br label %[[DONE:.*]]
; CHECK: [[DONE]]:
; CHECK: ret i8 [[OLDVAL]]
%old = cmpxchg i8* %ptr, i8 %desired, i8 %newval seq_cst seq_cst
ret i8 %old
}
define i16 @test_cmpxchg_i16_seqcst_monotonic(i16* %ptr, i16 %desired, i16 %newval) {
; CHECK-LABEL: @test_cmpxchg_i16_seqcst_monotonic
; CHECK: fence release
; CHECK: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
; CHECK: [[OLDVAL32:%.*]] = call i32 @llvm.arm.ldrex.p0i16(i16* %ptr)
; CHECK: [[OLDVAL:%.*]] = trunc i32 %1 to i16
; CHECK: [[SHOULD_STORE:%.*]] = icmp eq i16 [[OLDVAL]], %desired
; CHECK: br i1 [[SHOULD_STORE]], label %[[TRY_STORE:.*]], label %[[DONE:.*]]
; CHECK: [[TRY_STORE]]:
; CHECK: [[NEWVAL32:%.*]] = zext i16 %newval to i32
; CHECK: [[TRYAGAIN:%.*]] = call i32 @llvm.arm.strex.p0i16(i32 [[NEWVAL32]], i16* %ptr)
; CHECK: [[TST:%.*]] = icmp ne i32 [[TRYAGAIN]], 0
; CHECK: br i1 [[TST]], label %[[LOOP]], label %[[BARRIER:.*]]
; CHECK: [[BARRIER]]:
; CHECK: fence seq_cst
; CHECK: br label %[[DONE:.*]]
; CHECK: [[DONE]]:
; CHECK: ret i16 [[OLDVAL]]
%old = cmpxchg i16* %ptr, i16 %desired, i16 %newval seq_cst monotonic
ret i16 %old
}
define i32 @test_cmpxchg_i32_acquire_acquire(i32* %ptr, i32 %desired, i32 %newval) {
; CHECK-LABEL: @test_cmpxchg_i32_acquire_acquire
; CHECK-NOT: fence
; CHECK: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
; CHECK: [[OLDVAL:%.*]] = call i32 @llvm.arm.ldrex.p0i32(i32* %ptr)
; CHECK: [[SHOULD_STORE:%.*]] = icmp eq i32 [[OLDVAL]], %desired
; CHECK: br i1 [[SHOULD_STORE]], label %[[TRY_STORE:.*]], label %[[DONE:.*]]
; CHECK: [[TRY_STORE]]:
; CHECK: [[TRYAGAIN:%.*]] = call i32 @llvm.arm.strex.p0i32(i32 %newval, i32* %ptr)
; CHECK: [[TST:%.*]] = icmp ne i32 [[TRYAGAIN]], 0
; CHECK: br i1 [[TST]], label %[[LOOP]], label %[[BARRIER:.*]]
; CHECK: [[BARRIER]]:
; CHECK: fence acquire
; CHECK: br label %[[DONE:.*]]
; CHECK: [[DONE]]:
; CHECK: ret i32 [[OLDVAL]]
%old = cmpxchg i32* %ptr, i32 %desired, i32 %newval acquire acquire
ret i32 %old
}
define i64 @test_cmpxchg_i64_monotonic_monotonic(i64* %ptr, i64 %desired, i64 %newval) {
; CHECK-LABEL: @test_cmpxchg_i64_monotonic_monotonic
; CHECK-NOT: fence
; CHECK: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
; CHECK: [[PTR8:%.*]] = bitcast i64* %ptr to i8*
; CHECK: [[LOHI:%.*]] = call { i32, i32 } @llvm.arm.ldrexd(i8* [[PTR8]])
; CHECK: [[LO:%.*]] = extractvalue { i32, i32 } [[LOHI]], 0
; CHECK: [[HI:%.*]] = extractvalue { i32, i32 } [[LOHI]], 1
; CHECK: [[LO64:%.*]] = zext i32 [[LO]] to i64
; CHECK: [[HI64_TMP:%.*]] = zext i32 [[HI]] to i64
; CHECK: [[HI64:%.*]] = shl i64 [[HI64_TMP]], 32
; CHECK: [[OLDVAL:%.*]] = or i64 [[LO64]], [[HI64]]
; CHECK: [[SHOULD_STORE:%.*]] = icmp eq i64 [[OLDVAL]], %desired
; CHECK: br i1 [[SHOULD_STORE]], label %[[TRY_STORE:.*]], label %[[DONE:.*]]
; CHECK: [[TRY_STORE]]:
; CHECK: [[NEWLO:%.*]] = trunc i64 %newval to i32
; CHECK: [[NEWHI_TMP:%.*]] = lshr i64 %newval, 32
; CHECK: [[NEWHI:%.*]] = trunc i64 [[NEWHI_TMP]] to i32
; CHECK: [[PTR8:%.*]] = bitcast i64* %ptr to i8*
; CHECK: [[TRYAGAIN:%.*]] = call i32 @llvm.arm.strexd(i32 [[NEWLO]], i32 [[NEWHI]], i8* [[PTR8]])
; CHECK: [[TST:%.*]] = icmp ne i32 [[TRYAGAIN]], 0
; CHECK: br i1 [[TST]], label %[[LOOP]], label %[[BARRIER:.*]]
; CHECK: [[BARRIER]]:
; CHECK-NOT: fence
; CHECK: br label %[[DONE:.*]]
; CHECK: [[DONE]]:
; CHECK: ret i64 [[OLDVAL]]
%old = cmpxchg i64* %ptr, i64 %desired, i64 %newval monotonic monotonic
ret i64 %old
}

test/Transforms/AtomicExpandLoadLinked/ARM/atomic-expansion-v8.ll (new)

@@ -0,0 +1,202 @@
; RUN: opt -S -o - -mtriple=armv8-linux-gnueabihf -atomic-ll-sc %s | FileCheck %s
define i8 @test_atomic_xchg_i8(i8* %ptr, i8 %xchgend) {
; CHECK-LABEL: @test_atomic_xchg_i8
; CHECK-NOT: fence
; CHECK: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
; CHECK: [[OLDVAL32:%.*]] = call i32 @llvm.arm.ldrex.p0i8(i8* %ptr)
; CHECK: [[OLDVAL:%.*]] = trunc i32 [[OLDVAL32]] to i8
; CHECK: [[NEWVAL32:%.*]] = zext i8 %xchgend to i32
; CHECK: [[TRYAGAIN:%.*]] = call i32 @llvm.arm.strex.p0i8(i32 [[NEWVAL32]], i8* %ptr)
; CHECK: [[TST:%.*]] = icmp ne i32 [[TRYAGAIN]], 0
; CHECK: br i1 [[TST]], label %[[LOOP]], label %[[END:.*]]
; CHECK: [[END]]:
; CHECK-NOT: fence
; CHECK: ret i8 [[OLDVAL]]
%res = atomicrmw xchg i8* %ptr, i8 %xchgend monotonic
ret i8 %res
}
define i16 @test_atomic_add_i16(i16* %ptr, i16 %addend) {
; CHECK-LABEL: @test_atomic_add_i16
; CHECK-NOT: fence
; CHECK: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
; CHECK: [[OLDVAL32:%.*]] = call i32 @llvm.arm.ldaex.p0i16(i16* %ptr)
; CHECK: [[OLDVAL:%.*]] = trunc i32 [[OLDVAL32]] to i16
; CHECK: [[NEWVAL:%.*]] = add i16 [[OLDVAL]], %addend
; CHECK: [[NEWVAL32:%.*]] = zext i16 [[NEWVAL]] to i32
; CHECK: [[TRYAGAIN:%.*]] = call i32 @llvm.arm.stlex.p0i16(i32 [[NEWVAL32]], i16* %ptr)
; CHECK: [[TST:%.*]] = icmp ne i32 [[TRYAGAIN]], 0
; CHECK: br i1 [[TST]], label %[[LOOP]], label %[[END:.*]]
; CHECK: [[END]]:
; CHECK-NOT: fence
; CHECK: ret i16 [[OLDVAL]]
%res = atomicrmw add i16* %ptr, i16 %addend seq_cst
ret i16 %res
}
define i32 @test_atomic_sub_i32(i32* %ptr, i32 %subend) {
; CHECK-LABEL: @test_atomic_sub_i32
; CHECK-NOT: fence
; CHECK: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
; CHECK: [[OLDVAL:%.*]] = call i32 @llvm.arm.ldaex.p0i32(i32* %ptr)
; CHECK: [[NEWVAL:%.*]] = sub i32 [[OLDVAL]], %subend
; CHECK: [[TRYAGAIN:%.*]] = call i32 @llvm.arm.strex.p0i32(i32 [[NEWVAL]], i32* %ptr)
; CHECK: [[TST:%.*]] = icmp ne i32 [[TRYAGAIN]], 0
; CHECK: br i1 [[TST]], label %[[LOOP]], label %[[END:.*]]
; CHECK: [[END]]:
; CHECK-NOT: fence
; CHECK: ret i32 [[OLDVAL]]
%res = atomicrmw sub i32* %ptr, i32 %subend acquire
ret i32 %res
}
define i64 @test_atomic_or_i64(i64* %ptr, i64 %orend) {
; CHECK-LABEL: @test_atomic_or_i64
; CHECK-NOT: fence
; CHECK: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
; CHECK: [[PTR8:%.*]] = bitcast i64* %ptr to i8*
; CHECK: [[LOHI:%.*]] = call { i32, i32 } @llvm.arm.ldaexd(i8* [[PTR8]])
; CHECK: [[LO:%.*]] = extractvalue { i32, i32 } [[LOHI]], 0
; CHECK: [[HI:%.*]] = extractvalue { i32, i32 } [[LOHI]], 1
; CHECK: [[LO64:%.*]] = zext i32 [[LO]] to i64
; CHECK: [[HI64_TMP:%.*]] = zext i32 [[HI]] to i64
; CHECK: [[HI64:%.*]] = shl i64 [[HI64_TMP]], 32
; CHECK: [[OLDVAL:%.*]] = or i64 [[LO64]], [[HI64]]
; CHECK: [[NEWVAL:%.*]] = or i64 [[OLDVAL]], %orend
; CHECK: [[NEWLO:%.*]] = trunc i64 [[NEWVAL]] to i32
; CHECK: [[NEWHI_TMP:%.*]] = lshr i64 [[NEWVAL]], 32
; CHECK: [[NEWHI:%.*]] = trunc i64 [[NEWHI_TMP]] to i32
; CHECK: [[PTR8:%.*]] = bitcast i64* %ptr to i8*
; CHECK: [[TRYAGAIN:%.*]] = call i32 @llvm.arm.stlexd(i32 [[NEWLO]], i32 [[NEWHI]], i8* [[PTR8]])
; CHECK: [[TST:%.*]] = icmp ne i32 [[TRYAGAIN]], 0
; CHECK: br i1 [[TST]], label %[[LOOP]], label %[[END:.*]]
; CHECK: [[END]]:
; CHECK-NOT: fence
; CHECK: ret i64 [[OLDVAL]]
%res = atomicrmw or i64* %ptr, i64 %orend seq_cst
ret i64 %res
}
define i8 @test_cmpxchg_i8_seqcst_seqcst(i8* %ptr, i8 %desired, i8 %newval) {
; CHECK-LABEL: @test_cmpxchg_i8_seqcst_seqcst
; CHECK-NOT: fence
; CHECK: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
; CHECK: [[OLDVAL32:%.*]] = call i32 @llvm.arm.ldaex.p0i8(i8* %ptr)
; CHECK: [[OLDVAL:%.*]] = trunc i32 %1 to i8
; CHECK: [[SHOULD_STORE:%.*]] = icmp eq i8 [[OLDVAL]], %desired
; CHECK: br i1 [[SHOULD_STORE]], label %[[TRY_STORE:.*]], label %[[BARRIER:.*]]
; CHECK: [[TRY_STORE]]:
; CHECK: [[NEWVAL32:%.*]] = zext i8 %newval to i32
; CHECK: [[TRYAGAIN:%.*]] = call i32 @llvm.arm.stlex.p0i8(i32 [[NEWVAL32]], i8* %ptr)
; CHECK: [[TST:%.*]] = icmp ne i32 [[TRYAGAIN]], 0
; CHECK: br i1 [[TST]], label %[[LOOP]], label %[[BARRIER:.*]]
; CHECK: [[BARRIER]]:
; CHECK-NOT: fence
; CHECK: br label %[[DONE:.*]]
; CHECK: [[DONE]]:
; CHECK: ret i8 [[OLDVAL]]
%old = cmpxchg i8* %ptr, i8 %desired, i8 %newval seq_cst seq_cst
ret i8 %old
}
define i16 @test_cmpxchg_i16_seqcst_monotonic(i16* %ptr, i16 %desired, i16 %newval) {
; CHECK-LABEL: @test_cmpxchg_i16_seqcst_monotonic
; CHECK-NOT: fence
; CHECK: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
; CHECK: [[OLDVAL32:%.*]] = call i32 @llvm.arm.ldaex.p0i16(i16* %ptr)
; CHECK: [[OLDVAL:%.*]] = trunc i32 %1 to i16
; CHECK: [[SHOULD_STORE:%.*]] = icmp eq i16 [[OLDVAL]], %desired
; CHECK: br i1 [[SHOULD_STORE]], label %[[TRY_STORE:.*]], label %[[DONE:.*]]
; CHECK: [[TRY_STORE]]:
; CHECK: [[NEWVAL32:%.*]] = zext i16 %newval to i32
; CHECK: [[TRYAGAIN:%.*]] = call i32 @llvm.arm.stlex.p0i16(i32 [[NEWVAL32]], i16* %ptr)
; CHECK: [[TST:%.*]] = icmp ne i32 [[TRYAGAIN]], 0
; CHECK: br i1 [[TST]], label %[[LOOP]], label %[[BARRIER:.*]]
; CHECK: [[BARRIER]]:
; CHECK-NOT: fence
; CHECK: br label %[[DONE:.*]]
; CHECK: [[DONE]]:
; CHECK: ret i16 [[OLDVAL]]
%old = cmpxchg i16* %ptr, i16 %desired, i16 %newval seq_cst monotonic
ret i16 %old
}
define i32 @test_cmpxchg_i32_acquire_acquire(i32* %ptr, i32 %desired, i32 %newval) {
; CHECK-LABEL: @test_cmpxchg_i32_acquire_acquire
; CHECK-NOT: fence
; CHECK: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
; CHECK: [[OLDVAL:%.*]] = call i32 @llvm.arm.ldaex.p0i32(i32* %ptr)
; CHECK: [[SHOULD_STORE:%.*]] = icmp eq i32 [[OLDVAL]], %desired
; CHECK: br i1 [[SHOULD_STORE]], label %[[TRY_STORE:.*]], label %[[DONE:.*]]
; CHECK: [[TRY_STORE]]:
; CHECK: [[TRYAGAIN:%.*]] = call i32 @llvm.arm.strex.p0i32(i32 %newval, i32* %ptr)
; CHECK: [[TST:%.*]] = icmp ne i32 [[TRYAGAIN]], 0
; CHECK: br i1 [[TST]], label %[[LOOP]], label %[[BARRIER:.*]]
; CHECK: [[BARRIER]]:
; CHECK-NOT: fence
; CHECK: br label %[[DONE:.*]]
; CHECK: [[DONE]]:
; CHECK: ret i32 [[OLDVAL]]
%old = cmpxchg i32* %ptr, i32 %desired, i32 %newval acquire acquire
ret i32 %old
}
define i64 @test_cmpxchg_i64_monotonic_monotonic(i64* %ptr, i64 %desired, i64 %newval) {
; CHECK-LABEL: @test_cmpxchg_i64_monotonic_monotonic
; CHECK-NOT: fence
; CHECK: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
; CHECK: [[PTR8:%.*]] = bitcast i64* %ptr to i8*
; CHECK: [[LOHI:%.*]] = call { i32, i32 } @llvm.arm.ldrexd(i8* [[PTR8]])
; CHECK: [[LO:%.*]] = extractvalue { i32, i32 } [[LOHI]], 0
; CHECK: [[HI:%.*]] = extractvalue { i32, i32 } [[LOHI]], 1
; CHECK: [[LO64:%.*]] = zext i32 [[LO]] to i64
; CHECK: [[HI64_TMP:%.*]] = zext i32 [[HI]] to i64
; CHECK: [[HI64:%.*]] = shl i64 [[HI64_TMP]], 32
; CHECK: [[OLDVAL:%.*]] = or i64 [[LO64]], [[HI64]]
; CHECK: [[SHOULD_STORE:%.*]] = icmp eq i64 [[OLDVAL]], %desired
; CHECK: br i1 [[SHOULD_STORE]], label %[[TRY_STORE:.*]], label %[[DONE:.*]]
; CHECK: [[TRY_STORE]]:
; CHECK: [[NEWLO:%.*]] = trunc i64 %newval to i32
; CHECK: [[NEWHI_TMP:%.*]] = lshr i64 %newval, 32
; CHECK: [[NEWHI:%.*]] = trunc i64 [[NEWHI_TMP]] to i32
; CHECK: [[PTR8:%.*]] = bitcast i64* %ptr to i8*
; CHECK: [[TRYAGAIN:%.*]] = call i32 @llvm.arm.strexd(i32 [[NEWLO]], i32 [[NEWHI]], i8* [[PTR8]])
; CHECK: [[TST:%.*]] = icmp ne i32 [[TRYAGAIN]], 0
; CHECK: br i1 [[TST]], label %[[LOOP]], label %[[BARRIER:.*]]
; CHECK: [[BARRIER]]:
; CHECK-NOT: fence
; CHECK: br label %[[DONE:.*]]
; CHECK: [[DONE]]:
; CHECK: ret i64 [[OLDVAL]]
%old = cmpxchg i64* %ptr, i64 %desired, i64 %newval monotonic monotonic
ret i64 %old
}

test/Transforms/AtomicExpandLoadLinked/ARM/lit.local.cfg (new)

@@ -0,0 +1,4 @@
targets = set(config.root.targets_to_build.split())
if not 'ARM' in targets:
config.unsupported = True

tools/opt/opt.cpp

@@ -351,8 +351,9 @@ int main(int argc, char **argv) {
   initializeInstrumentation(Registry);
   initializeTarget(Registry);
   // For codegen passes, only passes that do IR to IR transformation are
-  // supported. For now, just add CodeGenPrepare.
+  // supported.
   initializeCodeGenPreparePass(Registry);
+  initializeAtomicExpandLoadLinkedPass(Registry);

 #ifdef LINK_POLLY_INTO_TOOLS
   polly::initializePollyPasses(Registry);
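
With this registration in place, the expansion can be driven as a plain IR-to-IR pass, e.g. opt -S -mtriple=armv7-apple-ios7.0 -atomic-ll-sc on an input .ll file, which is exactly how the RUN lines of the new tests above invoke it.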