Remove unused ShouldFoldAtomicFences flag.

I think it's almost impossible to fold atomic fences profitably under
LLVM/C++11 semantics. As a result, this is now unused and just
cluttering up the target interface.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@179940 91177308-0d34-0410-b5e6-96231b3b80d8
Tim Northover 2013-04-20 12:32:43 +00:00
parent 6265d5c91a
commit 8b71994fde
5 changed files with 0 additions and 32 deletions
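For context (illustrative only, not part of this commit): under C++11-style atomics the ordering is attached to the operation itself, so the backends rarely see a separate fence next to an atomic in the first place. A minimal C++ sketch of the two shapes:

    #include <atomic>

    // Portable C++11 code attaches the ordering to the operation, so the
    // backend sees a single seq_cst RMW rather than a fence/atomic/fence
    // sandwich.
    int add_seq_cst(std::atomic<int> &counter) {
      return counter.fetch_add(1, std::memory_order_seq_cst);
    }

    // The shape the removed flag targeted. The explicit fences order *all*
    // surrounding memory operations, not just this RMW, so a combiner can
    // rarely fold them away profitably.
    int add_with_fences(std::atomic<int> &counter) {
      std::atomic_thread_fence(std::memory_order_seq_cst);
      int old = counter.fetch_add(1, std::memory_order_relaxed);
      std::atomic_thread_fence(std::memory_order_seq_cst);
      return old;
    }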

include/llvm/Target/TargetLowering.h

@@ -810,13 +810,6 @@ public:
     return PrefLoopAlignment;
   }
 
-  /// getShouldFoldAtomicFences - return whether the combiner should fold
-  /// fence MEMBARRIER instructions into the atomic intrinsic instructions.
-  ///
-  bool getShouldFoldAtomicFences() const {
-    return ShouldFoldAtomicFences;
-  }
-
   /// getInsertFencesFor - return whether the DAG builder should automatically
   /// insert fences and reduce ordering for atomics.
   ///
@@ -1101,12 +1094,6 @@ protected:
     MinStackArgumentAlignment = Align;
   }
 
-  /// setShouldFoldAtomicFences - Set if the target's implementation of the
-  /// atomic operation intrinsics includes locking. Default is false.
-  void setShouldFoldAtomicFences(bool fold) {
-    ShouldFoldAtomicFences = fold;
-  }
-
   /// setInsertFencesForAtomic - Set if the DAG builder should
   /// automatically insert fences and reduce the order of atomic memory
   /// operations to Monotonic.
@@ -1364,11 +1351,6 @@ private:
   ///
   unsigned PrefLoopAlignment;
 
-  /// ShouldFoldAtomicFences - Whether fencing MEMBARRIER instructions should
-  /// be folded into the enclosed atomic intrinsic instruction by the
-  /// combiner.
-  bool ShouldFoldAtomicFences;
-
   /// InsertFencesForAtomic - Whether the DAG builder should automatically
   /// insert fences and reduce ordering for atomics. (This will be set for
   /// for most architectures with weak memory ordering.)

lib/CodeGen/TargetLoweringBase.cpp

@@ -647,7 +647,6 @@ TargetLoweringBase::TargetLoweringBase(const TargetMachine &tm,
   PrefFunctionAlignment = 0;
   PrefLoopAlignment = 0;
   MinStackArgumentAlignment = 1;
-  ShouldFoldAtomicFences = false;
   InsertFencesForAtomic = false;
   SupportJumpTables = true;
   MinimumJumpTableEntries = 4;

lib/Target/AArch64/AArch64ISelLowering.cpp

@@ -59,10 +59,6 @@ AArch64TargetLowering::AArch64TargetLowering(AArch64TargetMachine &TM)
 
   computeRegisterProperties();
 
-  // We have particularly efficient implementations of atomic fences if they can
-  // be combined with nearby atomic loads and stores.
-  setShouldFoldAtomicFences(true);
-
   // We combine OR nodes for bitfield and NEON BSL operations.
   setTargetDAGCombine(ISD::OR);
 

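A hedged illustration of the "particularly efficient implementations" the removed comment mentioned (not part of this commit): on AArch64, acquire/release ordering can be folded into the access itself via the LDAR/STLR instructions, so no separate DMB barrier is needed.

    #include <atomic>

    // With typical AArch64 code generation these compile to a single
    // load-acquire / store-release instruction rather than a plain access
    // plus a DMB barrier.
    int read_flag(const std::atomic<int> &flag) {
      return flag.load(std::memory_order_acquire);   // typically LDAR
    }

    void publish(std::atomic<int> &flag, int value) {
      flag.store(value, std::memory_order_release);  // typically STLR
    }
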
lib/Target/ARM/ARMISelLowering.cpp

@@ -763,8 +763,6 @@ ARMTargetLowering::ARMTargetLowering(TargetMachine &TM)
     // Unordered/Monotonic case.
     setOperationAction(ISD::ATOMIC_LOAD, MVT::i32, Custom);
     setOperationAction(ISD::ATOMIC_STORE, MVT::i32, Custom);
-    // Since the libcalls include locking, fold in the fences
-    setShouldFoldAtomicFences(true);
   }
 
   setOperationAction(ISD::PREFETCH, MVT::Other, Custom);

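For reference, a hedged sketch of the rationale in the removed ARM lines (assuming the legacy __sync_* libcalls, which are specified as full barriers on both sides): explicit fences around such a call add no extra ordering, which is why folding them looked attractive.

    // The __sync_* builtins/libcalls already act as full memory barriers,
    // so surrounding fences are redundant with respect to this operation.
    int add_via_libcall(int *p) {
      return __sync_fetch_and_add(p, 1);
    }
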
lib/Target/X86/X86ISelLowering.cpp

@@ -528,13 +528,6 @@ void X86TargetLowering::resetOperationActions() {
 
   setOperationAction(ISD::ATOMIC_FENCE , MVT::Other, Custom);
 
-  // On X86 and X86-64, atomic operations are lowered to locked instructions.
-  // Locked instructions, in turn, have implicit fence semantics (all memory
-  // operations are flushed before issuing the locked instruction, and they
-  // are not buffered), so we can fold away the common pattern of
-  // fence-atomic-fence.
-  setShouldFoldAtomicFences(true);
-
   // Expand certain atomics
   for (unsigned i = 0; i != array_lengthof(IntVTs); ++i) {
     MVT VT = IntVTs[i];
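
A hedged sketch of the x86 behaviour the removed comment describes (illustrative, not part of this commit): a seq_cst read-modify-write compiles to a single LOCK-prefixed instruction, which already carries full-barrier semantics, so no MFENCE is emitted around it.

    #include <atomic>

    // On x86/x86-64 this typically becomes a single `lock xadd`; the LOCK
    // prefix provides the fence semantics, so no separate MFENCE is needed.
    long increment(std::atomic<long> &counter) {
      return counter.fetch_add(1, std::memory_order_seq_cst);
    }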