diff --git a/include/llvm/Target/TargetLowering.h b/include/llvm/Target/TargetLowering.h
index 1e7ccd8f8e1..d5c9ebe0f2a 100644
--- a/include/llvm/Target/TargetLowering.h
+++ b/include/llvm/Target/TargetLowering.h
@@ -810,13 +810,6 @@ public:
     return PrefLoopAlignment;
   }
 
-  /// getShouldFoldAtomicFences - return whether the combiner should fold
-  /// fence MEMBARRIER instructions into the atomic intrinsic instructions.
-  ///
-  bool getShouldFoldAtomicFences() const {
-    return ShouldFoldAtomicFences;
-  }
-
   /// getInsertFencesFor - return whether the DAG builder should automatically
   /// insert fences and reduce ordering for atomics.
   ///
@@ -1101,12 +1094,6 @@ protected:
     MinStackArgumentAlignment = Align;
   }
 
-  /// setShouldFoldAtomicFences - Set if the target's implementation of the
-  /// atomic operation intrinsics includes locking. Default is false.
-  void setShouldFoldAtomicFences(bool fold) {
-    ShouldFoldAtomicFences = fold;
-  }
-
   /// setInsertFencesForAtomic - Set if the DAG builder should
   /// automatically insert fences and reduce the order of atomic memory
   /// operations to Monotonic.
@@ -1364,11 +1351,6 @@ private:
   ///
   unsigned PrefLoopAlignment;
 
-  /// ShouldFoldAtomicFences - Whether fencing MEMBARRIER instructions should
-  /// be folded into the enclosed atomic intrinsic instruction by the
-  /// combiner.
-  bool ShouldFoldAtomicFences;
-
   /// InsertFencesForAtomic - Whether the DAG builder should automatically
   /// insert fences and reduce ordering for atomics. (This will be set for
   /// for most architectures with weak memory ordering.)
diff --git a/lib/CodeGen/TargetLoweringBase.cpp b/lib/CodeGen/TargetLoweringBase.cpp
index 82bb37ef978..8074d167f44 100644
--- a/lib/CodeGen/TargetLoweringBase.cpp
+++ b/lib/CodeGen/TargetLoweringBase.cpp
@@ -647,7 +647,6 @@ TargetLoweringBase::TargetLoweringBase(const TargetMachine &tm,
   PrefFunctionAlignment = 0;
   PrefLoopAlignment = 0;
   MinStackArgumentAlignment = 1;
-  ShouldFoldAtomicFences = false;
   InsertFencesForAtomic = false;
   SupportJumpTables = true;
   MinimumJumpTableEntries = 4;
diff --git a/lib/Target/AArch64/AArch64ISelLowering.cpp b/lib/Target/AArch64/AArch64ISelLowering.cpp
index 6deae75488e..786b1ba1d50 100644
--- a/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -59,10 +59,6 @@ AArch64TargetLowering::AArch64TargetLowering(AArch64TargetMachine &TM)
 
   computeRegisterProperties();
 
-  // We have particularly efficient implementations of atomic fences if they can
-  // be combined with nearby atomic loads and stores.
-  setShouldFoldAtomicFences(true);
-
   // We combine OR nodes for bitfield and NEON BSL operations.
   setTargetDAGCombine(ISD::OR);
 
diff --git a/lib/Target/ARM/ARMISelLowering.cpp b/lib/Target/ARM/ARMISelLowering.cpp
index 698c8a7e3ec..23d7ef1290f 100644
--- a/lib/Target/ARM/ARMISelLowering.cpp
+++ b/lib/Target/ARM/ARMISelLowering.cpp
@@ -763,8 +763,6 @@ ARMTargetLowering::ARMTargetLowering(TargetMachine &TM)
     // Unordered/Monotonic case.
     setOperationAction(ISD::ATOMIC_LOAD, MVT::i32, Custom);
     setOperationAction(ISD::ATOMIC_STORE, MVT::i32, Custom);
-    // Since the libcalls include locking, fold in the fences
-    setShouldFoldAtomicFences(true);
   }
 
   setOperationAction(ISD::PREFETCH, MVT::Other, Custom);
diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp
index 94370ae4ee8..b7ba0b81886 100644
--- a/lib/Target/X86/X86ISelLowering.cpp
+++ b/lib/Target/X86/X86ISelLowering.cpp
@@ -528,13 +528,6 @@ void X86TargetLowering::resetOperationActions() {
 
   setOperationAction(ISD::ATOMIC_FENCE , MVT::Other, Custom);
 
-  // On X86 and X86-64, atomic operations are lowered to locked instructions.
-  // Locked instructions, in turn, have implicit fence semantics (all memory
-  // operations are flushed before issuing the locked instruction, and they
-  // are not buffered), so we can fold away the common pattern of
-  // fence-atomic-fence.
-  setShouldFoldAtomicFences(true);
-
   // Expand certain atomics
  for (unsigned i = 0; i != array_lengthof(IntVTs); ++i) {
    MVT VT = IntVTs[i];
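
Illustration (not part of the patch): the reasoning in the removed X86 comment, namely that locked instructions already act as full barriers so explicit fences around an atomic operation are redundant, is the fence-atomic-fence pattern that the ShouldFoldAtomicFences hook let the combiner fold away. A minimal standalone C++11 sketch of that pattern follows; the names Flag, publish, and Value are invented for the example, and the note about a single locked instruction assumes typical x86/x86-64 code generation.

// Illustrative sketch only, not part of this patch. Assumes a C++11 compiler
// targeting x86/x86-64.
#include <atomic>

std::atomic<int> Flag{0};   // hypothetical variable for the sketch

void publish(int Value) {
  // On x86 this seq_cst store is commonly emitted as a single locked
  // instruction (e.g. xchg), which already orders earlier and later memory
  // operations, so no separate fence is needed before or after it. That is
  // the redundancy the removed hook was meant to exploit.
  Flag.store(Value, std::memory_order_seq_cst);
}

int main() {
  publish(42);
  return Flag.load(std::memory_order_seq_cst) == 42 ? 0 : 1;
}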