mirror of
https://github.com/c64scene-ar/llvm-6502.git
synced 2025-04-09 01:38:03 +00:00
Some targets don't require the fencing MEMBARRIER instructions surrounding
atomic intrinsics, either because they use locking instructions for the atomics, or because they perform the locking directly. Add support in the DAG combiner to fold away the fences. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@106630 91177308-0d34-0410-b5e6-96231b3b80d8
This commit is contained in:
parent
b9cd499dec
commit
9a526495e0
@ -702,6 +702,13 @@ public:
|
||||
return PrefLoopAlignment;
|
||||
}
|
||||
|
||||
/// getShouldFoldAtomicFences - return whether the combiner should fold
|
||||
/// fence MEMBARRIER instructions into the atomic intrinsic instructions.
|
||||
///
|
||||
bool getShouldFoldAtomicFences() const {
|
||||
return ShouldFoldAtomicFences;
|
||||
}
|
||||
|
||||
/// getPreIndexedAddressParts - returns true by value, base pointer and
|
||||
/// offset pointer and addressing mode by reference if the node's address
|
||||
/// can be legally represented as pre-indexed load / store address.
|
||||
@ -1090,6 +1097,12 @@ protected:
|
||||
PrefLoopAlignment = Align;
|
||||
}
|
||||
|
||||
/// setShouldFoldAtomicFences - Set if the target's implementation of the
|
||||
/// atomic operation intrinsics includes locking. Default is false.
|
||||
void setShouldFoldAtomicFences(bool fold) {
|
||||
ShouldFoldAtomicFences = fold;
|
||||
}
|
||||
|
||||
public:
|
||||
//===--------------------------------------------------------------------===//
|
||||
// Lowering methods - These methods must be implemented by targets so that
|
||||
@ -1542,6 +1555,11 @@ private:
|
||||
///
|
||||
unsigned PrefLoopAlignment;
|
||||
|
||||
/// ShouldFoldAtomicFences - Whether fencing MEMBARRIER instructions should
|
||||
/// be folded into the enclosed atomic intrinsic instruction by the
|
||||
/// combiner.
|
||||
bool ShouldFoldAtomicFences;
|
||||
|
||||
/// StackPointerRegisterToSaveRestore - If set to a physical register, this
|
||||
/// specifies the register that llvm.savestack/llvm.restorestack should save
|
||||
/// and restore.
|
||||
|
@ -211,6 +211,7 @@ namespace {
|
||||
SDValue visitBUILD_VECTOR(SDNode *N);
|
||||
SDValue visitCONCAT_VECTORS(SDNode *N);
|
||||
SDValue visitVECTOR_SHUFFLE(SDNode *N);
|
||||
SDValue visitMEMBARRIER(SDNode *N);
|
||||
|
||||
SDValue XformToShuffleWithZero(SDNode *N);
|
||||
SDValue ReassociateOps(unsigned Opc, DebugLoc DL, SDValue LHS, SDValue RHS);
|
||||
@ -1079,6 +1080,7 @@ SDValue DAGCombiner::visit(SDNode *N) {
|
||||
case ISD::BUILD_VECTOR: return visitBUILD_VECTOR(N);
|
||||
case ISD::CONCAT_VECTORS: return visitCONCAT_VECTORS(N);
|
||||
case ISD::VECTOR_SHUFFLE: return visitVECTOR_SHUFFLE(N);
|
||||
case ISD::MEMBARRIER: return visitMEMBARRIER(N);
|
||||
}
|
||||
return SDValue();
|
||||
}
|
||||
@ -6363,6 +6365,59 @@ SDValue DAGCombiner::visitVECTOR_SHUFFLE(SDNode *N) {
|
||||
return SDValue();
|
||||
}
|
||||
|
||||
/// visitMEMBARRIER - Try to fold a MEMBARRIER fence that directly follows an
/// atomic intrinsic node into that node, for targets whose atomic operations
/// already provide the required ordering (TLI.getShouldFoldAtomicFences()).
/// Matches the pattern  MEMBARRIER -> atomic -> MEMBARRIER  and rewires the
/// atomic's chain past the leading fence; returning the atomic's value lets
/// the combiner replace this (trailing) fence with it.
SDValue DAGCombiner::visitMEMBARRIER(SDNode* N) {
  // Bail out unless the target has opted in to fence folding.
  if (!TLI.getShouldFoldAtomicFences())
    return SDValue();

  // The chain operand of this fence must be one of the atomic intrinsics.
  SDValue AtomicOp = N->getOperand(0);
  switch (AtomicOp.getOpcode()) {
  case ISD::ATOMIC_CMP_SWAP:
  case ISD::ATOMIC_SWAP:
  case ISD::ATOMIC_LOAD_ADD:
  case ISD::ATOMIC_LOAD_SUB:
  case ISD::ATOMIC_LOAD_AND:
  case ISD::ATOMIC_LOAD_OR:
  case ISD::ATOMIC_LOAD_XOR:
  case ISD::ATOMIC_LOAD_NAND:
  case ISD::ATOMIC_LOAD_MIN:
  case ISD::ATOMIC_LOAD_MAX:
  case ISD::ATOMIC_LOAD_UMIN:
  case ISD::ATOMIC_LOAD_UMAX:
    break;
  default:
    return SDValue();
  }

  // The atomic, in turn, must be chained to a leading MEMBARRIER fence.
  SDValue LeadingFence = AtomicOp.getOperand(0);
  if (LeadingFence.getOpcode() != ISD::MEMBARRIER)
    return SDValue();

  // Splice the atomic's chain past the leading fence.  CMP_SWAP carries one
  // more operand (ptr, cmp, swap) than the other atomics (ptr, val).  The
  // opcode set was already validated above, so a simple if/else suffices.
  if (AtomicOp.getOpcode() == ISD::ATOMIC_CMP_SWAP)
    return SDValue(DAG.UpdateNodeOperands(AtomicOp.getNode(),
                                          LeadingFence.getOperand(0),
                                          AtomicOp.getOperand(1),
                                          AtomicOp.getOperand(2),
                                          AtomicOp.getOperand(3)),
                   AtomicOp.getResNo());

  return SDValue(DAG.UpdateNodeOperands(AtomicOp.getNode(),
                                        LeadingFence.getOperand(0),
                                        AtomicOp.getOperand(1),
                                        AtomicOp.getOperand(2)),
                 AtomicOp.getResNo());
}
|
||||
|
||||
/// XformToShuffleWithZero - Returns a vector_shuffle if it able to transform
|
||||
/// an AND to a vector_shuffle with the destination vector and a zero vector.
|
||||
/// e.g. AND V, <0xffffffff, 0, 0xffffffff, 0>. ==>
|
||||
|
@ -581,6 +581,7 @@ TargetLowering::TargetLowering(const TargetMachine &tm,
|
||||
IfCvtBlockSizeLimit = 2;
|
||||
IfCvtDupBlockSizeLimit = 0;
|
||||
PrefLoopAlignment = 0;
|
||||
ShouldFoldAtomicFences = false;
|
||||
|
||||
InitLibcallNames(LibcallRoutineNames);
|
||||
InitCmpLibcallCCs(CmpLibcallCCs);
|
||||
|
Loading…
x
Reference in New Issue
Block a user