mirror of
https://github.com/c64scene-ar/llvm-6502.git
synced 2025-01-10 02:36:06 +00:00
Perform this folding as a target-specific DAG combine:
(or (x << c) | (y >> (64 - c))) ==> (shld64 x, y, c). The isel patterns may not catch all the cases if the general DAG combiner has reduced the width of the source operands. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@92513 91177308-0d34-0410-b5e6-96231b3b80d8
This commit is contained in:
parent
f2d7fb3eac
commit
760d1945bc
@ -978,6 +978,7 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
|
||||
setTargetDAGCombine(ISD::SHL);
|
||||
setTargetDAGCombine(ISD::SRA);
|
||||
setTargetDAGCombine(ISD::SRL);
|
||||
setTargetDAGCombine(ISD::OR);
|
||||
setTargetDAGCombine(ISD::STORE);
|
||||
setTargetDAGCombine(ISD::MEMBARRIER);
|
||||
setTargetDAGCombine(ISD::ZERO_EXTEND);
|
||||
@ -9108,6 +9109,64 @@ static SDValue PerformShiftCombine(SDNode* N, SelectionDAG &DAG,
|
||||
return SDValue();
|
||||
}
|
||||
|
||||
/// PerformOrCombine - Do target-specific dag combines on ISD::OR nodes.
/// Folds (or (x << c), (y >> (64 - c))) into (X86ISD::SHLD x, y, c), and the
/// mirrored form (SRL on the left-canonicalized side) into X86ISD::SHRD, so
/// the x86-64 double-shift instructions get used even when the generic DAG
/// combiner has rewritten the operands out of the isel patterns' reach.
/// Returns the replacement node, or an empty SDValue() if no fold applies.
static SDValue PerformOrCombine(SDNode *N, SelectionDAG &DAG,
                                const X86Subtarget *Subtarget) {
  EVT VT = N->getValueType(0);
  // Only the 64-bit form is handled here, and only when targeting x86-64.
  if (VT != MVT::i64 || !Subtarget->is64Bit())
    return SDValue();

  // fold (or (x << c) | (y >> (64 - c))) ==> (shld64 x, y, c)
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  // Canonicalize so that N0 is the SHL and N1 is the SRL; bail out if the OR
  // is not a shift/shift pair at all.
  if (N0.getOpcode() == ISD::SRL && N1.getOpcode() == ISD::SHL)
    std::swap(N0, N1);
  if (N0.getOpcode() != ISD::SHL || N1.getOpcode() != ISD::SRL)
    return SDValue();

  // Both shift amounts must already be i8 (the type x86 shift nodes carry).
  SDValue ShAmt0 = N0.getOperand(1);
  if (ShAmt0.getValueType() != MVT::i8)
    return SDValue();
  SDValue ShAmt1 = N1.getOperand(1);
  if (ShAmt1.getValueType() != MVT::i8)
    return SDValue();
  // Look through truncates so the (sub 64, c) / constant forms below can be
  // matched on the amounts in their original, wider type.
  if (ShAmt0.getOpcode() == ISD::TRUNCATE)
    ShAmt0 = ShAmt0.getOperand(0);
  if (ShAmt1.getOpcode() == ISD::TRUNCATE)
    ShAmt1 = ShAmt1.getOperand(0);

  DebugLoc DL = N->getDebugLoc();
  unsigned Opc = X86ISD::SHLD;
  SDValue Op0 = N0.getOperand(0);
  SDValue Op1 = N1.getOperand(0);
  // If the SHL's amount is the (sub 64, c) side, this is really the SHRD
  // pattern: switch opcodes and swap the operand/amount roles so that, below,
  // ShAmt1 always holds the SUB and ShAmt0 the plain amount c.
  if (ShAmt0.getOpcode() == ISD::SUB) {
    Opc = X86ISD::SHRD;
    std::swap(Op0, Op1);
    std::swap(ShAmt0, ShAmt1);
  }

  // NOTE(review): SHLD/SHRD semantics for c == 0 or c >= 64 are not checked
  // here — presumably only in-range amounts reach this pattern; confirm.
  if (ShAmt1.getOpcode() == ISD::SUB) {
    // Variable-amount case: require ShAmt1 == (sub 64, ShAmt0) exactly.
    SDValue Sum = ShAmt1.getOperand(0);
    if (ConstantSDNode *SumC = dyn_cast<ConstantSDNode>(Sum)) {
      if (SumC->getSExtValue() == 64 &&
          ShAmt1.getOperand(1) == ShAmt0)
        return DAG.getNode(Opc, DL, VT,
                           Op0, Op1,
                           DAG.getNode(ISD::TRUNCATE, DL,
                                       MVT::i8, ShAmt0));
    }
  } else if (ConstantSDNode *ShAmt1C = dyn_cast<ConstantSDNode>(ShAmt1)) {
    // Constant-amount case: require c0 + c1 == 64.  This branch is only
    // reachable when no swap happened above (a swapped ShAmt1 would be the
    // SUB), so N0.getOperand(0)/N1.getOperand(0) equal Op0/Op1 here.
    ConstantSDNode *ShAmt0C = dyn_cast<ConstantSDNode>(ShAmt0);
    if (ShAmt0C &&
        ShAmt0C->getSExtValue() + ShAmt1C->getSExtValue() == 64)
      return DAG.getNode(Opc, DL, VT,
                         N0.getOperand(0), N1.getOperand(0),
                         DAG.getNode(ISD::TRUNCATE, DL,
                                     MVT::i8, ShAmt0));
  }

  return SDValue();
}
|
||||
|
||||
/// PerformSTORECombine - Do target-specific dag combines on STORE nodes.
|
||||
static SDValue PerformSTORECombine(SDNode *N, SelectionDAG &DAG,
|
||||
const X86Subtarget *Subtarget) {
|
||||
@ -9370,6 +9429,7 @@ SDValue X86TargetLowering::PerformDAGCombine(SDNode *N,
|
||||
case ISD::SHL:
|
||||
case ISD::SRA:
|
||||
case ISD::SRL: return PerformShiftCombine(N, DAG, Subtarget);
|
||||
case ISD::OR: return PerformOrCombine(N, DAG, Subtarget);
|
||||
case ISD::STORE: return PerformSTORECombine(N, DAG, Subtarget);
|
||||
case X86ISD::FXOR:
|
||||
case X86ISD::FOR: return PerformFORCombine(N, DAG);
|
||||
|
@ -2098,24 +2098,7 @@ def : Pat<(sra GR64:$src1, (and CL:$amt, 63)),
|
||||
// Shift-by-CL of a memory operand: the (and CL, 63) mask is dropped because
// the instruction form takes the count from CL directly — NOTE(review): this
// relies on the hardware masking 64-bit shift counts to 6 bits; confirm.
def : Pat<(store (sra (loadi64 addr:$dst), (and CL:$amt, 63)), addr:$dst),
          (SAR64mCL addr:$dst)>;

// (or (x >> c) | (y << (64 - c))) ==> (shrd64 x, y, c)
// Register/register form, count in CL.
def : Pat<(or (srl GR64:$src1, CL:$amt),
              (shl GR64:$src2, (sub 64, CL:$amt))),
          (SHRD64rrCL GR64:$src1, GR64:$src2)>;

// Same fold when the right-shifted operand is loaded from and stored back to
// the same address.
def : Pat<(store (or (srl (loadi64 addr:$dst), CL:$amt),
                     (shl GR64:$src2, (sub 64, CL:$amt))), addr:$dst),
          (SHRD64mrCL addr:$dst, GR64:$src2)>;

// Variants where the i64 count lives in RCX and is truncated to i8 before
// each shift.
def : Pat<(or (srl GR64:$src1, (i8 (trunc RCX:$amt))),
              (shl GR64:$src2, (i8 (trunc (sub 64, RCX:$amt))))),
          (SHRD64rrCL GR64:$src1, GR64:$src2)>;

def : Pat<(store (or (srl (loadi64 addr:$dst), (i8 (trunc RCX:$amt))),
                     (shl GR64:$src2, (i8 (trunc (sub 64, RCX:$amt))))),
                 addr:$dst),
          (SHRD64mrCL addr:$dst, GR64:$src2)>;

// Double shift patterns
// Immediate-count shrd node maps to the rri8 instruction form; the second
// amount operand is not encoded — only $amt1 is emitted.
def : Pat<(shrd GR64:$src1, (i8 imm:$amt1), GR64:$src2, (i8 imm:$amt2)),
          (SHRD64rri8 GR64:$src1, GR64:$src2, (i8 imm:$amt1))>;
|
||||
|
||||
@ -2123,24 +2106,6 @@ def : Pat<(store (shrd (loadi64 addr:$dst), (i8 imm:$amt1),
|
||||
GR64:$src2, (i8 imm:$amt2)), addr:$dst),
|
||||
(SHRD64mri8 addr:$dst, GR64:$src2, (i8 imm:$amt1))>;
|
||||
|
||||
// (or (x << c) | (y >> (64 - c))) ==> (shld64 x, y, c)
// Register/register form, count in CL.
def : Pat<(or (shl GR64:$src1, CL:$amt),
              (srl GR64:$src2, (sub 64, CL:$amt))),
          (SHLD64rrCL GR64:$src1, GR64:$src2)>;

// Same fold when the left-shifted operand is loaded from and stored back to
// the same address.
def : Pat<(store (or (shl (loadi64 addr:$dst), CL:$amt),
                     (srl GR64:$src2, (sub 64, CL:$amt))), addr:$dst),
          (SHLD64mrCL addr:$dst, GR64:$src2)>;

// Variants where the i64 count lives in RCX and is truncated to i8 before
// each shift.
def : Pat<(or (shl GR64:$src1, (i8 (trunc RCX:$amt))),
              (srl GR64:$src2, (i8 (trunc (sub 64, RCX:$amt))))),
          (SHLD64rrCL GR64:$src1, GR64:$src2)>;

def : Pat<(store (or (shl (loadi64 addr:$dst), (i8 (trunc RCX:$amt))),
                     (srl GR64:$src2, (i8 (trunc (sub 64, RCX:$amt))))),
                 addr:$dst),
          (SHLD64mrCL addr:$dst, GR64:$src2)>;

// Immediate-count shld node maps to the rri8 instruction form; the second
// amount operand is not encoded — only $amt1 is emitted.
def : Pat<(shld GR64:$src1, (i8 imm:$amt1), GR64:$src2, (i8 imm:$amt2)),
          (SHLD64rri8 GR64:$src1, GR64:$src2, (i8 imm:$amt1))>;
|
||||
|
||||
|
Loading…
x
Reference in New Issue
Block a user