[ARM64] Prevent bit extraction from being adjusted by a following shift
For a pattern like ((x >> C1) & Mask) << C2, the DAG combiner may convert it
into (x >> (C1-C2)) & (Mask << C2), which makes pattern matching of ubfx more
difficult.

For example, given

  %shr = lshr i64 %x, 4
  %and = and i64 %shr, 15
  %arrayidx = getelementptr inbounds [8 x [64 x i64]]* @arr, i64 0, i64 2, i64 %and
  %0 = load i64* %arrayidx

with the current shift folding it takes 3 instructions to compute the base address:

  lsr x8, x0, #1
  and x8, x8, #0x78
  add x8, x9, x8

Using ubfx, it needs only 2 instructions:

  ubfx x8, x0, #4, #4
  add x8, x9, x8, lsl #3

This fixes bug 19589.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@207702 91177308-0d34-0410-b5e6-96231b3b80d8
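As an editorial illustration (not part of the commit), here is a minimal standalone C++ check showing that the two DAG shapes compute the same value while only the first still looks like a bit extraction; the function names and test values are hypothetical:

  #include <cassert>
  #include <cstdint>
  #include <cstdio>
  #include <initializer_list>

  // Shape before combining: extract a 4-bit field, then scale by 8.
  // On ARM64 this maps onto ubfx plus "add ..., lsl #3".
  uint64_t extractThenShift(uint64_t x) { return ((x >> 4) & 15) << 3; }

  // Shape after the old shift folding: shift amounts merged, mask widened
  // to 0x78, so the bit-extraction pattern is no longer visible to isel.
  uint64_t foldedShift(uint64_t x) { return (x >> 1) & 0x78; }

  int main() {
    for (uint64_t x : {0x0ULL, 0xF0ULL, 0x123456789ABCDEFULL, ~0ULL})
      assert(extractThenShift(x) == foldedShift(x));
    std::puts("both forms agree; only the DAG shape differs");
  }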
@@ -5931,6 +5931,21 @@ ARM64TargetLowering::getScratchRegisters(CallingConv::ID) const {
   return ScratchRegs;
 }
 
+bool ARM64TargetLowering::isDesirableToCommuteWithShift(const SDNode *N) const {
+  EVT VT = N->getValueType(0);
+
+  // If N is unsigned bit extraction: ((x >> C) & mask), then do not combine
+  // it with shift to let it be lowered to UBFX.
+  if (N->getOpcode() == ISD::AND && (VT == MVT::i32 || VT == MVT::i64) &&
+      isa<ConstantSDNode>(N->getOperand(1))) {
+    uint64_t TruncMask = N->getConstantOperandVal(1);
+    if (isMask_64(TruncMask) &&
+        N->getOperand(0).getOpcode() == ISD::SRL &&
+        isa<ConstantSDNode>(N->getOperand(0)->getOperand(1)))
+      return false;
+  }
+  return true;
+}
+
 bool ARM64TargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
                                                             Type *Ty) const {
   assert(Ty->isIntegerTy());
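The hook keys on isMask_64: the AND constant must be a contiguous run of ones anchored at bit 0, exactly the masks a ubfx width field can express. A standalone sketch of that predicate (mirroring LLVM's MathExtras helper; reproduced here only for illustration, under a hypothetical name):

  #include <cassert>
  #include <cstdint>

  // True iff Value has the form 2^k - 1, i.e. the only AND constants for
  // which ((x >> C) & mask) can lower to a single ubfx.
  bool isLowBitMask(uint64_t Value) {
    return Value != 0 && ((Value + 1) & Value) == 0;
  }

  int main() {
    assert(isLowBitMask(15));    // 0b1111: a 4-bit field, qualifies
    assert(isLowBitMask(0xFF));  // an 8-bit field, qualifies
    assert(!isLowBitMask(0x78)); // the folded mask above: ones not at bit 0
    assert(!isLowBitMask(0));    // empty mask never qualifies
  }

Note that 0x78, the mask the old folding produced in the commit-message example, fails this check; that is precisely why keeping the unfolded ((x >> C) & mask) shape lets ubfx match.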