When lowering vector shifts, a check is performed to see if the value to shift by
is an immediate. In this check the value is negated and stored in an int64_t. The value can be -2^63, whose negation cannot be represented in an int64_t, so the negation is undefined behaviour and was causing failures. The negation is only needed when the value lies within a certain range, so -2^63 never needs to be negated; this patch restricts the negation to that range and adds a regression test.

Differential Revision: http://reviews.llvm.org/D11408

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@243100 91177308-0d34-0410-b5e6-96231b3b80d8
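For illustration only, here is a minimal standalone C++ sketch of the guarded-negation pattern the patch adopts; the function name and the calling code are hypothetical, not the LLVM sources. Negating INT64_MIN overflows int64_t and is undefined behaviour, so the count is range-checked first and only negated when the result is known to fit.

#include <cstdint>
#include <iostream>

// Hypothetical stand-in for the shift-immediate check. The old code negated
// Cnt unconditionally for intrinsics; with Cnt == INT64_MIN (-2^63) that
// negation overflows int64_t, which is undefined behaviour. The fixed logic
// negates only values already inside the valid negative range, so INT64_MIN
// simply fails the check instead of being negated.
static bool isValidRightShiftImm(int64_t &Cnt, int64_t ElementBits,
                                 bool isNarrow, bool isIntrinsic) {
  int64_t Limit = isNarrow ? ElementBits / 2 : ElementBits;
  if (!isIntrinsic)
    return Cnt >= 1 && Cnt <= Limit;   // shift opcodes use positive counts
  if (Cnt >= -Limit && Cnt <= -1) {    // intrinsics encode negative counts
    Cnt = -Cnt;                        // safe: -Cnt fits in int64_t here
    return true;
  }
  return false;                        // INT64_MIN lands here, never negated
}

int main() {
  int64_t Bad = INT64_MIN, Good = -8;
  std::cout << isValidRightShiftImm(Bad, 64, false, true) << '\n';   // 0
  std::cout << isValidRightShiftImm(Good, 64, false, true) << '\n';  // 1, Good is now 8
}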
--- a/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -6440,26 +6440,20 @@ static bool getVShiftImm(SDValue Op, unsigned ElementBits, int64_t &Cnt) {
 ///   0 <= Value <= ElementBits for a long left shift.
 static bool isVShiftLImm(SDValue Op, EVT VT, bool isLong, int64_t &Cnt) {
   assert(VT.isVector() && "vector shift count is not a vector type");
-  unsigned ElementBits = VT.getVectorElementType().getSizeInBits();
+  int64_t ElementBits = VT.getVectorElementType().getSizeInBits();
   if (!getVShiftImm(Op, ElementBits, Cnt))
     return false;
   return (Cnt >= 0 && (isLong ? Cnt - 1 : Cnt) < ElementBits);
 }
 
 /// isVShiftRImm - Check if this is a valid build_vector for the immediate
-/// operand of a vector shift right operation. For a shift opcode, the value
-/// is positive, but for an intrinsic the value count must be negative. The
-/// absolute value must be in the range:
-///   1 <= |Value| <= ElementBits for a right shift; or
-///   1 <= |Value| <= ElementBits/2 for a narrow right shift.
-static bool isVShiftRImm(SDValue Op, EVT VT, bool isNarrow, bool isIntrinsic,
-                         int64_t &Cnt) {
+/// operand of a vector shift right operation. The value must be in the range:
+///   1 <= Value <= ElementBits for a right shift; or
+static bool isVShiftRImm(SDValue Op, EVT VT, bool isNarrow, int64_t &Cnt) {
   assert(VT.isVector() && "vector shift count is not a vector type");
-  unsigned ElementBits = VT.getVectorElementType().getSizeInBits();
+  int64_t ElementBits = VT.getVectorElementType().getSizeInBits();
   if (!getVShiftImm(Op, ElementBits, Cnt))
     return false;
-  if (isIntrinsic)
-    Cnt = -Cnt;
   return (Cnt >= 1 && Cnt <= (isNarrow ? ElementBits / 2 : ElementBits));
 }
 
@@ -6488,8 +6482,7 @@ SDValue AArch64TargetLowering::LowerVectorSRA_SRL_SHL(SDValue Op,
   case ISD::SRA:
   case ISD::SRL:
     // Right shift immediate
-    if (isVShiftRImm(Op.getOperand(1), VT, false, false, Cnt) &&
-        Cnt < EltSize) {
+    if (isVShiftRImm(Op.getOperand(1), VT, false, Cnt) && Cnt < EltSize) {
       unsigned Opc =
           (Op.getOpcode() == ISD::SRA) ? AArch64ISD::VASHR : AArch64ISD::VLSHR;
       return DAG.getNode(Opc, DL, VT, Op.getOperand(0),
--- a/lib/Target/ARM/ARMISelLowering.cpp
+++ b/lib/Target/ARM/ARMISelLowering.cpp
@@ -9691,7 +9691,7 @@ static bool getVShiftImm(SDValue Op, unsigned ElementBits, int64_t &Cnt) {
 ///   0 <= Value <= ElementBits for a long left shift.
 static bool isVShiftLImm(SDValue Op, EVT VT, bool isLong, int64_t &Cnt) {
   assert(VT.isVector() && "vector shift count is not a vector type");
-  unsigned ElementBits = VT.getVectorElementType().getSizeInBits();
+  int64_t ElementBits = VT.getVectorElementType().getSizeInBits();
   if (! getVShiftImm(Op, ElementBits, Cnt))
     return false;
   return (Cnt >= 0 && (isLong ? Cnt-1 : Cnt) < ElementBits);
@@ -9706,12 +9706,16 @@ static bool isVShiftLImm(SDValue Op, EVT VT, bool isLong, int64_t &Cnt) {
 static bool isVShiftRImm(SDValue Op, EVT VT, bool isNarrow, bool isIntrinsic,
                          int64_t &Cnt) {
   assert(VT.isVector() && "vector shift count is not a vector type");
-  unsigned ElementBits = VT.getVectorElementType().getSizeInBits();
+  int64_t ElementBits = VT.getVectorElementType().getSizeInBits();
   if (! getVShiftImm(Op, ElementBits, Cnt))
     return false;
-  if (isIntrinsic)
+  if (!isIntrinsic)
+    return (Cnt >= 1 && Cnt <= (isNarrow ? ElementBits/2 : ElementBits));
+  if (Cnt >= -(isNarrow ? ElementBits/2 : ElementBits) && Cnt <= -1) {
     Cnt = -Cnt;
-  return (Cnt >= 1 && Cnt <= (isNarrow ? ElementBits/2 : ElementBits));
+    return true;
+  }
+  return false;
 }
 
 /// PerformIntrinsicCombine - ARM-specific DAG combining for intrinsics.
--- /dev/null
+++ b/test/CodeGen/ARM/neon_vshl_minint.ll (new file, 13 lines)
@@ -0,0 +1,13 @@
+; RUN: llc < %s -mtriple=arm-none-eabi -mcpu=cortex-a8 2>&1 | FileCheck %s
+; RUN: llc < %s -mtriple=thumb-none-eabi -mcpu=cortex-a8 2>&1 | FileCheck %s
+
+define <1 x i64> @vshl_minint() #0 {
+entry:
+  ; CHECK-LABEL: vshl_minint
+  ; CHECK: vldr
+  ; CHECK: vshl.u64
+  %vshl.i = tail call <1 x i64> @llvm.arm.neon.vshiftu.v1i64(<1 x i64> undef, <1 x i64> <i64 -9223372036854775808>)
+  ret <1 x i64> %vshl.i
+}
+
+declare <1 x i64> @llvm.arm.neon.vshiftu.v1i64(<1 x i64>, <1 x i64>)
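Assuming a built LLVM tree, the new regression test can be run on its own with llvm-lit (the path to the llvm-lit binary depends on the build directory layout), for example:

  ./build/bin/llvm-lit test/CodeGen/ARM/neon_vshl_minint.ll

or, equivalently, by expanding the first RUN line by hand:

  llc < test/CodeGen/ARM/neon_vshl_minint.ll -mtriple=arm-none-eabi -mcpu=cortex-a8 2>&1 | FileCheck test/CodeGen/ARM/neon_vshl_minint.ll

The -2^63 shift count in the test exercises the previously undefined negation; with the patch applied the CHECK lines require it to be lowered to a plain vldr followed by vshl.u64.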