[FastISel][AArch64] Add select folding support for the XALU intrinsics.

This improves code generation for the XALU intrinsics when their overflow bit is used as the condition of a select instruction. It also updates and enables the XALU unit tests for FastISel.

This fixes <rdar://problem/17831117>.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@214350 91177308-0d34-0410-b5e6-96231b3b80d8
parent 29c424cb12
commit cb99212bc1
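For context, the pattern this fold targets has the shape of the *.select.* tests below: the overflow bit returned by an arithmetic-with-overflow ("XALU") intrinsic feeds the condition of a select. The IR below is a minimal illustrative sketch (the function name is made up for the example, mirroring the existing smulo.select.i32 test); with this change, FastISel selects the CSEL against the flags already set by the overflow computation instead of materializing the i1 into a register and re-testing it with AND/SUBS.

; Minimal sketch (illustrative function name) of the select-folding pattern:
; the overflow bit of an XALU intrinsic is used as the condition of a select.
define i32 @smulo.select.example(i32 %v1, i32 %v2) {
entry:
  %t = call {i32, i1} @llvm.smul.with.overflow.i32(i32 %v1, i32 %v2)
  %obit = extractvalue {i32, i1} %t, 1
  %ret = select i1 %obit, i32 %v1, i32 %v2
  ret i32 %ret
}

declare {i32, i1} @llvm.smul.with.overflow.i32(i32, i32)

Run through llc -march=arm64 -fast-isel -fast-isel-abort, this shape should now produce a single csel keyed on the overflow condition, as the updated smulo.select.i32 and umulo.select.i64 CHECK lines below illustrate.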
@@ -1052,54 +1052,56 @@ bool AArch64FastISel::SelectSelect(const Instruction *I) {
       DestVT != MVT::f64)
     return false;
 
-  unsigned CondReg = getRegForValue(SI->getCondition());
-  if (CondReg == 0)
-    return false;
-  unsigned TrueReg = getRegForValue(SI->getTrueValue());
-  if (TrueReg == 0)
-    return false;
-  unsigned FalseReg = getRegForValue(SI->getFalseValue());
-  if (FalseReg == 0)
-    return false;
-  unsigned SelectOpc;
-  switch (DestVT.SimpleTy) {
-  default: return false;
-  case MVT::i32: SelectOpc = AArch64::CSELWr; break;
-  case MVT::i64: SelectOpc = AArch64::CSELXr; break;
-  case MVT::f32: SelectOpc = AArch64::FCSELSrrr; break;
-  case MVT::f64: SelectOpc = AArch64::FCSELDrrr; break;
-  }
-
-  MRI.constrainRegClass(CondReg, &AArch64::GPR32RegClass);
-  unsigned ANDReg = createResultReg(&AArch64::GPR32spRegClass);
-  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::ANDWri),
-          ANDReg)
-      .addReg(CondReg)
-      .addImm(AArch64_AM::encodeLogicalImmediate(1, 32));
-
-  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::SUBSWri))
-      .addReg(ANDReg)
-      .addImm(0)
-      .addImm(0);
+  const Value *Cond = SI->getCondition();
+  bool NeedTest = true;
+  AArch64CC::CondCode CC = AArch64CC::NE;
+  if (foldXALUIntrinsic(CC, I, Cond))
+    NeedTest = false;
+
+  unsigned CondReg = getRegForValue(Cond);
+  if (!CondReg)
+    return false;
+  bool CondIsKill = hasTrivialKill(Cond);
+
+  if (NeedTest) {
+    MRI.constrainRegClass(CondReg, &AArch64::GPR32RegClass);
+    unsigned ANDReg = createResultReg(&AArch64::GPR32spRegClass);
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::ANDWri),
+            ANDReg)
+        .addReg(CondReg, getKillRegState(CondIsKill))
+        .addImm(AArch64_AM::encodeLogicalImmediate(1, 32));
+
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::SUBSWri))
+        .addReg(ANDReg)
+        .addImm(0)
+        .addImm(0);
+  }
+
+  unsigned SelectOpc;
+  switch (DestVT.SimpleTy) {
+  default:
+    return false;
+  case MVT::i32:
+    SelectOpc = AArch64::CSELWr;
+    break;
+  case MVT::i64:
+    SelectOpc = AArch64::CSELXr;
+    break;
+  case MVT::f32:
+    SelectOpc = AArch64::FCSELSrrr;
+    break;
+  case MVT::f64:
+    SelectOpc = AArch64::FCSELDrrr;
+    break;
+  }
+
+  unsigned TrueReg = getRegForValue(SI->getTrueValue());
+  bool TrueIsKill = hasTrivialKill(SI->getTrueValue());
+
+  unsigned FalseReg = getRegForValue(SI->getFalseValue());
+  bool FalseIsKill = hasTrivialKill(SI->getFalseValue());
+
+  if (!TrueReg || !FalseReg)
+    return false;
 
   unsigned ResultReg = createResultReg(TLI.getRegClassFor(DestVT));
   BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(SelectOpc),
           ResultReg)
-      .addReg(TrueReg)
-      .addReg(FalseReg)
-      .addImm(AArch64CC::NE);
+      .addReg(TrueReg, getKillRegState(TrueIsKill))
+      .addReg(FalseReg, getKillRegState(FalseIsKill))
+      .addImm(CC);
 
   UpdateValueMap(I, ResultReg);
   return true;
@@ -1,13 +1,14 @@
-; RUN: llc < %s -march=arm64 -aarch64-atomic-cfg-tidy=0 | FileCheck %s
+; RUN: llc -march=arm64 -aarch64-atomic-cfg-tidy=0 < %s | FileCheck %s
+; RUN: llc -march=arm64 -aarch64-atomic-cfg-tidy=0 -fast-isel -fast-isel-abort < %s | FileCheck %s
 
 ;
 ; Get the actual value of the overflow bit.
 ;
-define i1 @saddo.i32(i32 %v1, i32 %v2, i32* %res) {
+define zeroext i1 @saddo.i32(i32 %v1, i32 %v2, i32* %res) {
 entry:
 ; CHECK-LABEL: saddo.i32
-; CHECK: adds w8, w0, w1
-; CHECK-NEXT: cset w0, vs
+; CHECK: adds {{w[0-9]+}}, w0, w1
+; CHECK-NEXT: cset {{w[0-9]+}}, vs
   %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %v1, i32 %v2)
   %val = extractvalue {i32, i1} %t, 0
   %obit = extractvalue {i32, i1} %t, 1
@@ -15,11 +16,11 @@ entry:
   ret i1 %obit
 }
 
-define i1 @saddo.i64(i64 %v1, i64 %v2, i64* %res) {
+define zeroext i1 @saddo.i64(i64 %v1, i64 %v2, i64* %res) {
 entry:
 ; CHECK-LABEL: saddo.i64
-; CHECK: adds x8, x0, x1
-; CHECK-NEXT: cset w0, vs
+; CHECK: adds {{x[0-9]+}}, x0, x1
+; CHECK-NEXT: cset {{w[0-9]+}}, vs
   %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %v1, i64 %v2)
   %val = extractvalue {i64, i1} %t, 0
   %obit = extractvalue {i64, i1} %t, 1
@@ -27,11 +28,11 @@ entry:
   ret i1 %obit
 }
 
-define i1 @uaddo.i32(i32 %v1, i32 %v2, i32* %res) {
+define zeroext i1 @uaddo.i32(i32 %v1, i32 %v2, i32* %res) {
 entry:
 ; CHECK-LABEL: uaddo.i32
-; CHECK: adds w8, w0, w1
-; CHECK-NEXT: cset w0, hs
+; CHECK: adds {{w[0-9]+}}, w0, w1
+; CHECK-NEXT: cset {{w[0-9]+}}, hs
   %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %v1, i32 %v2)
   %val = extractvalue {i32, i1} %t, 0
   %obit = extractvalue {i32, i1} %t, 1
@@ -39,11 +40,11 @@ entry:
   ret i1 %obit
 }
 
-define i1 @uaddo.i64(i64 %v1, i64 %v2, i64* %res) {
+define zeroext i1 @uaddo.i64(i64 %v1, i64 %v2, i64* %res) {
 entry:
 ; CHECK-LABEL: uaddo.i64
-; CHECK: adds x8, x0, x1
-; CHECK-NEXT: cset w0, hs
+; CHECK: adds {{x[0-9]+}}, x0, x1
+; CHECK-NEXT: cset {{w[0-9]+}}, hs
   %t = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %v1, i64 %v2)
   %val = extractvalue {i64, i1} %t, 0
   %obit = extractvalue {i64, i1} %t, 1
@@ -51,11 +52,11 @@ entry:
   ret i1 %obit
 }
 
-define i1 @ssubo.i32(i32 %v1, i32 %v2, i32* %res) {
+define zeroext i1 @ssubo.i32(i32 %v1, i32 %v2, i32* %res) {
 entry:
 ; CHECK-LABEL: ssubo.i32
-; CHECK: subs w8, w0, w1
-; CHECK-NEXT: cset w0, vs
+; CHECK: subs {{w[0-9]+}}, w0, w1
+; CHECK-NEXT: cset {{w[0-9]+}}, vs
   %t = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %v1, i32 %v2)
   %val = extractvalue {i32, i1} %t, 0
   %obit = extractvalue {i32, i1} %t, 1
@@ -63,11 +64,11 @@ entry:
   ret i1 %obit
 }
 
-define i1 @ssubo.i64(i64 %v1, i64 %v2, i64* %res) {
+define zeroext i1 @ssubo.i64(i64 %v1, i64 %v2, i64* %res) {
 entry:
 ; CHECK-LABEL: ssubo.i64
-; CHECK: subs x8, x0, x1
-; CHECK-NEXT: cset w0, vs
+; CHECK: subs {{x[0-9]+}}, x0, x1
+; CHECK-NEXT: cset {{w[0-9]+}}, vs
   %t = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %v1, i64 %v2)
   %val = extractvalue {i64, i1} %t, 0
   %obit = extractvalue {i64, i1} %t, 1
@@ -75,11 +76,11 @@ entry:
   ret i1 %obit
 }
 
-define i1 @usubo.i32(i32 %v1, i32 %v2, i32* %res) {
+define zeroext i1 @usubo.i32(i32 %v1, i32 %v2, i32* %res) {
 entry:
 ; CHECK-LABEL: usubo.i32
-; CHECK: subs w8, w0, w1
-; CHECK-NEXT: cset w0, lo
+; CHECK: subs {{w[0-9]+}}, w0, w1
+; CHECK-NEXT: cset {{w[0-9]+}}, lo
   %t = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %v1, i32 %v2)
   %val = extractvalue {i32, i1} %t, 0
   %obit = extractvalue {i32, i1} %t, 1
@@ -87,11 +88,11 @@ entry:
   ret i1 %obit
 }
 
-define i1 @usubo.i64(i64 %v1, i64 %v2, i64* %res) {
+define zeroext i1 @usubo.i64(i64 %v1, i64 %v2, i64* %res) {
 entry:
 ; CHECK-LABEL: usubo.i64
-; CHECK: subs x8, x0, x1
-; CHECK-NEXT: cset w0, lo
+; CHECK: subs {{x[0-9]+}}, x0, x1
+; CHECK-NEXT: cset {{w[0-9]+}}, lo
   %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %v1, i64 %v2)
   %val = extractvalue {i64, i1} %t, 0
   %obit = extractvalue {i64, i1} %t, 1
@@ -99,13 +100,13 @@ entry:
   ret i1 %obit
 }
 
-define i1 @smulo.i32(i32 %v1, i32 %v2, i32* %res) {
+define zeroext i1 @smulo.i32(i32 %v1, i32 %v2, i32* %res) {
 entry:
 ; CHECK-LABEL: smulo.i32
-; CHECK: smull x8, w0, w1
-; CHECK-NEXT: lsr x9, x8, #32
-; CHECK-NEXT: cmp w9, w8, asr #31
-; CHECK-NEXT: cset w0, ne
+; CHECK: smull x[[MREG:[0-9]+]], w0, w1
+; CHECK-NEXT: lsr x[[SREG:[0-9]+]], x[[MREG]], #32
+; CHECK-NEXT: cmp w[[SREG]], w[[MREG]], asr #31
+; CHECK-NEXT: cset {{w[0-9]+}}, ne
   %t = call {i32, i1} @llvm.smul.with.overflow.i32(i32 %v1, i32 %v2)
   %val = extractvalue {i32, i1} %t, 0
   %obit = extractvalue {i32, i1} %t, 1
@@ -113,13 +114,13 @@ entry:
   ret i1 %obit
 }
 
-define i1 @smulo.i64(i64 %v1, i64 %v2, i64* %res) {
+define zeroext i1 @smulo.i64(i64 %v1, i64 %v2, i64* %res) {
 entry:
 ; CHECK-LABEL: smulo.i64
-; CHECK: mul x8, x0, x1
-; CHECK-NEXT: smulh x9, x0, x1
-; CHECK-NEXT: cmp x9, x8, asr #63
-; CHECK-NEXT: cset w0, ne
+; CHECK: mul [[MREG:x[0-9]+]], x0, x1
+; CHECK-NEXT: smulh [[HREG:x[0-9]+]], x0, x1
+; CHECK-NEXT: cmp [[HREG]], [[MREG]], asr #63
+; CHECK-NEXT: cset {{w[0-9]+}}, ne
   %t = call {i64, i1} @llvm.smul.with.overflow.i64(i64 %v1, i64 %v2)
   %val = extractvalue {i64, i1} %t, 0
   %obit = extractvalue {i64, i1} %t, 1
@@ -127,12 +128,12 @@ entry:
   ret i1 %obit
 }
 
-define i1 @umulo.i32(i32 %v1, i32 %v2, i32* %res) {
+define zeroext i1 @umulo.i32(i32 %v1, i32 %v2, i32* %res) {
 entry:
 ; CHECK-LABEL: umulo.i32
-; CHECK: umull x8, w0, w1
-; CHECK-NEXT: cmp xzr, x8, lsr #32
-; CHECK-NEXT: cset w0, ne
+; CHECK: umull [[MREG:x[0-9]+]], w0, w1
+; CHECK-NEXT: cmp xzr, [[MREG]], lsr #32
+; CHECK-NEXT: cset {{w[0-9]+}}, ne
   %t = call {i32, i1} @llvm.umul.with.overflow.i32(i32 %v1, i32 %v2)
   %val = extractvalue {i32, i1} %t, 0
   %obit = extractvalue {i32, i1} %t, 1
@@ -140,13 +141,12 @@ entry:
   ret i1 %obit
 }
 
-define i1 @umulo.i64(i64 %v1, i64 %v2, i64* %res) {
+define zeroext i1 @umulo.i64(i64 %v1, i64 %v2, i64* %res) {
 entry:
 ; CHECK-LABEL: umulo.i64
-; CHECK: umulh x8, x0, x1
-; CHECK-NEXT: cmp xzr, x8
-; CHECK-NEXT: cset w8, ne
-; CHECK-NEXT: mul x9, x0, x1
+; CHECK: umulh [[MREG:x[0-9]+]], x0, x1
+; CHECK-NEXT: cmp xzr, [[MREG]]
+; CHECK-NEXT: cset {{w[0-9]+}}, ne
   %t = call {i64, i1} @llvm.umul.with.overflow.i64(i64 %v1, i64 %v2)
   %val = extractvalue {i64, i1} %t, 0
   %obit = extractvalue {i64, i1} %t, 1
@@ -249,9 +249,9 @@ entry:
 define i32 @smulo.select.i32(i32 %v1, i32 %v2) {
 entry:
 ; CHECK-LABEL: smulo.select.i32
-; CHECK: smull x8, w0, w1
-; CHECK-NEXT: lsr x9, x8, #32
-; CHECK-NEXT: cmp w9, w8, asr #31
+; CHECK: smull x[[MREG:[0-9]+]], w0, w1
+; CHECK-NEXT: lsr x[[SREG:[0-9]+]], x[[MREG]], #32
+; CHECK-NEXT: cmp w[[SREG]], w[[MREG]], asr #31
 ; CHECK-NEXT: csel w0, w0, w1, ne
   %t = call {i32, i1} @llvm.smul.with.overflow.i32(i32 %v1, i32 %v2)
   %obit = extractvalue {i32, i1} %t, 1
@@ -262,9 +262,9 @@ entry:
 define i64 @smulo.select.i64(i64 %v1, i64 %v2) {
 entry:
 ; CHECK-LABEL: smulo.select.i64
-; CHECK: mul x8, x0, x1
-; CHECK-NEXT: smulh x9, x0, x1
-; CHECK-NEXT: cmp x9, x8, asr #63
+; CHECK: mul [[MREG:x[0-9]+]], x0, x1
+; CHECK-NEXT: smulh [[HREG:x[0-9]+]], x0, x1
+; CHECK-NEXT: cmp [[HREG]], [[MREG]], asr #63
 ; CHECK-NEXT: csel x0, x0, x1, ne
   %t = call {i64, i1} @llvm.smul.with.overflow.i64(i64 %v1, i64 %v2)
   %obit = extractvalue {i64, i1} %t, 1
@@ -275,8 +275,8 @@ entry:
 define i32 @umulo.select.i32(i32 %v1, i32 %v2) {
 entry:
 ; CHECK-LABEL: umulo.select.i32
-; CHECK: umull x8, w0, w1
-; CHECK-NEXT: cmp xzr, x8, lsr #32
+; CHECK: umull [[MREG:x[0-9]+]], w0, w1
+; CHECK-NEXT: cmp xzr, [[MREG]], lsr #32
 ; CHECK-NEXT: csel w0, w0, w1, ne
   %t = call {i32, i1} @llvm.umul.with.overflow.i32(i32 %v1, i32 %v2)
   %obit = extractvalue {i32, i1} %t, 1
@@ -287,8 +287,8 @@ entry:
 define i64 @umulo.select.i64(i64 %v1, i64 %v2) {
 entry:
 ; CHECK-LABEL: umulo.select.i64
-; CHECK: umulh x8, x0, x1
-; CHECK-NEXT: cmp xzr, x8
+; CHECK: umulh [[MREG:x[0-9]+]], x0, x1
+; CHECK-NEXT: cmp xzr, [[MREG]]
 ; CHECK-NEXT: csel x0, x0, x1, ne
   %t = call {i64, i1} @llvm.umul.with.overflow.i64(i64 %v1, i64 %v2)
   %obit = extractvalue {i64, i1} %t, 1
@@ -300,7 +300,7 @@ entry:
 ;
 ; Check the use of the overflow bit in combination with a branch instruction.
 ;
-define i1 @saddo.br.i32(i32 %v1, i32 %v2) {
+define zeroext i1 @saddo.br.i32(i32 %v1, i32 %v2) {
 entry:
 ; CHECK-LABEL: saddo.br.i32
 ; CHECK: cmn w0, w1
@@ -317,7 +317,7 @@ continue:
   ret i1 true
 }
 
-define i1 @saddo.br.i64(i64 %v1, i64 %v2) {
+define zeroext i1 @saddo.br.i64(i64 %v1, i64 %v2) {
 entry:
 ; CHECK-LABEL: saddo.br.i64
 ; CHECK: cmn x0, x1
@@ -334,7 +334,7 @@ continue:
   ret i1 true
 }
 
-define i1 @uaddo.br.i32(i32 %v1, i32 %v2) {
+define zeroext i1 @uaddo.br.i32(i32 %v1, i32 %v2) {
 entry:
 ; CHECK-LABEL: uaddo.br.i32
 ; CHECK: cmn w0, w1
@@ -351,7 +351,7 @@ continue:
   ret i1 true
 }
 
-define i1 @uaddo.br.i64(i64 %v1, i64 %v2) {
+define zeroext i1 @uaddo.br.i64(i64 %v1, i64 %v2) {
 entry:
 ; CHECK-LABEL: uaddo.br.i64
 ; CHECK: cmn x0, x1
@@ -368,7 +368,7 @@ continue:
   ret i1 true
 }
 
-define i1 @ssubo.br.i32(i32 %v1, i32 %v2) {
+define zeroext i1 @ssubo.br.i32(i32 %v1, i32 %v2) {
 entry:
 ; CHECK-LABEL: ssubo.br.i32
 ; CHECK: cmp w0, w1
@@ -385,7 +385,7 @@ continue:
   ret i1 true
 }
 
-define i1 @ssubo.br.i64(i64 %v1, i64 %v2) {
+define zeroext i1 @ssubo.br.i64(i64 %v1, i64 %v2) {
 entry:
 ; CHECK-LABEL: ssubo.br.i64
 ; CHECK: cmp x0, x1
@@ -402,7 +402,7 @@ continue:
   ret i1 true
 }
 
-define i1 @usubo.br.i32(i32 %v1, i32 %v2) {
+define zeroext i1 @usubo.br.i32(i32 %v1, i32 %v2) {
 entry:
 ; CHECK-LABEL: usubo.br.i32
 ; CHECK: cmp w0, w1
@@ -419,7 +419,7 @@ continue:
   ret i1 true
 }
 
-define i1 @usubo.br.i64(i64 %v1, i64 %v2) {
+define zeroext i1 @usubo.br.i64(i64 %v1, i64 %v2) {
 entry:
 ; CHECK-LABEL: usubo.br.i64
 ; CHECK: cmp x0, x1
@@ -436,12 +436,12 @@ continue:
   ret i1 true
 }
 
-define i1 @smulo.br.i32(i32 %v1, i32 %v2) {
+define zeroext i1 @smulo.br.i32(i32 %v1, i32 %v2) {
 entry:
 ; CHECK-LABEL: smulo.br.i32
-; CHECK: smull x8, w0, w1
-; CHECK-NEXT: lsr x9, x8, #32
-; CHECK-NEXT: cmp w9, w8, asr #31
+; CHECK: smull x[[MREG:[0-9]+]], w0, w1
+; CHECK-NEXT: lsr x[[SREG:[0-9]+]], x8, #32
+; CHECK-NEXT: cmp w[[SREG]], w[[MREG]], asr #31
 ; CHECK-NEXT: b.eq
   %t = call {i32, i1} @llvm.smul.with.overflow.i32(i32 %v1, i32 %v2)
   %val = extractvalue {i32, i1} %t, 0
@@ -455,12 +455,12 @@ continue:
   ret i1 true
 }
 
-define i1 @smulo.br.i64(i64 %v1, i64 %v2) {
+define zeroext i1 @smulo.br.i64(i64 %v1, i64 %v2) {
 entry:
 ; CHECK-LABEL: smulo.br.i64
-; CHECK: mul x8, x0, x1
-; CHECK-NEXT: smulh x9, x0, x1
-; CHECK-NEXT: cmp x9, x8, asr #63
+; CHECK: mul [[MREG:x[0-9]+]], x0, x1
+; CHECK-NEXT: smulh [[HREG:x[0-9]+]], x0, x1
+; CHECK-NEXT: cmp [[HREG]], [[MREG]], asr #63
 ; CHECK-NEXT: b.eq
   %t = call {i64, i1} @llvm.smul.with.overflow.i64(i64 %v1, i64 %v2)
   %val = extractvalue {i64, i1} %t, 0
@@ -474,11 +474,11 @@ continue:
   ret i1 true
 }
 
-define i1 @umulo.br.i32(i32 %v1, i32 %v2) {
+define zeroext i1 @umulo.br.i32(i32 %v1, i32 %v2) {
 entry:
 ; CHECK-LABEL: umulo.br.i32
-; CHECK: umull x8, w0, w1
-; CHECK-NEXT: cmp xzr, x8, lsr #32
+; CHECK: umull [[MREG:x[0-9]+]], w0, w1
+; CHECK-NEXT: cmp xzr, [[MREG]], lsr #32
 ; CHECK-NEXT: b.eq
   %t = call {i32, i1} @llvm.umul.with.overflow.i32(i32 %v1, i32 %v2)
   %val = extractvalue {i32, i1} %t, 0
@@ -492,11 +492,11 @@ continue:
   ret i1 true
 }
 
-define i1 @umulo.br.i64(i64 %v1, i64 %v2) {
+define zeroext i1 @umulo.br.i64(i64 %v1, i64 %v2) {
 entry:
 ; CHECK-LABEL: umulo.br.i64
-; CHECK: umulh x8, x0, x1
-; CHECK-NEXT: cbz
+; CHECK: umulh [[REG:x[0-9]+]], x0, x1
+; CHECK-NEXT: {{cbz|cmp}}
   %t = call {i64, i1} @llvm.umul.with.overflow.i64(i64 %v1, i64 %v2)
   %val = extractvalue {i64, i1} %t, 0
   %obit = extractvalue {i64, i1} %t, 1