diff --git a/lib/Target/AArch64/AArch64FastISel.cpp b/lib/Target/AArch64/AArch64FastISel.cpp
index 28e4b0a4599..1b1aaf214fb 100644
--- a/lib/Target/AArch64/AArch64FastISel.cpp
+++ b/lib/Target/AArch64/AArch64FastISel.cpp
@@ -150,6 +150,7 @@ private:
   bool foldXALUIntrinsic(AArch64CC::CondCode &CC, const Instruction *I,
                          const Value *Cond);
   bool optimizeIntExtLoad(const Instruction *I, MVT RetVT, MVT SrcVT);
+  bool optimizeSelect(const SelectInst *SI);
 
   // Emit helper routines.
   unsigned emitAddSub(bool UseAdd, MVT RetVT, const Value *LHS,
@@ -2496,6 +2497,63 @@ bool AArch64FastISel::selectCmp(const Instruction *I) {
   return true;
 }
 
+/// \brief Optimize selects of i1 if one of the operands has a 'true' or 'false'
+/// value.
+bool AArch64FastISel::optimizeSelect(const SelectInst *SI) {
+  if (!SI->getType()->isIntegerTy(1))
+    return false;
+
+  const Value *Src1Val, *Src2Val;
+  unsigned Opc = 0;
+  bool NeedExtraOp = false;
+  if (auto *CI = dyn_cast<ConstantInt>(SI->getTrueValue())) {
+    if (CI->isOne()) {
+      Src1Val = SI->getCondition();
+      Src2Val = SI->getFalseValue();
+      Opc = AArch64::ORRWrr;
+    } else {
+      assert(CI->isZero());
+      Src1Val = SI->getFalseValue();
+      Src2Val = SI->getCondition();
+      Opc = AArch64::BICWrr;
+    }
+  } else if (auto *CI = dyn_cast<ConstantInt>(SI->getFalseValue())) {
+    if (CI->isOne()) {
+      Src1Val = SI->getCondition();
+      Src2Val = SI->getTrueValue();
+      Opc = AArch64::ORRWrr;
+      NeedExtraOp = true;
+    } else {
+      assert(CI->isZero());
+      Src1Val = SI->getCondition();
+      Src2Val = SI->getTrueValue();
+      Opc = AArch64::ANDWrr;
+    }
+  }
+
+  if (!Opc)
+    return false;
+
+  unsigned Src1Reg = getRegForValue(Src1Val);
+  if (!Src1Reg)
+    return false;
+  bool Src1IsKill = hasTrivialKill(Src1Val);
+
+  unsigned Src2Reg = getRegForValue(Src2Val);
+  if (!Src2Reg)
+    return false;
+  bool Src2IsKill = hasTrivialKill(Src2Val);
+
+  if (NeedExtraOp) {
+    Src1Reg = emitLogicalOp_ri(ISD::XOR, MVT::i32, Src1Reg, Src1IsKill, 1);
+    Src1IsKill = true;
+  }
+  unsigned ResultReg = fastEmitInst_rr(Opc, &AArch64::GPR32spRegClass, Src1Reg,
+                                       Src1IsKill, Src2Reg, Src2IsKill);
+  updateValueMap(SI, ResultReg);
+  return true;
+}
+
 bool AArch64FastISel::selectSelect(const Instruction *I) {
   assert(isa<SelectInst>(I) && "Expected a select instruction.");
   MVT VT;
@@ -2533,6 +2591,9 @@ bool AArch64FastISel::selectSelect(const Instruction *I) {
   AArch64CC::CondCode CC = AArch64CC::NE;
   AArch64CC::CondCode ExtraCC = AArch64CC::AL;
 
+  if (optimizeSelect(SI))
+    return true;
+
   // Try to pickup the flags, so we don't have to emit another compare.
   if (foldXALUIntrinsic(CC, I, Cond)) {
     // Fake request the condition to force emission of the XALU intrinsic.
diff --git a/test/CodeGen/AArch64/fast-isel-select.ll b/test/CodeGen/AArch64/fast-isel-select.ll
index a4fc1b54a53..928e9d46741 100644
--- a/test/CodeGen/AArch64/fast-isel-select.ll
+++ b/test/CodeGen/AArch64/fast-isel-select.ll
@@ -284,3 +284,33 @@ define float @select_icmp_sle(i32 %x, i32 %y, float %a, float %b) {
   %2 = select i1 %1, float %a, float %b
   ret float %2
 }
+
+; Test peephole optimizations for select.
+define zeroext i1 @select_opt1(i1 zeroext %c, i1 zeroext %a) {
+; CHECK-LABEL: select_opt1
+; CHECK: orr {{w[0-9]+}}, w0, w1
+  %1 = select i1 %c, i1 true, i1 %a
+  ret i1 %1
+}
+
+define zeroext i1 @select_opt2(i1 zeroext %c, i1 zeroext %a) {
+; CHECK-LABEL: select_opt2
+; CHECK: eor [[REG:w[0-9]+]], w0, #0x1
+; CHECK: orr {{w[0-9]+}}, [[REG]], w1
+  %1 = select i1 %c, i1 %a, i1 true
+  ret i1 %1
+}
+
+define zeroext i1 @select_opt3(i1 zeroext %c, i1 zeroext %a) {
+; CHECK-LABEL: select_opt3
+; CHECK: bic {{w[0-9]+}}, w1, w0
+  %1 = select i1 %c, i1 false, i1 %a
+  ret i1 %1
+}
+
+define zeroext i1 @select_opt4(i1 zeroext %c, i1 zeroext %a) {
+; CHECK-LABEL: select_opt4
+; CHECK: and {{w[0-9]+}}, w0, w1
+  %1 = select i1 %c, i1 %a, i1 false
+  ret i1 %1
+}
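
Note (not part of the patch): the peephole rests on four boolean identities over i1, one per test above. Below is a minimal standalone C++ sketch that exhaustively checks them; the file name and everything in it are illustrative only and do not correspond to any LLVM API.

  // check_select_peephole.cpp (hypothetical, illustrative only): verify that
  // each select-of-i1 form equals the logical operation the patch emits.
  #include <cassert>

  int main() {
    for (int c = 0; c <= 1; ++c) {
      for (int a = 0; a <= 1; ++a) {
        assert((c ? 1 : a) == (c | a));       // select c, true, a  -> ORRWrr c, a
        assert((c ? a : 1) == ((c ^ 1) | a)); // select c, a, true  -> EOR c, #1; ORRWrr
        assert((c ? 0 : a) == (a & ~c));      // select c, false, a -> BICWrr a, c
        assert((c ? a : 0) == (c & a));       // select c, a, false -> ANDWrr c, a
      }
    }
    return 0;
  }

Only the second form needs an extra instruction: the patch first inverts the condition via emitLogicalOp_ri(ISD::XOR, MVT::i32, ..., 1) and then emits ORRWrr, which is exactly the eor/orr pair checked in select_opt2.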