- Use patterns instead of creating completely new instruction matching patterns,
  which are identical to the original patterns.

- Change the multiply with overflow so that we distinguish between signed and
  unsigned multiplication. Currently, unsigned multiplication with overflow
  isn't working!


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@60963 91177308-0d34-0410-b5e6-96231b3b80d8
Bill Wendling 2008-12-12 21:15:41 +00:00
parent e062b07689
commit d350e02e19
6 changed files with 507 additions and 543 deletions
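
For context on the second bullet above: the signed intrinsic reports overflow through OF (so its branch lowers to "jo"), while the unsigned intrinsic reports it through CF (lowered to "jc"), which is why the lowering now needs distinct X86ISD::SMUL and X86ISD::UMUL nodes with X86::COND_O and X86::COND_C instead of a single X86ISD::MUL. A minimal IR sketch of the two intrinsics, with illustrative function names not taken from the tests in this commit:

define i1 @smul_ovf(i32 %v1, i32 %v2) nounwind {
entry:
  ; signed multiply with overflow: selected as X86ISD::SMUL, overflow bit in OF ("jo")
  %t = call {i32, i1} @llvm.smul.with.overflow.i32(i32 %v1, i32 %v2)
  %obit = extractvalue {i32, i1} %t, 1
  ret i1 %obit
}

define i1 @umul_ovf(i32 %v1, i32 %v2) nounwind {
entry:
  ; unsigned multiply with overflow: selected as X86ISD::UMUL, overflow bit in CF ("jc")
  ; (still not supported in this commit -- see the new XFAIL'd umul test below)
  %t = call {i32, i1} @llvm.umul.with.overflow.i32(i32 %v1, i32 %v2)
  %obit = extractvalue {i32, i1} %t, 1
  ret i1 %obit
}

declare {i32, i1} @llvm.smul.with.overflow.i32(i32, i32)
declare {i32, i1} @llvm.umul.with.overflow.i32(i32, i32)

Accordingly, the existing test below keeps only the signed case, and the unsigned case moves into a new test that is XFAIL'd until UMULO lowering works.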


@ -5217,9 +5217,10 @@ SDValue X86TargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) {
if (Cond.getOpcode() == ISD::SETCC)
Cond = LowerSETCC(Cond, DAG);
else if (Cond.getOpcode() == X86ISD::ADD ||
Cond.getOpcode() == X86ISD::SUB ||
Cond.getOpcode() == X86ISD::MUL)
else if (Cond.getOpcode() == X86ISD::ADD ||
Cond.getOpcode() == X86ISD::SUB ||
Cond.getOpcode() == X86ISD::SMUL ||
Cond.getOpcode() == X86ISD::UMUL)
Cond = LowerXALUO(Cond, DAG);
// If condition flag is set by a X86ISD::CMP, then use it as the condition
@ -6165,11 +6166,11 @@ SDValue X86TargetLowering::LowerXALUO(SDValue Op, SelectionDAG &DAG) {
Cond = X86::COND_C;
break;
case ISD::SMULO:
BaseOp = X86ISD::MUL;
BaseOp = X86ISD::SMUL;
Cond = X86::COND_O;
break;
case ISD::UMULO:
BaseOp = X86ISD::MUL;
BaseOp = X86ISD::UMUL;
Cond = X86::COND_C;
break;
}
@ -6497,7 +6498,8 @@ const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const {
case X86ISD::PCMPGTQ: return "X86ISD::PCMPGTQ";
case X86ISD::ADD: return "X86ISD::ADD";
case X86ISD::SUB: return "X86ISD::SUB";
case X86ISD::MUL: return "X86ISD::MUL";
case X86ISD::SMUL: return "X86ISD::SMUL";
case X86ISD::UMUL: return "X86ISD::UMUL";
}
}


@ -229,9 +229,9 @@ namespace llvm {
PCMPEQB, PCMPEQW, PCMPEQD, PCMPEQQ,
PCMPGTB, PCMPGTW, PCMPGTD, PCMPGTQ,
// ADD, SUB, MUL - Arithmetic operations with overflow/carry
// ADD, SUB, SMUL, UMUL - Arithmetic operations with overflow/carry
// intrinsics.
ADD, SUB, MUL
ADD, SUB, SMUL, UMUL
};
}


@ -315,73 +315,40 @@ let isCommutable = 1 in
// Register-Register Addition
def ADD64rr : RI<0x01, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
"add{q}\t{$src2, $dst|$dst, $src2}",
[(set GR64:$dst, (add GR64:$src1, GR64:$src2))]>;
// Register-Register Addition with Overflow
def ADDOvf64rr : RI<0x01, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
"add{q}\t{$src2, $dst|$dst, $src2}",
[(set GR64:$dst, (X86add_ovf GR64:$src1, GR64:$src2)),
[(set GR64:$dst, (add GR64:$src1, GR64:$src2)),
(implicit EFLAGS)]>;
// Register-Integer Addition
def ADD64ri32 : RIi32<0x81, MRM0r, (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
"add{q}\t{$src2, $dst|$dst, $src2}",
[(set GR64:$dst, (add GR64:$src1, i64immSExt32:$src2))]>;
[(set GR64:$dst, (add GR64:$src1, i64immSExt32:$src2)),
(implicit EFLAGS)]>;
def ADD64ri8 : RIi8<0x83, MRM0r, (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
"add{q}\t{$src2, $dst|$dst, $src2}",
[(set GR64:$dst, (add GR64:$src1, i64immSExt8:$src2))]>;
// Register-Integer Addition with Overflow
def ADDOvf64ri32 : RIi32<0x81, MRM0r, (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
"add{q}\t{$src2, $dst|$dst, $src2}",
[(set GR64:$dst, (X86add_ovf GR64:$src1, i64immSExt32:$src2)),
(implicit EFLAGS)]>;
def ADDOvf64ri8 : RIi8<0x83, MRM0r, (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
"add{q}\t{$src2, $dst|$dst, $src2}",
[(set GR64:$dst, (X86add_ovf GR64:$src1, i64immSExt8:$src2)),
(implicit EFLAGS)]>;
[(set GR64:$dst, (add GR64:$src1, i64immSExt8:$src2)),
(implicit EFLAGS)]>;
} // isConvertibleToThreeAddress
// Register-Memory Addition
def ADD64rm : RI<0x03, MRMSrcMem, (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
"add{q}\t{$src2, $dst|$dst, $src2}",
[(set GR64:$dst, (add GR64:$src1, (load addr:$src2)))]>;
// Register-Memory Addition with Overflow
def ADDOvf64rm : RI<0x03, MRMSrcMem, (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
"add{q}\t{$src2, $dst|$dst, $src2}",
[(set GR64:$dst, (X86add_ovf GR64:$src1, (load addr:$src2))),
[(set GR64:$dst, (add GR64:$src1, (load addr:$src2))),
(implicit EFLAGS)]>;
} // isTwoAddress
// Memory-Register Addition
def ADD64mr : RI<0x01, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
"add{q}\t{$src2, $dst|$dst, $src2}",
[(store (add (load addr:$dst), GR64:$src2), addr:$dst)]>;
[(store (add (load addr:$dst), GR64:$src2), addr:$dst),
(implicit EFLAGS)]>;
def ADD64mi32 : RIi32<0x81, MRM0m, (outs), (ins i64mem:$dst, i64i32imm :$src2),
"add{q}\t{$src2, $dst|$dst, $src2}",
[(store (add (load addr:$dst), i64immSExt32:$src2), addr:$dst)]>;
[(store (add (load addr:$dst), i64immSExt32:$src2), addr:$dst),
(implicit EFLAGS)]>;
def ADD64mi8 : RIi8<0x83, MRM0m, (outs), (ins i64mem:$dst, i64i8imm :$src2),
"add{q}\t{$src2, $dst|$dst, $src2}",
[(store (add (load addr:$dst), i64immSExt8:$src2), addr:$dst)]>;
// Memory-Register Addition with Overflow
def ADDOvf64mr : RI<0x01, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
"add{q}\t{$src2, $dst|$dst, $src2}",
[(store (X86add_ovf (load addr:$dst), GR64:$src2),
addr:$dst),
(implicit EFLAGS)]>;
def ADDOvf64mi32 : RIi32<0x81, MRM0m, (outs),(ins i64mem:$dst, i64i32imm:$src2),
"add{q}\t{$src2, $dst|$dst, $src2}",
[(store (X86add_ovf (load addr:$dst),
i64immSExt32:$src2),
addr:$dst),
(implicit EFLAGS)]>;
def ADDOvf64mi8 : RIi8<0x83, MRM0m, (outs), (ins i64mem:$dst, i64i8imm :$src2),
"add{q}\t{$src2, $dst|$dst, $src2}",
[(store (X86add_ovf (load addr:$dst), i64immSExt8:$src2),
addr:$dst),
(implicit EFLAGS)]>;
[(store (add (load addr:$dst), i64immSExt8:$src2), addr:$dst),
(implicit EFLAGS)]>;
let Uses = [EFLAGS] in {
let isTwoAddress = 1 in {
@ -417,83 +384,45 @@ let isTwoAddress = 1 in {
// Register-Register Subtraction
def SUB64rr : RI<0x29, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
"sub{q}\t{$src2, $dst|$dst, $src2}",
[(set GR64:$dst, (sub GR64:$src1, GR64:$src2))]>;
// Register-Register Subtraction with Overflow
def SUBOvf64rr : RI<0x29, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
"sub{q}\t{$src2, $dst|$dst, $src2}",
[(set GR64:$dst, (X86sub_ovf GR64:$src1, GR64:$src2)),
(implicit EFLAGS)]>;
[(set GR64:$dst, (sub GR64:$src1, GR64:$src2)),
(implicit EFLAGS)]>;
// Register-Memory Subtraction
def SUB64rm : RI<0x2B, MRMSrcMem, (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
"sub{q}\t{$src2, $dst|$dst, $src2}",
[(set GR64:$dst, (sub GR64:$src1, (load addr:$src2)))]>;
// Register-Memory Subtraction with Overflow
def SUBOvf64rm : RI<0x2B, MRMSrcMem, (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
"sub{q}\t{$src2, $dst|$dst, $src2}",
[(set GR64:$dst, (X86sub_ovf GR64:$src1, (load addr:$src2))),
(implicit EFLAGS)]>;
[(set GR64:$dst, (sub GR64:$src1, (load addr:$src2))),
(implicit EFLAGS)]>;
// Register-Integer Subtraction
def SUB64ri32 : RIi32<0x81, MRM5r, (outs GR64:$dst),
(ins GR64:$src1, i64i32imm:$src2),
"sub{q}\t{$src2, $dst|$dst, $src2}",
[(set GR64:$dst, (sub GR64:$src1, i64immSExt32:$src2))]>;
[(set GR64:$dst, (sub GR64:$src1, i64immSExt32:$src2)),
(implicit EFLAGS)]>;
def SUB64ri8 : RIi8<0x83, MRM5r, (outs GR64:$dst),
(ins GR64:$src1, i64i8imm:$src2),
"sub{q}\t{$src2, $dst|$dst, $src2}",
[(set GR64:$dst, (sub GR64:$src1, i64immSExt8:$src2))]>;
// Register-Integer Subtraction with Overflow
def SUBOvf64ri32 : RIi32<0x81, MRM5r, (outs GR64:$dst),
(ins GR64:$src1, i64i32imm:$src2),
"sub{q}\t{$src2, $dst|$dst, $src2}",
[(set GR64:$dst, (X86sub_ovf GR64:$src1,
i64immSExt32:$src2)),
(implicit EFLAGS)]>;
def SUBOvf64ri8 : RIi8<0x83, MRM5r, (outs GR64:$dst),
(ins GR64:$src1, i64i8imm:$src2),
"sub{q}\t{$src2, $dst|$dst, $src2}",
[(set GR64:$dst, (X86sub_ovf GR64:$src1,
i64immSExt8:$src2)),
(implicit EFLAGS)]>;
[(set GR64:$dst, (sub GR64:$src1, i64immSExt8:$src2)),
(implicit EFLAGS)]>;
} // isTwoAddress
// Memory-Register Subtraction
def SUB64mr : RI<0x29, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
"sub{q}\t{$src2, $dst|$dst, $src2}",
[(store (sub (load addr:$dst), GR64:$src2), addr:$dst)]>;
// Memory-Register Subtraction with Overflow
def SUBOvf64mr : RI<0x29, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
"sub{q}\t{$src2, $dst|$dst, $src2}",
[(store (X86sub_ovf (load addr:$dst), GR64:$src2),
addr:$dst),
(implicit EFLAGS)]>;
[(store (sub (load addr:$dst), GR64:$src2), addr:$dst),
(implicit EFLAGS)]>;
// Memory-Integer Subtraction
def SUB64mi32 : RIi32<0x81, MRM5m, (outs), (ins i64mem:$dst, i64i32imm:$src2),
"sub{q}\t{$src2, $dst|$dst, $src2}",
[(store (sub (load addr:$dst), i64immSExt32:$src2),
addr:$dst)]>;
addr:$dst),
(implicit EFLAGS)]>;
def SUB64mi8 : RIi8<0x83, MRM5m, (outs), (ins i64mem:$dst, i64i8imm :$src2),
"sub{q}\t{$src2, $dst|$dst, $src2}",
[(store (sub (load addr:$dst), i64immSExt8:$src2),
addr:$dst)]>;
// Memory-Integer Subtraction with Overflow
def SUBOvf64mi32 : RIi32<0x81, MRM5m, (outs), (ins i64mem:$dst,i64i32imm:$src2),
"sub{q}\t{$src2, $dst|$dst, $src2}",
[(store (X86sub_ovf (load addr:$dst),
i64immSExt32:$src2), addr:$dst),
(implicit EFLAGS)]>;
def SUBOvf64mi8 : RIi8<0x83, MRM5m, (outs), (ins i64mem:$dst, i64i8imm :$src2),
"sub{q}\t{$src2, $dst|$dst, $src2}",
[(store (X86sub_ovf (load addr:$dst), i64immSExt8:$src2),
addr:$dst),
(implicit EFLAGS)]>;
addr:$dst),
(implicit EFLAGS)]>;
let Uses = [EFLAGS] in {
let isTwoAddress = 1 in {
@ -544,85 +473,48 @@ def IMUL64m : RI<0xF7, MRM5m, (outs), (ins i64mem:$src),
let Defs = [EFLAGS] in {
let isTwoAddress = 1 in {
let isCommutable = 1 in
// Register-Register Integer Multiplication
// Register-Register Signed Integer Multiplication
def IMUL64rr : RI<0xAF, MRMSrcReg, (outs GR64:$dst),
(ins GR64:$src1, GR64:$src2),
"imul{q}\t{$src2, $dst|$dst, $src2}",
[(set GR64:$dst, (mul GR64:$src1, GR64:$src2))]>, TB;
[(set GR64:$dst, (mul GR64:$src1, GR64:$src2)),
(implicit EFLAGS)]>, TB;
// Register-Register Multiplication with Overflow
def IMULOvf64rr : RI<0xAF, MRMSrcReg, (outs GR64:$dst),
(ins GR64:$src1, GR64:$src2),
"imul{q}\t{$src2, $dst|$dst, $src2}",
[(set GR64:$dst, (X86mul_ovf GR64:$src1, GR64:$src2)),
(implicit EFLAGS)]>, TB;
// Register-Memory Integer Multiplication
// Register-Memory Signed Integer Multiplication
def IMUL64rm : RI<0xAF, MRMSrcMem, (outs GR64:$dst),
(ins GR64:$src1, i64mem:$src2),
"imul{q}\t{$src2, $dst|$dst, $src2}",
[(set GR64:$dst, (mul GR64:$src1, (load addr:$src2)))]>, TB;
// Register-Memory Integer Multiplication with Overflow
def IMULOvf64rm : RI<0xAF, MRMSrcMem, (outs GR64:$dst),
(ins GR64:$src1, i64mem:$src2),
"imul{q}\t{$src2, $dst|$dst, $src2}",
[(set GR64:$dst, (X86mul_ovf GR64:$src1,
(load addr:$src2))),
(implicit EFLAGS)]>, TB;
[(set GR64:$dst, (mul GR64:$src1, (load addr:$src2))),
(implicit EFLAGS)]>, TB;
} // isTwoAddress
// Suprisingly enough, these are not two address instructions!
// Register-Integer Integer Multiplication
// Register-Integer Signed Integer Multiplication
def IMUL64rri32 : RIi32<0x69, MRMSrcReg, // GR64 = GR64*I32
(outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
"imul{q}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set GR64:$dst, (mul GR64:$src1, i64immSExt32:$src2))]>;
[(set GR64:$dst, (mul GR64:$src1, i64immSExt32:$src2)),
(implicit EFLAGS)]>;
def IMUL64rri8 : RIi8<0x6B, MRMSrcReg, // GR64 = GR64*I8
(outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
"imul{q}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set GR64:$dst, (mul GR64:$src1, i64immSExt8:$src2))]>;
[(set GR64:$dst, (mul GR64:$src1, i64immSExt8:$src2)),
(implicit EFLAGS)]>;
// Register-Integer Integer Multiplication with Overflow
def IMULOvf64rri32 : RIi32<0x69, MRMSrcReg, // GR64 = GR64*I32
(outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
"imul{q}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set GR64:$dst, (X86mul_ovf GR64:$src1,
i64immSExt32:$src2)),
(implicit EFLAGS)]>;
def IMULOvf64rri8 : RIi8<0x6B, MRMSrcReg, // GR64 = GR64*I8
(outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
"imul{q}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set GR64:$dst, (X86mul_ovf GR64:$src1,
i64immSExt8:$src2)),
(implicit EFLAGS)]>;
// Memory-Integer Integer Multiplication
// Memory-Integer Signed Integer Multiplication
def IMUL64rmi32 : RIi32<0x69, MRMSrcMem, // GR64 = [mem64]*I32
(outs GR64:$dst), (ins i64mem:$src1, i64i32imm:$src2),
"imul{q}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set GR64:$dst, (mul (load addr:$src1),
i64immSExt32:$src2))]>;
i64immSExt32:$src2)),
(implicit EFLAGS)]>;
def IMUL64rmi8 : RIi8<0x6B, MRMSrcMem, // GR64 = [mem64]*I8
(outs GR64:$dst), (ins i64mem:$src1, i64i8imm: $src2),
"imul{q}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set GR64:$dst, (mul (load addr:$src1),
i64immSExt8:$src2))]>;
// Memory-Integer Integer Multiplication with Overflow
def IMULOvf64rmi32 : RIi32<0x69, MRMSrcMem, // GR64 = [mem64]*I32
(outs GR64:$dst), (ins i64mem:$src1, i64i32imm:$src2),
"imul{q}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set GR64:$dst, (X86mul_ovf (load addr:$src1),
i64immSExt32:$src2)),
(implicit EFLAGS)]>;
def IMULOvf64rmi8 : RIi8<0x6B, MRMSrcMem, // GR64 = [mem64]*I8
(outs GR64:$dst), (ins i64mem:$src1, i64i8imm: $src2),
"imul{q}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set GR64:$dst, (X86mul_ovf (load addr:$src1),
i64immSExt8:$src2)),
(implicit EFLAGS)]>;
i64immSExt8:$src2)),
(implicit EFLAGS)]>;
} // Defs = [EFLAGS]
// Unsigned division / remainder
@ -1614,6 +1506,101 @@ def : Pat<(subc GR64:$src1, imm:$src2),
def : Pat<(subc GR64:$src1, i64immSExt8:$src2),
(SUB64ri8 GR64:$src1, i64immSExt8:$src2)>;
//===----------------------------------------------------------------------===//
// Overflow Patterns
//===----------------------------------------------------------------------===//
// Register-Register Addition with Overflow
def : Pat<(parallel (X86add_ovf GR64:$src1, GR64:$src2),
(implicit EFLAGS)),
(ADD64rr GR64:$src1, GR64:$src2)>;
// Register-Integer Addition with Overflow
def : Pat<(parallel (X86add_ovf GR64:$src1, i64immSExt32:$src2),
(implicit EFLAGS)),
(ADD64ri32 GR64:$src1, i64immSExt32:$src2)>;
def : Pat<(parallel (X86add_ovf GR64:$src1, i64immSExt8:$src2),
(implicit EFLAGS)),
(ADD64ri8 GR64:$src1, i64immSExt8:$src2)>;
// Register-Memory Addition with Overflow
def : Pat<(parallel (X86add_ovf GR64:$src1, (load addr:$src2)),
(implicit EFLAGS)),
(ADD64rm GR64:$src1, addr:$src2)>;
// Memory-Register Addition with Overflow
def : Pat<(parallel (store (X86add_ovf (load addr:$dst), GR64:$src2),
addr:$dst),
(implicit EFLAGS)),
(ADD64mr addr:$dst, GR64:$src2)>;
def : Pat<(parallel (store (X86add_ovf (load addr:$dst), i64immSExt32:$src2),
addr:$dst),
(implicit EFLAGS)),
(ADD64mi32 addr:$dst, i64immSExt32:$src2)>;
def : Pat<(parallel (store (X86add_ovf (load addr:$dst), i64immSExt8:$src2),
addr:$dst),
(implicit EFLAGS)),
(ADD64mi8 addr:$dst, i64immSExt8:$src2)>;
// Register-Register Subtraction with Overflow
def : Pat<(parallel (X86sub_ovf GR64:$src1, GR64:$src2),
(implicit EFLAGS)),
(SUB64rr GR64:$src1, GR64:$src2)>;
// Register-Memory Subtraction with Overflow
def : Pat<(parallel (X86sub_ovf GR64:$src1, (load addr:$src2)),
(implicit EFLAGS)),
(SUB64rm GR64:$src1, addr:$src2)>;
// Register-Integer Subtraction with Overflow
def : Pat<(parallel (X86sub_ovf GR64:$src1, i64immSExt32:$src2),
(implicit EFLAGS)),
(SUB64ri32 GR64:$src1, i64immSExt32:$src2)>;
def : Pat<(parallel (X86sub_ovf GR64:$src1, i64immSExt8:$src2),
(implicit EFLAGS)),
(SUB64ri8 GR64:$src1, i64immSExt8:$src2)>;
// Memory-Register Subtraction with Overflow
def : Pat<(parallel (store (X86sub_ovf (load addr:$dst), GR64:$src2),
addr:$dst),
(implicit EFLAGS)),
(SUB64mr addr:$dst, GR64:$src2)>;
// Memory-Integer Subtraction with Overflow
def : Pat<(parallel (store (X86sub_ovf (load addr:$dst), i64immSExt32:$src2),
addr:$dst),
(implicit EFLAGS)),
(SUB64mi32 addr:$dst, i64immSExt32:$src2)>;
def : Pat<(parallel (store (X86sub_ovf (load addr:$dst), i64immSExt8:$src2),
addr:$dst),
(implicit EFLAGS)),
(SUB64mi8 addr:$dst, i64immSExt8:$src2)>;
// Register-Register Signed Integer Multiplication with Overflow
def : Pat<(parallel (X86smul_ovf GR64:$src1, GR64:$src2),
(implicit EFLAGS)),
(IMUL64rr GR64:$src1, GR64:$src2)>;
// Register-Memory Signed Integer Multiplication with Overflow
def : Pat<(parallel (X86smul_ovf GR64:$src1, (load addr:$src2)),
(implicit EFLAGS)),
(IMUL64rm GR64:$src1, addr:$src2)>;
// Register-Integer Signed Integer Multiplication with Overflow
def : Pat<(parallel (X86smul_ovf GR64:$src1, i64immSExt32:$src2),
(implicit EFLAGS)),
(IMUL64rri32 GR64:$src1, i64immSExt32:$src2)>;
def : Pat<(parallel (X86smul_ovf GR64:$src1, i64immSExt8:$src2),
(implicit EFLAGS)),
(IMUL64rri8 GR64:$src1, i64immSExt8:$src2)>;
// Memory-Integer Signed Integer Multiplication with Overflow
def : Pat<(parallel (X86smul_ovf (load addr:$src1), i64immSExt32:$src2),
(implicit EFLAGS)),
(IMUL64rmi32 addr:$src1, i64immSExt32:$src2)>;
def : Pat<(parallel (X86smul_ovf (load addr:$src1), i64immSExt8:$src2),
(implicit EFLAGS)),
(IMUL64rmi8 addr:$src1, i64immSExt8:$src2)>;
//===----------------------------------------------------------------------===//
// X86-64 SSE Instructions


@ -27,9 +27,11 @@ def SDTX86Cmov : SDTypeProfile<1, 4,
[SDTCisSameAs<0, 1>, SDTCisSameAs<1, 2>,
SDTCisVT<3, i8>, SDTCisVT<4, i32>]>;
def SDTArithOvf : SDTypeProfile<1, 2,
[SDTCisSameAs<0, 1>, SDTCisSameAs<0, 2>,
SDTCisInt<0>]>;
def SDTUnaryArithOvf : SDTypeProfile<1, 1,
[SDTCisInt<0>]>;
def SDTBinaryArithOvf : SDTypeProfile<1, 2,
[SDTCisSameAs<0, 1>, SDTCisSameAs<0, 2>,
SDTCisInt<0>]>;
def SDTX86BrCond : SDTypeProfile<0, 3,
[SDTCisVT<0, OtherVT>,
@ -144,9 +146,10 @@ def X86ehret : SDNode<"X86ISD::EH_RETURN", SDT_X86EHRET,
def X86tcret : SDNode<"X86ISD::TC_RETURN", SDT_X86TCRET,
[SDNPHasChain, SDNPOptInFlag]>;
def X86add_ovf : SDNode<"X86ISD::ADD", SDTArithOvf>;
def X86sub_ovf : SDNode<"X86ISD::SUB", SDTArithOvf>;
def X86mul_ovf : SDNode<"X86ISD::MUL", SDTArithOvf>;
def X86add_ovf : SDNode<"X86ISD::ADD", SDTBinaryArithOvf>;
def X86sub_ovf : SDNode<"X86ISD::SUB", SDTBinaryArithOvf>;
def X86smul_ovf : SDNode<"X86ISD::SMUL", SDTBinaryArithOvf>;
def X86umul_ovf : SDNode<"X86ISD::UMUL", SDTUnaryArithOvf>;
//===----------------------------------------------------------------------===//
// X86 Operand Definitions.
@ -717,27 +720,38 @@ def MUL8r : I<0xF6, MRM4r, (outs), (ins GR8:$src), "mul{b}\t$src",
// FIXME: Used for 8-bit mul, ignore result upper 8 bits.
// This probably ought to be moved to a def : Pat<> if the
// syntax can be accepted.
[(set AL, (mul AL, GR8:$src))]>; // AL,AH = AL*GR8
[(set AL, (mul AL, GR8:$src)),
(implicit EFLAGS)]>; // AL,AH = AL*GR8
let Defs = [AX,DX,EFLAGS], Uses = [AX], neverHasSideEffects = 1 in
def MUL16r : I<0xF7, MRM4r, (outs), (ins GR16:$src), "mul{w}\t$src",
[]>, OpSize; // AX,DX = AX*GR16
def MUL16r : I<0xF7, MRM4r, (outs), (ins GR16:$src),
"mul{w}\t$src",
[]>, OpSize; // AX,DX = AX*GR16
let Defs = [EAX,EDX,EFLAGS], Uses = [EAX], neverHasSideEffects = 1 in
def MUL32r : I<0xF7, MRM4r, (outs), (ins GR32:$src), "mul{l}\t$src", []>;
// EAX,EDX = EAX*GR32
def MUL32r : I<0xF7, MRM4r, (outs), (ins GR32:$src),
"mul{l}\t$src",
[]>; // EAX,EDX = EAX*GR32
let Defs = [AL,AH,EFLAGS], Uses = [AL] in
def MUL8m : I<0xF6, MRM4m, (outs), (ins i8mem :$src),
"mul{b}\t$src",
// FIXME: Used for 8-bit mul, ignore result upper 8 bits.
// This probably ought to be moved to a def : Pat<> if the
// syntax can be accepted.
[(set AL, (mul AL, (loadi8 addr:$src)))]>; // AL,AH = AL*[mem8]
[(set AL, (mul AL, (loadi8 addr:$src))),
(implicit EFLAGS)]>; // AL,AH = AL*[mem8]
let mayLoad = 1, neverHasSideEffects = 1 in {
let Defs = [AX,DX,EFLAGS], Uses = [AX] in
def MUL16m : I<0xF7, MRM4m, (outs), (ins i16mem:$src),
"mul{w}\t$src", []>, OpSize; // AX,DX = AX*[mem16]
"mul{w}\t$src",
[]>, OpSize; // AX,DX = AX*[mem16]
let Defs = [EAX,EDX,EFLAGS], Uses = [EAX] in
def MUL32m : I<0xF7, MRM4m, (outs), (ins i32mem:$src),
"mul{l}\t$src", []>; // EAX,EDX = EAX*[mem32]
"mul{l}\t$src",
[]>; // EAX,EDX = EAX*[mem32]
}
let neverHasSideEffects = 1 in {
@ -1935,13 +1949,7 @@ let isCommutable = 1 in { // X = ADD Y, Z --> X = ADD Z, Y
def ADD8rr : I<0x00, MRMDestReg, (outs GR8 :$dst),
(ins GR8 :$src1, GR8 :$src2),
"add{b}\t{$src2, $dst|$dst, $src2}",
[(set GR8:$dst, (add GR8:$src1, GR8:$src2))]>;
// Register-Register Addition with Overflow
def ADDOvf8rr : I<0x00, MRMDestReg, (outs GR8 :$dst),
(ins GR8 :$src1, GR8 :$src2),
"add{b}\t{$src2, $dst|$dst, $src2}",
[(set GR8:$dst, (X86add_ovf GR8:$src1, GR8:$src2)),
[(set GR8:$dst, (add GR8:$src1, GR8:$src2)),
(implicit EFLAGS)]>;
let isConvertibleToThreeAddress = 1 in { // Can transform into LEA.
@ -1949,23 +1957,13 @@ let isConvertibleToThreeAddress = 1 in { // Can transform into LEA.
def ADD16rr : I<0x01, MRMDestReg, (outs GR16:$dst),
(ins GR16:$src1, GR16:$src2),
"add{w}\t{$src2, $dst|$dst, $src2}",
[(set GR16:$dst, (add GR16:$src1, GR16:$src2))]>, OpSize;
[(set GR16:$dst, (add GR16:$src1, GR16:$src2)),
(implicit EFLAGS)]>, OpSize;
def ADD32rr : I<0x01, MRMDestReg, (outs GR32:$dst),
(ins GR32:$src1, GR32:$src2),
"add{l}\t{$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (add GR32:$src1, GR32:$src2))]>;
// Register-Register Addition with Overflow
def ADDOvf16rr : I<0x01, MRMDestReg, (outs GR16:$dst),
(ins GR16:$src1, GR16:$src2),
"add{w}\t{$src2, $dst|$dst, $src2}",
[(set GR16:$dst, (X86add_ovf GR16:$src1, GR16:$src2)),
(implicit EFLAGS)]>, OpSize;
def ADDOvf32rr : I<0x01, MRMDestReg, (outs GR32:$dst),
(ins GR32:$src1, GR32:$src2),
"add{l}\t{$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (X86add_ovf GR32:$src1, GR32:$src2)),
(implicit EFLAGS)]>;
[(set GR32:$dst, (add GR32:$src1, GR32:$src2)),
(implicit EFLAGS)]>;
} // end isConvertibleToThreeAddress
} // end isCommutable
@ -1973,160 +1971,85 @@ def ADDOvf32rr : I<0x01, MRMDestReg, (outs GR32:$dst),
def ADD8rm : I<0x02, MRMSrcMem, (outs GR8 :$dst),
(ins GR8 :$src1, i8mem :$src2),
"add{b}\t{$src2, $dst|$dst, $src2}",
[(set GR8:$dst, (add GR8:$src1, (load addr:$src2)))]>;
[(set GR8:$dst, (add GR8:$src1, (load addr:$src2))),
(implicit EFLAGS)]>;
def ADD16rm : I<0x03, MRMSrcMem, (outs GR16:$dst),
(ins GR16:$src1, i16mem:$src2),
"add{w}\t{$src2, $dst|$dst, $src2}",
[(set GR16:$dst, (add GR16:$src1, (load addr:$src2)))]>,OpSize;
[(set GR16:$dst, (add GR16:$src1, (load addr:$src2))),
(implicit EFLAGS)]>, OpSize;
def ADD32rm : I<0x03, MRMSrcMem, (outs GR32:$dst),
(ins GR32:$src1, i32mem:$src2),
"add{l}\t{$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (add GR32:$src1, (load addr:$src2)))]>;
// Register-Memory Addition with Overflow
def ADDOvf8rm : I<0x02, MRMSrcMem, (outs GR8 :$dst),
(ins GR8 :$src1, i8mem :$src2),
"add{b}\t{$src2, $dst|$dst, $src2}",
[(set GR8:$dst, (X86add_ovf GR8:$src1, (load addr:$src2))),
(implicit EFLAGS)]>;
def ADDOvf16rm : I<0x03, MRMSrcMem, (outs GR16:$dst),
(ins GR16:$src1, i16mem:$src2),
"add{w}\t{$src2, $dst|$dst, $src2}",
[(set GR16:$dst, (X86add_ovf GR16:$src1, (load addr:$src2))),
(implicit EFLAGS)]>, OpSize;
def ADDOvf32rm : I<0x03, MRMSrcMem, (outs GR32:$dst),
(ins GR32:$src1, i32mem:$src2),
"add{l}\t{$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (X86add_ovf GR32:$src1, (load addr:$src2))),
(implicit EFLAGS)]>;
[(set GR32:$dst, (add GR32:$src1, (load addr:$src2))),
(implicit EFLAGS)]>;
// Register-Integer Addition
def ADD8ri : Ii8<0x80, MRM0r, (outs GR8:$dst), (ins GR8:$src1, i8imm:$src2),
"add{b}\t{$src2, $dst|$dst, $src2}",
[(set GR8:$dst, (add GR8:$src1, imm:$src2))]>;
// Register-Integer Addition with Overflow
def ADDOvf8ri : Ii8<0x80, MRM0r, (outs GR8:$dst), (ins GR8:$src1, i8imm:$src2),
"add{b}\t{$src2, $dst|$dst, $src2}",
[(set GR8:$dst, (X86add_ovf GR8:$src1, imm:$src2)),
(implicit EFLAGS)]>;
[(set GR8:$dst, (add GR8:$src1, imm:$src2)),
(implicit EFLAGS)]>;
let isConvertibleToThreeAddress = 1 in { // Can transform into LEA.
// Register-Integer Addition
def ADD16ri : Ii16<0x81, MRM0r, (outs GR16:$dst),
(ins GR16:$src1, i16imm:$src2),
"add{w}\t{$src2, $dst|$dst, $src2}",
[(set GR16:$dst, (add GR16:$src1, imm:$src2))]>, OpSize;
[(set GR16:$dst, (add GR16:$src1, imm:$src2)),
(implicit EFLAGS)]>, OpSize;
def ADD32ri : Ii32<0x81, MRM0r, (outs GR32:$dst),
(ins GR32:$src1, i32imm:$src2),
"add{l}\t{$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (add GR32:$src1, imm:$src2))]>;
[(set GR32:$dst, (add GR32:$src1, imm:$src2)),
(implicit EFLAGS)]>;
def ADD16ri8 : Ii8<0x83, MRM0r, (outs GR16:$dst),
(ins GR16:$src1, i16i8imm:$src2),
"add{w}\t{$src2, $dst|$dst, $src2}",
[(set GR16:$dst, (add GR16:$src1, i16immSExt8:$src2))]>, OpSize;
[(set GR16:$dst, (add GR16:$src1, i16immSExt8:$src2)),
(implicit EFLAGS)]>, OpSize;
def ADD32ri8 : Ii8<0x83, MRM0r, (outs GR32:$dst),
(ins GR32:$src1, i32i8imm:$src2),
"add{l}\t{$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (add GR32:$src1, i32immSExt8:$src2))]>;
// Register-Integer Addition with Overflow
def ADDOvf16ri : Ii16<0x81, MRM0r, (outs GR16:$dst),
(ins GR16:$src1, i16imm:$src2),
"add{w}\t{$src2, $dst|$dst, $src2}",
[(set GR16:$dst, (X86add_ovf GR16:$src1, imm:$src2)),
(implicit EFLAGS)]>, OpSize;
def ADDOvf32ri : Ii32<0x81, MRM0r, (outs GR32:$dst),
(ins GR32:$src1, i32imm:$src2),
"add{l}\t{$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (X86add_ovf GR32:$src1, imm:$src2)),
(implicit EFLAGS)]>;
def ADDOvf16ri8 : Ii8<0x83, MRM0r, (outs GR16:$dst),
(ins GR16:$src1, i16i8imm:$src2),
"add{w}\t{$src2, $dst|$dst, $src2}",
[(set GR16:$dst, (X86add_ovf GR16:$src1,
i16immSExt8:$src2)),
(implicit EFLAGS)]>, OpSize;
def ADDOvf32ri8 : Ii8<0x83, MRM0r, (outs GR32:$dst),
(ins GR32:$src1, i32i8imm:$src2),
"add{l}\t{$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (X86add_ovf GR32:$src1,
i32immSExt8:$src2)),
(implicit EFLAGS)]>;
[(set GR32:$dst, (add GR32:$src1, i32immSExt8:$src2)),
(implicit EFLAGS)]>;
}
let isTwoAddress = 0 in {
// Memory-Register Addition
def ADD8mr : I<0x00, MRMDestMem, (outs), (ins i8mem :$dst, GR8 :$src2),
def ADD8mr : I<0x00, MRMDestMem, (outs), (ins i8mem:$dst, GR8:$src2),
"add{b}\t{$src2, $dst|$dst, $src2}",
[(store (add (load addr:$dst), GR8:$src2), addr:$dst)]>;
[(store (add (load addr:$dst), GR8:$src2), addr:$dst),
(implicit EFLAGS)]>;
def ADD16mr : I<0x01, MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src2),
"add{w}\t{$src2, $dst|$dst, $src2}",
[(store (add (load addr:$dst), GR16:$src2), addr:$dst)]>,
OpSize;
[(store (add (load addr:$dst), GR16:$src2), addr:$dst),
(implicit EFLAGS)]>, OpSize;
def ADD32mr : I<0x01, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src2),
"add{l}\t{$src2, $dst|$dst, $src2}",
[(store (add (load addr:$dst), GR32:$src2), addr:$dst)]>;
[(store (add (load addr:$dst), GR32:$src2), addr:$dst),
(implicit EFLAGS)]>;
def ADD8mi : Ii8<0x80, MRM0m, (outs), (ins i8mem :$dst, i8imm :$src2),
"add{b}\t{$src2, $dst|$dst, $src2}",
[(store (add (loadi8 addr:$dst), imm:$src2), addr:$dst)]>;
[(store (add (loadi8 addr:$dst), imm:$src2), addr:$dst),
(implicit EFLAGS)]>;
def ADD16mi : Ii16<0x81, MRM0m, (outs), (ins i16mem:$dst, i16imm:$src2),
"add{w}\t{$src2, $dst|$dst, $src2}",
[(store (add (loadi16 addr:$dst), imm:$src2), addr:$dst)]>,
OpSize;
[(store (add (loadi16 addr:$dst), imm:$src2), addr:$dst),
(implicit EFLAGS)]>, OpSize;
def ADD32mi : Ii32<0x81, MRM0m, (outs), (ins i32mem:$dst, i32imm:$src2),
"add{l}\t{$src2, $dst|$dst, $src2}",
[(store (add (loadi32 addr:$dst), imm:$src2), addr:$dst)]>;
[(store (add (loadi32 addr:$dst), imm:$src2), addr:$dst),
(implicit EFLAGS)]>;
def ADD16mi8 : Ii8<0x83, MRM0m, (outs), (ins i16mem:$dst, i16i8imm :$src2),
"add{w}\t{$src2, $dst|$dst, $src2}",
[(store (add (load addr:$dst), i16immSExt8:$src2),
addr:$dst)]>, OpSize;
[(store (add (load addr:$dst), i16immSExt8:$src2),
addr:$dst),
(implicit EFLAGS)]>, OpSize;
def ADD32mi8 : Ii8<0x83, MRM0m, (outs), (ins i32mem:$dst, i32i8imm :$src2),
"add{l}\t{$src2, $dst|$dst, $src2}",
[(store (add (load addr:$dst), i32immSExt8:$src2),
addr:$dst)]>;
// Memory-Register Addition with Overflow
def ADDOvf8mr : I<0x00, MRMDestMem, (outs), (ins i8mem :$dst, GR8 :$src2),
"add{b}\t{$src2, $dst|$dst, $src2}",
[(store (X86add_ovf (load addr:$dst), GR8:$src2),
addr:$dst),
(implicit EFLAGS)]>;
def ADDOvf16mr : I<0x01, MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src2),
"add{w}\t{$src2, $dst|$dst, $src2}",
[(store (X86add_ovf (load addr:$dst), GR16:$src2),
addr:$dst),
(implicit EFLAGS)]>, OpSize;
def ADDOvf32mr : I<0x01, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src2),
"add{l}\t{$src2, $dst|$dst, $src2}",
[(store (X86add_ovf (load addr:$dst), GR32:$src2),
addr:$dst),
(implicit EFLAGS)]>;
def ADDOvf8mi : Ii8<0x80, MRM0m, (outs), (ins i8mem :$dst, i8imm :$src2),
"add{b}\t{$src2, $dst|$dst, $src2}",
[(store (X86add_ovf (loadi8 addr:$dst), imm:$src2),
addr:$dst),
(implicit EFLAGS)]>;
def ADDOvf16mi : Ii16<0x81, MRM0m, (outs), (ins i16mem:$dst, i16imm:$src2),
"add{w}\t{$src2, $dst|$dst, $src2}",
[(store (X86add_ovf (loadi16 addr:$dst), imm:$src2),
addr:$dst),
(implicit EFLAGS)]>, OpSize;
def ADDOvf32mi : Ii32<0x81, MRM0m, (outs), (ins i32mem:$dst, i32imm:$src2),
"add{l}\t{$src2, $dst|$dst, $src2}",
[(store (X86add_ovf (loadi32 addr:$dst), imm:$src2),
addr:$dst),
(implicit EFLAGS)]>;
def ADDOvf16mi8 : Ii8<0x83, MRM0m, (outs), (ins i16mem:$dst, i16i8imm :$src2),
"add{w}\t{$src2, $dst|$dst, $src2}",
[(store (X86add_ovf (load addr:$dst),i16immSExt8:$src2),
addr:$dst),
(implicit EFLAGS)]>, OpSize;
def ADDOvf32mi8 : Ii8<0x83, MRM0m, (outs), (ins i32mem:$dst, i32i8imm :$src2),
"add{l}\t{$src2, $dst|$dst, $src2}",
[(store (X86add_ovf (load addr:$dst),i32immSExt8:$src2),
addr:$dst),
(implicit EFLAGS)]>;
addr:$dst),
(implicit EFLAGS)]>;
}
let Uses = [EFLAGS] in {
@ -2161,190 +2084,99 @@ let isTwoAddress = 0 in {
// Register-Register Subtraction
def SUB8rr : I<0x28, MRMDestReg, (outs GR8:$dst), (ins GR8:$src1, GR8:$src2),
"sub{b}\t{$src2, $dst|$dst, $src2}",
[(set GR8:$dst, (sub GR8:$src1, GR8:$src2))]>;
[(set GR8:$dst, (sub GR8:$src1, GR8:$src2)),
(implicit EFLAGS)]>;
def SUB16rr : I<0x29, MRMDestReg, (outs GR16:$dst), (ins GR16:$src1,GR16:$src2),
"sub{w}\t{$src2, $dst|$dst, $src2}",
[(set GR16:$dst, (sub GR16:$src1, GR16:$src2))]>, OpSize;
[(set GR16:$dst, (sub GR16:$src1, GR16:$src2)),
(implicit EFLAGS)]>, OpSize;
def SUB32rr : I<0x29, MRMDestReg, (outs GR32:$dst), (ins GR32:$src1,GR32:$src2),
"sub{l}\t{$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (sub GR32:$src1, GR32:$src2))]>;
// Register-Register Subtraction with Overflow
def SUBOvf8rr : I<0x28, MRMDestReg, (outs GR8:$dst),
(ins GR8:$src1, GR8:$src2),
"sub{b}\t{$src2, $dst|$dst, $src2}",
[(set GR8:$dst, (X86sub_ovf GR8:$src1, GR8:$src2)),
(implicit EFLAGS)]>;
def SUBOvf16rr : I<0x29, MRMDestReg, (outs GR16:$dst),
(ins GR16:$src1, GR16:$src2),
"sub{w}\t{$src2, $dst|$dst, $src2}",
[(set GR16:$dst, (X86sub_ovf GR16:$src1, GR16:$src2)),
(implicit EFLAGS)]>, OpSize;
def SUBOvf32rr : I<0x29, MRMDestReg, (outs GR32:$dst),
(ins GR32:$src1, GR32:$src2),
"sub{l}\t{$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (X86sub_ovf GR32:$src1, GR32:$src2)),
(implicit EFLAGS)]>;
[(set GR32:$dst, (sub GR32:$src1, GR32:$src2)),
(implicit EFLAGS)]>;
// Register-Memory Subtraction
def SUB8rm : I<0x2A, MRMSrcMem, (outs GR8 :$dst),
(ins GR8 :$src1, i8mem :$src2),
"sub{b}\t{$src2, $dst|$dst, $src2}",
[(set GR8:$dst, (sub GR8:$src1, (load addr:$src2)))]>;
[(set GR8:$dst, (sub GR8:$src1, (load addr:$src2))),
(implicit EFLAGS)]>;
def SUB16rm : I<0x2B, MRMSrcMem, (outs GR16:$dst),
(ins GR16:$src1, i16mem:$src2),
"sub{w}\t{$src2, $dst|$dst, $src2}",
[(set GR16:$dst, (sub GR16:$src1, (load addr:$src2)))]>, OpSize;
[(set GR16:$dst, (sub GR16:$src1, (load addr:$src2))),
(implicit EFLAGS)]>, OpSize;
def SUB32rm : I<0x2B, MRMSrcMem, (outs GR32:$dst),
(ins GR32:$src1, i32mem:$src2),
"sub{l}\t{$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (sub GR32:$src1, (load addr:$src2)))]>;
// Register-Memory Subtraction with Overflow
def SUBOvf8rm : I<0x2A, MRMSrcMem, (outs GR8:$dst),
(ins GR8:$src1, i8mem:$src2),
"sub{b}\t{$src2, $dst|$dst, $src2}",
[(set GR8:$dst, (X86sub_ovf GR8:$src1, (load addr:$src2))),
(implicit EFLAGS)]>;
def SUBOvf16rm : I<0x2B, MRMSrcMem, (outs GR16:$dst),
(ins GR16:$src1, i16mem:$src2),
"sub{w}\t{$src2, $dst|$dst, $src2}",
[(set GR16:$dst, (X86sub_ovf GR16:$src1, (load addr:$src2))),
(implicit EFLAGS)]>, OpSize;
def SUBOvf32rm : I<0x2B, MRMSrcMem, (outs GR32:$dst),
(ins GR32:$src1, i32mem:$src2),
"sub{l}\t{$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (X86sub_ovf GR32:$src1, (load addr:$src2))),
(implicit EFLAGS)]>;
[(set GR32:$dst, (sub GR32:$src1, (load addr:$src2))),
(implicit EFLAGS)]>;
// Register-Integer Subtraction
def SUB8ri : Ii8 <0x80, MRM5r, (outs GR8:$dst),
(ins GR8:$src1, i8imm:$src2),
"sub{b}\t{$src2, $dst|$dst, $src2}",
[(set GR8:$dst, (sub GR8:$src1, imm:$src2))]>;
[(set GR8:$dst, (sub GR8:$src1, imm:$src2)),
(implicit EFLAGS)]>;
def SUB16ri : Ii16<0x81, MRM5r, (outs GR16:$dst),
(ins GR16:$src1, i16imm:$src2),
"sub{w}\t{$src2, $dst|$dst, $src2}",
[(set GR16:$dst, (sub GR16:$src1, imm:$src2))]>, OpSize;
[(set GR16:$dst, (sub GR16:$src1, imm:$src2)),
(implicit EFLAGS)]>, OpSize;
def SUB32ri : Ii32<0x81, MRM5r, (outs GR32:$dst),
(ins GR32:$src1, i32imm:$src2),
"sub{l}\t{$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (sub GR32:$src1, imm:$src2))]>;
[(set GR32:$dst, (sub GR32:$src1, imm:$src2)),
(implicit EFLAGS)]>;
def SUB16ri8 : Ii8<0x83, MRM5r, (outs GR16:$dst),
(ins GR16:$src1, i16i8imm:$src2),
"sub{w}\t{$src2, $dst|$dst, $src2}",
[(set GR16:$dst, (sub GR16:$src1, i16immSExt8:$src2))]>,
OpSize;
[(set GR16:$dst, (sub GR16:$src1, i16immSExt8:$src2)),
(implicit EFLAGS)]>, OpSize;
def SUB32ri8 : Ii8<0x83, MRM5r, (outs GR32:$dst),
(ins GR32:$src1, i32i8imm:$src2),
"sub{l}\t{$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (sub GR32:$src1, i32immSExt8:$src2))]>;
// Register-Integer Subtraction with Overflow
def SUBOvf8ri : Ii8 <0x80, MRM5r, (outs GR8:$dst),
(ins GR8:$src1, i8imm:$src2),
"sub{b}\t{$src2, $dst|$dst, $src2}",
[(set GR8:$dst, (X86sub_ovf GR8:$src1, imm:$src2)),
(implicit EFLAGS)]>;
def SUBOvf16ri : Ii16<0x81, MRM5r, (outs GR16:$dst),
(ins GR16:$src1, i16imm:$src2),
"sub{w}\t{$src2, $dst|$dst, $src2}",
[(set GR16:$dst, (X86sub_ovf GR16:$src1, imm:$src2)),
(implicit EFLAGS)]>, OpSize;
def SUBOvf32ri : Ii32<0x81, MRM5r, (outs GR32:$dst),
(ins GR32:$src1, i32imm:$src2),
"sub{l}\t{$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (X86sub_ovf GR32:$src1, imm:$src2)),
(implicit EFLAGS)]>;
def SUBOvf16ri8 : Ii8<0x83, MRM5r, (outs GR16:$dst),
(ins GR16:$src1, i16i8imm:$src2),
"sub{w}\t{$src2, $dst|$dst, $src2}",
[(set GR16:$dst, (X86sub_ovf GR16:$src1,
i16immSExt8:$src2)),
(implicit EFLAGS)]>, OpSize;
def SUBOvf32ri8 : Ii8<0x83, MRM5r, (outs GR32:$dst),
(ins GR32:$src1, i32i8imm:$src2),
"sub{l}\t{$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (X86sub_ovf GR32:$src1,
i32immSExt8:$src2)),
(implicit EFLAGS)]>;
[(set GR32:$dst, (sub GR32:$src1, i32immSExt8:$src2)),
(implicit EFLAGS)]>;
let isTwoAddress = 0 in {
// Memory-Register Subtraction
def SUB8mr : I<0x28, MRMDestMem, (outs), (ins i8mem :$dst, GR8 :$src2),
"sub{b}\t{$src2, $dst|$dst, $src2}",
[(store (sub (load addr:$dst), GR8:$src2), addr:$dst)]>;
[(store (sub (load addr:$dst), GR8:$src2), addr:$dst),
(implicit EFLAGS)]>;
def SUB16mr : I<0x29, MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src2),
"sub{w}\t{$src2, $dst|$dst, $src2}",
[(store (sub (load addr:$dst), GR16:$src2), addr:$dst)]>,
OpSize;
[(store (sub (load addr:$dst), GR16:$src2), addr:$dst),
(implicit EFLAGS)]>, OpSize;
def SUB32mr : I<0x29, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src2),
"sub{l}\t{$src2, $dst|$dst, $src2}",
[(store (sub (load addr:$dst), GR32:$src2), addr:$dst)]>;
// Memory-Register Subtraction with Overflow
def SUBOvf8mr : I<0x28, MRMDestMem, (outs), (ins i8mem :$dst, GR8 :$src2),
"sub{b}\t{$src2, $dst|$dst, $src2}",
[(store (X86sub_ovf (load addr:$dst), GR8:$src2),
addr:$dst),
(implicit EFLAGS)]>;
def SUBOvf16mr : I<0x29, MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src2),
"sub{w}\t{$src2, $dst|$dst, $src2}",
[(store (X86sub_ovf (load addr:$dst), GR16:$src2),
addr:$dst),
(implicit EFLAGS)]>, OpSize;
def SUBOvf32mr : I<0x29, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src2),
"sub{l}\t{$src2, $dst|$dst, $src2}",
[(store (X86sub_ovf (load addr:$dst), GR32:$src2),
addr:$dst),
(implicit EFLAGS)]>;
[(store (sub (load addr:$dst), GR32:$src2), addr:$dst),
(implicit EFLAGS)]>;
// Memory-Integer Subtraction
def SUB8mi : Ii8<0x80, MRM5m, (outs), (ins i8mem :$dst, i8imm:$src2),
"sub{b}\t{$src2, $dst|$dst, $src2}",
[(store (sub (loadi8 addr:$dst), imm:$src2), addr:$dst)]>;
[(store (sub (loadi8 addr:$dst), imm:$src2), addr:$dst),
(implicit EFLAGS)]>;
def SUB16mi : Ii16<0x81, MRM5m, (outs), (ins i16mem:$dst, i16imm:$src2),
"sub{w}\t{$src2, $dst|$dst, $src2}",
[(store (sub (loadi16 addr:$dst), imm:$src2),addr:$dst)]>,
OpSize;
[(store (sub (loadi16 addr:$dst), imm:$src2),addr:$dst),
(implicit EFLAGS)]>, OpSize;
def SUB32mi : Ii32<0x81, MRM5m, (outs), (ins i32mem:$dst, i32imm:$src2),
"sub{l}\t{$src2, $dst|$dst, $src2}",
[(store (sub (loadi32 addr:$dst), imm:$src2),addr:$dst)]>;
[(store (sub (loadi32 addr:$dst), imm:$src2),addr:$dst),
(implicit EFLAGS)]>;
def SUB16mi8 : Ii8<0x83, MRM5m, (outs), (ins i16mem:$dst, i16i8imm :$src2),
"sub{w}\t{$src2, $dst|$dst, $src2}",
[(store (sub (load addr:$dst), i16immSExt8:$src2),
addr:$dst)]>, OpSize;
addr:$dst),
(implicit EFLAGS)]>, OpSize;
def SUB32mi8 : Ii8<0x83, MRM5m, (outs), (ins i32mem:$dst, i32i8imm :$src2),
"sub{l}\t{$src2, $dst|$dst, $src2}",
[(store (sub (load addr:$dst), i32immSExt8:$src2),
addr:$dst)]>;
// Memory-Integer Subtraction with Overflow
def SUBOvf8mi : Ii8<0x80, MRM5m, (outs), (ins i8mem :$dst, i8imm:$src2),
"sub{b}\t{$src2, $dst|$dst, $src2}",
[(store (X86sub_ovf (loadi8 addr:$dst), imm:$src2),
addr:$dst),
(implicit EFLAGS)]>;
def SUBOvf16mi : Ii16<0x81, MRM5m, (outs), (ins i16mem:$dst, i16imm:$src2),
"sub{w}\t{$src2, $dst|$dst, $src2}",
[(store (X86sub_ovf (loadi16 addr:$dst), imm:$src2),
addr:$dst),
(implicit EFLAGS)]>, OpSize;
def SUBOvf32mi : Ii32<0x81, MRM5m, (outs), (ins i32mem:$dst, i32imm:$src2),
"sub{l}\t{$src2, $dst|$dst, $src2}",
[(store (X86sub_ovf (loadi32 addr:$dst), imm:$src2),
addr:$dst),
(implicit EFLAGS)]>;
def SUBOvf16mi8 : Ii8<0x83, MRM5m, (outs), (ins i16mem:$dst, i16i8imm :$src2),
"sub{w}\t{$src2, $dst|$dst, $src2}",
[(store (X86sub_ovf (load addr:$dst),i16immSExt8:$src2),
addr:$dst),
(implicit EFLAGS)]>, OpSize;
def SUBOvf32mi8 : Ii8<0x83, MRM5m, (outs), (ins i32mem:$dst, i32i8imm :$src2),
"sub{l}\t{$src2, $dst|$dst, $src2}",
[(store (X86sub_ovf (load addr:$dst),i32immSExt8:$src2),
addr:$dst),
(implicit EFLAGS)]>;
addr:$dst),
(implicit EFLAGS)]>;
}
let Uses = [EFLAGS] in {
@ -2380,143 +2212,77 @@ def SBB32ri8 : Ii8<0x83, MRM3r, (outs GR32:$dst), (ins GR32:$src1, i32i8imm:$src
let Defs = [EFLAGS] in {
let isCommutable = 1 in { // X = IMUL Y, Z --> X = IMUL Z, Y
// Register-Register Integer Multiply
// Register-Register Signed Integer Multiply
def IMUL16rr : I<0xAF, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src1,GR16:$src2),
"imul{w}\t{$src2, $dst|$dst, $src2}",
[(set GR16:$dst, (mul GR16:$src1, GR16:$src2))]>, TB, OpSize;
[(set GR16:$dst, (mul GR16:$src1, GR16:$src2)),
(implicit EFLAGS)]>, TB, OpSize;
def IMUL32rr : I<0xAF, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src1,GR32:$src2),
"imul{l}\t{$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (mul GR32:$src1, GR32:$src2))]>, TB;
// Register-Register Integer Multiply
def IMULOvf16rr : I<0xAF, MRMSrcReg, (outs GR16:$dst),
(ins GR16:$src1, GR16:$src2),
"imul{w}\t{$src2, $dst|$dst, $src2}",
[(set GR16:$dst, (X86mul_ovf GR16:$src1, GR16:$src2)),
(implicit EFLAGS)]>, TB, OpSize;
def IMULOvf32rr : I<0xAF, MRMSrcReg, (outs GR32:$dst),
(ins GR32:$src1, GR32:$src2),
"imul{l}\t{$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (X86mul_ovf GR32:$src1, GR32:$src2)),
(implicit EFLAGS)]>, TB;
[(set GR32:$dst, (mul GR32:$src1, GR32:$src2)),
(implicit EFLAGS)]>, TB;
}
// Register-Memory Integer Multiply
// Register-Memory Signed Integer Multiply
def IMUL16rm : I<0xAF, MRMSrcMem, (outs GR16:$dst),
(ins GR16:$src1, i16mem:$src2),
"imul{w}\t{$src2, $dst|$dst, $src2}",
[(set GR16:$dst, (mul GR16:$src1, (load addr:$src2)))]>,
TB, OpSize;
[(set GR16:$dst, (mul GR16:$src1, (load addr:$src2))),
(implicit EFLAGS)]>, TB, OpSize;
def IMUL32rm : I<0xAF, MRMSrcMem, (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
"imul{l}\t{$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (mul GR32:$src1, (load addr:$src2)))]>, TB;
// Register-Memory Integer Multiply with Overflow
def IMULOvf16rm : I<0xAF, MRMSrcMem, (outs GR16:$dst),
(ins GR16:$src1, i16mem:$src2),
"imul{w}\t{$src2, $dst|$dst, $src2}",
[(set GR16:$dst, (X86mul_ovf GR16:$src1,(load addr:$src2))),
(implicit EFLAGS)]>,
TB, OpSize;
def IMULOvf32rm : I<0xAF, MRMSrcMem, (outs GR32:$dst),
(ins GR32:$src1, i32mem:$src2),
"imul{l}\t{$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (X86mul_ovf GR32:$src1,(load addr:$src2))),
(implicit EFLAGS)]>, TB;
[(set GR32:$dst, (mul GR32:$src1, (load addr:$src2))),
(implicit EFLAGS)]>, TB;
} // Defs = [EFLAGS]
} // end Two Address instructions
// Suprisingly enough, these are not two address instructions!
let Defs = [EFLAGS] in {
// Register-Integer Integer Multiply
// Register-Integer Signed Integer Multiply
def IMUL16rri : Ii16<0x69, MRMSrcReg, // GR16 = GR16*I16
(outs GR16:$dst), (ins GR16:$src1, i16imm:$src2),
"imul{w}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set GR16:$dst, (mul GR16:$src1, imm:$src2))]>, OpSize;
[(set GR16:$dst, (mul GR16:$src1, imm:$src2)),
(implicit EFLAGS)]>, OpSize;
def IMUL32rri : Ii32<0x69, MRMSrcReg, // GR32 = GR32*I32
(outs GR32:$dst), (ins GR32:$src1, i32imm:$src2),
"imul{l}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set GR32:$dst, (mul GR32:$src1, imm:$src2))]>;
[(set GR32:$dst, (mul GR32:$src1, imm:$src2)),
(implicit EFLAGS)]>;
def IMUL16rri8 : Ii8<0x6B, MRMSrcReg, // GR16 = GR16*I8
(outs GR16:$dst), (ins GR16:$src1, i16i8imm:$src2),
"imul{w}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set GR16:$dst, (mul GR16:$src1, i16immSExt8:$src2))]>,
OpSize;
[(set GR16:$dst, (mul GR16:$src1, i16immSExt8:$src2)),
(implicit EFLAGS)]>, OpSize;
def IMUL32rri8 : Ii8<0x6B, MRMSrcReg, // GR32 = GR32*I8
(outs GR32:$dst), (ins GR32:$src1, i32i8imm:$src2),
"imul{l}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set GR32:$dst, (mul GR32:$src1, i32immSExt8:$src2))]>;
[(set GR32:$dst, (mul GR32:$src1, i32immSExt8:$src2)),
(implicit EFLAGS)]>;
// Register-Integer Integer Multiply with Overflow
def IMULOvf16rri : Ii16<0x69, MRMSrcReg, // GR16 = GR16*I16
(outs GR16:$dst), (ins GR16:$src1, i16imm:$src2),
"imul{w}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set GR16:$dst, (X86mul_ovf GR16:$src1, imm:$src2)),
(implicit EFLAGS)]>, OpSize;
def IMULOvf32rri : Ii32<0x69, MRMSrcReg, // GR32 = GR32*I32
(outs GR32:$dst), (ins GR32:$src1, i32imm:$src2),
"imul{l}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set GR32:$dst, (X86mul_ovf GR32:$src1, imm:$src2)),
(implicit EFLAGS)]>;
def IMULOvf16rri8 : Ii8<0x6B, MRMSrcReg, // GR16 = GR16*I8
(outs GR16:$dst), (ins GR16:$src1, i16i8imm:$src2),
"imul{w}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set GR16:$dst, (X86mul_ovf GR16:$src1,
i16immSExt8:$src2)),
(implicit EFLAGS)]>, OpSize;
def IMULOvf32rri8 : Ii8<0x6B, MRMSrcReg, // GR32 = GR32*I8
(outs GR32:$dst), (ins GR32:$src1, i32i8imm:$src2),
"imul{l}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set GR32:$dst, (X86mul_ovf GR32:$src1,
i32immSExt8:$src2)),
(implicit EFLAGS)]>;
// Memory-Integer Integer Multiply
// Memory-Integer Signed Integer Multiply
def IMUL16rmi : Ii16<0x69, MRMSrcMem, // GR16 = [mem16]*I16
(outs GR16:$dst), (ins i16mem:$src1, i16imm:$src2),
"imul{w}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set GR16:$dst, (mul (load addr:$src1), imm:$src2))]>,
OpSize;
[(set GR16:$dst, (mul (load addr:$src1), imm:$src2)),
(implicit EFLAGS)]>, OpSize;
def IMUL32rmi : Ii32<0x69, MRMSrcMem, // GR32 = [mem32]*I32
(outs GR32:$dst), (ins i32mem:$src1, i32imm:$src2),
"imul{l}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set GR32:$dst, (mul (load addr:$src1), imm:$src2))]>;
[(set GR32:$dst, (mul (load addr:$src1), imm:$src2)),
(implicit EFLAGS)]>;
def IMUL16rmi8 : Ii8<0x6B, MRMSrcMem, // GR16 = [mem16]*I8
(outs GR16:$dst), (ins i16mem:$src1, i16i8imm :$src2),
"imul{w}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set GR16:$dst, (mul (load addr:$src1),
i16immSExt8:$src2))]>, OpSize;
i16immSExt8:$src2)),
(implicit EFLAGS)]>, OpSize;
def IMUL32rmi8 : Ii8<0x6B, MRMSrcMem, // GR32 = [mem32]*I8
(outs GR32:$dst), (ins i32mem:$src1, i32i8imm: $src2),
"imul{l}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set GR32:$dst, (mul (load addr:$src1),
i32immSExt8:$src2))]>;
// Memory-Integer Integer Multiply with Overflow
def IMULOvf16rmi : Ii16<0x69, MRMSrcMem, // GR16 = [mem16]*I16
(outs GR16:$dst), (ins i16mem:$src1, i16imm:$src2),
"imul{w}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set GR16:$dst, (X86mul_ovf (load addr:$src1),
imm:$src2)),
(implicit EFLAGS)]>, OpSize;
def IMULOvf32rmi : Ii32<0x69, MRMSrcMem, // GR32 = [mem32]*I32
(outs GR32:$dst), (ins i32mem:$src1, i32imm:$src2),
"imul{l}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set GR32:$dst, (X86mul_ovf (load addr:$src1),
imm:$src2)),
(implicit EFLAGS)]>;
def IMULOvf16rmi8 : Ii8<0x6B, MRMSrcMem, // GR16 = [mem16]*I8
(outs GR16:$dst), (ins i16mem:$src1, i16i8imm :$src2),
"imul{w}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set GR16:$dst, (X86mul_ovf (load addr:$src1),
i16immSExt8:$src2)),
(implicit EFLAGS)]>, OpSize;
def IMULOvf32rmi8 : Ii8<0x6B, MRMSrcMem, // GR32 = [mem32]*I8
(outs GR32:$dst), (ins i32mem:$src1, i32i8imm: $src2),
"imul{l}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set GR32:$dst, (X86mul_ovf (load addr:$src1),
i32immSExt8:$src2)),
(implicit EFLAGS)]>;
i32immSExt8:$src2)),
(implicit EFLAGS)]>;
} // Defs = [EFLAGS]
//===----------------------------------------------------------------------===//
@ -3493,6 +3259,207 @@ def : Pat<(store (shld (loadi16 addr:$dst), (i8 imm:$amt1),
GR16:$src2, (i8 imm:$amt2)), addr:$dst),
(SHLD16mri8 addr:$dst, GR16:$src2, (i8 imm:$amt1))>;
//===----------------------------------------------------------------------===//
// Overflow Patterns
//===----------------------------------------------------------------------===//
// Register-Register Addition with Overflow
def : Pat<(parallel (X86add_ovf GR8:$src1, GR8:$src2),
(implicit EFLAGS)),
(ADD8rr GR8:$src1, GR8:$src2)>;
// Register-Register Addition with Overflow
def : Pat<(parallel (X86add_ovf GR16:$src1, GR16:$src2),
(implicit EFLAGS)),
(ADD16rr GR16:$src1, GR16:$src2)>;
def : Pat<(parallel (X86add_ovf GR32:$src1, GR32:$src2),
(implicit EFLAGS)),
(ADD32rr GR32:$src1, GR32:$src2)>;
// Register-Memory Addition with Overflow
def : Pat<(parallel (X86add_ovf GR8:$src1, (load addr:$src2)),
(implicit EFLAGS)),
(ADD8rm GR8:$src1, addr:$src2)>;
def : Pat<(parallel (X86add_ovf GR16:$src1, (load addr:$src2)),
(implicit EFLAGS)),
(ADD16rm GR16:$src1, addr:$src2)>;
def : Pat<(parallel (X86add_ovf GR32:$src1, (load addr:$src2)),
(implicit EFLAGS)),
(ADD32rm GR32:$src1, addr:$src2)>;
// Register-Integer Addition with Overflow
def : Pat<(parallel (X86add_ovf GR8:$src1, imm:$src2),
(implicit EFLAGS)),
(ADD8ri GR8:$src1, imm:$src2)>;
// Register-Integer Addition with Overflow
def : Pat<(parallel (X86add_ovf GR16:$src1, imm:$src2),
(implicit EFLAGS)),
(ADD16ri GR16:$src1, imm:$src2)>;
def : Pat<(parallel (X86add_ovf GR32:$src1, imm:$src2),
(implicit EFLAGS)),
(ADD32ri GR32:$src1, imm:$src2)>;
def : Pat<(parallel (X86add_ovf GR16:$src1, i16immSExt8:$src2),
(implicit EFLAGS)),
(ADD16ri8 GR16:$src1, i16immSExt8:$src2)>;
def : Pat<(parallel (X86add_ovf GR32:$src1, i32immSExt8:$src2),
(implicit EFLAGS)),
(ADD32ri8 GR32:$src1, i32immSExt8:$src2)>;
// Memory-Register Addition with Overflow
def : Pat<(parallel (store (X86add_ovf (load addr:$dst), GR8:$src2),
addr:$dst),
(implicit EFLAGS)),
(ADD8mr addr:$dst, GR8:$src2)>;
def : Pat<(parallel (store (X86add_ovf (load addr:$dst), GR16:$src2),
addr:$dst),
(implicit EFLAGS)),
(ADD16mr addr:$dst, GR16:$src2)>;
def : Pat<(parallel (store (X86add_ovf (load addr:$dst), GR32:$src2),
addr:$dst),
(implicit EFLAGS)),
(ADD32mr addr:$dst, GR32:$src2)>;
def : Pat<(parallel (store (X86add_ovf (loadi8 addr:$dst), imm:$src2),
addr:$dst),
(implicit EFLAGS)),
(ADD8mi addr:$dst, imm:$src2)>;
def : Pat<(parallel (store (X86add_ovf (loadi16 addr:$dst), imm:$src2),
addr:$dst),
(implicit EFLAGS)),
(ADD16mi addr:$dst, imm:$src2)>;
def : Pat<(parallel (store (X86add_ovf (loadi32 addr:$dst), imm:$src2),
addr:$dst),
(implicit EFLAGS)),
(ADD32mi addr:$dst, imm:$src2)>;
def : Pat<(parallel (store (X86add_ovf (load addr:$dst), i16immSExt8:$src2),
addr:$dst),
(implicit EFLAGS)),
(ADD16mi8 addr:$dst, i16immSExt8:$src2)>;
def : Pat<(parallel (store (X86add_ovf (load addr:$dst), i32immSExt8:$src2),
addr:$dst),
(implicit EFLAGS)),
(ADD32mi8 addr:$dst, i32immSExt8:$src2)>;
// Register-Register Subtraction with Overflow
def : Pat<(parallel (X86sub_ovf GR8:$src1, GR8:$src2),
(implicit EFLAGS)),
(SUB8rr GR8:$src1, GR8:$src2)>;
def : Pat<(parallel (X86sub_ovf GR16:$src1, GR16:$src2),
(implicit EFLAGS)),
(SUB16rr GR16:$src1, GR16:$src2)>;
def : Pat<(parallel (X86sub_ovf GR32:$src1, GR32:$src2),
(implicit EFLAGS)),
(SUB32rr GR32:$src1, GR32:$src2)>;
// Register-Memory Subtraction with Overflow
def : Pat<(parallel (X86sub_ovf GR8:$src1, (load addr:$src2)),
(implicit EFLAGS)),
(SUB8rm GR8:$src1, addr:$src2)>;
def : Pat<(parallel (X86sub_ovf GR16:$src1, (load addr:$src2)),
(implicit EFLAGS)),
(SUB16rm GR16:$src1, addr:$src2)>;
def : Pat<(parallel (X86sub_ovf GR32:$src1, (load addr:$src2)),
(implicit EFLAGS)),
(SUB32rm GR32:$src1, addr:$src2)>;
// Register-Integer Subtraction with Overflow
def : Pat<(parallel (X86sub_ovf GR8:$src1, imm:$src2),
(implicit EFLAGS)),
(SUB8ri GR8:$src1, imm:$src2)>;
def : Pat<(parallel (X86sub_ovf GR16:$src1, imm:$src2),
(implicit EFLAGS)),
(SUB16ri GR16:$src1, imm:$src2)>;
def : Pat<(parallel (X86sub_ovf GR32:$src1, imm:$src2),
(implicit EFLAGS)),
(SUB32ri GR32:$src1, imm:$src2)>;
def : Pat<(parallel (X86sub_ovf GR16:$src1, i16immSExt8:$src2),
(implicit EFLAGS)),
(SUB16ri8 GR16:$src1, i16immSExt8:$src2)>;
def : Pat<(parallel (X86sub_ovf GR32:$src1, i32immSExt8:$src2),
(implicit EFLAGS)),
(SUB32ri8 GR32:$src1, i32immSExt8:$src2)>;
// Memory-Register Subtraction with Overflow
def : Pat<(parallel (store (X86sub_ovf (load addr:$dst), GR8:$src2),
addr:$dst),
(implicit EFLAGS)),
(SUB8mr addr:$dst, GR8:$src2)>;
def : Pat<(parallel (store (X86sub_ovf (load addr:$dst), GR16:$src2),
addr:$dst),
(implicit EFLAGS)),
(SUB16mr addr:$dst, GR16:$src2)>;
def : Pat<(parallel (store (X86sub_ovf (load addr:$dst), GR32:$src2),
addr:$dst),
(implicit EFLAGS)),
(SUB32mr addr:$dst, GR32:$src2)>;
// Memory-Integer Subtraction with Overflow
def : Pat<(parallel (store (X86sub_ovf (loadi8 addr:$dst), imm:$src2),
addr:$dst),
(implicit EFLAGS)),
(SUB8mi addr:$dst, imm:$src2)>;
def : Pat<(parallel (store (X86sub_ovf (loadi16 addr:$dst), imm:$src2),
addr:$dst),
(implicit EFLAGS)),
(SUB16mi addr:$dst, imm:$src2)>;
def : Pat<(parallel (store (X86sub_ovf (loadi32 addr:$dst), imm:$src2),
addr:$dst),
(implicit EFLAGS)),
(SUB32mi addr:$dst, imm:$src2)>;
def : Pat<(parallel (store (X86sub_ovf (load addr:$dst), i16immSExt8:$src2),
addr:$dst),
(implicit EFLAGS)),
(SUB16mi8 addr:$dst, i16immSExt8:$src2)>;
def : Pat<(parallel (store (X86sub_ovf (load addr:$dst), i32immSExt8:$src2),
addr:$dst),
(implicit EFLAGS)),
(SUB32mi8 addr:$dst, i32immSExt8:$src2)>;
// Register-Register Signed Integer Multiply with Overflow
def : Pat<(parallel (X86smul_ovf GR16:$src1, GR16:$src2),
(implicit EFLAGS)),
(IMUL16rr GR16:$src1, GR16:$src2)>;
def : Pat<(parallel (X86smul_ovf GR32:$src1, GR32:$src2),
(implicit EFLAGS)),
(IMUL32rr GR32:$src1, GR32:$src2)>;
// Register-Memory Signed Integer Multiply with Overflow
def : Pat<(parallel (X86smul_ovf GR16:$src1, (load addr:$src2)),
(implicit EFLAGS)),
(IMUL16rm GR16:$src1, addr:$src2)>;
def : Pat<(parallel (X86smul_ovf GR32:$src1, (load addr:$src2)),
(implicit EFLAGS)),
(IMUL32rm GR32:$src1, addr:$src2)>;
// Register-Integer Signed Integer Multiply with Overflow
def : Pat<(parallel (X86smul_ovf GR16:$src1, imm:$src2),
(implicit EFLAGS)),
(IMUL16rri GR16:$src1, imm:$src2)>;
def : Pat<(parallel (X86smul_ovf GR32:$src1, imm:$src2),
(implicit EFLAGS)),
(IMUL32rri GR32:$src1, imm:$src2)>;
def : Pat<(parallel (X86smul_ovf GR16:$src1, i16immSExt8:$src2),
(implicit EFLAGS)),
(IMUL16rri8 GR16:$src1, i16immSExt8:$src2)>;
def : Pat<(parallel (X86smul_ovf GR32:$src1, i32immSExt8:$src2),
(implicit EFLAGS)),
(IMUL32rri8 GR32:$src1, i32immSExt8:$src2)>;
// Memory-Integer Signed Integer Multiply with Overflow
def : Pat<(parallel (X86smul_ovf (load addr:$src1), imm:$src2),
(implicit EFLAGS)),
(IMUL16rmi addr:$src1, imm:$src2)>;
def : Pat<(parallel (X86smul_ovf (load addr:$src1), imm:$src2),
(implicit EFLAGS)),
(IMUL32rmi addr:$src1, imm:$src2)>;
def : Pat<(parallel (X86smul_ovf (load addr:$src1), i16immSExt8:$src2),
(implicit EFLAGS)),
(IMUL16rmi8 addr:$src1, i16immSExt8:$src2)>;
def : Pat<(parallel (X86smul_ovf (load addr:$src1), i32immSExt8:$src2),
(implicit EFLAGS)),
(IMUL32rmi8 addr:$src1, i32immSExt8:$src2)>;
//===----------------------------------------------------------------------===//
// Floating Point Stack Support
//===----------------------------------------------------------------------===//


@ -1,5 +1,4 @@
; RUN: llvm-as < %s | llc -march=x86 | grep {jo} | count 1
; RUN: llvm-as < %s | llc -march=x86 | grep {jc} | count 1
@ok = internal constant [4 x i8] c"%d\0A\00"
@no = internal constant [4 x i8] c"no\0A\00"
@ -20,22 +19,5 @@ overflow:
ret i1 false
}
define i1 @func2(i32 %v1, i32 %v2) nounwind {
entry:
%t = call {i32, i1} @llvm.umul.with.overflow.i32(i32 %v1, i32 %v2)
%sum = extractvalue {i32, i1} %t, 0
%obit = extractvalue {i32, i1} %t, 1
br i1 %obit, label %carry, label %normal
normal:
%t1 = tail call i32 (i8*, ...)* @printf( i8* getelementptr ([4 x i8]* @ok, i32 0, i32 0), i32 %sum ) nounwind
ret i1 true
carry:
%t2 = tail call i32 (i8*, ...)* @printf( i8* getelementptr ([4 x i8]* @no, i32 0, i32 0) ) nounwind
ret i1 false
}
declare i32 @printf(i8*, ...) nounwind
declare {i32, i1} @llvm.smul.with.overflow.i32(i32, i32)
declare {i32, i1} @llvm.umul.with.overflow.i32(i32, i32)


@ -0,0 +1,26 @@
; RUN: llvm-as < %s | llc -march=x86 | grep {jc} | count 1
; XFAIL: *
; FIXME: umul-with-overflow not supported yet.
@ok = internal constant [4 x i8] c"%d\0A\00"
@no = internal constant [4 x i8] c"no\0A\00"
define i1 @func(i32 %v1, i32 %v2) nounwind {
entry:
%t = call {i32, i1} @llvm.umul.with.overflow.i32(i32 %v1, i32 %v2)
%sum = extractvalue {i32, i1} %t, 0
%obit = extractvalue {i32, i1} %t, 1
br i1 %obit, label %carry, label %normal
normal:
%t1 = tail call i32 (i8*, ...)* @printf( i8* getelementptr ([4 x i8]* @ok, i32 0, i32 0), i32 %sum ) nounwind
ret i1 true
carry:
%t2 = tail call i32 (i8*, ...)* @printf( i8* getelementptr ([4 x i8]* @no, i32 0, i32 0) ) nounwind
ret i1 false
}
declare i32 @printf(i8*, ...) nounwind
declare {i32, i1} @llvm.umul.with.overflow.i32(i32, i32)