Fix a few issues in ArithBinOpEFLAGS that made it specific to AND (most notably, the mr/mi/mi8 definitions hard-coded the 'and' operator instead of using the opnode parameter).

Start using ArithBinOpEFLAGS for OR, XOR, and SUB.

This removes 500 lines from the .td file.  Now AND/OR/XOR/SUB are all
defined exactly the same way instead of being close relatives.
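
As a concrete illustration, each instruction family is now produced by a single
defm instantiation of the shared multiclass. The instantiations below are taken
directly from the patch (AND from the unchanged context, OR/XOR/SUB newly added):

    defm AND : ArithBinOpEFLAGS<0x20, 0x22, 0x24, "and", MRM4r, MRM4m,
                                X86and_flag, and, 1>;
    defm OR  : ArithBinOpEFLAGS<0x08, 0x0A, 0x0C, "or",  MRM1r, MRM1m,
                                X86or_flag, or, 1>;
    defm XOR : ArithBinOpEFLAGS<0x30, 0x32, 0x34, "xor", MRM6r, MRM6m,
                                X86xor_flag, xor, 1>;
    defm SUB : ArithBinOpEFLAGS<0x28, 0x2A, 0x2C, "sub", MRM5r, MRM5m,
                                X86sub_flag, sub, 0>;

Each defm stamps out the multiclass body with #NAME# replaced by the defm name,
so "defm OR" yields OR8mr, OR16mi8, OR8i8, and all the other forms that were
previously written out by hand.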


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@115896 91177308-0d34-0410-b5e6-96231b3b80d8
Chris Lattner 2010-10-07 01:26:27 +00:00
parent 4b181c87de
commit 521ff704a1

@@ -668,7 +668,7 @@ class BinOpMI<bits<8> opcode, string mnemonic, X86TypeInfo typeinfo,
: ITy<opcode, f, typeinfo,
(outs), (ins typeinfo.MemOperand:$dst, typeinfo.ImmOperand:$src),
mnemonic, "{$src, $dst|$dst, $src}",
[(store (opnode (typeinfo.LoadNode addr:$dst),
[(store (opnode (typeinfo.VT (load addr:$dst)),
typeinfo.ImmOperator:$src), addr:$dst),
(implicit EFLAGS)]> {
let ImmT = typeinfo.ImmEncoding;
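(With the hunk above applied, the store pattern uses typeinfo.VT (load ...) rather
than typeinfo.LoadNode, and the memory-immediate class reads roughly as sketched
below. The class header is cut off in the hunk heading, so the trailing
"SDNode opnode, Format f" parameters are inferred from the
BinOpMI<opcode, mnemonic, typeinfo, opnode, MemMRM> uses later in this patch.)

    class BinOpMI<bits<8> opcode, string mnemonic, X86TypeInfo typeinfo,
                  SDNode opnode, Format f>  // trailing parameters inferred, see note above
      : ITy<opcode, f, typeinfo,
            (outs), (ins typeinfo.MemOperand:$dst, typeinfo.ImmOperand:$src),
            mnemonic, "{$src, $dst|$dst, $src}",
            [(store (opnode (typeinfo.VT (load addr:$dst)),
                            typeinfo.ImmOperator:$src), addr:$dst),
             (implicit EFLAGS)]> {
      let ImmT = typeinfo.ImmEncoding;
    }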
@@ -738,19 +738,19 @@ multiclass ArithBinOpEFLAGS<bits<8> BaseOpc, bits<8> BaseOpc2, bits<8> BaseOpc4,
def #NAME#64ri8 : BinOpRI8<0x82, mnemonic, Xi64, opnodeflag, RegMRM>;
} // Constraints = "$src1 = $dst"
def #NAME#8mr : BinOpMR<BaseOpc, mnemonic, Xi8 , and>;
def #NAME#16mr : BinOpMR<BaseOpc, mnemonic, Xi16, and>;
def #NAME#32mr : BinOpMR<BaseOpc, mnemonic, Xi32, and>;
def #NAME#64mr : BinOpMR<BaseOpc, mnemonic, Xi64, and>;
def #NAME#8mr : BinOpMR<BaseOpc, mnemonic, Xi8 , opnode>;
def #NAME#16mr : BinOpMR<BaseOpc, mnemonic, Xi16, opnode>;
def #NAME#32mr : BinOpMR<BaseOpc, mnemonic, Xi32, opnode>;
def #NAME#64mr : BinOpMR<BaseOpc, mnemonic, Xi64, opnode>;
def #NAME#8mi : BinOpMI<0x80, mnemonic, Xi8 , and, MemMRM>;
def #NAME#16mi : BinOpMI<0x80, mnemonic, Xi16, and, MemMRM>;
def #NAME#32mi : BinOpMI<0x80, mnemonic, Xi32, and, MemMRM>;
def #NAME#64mi32 : BinOpMI<0x80, mnemonic, Xi64, and, MemMRM>;
def #NAME#8mi : BinOpMI<0x80, mnemonic, Xi8 , opnode, MemMRM>;
def #NAME#16mi : BinOpMI<0x80, mnemonic, Xi16, opnode, MemMRM>;
def #NAME#32mi : BinOpMI<0x80, mnemonic, Xi32, opnode, MemMRM>;
def #NAME#64mi32 : BinOpMI<0x80, mnemonic, Xi64, opnode, MemMRM>;
def #NAME#16mi8 : BinOpMI8<0x82, mnemonic, Xi16, and, MemMRM>;
def #NAME#32mi8 : BinOpMI8<0x82, mnemonic, Xi32, and, MemMRM>;
def #NAME#64mi8 : BinOpMI8<0x82, mnemonic, Xi64, and, MemMRM>;
def #NAME#16mi8 : BinOpMI8<0x82, mnemonic, Xi16, opnode, MemMRM>;
def #NAME#32mi8 : BinOpMI8<0x82, mnemonic, Xi32, opnode, MemMRM>;
def #NAME#64mi8 : BinOpMI8<0x82, mnemonic, Xi64, opnode, MemMRM>;
def #NAME#8i8 : BinOpAI<BaseOpc4, mnemonic, Xi8 , AL>;
def #NAME#16i16 : BinOpAI<BaseOpc4, mnemonic, Xi16, AX>;
@@ -761,339 +761,10 @@ multiclass ArithBinOpEFLAGS<bits<8> BaseOpc, bits<8> BaseOpc2, bits<8> BaseOpc4,
defm AND : ArithBinOpEFLAGS<0x20, 0x22, 0x24, "and", MRM4r, MRM4m,
X86and_flag, and, 1>;
// Logical operators.
let Defs = [EFLAGS] in {
let Constraints = "$src1 = $dst" in {
let isCommutable = 1 in { // X = OR Y, Z --> X = OR Z, Y
def OR8rr : I<0x08, MRMDestReg, (outs GR8 :$dst),
(ins GR8 :$src1, GR8 :$src2),
"or{b}\t{$src2, $dst|$dst, $src2}",
[(set GR8:$dst, EFLAGS, (X86or_flag GR8:$src1, GR8:$src2))]>;
def OR16rr : I<0x09, MRMDestReg, (outs GR16:$dst),
(ins GR16:$src1, GR16:$src2),
"or{w}\t{$src2, $dst|$dst, $src2}",
[(set GR16:$dst, EFLAGS, (X86or_flag GR16:$src1,GR16:$src2))]>,
OpSize;
def OR32rr : I<0x09, MRMDestReg, (outs GR32:$dst),
(ins GR32:$src1, GR32:$src2),
"or{l}\t{$src2, $dst|$dst, $src2}",
[(set GR32:$dst, EFLAGS, (X86or_flag GR32:$src1,GR32:$src2))]>;
def OR64rr : RI<0x09, MRMDestReg, (outs GR64:$dst),
(ins GR64:$src1, GR64:$src2),
"or{q}\t{$src2, $dst|$dst, $src2}",
[(set GR64:$dst, EFLAGS,
(X86or_flag GR64:$src1, GR64:$src2))]>;
}
// OR instructions with the destination register in REG and the source register
// in R/M. Included for the disassembler.
let isCodeGenOnly = 1 in {
def OR8rr_REV : I<0x0A, MRMSrcReg, (outs GR8:$dst), (ins GR8:$src1, GR8:$src2),
"or{b}\t{$src2, $dst|$dst, $src2}", []>;
def OR16rr_REV : I<0x0B, MRMSrcReg, (outs GR16:$dst),
(ins GR16:$src1, GR16:$src2),
"or{w}\t{$src2, $dst|$dst, $src2}", []>, OpSize;
def OR32rr_REV : I<0x0B, MRMSrcReg, (outs GR32:$dst),
(ins GR32:$src1, GR32:$src2),
"or{l}\t{$src2, $dst|$dst, $src2}", []>;
def OR64rr_REV : RI<0x0B, MRMSrcReg, (outs GR64:$dst),
(ins GR64:$src1, GR64:$src2),
"or{q}\t{$src2, $dst|$dst, $src2}", []>;
}
def OR8rm : I<0x0A, MRMSrcMem, (outs GR8 :$dst),
(ins GR8 :$src1, i8mem :$src2),
"or{b}\t{$src2, $dst|$dst, $src2}",
[(set GR8:$dst, EFLAGS, (X86or_flag GR8:$src1,
(load addr:$src2)))]>;
def OR16rm : I<0x0B, MRMSrcMem, (outs GR16:$dst),
(ins GR16:$src1, i16mem:$src2),
"or{w}\t{$src2, $dst|$dst, $src2}",
[(set GR16:$dst, EFLAGS, (X86or_flag GR16:$src1,
(load addr:$src2)))]>,
OpSize;
def OR32rm : I<0x0B, MRMSrcMem, (outs GR32:$dst),
(ins GR32:$src1, i32mem:$src2),
"or{l}\t{$src2, $dst|$dst, $src2}",
[(set GR32:$dst, EFLAGS, (X86or_flag GR32:$src1,
(load addr:$src2)))]>;
def OR64rm : RI<0x0B, MRMSrcMem , (outs GR64:$dst),
(ins GR64:$src1, i64mem:$src2),
"or{q}\t{$src2, $dst|$dst, $src2}",
[(set GR64:$dst, EFLAGS,
(X86or_flag GR64:$src1, (load addr:$src2)))]>;
def OR8ri : Ii8 <0x80, MRM1r, (outs GR8 :$dst),
(ins GR8 :$src1, i8imm:$src2),
"or{b}\t{$src2, $dst|$dst, $src2}",
[(set GR8:$dst,EFLAGS, (X86or_flag GR8:$src1, imm:$src2))]>;
def OR16ri : Ii16<0x81, MRM1r, (outs GR16:$dst),
(ins GR16:$src1, i16imm:$src2),
"or{w}\t{$src2, $dst|$dst, $src2}",
[(set GR16:$dst, EFLAGS, (X86or_flag GR16:$src1,
imm:$src2))]>, OpSize;
def OR32ri : Ii32<0x81, MRM1r, (outs GR32:$dst),
(ins GR32:$src1, i32imm:$src2),
"or{l}\t{$src2, $dst|$dst, $src2}",
[(set GR32:$dst, EFLAGS, (X86or_flag GR32:$src1,
imm:$src2))]>;
def OR64ri32 : RIi32<0x81, MRM1r, (outs GR64:$dst),
(ins GR64:$src1, i64i32imm:$src2),
"or{q}\t{$src2, $dst|$dst, $src2}",
[(set GR64:$dst, EFLAGS,
(X86or_flag GR64:$src1, i64immSExt32:$src2))]>;
def OR16ri8 : Ii8<0x83, MRM1r, (outs GR16:$dst),
(ins GR16:$src1, i16i8imm:$src2),
"or{w}\t{$src2, $dst|$dst, $src2}",
[(set GR16:$dst, EFLAGS, (X86or_flag GR16:$src1,
i16immSExt8:$src2))]>, OpSize;
def OR32ri8 : Ii8<0x83, MRM1r, (outs GR32:$dst),
(ins GR32:$src1, i32i8imm:$src2),
"or{l}\t{$src2, $dst|$dst, $src2}",
[(set GR32:$dst, EFLAGS, (X86or_flag GR32:$src1,
i32immSExt8:$src2))]>;
def OR64ri8 : RIi8<0x83, MRM1r, (outs GR64:$dst),
(ins GR64:$src1, i64i8imm:$src2),
"or{q}\t{$src2, $dst|$dst, $src2}",
[(set GR64:$dst, EFLAGS,
(X86or_flag GR64:$src1, i64immSExt8:$src2))]>;
} // Constraints = "$src1 = $dst"
def OR8mr : I<0x08, MRMDestMem, (outs), (ins i8mem:$dst, GR8:$src),
"or{b}\t{$src, $dst|$dst, $src}",
[(store (or (load addr:$dst), GR8:$src), addr:$dst),
(implicit EFLAGS)]>;
def OR16mr : I<0x09, MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src),
"or{w}\t{$src, $dst|$dst, $src}",
[(store (or (load addr:$dst), GR16:$src), addr:$dst),
(implicit EFLAGS)]>, OpSize;
def OR32mr : I<0x09, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
"or{l}\t{$src, $dst|$dst, $src}",
[(store (or (load addr:$dst), GR32:$src), addr:$dst),
(implicit EFLAGS)]>;
def OR64mr : RI<0x09, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
"or{q}\t{$src, $dst|$dst, $src}",
[(store (or (load addr:$dst), GR64:$src), addr:$dst),
(implicit EFLAGS)]>;
def OR8mi : Ii8<0x80, MRM1m, (outs), (ins i8mem :$dst, i8imm:$src),
"or{b}\t{$src, $dst|$dst, $src}",
[(store (or (loadi8 addr:$dst), imm:$src), addr:$dst),
(implicit EFLAGS)]>;
def OR16mi : Ii16<0x81, MRM1m, (outs), (ins i16mem:$dst, i16imm:$src),
"or{w}\t{$src, $dst|$dst, $src}",
[(store (or (loadi16 addr:$dst), imm:$src), addr:$dst),
(implicit EFLAGS)]>,
OpSize;
def OR32mi : Ii32<0x81, MRM1m, (outs), (ins i32mem:$dst, i32imm:$src),
"or{l}\t{$src, $dst|$dst, $src}",
[(store (or (loadi32 addr:$dst), imm:$src), addr:$dst),
(implicit EFLAGS)]>;
def OR64mi32 : RIi32<0x81, MRM1m, (outs), (ins i64mem:$dst, i64i32imm:$src),
"or{q}\t{$src, $dst|$dst, $src}",
[(store (or (loadi64 addr:$dst), i64immSExt32:$src), addr:$dst),
(implicit EFLAGS)]>;
def OR16mi8 : Ii8<0x83, MRM1m, (outs), (ins i16mem:$dst, i16i8imm:$src),
"or{w}\t{$src, $dst|$dst, $src}",
[(store (or (load addr:$dst), i16immSExt8:$src), addr:$dst),
(implicit EFLAGS)]>,
OpSize;
def OR32mi8 : Ii8<0x83, MRM1m, (outs), (ins i32mem:$dst, i32i8imm:$src),
"or{l}\t{$src, $dst|$dst, $src}",
[(store (or (load addr:$dst), i32immSExt8:$src), addr:$dst),
(implicit EFLAGS)]>;
def OR64mi8 : RIi8<0x83, MRM1m, (outs), (ins i64mem:$dst, i64i8imm:$src),
"or{q}\t{$src, $dst|$dst, $src}",
[(store (or (load addr:$dst), i64immSExt8:$src), addr:$dst),
(implicit EFLAGS)]>;
def OR8i8 : Ii8 <0x0C, RawFrm, (outs), (ins i8imm:$src),
"or{b}\t{$src, %al|%al, $src}", []>;
def OR16i16 : Ii16 <0x0D, RawFrm, (outs), (ins i16imm:$src),
"or{w}\t{$src, %ax|%ax, $src}", []>, OpSize;
def OR32i32 : Ii32 <0x0D, RawFrm, (outs), (ins i32imm:$src),
"or{l}\t{$src, %eax|%eax, $src}", []>;
def OR64i32 : RIi32<0x0D, RawFrm, (outs), (ins i64i32imm:$src),
"or{q}\t{$src, %rax|%rax, $src}", []>;
let Constraints = "$src1 = $dst" in {
let isCommutable = 1 in { // X = XOR Y, Z --> X = XOR Z, Y
def XOR8rr : I<0x30, MRMDestReg,
(outs GR8 :$dst), (ins GR8 :$src1, GR8 :$src2),
"xor{b}\t{$src2, $dst|$dst, $src2}",
[(set GR8:$dst, EFLAGS, (X86xor_flag GR8:$src1,
GR8:$src2))]>;
def XOR16rr : I<0x31, MRMDestReg,
(outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
"xor{w}\t{$src2, $dst|$dst, $src2}",
[(set GR16:$dst, EFLAGS, (X86xor_flag GR16:$src1,
GR16:$src2))]>, OpSize;
def XOR32rr : I<0x31, MRMDestReg,
(outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
"xor{l}\t{$src2, $dst|$dst, $src2}",
[(set GR32:$dst, EFLAGS, (X86xor_flag GR32:$src1,
GR32:$src2))]>;
def XOR64rr : RI<0x31, MRMDestReg, (outs GR64:$dst),
(ins GR64:$src1, GR64:$src2),
"xor{q}\t{$src2, $dst|$dst, $src2}",
[(set GR64:$dst, EFLAGS,
(X86xor_flag GR64:$src1, GR64:$src2))]>;
} // isCommutable = 1
// XOR instructions with the destination register in REG and the source register
// in R/M. Included for the disassembler.
let isCodeGenOnly = 1 in {
def XOR8rr_REV : I<0x32, MRMSrcReg, (outs GR8:$dst), (ins GR8:$src1, GR8:$src2),
"xor{b}\t{$src2, $dst|$dst, $src2}", []>;
def XOR16rr_REV : I<0x33, MRMSrcReg, (outs GR16:$dst),
(ins GR16:$src1, GR16:$src2),
"xor{w}\t{$src2, $dst|$dst, $src2}", []>, OpSize;
def XOR32rr_REV : I<0x33, MRMSrcReg, (outs GR32:$dst),
(ins GR32:$src1, GR32:$src2),
"xor{l}\t{$src2, $dst|$dst, $src2}", []>;
def XOR64rr_REV : RI<0x33, MRMSrcReg, (outs GR64:$dst),
(ins GR64:$src1, GR64:$src2),
"xor{q}\t{$src2, $dst|$dst, $src2}", []>;
}
def XOR8rm : I<0x32, MRMSrcMem,
(outs GR8 :$dst), (ins GR8:$src1, i8mem :$src2),
"xor{b}\t{$src2, $dst|$dst, $src2}",
[(set GR8:$dst, EFLAGS, (X86xor_flag GR8:$src1,
(load addr:$src2)))]>;
def XOR16rm : I<0x33, MRMSrcMem,
(outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
"xor{w}\t{$src2, $dst|$dst, $src2}",
[(set GR16:$dst, EFLAGS, (X86xor_flag GR16:$src1,
(load addr:$src2)))]>,
OpSize;
def XOR32rm : I<0x33, MRMSrcMem,
(outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
"xor{l}\t{$src2, $dst|$dst, $src2}",
[(set GR32:$dst, EFLAGS, (X86xor_flag GR32:$src1,
(load addr:$src2)))]>;
def XOR64rm : RI<0x33, MRMSrcMem, (outs GR64:$dst),
(ins GR64:$src1, i64mem:$src2),
"xor{q}\t{$src2, $dst|$dst, $src2}",
[(set GR64:$dst, EFLAGS,
(X86xor_flag GR64:$src1, (load addr:$src2)))]>;
def XOR8ri : Ii8<0x80, MRM6r,
(outs GR8:$dst), (ins GR8:$src1, i8imm:$src2),
"xor{b}\t{$src2, $dst|$dst, $src2}",
[(set GR8:$dst, EFLAGS, (X86xor_flag GR8:$src1, imm:$src2))]>;
def XOR16ri : Ii16<0x81, MRM6r,
(outs GR16:$dst), (ins GR16:$src1, i16imm:$src2),
"xor{w}\t{$src2, $dst|$dst, $src2}",
[(set GR16:$dst, EFLAGS, (X86xor_flag GR16:$src1,
imm:$src2))]>, OpSize;
def XOR32ri : Ii32<0x81, MRM6r,
(outs GR32:$dst), (ins GR32:$src1, i32imm:$src2),
"xor{l}\t{$src2, $dst|$dst, $src2}",
[(set GR32:$dst, EFLAGS, (X86xor_flag GR32:$src1,
imm:$src2))]>;
def XOR64ri32 : RIi32<0x81, MRM6r,
(outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
"xor{q}\t{$src2, $dst|$dst, $src2}",
[(set GR64:$dst, EFLAGS,
(X86xor_flag GR64:$src1, i64immSExt32:$src2))]>;
def XOR16ri8 : Ii8<0x83, MRM6r,
(outs GR16:$dst), (ins GR16:$src1, i16i8imm:$src2),
"xor{w}\t{$src2, $dst|$dst, $src2}",
[(set GR16:$dst, EFLAGS, (X86xor_flag GR16:$src1,
i16immSExt8:$src2))]>,
OpSize;
def XOR32ri8 : Ii8<0x83, MRM6r,
(outs GR32:$dst), (ins GR32:$src1, i32i8imm:$src2),
"xor{l}\t{$src2, $dst|$dst, $src2}",
[(set GR32:$dst, EFLAGS, (X86xor_flag GR32:$src1,
i32immSExt8:$src2))]>;
def XOR64ri8 : RIi8<0x83, MRM6r, (outs GR64:$dst),
(ins GR64:$src1, i64i8imm:$src2),
"xor{q}\t{$src2, $dst|$dst, $src2}",
[(set GR64:$dst, EFLAGS,
(X86xor_flag GR64:$src1, i64immSExt8:$src2))]>;
} // Constraints = "$src1 = $dst"
def XOR8mr : I<0x30, MRMDestMem,
(outs), (ins i8mem :$dst, GR8 :$src),
"xor{b}\t{$src, $dst|$dst, $src}",
[(store (xor (load addr:$dst), GR8:$src), addr:$dst),
(implicit EFLAGS)]>;
def XOR16mr : I<0x31, MRMDestMem,
(outs), (ins i16mem:$dst, GR16:$src),
"xor{w}\t{$src, $dst|$dst, $src}",
[(store (xor (load addr:$dst), GR16:$src), addr:$dst),
(implicit EFLAGS)]>,
OpSize;
def XOR32mr : I<0x31, MRMDestMem,
(outs), (ins i32mem:$dst, GR32:$src),
"xor{l}\t{$src, $dst|$dst, $src}",
[(store (xor (load addr:$dst), GR32:$src), addr:$dst),
(implicit EFLAGS)]>;
def XOR64mr : RI<0x31, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
"xor{q}\t{$src, $dst|$dst, $src}",
[(store (xor (load addr:$dst), GR64:$src), addr:$dst),
(implicit EFLAGS)]>;
def XOR8mi : Ii8<0x80, MRM6m,
(outs), (ins i8mem :$dst, i8imm :$src),
"xor{b}\t{$src, $dst|$dst, $src}",
[(store (xor (loadi8 addr:$dst), imm:$src), addr:$dst),
(implicit EFLAGS)]>;
def XOR16mi : Ii16<0x81, MRM6m,
(outs), (ins i16mem:$dst, i16imm:$src),
"xor{w}\t{$src, $dst|$dst, $src}",
[(store (xor (loadi16 addr:$dst), imm:$src), addr:$dst),
(implicit EFLAGS)]>,
OpSize;
def XOR32mi : Ii32<0x81, MRM6m,
(outs), (ins i32mem:$dst, i32imm:$src),
"xor{l}\t{$src, $dst|$dst, $src}",
[(store (xor (loadi32 addr:$dst), imm:$src), addr:$dst),
(implicit EFLAGS)]>;
def XOR64mi32 : RIi32<0x81, MRM6m, (outs), (ins i64mem:$dst, i64i32imm:$src),
"xor{q}\t{$src, $dst|$dst, $src}",
[(store (xor (loadi64 addr:$dst), i64immSExt32:$src), addr:$dst),
(implicit EFLAGS)]>;
def XOR16mi8 : Ii8<0x83, MRM6m,
(outs), (ins i16mem:$dst, i16i8imm :$src),
"xor{w}\t{$src, $dst|$dst, $src}",
[(store (xor (load addr:$dst), i16immSExt8:$src), addr:$dst),
(implicit EFLAGS)]>,
OpSize;
def XOR32mi8 : Ii8<0x83, MRM6m,
(outs), (ins i32mem:$dst, i32i8imm :$src),
"xor{l}\t{$src, $dst|$dst, $src}",
[(store (xor (load addr:$dst), i32immSExt8:$src), addr:$dst),
(implicit EFLAGS)]>;
def XOR64mi8 : RIi8<0x83, MRM6m, (outs), (ins i64mem:$dst, i64i8imm :$src),
"xor{q}\t{$src, $dst|$dst, $src}",
[(store (xor (load addr:$dst), i64immSExt8:$src), addr:$dst),
(implicit EFLAGS)]>;
def XOR8i8 : Ii8 <0x34, RawFrm, (outs), (ins i8imm:$src),
"xor{b}\t{$src, %al|%al, $src}", []>;
def XOR16i16 : Ii16<0x35, RawFrm, (outs), (ins i16imm:$src),
"xor{w}\t{$src, %ax|%ax, $src}", []>, OpSize;
def XOR32i32 : Ii32<0x35, RawFrm, (outs), (ins i32imm:$src),
"xor{l}\t{$src, %eax|%eax, $src}", []>;
def XOR64i32 : RIi32<0x35, RawFrm, (outs), (ins i64i32imm:$src),
"xor{q}\t{$src, %rax|%rax, $src}", []>;
} // Defs = [EFLAGS]
defm OR : ArithBinOpEFLAGS<0x08, 0x0A, 0x0C, "or", MRM1r, MRM1m,
X86or_flag, or, 1>;
defm XOR : ArithBinOpEFLAGS<0x30, 0x32, 0x34, "xor", MRM6r, MRM6m,
X86xor_flag, xor, 1>;
// Arithmetic.
@@ -1395,161 +1066,11 @@ def ADC64i32 : RIi32<0x15, RawFrm, (outs), (ins i64i32imm:$src),
"adc{q}\t{$src, %rax|%rax, $src}", []>;
} // Uses = [EFLAGS]
let Constraints = "$src1 = $dst" in {
// Register-Register Subtraction
def SUB8rr : I<0x28, MRMDestReg, (outs GR8:$dst), (ins GR8:$src1, GR8:$src2),
"sub{b}\t{$src2, $dst|$dst, $src2}",
[(set GR8:$dst, EFLAGS,
(X86sub_flag GR8:$src1, GR8:$src2))]>;
def SUB16rr : I<0x29, MRMDestReg, (outs GR16:$dst), (ins GR16:$src1,GR16:$src2),
"sub{w}\t{$src2, $dst|$dst, $src2}",
[(set GR16:$dst, EFLAGS,
(X86sub_flag GR16:$src1, GR16:$src2))]>, OpSize;
def SUB32rr : I<0x29, MRMDestReg, (outs GR32:$dst), (ins GR32:$src1,GR32:$src2),
"sub{l}\t{$src2, $dst|$dst, $src2}",
[(set GR32:$dst, EFLAGS,
(X86sub_flag GR32:$src1, GR32:$src2))]>;
def SUB64rr : RI<0x29, MRMDestReg, (outs GR64:$dst),
(ins GR64:$src1, GR64:$src2),
"sub{q}\t{$src2, $dst|$dst, $src2}",
[(set GR64:$dst, EFLAGS,
(X86sub_flag GR64:$src1, GR64:$src2))]>;
let isCodeGenOnly = 1 in {
def SUB8rr_REV : I<0x2A, MRMSrcReg, (outs GR8:$dst), (ins GR8:$src1, GR8:$src2),
"sub{b}\t{$src2, $dst|$dst, $src2}", []>;
def SUB16rr_REV : I<0x2B, MRMSrcReg, (outs GR16:$dst),
(ins GR16:$src1, GR16:$src2),
"sub{w}\t{$src2, $dst|$dst, $src2}", []>, OpSize;
def SUB32rr_REV : I<0x2B, MRMSrcReg, (outs GR32:$dst),
(ins GR32:$src1, GR32:$src2),
"sub{l}\t{$src2, $dst|$dst, $src2}", []>;
def SUB64rr_REV : RI<0x2B, MRMSrcReg, (outs GR64:$dst),
(ins GR64:$src1, GR64:$src2),
"sub{q}\t{$src2, $dst|$dst, $src2}", []>;
}
defm SUB : ArithBinOpEFLAGS<0x28, 0x2A, 0x2C, "sub", MRM5r, MRM5m,
X86sub_flag, sub, 0>;
// Register-Memory Subtraction
def SUB8rm : I<0x2A, MRMSrcMem, (outs GR8 :$dst),
(ins GR8 :$src1, i8mem :$src2),
"sub{b}\t{$src2, $dst|$dst, $src2}",
[(set GR8:$dst, EFLAGS,
(X86sub_flag GR8:$src1, (load addr:$src2)))]>;
def SUB16rm : I<0x2B, MRMSrcMem, (outs GR16:$dst),
(ins GR16:$src1, i16mem:$src2),
"sub{w}\t{$src2, $dst|$dst, $src2}",
[(set GR16:$dst, EFLAGS,
(X86sub_flag GR16:$src1, (load addr:$src2)))]>, OpSize;
def SUB32rm : I<0x2B, MRMSrcMem, (outs GR32:$dst),
(ins GR32:$src1, i32mem:$src2),
"sub{l}\t{$src2, $dst|$dst, $src2}",
[(set GR32:$dst, EFLAGS,
(X86sub_flag GR32:$src1, (load addr:$src2)))]>;
def SUB64rm : RI<0x2B, MRMSrcMem, (outs GR64:$dst),
(ins GR64:$src1, i64mem:$src2),
"sub{q}\t{$src2, $dst|$dst, $src2}",
[(set GR64:$dst, EFLAGS,
(X86sub_flag GR64:$src1, (load addr:$src2)))]>;
// Register-Integer Subtraction
def SUB8ri : Ii8 <0x80, MRM5r, (outs GR8:$dst),
(ins GR8:$src1, i8imm:$src2),
"sub{b}\t{$src2, $dst|$dst, $src2}",
[(set GR8:$dst, EFLAGS,
(X86sub_flag GR8:$src1, imm:$src2))]>;
def SUB16ri : Ii16<0x81, MRM5r, (outs GR16:$dst),
(ins GR16:$src1, i16imm:$src2),
"sub{w}\t{$src2, $dst|$dst, $src2}",
[(set GR16:$dst, EFLAGS,
(X86sub_flag GR16:$src1, imm:$src2))]>, OpSize;
def SUB32ri : Ii32<0x81, MRM5r, (outs GR32:$dst),
(ins GR32:$src1, i32imm:$src2),
"sub{l}\t{$src2, $dst|$dst, $src2}",
[(set GR32:$dst, EFLAGS,
(X86sub_flag GR32:$src1, imm:$src2))]>;
def SUB64ri32 : RIi32<0x81, MRM5r, (outs GR64:$dst),
(ins GR64:$src1, i64i32imm:$src2),
"sub{q}\t{$src2, $dst|$dst, $src2}",
[(set GR64:$dst, EFLAGS,
(X86sub_flag GR64:$src1, i64immSExt32:$src2))]>;
def SUB16ri8 : Ii8<0x83, MRM5r, (outs GR16:$dst),
(ins GR16:$src1, i16i8imm:$src2),
"sub{w}\t{$src2, $dst|$dst, $src2}",
[(set GR16:$dst, EFLAGS,
(X86sub_flag GR16:$src1, i16immSExt8:$src2))]>, OpSize;
def SUB32ri8 : Ii8<0x83, MRM5r, (outs GR32:$dst),
(ins GR32:$src1, i32i8imm:$src2),
"sub{l}\t{$src2, $dst|$dst, $src2}",
[(set GR32:$dst, EFLAGS,
(X86sub_flag GR32:$src1, i32immSExt8:$src2))]>;
def SUB64ri8 : RIi8<0x83, MRM5r, (outs GR64:$dst),
(ins GR64:$src1, i64i8imm:$src2),
"sub{q}\t{$src2, $dst|$dst, $src2}",
[(set GR64:$dst, EFLAGS,
(X86sub_flag GR64:$src1, i64immSExt8:$src2))]>;
} // Constraints = "$src1 = $dst"
// Memory-Register Subtraction
def SUB8mr : I<0x28, MRMDestMem, (outs), (ins i8mem :$dst, GR8 :$src2),
"sub{b}\t{$src2, $dst|$dst, $src2}",
[(store (sub (load addr:$dst), GR8:$src2), addr:$dst),
(implicit EFLAGS)]>;
def SUB16mr : I<0x29, MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src2),
"sub{w}\t{$src2, $dst|$dst, $src2}",
[(store (sub (load addr:$dst), GR16:$src2), addr:$dst),
(implicit EFLAGS)]>, OpSize;
def SUB32mr : I<0x29, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src2),
"sub{l}\t{$src2, $dst|$dst, $src2}",
[(store (sub (load addr:$dst), GR32:$src2), addr:$dst),
(implicit EFLAGS)]>;
def SUB64mr : RI<0x29, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
"sub{q}\t{$src2, $dst|$dst, $src2}",
[(store (sub (load addr:$dst), GR64:$src2), addr:$dst),
(implicit EFLAGS)]>;
// Memory-Integer Subtraction
def SUB8mi : Ii8<0x80, MRM5m, (outs), (ins i8mem :$dst, i8imm:$src2),
"sub{b}\t{$src2, $dst|$dst, $src2}",
[(store (sub (loadi8 addr:$dst), imm:$src2), addr:$dst),
(implicit EFLAGS)]>;
def SUB16mi : Ii16<0x81, MRM5m, (outs), (ins i16mem:$dst, i16imm:$src2),
"sub{w}\t{$src2, $dst|$dst, $src2}",
[(store (sub (loadi16 addr:$dst), imm:$src2),addr:$dst),
(implicit EFLAGS)]>, OpSize;
def SUB32mi : Ii32<0x81, MRM5m, (outs), (ins i32mem:$dst, i32imm:$src2),
"sub{l}\t{$src2, $dst|$dst, $src2}",
[(store (sub (loadi32 addr:$dst), imm:$src2),addr:$dst),
(implicit EFLAGS)]>;
def SUB64mi32 : RIi32<0x81, MRM5m, (outs), (ins i64mem:$dst, i64i32imm:$src2),
"sub{q}\t{$src2, $dst|$dst, $src2}",
[(store (sub (load addr:$dst), i64immSExt32:$src2),
addr:$dst),
(implicit EFLAGS)]>;
def SUB16mi8 : Ii8<0x83, MRM5m, (outs), (ins i16mem:$dst, i16i8imm :$src2),
"sub{w}\t{$src2, $dst|$dst, $src2}",
[(store (sub (load addr:$dst), i16immSExt8:$src2),
addr:$dst),
(implicit EFLAGS)]>, OpSize;
def SUB32mi8 : Ii8<0x83, MRM5m, (outs), (ins i32mem:$dst, i32i8imm :$src2),
"sub{l}\t{$src2, $dst|$dst, $src2}",
[(store (sub (load addr:$dst), i32immSExt8:$src2),
addr:$dst),
(implicit EFLAGS)]>;
def SUB64mi8 : RIi8<0x83, MRM5m, (outs), (ins i64mem:$dst, i64i8imm :$src2),
"sub{q}\t{$src2, $dst|$dst, $src2}",
[(store (sub (load addr:$dst), i64immSExt8:$src2),
addr:$dst),
(implicit EFLAGS)]>;
def SUB8i8 : Ii8<0x2C, RawFrm, (outs), (ins i8imm:$src),
"sub{b}\t{$src, %al|%al, $src}", []>;
def SUB16i16 : Ii16<0x2D, RawFrm, (outs), (ins i16imm:$src),
"sub{w}\t{$src, %ax|%ax, $src}", []>, OpSize;
def SUB32i32 : Ii32<0x2D, RawFrm, (outs), (ins i32imm:$src),
"sub{l}\t{$src, %eax|%eax, $src}", []>;
def SUB64i32 : RIi32<0x2D, RawFrm, (outs), (ins i64i32imm:$src),
"sub{q}\t{$src, %rax|%rax, $src}", []>;
let Uses = [EFLAGS] in {
let Constraints = "$src1 = $dst" in {