diff --git a/lib/Target/X86/X86InstrCMovSetCC.td b/lib/Target/X86/X86InstrCMovSetCC.td
index f22fe4de768..3a43b22ddf3 100644
--- a/lib/Target/X86/X86InstrCMovSetCC.td
+++ b/lib/Target/X86/X86InstrCMovSetCC.td
@@ -34,7 +34,7 @@ multiclass CMOV<bits<8> opc, string Mnemonic, PatLeaf CondNode> {
                 (X86cmov GR64:$src1, GR64:$src2, CondNode, EFLAGS))]>, TB;
   }
 
-  let Uses = [EFLAGS], Predicates = [HasCMov], Constraints = "$src1 = $dst"in {
+  let Uses = [EFLAGS], Predicates = [HasCMov], Constraints = "$src1 = $dst" in {
     def #NAME#16rm
       : I<opc, MRMSrcMem, (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
           !strconcat(Mnemonic, "{w}\t{$src2, $dst|$dst, $src2}"),
@@ ... @@ multiclass CMOV<bits<8> opc, string Mnemonic, PatLeaf CondNode> {
 
 // Conditional Moves.
+defm CMOVO  : CMOV<0x40, "cmovo" , X86_COND_O>;
+defm CMOVNO : CMOV<0x41, "cmovno", X86_COND_NO>;
+defm CMOVB  : CMOV<0x42, "cmovb" , X86_COND_B>;
+defm CMOVAE : CMOV<0x43, "cmovae", X86_COND_AE>;
+defm CMOVE  : CMOV<0x44, "cmove" , X86_COND_E>;
+defm CMOVNE : CMOV<0x45, "cmovne", X86_COND_NE>;
 defm CMOVBE : CMOV<0x46, "cmovbe", X86_COND_BE>;
-
-
-let Constraints = "$src1 = $dst" in {
-
-// Conditional moves
-let Uses = [EFLAGS] in {
-
-let Predicates = [HasCMov] in {
-let isCommutable = 1 in {
-def CMOVB16rr : I<0x42, MRMSrcReg,       // if <u, GR16 = GR16
-                  (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
-                  "cmovb{w}\t{$src2, $dst|$dst, $src2}",
-                  [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
-                                   X86_COND_B, EFLAGS))]>,
-                  TB, OpSize;
-def CMOVB32rr : I<0x42, MRMSrcReg,       // if <u, GR32 = GR32
-                  (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
-                  "cmovb{l}\t{$src2, $dst|$dst, $src2}",
-                  [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
-                                   X86_COND_B, EFLAGS))]>,
-                  TB;
-def CMOVAE16rr: I<0x43, MRMSrcReg,       // if >=u, GR16 = GR16
-                  (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
-                  "cmovae{w}\t{$src2, $dst|$dst, $src2}",
-                  [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
-                                   X86_COND_AE, EFLAGS))]>,
-                  TB, OpSize;
-def CMOVAE32rr: I<0x43, MRMSrcReg,       // if >=u, GR32 = GR32
-                  (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
-                  "cmovae{l}\t{$src2, $dst|$dst, $src2}",
-                  [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
-                                   X86_COND_AE, EFLAGS))]>,
-                  TB;
-def CMOVE16rr : I<0x44, MRMSrcReg,       // if ==, GR16 = GR16
-                  (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
-                  "cmove{w}\t{$src2, $dst|$dst, $src2}",
-                  [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
-                                   X86_COND_E, EFLAGS))]>,
-                  TB, OpSize;
-def CMOVE32rr : I<0x44, MRMSrcReg,       // if ==, GR32 = GR32
-                  (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
-                  "cmove{l}\t{$src2, $dst|$dst, $src2}",
-                  [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
-                                   X86_COND_E, EFLAGS))]>,
-                  TB;
-def CMOVNE16rr: I<0x45, MRMSrcReg,       // if !=, GR16 = GR16
-                  (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
-                  "cmovne{w}\t{$src2, $dst|$dst, $src2}",
-                  [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
-                                   X86_COND_NE, EFLAGS))]>,
-                  TB, OpSize;
-def CMOVNE32rr: I<0x45, MRMSrcReg,       // if !=, GR32 = GR32
-                  (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
-                  "cmovne{l}\t{$src2, $dst|$dst, $src2}",
-                  [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
-                                   X86_COND_NE, EFLAGS))]>,
-                  TB;
-def CMOVA16rr : I<0x47, MRMSrcReg,       // if >u, GR16 = GR16
-                  (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
-                  "cmova{w}\t{$src2, $dst|$dst, $src2}",
-                  [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
-                                   X86_COND_A, EFLAGS))]>,
-                  TB, OpSize;
-def CMOVA32rr : I<0x47, MRMSrcReg,       // if >u, GR32 = GR32
-                  (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
-                  "cmova{l}\t{$src2, $dst|$dst, $src2}",
-                  [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
-                                   X86_COND_A, EFLAGS))]>,
-                  TB;
-def CMOVL16rr : I<0x4C, MRMSrcReg,       // if <s, GR16 = GR16
-                  (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
-                  "cmovl{w}\t{$src2, $dst|$dst, $src2}",
-                  [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
-                                   X86_COND_L, EFLAGS))]>,
-                  TB, OpSize;
-def CMOVL32rr : I<0x4C, MRMSrcReg,       // if <s, GR32 = GR32
-                  (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
-                  "cmovl{l}\t{$src2, $dst|$dst, $src2}",
-                  [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
-                                   X86_COND_L, EFLAGS))]>,
-                  TB;
-def CMOVGE16rr: I<0x4D, MRMSrcReg,       // if >=s, GR16 = GR16
-                  (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
-                  "cmovge{w}\t{$src2, $dst|$dst, $src2}",
-                  [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
-                                   X86_COND_GE, EFLAGS))]>,
-                  TB, OpSize;
-def CMOVGE32rr: I<0x4D, MRMSrcReg,       // if >=s, GR32 = GR32
-                  (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
-                  "cmovge{l}\t{$src2, $dst|$dst, $src2}",
-                  [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
-                                   X86_COND_GE, EFLAGS))]>,
-                  TB;
-def CMOVLE16rr: I<0x4E, MRMSrcReg,       // if <=s, GR16 = GR16
-                  (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
-                  "cmovle{w}\t{$src2, $dst|$dst, $src2}",
-                  [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
-                                   X86_COND_LE, EFLAGS))]>,
-                  TB, OpSize;
-def CMOVLE32rr: I<0x4E, MRMSrcReg,       // if <=s, GR32 = GR32
-                  (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
-                  "cmovle{l}\t{$src2, $dst|$dst, $src2}",
-                  [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
-                                   X86_COND_LE, EFLAGS))]>,
-                  TB;
-def CMOVG16rr : I<0x4F, MRMSrcReg,       // if >s, GR16 = GR16
-                  (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
-                  "cmovg{w}\t{$src2, $dst|$dst, $src2}",
-                  [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
-                                   X86_COND_G, EFLAGS))]>,
-                  TB, OpSize;
-def CMOVG32rr : I<0x4F, MRMSrcReg,       // if >s, GR32 = GR32
-                  (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
-                  "cmovg{l}\t{$src2, $dst|$dst, $src2}",
-                  [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
-                                   X86_COND_G, EFLAGS))]>,
-                  TB;
-def CMOVS16rr : I<0x48, MRMSrcReg,       // if signed, GR16 = GR16
-                  (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
-                  "cmovs{w}\t{$src2, $dst|$dst, $src2}",
-                  [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
-                                   X86_COND_S, EFLAGS))]>,
-                  TB, OpSize;
-def CMOVS32rr : I<0x48, MRMSrcReg,       // if signed, GR32 = GR32
-                  (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
-                  "cmovs{l}\t{$src2, $dst|$dst, $src2}",
-                  [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
-                                   X86_COND_S, EFLAGS))]>,
-                  TB;
-def CMOVNS16rr: I<0x49, MRMSrcReg,       // if !signed, GR16 = GR16
-                  (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
-                  "cmovns{w}\t{$src2, $dst|$dst, $src2}",
-                  [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
-                                   X86_COND_NS, EFLAGS))]>,
-                  TB, OpSize;
-def CMOVNS32rr: I<0x49, MRMSrcReg,       // if !signed, GR32 = GR32
-                  (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
-                  "cmovns{l}\t{$src2, $dst|$dst, $src2}",
-                  [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
-                                   X86_COND_NS, EFLAGS))]>,
-                  TB;
-def CMOVP16rr : I<0x4A, MRMSrcReg,       // if parity, GR16 = GR16
-                  (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
-                  "cmovp{w}\t{$src2, $dst|$dst, $src2}",
-                  [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
-                                   X86_COND_P, EFLAGS))]>,
-                  TB, OpSize;
-def CMOVP32rr : I<0x4A, MRMSrcReg,       // if parity, GR32 = GR32
-                  (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
-                  "cmovp{l}\t{$src2, $dst|$dst, $src2}",
-                  [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
-                                   X86_COND_P, EFLAGS))]>,
-                  TB;
-def CMOVNP16rr : I<0x4B, MRMSrcReg,      // if !parity, GR16 = GR16
-                  (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
-                  "cmovnp{w}\t{$src2, $dst|$dst, $src2}",
-                  [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
-                                   X86_COND_NP, EFLAGS))]>,
-                  TB, OpSize;
-def CMOVNP32rr : I<0x4B, MRMSrcReg,      // if !parity, GR32 = GR32
-                  (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
-                  "cmovnp{l}\t{$src2, $dst|$dst, $src2}",
-                  [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
-                                   X86_COND_NP, EFLAGS))]>,
-                  TB;
-def CMOVO16rr : I<0x40, MRMSrcReg,       // if overflow, GR16 = GR16
-                  (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
-                  "cmovo{w}\t{$src2, $dst|$dst, $src2}",
-                  [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
-                                   X86_COND_O, EFLAGS))]>,
-                  TB, OpSize;
-def CMOVO32rr : I<0x40, MRMSrcReg,       // if overflow, GR32 = GR32
-                  (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
-                  "cmovo{l}\t{$src2, $dst|$dst, $src2}",
-                  [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
-                                   X86_COND_O, EFLAGS))]>,
-                  TB;
-def CMOVNO16rr : I<0x41, MRMSrcReg,      // if !overflow, GR16 = GR16
-                  (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
-                  "cmovno{w}\t{$src2, $dst|$dst, $src2}",
-                  [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
-                                   X86_COND_NO, EFLAGS))]>,
-                  TB, OpSize;
-def CMOVNO32rr : I<0x41, MRMSrcReg,      // if !overflow, GR32 = GR32
-                  (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
-                  "cmovno{l}\t{$src2, $dst|$dst, $src2}",
-                  [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
-                                   X86_COND_NO, EFLAGS))]>,
-                  TB;
-} // isCommutable = 1
-
-def CMOVB16rm : I<0x42, MRMSrcMem,       // if <u, GR16 = [mem16]
-                  (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
-                  "cmovb{w}\t{$src2, $dst|$dst, $src2}",
-                  [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
-                                   X86_COND_B, EFLAGS))]>,
-                  TB, OpSize;
-def CMOVB32rm : I<0x42, MRMSrcMem,       // if <u, GR32 = [mem32]
-                  (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
-                  "cmovb{l}\t{$src2, $dst|$dst, $src2}",
-                  [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
-                                   X86_COND_B, EFLAGS))]>,
-                  TB;
-def CMOVAE16rm: I<0x43, MRMSrcMem,       // if >=u, GR16 = [mem16]
-                  (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
-                  "cmovae{w}\t{$src2, $dst|$dst, $src2}",
-                  [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
-                                   X86_COND_AE, EFLAGS))]>,
-                  TB, OpSize;
-def CMOVAE32rm: I<0x43, MRMSrcMem,       // if >=u, GR32 = [mem32]
-                  (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
-                  "cmovae{l}\t{$src2, $dst|$dst, $src2}",
-                  [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
-                                   X86_COND_AE, EFLAGS))]>,
-                  TB;
-def CMOVE16rm : I<0x44, MRMSrcMem,       // if ==, GR16 = [mem16]
-                  (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
-                  "cmove{w}\t{$src2, $dst|$dst, $src2}",
-                  [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
-                                   X86_COND_E, EFLAGS))]>,
-                  TB, OpSize;
-def CMOVE32rm : I<0x44, MRMSrcMem,       // if ==, GR32 = [mem32]
-                  (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
-                  "cmove{l}\t{$src2, $dst|$dst, $src2}",
-                  [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
-                                   X86_COND_E, EFLAGS))]>,
-                  TB;
-def CMOVNE16rm: I<0x45, MRMSrcMem,       // if !=, GR16 = [mem16]
-                  (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
-                  "cmovne{w}\t{$src2, $dst|$dst, $src2}",
-                  [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
-                                   X86_COND_NE, EFLAGS))]>,
-                  TB, OpSize;
-def CMOVNE32rm: I<0x45, MRMSrcMem,       // if !=, GR32 = [mem32]
-                  (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
-                  "cmovne{l}\t{$src2, $dst|$dst, $src2}",
-                  [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
-                                   X86_COND_NE, EFLAGS))]>,
-                  TB;
-def CMOVA16rm : I<0x47, MRMSrcMem,       // if >u, GR16 = [mem16]
-                  (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
-                  "cmova{w}\t{$src2, $dst|$dst, $src2}",
-                  [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
-                                   X86_COND_A, EFLAGS))]>,
-                  TB, OpSize;
-def CMOVA32rm : I<0x47, MRMSrcMem,       // if >u, GR32 = [mem32]
-                  (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
-                  "cmova{l}\t{$src2, $dst|$dst, $src2}",
-                  [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
-                                   X86_COND_A, EFLAGS))]>,
-                  TB;
-def CMOVL16rm : I<0x4C, MRMSrcMem,       // if <s, GR16 = [mem16]
-                  (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
-                  "cmovl{w}\t{$src2, $dst|$dst, $src2}",
-                  [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
-                                   X86_COND_L, EFLAGS))]>,
-                  TB, OpSize;
-def CMOVL32rm : I<0x4C, MRMSrcMem,       // if <s, GR32 = [mem32]
-                  (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
-                  "cmovl{l}\t{$src2, $dst|$dst, $src2}",
-                  [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
-                                   X86_COND_L, EFLAGS))]>,
-                  TB;
-def CMOVGE16rm: I<0x4D, MRMSrcMem,       // if >=s, GR16 = [mem16]
-                  (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
-                  "cmovge{w}\t{$src2, $dst|$dst, $src2}",
-                  [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
-                                   X86_COND_GE, EFLAGS))]>,
-                  TB, OpSize;
-def CMOVGE32rm: I<0x4D, MRMSrcMem,       // if >=s, GR32 = [mem32]
-                  (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
-                  "cmovge{l}\t{$src2, $dst|$dst, $src2}",
-                  [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
-                                   X86_COND_GE, EFLAGS))]>,
-                  TB;
-def CMOVLE16rm: I<0x4E, MRMSrcMem,       // if <=s, GR16 = [mem16]
-                  (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
-                  "cmovle{w}\t{$src2, $dst|$dst, $src2}",
-                  [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
-                                   X86_COND_LE, EFLAGS))]>,
-                  TB, OpSize;
-def CMOVLE32rm: I<0x4E, MRMSrcMem,       // if <=s, GR32 = [mem32]
-                  (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
-                  "cmovle{l}\t{$src2, $dst|$dst, $src2}",
-                  [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
-                                   X86_COND_LE, EFLAGS))]>,
-                  TB;
-def CMOVG16rm : I<0x4F, MRMSrcMem,       // if >s, GR16 = [mem16]
-                  (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
-                  "cmovg{w}\t{$src2, $dst|$dst, $src2}",
-                  [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
-                                   X86_COND_G, EFLAGS))]>,
-                  TB, OpSize;
-def CMOVG32rm : I<0x4F, MRMSrcMem,       // if >s, GR32 = [mem32]
-                  (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
-                  "cmovg{l}\t{$src2, $dst|$dst, $src2}",
-                  [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
-                                   X86_COND_G, EFLAGS))]>,
-                  TB;
-def CMOVS16rm : I<0x48, MRMSrcMem,       // if signed, GR16 = [mem16]
-                  (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
-                  "cmovs{w}\t{$src2, $dst|$dst, $src2}",
-                  [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
-                                   X86_COND_S, EFLAGS))]>,
-                  TB, OpSize;
-def CMOVS32rm : I<0x48, MRMSrcMem,       // if signed, GR32 = [mem32]
-                  (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
-                  "cmovs{l}\t{$src2, $dst|$dst, $src2}",
-                  [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
-                                   X86_COND_S, EFLAGS))]>,
-                  TB;
-def CMOVNS16rm: I<0x49, MRMSrcMem,       // if !signed, GR16 = [mem16]
-                  (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
-                  "cmovns{w}\t{$src2, $dst|$dst, $src2}",
-                  [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
-                                   X86_COND_NS, EFLAGS))]>,
-                  TB, OpSize;
-def CMOVNS32rm: I<0x49, MRMSrcMem,       // if !signed, GR32 = [mem32]
-                  (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
-                  "cmovns{l}\t{$src2, $dst|$dst, $src2}",
-                  [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
-                                   X86_COND_NS, EFLAGS))]>,
-                  TB;
-def CMOVP16rm : I<0x4A, MRMSrcMem,       // if parity, GR16 = [mem16]
-                  (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
-                  "cmovp{w}\t{$src2, $dst|$dst, $src2}",
-                  [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
-                                   X86_COND_P, EFLAGS))]>,
-                  TB, OpSize;
-def CMOVP32rm : I<0x4A, MRMSrcMem,       // if parity, GR32 = [mem32]
-                  (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
-                  "cmovp{l}\t{$src2, $dst|$dst, $src2}",
-                  [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
-                                   X86_COND_P, EFLAGS))]>,
-                  TB;
-def CMOVNP16rm : I<0x4B, MRMSrcMem,      // if !parity, GR16 = [mem16]
-                  (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
-                  "cmovnp{w}\t{$src2, $dst|$dst, $src2}",
-                  [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
-                                   X86_COND_NP, EFLAGS))]>,
-                  TB, OpSize;
-def CMOVNP32rm : I<0x4B, MRMSrcMem,      // if !parity, GR32 = [mem32]
-                  (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
-                  "cmovnp{l}\t{$src2, $dst|$dst, $src2}",
-                  [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
-                                   X86_COND_NP, EFLAGS))]>,
-                  TB;
-def CMOVO16rm : I<0x40, MRMSrcMem,       // if overflow, GR16 = [mem16]
-                  (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
-                  "cmovo{w}\t{$src2, $dst|$dst, $src2}",
-                  [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
-                                   X86_COND_O, EFLAGS))]>,
-                  TB, OpSize;
-def CMOVO32rm : I<0x40, MRMSrcMem,       // if overflow, GR32 = [mem32]
-                  (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
-                  "cmovo{l}\t{$src2, $dst|$dst, $src2}",
-                  [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
-                                   X86_COND_O, EFLAGS))]>,
-                  TB;
-def CMOVNO16rm : I<0x41, MRMSrcMem,      // if !overflow, GR16 = [mem16]
-                  (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
-                  "cmovno{w}\t{$src2, $dst|$dst, $src2}",
-                  [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
-                                   X86_COND_NO, EFLAGS))]>,
-                  TB, OpSize;
-def CMOVNO32rm : I<0x41, MRMSrcMem,      // if !overflow, GR32 = [mem32]
-                  (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
-                  "cmovno{l}\t{$src2, $dst|$dst, $src2}",
-                  [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
-                                   X86_COND_NO, EFLAGS))]>,
-                  TB;
-} // Predicates = [HasCMov]
-
-// X86 doesn't have 8-bit conditional moves. Use a customInserter to
-// emit control flow. An alternative to this is to mark i8 SELECT as Promote,
-// however that requires promoting the operands, and can induce additional
-// i8 register pressure. Note that CMOV_GR8 is conservatively considered to
-// clobber EFLAGS, because if one of the operands is zero, the expansion
-// could involve an xor.
-let usesCustomInserter = 1, Constraints = "", Defs = [EFLAGS] in {
-def CMOV_GR8 : I<0, Pseudo,
-                 (outs GR8:$dst), (ins GR8:$src1, GR8:$src2, i8imm:$cond),
-                 "#CMOV_GR8 PSEUDO!",
-                 [(set GR8:$dst, (X86cmov GR8:$src1, GR8:$src2,
-                                          imm:$cond, EFLAGS))]>;
-
-let Predicates = [NoCMov] in {
-def CMOV_GR32 : I<0, Pseudo,
-                  (outs GR32:$dst), (ins GR32:$src1, GR32:$src2, i8imm:$cond),
-                  "#CMOV_GR32* PSEUDO!",
-                  [(set GR32:$dst,
-                    (X86cmov GR32:$src1, GR32:$src2, imm:$cond, EFLAGS))]>;
-def CMOV_GR16 : I<0, Pseudo,
-                  (outs GR16:$dst), (ins GR16:$src1, GR16:$src2, i8imm:$cond),
-                  "#CMOV_GR16* PSEUDO!",
-                  [(set GR16:$dst,
-                    (X86cmov GR16:$src1, GR16:$src2, imm:$cond, EFLAGS))]>;
-def CMOV_RFP32 : I<0, Pseudo,
-                   (outs RFP32:$dst),
-                   (ins RFP32:$src1, RFP32:$src2, i8imm:$cond),
-                   "#CMOV_RFP32 PSEUDO!",
-                   [(set RFP32:$dst,
-                     (X86cmov RFP32:$src1, RFP32:$src2, imm:$cond,
-                              EFLAGS))]>;
-def CMOV_RFP64 : I<0, Pseudo,
-                   (outs RFP64:$dst),
-                   (ins RFP64:$src1, RFP64:$src2, i8imm:$cond),
-                   "#CMOV_RFP64 PSEUDO!",
-                   [(set RFP64:$dst,
-                     (X86cmov RFP64:$src1, RFP64:$src2, imm:$cond,
-                              EFLAGS))]>;
-def CMOV_RFP80 : I<0, Pseudo,
-                   (outs RFP80:$dst),
-                   (ins RFP80:$src1, RFP80:$src2, i8imm:$cond),
-                   "#CMOV_RFP80 PSEUDO!",
-                   [(set RFP80:$dst,
-                     (X86cmov RFP80:$src1, RFP80:$src2, imm:$cond,
-                              EFLAGS))]>;
-} // Predicates = [NoCMov]
-} // UsesCustomInserter = 1, Constraints = "", Defs = [EFLAGS]
-} // Uses = [EFLAGS]
-
-} // Constraints = "$src1 = $dst" in
-
-
-// Conditional moves
-let Uses = [EFLAGS], Constraints = "$src1 = $dst" in {
-let isCommutable = 1 in {
-def CMOVB64rr : RI<0x42, MRMSrcReg,      // if <u, GR64 = GR64
-                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
-                   "cmovb{q}\t{$src2, $dst|$dst, $src2}",
-                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
-                                     X86_COND_B, EFLAGS))]>, TB;
-def CMOVAE64rr: RI<0x43, MRMSrcReg,      // if >=u, GR64 = GR64
-                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
-                   "cmovae{q}\t{$src2, $dst|$dst, $src2}",
-                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
-                                     X86_COND_AE, EFLAGS))]>, TB;
-def CMOVE64rr : RI<0x44, MRMSrcReg,      // if ==, GR64 = GR64
-                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
-                   "cmove{q}\t{$src2, $dst|$dst, $src2}",
-                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
-                                     X86_COND_E, EFLAGS))]>, TB;
-def CMOVNE64rr: RI<0x45, MRMSrcReg,      // if !=, GR64 = GR64
-                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
-                   "cmovne{q}\t{$src2, $dst|$dst, $src2}",
-                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
-                                     X86_COND_NE, EFLAGS))]>, TB;
-def CMOVA64rr : RI<0x47, MRMSrcReg,      // if >u, GR64 = GR64
-                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
-                   "cmova{q}\t{$src2, $dst|$dst, $src2}",
-                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
-                                     X86_COND_A, EFLAGS))]>, TB;
-def CMOVL64rr : RI<0x4C, MRMSrcReg,      // if <s, GR64 = GR64
-                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
-                   "cmovl{q}\t{$src2, $dst|$dst, $src2}",
-                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
-                                     X86_COND_L, EFLAGS))]>, TB;
-def CMOVGE64rr: RI<0x4D, MRMSrcReg,      // if >=s, GR64 = GR64
-                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
-                   "cmovge{q}\t{$src2, $dst|$dst, $src2}",
-                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
-                                     X86_COND_GE, EFLAGS))]>, TB;
-def CMOVLE64rr: RI<0x4E, MRMSrcReg,      // if <=s, GR64 = GR64
-                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
-                   "cmovle{q}\t{$src2, $dst|$dst, $src2}",
-                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
-                                     X86_COND_LE, EFLAGS))]>, TB;
-def CMOVG64rr : RI<0x4F, MRMSrcReg,      // if >s, GR64 = GR64
-                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
-                   "cmovg{q}\t{$src2, $dst|$dst, $src2}",
-                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
-                                     X86_COND_G, EFLAGS))]>, TB;
-def CMOVS64rr : RI<0x48, MRMSrcReg,      // if signed, GR64 = GR64
-                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
-                   "cmovs{q}\t{$src2, $dst|$dst, $src2}",
-                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
-                                     X86_COND_S, EFLAGS))]>, TB;
-def CMOVNS64rr: RI<0x49, MRMSrcReg,      // if !signed, GR64 = GR64
-                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
-                   "cmovns{q}\t{$src2, $dst|$dst, $src2}",
-                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
-                                     X86_COND_NS, EFLAGS))]>, TB;
-def CMOVP64rr : RI<0x4A, MRMSrcReg,      // if parity, GR64 = GR64
-                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
-                   "cmovp{q}\t{$src2, $dst|$dst, $src2}",
-                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
-                                     X86_COND_P, EFLAGS))]>, TB;
-def CMOVNP64rr : RI<0x4B, MRMSrcReg,     // if !parity, GR64 = GR64
-                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
-                   "cmovnp{q}\t{$src2, $dst|$dst, $src2}",
-                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
-                                     X86_COND_NP, EFLAGS))]>, TB;
-def CMOVO64rr : RI<0x40, MRMSrcReg,      // if overflow, GR64 = GR64
-                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
-                   "cmovo{q}\t{$src2, $dst|$dst, $src2}",
-                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
-                                     X86_COND_O, EFLAGS))]>, TB;
-def CMOVNO64rr : RI<0x41, MRMSrcReg,     // if !overflow, GR64 = GR64
-                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
-                   "cmovno{q}\t{$src2, $dst|$dst, $src2}",
-                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
-                                     X86_COND_NO, EFLAGS))]>, TB;
-} // isCommutable = 1
-
-def CMOVB64rm : RI<0x42, MRMSrcMem,      // if <u, GR64 = [mem64]
-                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
-                   "cmovb{q}\t{$src2, $dst|$dst, $src2}",
-                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
-                                     X86_COND_B, EFLAGS))]>, TB;
-def CMOVAE64rm: RI<0x43, MRMSrcMem,      // if >=u, GR64 = [mem64]
-                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
-                   "cmovae{q}\t{$src2, $dst|$dst, $src2}",
-                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
-                                     X86_COND_AE, EFLAGS))]>, TB;
-def CMOVE64rm : RI<0x44, MRMSrcMem,      // if ==, GR64 = [mem64]
-                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
-                   "cmove{q}\t{$src2, $dst|$dst, $src2}",
-                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
-                                     X86_COND_E, EFLAGS))]>, TB;
-def CMOVNE64rm: RI<0x45, MRMSrcMem,      // if !=, GR64 = [mem64]
-                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
-                   "cmovne{q}\t{$src2, $dst|$dst, $src2}",
-                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
-                                     X86_COND_NE, EFLAGS))]>, TB;
-def CMOVA64rm : RI<0x47, MRMSrcMem,      // if >u, GR64 = [mem64]
-                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
-                   "cmova{q}\t{$src2, $dst|$dst, $src2}",
-                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
-                                     X86_COND_A, EFLAGS))]>, TB;
-def CMOVL64rm : RI<0x4C, MRMSrcMem,      // if <s, GR64 = [mem64]
-                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
-                   "cmovl{q}\t{$src2, $dst|$dst, $src2}",
-                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
-                                     X86_COND_L, EFLAGS))]>, TB;
-def CMOVGE64rm: RI<0x4D, MRMSrcMem,      // if >=s, GR64 = [mem64]
-                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
-                   "cmovge{q}\t{$src2, $dst|$dst, $src2}",
-                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
-                                     X86_COND_GE, EFLAGS))]>, TB;
-def CMOVLE64rm: RI<0x4E, MRMSrcMem,      // if <=s, GR64 = [mem64]
-                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
-                   "cmovle{q}\t{$src2, $dst|$dst, $src2}",
-                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
-                                     X86_COND_LE, EFLAGS))]>, TB;
-def CMOVG64rm : RI<0x4F, MRMSrcMem,      // if >s, GR64 = [mem64]
-                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
-                   "cmovg{q}\t{$src2, $dst|$dst, $src2}",
-                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
-                                     X86_COND_G, EFLAGS))]>, TB;
-def CMOVS64rm : RI<0x48, MRMSrcMem,      // if signed, GR64 = [mem64]
-                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
-                   "cmovs{q}\t{$src2, $dst|$dst, $src2}",
-                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
-                                     X86_COND_S, EFLAGS))]>, TB;
-def CMOVNS64rm: RI<0x49, MRMSrcMem,      // if !signed, GR64 = [mem64]
-                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
-                   "cmovns{q}\t{$src2, $dst|$dst, $src2}",
-                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
-                                     X86_COND_NS, EFLAGS))]>, TB;
-def CMOVP64rm : RI<0x4A, MRMSrcMem,      // if parity, GR64 = [mem64]
-                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
-                   "cmovp{q}\t{$src2, $dst|$dst, $src2}",
-                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
-                                     X86_COND_P, EFLAGS))]>, TB;
-def CMOVNP64rm : RI<0x4B, MRMSrcMem,     // if !parity, GR64 = [mem64]
-                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
-                   "cmovnp{q}\t{$src2, $dst|$dst, $src2}",
-                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
-                                     X86_COND_NP, EFLAGS))]>, TB;
-def CMOVO64rm : RI<0x40, MRMSrcMem,      // if overflow, GR64 = [mem64]
-                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
-                   "cmovo{q}\t{$src2, $dst|$dst, $src2}",
-                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
-                                     X86_COND_O, EFLAGS))]>, TB;
-def CMOVNO64rm : RI<0x41, MRMSrcMem,     // if !overflow, GR64 = [mem64]
-                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
-                   "cmovno{q}\t{$src2, $dst|$dst, $src2}",
-                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
-                                     X86_COND_NO, EFLAGS))]>, TB;
-} // Constraints = "$src1 = $dst"
+defm CMOVA  : CMOV<0x47, "cmova" , X86_COND_A>;
+defm CMOVS  : CMOV<0x48, "cmovs" , X86_COND_S>;
+defm CMOVNS : CMOV<0x49, "cmovns", X86_COND_NS>;
+defm CMOVP  : CMOV<0x4A, "cmovp" , X86_COND_P>;
+defm CMOVNP : CMOV<0x4B, "cmovnp", X86_COND_NP>;
+defm CMOVL  : CMOV<0x4C, "cmovl" , X86_COND_L>;
+defm CMOVGE : CMOV<0x4D, "cmovge", X86_COND_GE>;
+defm CMOVLE : CMOV<0x4E, "cmovle", X86_COND_LE>;
+defm CMOVG  : CMOV<0x4F, "cmovg" , X86_COND_G>;
 
 // SetCC instructions.
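Each defm line above replaces six hand-written instruction definitions: the CMOV multiclass (only fragments of it are visible in the first hunk) stamps out register-register and register-memory variants for 16-, 32-, and 64-bit operands, deriving the mnemonic suffix, encoding, and selection pattern from its three parameters. Roughly, `defm CMOVE : CMOV<0x44, "cmove", X86_COND_E>` regenerates the equivalent of the deleted definitions; a sketch reconstructed from them, not the literal multiclass output:

    def CMOVE16rr : I<0x44, MRMSrcReg,
                      (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
                      "cmove{w}\t{$src2, $dst|$dst, $src2}",
                      [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
                                       X86_COND_E, EFLAGS))]>, TB, OpSize;
    // ...plus CMOVE32rr/CMOVE64rr, and the CMOVE16rm/32rm/64rm memory
    // forms, whose patterns fold a (loadi16/loadi32/loadi64 addr:$src2)
    // operand in place of the second register.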
diff --git a/lib/Target/X86/X86InstrCompiler.td b/lib/Target/X86/X86InstrCompiler.td
index 39901764052..b8e1b808171 100644
--- a/lib/Target/X86/X86InstrCompiler.td
+++ b/lib/Target/X86/X86InstrCompiler.td
@@ -273,6 +273,67 @@ def TLSCall_64 : I<0, Pseudo, (outs), (ins i64mem:$sym),
                   [(X86TLSCall addr:$sym)]>,
                   Requires<[In64BitMode]>;
 
+
+//===----------------------------------------------------------------------===//
+// Conditional Move Pseudo Instructions
+
+let Constraints = "$src1 = $dst" in {
+
+// Conditional moves
+let Uses = [EFLAGS] in {
+
+// X86 doesn't have 8-bit conditional moves. Use a customInserter to
+// emit control flow. An alternative to this is to mark i8 SELECT as Promote,
+// however that requires promoting the operands, and can induce additional
+// i8 register pressure. Note that CMOV_GR8 is conservatively considered to
+// clobber EFLAGS, because if one of the operands is zero, the expansion
+// could involve an xor.
+let usesCustomInserter = 1, Constraints = "", Defs = [EFLAGS] in {
+def CMOV_GR8 : I<0, Pseudo,
+                 (outs GR8:$dst), (ins GR8:$src1, GR8:$src2, i8imm:$cond),
+                 "#CMOV_GR8 PSEUDO!",
+                 [(set GR8:$dst, (X86cmov GR8:$src1, GR8:$src2,
+                                          imm:$cond, EFLAGS))]>;
+
+let Predicates = [NoCMov] in {
+def CMOV_GR32 : I<0, Pseudo,
+                  (outs GR32:$dst), (ins GR32:$src1, GR32:$src2, i8imm:$cond),
+                  "#CMOV_GR32* PSEUDO!",
+                  [(set GR32:$dst,
+                    (X86cmov GR32:$src1, GR32:$src2, imm:$cond, EFLAGS))]>;
+def CMOV_GR16 : I<0, Pseudo,
+                  (outs GR16:$dst), (ins GR16:$src1, GR16:$src2, i8imm:$cond),
+                  "#CMOV_GR16* PSEUDO!",
+                  [(set GR16:$dst,
+                    (X86cmov GR16:$src1, GR16:$src2, imm:$cond, EFLAGS))]>;
+def CMOV_RFP32 : I<0, Pseudo,
+                   (outs RFP32:$dst),
+                   (ins RFP32:$src1, RFP32:$src2, i8imm:$cond),
+                   "#CMOV_RFP32 PSEUDO!",
+                   [(set RFP32:$dst,
+                     (X86cmov RFP32:$src1, RFP32:$src2, imm:$cond,
+                              EFLAGS))]>;
+def CMOV_RFP64 : I<0, Pseudo,
+                   (outs RFP64:$dst),
+                   (ins RFP64:$src1, RFP64:$src2, i8imm:$cond),
+                   "#CMOV_RFP64 PSEUDO!",
+                   [(set RFP64:$dst,
+                     (X86cmov RFP64:$src1, RFP64:$src2, imm:$cond,
+                              EFLAGS))]>;
+def CMOV_RFP80 : I<0, Pseudo,
+                   (outs RFP80:$dst),
+                   (ins RFP80:$src1, RFP80:$src2, i8imm:$cond),
+                   "#CMOV_RFP80 PSEUDO!",
+                   [(set RFP80:$dst,
+                     (X86cmov RFP80:$src1, RFP80:$src2, imm:$cond,
+                              EFLAGS))]>;
+} // Predicates = [NoCMov]
+} // UsesCustomInserter = 1, Constraints = "", Defs = [EFLAGS]
+} // Uses = [EFLAGS]
+
+} // Constraints = "$src1 = $dst" in
+
+
 //===----------------------------------------------------------------------===//
 // Atomic Instruction Pseudo Instructions
 //===----------------------------------------------------------------------===//
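Note the structural difference between the two files: a real CMOV definition fixes its condition code inside the selection pattern, so one definition is needed per condition (hence the sixteen defm lines above), while each pseudo carries the condition as an i8imm operand. One pseudo per register class therefore matches X86cmov with any condition, and the custom inserter reads $cond when it expands the pseudo into branch-based control flow. Schematically (a sketch of the two pattern shapes, not literal file contents):

    // Real instruction: the condition (here X86_COND_E) is fixed at tblgen time.
    [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2, X86_COND_E, EFLAGS))]

    // Pseudo: the condition is an immediate operand, so one definition covers
    // every condition; the custom inserter consumes $cond during expansion.
    [(set GR8:$dst, (X86cmov GR8:$src1, GR8:$src2, imm:$cond, EFLAGS))]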