diff --git a/lib/Target/X86/X86InstrCompiler.td b/lib/Target/X86/X86InstrCompiler.td
index a8aff44d06c..370e4d875c6 100644
--- a/lib/Target/X86/X86InstrCompiler.td
+++ b/lib/Target/X86/X86InstrCompiler.td
@@ -1530,62 +1530,32 @@ def : Pat<(shl GR64:$src1, (i8 1)), (ADD64rr GR64:$src1, GR64:$src1)>;
 def immShift32 : ImmLeaf<i8, [{ return CountTrailingOnes_64(Imm) >= 5; }]>;
 def immShift64 : ImmLeaf<i8, [{ return CountTrailingOnes_64(Imm) >= 6; }]>;
 
-// (shl x (and y, 31)) ==> (shl x, y)
-def : Pat<(shl GR8:$src1, (and CL, immShift32)),
-          (SHL8rCL GR8:$src1)>;
-def : Pat<(shl GR16:$src1, (and CL, immShift32)),
-          (SHL16rCL GR16:$src1)>;
-def : Pat<(shl GR32:$src1, (and CL, immShift32)),
-          (SHL32rCL GR32:$src1)>;
-def : Pat<(store (shl (loadi8 addr:$dst), (and CL, immShift32)), addr:$dst),
-          (SHL8mCL addr:$dst)>;
-def : Pat<(store (shl (loadi16 addr:$dst), (and CL, immShift32)), addr:$dst),
-          (SHL16mCL addr:$dst)>;
-def : Pat<(store (shl (loadi32 addr:$dst), (and CL, immShift32)), addr:$dst),
-          (SHL32mCL addr:$dst)>;
+// Shift amount is implicitly masked.
+multiclass MaskedShiftAmountPats<SDNode frag, string name> {
+  // (shift x (and y, 31)) ==> (shift x, y)
+  def : Pat<(frag GR8:$src1, (and CL, immShift32)),
+            (!cast<Instruction>(name # "8rCL") GR8:$src1)>;
+  def : Pat<(frag GR16:$src1, (and CL, immShift32)),
+            (!cast<Instruction>(name # "16rCL") GR16:$src1)>;
+  def : Pat<(frag GR32:$src1, (and CL, immShift32)),
+            (!cast<Instruction>(name # "32rCL") GR32:$src1)>;
+  def : Pat<(store (frag (loadi8 addr:$dst), (and CL, immShift32)), addr:$dst),
+            (!cast<Instruction>(name # "8mCL") addr:$dst)>;
+  def : Pat<(store (frag (loadi16 addr:$dst), (and CL, immShift32)), addr:$dst),
+            (!cast<Instruction>(name # "16mCL") addr:$dst)>;
+  def : Pat<(store (frag (loadi32 addr:$dst), (and CL, immShift32)), addr:$dst),
+            (!cast<Instruction>(name # "32mCL") addr:$dst)>;
 
-def : Pat<(srl GR8:$src1, (and CL, immShift32)),
-          (SHR8rCL GR8:$src1)>;
-def : Pat<(srl GR16:$src1, (and CL, immShift32)),
-          (SHR16rCL GR16:$src1)>;
-def : Pat<(srl GR32:$src1, (and CL, immShift32)),
-          (SHR32rCL GR32:$src1)>;
-def : Pat<(store (srl (loadi8 addr:$dst), (and CL, immShift32)), addr:$dst),
-          (SHR8mCL addr:$dst)>;
-def : Pat<(store (srl (loadi16 addr:$dst), (and CL, immShift32)), addr:$dst),
-          (SHR16mCL addr:$dst)>;
-def : Pat<(store (srl (loadi32 addr:$dst), (and CL, immShift32)), addr:$dst),
-          (SHR32mCL addr:$dst)>;
-
-def : Pat<(sra GR8:$src1, (and CL, immShift32)),
-          (SAR8rCL GR8:$src1)>;
-def : Pat<(sra GR16:$src1, (and CL, immShift32)),
-          (SAR16rCL GR16:$src1)>;
-def : Pat<(sra GR32:$src1, (and CL, immShift32)),
-          (SAR32rCL GR32:$src1)>;
-def : Pat<(store (sra (loadi8 addr:$dst), (and CL, immShift32)), addr:$dst),
-          (SAR8mCL addr:$dst)>;
-def : Pat<(store (sra (loadi16 addr:$dst), (and CL, immShift32)), addr:$dst),
-          (SAR16mCL addr:$dst)>;
-def : Pat<(store (sra (loadi32 addr:$dst), (and CL, immShift32)), addr:$dst),
-          (SAR32mCL addr:$dst)>;
-
-// (shl x (and y, 63)) ==> (shl x, y)
-def : Pat<(shl GR64:$src1, (and CL, immShift64)),
-          (SHL64rCL GR64:$src1)>;
-def : Pat<(store (shl (loadi64 addr:$dst), (and CL, 63)), addr:$dst),
-          (SHL64mCL addr:$dst)>;
-
-def : Pat<(srl GR64:$src1, (and CL, immShift64)),
-          (SHR64rCL GR64:$src1)>;
-def : Pat<(store (srl (loadi64 addr:$dst), (and CL, 63)), addr:$dst),
-          (SHR64mCL addr:$dst)>;
-
-def : Pat<(sra GR64:$src1, (and CL, immShift64)),
-          (SAR64rCL GR64:$src1)>;
-def : Pat<(store (sra (loadi64 addr:$dst), (and CL, 63)), addr:$dst),
-          (SAR64mCL addr:$dst)>;
+  // (shift x (and y, 63)) ==> (shift x, y)
+  def : Pat<(frag GR64:$src1, (and CL, immShift64)),
+            (!cast<Instruction>(name # "64rCL") GR64:$src1)>;
+  def : Pat<(store (frag (loadi64 addr:$dst), (and CL, 63)), addr:$dst),
+            (!cast<Instruction>(name # "64mCL") addr:$dst)>;
+}
+
+defm : MaskedShiftAmountPats<shl, "SHL">;
+defm : MaskedShiftAmountPats<srl, "SHR">;
+defm : MaskedShiftAmountPats<sra, "SAR">;
 
 // (anyext (setcc_carry)) -> (setcc_carry)
 def : Pat<(i16 (anyext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),