Rename isSimpleLoad to canFoldAsLoad, to better reflect its meaning.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@60487 91177308-0d34-0410-b5e6-96231b3b80d8
Dan Gohman
2008-12-03 18:15:48 +00:00
parent 8cf7713753
commit 15511cf166
22 changed files with 54 additions and 54 deletions

View File

@@ -138,7 +138,7 @@ file prints this (at the time of this writing):</p>
 <b>bit</b> isIndirectBranch = 0;
 <b>bit</b> isBarrier = 0;
 <b>bit</b> isCall = 0;
-<b>bit</b> isSimpleLoad = 0;
+<b>bit</b> canFoldAsLoad = 0;
 <b>bit</b> mayLoad = 0;
 <b>bit</b> mayStore = 0;
 <b>bit</b> isImplicitDef = 0;

View File

@@ -189,7 +189,7 @@ class Instruction {
 bit isIndirectBranch = 0; // Is this instruction an indirect branch?
 bit isBarrier = 0; // Can control flow fall through this instruction?
 bit isCall = 0; // Is this instruction a call instruction?
-bit isSimpleLoad = 0; // Can this be folded as a memory operand?
+bit canFoldAsLoad = 0; // Can this be folded as a simple memory operand?
 bit mayLoad = 0; // Is it possible for this inst to read memory?
 bit mayStore = 0; // Is it possible for this inst to write memory?
 bit isTwoAddress = 0; // Is this a two address instruction?

View File

@@ -90,7 +90,7 @@ namespace TID {
 Predicable,
 NotDuplicable,
 DelaySlot,
-SimpleLoad,
+FoldableAsLoad,
 MayLoad,
 MayStore,
 UnmodeledSideEffects,
@@ -301,7 +301,7 @@ public:
 return Flags & (1 << TID::DelaySlot);
 }
-/// isSimpleLoad - Return true for instructions that can be folded as
+/// canFoldAsLoad - Return true for instructions that can be folded as
 /// memory operands in other instructions. The most common use for this
 /// is instructions that are simple loads from memory that don't modify
 /// the loaded value in any way, but it can also be used for instructions
@@ -309,8 +309,8 @@ public:
 /// on x86, to allow them to be folded when it is beneficial.
 /// This should only be set on instructions that return a value in their
 /// only virtual register definition.
-bool isSimpleLoad() const {
-return Flags & (1 << TID::SimpleLoad);
+bool canFoldAsLoad() const {
+return Flags & (1 << TID::FoldableAsLoad);
 }
 //===--------------------------------------------------------------------===//
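
For orientation only: the sketch below is not part of this commit. It shows how a client of this API might use the renamed query to decide whether a defining instruction is a candidate for folding, mirroring the isLoadFromStackSlot/canFoldAsLoad checks in the addIntervalsForSpills hunks further down. The helper name isFoldableLoadCandidate and the surrounding structure are illustrative assumptions, not code from the tree.

    // Illustrative sketch only; the helper name and header choices are assumptions.
    #include "llvm/CodeGen/MachineInstr.h"
    #include "llvm/Target/TargetInstrDesc.h"
    #include "llvm/Target/TargetInstrInfo.h"
    using namespace llvm;

    // Hypothetical helper: is DefMI even a candidate for being folded into a
    // user instruction as a memory operand?
    static bool isFoldableLoadCandidate(MachineInstr *DefMI,
                                        const TargetInstrInfo *TII) {
      int FrameIndex = 0;
      // Reloads from a stack slot are recognized directly by the target hook.
      if (TII->isLoadFromStackSlot(DefMI, FrameIndex))
        return true;
      // Otherwise the instruction must opt in through the TableGen bit
      // 'canFoldAsLoad = 1', which the emitter records as TID::FoldableAsLoad
      // and TargetInstrDesc exposes as canFoldAsLoad().
      return DefMI->getDesc().canFoldAsLoad();
    }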

View File

@@ -1841,7 +1841,7 @@ addIntervalsForSpills(const LiveInterval &li,
 int LdSlot = 0;
 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
 bool isLoad = isLoadSS ||
-(DefIsReMat && (ReMatDefMI->getDesc().isSimpleLoad()));
+(DefIsReMat && (ReMatDefMI->getDesc().canFoldAsLoad()));
 bool IsFirstRange = true;
 for (LiveInterval::Ranges::const_iterator
 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
@@ -1927,7 +1927,7 @@ addIntervalsForSpills(const LiveInterval &li,
 int LdSlot = 0;
 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
 bool isLoad = isLoadSS ||
-(DefIsReMat && ReMatDefMI->getDesc().isSimpleLoad());
+(DefIsReMat && ReMatDefMI->getDesc().canFoldAsLoad());
 rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
 CanDelete, vrm, rc, ReMatIds, loopInfo,
@@ -2056,7 +2056,7 @@ addIntervalsForSpills(const LiveInterval &li,
 int LdSlot = 0;
 bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
 // If the rematerializable def is a load, also try to fold it.
-if (isLoadSS || ReMatDefMI->getDesc().isSimpleLoad())
+if (isLoadSS || ReMatDefMI->getDesc().canFoldAsLoad())
 Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
 Ops, isLoadSS, LdSlot, VReg);
 unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);

View File

@@ -479,7 +479,7 @@ def PICADD : AXI1<0b0100, (outs GPR:$dst), (ins GPR:$a, pclabel:$cp, pred:$p),
 [(set GPR:$dst, (ARMpic_add GPR:$a, imm:$cp))]>;
 let AddedComplexity = 10 in {
-let isSimpleLoad = 1 in
+let canFoldAsLoad = 1 in
 def PICLDR : AXI2ldw<(outs GPR:$dst), (ins addrmodepc:$addr, pred:$p),
 Pseudo, "${addr:label}:\n\tldr$p $dst, $addr",
 [(set GPR:$dst, (load addrmodepc:$addr))]>;
@@ -614,13 +614,13 @@ let isBranch = 1, isTerminator = 1 in {
 //
 // Load
-let isSimpleLoad = 1 in
+let canFoldAsLoad = 1 in
 def LDR : AI2ldw<(outs GPR:$dst), (ins addrmode2:$addr), LdFrm,
 "ldr", " $dst, $addr",
 [(set GPR:$dst, (load addrmode2:$addr))]>;
 // Special LDR for loads from non-pc-relative constpools.
-let isSimpleLoad = 1, mayLoad = 1, isReMaterializable = 1 in
+let canFoldAsLoad = 1, mayLoad = 1, isReMaterializable = 1 in
 def LDRcp : AI2ldw<(outs GPR:$dst), (ins addrmode2:$addr), LdFrm,
 "ldr", " $dst, $addr", []>;

View File

@@ -192,7 +192,7 @@ let isBranch = 1, isTerminator = 1 in
 // Load Store Instructions.
 //
-let isSimpleLoad = 1 in
+let canFoldAsLoad = 1 in
 def tLDR : TI4<(outs GPR:$dst), (ins t_addrmode_s4:$addr),
 "ldr $dst, $addr",
 [(set GPR:$dst, (load t_addrmode_s4:$addr))]>;
@@ -213,25 +213,25 @@ def tLDRSH : TI2<(outs GPR:$dst), (ins t_addrmode_rr:$addr),
 "ldrsh $dst, $addr",
 [(set GPR:$dst, (sextloadi16 t_addrmode_rr:$addr))]>;
-let isSimpleLoad = 1 in
+let canFoldAsLoad = 1 in
 def tLDRspi : TIs<(outs GPR:$dst), (ins t_addrmode_sp:$addr),
 "ldr $dst, $addr",
 [(set GPR:$dst, (load t_addrmode_sp:$addr))]>;
 // Special instruction for restore. It cannot clobber condition register
 // when it's expanded by eliminateCallFramePseudoInstr().
-let isSimpleLoad = 1, mayLoad = 1 in
+let canFoldAsLoad = 1, mayLoad = 1 in
 def tRestore : TIs<(outs GPR:$dst), (ins t_addrmode_sp:$addr),
 "ldr $dst, $addr", []>;
 // Load tconstpool
-let isSimpleLoad = 1 in
+let canFoldAsLoad = 1 in
 def tLDRpci : TIs<(outs GPR:$dst), (ins i32imm:$addr),
 "ldr $dst, $addr",
 [(set GPR:$dst, (load (ARMWrapper tconstpool:$addr)))]>;
 // Special LDR for loads from non-pc-relative constpools.
-let isSimpleLoad = 1, mayLoad = 1, isReMaterializable = 1 in
+let canFoldAsLoad = 1, mayLoad = 1, isReMaterializable = 1 in
 def tLDRcp : TIs<(outs GPR:$dst), (ins i32imm:$addr),
 "ldr $dst, $addr", []>;

View File

@@ -34,7 +34,7 @@ def arm_fmdrr : SDNode<"ARMISD::FMDRR", SDT_FMDRR>;
 // Load / store Instructions.
 //
-let isSimpleLoad = 1 in {
+let canFoldAsLoad = 1 in {
 def FLDD : ADI5<0b1101, 0b01, (outs DPR:$dst), (ins addrmode5:$addr),
 "fldd", " $dst, $addr",
 [(set DPR:$dst, (load addrmode5:$addr))]>;
@@ -42,7 +42,7 @@ def FLDD : ADI5<0b1101, 0b01, (outs DPR:$dst), (ins addrmode5:$addr),
 def FLDS : ASI5<0b1101, 0b01, (outs SPR:$dst), (ins addrmode5:$addr),
 "flds", " $dst, $addr",
 [(set SPR:$dst, (load addrmode5:$addr))]>;
-} // isSimpleLoad
+} // canFoldAsLoad
 def FSTD : ADI5<0b1101, 0b00, (outs), (ins DPR:$src, addrmode5:$addr),
 "fstd", " $src, $addr",

View File

@@ -41,7 +41,7 @@ class InstAlpha<bits<6> op, string asmstr, InstrItinClass itin> : Instruction {
 class MForm<bits<6> opcode, bit load, string asmstr, list<dag> pattern, InstrItinClass itin>
 : InstAlpha<opcode, asmstr, itin> {
 let Pattern = pattern;
-let isSimpleLoad = load;
+let canFoldAsLoad = load;
 let Defs = [R28]; //We may use this for frame index calculations, so reserve it here
 bits<5> Ra;

View File

@@ -47,7 +47,7 @@ def DWARF_LOC : Pseudo<(outs), (ins i32imm:$line, i32imm:$col, i32imm:$fi
 // finally the X-form with the register-register.
 //===----------------------------------------------------------------------===//
-let isSimpleLoad = 1 in {
+let canFoldAsLoad = 1 in {
 class LoadDFormVec<ValueType vectype>
 : RI10Form<0b00101100, (outs VECREG:$rT), (ins memri10:$src),
 "lqd\t$rT, $src",

View File

@@ -546,7 +546,7 @@ let mayStore = 1 in {
 "stf.spill [$dstPtr] = $value">, isM;
 }
-let isSimpleLoad = 1 in {
+let canFoldAsLoad = 1 in {
 def LD1 : AForm<0x03, 0x0b, (outs GR:$dst), (ins GR:$srcPtr),
 "ld1 $dst = [$srcPtr]">, isM;
 def LD2 : AForm<0x03, 0x0b, (outs GR:$dst), (ins GR:$srcPtr),

View File

@@ -224,7 +224,7 @@ class LoadUpper<bits<6> op, string instr_asm>:
 [], IIAlu>;
 // Memory Load/Store
-let isSimpleLoad = 1, hasDelaySlot = 1 in
+let canFoldAsLoad = 1, hasDelaySlot = 1 in
 class LoadM<bits<6> op, string instr_asm, PatFrag OpNode>:
 FI< op,
 (outs CPURegs:$dst),

View File

@@ -468,7 +468,7 @@ def RLDICR : MDForm_1<30, 1,
 // Sign extending loads.
-let isSimpleLoad = 1, PPC970_Unit = 2 in {
+let canFoldAsLoad = 1, PPC970_Unit = 2 in {
 def LHA8: DForm_1<42, (outs G8RC:$rD), (ins memri:$src),
 "lha $rD, $src", LdStLHA,
 [(set G8RC:$rD, (sextloadi16 iaddr:$src))]>,
@@ -498,7 +498,7 @@ def LHAU8 : DForm_1<43, (outs G8RC:$rD, ptr_rc:$ea_result), (ins symbolLo:$disp,
 }
 // Zero extending loads.
-let isSimpleLoad = 1, PPC970_Unit = 2 in {
+let canFoldAsLoad = 1, PPC970_Unit = 2 in {
 def LBZ8 : DForm_1<34, (outs G8RC:$rD), (ins memri:$src),
 "lbz $rD, $src", LdStGeneral,
 [(set G8RC:$rD, (zextloadi8 iaddr:$src))]>;
@@ -539,7 +539,7 @@ def LWZU8 : DForm_1<33, (outs G8RC:$rD, ptr_rc:$ea_result), (ins memri:$addr),
 // Full 8-byte loads.
-let isSimpleLoad = 1, PPC970_Unit = 2 in {
+let canFoldAsLoad = 1, PPC970_Unit = 2 in {
 def LD : DSForm_1<58, 0, (outs G8RC:$rD), (ins memrix:$src),
 "ld $rD, $src", LdStLD,
 [(set G8RC:$rD, (load ixaddr:$src))]>, isPPC64;

View File

@@ -199,7 +199,7 @@ def MTVSCR : VXForm_5<1604, (outs), (ins VRRC:$vB),
 "mtvscr $vB", LdStGeneral,
 [(int_ppc_altivec_mtvscr VRRC:$vB)]>;
-let isSimpleLoad = 1, PPC970_Unit = 2 in { // Loads.
+let canFoldAsLoad = 1, PPC970_Unit = 2 in { // Loads.
 def LVEBX: XForm_1<31, 7, (outs VRRC:$vD), (ins memrr:$src),
 "lvebx $vD, $src", LdStGeneral,
 [(set VRRC:$vD, (int_ppc_altivec_lvebx xoaddr:$src))]>;

View File

@@ -659,7 +659,7 @@ def TRAP : XForm_24<31, 4, (outs), (ins), "trap", LdStGeneral, [(trap)]>;
 //
 // Unindexed (r+i) Loads.
-let isSimpleLoad = 1, PPC970_Unit = 2 in {
+let canFoldAsLoad = 1, PPC970_Unit = 2 in {
 def LBZ : DForm_1<34, (outs GPRC:$rD), (ins memri:$src),
 "lbz $rD, $src", LdStGeneral,
 [(set GPRC:$rD, (zextloadi8 iaddr:$src))]>;
@@ -718,7 +718,7 @@ def LFDU : DForm_1<51, (outs F8RC:$rD, ptr_rc:$ea_result), (ins memri:$addr),
 // Indexed (r+r) Loads.
 //
-let isSimpleLoad = 1, PPC970_Unit = 2 in {
+let canFoldAsLoad = 1, PPC970_Unit = 2 in {
 def LBZX : XForm_1<31, 87, (outs GPRC:$rD), (ins memrr:$src),
 "lbzx $rD, $src", LdStGeneral,
 [(set GPRC:$rD, (zextloadi8 xaddr:$src))]>;

View File

@@ -232,7 +232,7 @@ def MOV64ri32 : RIi32<0xC7, MRM0r, (outs GR64:$dst), (ins i64i32imm:$src),
 [(set GR64:$dst, i64immSExt32:$src)]>;
 }
-let isSimpleLoad = 1 in
+let canFoldAsLoad = 1 in
 def MOV64rm : RI<0x8B, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
 "mov{q}\t{$src, $dst|$dst, $src}",
 [(set GR64:$dst, (load addr:$src))]>;

View File

@@ -342,7 +342,7 @@ def CMOVNP_F : FPI<0xD8, AddRegFrm, (outs RST:$op), (ins),
 "fcmovnu\t{$op, %st(0)|%ST(0), $op}">, DB;
 // Floating point loads & stores.
-let isSimpleLoad = 1 in {
+let canFoldAsLoad = 1 in {
 def LD_Fp32m : FpIf32<(outs RFP32:$dst), (ins f32mem:$src), ZeroArgFP,
 [(set RFP32:$dst, (loadf32 addr:$src))]>;
 let isReMaterializable = 1, mayHaveSideEffects = 1 in

View File

@@ -677,7 +677,7 @@ def MOV32mi : Ii32<0xC7, MRM0m, (outs), (ins i32mem:$dst, i32imm:$src),
 "mov{l}\t{$src, $dst|$dst, $src}",
 [(store (i32 imm:$src), addr:$dst)]>;
-let isSimpleLoad = 1, isReMaterializable = 1, mayHaveSideEffects = 1 in {
+let canFoldAsLoad = 1, isReMaterializable = 1, mayHaveSideEffects = 1 in {
 def MOV8rm : I<0x8A, MRMSrcMem, (outs GR8 :$dst), (ins i8mem :$src),
 "mov{b}\t{$src, $dst|$dst, $src}",
 [(set GR8:$dst, (load addr:$src))]>;
@@ -2666,7 +2666,7 @@ def MOV32_rr : I<0x89, MRMDestReg, (outs GR32_:$dst), (ins GR32_:$src),
 "mov{l}\t{$src, $dst|$dst, $src}", []>;
 } // neverHasSideEffects
-let isSimpleLoad = 1, mayLoad = 1, isReMaterializable = 1, mayHaveSideEffects = 1 in {
+let canFoldAsLoad = 1, mayLoad = 1, isReMaterializable = 1, mayHaveSideEffects = 1 in {
 def MOV16_rm : I<0x8B, MRMSrcMem, (outs GR16_:$dst), (ins i16mem:$src),
 "mov{w}\t{$src, $dst|$dst, $src}", []>, OpSize;
 def MOV32_rm : I<0x8B, MRMSrcMem, (outs GR32_:$dst), (ins i32mem:$src),

View File

@@ -146,7 +146,7 @@ def MMX_FEMMS : MMXI<0x0E, RawFrm, (outs), (ins), "femms", [(int_x86_mmx_femms)]
 def MMX_MOVD64rr : MMXI<0x6E, MRMSrcReg, (outs VR64:$dst), (ins GR32:$src),
 "movd\t{$src, $dst|$dst, $src}",
 [(set VR64:$dst, (v2i32 (scalar_to_vector GR32:$src)))]>;
-let isSimpleLoad = 1, isReMaterializable = 1 in
+let canFoldAsLoad = 1, isReMaterializable = 1 in
 def MMX_MOVD64rm : MMXI<0x6E, MRMSrcMem, (outs VR64:$dst), (ins i32mem:$src),
 "movd\t{$src, $dst|$dst, $src}",
 [(set VR64:$dst, (v2i32 (scalar_to_vector (loadi32 addr:$src))))]>;
@@ -165,7 +165,7 @@ def MMX_MOVD64from64rr : MMXRI<0x7E, MRMSrcReg, (outs GR64:$dst), (ins VR64:$src
 let neverHasSideEffects = 1 in
 def MMX_MOVQ64rr : MMXI<0x6F, MRMSrcReg, (outs VR64:$dst), (ins VR64:$src),
 "movq\t{$src, $dst|$dst, $src}", []>;
-let isSimpleLoad = 1, isReMaterializable = 1, mayHaveSideEffects = 1 in
+let canFoldAsLoad = 1, isReMaterializable = 1, mayHaveSideEffects = 1 in
 def MMX_MOVQ64rm : MMXI<0x6F, MRMSrcMem, (outs VR64:$dst), (ins i64mem:$src),
 "movq\t{$src, $dst|$dst, $src}",
 [(set VR64:$dst, (load_mmx addr:$src))]>;

View File

@@ -315,7 +315,7 @@ let Uses = [EFLAGS], usesCustomDAGSchedInserter = 1 in {
 let neverHasSideEffects = 1 in
 def MOVSSrr : SSI<0x10, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src),
 "movss\t{$src, $dst|$dst, $src}", []>;
-let isSimpleLoad = 1, isReMaterializable = 1, mayHaveSideEffects = 1 in
+let canFoldAsLoad = 1, isReMaterializable = 1, mayHaveSideEffects = 1 in
 def MOVSSrm : SSI<0x10, MRMSrcMem, (outs FR32:$dst), (ins f32mem:$src),
 "movss\t{$src, $dst|$dst, $src}",
 [(set FR32:$dst, (loadf32 addr:$src))]>;
@@ -474,7 +474,7 @@ def FsMOVAPSrr : PSI<0x28, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src),
 // Alias instruction to load FR32 from f128mem using movaps. Upper bits are
 // disregarded.
-let isSimpleLoad = 1 in
+let canFoldAsLoad = 1 in
 def FsMOVAPSrm : PSI<0x28, MRMSrcMem, (outs FR32:$dst), (ins f128mem:$src),
 "movaps\t{$src, $dst|$dst, $src}",
 [(set FR32:$dst, (alignedloadfsf32 addr:$src))]>;
@@ -667,7 +667,7 @@ defm MIN : sse1_fp_binop_rm<0x5D, "min", X86fmin,
 let neverHasSideEffects = 1 in
 def MOVAPSrr : PSI<0x28, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
 "movaps\t{$src, $dst|$dst, $src}", []>;
-let isSimpleLoad = 1, isReMaterializable = 1, mayHaveSideEffects = 1 in
+let canFoldAsLoad = 1, isReMaterializable = 1, mayHaveSideEffects = 1 in
 def MOVAPSrm : PSI<0x28, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
 "movaps\t{$src, $dst|$dst, $src}",
 [(set VR128:$dst, (alignedloadv4f32 addr:$src))]>;
@@ -679,7 +679,7 @@ def MOVAPSmr : PSI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
 let neverHasSideEffects = 1 in
 def MOVUPSrr : PSI<0x10, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
 "movups\t{$src, $dst|$dst, $src}", []>;
-let isSimpleLoad = 1 in
+let canFoldAsLoad = 1 in
 def MOVUPSrm : PSI<0x10, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
 "movups\t{$src, $dst|$dst, $src}",
 [(set VR128:$dst, (loadv4f32 addr:$src))]>;
@@ -688,7 +688,7 @@ def MOVUPSmr : PSI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
 [(store (v4f32 VR128:$src), addr:$dst)]>;
 // Intrinsic forms of MOVUPS load and store
-let isSimpleLoad = 1 in
+let canFoldAsLoad = 1 in
 def MOVUPSrm_Int : PSI<0x10, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
 "movups\t{$src, $dst|$dst, $src}",
 [(set VR128:$dst, (int_x86_sse_loadu_ps addr:$src))]>;
@@ -987,9 +987,9 @@ def STMXCSR : PSI<0xAE, MRM3m, (outs), (ins i32mem:$dst),
 "stmxcsr\t$dst", [(int_x86_sse_stmxcsr addr:$dst)]>;
 // Alias instructions that map zero vector to pxor / xorp* for sse.
-// We set isSimpleLoad because this can be converted to a constant-pool
+// We set canFoldAsLoad because this can be converted to a constant-pool
 // load of an all-zeros value if folding it would be beneficial.
-let isReMaterializable = 1, isAsCheapAsAMove = 1, isSimpleLoad = 1 in
+let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1 in
 def V_SET0 : PSI<0x57, MRMInitReg, (outs VR128:$dst), (ins),
 "xorps\t$dst, $dst",
 [(set VR128:$dst, (v4i32 immAllZerosV))]>;
@@ -1063,7 +1063,7 @@ def : Pat<(v4f32 (X86vzmovl (loadv4f32 addr:$src))),
 let neverHasSideEffects = 1 in
 def MOVSDrr : SDI<0x10, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src),
 "movsd\t{$src, $dst|$dst, $src}", []>;
-let isSimpleLoad = 1, isReMaterializable = 1, mayHaveSideEffects = 1 in
+let canFoldAsLoad = 1, isReMaterializable = 1, mayHaveSideEffects = 1 in
 def MOVSDrm : SDI<0x10, MRMSrcMem, (outs FR64:$dst), (ins f64mem:$src),
 "movsd\t{$src, $dst|$dst, $src}",
 [(set FR64:$dst, (loadf64 addr:$src))]>;
@@ -1215,7 +1215,7 @@ def FsMOVAPDrr : PDI<0x28, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src),
 // Alias instruction to load FR64 from f128mem using movapd. Upper bits are
 // disregarded.
-let isSimpleLoad = 1 in
+let canFoldAsLoad = 1 in
 def FsMOVAPDrm : PDI<0x28, MRMSrcMem, (outs FR64:$dst), (ins f128mem:$src),
 "movapd\t{$src, $dst|$dst, $src}",
 [(set FR64:$dst, (alignedloadfsf64 addr:$src))]>;
@@ -1410,7 +1410,7 @@ defm MIN : sse2_fp_binop_rm<0x5D, "min", X86fmin,
 let neverHasSideEffects = 1 in
 def MOVAPDrr : PDI<0x28, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
 "movapd\t{$src, $dst|$dst, $src}", []>;
-let isSimpleLoad = 1, isReMaterializable = 1, mayHaveSideEffects = 1 in
+let canFoldAsLoad = 1, isReMaterializable = 1, mayHaveSideEffects = 1 in
 def MOVAPDrm : PDI<0x28, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
 "movapd\t{$src, $dst|$dst, $src}",
 [(set VR128:$dst, (alignedloadv2f64 addr:$src))]>;
@@ -1422,7 +1422,7 @@ def MOVAPDmr : PDI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
 let neverHasSideEffects = 1 in
 def MOVUPDrr : PDI<0x10, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
 "movupd\t{$src, $dst|$dst, $src}", []>;
-let isSimpleLoad = 1 in
+let canFoldAsLoad = 1 in
 def MOVUPDrm : PDI<0x10, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
 "movupd\t{$src, $dst|$dst, $src}",
 [(set VR128:$dst, (loadv2f64 addr:$src))]>;
@@ -1790,7 +1790,7 @@ let Constraints = "$src1 = $dst" in {
 let neverHasSideEffects = 1 in
 def MOVDQArr : PDI<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
 "movdqa\t{$src, $dst|$dst, $src}", []>;
-let isSimpleLoad = 1, mayLoad = 1 in
+let canFoldAsLoad = 1, mayLoad = 1 in
 def MOVDQArm : PDI<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
 "movdqa\t{$src, $dst|$dst, $src}",
 [/*(set VR128:$dst, (alignedloadv2i64 addr:$src))*/]>;
@@ -1798,7 +1798,7 @@ let mayStore = 1 in
 def MOVDQAmr : PDI<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
 "movdqa\t{$src, $dst|$dst, $src}",
 [/*(alignedstore (v2i64 VR128:$src), addr:$dst)*/]>;
-let isSimpleLoad = 1, mayLoad = 1 in
+let canFoldAsLoad = 1, mayLoad = 1 in
 def MOVDQUrm : I<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
 "movdqu\t{$src, $dst|$dst, $src}",
 [/*(set VR128:$dst, (loadv2i64 addr:$src))*/]>,
@@ -1810,7 +1810,7 @@ def MOVDQUmr : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
 XS, Requires<[HasSSE2]>;
 // Intrinsic forms of MOVDQU load and store
-let isSimpleLoad = 1 in
+let canFoldAsLoad = 1 in
 def MOVDQUrm_Int : I<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
 "movdqu\t{$src, $dst|$dst, $src}",
 [(set VR128:$dst, (int_x86_sse2_loadu_dq addr:$src))]>,
@@ -2255,9 +2255,9 @@ def : Pat<(membarrier (i8 imm:$ll), (i8 imm:$ls), (i8 imm:$sl), (i8 imm:$ss),
 (i8 1)), (MFENCE)>;
 // Alias instructions that map zero vector to pxor / xorp* for sse.
-// We set isSimpleLoad because this can be converted to a constant-pool
+// We set canFoldAsLoad because this can be converted to a constant-pool
 // load of an all-ones value if folding it would be beneficial.
-let isReMaterializable = 1, isAsCheapAsAMove = 1, isSimpleLoad = 1 in
+let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1 in
 def V_SETALLONES : PDI<0x76, MRMInitReg, (outs VR128:$dst), (ins),
 "pcmpeqd\t$dst, $dst",
 [(set VR128:$dst, (v4i32 immAllOnesV))]>;

View File

@@ -84,7 +84,7 @@ CodeGenInstruction::CodeGenInstruction(Record *R, const std::string &AsmStr)
 isIndirectBranch = R->getValueAsBit("isIndirectBranch");
 isBarrier = R->getValueAsBit("isBarrier");
 isCall = R->getValueAsBit("isCall");
-isSimpleLoad = R->getValueAsBit("isSimpleLoad");
+canFoldAsLoad = R->getValueAsBit("canFoldAsLoad");
 mayLoad = R->getValueAsBit("mayLoad");
 mayStore = R->getValueAsBit("mayStore");
 bool isTwoAddress = R->getValueAsBit("isTwoAddress");

View File

@@ -89,7 +89,7 @@ namespace llvm {
 bool isIndirectBranch;
 bool isBarrier;
 bool isCall;
-bool isSimpleLoad;
+bool canFoldAsLoad;
 bool mayLoad, mayStore;
 bool isPredicable;
 bool isConvertibleToThreeAddress;

View File

@@ -262,7 +262,7 @@ void InstrInfoEmitter::emitRecord(const CodeGenInstruction &Inst, unsigned Num,
 if (Inst.isBarrier) OS << "|(1<<TID::Barrier)";
 if (Inst.hasDelaySlot) OS << "|(1<<TID::DelaySlot)";
 if (Inst.isCall) OS << "|(1<<TID::Call)";
-if (Inst.isSimpleLoad) OS << "|(1<<TID::SimpleLoad)";
+if (Inst.canFoldAsLoad) OS << "|(1<<TID::FoldableAsLoad)";
 if (Inst.mayLoad) OS << "|(1<<TID::MayLoad)";
 if (Inst.mayStore) OS << "|(1<<TID::MayStore)";
 if (Inst.isPredicable) OS << "|(1<<TID::Predicable)";