Whitespace cleanup. Remove trailing whitespace.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@78666 91177308-0d34-0410-b5e6-96231b3b80d8
commit 764ab52dd8
parent f128787f94
@@ -98,7 +98,7 @@ FunctionPass *createARMCodeEmitterPass(ARMBaseTargetMachine &TM,
MachineCodeEmitter &MCE);
FunctionPass *createARMJITCodeEmitterPass(ARMBaseTargetMachine &TM,
JITCodeEmitter &JCE);
FunctionPass *createARMObjectCodeEmitterPass(ARMBaseTargetMachine &TM,
ObjectCodeEmitter &OCE);

FunctionPass *createARMLoadStoreOptimizationPass(bool PreAlloc = false);
@@ -20,7 +20,7 @@
#include <cassert>

namespace llvm {

/// ARM_AM - ARM Addressing Mode Stuff
namespace ARM_AM {
enum ShiftOpc {
@@ -31,11 +31,11 @@ namespace ARM_AM {
ror,
rrx
};

enum AddrOpc {
add = '+', sub = '-'
};

static inline const char *getShiftOpcStr(ShiftOpc Op) {
switch (Op) {
default: llvm_unreachable("Unknown shift opc!");
@@ -46,7 +46,7 @@ namespace ARM_AM {
case ARM_AM::rrx: return "rrx";
}
}

static inline ShiftOpc getShiftOpcForNode(SDValue N) {
switch (N.getOpcode()) {
default: return ARM_AM::no_shift;
@@ -95,14 +95,14 @@ namespace ARM_AM {
assert(Amt < 32 && "Invalid rotate amount");
return (Val >> Amt) | (Val << ((32-Amt)&31));
}

/// rotl32 - Rotate a 32-bit unsigned value left by a specified # bits.
///
static inline unsigned rotl32(unsigned Val, unsigned Amt) {
assert(Amt < 32 && "Invalid rotate amount");
return (Val << Amt) | (Val >> ((32-Amt)&31));
}

//===--------------------------------------------------------------------===//
// Addressing Mode #1: shift_operand with registers
//===--------------------------------------------------------------------===//
@@ -137,7 +137,7 @@ namespace ARM_AM {
static inline unsigned getSOImmValRot(unsigned Imm) {
return (Imm >> 8) * 2;
}

/// getSOImmValRotate - Try to handle Imm with an immediate shifter operand,
/// computing the rotate amount to use. If this immediate value cannot be
/// handled with a single shifter-op, determine a good rotate amount that will
@@ -146,14 +146,14 @@ namespace ARM_AM {
// 8-bit (or less) immediates are trivially shifter_operands with a rotate
// of zero.
if ((Imm & ~255U) == 0) return 0;

// Use CTZ to compute the rotate amount.
unsigned TZ = CountTrailingZeros_32(Imm);

// Rotate amount must be even. Something like 0x200 must be rotated 8 bits,
// not 9.
unsigned RotAmt = TZ & ~1;

// If we can handle this spread, return it.
if ((rotr32(Imm, RotAmt) & ~255U) == 0)
return (32-RotAmt)&31; // HW rotates right, not left.
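The hunks above touch the shifter-operand helpers: rotr32/rotl32 and the rotate search in getSOImmValRotate. As a standalone illustration of the encoding those helpers serve, an 8-bit payload rotated right by an even amount, the following sketch is not part of the patch; its brute-force isSingleSOImm is a stand-in for the faster CTZ-based search shown in the diff.

#include <cassert>
#include <cstdio>

// Same rotate helper as in the patch context above.
static unsigned rotr32(unsigned Val, unsigned Amt) {
  assert(Amt < 32 && "Invalid rotate amount");
  return (Val >> Amt) | (Val << ((32 - Amt) & 31));
}

// A value is a single ARM shifter-operand immediate if some even rotate-right
// of it leaves only the low 8 bits set, i.e. it is an 8-bit payload rotated
// right by an even amount. Brute force over the 16 even rotates.
static bool isSingleSOImm(unsigned Imm) {
  for (unsigned Rot = 0; Rot < 32; Rot += 2)
    if ((rotr32(Imm, Rot) & ~255U) == 0)
      return true;
  return false;
}

int main() {
  printf("%d\n", isSingleSOImm(0x000000FFu)); // 1: fits in 8 bits directly
  printf("%d\n", isSingleSOImm(0x0000FF00u)); // 1: 0xFF rotated right by 24
  printf("%d\n", isSingleSOImm(0x00FFF000u)); // 0: needs 12 significant bits
  return 0;
}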
@@ -166,16 +166,16 @@ namespace ARM_AM {
// Restart the search for a high-order bit after the initial seconds of
// ones.
unsigned TZ2 = CountTrailingZeros_32(Imm & ~((1 << TrailingOnes)-1));

// Rotate amount must be even.
unsigned RotAmt2 = TZ2 & ~1;

// If this fits, use it.
if (RotAmt2 != 32 && (rotr32(Imm, RotAmt2) & ~255U) == 0)
return (32-RotAmt2)&31; // HW rotates right, not left.
}
}

// Otherwise, we have no way to cover this span of bits with a single
// shifter_op immediate. Return a chunk of bits that will be useful to
// handle.
@@ -189,17 +189,17 @@ namespace ARM_AM {
// 8-bit (or less) immediates are trivially shifter_operands with a rotate
// of zero.
if ((Arg & ~255U) == 0) return Arg;

unsigned RotAmt = getSOImmValRotate(Arg);

// If this cannot be handled with a single shifter_op, bail out.
if (rotr32(~255U, RotAmt) & Arg)
return -1;

// Encode this correctly.
return rotl32(Arg, RotAmt) | ((RotAmt>>1) << 8);
}

/// isSOImmTwoPartVal - Return true if the specified value can be obtained by
/// or'ing together two SOImmVal's.
static inline bool isSOImmTwoPartVal(unsigned V) {
@@ -207,12 +207,12 @@ namespace ARM_AM {
V = rotr32(~255U, getSOImmValRotate(V)) & V;
if (V == 0)
return false;

// If this can be handled with two shifter_op's, accept.
V = rotr32(~255U, getSOImmValRotate(V)) & V;
return V == 0;
}

/// getSOImmTwoPartFirst - If V is a value that satisfies isSOImmTwoPartVal,
/// return the first chunk of it.
static inline unsigned getSOImmTwoPartFirst(unsigned V) {
@@ -222,14 +222,14 @@ namespace ARM_AM {
/// getSOImmTwoPartSecond - If V is a value that satisfies isSOImmTwoPartVal,
/// return the second chunk of it.
static inline unsigned getSOImmTwoPartSecond(unsigned V) {
// Mask out the first hunk.
V = rotr32(~255U, getSOImmValRotate(V)) & V;

// Take what's left.
assert(V == (rotr32(255U, getSOImmValRotate(V)) & V));
return V;
}

/// getThumbImmValShift - Try to handle Imm with a 8-bit immediate followed
/// by a left shift. Returns the shift amount to use.
static inline unsigned getThumbImmValShift(unsigned Imm) {
@@ -244,7 +244,7 @@ namespace ARM_AM {
/// isThumbImmShiftedVal - Return true if the specified value can be obtained
/// by left shifting a 8-bit immediate.
static inline bool isThumbImmShiftedVal(unsigned V) {
// If this can be handled with
V = (~255U << getThumbImmValShift(V)) & V;
return V == 0;
}
@@ -260,10 +260,10 @@ namespace ARM_AM {
return CountTrailingZeros_32(Imm);
}

/// isThumbImm16ShiftedVal - Return true if the specified value can be
/// obtained by left shifting a 16-bit immediate.
static inline bool isThumbImm16ShiftedVal(unsigned V) {
// If this can be handled with
V = (~65535U << getThumbImm16ValShift(V)) & V;
return V == 0;
}
@@ -287,9 +287,9 @@ namespace ARM_AM {
static inline int getT2SOImmValSplatVal(unsigned V) {
unsigned u, Vs, Imm;
// control = 0
if ((V & 0xffffff00) == 0)
return V;

// If the value is zeroes in the first byte, just shift those off
Vs = ((V & 0xff) == 0) ? V >> 8 : V;
// Any passing value only has 8 bits of payload, splatted across the word
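getT2SOImmValSplatVal above handles the trivial 8-bit case and then strips a zero low byte before checking for a byte splatted across the word; the check itself is cut off by the hunk. The sketch below is an illustration only, following the replicated-byte forms of ARM ARM A6.3.2 (cited in the next hunk) rather than the patch's exact code, and its boolean result stands in for the patch's integer encoding.

// Thumb-2 "modified immediate" replication forms: a single byte XY appearing
// as 0x000000XY, 0x00XY00XY, 0xXY00XY00, or 0xXYXYXYXY.
static bool isT2SplatImm(unsigned V) {
  unsigned Lo = V & 0xFF;
  if ((V & 0xFFFFFF00u) == 0)
    return true;                              // 0x000000XY (the control = 0 case above)
  if (V == ((Lo << 16) | Lo))
    return true;                              // 0x00XY00XY
  unsigned Mid = (V >> 8) & 0xFF;
  if (V == ((Mid << 24) | (Mid << 8)))
    return true;                              // 0xXY00XY00
  if (V == Lo * 0x01010101u)
    return true;                              // 0xXYXYXYXY
  return false;
}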
@@ -325,7 +325,7 @@ namespace ARM_AM {
}

/// getT2SOImmVal - Given a 32-bit immediate, if it is something that can fit
/// into a Thumb-2 shifter_operand immediate operand, return the 12-bit
/// encoding for it. If not, return -1.
/// See ARM Reference Manual A6.3.2.
static inline int getT2SOImmVal(unsigned Arg) {
@@ -333,7 +333,7 @@ namespace ARM_AM {
int Splat = getT2SOImmValSplatVal(Arg);
if (Splat != -1)
return Splat;

// If 'Arg' can be handled with a single shifter_op return the value.
int Rot = getT2SOImmValRotateVal(Arg);
if (Rot != -1)
@@ -341,7 +341,7 @@ namespace ARM_AM {

return -1;
}


//===--------------------------------------------------------------------===//
// Addressing Mode #2
@@ -359,7 +359,7 @@
// If this addressing mode is a frame index (before prolog/epilog insertion
// and code rewriting), this operand will have the form: FI#, reg0, <offs>
// with no shift amount for the frame offset.
//
static inline unsigned getAM2Opc(AddrOpc Opc, unsigned Imm12, ShiftOpc SO) {
assert(Imm12 < (1 << 12) && "Imm too large!");
bool isSub = Opc == sub;
@@ -374,8 +374,8 @@
static inline ShiftOpc getAM2ShiftOpc(unsigned AM2Opc) {
return (ShiftOpc)(AM2Opc >> 13);
}


//===--------------------------------------------------------------------===//
// Addressing Mode #3
//===--------------------------------------------------------------------===//
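The addrmode2 hunks above show getAM2Opc asserting a 12-bit offset and getAM2ShiftOpc recovering the shift opcode from bit 13 upward. The sketch below is an illustration only, with simplified parameter types; the bit position of the add/sub flag and the exact field layout are assumptions consistent with those two visible facts, not taken from the patch.

// Assumed layout of the AM2 "opc" word: [11:0] imm12, [12] isSub, [13..] shift opcode.
static unsigned encodeAM2Opc(bool isSub, unsigned Imm12, unsigned ShOpc) {
  // Imm12 must fit in 12 bits, mirroring the assert in the hunk above.
  return Imm12 | ((isSub ? 1u : 0u) << 12) | (ShOpc << 13);
}

static unsigned getAM2Offset(unsigned AM2Opc) {
  return AM2Opc & ((1u << 12) - 1);        // assumed: low 12 bits hold the offset
}

static bool getAM2IsSub(unsigned AM2Opc) {
  return (AM2Opc >> 12) & 1;               // assumed: bit 12 is the add/sub flag
}

static unsigned decodeAM2ShiftOpc(unsigned AM2Opc) {
  return AM2Opc >> 13;                     // matches the accessor shown in the diff
}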
@@ -388,7 +388,7 @@ namespace ARM_AM {
// The first operand is always a Reg. The second operand is a reg if in
// reg/reg form, otherwise it's reg#0. The third field encodes the operation
// in bit 8, the immediate in bits 0-7.

/// getAM3Opc - This function encodes the addrmode3 opc field.
static inline unsigned getAM3Opc(AddrOpc Opc, unsigned char Offset) {
bool isSub = Opc == sub;
@@ -400,7 +400,7 @@ namespace ARM_AM {
static inline AddrOpc getAM3Op(unsigned AM3Opc) {
return ((AM3Opc >> 8) & 1) ? sub : add;
}

//===--------------------------------------------------------------------===//
// Addressing Mode #4
//===--------------------------------------------------------------------===//
@@ -448,7 +448,7 @@ namespace ARM_AM {
//
// IA - Increment after
// DB - Decrement before

/// getAM5Opc - This function encodes the addrmode5 opc field.
static inline unsigned getAM5Opc(AddrOpc Opc, unsigned char Offset) {
bool isSub = Opc == sub;
@@ -523,7 +523,7 @@ ARMBaseInstrInfo::isMoveInstr(const MachineInstr &MI,
return false;
}

unsigned
ARMBaseInstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
int &FrameIndex) const {
switch (MI->getOpcode()) {
@@ -805,7 +805,7 @@ foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
return NewMI;
}

MachineInstr*
ARMBaseInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
MachineInstr* MI,
const SmallVectorImpl<unsigned> &Ops,
@@ -899,11 +899,11 @@ int llvm::rewriteARMFrameIndex(MachineInstr &MI, unsigned FrameRegIdx,
const TargetInstrDesc &Desc = MI.getDesc();
unsigned AddrMode = (Desc.TSFlags & ARMII::AddrModeMask);
bool isSub = false;

// Memory operands in inline assembly always use AddrMode2.
if (Opcode == ARM::INLINEASM)
AddrMode = ARMII::AddrMode2;

if (Opcode == ARM::ADDri) {
Offset += MI.getOperand(FrameRegIdx+1).getImm();
if (Offset == 0) {
@@ -997,7 +997,7 @@ int llvm::rewriteARMFrameIndex(MachineInstr &MI, unsigned FrameRegIdx,
ImmOp.ChangeToImmediate(ImmedOffset);
return 0;
}

// Otherwise, it didn't fit. Pull in what we can to simplify the immed.
ImmedOffset = ImmedOffset & Mask;
if (isSub)
@@ -243,7 +243,7 @@ public:

virtual bool canFoldMemoryOperand(const MachineInstr *MI,
const SmallVectorImpl<unsigned> &Ops) const;

virtual MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
MachineInstr* MI,
const SmallVectorImpl<unsigned> &Ops,
@@ -311,7 +311,7 @@ void emitT2RegPlusImmediate(MachineBasicBlock &MBB,
const ARMBaseInstrInfo &TII);


/// rewriteARMFrameIndex / rewriteT2FrameIndex -
/// Rewrite MI to access 'Offset' bytes from the FP. Return the offset that
/// could not be handled directly in MI.
int rewriteARMFrameIndex(MachineInstr &MI, unsigned FrameRegIdx,
@@ -470,13 +470,13 @@ ARMBaseRegisterInfo::estimateRSStackSizeLimit(MachineFunction &MF) const {
I != E; ++I) {
for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
if (!I->getOperand(i).isFI()) continue;

const TargetInstrDesc &Desc = TII.get(I->getOpcode());
unsigned AddrMode = (Desc.TSFlags & ARMII::AddrModeMask);
if (AddrMode == ARMII::AddrMode3 ||
AddrMode == ARMII::AddrModeT2_i8)
return (1 << 8) - 1;

if (AddrMode == ARMII::AddrMode5 ||
AddrMode == ARMII::AddrModeT2_i8s4)
Limit = std::min(Limit, ((1U << 8) - 1) * 4);
@@ -1235,7 +1235,7 @@ static bool isCalleeSavedRegister(unsigned Reg, const unsigned *CSRegs) {
}

static bool isCSRestore(MachineInstr *MI,
const ARMBaseInstrInfo &TII,
const unsigned *CSRegs) {
return ((MI->getOpcode() == (int)ARM::FLDD ||
MI->getOpcode() == (int)ARM::LDR ||
@@ -1297,7 +1297,7 @@ emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const {
ARMCC::AL, 0, TII);
} else {
// Thumb2 or ARM.
if (isARM)
BuildMI(MBB, MBBI, dl, TII.get(ARM::MOVr), ARM::SP)
.addReg(FramePtr)
.addImm((unsigned)ARMCC::AL).addReg(0).addReg(0);
@@ -204,10 +204,10 @@ bool Emitter<CodeEmitter>::runOnMachineFunction(MachineFunction &MF) {
JTI->Initialize(MF, IsPIC);

do {
DEBUG(errs() << "JITTing function '"
<< MF.getFunction()->getName() << "'\n");
MCE.startFunction(MF);
for (MachineFunction::iterator MBB = MF.begin(), E = MF.end();
MBB != E; ++MBB) {
MCE.StartMachineBasicBlock(MBB);
for (MachineBasicBlock::const_iterator I = MBB->begin(), E = MBB->end();
@@ -300,7 +300,7 @@ void Emitter<CodeEmitter>::emitConstPoolAddress(unsigned CPI,
/// be emitted to the current location in the function, and allow it to be PC
/// relative.
template<class CodeEmitter>
void Emitter<CodeEmitter>::emitJumpTableAddress(unsigned JTIndex,
unsigned Reloc) {
MCE.addRelocation(MachineRelocation::getJumpTable(MCE.getCurrentPCOffset(),
Reloc, JTIndex, 0, true));
@@ -408,7 +408,7 @@ void Emitter<CodeEmitter>::emitConstPoolInstruction(const MachineInstr &MI) {
unsigned CPI = MI.getOperand(0).getImm(); // CP instruction index.
unsigned CPIndex = MI.getOperand(1).getIndex(); // Actual cp entry index.
const MachineConstantPoolEntry &MCPE = (*MCPEs)[CPIndex];

// Remember the CONSTPOOL_ENTRY address for later relocation.
JTI->addConstantPoolEntryAddr(CPI, MCE.getCurrentPCValue());

@@ -428,7 +428,7 @@ void Emitter<CodeEmitter>::emitConstPoolInstruction(const MachineInstr &MI) {
MCE.addRelocation(MachineRelocation::getIndirectSymbol(
MCE.getCurrentPCOffset(), ARM::reloc_arm_machine_cp_entry, GV,
(intptr_t)ACPV, false));
else
emitGlobalAddress(GV, ARM::reloc_arm_machine_cp_entry,
ACPV->isStub() || isa<Function>(GV), (intptr_t)ACPV);
} else {
@@ -515,7 +515,7 @@ void Emitter<CodeEmitter>::emitMOVi2piecesInstruction(const MachineInstr &MI) {
template<class CodeEmitter>
void Emitter<CodeEmitter>::emitLEApcrelJTInstruction(const MachineInstr &MI) {
// It's basically add r, pc, (LJTI - $+8)

const TargetInstrDesc &TID = MI.getDesc();

// Emit the 'add' instruction.
@@ -1117,7 +1117,7 @@ void Emitter<CodeEmitter>::emitMiscArithInstruction(const MachineInstr &MI) {
unsigned ShiftAmt = MI.getOperand(OpIdx).getImm();
assert(ShiftAmt < 32 && "shift_imm range is 0 to 31!");
Binary |= ShiftAmt << ARMII::ShiftShift;

emitWordLE(Binary);
}

@@ -1194,7 +1194,7 @@ void Emitter<CodeEmitter>::emitMiscBranchInstruction(const MachineInstr &MI) {
if (TID.Opcode == ARM::BX_RET)
// The return register is LR.
Binary |= ARMRegisterInfo::getRegisterNumbering(ARM::LR);
else
// otherwise, set the return register
Binary |= getMachineOpValue(MI, 0);

@@ -1279,7 +1279,7 @@ void Emitter<CodeEmitter>::emitVFPArithInstruction(const MachineInstr &MI) {

// Encode Dm / Sm.
Binary |= encodeVFPRm(MI, OpIdx);

emitWordLE(Binary);
}

@@ -907,7 +907,7 @@ static inline unsigned getUnconditionalBrDisp(int Opc) {
default:
break;
}

return ((1<<23)-1)*4;
}

@@ -73,7 +73,7 @@ public:
return 2;
}


virtual int getExistingMachineCPValue(MachineConstantPool *CP,
unsigned Alignment);

@@ -91,7 +91,7 @@ inline std::ostream &operator<<(std::ostream &O,
V.print(O);
return O;
}

inline raw_ostream &operator<<(raw_ostream &O, const ARMConstantPoolValue &V) {
V.print(O);
return O;
@@ -152,7 +152,7 @@ bool ARMDAGToDAGISel::SelectShifterOperandReg(SDValue Op,
// Don't match base register only case. That is matched to a separate
// lower complexity pattern with explicit register operand.
if (ShOpcVal == ARM_AM::no_shift) return false;

BaseReg = N.getOperand(0);
unsigned ShImmVal = 0;
if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
@@ -206,7 +206,7 @@ bool ARMDAGToDAGISel::SelectAddrMode2(SDValue Op, SDValue N,
EVT::i32);
return true;
}

// Match simple R +/- imm12 operands.
if (N.getOpcode() == ISD::ADD)
if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
@@ -231,15 +231,15 @@ bool ARMDAGToDAGISel::SelectAddrMode2(SDValue Op, SDValue N,
return true;
}
}

// Otherwise this is R +/- [possibly shifted] R
ARM_AM::AddrOpc AddSub = N.getOpcode() == ISD::ADD ? ARM_AM::add:ARM_AM::sub;
ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(N.getOperand(1));
unsigned ShAmt = 0;

Base = N.getOperand(0);
Offset = N.getOperand(1);

if (ShOpcVal != ARM_AM::no_shift) {
// Check to see if the RHS of the shift is a constant, if not, we can't fold
// it.
@@ -251,7 +251,7 @@ bool ARMDAGToDAGISel::SelectAddrMode2(SDValue Op, SDValue N,
ShOpcVal = ARM_AM::no_shift;
}
}

// Try matching (R shl C) + (R).
if (N.getOpcode() == ISD::ADD && ShOpcVal == ARM_AM::no_shift) {
ShOpcVal = ARM_AM::getShiftOpcForNode(N.getOperand(0));
@@ -268,7 +268,7 @@ bool ARMDAGToDAGISel::SelectAddrMode2(SDValue Op, SDValue N,
}
}
}

Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, ShAmt, ShOpcVal),
EVT::i32);
return true;
@@ -323,7 +323,7 @@ bool ARMDAGToDAGISel::SelectAddrMode3(SDValue Op, SDValue N,
Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(ARM_AM::sub, 0),EVT::i32);
return true;
}

if (N.getOpcode() != ISD::ADD) {
Base = N;
if (N.getOpcode() == ISD::FrameIndex) {
@@ -334,7 +334,7 @@ bool ARMDAGToDAGISel::SelectAddrMode3(SDValue Op, SDValue N,
Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(ARM_AM::add, 0),EVT::i32);
return true;
}

// If the RHS is +/- imm8, fold into addr mode.
if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
int RHSC = (int)RHS->getZExtValue();
@@ -356,7 +356,7 @@ bool ARMDAGToDAGISel::SelectAddrMode3(SDValue Op, SDValue N,
return true;
}
}

Base = N.getOperand(0);
Offset = N.getOperand(1);
Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(ARM_AM::add, 0), EVT::i32);
@@ -406,7 +406,7 @@ bool ARMDAGToDAGISel::SelectAddrMode5(SDValue Op, SDValue N,
EVT::i32);
return true;
}

// If the RHS is +/- imm8, fold into addr mode.
if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
int RHSC = (int)RHS->getZExtValue();
@@ -431,7 +431,7 @@ bool ARMDAGToDAGISel::SelectAddrMode5(SDValue Op, SDValue N,
}
}
}

Base = N;
Offset = CurDAG->getTargetConstant(ARM_AM::getAM5Opc(ARM_AM::add, 0),
EVT::i32);
@@ -579,7 +579,7 @@ bool ARMDAGToDAGISel::SelectThumbAddrModeSP(SDValue Op, SDValue N,
}
}
}

return false;
}

@@ -659,7 +659,7 @@ bool ARMDAGToDAGISel::SelectT2AddrModeImm8(SDValue Op, SDValue N,
int RHSC = (int)RHS->getSExtValue();
if (N.getOpcode() == ISD::SUB)
RHSC = -RHSC;

if ((RHSC >= -255) && (RHSC < 0)) { // 8 bits (always negative)
Base = N.getOperand(0);
if (Base.getOpcode() == ISD::FrameIndex) {
@@ -747,8 +747,8 @@ bool ARMDAGToDAGISel::SelectT2AddrModeSoReg(SDValue Op, SDValue N,
ShOpcVal = ARM_AM::getShiftOpcForNode(Base);
if (ShOpcVal == ARM_AM::lsl)
std::swap(Base, OffReg);
}

if (ShOpcVal == ARM_AM::lsl) {
// Check to see if the RHS of the shift is a constant, if not, we can't fold
// it.
@@ -763,7 +763,7 @@ bool ARMDAGToDAGISel::SelectT2AddrModeSoReg(SDValue Op, SDValue N,
ShOpcVal = ARM_AM::no_shift;
}
}

ShImm = CurDAG->getTargetConstant(ShAmt, EVT::i32);

return true;
@@ -901,7 +901,7 @@ SDNode *ARMDAGToDAGISel::SelectDYN_ALLOC(SDValue Op) {
// Use tADDrSPr since Thumb1 does not have a sub r, sp, r. ARMISelLowering
// should have negated the size operand already. FIXME: We can't insert
// new target independent node at this stage so we are forced to negate
// it earlier. Is there a better solution?
return CurDAG->SelectNodeTo(N, ARM::tADDspr_, VT, EVT::Other, SP, Size,
Chain);
} else if (Subtarget->isThumb2()) {
@@ -964,7 +964,7 @@ SDNode *ARMDAGToDAGISel::Select(SDValue Op) {
Ops, 4);
} else {
SDValue Ops[] = {
CPIdx,
CurDAG->getRegister(0, EVT::i32),
CurDAG->getTargetConstant(0, EVT::i32),
getAL(CurDAG),
@@ -977,7 +977,7 @@ SDNode *ARMDAGToDAGISel::Select(SDValue Op) {
ReplaceUses(Op, SDValue(ResNode, 0));
return NULL;
}

// Other cases are autogenerated.
break;
}
@@ -1096,7 +1096,7 @@ SDNode *ARMDAGToDAGISel::Select(SDValue Op) {
// Emits: (t2Bcc:void (bb:Other):$dst, (imm:i32):$cc)
// Pattern complexity = 6 cost = 1 size = 0

unsigned Opc = Subtarget->isThumb() ?
((Subtarget->hasThumb2()) ? ARM::t2Bcc : ARM::tBcc) : ARM::Bcc;
SDValue Chain = Op.getOperand(0);
SDValue N1 = Op.getOperand(1);
@@ -1111,7 +1111,7 @@ SDNode *ARMDAGToDAGISel::Select(SDValue Op) {
cast<ConstantSDNode>(N2)->getZExtValue()),
EVT::i32);
SDValue Ops[] = { N1, Tmp2, N3, Chain, InFlag };
SDNode *ResNode = CurDAG->getTargetNode(Opc, dl, EVT::Other,
EVT::Flag, Ops, 5);
Chain = SDValue(ResNode, 0);
if (Op.getNode()->getNumValues() == 2) {
@@ -1233,7 +1233,7 @@ SDNode *ARMDAGToDAGISel::Select(SDValue Op) {
break;
case EVT::f64:
Opc = ARM::FCPYDcc;
break;
}
return CurDAG->SelectNodeTo(Op.getNode(), Opc, VT, Ops, 5);
}
@@ -1297,7 +1297,7 @@ SDNode *ARMDAGToDAGISel::Select(SDValue Op) {
ReplaceUses(Op.getValue(0), Chain);
return NULL;
}

SDValue Tmp1 = CurDAG->getTargetFrameIndex(FINode->getIndex(),
TLI.getPointerTy());
SDValue Tmp2 = CurDAG->getTargetGlobalAddress(GV, TLI.getPointerTy());
@@ -1519,7 +1519,7 @@ SelectInlineAsmMemoryOperand(const SDValue &Op, char ConstraintCode,
SDValue Base, Offset, Opc;
if (!SelectAddrMode2(Op, Op, Base, Offset, Opc))
return true;

OutOps.push_back(Base);
OutOps.push_back(Offset);
OutOps.push_back(Opc);
@@ -66,7 +66,7 @@ void ARMTargetLowering::addTypeForNEON(EVT VT, EVT PromotedLdStVT,
PromotedLdStVT.getSimpleVT());

setOperationAction(ISD::STORE, VT.getSimpleVT(), Promote);
AddPromotedToType (ISD::STORE, VT.getSimpleVT(),
PromotedLdStVT.getSimpleVT());
}

@@ -91,10 +91,10 @@ void ARMTargetLowering::addTypeForNEON(EVT VT, EVT PromotedLdStVT,
AddPromotedToType (ISD::AND, VT.getSimpleVT(),
PromotedBitwiseVT.getSimpleVT());
setOperationAction(ISD::OR, VT.getSimpleVT(), Promote);
AddPromotedToType (ISD::OR, VT.getSimpleVT(),
PromotedBitwiseVT.getSimpleVT());
setOperationAction(ISD::XOR, VT.getSimpleVT(), Promote);
AddPromotedToType (ISD::XOR, VT.getSimpleVT(),
PromotedBitwiseVT.getSimpleVT());
}
}
@@ -3317,7 +3317,7 @@ ARMTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base,
if (Subtarget->isThumb() && Subtarget->hasThumb2())
isLegal = getT2IndexedAddressParts(Ptr.getNode(), VT, isSEXTLoad, Base,
Offset, isInc, DAG);
else
isLegal = getARMIndexedAddressParts(Ptr.getNode(), VT, isSEXTLoad, Base,
Offset, isInc, DAG);
if (!isLegal)
@@ -3354,7 +3354,7 @@ bool ARMTargetLowering::getPostIndexedAddressParts(SDNode *N, SDNode *Op,
if (Subtarget->isThumb() && Subtarget->hasThumb2())
isLegal = getT2IndexedAddressParts(Op, VT, isSEXTLoad, Base, Offset,
isInc, DAG);
else
isLegal = getARMIndexedAddressParts(Op, VT, isSEXTLoad, Base, Offset,
isInc, DAG);
if (!isLegal)
@@ -47,7 +47,7 @@ static TargetJITInfo::JITCompilerFn JITCompilerFunction;
// CompilationCallback stub - We can't use a C function with inline assembly in
// it, because we the prolog/epilog inserted by GCC won't work for us (we need
// to preserve more context and manipulate the stack directly). Instead,
// write our own wrapper, which does things our way, so we have complete
// control over register saving and restoring.
extern "C" {
#if defined(__arm__)
@@ -79,11 +79,11 @@ extern "C" {
// order for the registers.
// +--------+
// 0 | LR | Original return address
// +--------+
// 1 | LR | Stub address (start of stub)
// 2-5 | R3..R0 | Saved registers (we need to preserve all regs)
// 6-20 | D0..D7 | Saved VFP registers
// +--------+
//
#ifndef __SOFTFP__
// Restore VFP caller-saved registers.
@@ -110,9 +110,9 @@ extern "C" {
#endif
}

/// ARMCompilationCallbackC - This is the target-specific function invoked
/// by the function stub when we did not know the real target of a call.
/// This function must locate the start of the stub or call site and pass
/// it into the JIT compiler function.
extern "C" void ARMCompilationCallbackC(intptr_t StubAddr) {
// Get the address of the compiled code for this function.
@@ -161,7 +161,7 @@ void *ARMJITInfo::emitFunctionStub(const Function* F, void *Fn,
// In PIC mode, the function stub is loading a lazy-ptr.
LazyPtr= (intptr_t)emitGlobalValueIndirectSym((GlobalValue*)F, Fn, JCE);
DEBUG(if (F)
errs() << "JIT: Indirect symbol emitted at [" << LazyPtr
<< "] for GV '" << F->getName() << "'\n";
else
errs() << "JIT: Stub emitted at [" << LazyPtr
@@ -184,7 +184,7 @@ void *ARMJITInfo::emitFunctionStub(const Function* F, void *Fn,
}
} else {
// The compilation callback will overwrite the first two words of this
// stub with indirect branch instructions targeting the compiled code.
// This stub sets the return address to restart the stub, so that
// the new branch will be invoked when we come back.
//
@@ -162,7 +162,7 @@ static bool isi32Store(unsigned Opc) {

/// MergeOps - Create and insert a LDM or STM with Base as base register and
/// registers in Regs as the register operands that would be loaded / stored.
/// It returns true if the transformation is done.
bool
ARMLoadStoreOpt::MergeOps(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI,
@@ -968,7 +968,7 @@ bool ARMLoadStoreOpt::LoadStoreMultipleOpti(MachineBasicBlock &MBB) {
if (MergeBaseUpdateLoadStore(MBB, MemOps[i].MBBI, TII,Advance,MBBI))
++NumMerges;

// RS may be pointing to an instruction that's deleted.
RS->skipTo(prior(MBBI));
} else if (NumMemOps == 1) {
// Try folding preceeding/trailing base inc/dec into the single
@@ -1,10 +1,10 @@
//====- ARMMachineFuctionInfo.h - ARM machine function info -----*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file declares ARM-specific per-machine-function information.
@@ -140,7 +140,7 @@ public:

unsigned getFramePtrSpillOffset() const { return FramePtrSpillOffset; }
void setFramePtrSpillOffset(unsigned o) { FramePtrSpillOffset = o; }

unsigned getGPRCalleeSavedArea1Offset() const { return GPRCS1Offset; }
unsigned getGPRCalleeSavedArea2Offset() const { return GPRCS2Offset; }
unsigned getDPRCalleeSavedAreaOffset() const { return DPRCSOffset; }
@@ -65,7 +65,7 @@ protected:

/// Selected instruction itineraries (one entry per itinerary class.)
InstrItineraryData InstrItins;

public:
enum {
isELF, isDarwin
@@ -103,9 +103,9 @@ protected:
bool hasVFP2() const { return ARMFPUType >= VFPv2; }
bool hasVFP3() const { return ARMFPUType >= VFPv3; }
bool hasNEON() const { return ARMFPUType >= NEON; }
bool useNEONForSinglePrecisionFP() const {
return hasNEON() && UseNEONForSinglePrecisionFP; }

bool isTargetDarwin() const { return TargetType == isDarwin; }
bool isTargetELF() const { return TargetType == isELF; }

@@ -121,7 +121,7 @@ protected:

const std::string & getCPUString() const { return CPUString; }

/// getInstrItins - Return the instruction itineraies based on subtarget
/// selection.
const InstrItineraryData &getInstrItineraryData() const { return InstrItins; }

@@ -27,7 +27,7 @@ static cl::opt<bool> DisableLdStOpti("disable-arm-loadstore-opti", cl::Hidden,
static cl::opt<bool> DisableIfConversion("disable-arm-if-conversion",cl::Hidden,
cl::desc("Disable if-conversion pass"));

extern "C" void LLVMInitializeARMTarget() {
// Register the target.
RegisterTargetMachine<ARMTargetMachine> X(TheARMTarget);
RegisterTargetMachine<ThumbTargetMachine> Y(TheThumbTarget);
@@ -13,14 +13,14 @@
#include "llvm/Target/TargetLoweringObjectFile.h"

namespace llvm {

class ARMElfTargetObjectFile : public TargetLoweringObjectFileELF {
public:
ARMElfTargetObjectFile() : TargetLoweringObjectFileELF() {}

void Initialize(MCContext &Ctx, const TargetMachine &TM) {
TargetLoweringObjectFileELF::Initialize(Ctx, TM);

// FIXME: Add new attribute/flag to MCSection for init_array/fini_array.
// That will allow not treating these as "directives".
if (TM.getSubtarget<ARMSubtarget>().isAAPCS_ABI()) {
@@ -69,7 +69,7 @@ public:
MachineInstr* MI,
const SmallVectorImpl<unsigned> &Ops,
int FrameIndex) const;

MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
MachineInstr* MI,
const SmallVectorImpl<unsigned> &Ops,
@@ -389,7 +389,7 @@ static void removeOperands(MachineInstr &MI, unsigned i) {
int Thumb1RegisterInfo::
rewriteFrameIndex(MachineInstr &MI, unsigned FrameRegIdx,
unsigned FrameReg, int Offset,
unsigned MOVOpc, unsigned ADDriOpc, unsigned SUBriOpc) const
{
// if/when eliminateFrameIndex() conforms with ARMBaseRegisterInfo
// version then can pull out Thumb1 specific parts here
@@ -330,7 +330,7 @@ int llvm::rewriteT2FrameIndex(MachineInstr &MI, unsigned FrameRegIdx,
// Memory operands in inline assembly always use AddrModeT2_i12.
if (Opcode == ARM::INLINEASM)
AddrMode = ARMII::AddrModeT2_i12; // FIXME. mode for thumb2?

if (Opcode == ARM::t2ADDri || Opcode == ARM::t2ADDri12) {
Offset += MI.getOperand(FrameRegIdx+1).getImm();

@@ -389,7 +389,7 @@ int llvm::rewriteT2FrameIndex(MachineInstr &MI, unsigned FrameRegIdx,
MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
return Offset;
}

MI.RemoveOperand(FrameRegIdx+1);
MI.getOperand(FrameRegIdx+1).ChangeToImmediate(0);
NewOpc = immediateOffsetOpcode(Opcode);
@@ -444,13 +444,13 @@ int llvm::rewriteT2FrameIndex(MachineInstr &MI, unsigned FrameRegIdx,
if (AddrMode == ARMII::AddrMode5)
// FIXME: Not consistent.
ImmedOffset |= 1 << NumBits;
else
ImmedOffset = -ImmedOffset;
}
ImmOp.ChangeToImmediate(ImmedOffset);
return 0;
}

// Otherwise, offset doesn't fit. Pull in what we can to simplify
ImmedOffset = ImmedOffset & Mask;
if (isSub) {