Whitespace cleanup. Remove trailing whitespace.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@78666 91177308-0d34-0410-b5e6-96231b3b80d8
Jim Grosbach 2009-08-11 15:33:49 +00:00
parent f128787f94
commit 764ab52dd8
19 changed files with 116 additions and 116 deletions

View File

@@ -98,7 +98,7 @@ FunctionPass *createARMCodeEmitterPass(ARMBaseTargetMachine &TM,
MachineCodeEmitter &MCE);
FunctionPass *createARMJITCodeEmitterPass(ARMBaseTargetMachine &TM,
JITCodeEmitter &JCE);
FunctionPass *createARMObjectCodeEmitterPass(ARMBaseTargetMachine &TM,
ObjectCodeEmitter &OCE);
FunctionPass *createARMLoadStoreOptimizationPass(bool PreAlloc = false);

View File

@@ -20,7 +20,7 @@
#include <cassert>
namespace llvm {
/// ARM_AM - ARM Addressing Mode Stuff
namespace ARM_AM {
enum ShiftOpc {
@@ -31,11 +31,11 @@ namespace ARM_AM {
ror,
rrx
};
enum AddrOpc {
add = '+', sub = '-'
};
static inline const char *getShiftOpcStr(ShiftOpc Op) {
switch (Op) {
default: llvm_unreachable("Unknown shift opc!");
@@ -46,7 +46,7 @@ namespace ARM_AM {
case ARM_AM::rrx: return "rrx";
}
}
static inline ShiftOpc getShiftOpcForNode(SDValue N) {
switch (N.getOpcode()) {
default: return ARM_AM::no_shift;
@@ -95,14 +95,14 @@ namespace ARM_AM {
assert(Amt < 32 && "Invalid rotate amount");
return (Val >> Amt) | (Val << ((32-Amt)&31));
}
/// rotl32 - Rotate a 32-bit unsigned value left by a specified # bits.
///
static inline unsigned rotl32(unsigned Val, unsigned Amt) {
assert(Amt < 32 && "Invalid rotate amount");
return (Val << Amt) | (Val >> ((32-Amt)&31));
}
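
Aside on the two rotate helpers above: the (32-Amt)&31 term keeps the complementary shift inside [0,31], so Amt == 0 never turns into a shift by 32, which would be undefined behavior in C++. A minimal standalone sketch of the same helper, not part of this commit:

#include <cassert>

static unsigned rotr32(unsigned Val, unsigned Amt) {
  assert(Amt < 32 && "Invalid rotate amount");
  return (Val >> Amt) | (Val << ((32 - Amt) & 31)); // &31 guards Amt == 0
}

int main() {
  assert(rotr32(0xFFu, 8) == 0xFF000000u);       // low byte moves to the top
  assert(rotr32(0x12345678u, 0) == 0x12345678u); // identity, no UB
  return 0;
}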
//===--------------------------------------------------------------------===//
// Addressing Mode #1: shift_operand with registers
//===--------------------------------------------------------------------===//
@@ -137,7 +137,7 @@ namespace ARM_AM {
static inline unsigned getSOImmValRot(unsigned Imm) {
return (Imm >> 8) * 2;
}
/// getSOImmValRotate - Try to handle Imm with an immediate shifter operand,
/// computing the rotate amount to use. If this immediate value cannot be
/// handled with a single shifter-op, determine a good rotate amount that will
@@ -146,14 +146,14 @@ namespace ARM_AM {
// 8-bit (or less) immediates are trivially shifter_operands with a rotate
// of zero.
if ((Imm & ~255U) == 0) return 0;
// Use CTZ to compute the rotate amount.
unsigned TZ = CountTrailingZeros_32(Imm);
// Rotate amount must be even. Something like 0x200 must be rotated 8 bits,
// not 9.
unsigned RotAmt = TZ & ~1;
// If we can handle this spread, return it.
if ((rotr32(Imm, RotAmt) & ~255U) == 0)
return (32-RotAmt)&31; // HW rotates right, not left.
@@ -166,16 +166,16 @@ namespace ARM_AM {
// Restart the search for a high-order bit after the initial string of
// ones.
unsigned TZ2 = CountTrailingZeros_32(Imm & ~((1 << TrailingOnes)-1));
// Rotate amount must be even.
unsigned RotAmt2 = TZ2 & ~1;
// If this fits, use it.
if (RotAmt2 != 32 && (rotr32(Imm, RotAmt2) & ~255U) == 0)
return (32-RotAmt2)&31; // HW rotates right, not left.
}
}
// Otherwise, we have no way to cover this span of bits with a single
// shifter_op immediate. Return a chunk of bits that will be useful to
// handle.
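
The rotate-amount search in getSOImmValRotate can be exercised on its own. Below is a sketch assuming C++20, with std::countr_zero and std::rotr standing in for CountTrailingZeros_32 and rotr32; softRotate is an illustrative name, and it omits the two-chunk fallback above:

#include <bit>
#include <cassert>

static int softRotate(unsigned Imm) {
  if ((Imm & ~255U) == 0) return 0;              // already an 8-bit payload
  unsigned RotAmt = std::countr_zero(Imm) & ~1u; // rotate amount must be even
  if ((std::rotr(Imm, (int)RotAmt) & ~255U) == 0)
    return (32 - RotAmt) & 31;                   // HW rotates right, not left
  return -1;                                     // would need the fallback path
}

int main() {
  // 0x200 has nine trailing zeros; rounding down to 8 leaves payload 0x2,
  // so the hardware right-rotate is (32 - 8) & 31 = 24.
  assert(softRotate(0x200u) == 24);
  assert(std::rotr(0x2u, 24) == 0x200u);         // decoding round-trips
  return 0;
}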
@@ -189,17 +189,17 @@ namespace ARM_AM {
// 8-bit (or less) immediates are trivially shifter_operands with a rotate
// of zero.
if ((Arg & ~255U) == 0) return Arg;
unsigned RotAmt = getSOImmValRotate(Arg);
// If this cannot be handled with a single shifter_op, bail out.
if (rotr32(~255U, RotAmt) & Arg)
return -1;
// Encode this correctly.
return rotl32(Arg, RotAmt) | ((RotAmt>>1) << 8);
}
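
For reference, the 12-bit encoding getSOImmVal builds (payload in bits 0-7, rotate/2 in bits 8-11, matching getSOImmValImm and getSOImmValRot above) round-trips like this; decodeSOImm is a hypothetical name, and the sketch assumes C++20 <bit>:

#include <bit>
#include <cassert>

// Value = Payload8 rotated right by Rot; Enc = Payload8 | (Rot/2) << 8.
static unsigned decodeSOImm(unsigned Enc) {
  return std::rotr(Enc & 255u, (int)((Enc >> 8) * 2));
}

int main() {
  unsigned Enc = 0x2u | ((24u / 2) << 8);  // payload 0x02, rotate 24
  assert(Enc == 0xC02u);
  assert(decodeSOImm(Enc) == 0x200u);      // 0x02 ror 24 == 0x200
  return 0;
}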
/// isSOImmTwoPartVal - Return true if the specified value can be obtained by
/// or'ing together two SOImmVal's.
static inline bool isSOImmTwoPartVal(unsigned V) {
@@ -207,12 +207,12 @@ namespace ARM_AM {
V = rotr32(~255U, getSOImmValRotate(V)) & V;
if (V == 0)
return false;
// If this can be handled with two shifter_op's, accept.
V = rotr32(~255U, getSOImmValRotate(V)) & V;
return V == 0;
}
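
A concrete two-part value for the predicate above: 0x000F000F does not fit one rotated 8-bit window, but each half does, so or'ing two SOImmVals covers it. The example values are ours, not from the commit:

#include <cassert>

int main() {
  unsigned V = 0x000F000Fu;
  unsigned First  = 0x0000000Fu;   // payload 0x0F, rotate 0
  unsigned Second = 0x000F0000u;   // payload 0x0F, rotated right by 16
  assert((First | Second) == V);
  assert((First & ~255u) == 0);    // the first chunk fits one window
  assert((V & ~255u) != 0);        // the whole value does not
  return 0;
}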
/// getSOImmTwoPartFirst - If V is a value that satisfies isSOImmTwoPartVal,
/// return the first chunk of it.
static inline unsigned getSOImmTwoPartFirst(unsigned V) {
@@ -222,14 +222,14 @@ namespace ARM_AM {
/// getSOImmTwoPartSecond - If V is a value that satisfies isSOImmTwoPartVal,
/// return the second chunk of it.
static inline unsigned getSOImmTwoPartSecond(unsigned V) {
// Mask out the first hunk.
V = rotr32(~255U, getSOImmValRotate(V)) & V;
// Take what's left.
assert(V == (rotr32(255U, getSOImmValRotate(V)) & V));
return V;
}
/// getThumbImmValShift - Try to handle Imm with a 8-bit immediate followed
/// by a left shift. Returns the shift amount to use.
static inline unsigned getThumbImmValShift(unsigned Imm) {
@@ -244,7 +244,7 @@ namespace ARM_AM {
/// isThumbImmShiftedVal - Return true if the specified value can be obtained
/// by left shifting a 8-bit immediate.
static inline bool isThumbImmShiftedVal(unsigned V) {
// If this can be handled with
V = (~255U << getThumbImmValShift(V)) & V;
return V == 0;
}
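
The Thumb check above restated with C++20 <bit>; isShifted8 is an illustrative name. A value qualifies exactly when it is an 8-bit immediate shifted left by its trailing-zero count:

#include <bit>
#include <cassert>

static bool isShifted8(unsigned V) {
  unsigned Shift = (V & ~255u) == 0 ? 0 : (unsigned)std::countr_zero(V);
  return ((~255u << Shift) & V) == 0;
}

int main() {
  assert(isShifted8(0x1FE0u));    // 0xFF << 5
  assert(!isShifted8(0x1010u));   // set bits span more than 8 positions
  return 0;
}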
@@ -260,10 +260,10 @@ namespace ARM_AM {
return CountTrailingZeros_32(Imm);
}
/// isThumbImm16ShiftedVal - Return true if the specified value can be
/// obtained by left shifting a 16-bit immediate.
static inline bool isThumbImm16ShiftedVal(unsigned V) {
// If this can be handled with
V = (~65535U << getThumbImm16ValShift(V)) & V;
return V == 0;
}
@@ -287,9 +287,9 @@ namespace ARM_AM {
static inline int getT2SOImmValSplatVal(unsigned V) {
unsigned u, Vs, Imm;
// control = 0
if ((V & 0xffffff00) == 0)
return V;
// If the value is zeroes in the first byte, just shift those off
Vs = ((V & 0xff) == 0) ? V >> 8 : V;
// Any passing value only has 8 bits of payload, splatted across the word
@@ -325,7 +325,7 @@ namespace ARM_AM {
}
/// getT2SOImmVal - Given a 32-bit immediate, if it is something that can fit
/// into a Thumb-2 shifter_operand immediate operand, return the 12-bit
/// encoding for it. If not, return -1.
/// See ARM Reference Manual A6.3.2.
static inline int getT2SOImmVal(unsigned Arg) {
@@ -333,7 +333,7 @@ namespace ARM_AM {
int Splat = getT2SOImmValSplatVal(Arg);
if (Splat != -1)
return Splat;
// If 'Arg' can be handled with a single shifter_op return the value.
int Rot = getT2SOImmValRotateVal(Arg);
if (Rot != -1)
@@ -341,7 +341,7 @@ namespace ARM_AM {
return -1;
}
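
The four replication patterns that getT2SOImmValSplatVal accepts (see ARM ARM A6.3.2) can also be written as a direct predicate; isT2Splat and its brute-force structure are ours, not the shift-based algorithm above:

#include <cassert>

static bool isT2Splat(unsigned V) {
  unsigned B0 = V & 0xFFu, B1 = (V >> 8) & 0xFFu;
  return V == B0                                          // 000000XY
      || V == ((B0 << 16) | B0)                           // 00XY00XY
      || V == ((B1 << 24) | (B1 << 8))                    // XY00XY00
      || V == ((B0 << 24) | (B0 << 16) | (B0 << 8) | B0); // XYXYXYXY
}

int main() {
  assert(isT2Splat(0x00AB00ABu) && isT2Splat(0xAB00AB00u));
  assert(!isT2Splat(0x00AB0000u));  // handled by the rotate encoding instead
  return 0;
}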
//===--------------------------------------------------------------------===//
// Addressing Mode #2
@@ -359,7 +359,7 @@ namespace ARM_AM {
// If this addressing mode is a frame index (before prolog/epilog insertion
// and code rewriting), this operand will have the form: FI#, reg0, <offs>
// with no shift amount for the frame offset.
//
static inline unsigned getAM2Opc(AddrOpc Opc, unsigned Imm12, ShiftOpc SO) {
assert(Imm12 < (1 << 12) && "Imm too large!");
bool isSub = Opc == sub;
@@ -374,8 +374,8 @@ namespace ARM_AM {
static inline ShiftOpc getAM2ShiftOpc(unsigned AM2Opc) {
return (ShiftOpc)(AM2Opc >> 13);
}
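
Judging from the assert and the accessors above, the addrmode2 operand word carries imm12 in bits 0-11, the add/sub flag in bit 12, and the shift opcode from bit 13 up. A hypothetical pack/unpack sketch (the local ShiftOpc enum is for illustration only):

#include <cassert>

enum ShiftOpc { no_shift = 0, asr, lsl, lsr, ror, rrx };

static unsigned packAM2(bool isSub, unsigned Imm12, ShiftOpc SO) {
  assert(Imm12 < (1u << 12) && "Imm too large!");
  return Imm12 | ((unsigned)isSub << 12) | ((unsigned)SO << 13);
}

int main() {
  unsigned AM2Opc = packAM2(/*isSub=*/true, /*Imm12=*/100, lsl);
  assert((AM2Opc & ((1u << 12) - 1)) == 100);   // the offset field
  assert(((AM2Opc >> 12) & 1) == 1);            // the add/sub flag
  assert((ShiftOpc)(AM2Opc >> 13) == lsl);      // matches getAM2ShiftOpc
  return 0;
}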
//===--------------------------------------------------------------------===//
// Addressing Mode #3
//===--------------------------------------------------------------------===//
@@ -388,7 +388,7 @@ namespace ARM_AM {
// The first operand is always a Reg. The second operand is a reg if in
// reg/reg form, otherwise it's reg#0. The third field encodes the operation
// in bit 8, the immediate in bits 0-7.
/// getAM3Opc - This function encodes the addrmode3 opc field.
static inline unsigned getAM3Opc(AddrOpc Opc, unsigned char Offset) {
bool isSub = Opc == sub;
@@ -400,7 +400,7 @@ namespace ARM_AM {
static inline AddrOpc getAM3Op(unsigned AM3Opc) {
return ((AM3Opc >> 8) & 1) ? sub : add;
}
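
addrmode3 is the same packing idea in a smaller field, per the comment and getAM3Op above: immediate in bits 0-7, operation in bit 8. A minimal sketch with an illustrative packAM3 helper:

#include <cassert>

static unsigned packAM3(bool isSub, unsigned char Offset) {
  return (unsigned)Offset | ((unsigned)isSub << 8);
}

int main() {
  unsigned AM3Opc = packAM3(/*isSub=*/true, 42);
  assert((AM3Opc & 0xFFu) == 42);    // the 8-bit immediate
  assert(((AM3Opc >> 8) & 1) == 1);  // bit 8 selects sub, as in getAM3Op
  return 0;
}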
//===--------------------------------------------------------------------===//
// Addressing Mode #4
//===--------------------------------------------------------------------===//
@@ -448,7 +448,7 @@ namespace ARM_AM {
//
// IA - Increment after
// DB - Decrement before
/// getAM5Opc - This function encodes the addrmode5 opc field.
static inline unsigned getAM5Opc(AddrOpc Opc, unsigned char Offset) {
bool isSub = Opc == sub;

View File

@@ -523,7 +523,7 @@ ARMBaseInstrInfo::isMoveInstr(const MachineInstr &MI,
return false;
}
unsigned
ARMBaseInstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
int &FrameIndex) const {
switch (MI->getOpcode()) {
@@ -805,7 +805,7 @@ foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
return NewMI;
}
MachineInstr*
ARMBaseInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
MachineInstr* MI,
const SmallVectorImpl<unsigned> &Ops,
@@ -899,11 +899,11 @@ int llvm::rewriteARMFrameIndex(MachineInstr &MI, unsigned FrameRegIdx,
const TargetInstrDesc &Desc = MI.getDesc();
unsigned AddrMode = (Desc.TSFlags & ARMII::AddrModeMask);
bool isSub = false;
// Memory operands in inline assembly always use AddrMode2.
if (Opcode == ARM::INLINEASM)
AddrMode = ARMII::AddrMode2;
if (Opcode == ARM::ADDri) {
Offset += MI.getOperand(FrameRegIdx+1).getImm();
if (Offset == 0) {
@@ -997,7 +997,7 @@ int llvm::rewriteARMFrameIndex(MachineInstr &MI, unsigned FrameRegIdx,
ImmOp.ChangeToImmediate(ImmedOffset);
return 0;
}
// Otherwise, it didn't fit. Pull in what we can to simplify the immed.
ImmedOffset = ImmedOffset & Mask;
if (isSub)

View File

@@ -243,7 +243,7 @@ public:
virtual bool canFoldMemoryOperand(const MachineInstr *MI,
const SmallVectorImpl<unsigned> &Ops) const;
virtual MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
MachineInstr* MI,
const SmallVectorImpl<unsigned> &Ops,
@@ -311,7 +311,7 @@ void emitT2RegPlusImmediate(MachineBasicBlock &MBB,
const ARMBaseInstrInfo &TII);
/// rewriteARMFrameIndex / rewriteT2FrameIndex -
/// Rewrite MI to access 'Offset' bytes from the FP. Return the offset that
/// could not be handled directly in MI.
int rewriteARMFrameIndex(MachineInstr &MI, unsigned FrameRegIdx,

View File

@@ -470,13 +470,13 @@ ARMBaseRegisterInfo::estimateRSStackSizeLimit(MachineFunction &MF) const {
I != E; ++I) {
for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
if (!I->getOperand(i).isFI()) continue;
const TargetInstrDesc &Desc = TII.get(I->getOpcode());
unsigned AddrMode = (Desc.TSFlags & ARMII::AddrModeMask);
if (AddrMode == ARMII::AddrMode3 ||
AddrMode == ARMII::AddrModeT2_i8)
return (1 << 8) - 1;
if (AddrMode == ARMII::AddrMode5 ||
AddrMode == ARMII::AddrModeT2_i8s4)
Limit = std::min(Limit, ((1U << 8) - 1) * 4);
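
The two limits taken above, worked out: a mode with a plain 8-bit immediate (AddrMode3, AddrModeT2_i8) reaches at most 255 bytes, while a mode that scales an 8-bit immediate by 4 (AddrMode5, AddrModeT2_i8s4) reaches 1020. A toy version of the clamping:

#include <algorithm>
#include <cassert>

int main() {
  unsigned Limit = ~0u;                           // unrestricted so far
  Limit = std::min(Limit, ((1u << 8) - 1) * 4);   // AddrMode5-style operand
  assert(Limit == 1020u);
  // An AddrMode3-style operand is stricter and short-circuits to 255:
  assert((1u << 8) - 1 == 255u);
  return 0;
}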
@@ -1235,7 +1235,7 @@ static bool isCalleeSavedRegister(unsigned Reg, const unsigned *CSRegs) {
}
static bool isCSRestore(MachineInstr *MI,
const ARMBaseInstrInfo &TII,
const unsigned *CSRegs) {
return ((MI->getOpcode() == (int)ARM::FLDD ||
MI->getOpcode() == (int)ARM::LDR ||
@@ -1297,7 +1297,7 @@ emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const {
ARMCC::AL, 0, TII);
} else {
// Thumb2 or ARM.
if (isARM)
BuildMI(MBB, MBBI, dl, TII.get(ARM::MOVr), ARM::SP)
.addReg(FramePtr)
.addImm((unsigned)ARMCC::AL).addReg(0).addReg(0);

View File

@@ -204,10 +204,10 @@ bool Emitter<CodeEmitter>::runOnMachineFunction(MachineFunction &MF) {
JTI->Initialize(MF, IsPIC);
do {
DEBUG(errs() << "JITTing function '"
<< MF.getFunction()->getName() << "'\n");
MCE.startFunction(MF);
for (MachineFunction::iterator MBB = MF.begin(), E = MF.end();
MBB != E; ++MBB) {
MCE.StartMachineBasicBlock(MBB);
for (MachineBasicBlock::const_iterator I = MBB->begin(), E = MBB->end();
@@ -300,7 +300,7 @@ void Emitter<CodeEmitter>::emitConstPoolAddress(unsigned CPI,
/// be emitted to the current location in the function, and allow it to be PC
/// relative.
template<class CodeEmitter>
void Emitter<CodeEmitter>::emitJumpTableAddress(unsigned JTIndex,
unsigned Reloc) {
MCE.addRelocation(MachineRelocation::getJumpTable(MCE.getCurrentPCOffset(),
Reloc, JTIndex, 0, true));
@@ -408,7 +408,7 @@ void Emitter<CodeEmitter>::emitConstPoolInstruction(const MachineInstr &MI) {
unsigned CPI = MI.getOperand(0).getImm(); // CP instruction index.
unsigned CPIndex = MI.getOperand(1).getIndex(); // Actual cp entry index.
const MachineConstantPoolEntry &MCPE = (*MCPEs)[CPIndex];
// Remember the CONSTPOOL_ENTRY address for later relocation.
JTI->addConstantPoolEntryAddr(CPI, MCE.getCurrentPCValue());
@@ -428,7 +428,7 @@ void Emitter<CodeEmitter>::emitConstPoolInstruction(const MachineInstr &MI) {
MCE.addRelocation(MachineRelocation::getIndirectSymbol(
MCE.getCurrentPCOffset(), ARM::reloc_arm_machine_cp_entry, GV,
(intptr_t)ACPV, false));
else
emitGlobalAddress(GV, ARM::reloc_arm_machine_cp_entry,
ACPV->isStub() || isa<Function>(GV), (intptr_t)ACPV);
} else {
@@ -515,7 +515,7 @@ void Emitter<CodeEmitter>::emitMOVi2piecesInstruction(const MachineInstr &MI) {
template<class CodeEmitter>
void Emitter<CodeEmitter>::emitLEApcrelJTInstruction(const MachineInstr &MI) {
// It's basically add r, pc, (LJTI - $+8)
const TargetInstrDesc &TID = MI.getDesc();
// Emit the 'add' instruction.
@@ -1117,7 +1117,7 @@ void Emitter<CodeEmitter>::emitMiscArithInstruction(const MachineInstr &MI) {
unsigned ShiftAmt = MI.getOperand(OpIdx).getImm();
assert(ShiftAmt < 32 && "shift_imm range is 0 to 31!");
Binary |= ShiftAmt << ARMII::ShiftShift;
emitWordLE(Binary);
}
@@ -1194,7 +1194,7 @@ void Emitter<CodeEmitter>::emitMiscBranchInstruction(const MachineInstr &MI) {
if (TID.Opcode == ARM::BX_RET)
// The return register is LR.
Binary |= ARMRegisterInfo::getRegisterNumbering(ARM::LR);
else
// otherwise, set the return register
Binary |= getMachineOpValue(MI, 0);
@@ -1279,7 +1279,7 @@ void Emitter<CodeEmitter>::emitVFPArithInstruction(const MachineInstr &MI) {
// Encode Dm / Sm.
Binary |= encodeVFPRm(MI, OpIdx);
emitWordLE(Binary);
}

View File

@@ -907,7 +907,7 @@ static inline unsigned getUnconditionalBrDisp(int Opc) {
default:
break;
}
return ((1<<23)-1)*4;
}

View File

@@ -73,7 +73,7 @@ public:
return 2;
}
virtual int getExistingMachineCPValue(MachineConstantPool *CP,
unsigned Alignment);
@@ -91,7 +91,7 @@ inline std::ostream &operator<<(std::ostream &O,
V.print(O);
return O;
}
inline raw_ostream &operator<<(raw_ostream &O, const ARMConstantPoolValue &V) {
V.print(O);
return O;

View File

@@ -152,7 +152,7 @@ bool ARMDAGToDAGISel::SelectShifterOperandReg(SDValue Op,
// Don't match base register only case. That is matched to a separate
// lower complexity pattern with explicit register operand.
if (ShOpcVal == ARM_AM::no_shift) return false;
BaseReg = N.getOperand(0);
unsigned ShImmVal = 0;
if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
@@ -206,7 +206,7 @@ bool ARMDAGToDAGISel::SelectAddrMode2(SDValue Op, SDValue N,
EVT::i32);
return true;
}
// Match simple R +/- imm12 operands.
if (N.getOpcode() == ISD::ADD)
if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
@@ -231,15 +231,15 @@ bool ARMDAGToDAGISel::SelectAddrMode2(SDValue Op, SDValue N,
return true;
}
}
// Otherwise this is R +/- [possibly shifted] R
ARM_AM::AddrOpc AddSub = N.getOpcode() == ISD::ADD ? ARM_AM::add:ARM_AM::sub;
ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(N.getOperand(1));
unsigned ShAmt = 0;
Base = N.getOperand(0);
Offset = N.getOperand(1);
if (ShOpcVal != ARM_AM::no_shift) {
// Check to see if the RHS of the shift is a constant, if not, we can't fold
// it.
@@ -251,7 +251,7 @@ bool ARMDAGToDAGISel::SelectAddrMode2(SDValue Op, SDValue N,
ShOpcVal = ARM_AM::no_shift;
}
}
// Try matching (R shl C) + (R).
if (N.getOpcode() == ISD::ADD && ShOpcVal == ARM_AM::no_shift) {
ShOpcVal = ARM_AM::getShiftOpcForNode(N.getOperand(0));
@@ -268,7 +268,7 @@ bool ARMDAGToDAGISel::SelectAddrMode2(SDValue Op, SDValue N,
}
}
}
Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, ShAmt, ShOpcVal),
EVT::i32);
return true;
@@ -323,7 +323,7 @@ bool ARMDAGToDAGISel::SelectAddrMode3(SDValue Op, SDValue N,
Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(ARM_AM::sub, 0),EVT::i32);
return true;
}
if (N.getOpcode() != ISD::ADD) {
Base = N;
if (N.getOpcode() == ISD::FrameIndex) {
@@ -334,7 +334,7 @@ bool ARMDAGToDAGISel::SelectAddrMode3(SDValue Op, SDValue N,
Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(ARM_AM::add, 0),EVT::i32);
return true;
}
// If the RHS is +/- imm8, fold into addr mode.
if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
int RHSC = (int)RHS->getZExtValue();
@@ -356,7 +356,7 @@ bool ARMDAGToDAGISel::SelectAddrMode3(SDValue Op, SDValue N,
return true;
}
}
Base = N.getOperand(0);
Offset = N.getOperand(1);
Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(ARM_AM::add, 0), EVT::i32);
@@ -406,7 +406,7 @@ bool ARMDAGToDAGISel::SelectAddrMode5(SDValue Op, SDValue N,
EVT::i32);
return true;
}
// If the RHS is +/- imm8, fold into addr mode.
if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
int RHSC = (int)RHS->getZExtValue();
@@ -431,7 +431,7 @@ bool ARMDAGToDAGISel::SelectAddrMode5(SDValue Op, SDValue N,
}
}
}
Base = N;
Offset = CurDAG->getTargetConstant(ARM_AM::getAM5Opc(ARM_AM::add, 0),
EVT::i32);
@@ -579,7 +579,7 @@ bool ARMDAGToDAGISel::SelectThumbAddrModeSP(SDValue Op, SDValue N,
}
}
}
return false;
}
@@ -659,7 +659,7 @@ bool ARMDAGToDAGISel::SelectT2AddrModeImm8(SDValue Op, SDValue N,
int RHSC = (int)RHS->getSExtValue();
if (N.getOpcode() == ISD::SUB)
RHSC = -RHSC;
if ((RHSC >= -255) && (RHSC < 0)) { // 8 bits (always negative)
Base = N.getOperand(0);
if (Base.getOpcode() == ISD::FrameIndex) {
@@ -747,8 +747,8 @@ bool ARMDAGToDAGISel::SelectT2AddrModeSoReg(SDValue Op, SDValue N,
ShOpcVal = ARM_AM::getShiftOpcForNode(Base);
if (ShOpcVal == ARM_AM::lsl)
std::swap(Base, OffReg);
}
}
if (ShOpcVal == ARM_AM::lsl) {
// Check to see if the RHS of the shift is a constant, if not, we can't fold
// it.
@@ -763,7 +763,7 @@ bool ARMDAGToDAGISel::SelectT2AddrModeSoReg(SDValue Op, SDValue N,
ShOpcVal = ARM_AM::no_shift;
}
}
ShImm = CurDAG->getTargetConstant(ShAmt, EVT::i32);
return true;
@@ -901,7 +901,7 @@ SDNode *ARMDAGToDAGISel::SelectDYN_ALLOC(SDValue Op) {
// Use tADDrSPr since Thumb1 does not have a sub r, sp, r. ARMISelLowering
// should have negated the size operand already. FIXME: We can't insert
// new target independent node at this stage so we are forced to negate
// it earlier. Is there a better solution?
return CurDAG->SelectNodeTo(N, ARM::tADDspr_, VT, EVT::Other, SP, Size,
Chain);
} else if (Subtarget->isThumb2()) {
@@ -964,7 +964,7 @@ SDNode *ARMDAGToDAGISel::Select(SDValue Op) {
Ops, 4);
} else {
SDValue Ops[] = {
CPIdx,
CurDAG->getRegister(0, EVT::i32),
CurDAG->getTargetConstant(0, EVT::i32),
getAL(CurDAG),
@@ -977,7 +977,7 @@ SDNode *ARMDAGToDAGISel::Select(SDValue Op) {
ReplaceUses(Op, SDValue(ResNode, 0));
return NULL;
}
// Other cases are autogenerated.
break;
}
@@ -1096,7 +1096,7 @@ SDNode *ARMDAGToDAGISel::Select(SDValue Op) {
// Emits: (t2Bcc:void (bb:Other):$dst, (imm:i32):$cc)
// Pattern complexity = 6 cost = 1 size = 0
unsigned Opc = Subtarget->isThumb() ?
((Subtarget->hasThumb2()) ? ARM::t2Bcc : ARM::tBcc) : ARM::Bcc;
SDValue Chain = Op.getOperand(0);
SDValue N1 = Op.getOperand(1);
@@ -1111,7 +1111,7 @@ SDNode *ARMDAGToDAGISel::Select(SDValue Op) {
cast<ConstantSDNode>(N2)->getZExtValue()),
EVT::i32);
SDValue Ops[] = { N1, Tmp2, N3, Chain, InFlag };
SDNode *ResNode = CurDAG->getTargetNode(Opc, dl, EVT::Other,
EVT::Flag, Ops, 5);
Chain = SDValue(ResNode, 0);
if (Op.getNode()->getNumValues() == 2) {
@@ -1233,7 +1233,7 @@ SDNode *ARMDAGToDAGISel::Select(SDValue Op) {
break;
case EVT::f64:
Opc = ARM::FCPYDcc;
break;
}
return CurDAG->SelectNodeTo(Op.getNode(), Opc, VT, Ops, 5);
}
@@ -1297,7 +1297,7 @@ SDNode *ARMDAGToDAGISel::Select(SDValue Op) {
ReplaceUses(Op.getValue(0), Chain);
return NULL;
}
SDValue Tmp1 = CurDAG->getTargetFrameIndex(FINode->getIndex(),
TLI.getPointerTy());
SDValue Tmp2 = CurDAG->getTargetGlobalAddress(GV, TLI.getPointerTy());
@@ -1519,7 +1519,7 @@ SelectInlineAsmMemoryOperand(const SDValue &Op, char ConstraintCode,
SDValue Base, Offset, Opc;
if (!SelectAddrMode2(Op, Op, Base, Offset, Opc))
return true;
OutOps.push_back(Base);
OutOps.push_back(Offset);
OutOps.push_back(Opc);

View File

@@ -66,7 +66,7 @@ void ARMTargetLowering::addTypeForNEON(EVT VT, EVT PromotedLdStVT,
PromotedLdStVT.getSimpleVT());
setOperationAction(ISD::STORE, VT.getSimpleVT(), Promote);
AddPromotedToType (ISD::STORE, VT.getSimpleVT(),
PromotedLdStVT.getSimpleVT());
}
@@ -91,10 +91,10 @@ void ARMTargetLowering::addTypeForNEON(EVT VT, EVT PromotedLdStVT,
AddPromotedToType (ISD::AND, VT.getSimpleVT(),
PromotedBitwiseVT.getSimpleVT());
setOperationAction(ISD::OR, VT.getSimpleVT(), Promote);
AddPromotedToType (ISD::OR, VT.getSimpleVT(),
PromotedBitwiseVT.getSimpleVT());
setOperationAction(ISD::XOR, VT.getSimpleVT(), Promote);
AddPromotedToType (ISD::XOR, VT.getSimpleVT(),
PromotedBitwiseVT.getSimpleVT());
}
}
@@ -3317,7 +3317,7 @@ ARMTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base,
if (Subtarget->isThumb() && Subtarget->hasThumb2())
isLegal = getT2IndexedAddressParts(Ptr.getNode(), VT, isSEXTLoad, Base,
Offset, isInc, DAG);
else
isLegal = getARMIndexedAddressParts(Ptr.getNode(), VT, isSEXTLoad, Base,
Offset, isInc, DAG);
if (!isLegal)
@@ -3354,7 +3354,7 @@ bool ARMTargetLowering::getPostIndexedAddressParts(SDNode *N, SDNode *Op,
if (Subtarget->isThumb() && Subtarget->hasThumb2())
isLegal = getT2IndexedAddressParts(Op, VT, isSEXTLoad, Base, Offset,
isInc, DAG);
else
isLegal = getARMIndexedAddressParts(Op, VT, isSEXTLoad, Base, Offset,
isInc, DAG);
if (!isLegal)

View File

@@ -47,7 +47,7 @@ static TargetJITInfo::JITCompilerFn JITCompilerFunction;
// CompilationCallback stub - We can't use a C function with inline assembly in
// it, because the prolog/epilog inserted by GCC won't work for us (we need
// to preserve more context and manipulate the stack directly). Instead,
// write our own wrapper, which does things our way, so we have complete
// control over register saving and restoring.
extern "C" {
#if defined(__arm__)
@@ -79,11 +79,11 @@ extern "C" {
// order for the registers.
// +--------+
// 0 | LR | Original return address
// +--------+
// 1 | LR | Stub address (start of stub)
// 2-5 | R3..R0 | Saved registers (we need to preserve all regs)
// 6-20 | D0..D7 | Saved VFP registers
// +--------+
//
#ifndef __SOFTFP__
// Restore VFP caller-saved registers.
@@ -110,9 +110,9 @@ extern "C" {
#endif
}
/// ARMCompilationCallbackC - This is the target-specific function invoked
/// by the function stub when we did not know the real target of a call.
/// This function must locate the start of the stub or call site and pass
/// it into the JIT compiler function.
extern "C" void ARMCompilationCallbackC(intptr_t StubAddr) {
// Get the address of the compiled code for this function.
@@ -161,7 +161,7 @@ void *ARMJITInfo::emitFunctionStub(const Function* F, void *Fn,
// In PIC mode, the function stub is loading a lazy-ptr.
LazyPtr= (intptr_t)emitGlobalValueIndirectSym((GlobalValue*)F, Fn, JCE);
DEBUG(if (F)
errs() << "JIT: Indirect symbol emitted at [" << LazyPtr
errs() << "JIT: Indirect symbol emitted at [" << LazyPtr
<< "] for GV '" << F->getName() << "'\n";
else
errs() << "JIT: Stub emitted at [" << LazyPtr
@@ -184,7 +184,7 @@ void *ARMJITInfo::emitFunctionStub(const Function* F, void *Fn,
}
} else {
// The compilation callback will overwrite the first two words of this
// stub with indirect branch instructions targeting the compiled code.
// This stub sets the return address to restart the stub, so that
// the new branch will be invoked when we come back.
//

View File

@@ -162,7 +162,7 @@ static bool isi32Store(unsigned Opc) {
/// MergeOps - Create and insert a LDM or STM with Base as base register and
/// registers in Regs as the register operands that would be loaded / stored.
/// It returns true if the transformation is done.
bool
ARMLoadStoreOpt::MergeOps(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI,
@@ -968,7 +968,7 @@ bool ARMLoadStoreOpt::LoadStoreMultipleOpti(MachineBasicBlock &MBB) {
if (MergeBaseUpdateLoadStore(MBB, MemOps[i].MBBI, TII,Advance,MBBI))
++NumMerges;
// RS may be pointing to an instruction that's deleted.
RS->skipTo(prior(MBBI));
} else if (NumMemOps == 1) {
// Try folding preceding/trailing base inc/dec into the single

View File

@@ -1,10 +1,10 @@
//====- ARMMachineFuctionInfo.h - ARM machine function info -----*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file declares ARM-specific per-machine-function information.
@@ -140,7 +140,7 @@ public:
unsigned getFramePtrSpillOffset() const { return FramePtrSpillOffset; }
void setFramePtrSpillOffset(unsigned o) { FramePtrSpillOffset = o; }
unsigned getGPRCalleeSavedArea1Offset() const { return GPRCS1Offset; }
unsigned getGPRCalleeSavedArea2Offset() const { return GPRCS2Offset; }
unsigned getDPRCalleeSavedAreaOffset() const { return DPRCSOffset; }

View File

@@ -65,7 +65,7 @@ protected:
/// Selected instruction itineraries (one entry per itinerary class.)
InstrItineraryData InstrItins;
public:
enum {
isELF, isDarwin
@@ -103,9 +103,9 @@ protected:
bool hasVFP2() const { return ARMFPUType >= VFPv2; }
bool hasVFP3() const { return ARMFPUType >= VFPv3; }
bool hasNEON() const { return ARMFPUType >= NEON; }
bool useNEONForSinglePrecisionFP() const {
return hasNEON() && UseNEONForSinglePrecisionFP; }
bool isTargetDarwin() const { return TargetType == isDarwin; }
bool isTargetELF() const { return TargetType == isELF; }
@@ -121,7 +121,7 @@ protected:
const std::string & getCPUString() const { return CPUString; }
/// getInstrItins - Return the instruction itineraries based on subtarget
/// selection.
const InstrItineraryData &getInstrItineraryData() const { return InstrItins; }

View File

@@ -27,7 +27,7 @@ static cl::opt<bool> DisableLdStOpti("disable-arm-loadstore-opti", cl::Hidden,
static cl::opt<bool> DisableIfConversion("disable-arm-if-conversion",cl::Hidden,
cl::desc("Disable if-conversion pass"));
extern "C" void LLVMInitializeARMTarget() {
extern "C" void LLVMInitializeARMTarget() {
// Register the target.
RegisterTargetMachine<ARMTargetMachine> X(TheARMTarget);
RegisterTargetMachine<ThumbTargetMachine> Y(TheThumbTarget);

View File

@@ -13,14 +13,14 @@
#include "llvm/Target/TargetLoweringObjectFile.h"
namespace llvm {
class ARMElfTargetObjectFile : public TargetLoweringObjectFileELF {
public:
ARMElfTargetObjectFile() : TargetLoweringObjectFileELF() {}
void Initialize(MCContext &Ctx, const TargetMachine &TM) {
TargetLoweringObjectFileELF::Initialize(Ctx, TM);
// FIXME: Add new attribute/flag to MCSection for init_array/fini_array.
// That will allow not treating these as "directives".
if (TM.getSubtarget<ARMSubtarget>().isAAPCS_ABI()) {

View File

@@ -69,7 +69,7 @@ public:
MachineInstr* MI,
const SmallVectorImpl<unsigned> &Ops,
int FrameIndex) const;
MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
MachineInstr* MI,
const SmallVectorImpl<unsigned> &Ops,

View File

@@ -389,7 +389,7 @@ static void removeOperands(MachineInstr &MI, unsigned i) {
int Thumb1RegisterInfo::
rewriteFrameIndex(MachineInstr &MI, unsigned FrameRegIdx,
unsigned FrameReg, int Offset,
unsigned MOVOpc, unsigned ADDriOpc, unsigned SUBriOpc) const
{
// if/when eliminateFrameIndex() conforms with ARMBaseRegisterInfo
// version then can pull out Thumb1 specific parts here

View File

@@ -330,7 +330,7 @@ int llvm::rewriteT2FrameIndex(MachineInstr &MI, unsigned FrameRegIdx,
// Memory operands in inline assembly always use AddrModeT2_i12.
if (Opcode == ARM::INLINEASM)
AddrMode = ARMII::AddrModeT2_i12; // FIXME. mode for thumb2?
if (Opcode == ARM::t2ADDri || Opcode == ARM::t2ADDri12) {
Offset += MI.getOperand(FrameRegIdx+1).getImm();
@@ -389,7 +389,7 @@ int llvm::rewriteT2FrameIndex(MachineInstr &MI, unsigned FrameRegIdx,
MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
return Offset;
}
MI.RemoveOperand(FrameRegIdx+1);
MI.getOperand(FrameRegIdx+1).ChangeToImmediate(0);
NewOpc = immediateOffsetOpcode(Opcode);
@@ -444,13 +444,13 @@ int llvm::rewriteT2FrameIndex(MachineInstr &MI, unsigned FrameRegIdx,
if (AddrMode == ARMII::AddrMode5)
// FIXME: Not consistent.
ImmedOffset |= 1 << NumBits;
else
ImmedOffset = -ImmedOffset;
}
ImmOp.ChangeToImmediate(ImmedOffset);
return 0;
}
// Otherwise, offset doesn't fit. Pull in what we can to simplify
ImmedOffset = ImmedOffset & Mask;
if (isSub) {
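
Both rewriteARMFrameIndex and rewriteT2FrameIndex end in this same "pull in what we can" step: keep the low bits the addressing mode can encode and hand the remainder back for separate add/sub arithmetic. Worked numerically for a 12-bit immediate field, with example values of our choosing:

#include <cassert>

int main() {
  unsigned Mask = (1u << 12) - 1;     // e.g. a 12-bit immediate field
  unsigned Offset = 0x1234u;          // does not fit in 12 bits
  unsigned Encoded  = Offset & Mask;  // 0x234 stays in the instruction
  unsigned Leftover = Offset & ~Mask; // 0x1000 handled by extra arithmetic
  assert(Encoded == 0x234u && Leftover == 0x1000u);
  assert((Encoded | Leftover) == Offset);
  return 0;
}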