mirror of https://github.com/c64scene-ar/llvm-6502.git (synced 2024-12-15 20:29:48 +00:00)

commit 3af7a67629 (parent a54b3ac96e)

whitespace

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@140133 91177308-0d34-0410-b5e6-96231b3b80d8
@@ -87,7 +87,7 @@ EmitCopyFromReg(SDNode *Node, unsigned ResNo, bool IsClone, bool IsCloned,
          UI != E; ++UI) {
       SDNode *User = *UI;
       bool Match = true;
-      if (User->getOpcode() == ISD::CopyToReg &&
+      if (User->getOpcode() == ISD::CopyToReg &&
           User->getOperand(2).getNode() == Node &&
           User->getOperand(2).getResNo() == ResNo) {
         unsigned DestReg = cast<RegisterSDNode>(User->getOperand(1))->getReg();
@@ -139,7 +139,7 @@ EmitCopyFromReg(SDNode *Node, unsigned ResNo, bool IsClone, bool IsCloned,
   } else {
     DstRC = TLI->getRegClassFor(VT);
   }
-
+
   // If all uses are reading from the src physical register and copying the
   // register is either impossible or very expensive, then don't create a copy.
   if (MatchReg && SrcRC->getCopyCost() < 0) {
@@ -167,7 +167,7 @@ unsigned InstrEmitter::getDstOfOnlyCopyToRegUse(SDNode *Node,
     return 0;
 
   SDNode *User = *Node->use_begin();
-  if (User->getOpcode() == ISD::CopyToReg &&
+  if (User->getOpcode() == ISD::CopyToReg &&
       User->getOperand(2).getNode() == Node &&
       User->getOperand(2).getResNo() == ResNo) {
     unsigned Reg = cast<RegisterSDNode>(User->getOperand(1))->getReg();
@@ -202,7 +202,7 @@ void InstrEmitter::CreateVirtualRegisters(SDNode *Node, MachineInstr *MI,
       for (SDNode::use_iterator UI = Node->use_begin(), E = Node->use_end();
            UI != E; ++UI) {
         SDNode *User = *UI;
-        if (User->getOpcode() == ISD::CopyToReg &&
+        if (User->getOpcode() == ISD::CopyToReg &&
             User->getOperand(2).getNode() == Node &&
             User->getOperand(2).getResNo() == i) {
           unsigned Reg = cast<RegisterSDNode>(User->getOperand(1))->getReg();
@@ -326,7 +326,7 @@ InstrEmitter::AddRegisterOperand(MachineInstr *MI, SDValue Op,
 
 /// AddOperand - Add the specified operand to the specified machine instr. II
 /// specifies the instruction information for the node, and IIOpNum is the
-/// operand number (in the II) that we are adding. IIOpNum and II are used for
+/// operand number (in the II) that we are adding. IIOpNum and II are used for
 /// assertions only.
 void InstrEmitter::AddOperand(MachineInstr *MI, SDValue Op,
                               unsigned IIOpNum,
@@ -365,7 +365,7 @@ void InstrEmitter::AddOperand(MachineInstr *MI, SDValue Op,
         Align = TM->getTargetData()->getTypeAllocSize(Type);
       }
     }
-
+
     unsigned Idx;
     MachineConstantPool *MCP = MF->getConstantPool();
     if (CP->isMachineConstantPoolEntry())
@@ -406,18 +406,18 @@ getSuperRegisterRegClass(const TargetRegisterClass *TRC,
 
 /// EmitSubregNode - Generate machine code for subreg nodes.
 ///
-void InstrEmitter::EmitSubregNode(SDNode *Node,
+void InstrEmitter::EmitSubregNode(SDNode *Node,
                                   DenseMap<SDValue, unsigned> &VRBaseMap,
                                   bool IsClone, bool IsCloned) {
   unsigned VRBase = 0;
   unsigned Opc = Node->getMachineOpcode();
-
+
   // If the node is only used by a CopyToReg and the dest reg is a vreg, use
   // the CopyToReg'd destination register instead of creating a new vreg.
   for (SDNode::use_iterator UI = Node->use_begin(), E = Node->use_end();
        UI != E; ++UI) {
     SDNode *User = *UI;
-    if (User->getOpcode() == ISD::CopyToReg &&
+    if (User->getOpcode() == ISD::CopyToReg &&
         User->getOperand(2).getNode() == Node) {
       unsigned DestReg = cast<RegisterSDNode>(User->getOperand(1))->getReg();
       if (TargetRegisterInfo::isVirtualRegister(DestReg)) {
@@ -426,7 +426,7 @@ void InstrEmitter::EmitSubregNode(SDNode *Node,
       }
     }
   }
-
+
   if (Opc == TargetOpcode::EXTRACT_SUBREG) {
     // EXTRACT_SUBREG is lowered as %dst = COPY %src:sub
     unsigned SubIdx = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
@@ -498,7 +498,7 @@ void InstrEmitter::EmitSubregNode(SDNode *Node,
     // Create the insert_subreg or subreg_to_reg machine instruction.
     MachineInstr *MI = BuildMI(*MF, Node->getDebugLoc(), TII->get(Opc));
     MI->addOperand(MachineOperand::CreateReg(VRBase, true));
-
+
     // If creating a subreg_to_reg, then the first input operand
     // is an implicit value immediate, otherwise it's a register
     if (Opc == TargetOpcode::SUBREG_TO_REG) {
@@ -514,7 +514,7 @@ void InstrEmitter::EmitSubregNode(SDNode *Node,
     MBB->insert(InsertPos, MI);
   } else
     llvm_unreachable("Node is not insert_subreg, extract_subreg, or subreg_to_reg");
-
+
   SDValue Op(Node, 0);
   bool isNew = VRBaseMap.insert(std::make_pair(Op, VRBase)).second;
   (void)isNew; // Silence compiler warning.
@@ -643,9 +643,9 @@ void InstrEmitter::
 EmitMachineNode(SDNode *Node, bool IsClone, bool IsCloned,
                 DenseMap<SDValue, unsigned> &VRBaseMap) {
   unsigned Opc = Node->getMachineOpcode();
-
+
   // Handle subreg insert/extract specially
-  if (Opc == TargetOpcode::EXTRACT_SUBREG ||
+  if (Opc == TargetOpcode::EXTRACT_SUBREG ||
       Opc == TargetOpcode::INSERT_SUBREG ||
       Opc == TargetOpcode::SUBREG_TO_REG) {
     EmitSubregNode(Node, VRBaseMap, IsClone, IsCloned);
@@ -667,7 +667,7 @@ EmitMachineNode(SDNode *Node, bool IsClone, bool IsCloned,
   if (Opc == TargetOpcode::IMPLICIT_DEF)
     // We want a unique VR for each IMPLICIT_DEF use.
     return;
-
+
   const MCInstrDesc &II = TII->get(Opc);
   unsigned NumResults = CountResults(Node);
   unsigned NodeOperands = CountOperands(Node);
@@ -712,12 +712,12 @@ EmitMachineNode(SDNode *Node, bool IsClone, bool IsCloned,
     // Then mark unused registers as dead.
     MI->setPhysRegsDeadExcept(UsedRegs, *TRI);
   }
-
+
   // Add result register values for things that are defined by this
   // instruction.
   if (NumResults)
     CreateVirtualRegisters(Node, MI, II, IsClone, IsCloned, VRBaseMap);
-
+
   // Emit all of the actual operands of this instruction, adding them to the
   // instruction as appropriate.
   bool HasOptPRefs = II.getNumDefs() > NumResults;
@@ -751,7 +751,7 @@ EmitMachineNode(SDNode *Node, bool IsClone, bool IsCloned,
         MI->addRegisterDead(Reg, TRI);
     }
   }
-
+
   // If the instruction has implicit defs and the node doesn't, mark the
   // implicit def as dead. If the node has any glue outputs, we don't do this
   // because we don't know what implicit defs are being used by glued nodes.
@@ -792,7 +792,7 @@ EmitSpecialNode(SDNode *Node, bool IsClone, bool IsCloned,
       SrcReg = R->getReg();
     else
       SrcReg = getVR(SrcVal, VRBaseMap);
-
+
     unsigned DestReg = cast<RegisterSDNode>(Node->getOperand(1))->getReg();
     if (SrcReg == DestReg) // Coalesced away the copy? Ignore.
       break;
@@ -812,12 +812,12 @@ EmitSpecialNode(SDNode *Node, bool IsClone, bool IsCloned,
             TII->get(TargetOpcode::EH_LABEL)).addSym(S);
     break;
   }
-
+
   case ISD::INLINEASM: {
     unsigned NumOps = Node->getNumOperands();
     if (Node->getOperand(NumOps-1).getValueType() == MVT::Glue)
       --NumOps; // Ignore the glue operand.
-
+
     // Create the inline asm machine instruction.
     MachineInstr *MI = BuildMI(*MF, Node->getDebugLoc(),
                                TII->get(TargetOpcode::INLINEASM));
@@ -826,7 +826,7 @@ EmitSpecialNode(SDNode *Node, bool IsClone, bool IsCloned,
     SDValue AsmStrV = Node->getOperand(InlineAsm::Op_AsmString);
     const char *AsmStr = cast<ExternalSymbolSDNode>(AsmStrV)->getSymbol();
     MI->addOperand(MachineOperand::CreateES(AsmStr));
-
+
     // Add the HasSideEffect and isAlignStack bits.
     int64_t ExtraInfo =
       cast<ConstantSDNode>(Node->getOperand(InlineAsm::Op_ExtraInfo))->
@@ -838,10 +838,10 @@ EmitSpecialNode(SDNode *Node, bool IsClone, bool IsCloned,
       unsigned Flags =
        cast<ConstantSDNode>(Node->getOperand(i))->getZExtValue();
       unsigned NumVals = InlineAsm::getNumOperandRegisters(Flags);
-
+
       MI->addOperand(MachineOperand::CreateImm(Flags));
       ++i; // Skip the ID value.
-
+
       switch (InlineAsm::getKind(Flags)) {
       default: llvm_unreachable("Bad flags!");
       case InlineAsm::Kind_RegDef:
@@ -877,13 +877,13 @@ EmitSpecialNode(SDNode *Node, bool IsClone, bool IsCloned,
         break;
       }
     }
-
+
     // Get the mdnode from the asm if it exists and add it to the instruction.
     SDValue MDV = Node->getOperand(InlineAsm::Op_MDNode);
     const MDNode *MD = cast<MDNodeSDNode>(MDV)->getMD();
     if (MD)
       MI->addOperand(MachineOperand::CreateMetadata(MD));
-
+
     MBB->insert(InsertPos, MI);
     break;
   }
@@ -752,7 +752,7 @@ bool SelectionDAGISel::TryToFoldFastISelLoad(const LoadInst *LI,
   // isn't one of the folded instructions, then we can't succeed here. Handle
   // this by scanning the single-use users of the load until we get to FoldInst.
   unsigned MaxUsers = 6; // Don't scan down huge single-use chains of instrs.
-
+
   const Instruction *TheUser = LI->use_back();
   while (TheUser != FoldInst && // Scan up until we find FoldInst.
          // Stay in the right block.
@@ -761,15 +761,15 @@ bool SelectionDAGISel::TryToFoldFastISelLoad(const LoadInst *LI,
     // If there are multiple or no uses of this instruction, then bail out.
     if (!TheUser->hasOneUse())
       return false;
-
+
     TheUser = TheUser->use_back();
   }
-
+
   // If we didn't find the fold instruction, then we failed to collapse the
   // sequence.
   if (TheUser != FoldInst)
     return false;
-
+
   // Don't try to fold volatile loads. Target has to deal with alignment
   // constraints.
   if (LI->isVolatile()) return false;
@@ -4890,7 +4890,7 @@ ReplaceATOMIC_OP_64(SDNode *Node, SmallVectorImpl<SDValue>& Results,
   // High part of Val1
   Ops.push_back(DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                             Node->getOperand(2), DAG.getIntPtrConstant(1)));
-  if (NewOp == ARMISD::ATOMCMPXCHG64_DAG) {
+  if (NewOp == ARMISD::ATOMCMPXCHG64_DAG) {
    // High part of Val1
    Ops.push_back(DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                              Node->getOperand(3), DAG.getIntPtrConstant(0)));
@@ -5410,7 +5410,7 @@ ARMTargetLowering::EmitAtomicBinary64(MachineInstr *MI, MachineBasicBlock *BB,
   // Note that the registers are explicitly specified because there is not any
   // way to force the register allocator to allocate a register pair.
   //
-  // FIXME: The hardcoded registers are not necessary for Thumb2, but we
+  // FIXME: The hardcoded registers are not necessary for Thumb2, but we
   // need to properly enforce the restriction that the two output registers
   // for ldrexd must be different.
   BB = loopMBB;
@@ -7928,7 +7928,7 @@ ARMTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
       return RCPair(0U, ARM::GPRRegisterClass);
     case 'h': // High regs or no regs.
       if (Subtarget->isThumb())
-        return RCPair(0U, ARM::hGPRRegisterClass);
+        return RCPair(0U, ARM::hGPRRegisterClass);
       break;
     case 'r':
       return RCPair(0U, ARM::GPRRegisterClass);
@@ -7942,15 +7942,15 @@ ARMTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
       break;
     case 'x':
       if (VT == MVT::f32)
-        return RCPair(0U, ARM::SPR_8RegisterClass);
+        return RCPair(0U, ARM::SPR_8RegisterClass);
       if (VT.getSizeInBits() == 64)
-        return RCPair(0U, ARM::DPR_8RegisterClass);
+        return RCPair(0U, ARM::DPR_8RegisterClass);
       if (VT.getSizeInBits() == 128)
-        return RCPair(0U, ARM::QPR_8RegisterClass);
+        return RCPair(0U, ARM::QPR_8RegisterClass);
       break;
     case 't':
       if (VT == MVT::f32)
-        return RCPair(0U, ARM::SPRRegisterClass);
+        return RCPair(0U, ARM::SPRRegisterClass);
       break;
     }
   }
@@ -7990,12 +7990,12 @@ void ARMTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
 
     switch (ConstraintLetter) {
       case 'j':
-        // Constant suitable for movw, must be between 0 and
-        // 65535.
-        if (Subtarget->hasV6T2Ops())
-          if (CVal >= 0 && CVal <= 65535)
-            break;
-        return;
+        // Constant suitable for movw, must be between 0 and
+        // 65535.
+        if (Subtarget->hasV6T2Ops())
+          if (CVal >= 0 && CVal <= 65535)
+            break;
+        return;
       case 'I':
         if (Subtarget->isThumb1Only()) {
           // This must be a constant between 0 and 255, for ADD