Fix formatting in AArch64 backend.

This should fix three purely whitespace issues:
    + 80 column violations.
    + Tab characters.
    + TableGen brace placement.

No functional changes.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@174370 91177308-0d34-0410-b5e6-96231b3b80d8
Tim Northover 2013-02-05 13:24:56 +00:00
parent 19254c49a8, commit dfe076af98
24 changed files with 575 additions and 839 deletions
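
The third item in the list above refers to TableGen class bodies: the AArch64InstrFormats.td and AArch64RegisterInfo.td hunks below move the opening brace up onto the line carrying the base-class list, matching the prevailing style elsewhere in the tree. A minimal sketch of the two styles, using a hypothetical MyInst record rather than one of the real AArch64 definitions (it assumes Target.td's Instruction class and its OutOperandList/InOperandList fields are in scope):

    // Old style removed by this commit: the opening brace sits on its own line.
    class MyInstOld<dag outs, dag ins> : Instruction
    {
      let OutOperandList = outs;
      let InOperandList = ins;
    }

    // New style adopted here: the brace follows the base-class list directly.
    class MyInst<dag outs, dag ins> : Instruction {
      let OutOperandList = outs;
      let InOperandList = ins;
    }

The 80-column and tab fixes need no separate illustration; they show up as the re-wrapped call sites and argument lists in the C++ hunks below.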


@ -1,4 +1,4 @@
//===- AArch64.td - Describe the AArch64 Target Machine ---------*- tblgen -*-==// //===- AArch64.td - Describe the AArch64 Target Machine -------*- tblgen -*-==//
// //
// The LLVM Compiler Infrastructure // The LLVM Compiler Infrastructure
// //


@ -34,7 +34,8 @@ AArch64AsmPrinter::getDebugValueLocation(const MachineInstr *MI) const {
// expected to be created. // expected to be created.
assert(MI->getNumOperands() == 4 && MI->getOperand(0).isReg() assert(MI->getNumOperands() == 4 && MI->getOperand(0).isReg()
&& MI->getOperand(1).isImm() && "unexpected custom DBG_VALUE"); && MI->getOperand(1).isImm() && "unexpected custom DBG_VALUE");
return MachineLocation(MI->getOperand(0).getReg(), MI->getOperand(1).getImm()); return MachineLocation(MI->getOperand(0).getReg(),
MI->getOperand(1).getImm());
} }
/// Try to print a floating-point register as if it belonged to a specified /// Try to print a floating-point register as if it belonged to a specified
@ -90,7 +91,8 @@ bool AArch64AsmPrinter::printSymbolicAddress(const MachineOperand &MO,
StringRef Name; StringRef Name;
StringRef Modifier; StringRef Modifier;
switch (MO.getType()) { switch (MO.getType()) {
default: llvm_unreachable("Unexpected operand for symbolic address constraint"); default:
llvm_unreachable("Unexpected operand for symbolic address constraint");
case MachineOperand::MO_GlobalAddress: case MachineOperand::MO_GlobalAddress:
Name = Mang->getSymbol(MO.getGlobal())->getName(); Name = Mang->getSymbol(MO.getGlobal())->getName();


@ -46,7 +46,8 @@ STATISTIC(NumCBrFixed, "Number of cond branches fixed");
// FIXME: This option should be removed once it has received sufficient testing. // FIXME: This option should be removed once it has received sufficient testing.
static cl::opt<bool> static cl::opt<bool>
AlignConstantIslands("aarch64-align-constant-islands", cl::Hidden, AlignConstantIslands("aarch64-align-constant-islands", cl::Hidden,
cl::init(true), cl::desc("Align constant islands in code")); cl::init(true),
cl::desc("Align constant islands in code"));
/// Return the worst case padding that could result from unknown offset bits. /// Return the worst case padding that could result from unknown offset bits.
/// This does not include alignment padding caused by known offset bits. /// This does not include alignment padding caused by known offset bits.
@ -828,7 +829,8 @@ bool AArch64ConstantIslands::isWaterInRange(unsigned UserOffset,
bool AArch64ConstantIslands::isCPEntryInRange(MachineInstr *MI, bool AArch64ConstantIslands::isCPEntryInRange(MachineInstr *MI,
unsigned UserOffset, unsigned UserOffset,
MachineInstr *CPEMI, MachineInstr *CPEMI,
unsigned OffsetBits, bool DoDump) { unsigned OffsetBits,
bool DoDump) {
unsigned CPEOffset = getOffsetOf(CPEMI); unsigned CPEOffset = getOffsetOf(CPEMI);
if (DoDump) { if (DoDump) {
@ -930,7 +932,8 @@ int AArch64ConstantIslands::findInRangeCPEntry(CPUser& U, unsigned UserOffset)
// Removing CPEs can leave empty entries, skip // Removing CPEs can leave empty entries, skip
if (CPEs[i].CPEMI == NULL) if (CPEs[i].CPEMI == NULL)
continue; continue;
if (isCPEntryInRange(UserMI, UserOffset, CPEs[i].CPEMI, U.getOffsetBits())) { if (isCPEntryInRange(UserMI, UserOffset, CPEs[i].CPEMI,
U.getOffsetBits())) {
DEBUG(dbgs() << "Replacing CPE#" << CPI << " with CPE#" DEBUG(dbgs() << "Replacing CPE#" << CPI << " with CPE#"
<< CPEs[i].CPI << "\n"); << CPEs[i].CPI << "\n");
// Point the CPUser node to the replacement // Point the CPUser node to the replacement


@ -180,7 +180,8 @@ void AArch64FrameLowering::emitPrologue(MachineFunction &MF) const {
for (std::vector<CalleeSavedInfo>::const_iterator I = CSI.begin(), for (std::vector<CalleeSavedInfo>::const_iterator I = CSI.begin(),
E = CSI.end(); I != E; ++I) { E = CSI.end(); I != E; ++I) {
MachineLocation Dst(MachineLocation::VirtualFP, MFI->getObjectOffset(I->getFrameIdx())); MachineLocation Dst(MachineLocation::VirtualFP,
MFI->getObjectOffset(I->getFrameIdx()));
MachineLocation Src(I->getReg()); MachineLocation Src(I->getReg());
Moves.push_back(MachineMove(CSLabel, Dst, Src)); Moves.push_back(MachineMove(CSLabel, Dst, Src));
} }
@ -537,7 +538,8 @@ AArch64FrameLowering::emitFrameMemOps(bool isPrologue, MachineBasicBlock &MBB,
State = RegState::Define; State = RegState::Define;
} }
NewMI = BuildMI(MBB, MBBI, DL, TII.get(PossClasses[ClassIdx].SingleOpcode)) NewMI = BuildMI(MBB, MBBI, DL,
TII.get(PossClasses[ClassIdx].SingleOpcode))
.addReg(CSI[i].getReg(), State); .addReg(CSI[i].getReg(), State);
} }
@ -549,9 +551,9 @@ AArch64FrameLowering::emitFrameMemOps(bool isPrologue, MachineBasicBlock &MBB,
Flags = isPrologue ? MachineMemOperand::MOStore : MachineMemOperand::MOLoad; Flags = isPrologue ? MachineMemOperand::MOStore : MachineMemOperand::MOLoad;
MachineMemOperand *MMO = MachineMemOperand *MMO =
MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(FrameIdx), MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(FrameIdx),
Flags, Flags,
Pair ? TheClass.getSize() * 2 : TheClass.getSize(), Pair ? TheClass.getSize() * 2 : TheClass.getSize(),
MFI.getObjectAlignment(FrameIdx)); MFI.getObjectAlignment(FrameIdx));
NewMI.addFrameIndex(FrameIdx) NewMI.addFrameIndex(FrameIdx)
.addImm(0) // address-register offset .addImm(0) // address-register offset


@ -29,7 +29,7 @@ private:
struct LoadStoreMethod { struct LoadStoreMethod {
const TargetRegisterClass *RegClass; // E.g. GPR64RegClass const TargetRegisterClass *RegClass; // E.g. GPR64RegClass
// The preferred instruction. // The preferred instruction.
unsigned PairOpcode; // E.g. LSPair64_STR unsigned PairOpcode; // E.g. LSPair64_STR
// Sometimes only a single register can be handled at once. // Sometimes only a single register can be handled at once.


@ -72,7 +72,8 @@ public:
bool SelectFPZeroOperand(SDValue N, SDValue &Dummy); bool SelectFPZeroOperand(SDValue N, SDValue &Dummy);
bool SelectCVTFixedPosOperand(SDValue N, SDValue &FixedPos, unsigned RegWidth); bool SelectCVTFixedPosOperand(SDValue N, SDValue &FixedPos,
unsigned RegWidth);
bool SelectInlineAsmMemoryOperand(const SDValue &Op, bool SelectInlineAsmMemoryOperand(const SDValue &Op,
char ConstraintCode, char ConstraintCode,
@ -130,8 +131,8 @@ AArch64DAGToDAGISel::SelectCVTFixedPosOperand(SDValue N, SDValue &FixedPos,
bool bool
AArch64DAGToDAGISel::SelectInlineAsmMemoryOperand(const SDValue &Op, AArch64DAGToDAGISel::SelectInlineAsmMemoryOperand(const SDValue &Op,
char ConstraintCode, char ConstraintCode,
std::vector<SDValue> &OutOps) { std::vector<SDValue> &OutOps) {
switch (ConstraintCode) { switch (ConstraintCode) {
default: llvm_unreachable("Unrecognised AArch64 memory constraint"); default: llvm_unreachable("Unrecognised AArch64 memory constraint");
case 'm': case 'm':
@ -152,7 +153,7 @@ AArch64DAGToDAGISel::SelectFPZeroOperand(SDValue N, SDValue &Dummy) {
ConstantFPSDNode *Imm = dyn_cast<ConstantFPSDNode>(N); ConstantFPSDNode *Imm = dyn_cast<ConstantFPSDNode>(N);
if (!Imm || !Imm->getValueAPF().isPosZero()) if (!Imm || !Imm->getValueAPF().isPosZero())
return false; return false;
// Doesn't actually carry any information, but keeps TableGen quiet. // Doesn't actually carry any information, but keeps TableGen quiet.
Dummy = CurDAG->getTargetConstant(0, MVT::i32); Dummy = CurDAG->getTargetConstant(0, MVT::i32);
return true; return true;


@ -841,7 +841,8 @@ AArch64TargetLowering::SaveVarArgRegisters(CCState &CCInfo, SelectionDAG &DAG,
DebugLoc DL, SDValue &Chain) const { DebugLoc DL, SDValue &Chain) const {
MachineFunction &MF = DAG.getMachineFunction(); MachineFunction &MF = DAG.getMachineFunction();
MachineFrameInfo *MFI = MF.getFrameInfo(); MachineFrameInfo *MFI = MF.getFrameInfo();
AArch64MachineFunctionInfo *FuncInfo = MF.getInfo<AArch64MachineFunctionInfo>(); AArch64MachineFunctionInfo *FuncInfo
= MF.getInfo<AArch64MachineFunctionInfo>();
SmallVector<SDValue, 8> MemOps; SmallVector<SDValue, 8> MemOps;
@ -1045,10 +1046,11 @@ AArch64TargetLowering::LowerReturn(SDValue Chain,
SDValue Flag; SDValue Flag;
for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) { for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
// PCS: "If the type, T, of the result of a function is such that void func(T // PCS: "If the type, T, of the result of a function is such that
// arg) would require that arg be passed as a value in a register (or set of // void func(T arg) would require that arg be passed as a value in a
// registers) according to the rules in 5.4, then the result is returned in // register (or set of registers) according to the rules in 5.4, then the
// the same registers as would be used for such an argument. // result is returned in the same registers as would be used for such an
// argument.
// //
// Otherwise, the caller shall reserve a block of memory of sufficient // Otherwise, the caller shall reserve a block of memory of sufficient
// size and alignment to hold the result. The address of the memory block // size and alignment to hold the result. The address of the memory block
@ -1166,7 +1168,8 @@ AArch64TargetLowering::LowerCall(CallLoweringInfo &CLI,
if (!IsSibCall) if (!IsSibCall)
Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true)); Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true));
SDValue StackPtr = DAG.getCopyFromReg(Chain, dl, AArch64::XSP, getPointerTy()); SDValue StackPtr = DAG.getCopyFromReg(Chain, dl, AArch64::XSP,
getPointerTy());
SmallVector<SDValue, 8> MemOpChains; SmallVector<SDValue, 8> MemOpChains;
SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass; SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
@ -1874,9 +1877,10 @@ AArch64TargetLowering::LowerGlobalAddressELF(SDValue Op,
if (Alignment == 0) { if (Alignment == 0) {
const PointerType *GVPtrTy = cast<PointerType>(GV->getType()); const PointerType *GVPtrTy = cast<PointerType>(GV->getType());
if (GVPtrTy->getElementType()->isSized()) if (GVPtrTy->getElementType()->isSized()) {
Alignment = getDataLayout()->getABITypeAlignment(GVPtrTy->getElementType()); Alignment
else { = getDataLayout()->getABITypeAlignment(GVPtrTy->getElementType());
} else {
// Be conservative if we can't guess, not that it really matters: // Be conservative if we can't guess, not that it really matters:
// functions and labels aren't valid for loads, and the methods used to // functions and labels aren't valid for loads, and the methods used to
// actually calculate an address work with any alignment. // actually calculate an address work with any alignment.
@ -1954,7 +1958,8 @@ SDValue AArch64TargetLowering::LowerTLSDescCall(SDValue SymAddr,
Ops.push_back(Glue); Ops.push_back(Glue);
SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue); SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
Chain = DAG.getNode(AArch64ISD::TLSDESCCALL, DL, NodeTys, &Ops[0], Ops.size()); Chain = DAG.getNode(AArch64ISD::TLSDESCCALL, DL, NodeTys, &Ops[0],
Ops.size());
Glue = Chain.getValue(1); Glue = Chain.getValue(1);
// After the call, the offset from TPIDR_EL0 is in X0, copy it out and pass it // After the call, the offset from TPIDR_EL0 is in X0, copy it out and pass it
@ -1995,7 +2000,8 @@ AArch64TargetLowering::LowerGlobalTLSAddress(SDValue Op,
TPOff = SDValue(DAG.getMachineNode(AArch64::MOVZxii, DL, PtrVT, HiVar, TPOff = SDValue(DAG.getMachineNode(AArch64::MOVZxii, DL, PtrVT, HiVar,
DAG.getTargetConstant(0, MVT::i32)), 0); DAG.getTargetConstant(0, MVT::i32)), 0);
TPOff = SDValue(DAG.getMachineNode(AArch64::MOVKxii, DL, PtrVT, TPOff, LoVar, TPOff = SDValue(DAG.getMachineNode(AArch64::MOVKxii, DL, PtrVT,
TPOff, LoVar,
DAG.getTargetConstant(0, MVT::i32)), 0); DAG.getTargetConstant(0, MVT::i32)), 0);
} else if (Model == TLSModel::GeneralDynamic) { } else if (Model == TLSModel::GeneralDynamic) {
// Accesses used in this sequence go via the TLS descriptor which lives in // Accesses used in this sequence go via the TLS descriptor which lives in
@ -2005,7 +2011,8 @@ AArch64TargetLowering::LowerGlobalTLSAddress(SDValue Op,
SDValue LoDesc = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, SDValue LoDesc = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0,
AArch64II::MO_TLSDESC_LO12); AArch64II::MO_TLSDESC_LO12);
SDValue DescAddr = DAG.getNode(AArch64ISD::WrapperSmall, DL, PtrVT, SDValue DescAddr = DAG.getNode(AArch64ISD::WrapperSmall, DL, PtrVT,
HiDesc, LoDesc, DAG.getConstant(8, MVT::i32)); HiDesc, LoDesc,
DAG.getConstant(8, MVT::i32));
SDValue SymAddr = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0); SDValue SymAddr = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0);
TPOff = LowerTLSDescCall(SymAddr, DescAddr, DL, DAG); TPOff = LowerTLSDescCall(SymAddr, DescAddr, DL, DAG);
@ -2027,7 +2034,8 @@ AArch64TargetLowering::LowerGlobalTLSAddress(SDValue Op,
SDValue LoDesc = DAG.getTargetExternalSymbol("_TLS_MODULE_BASE_", PtrVT, SDValue LoDesc = DAG.getTargetExternalSymbol("_TLS_MODULE_BASE_", PtrVT,
AArch64II::MO_TLSDESC_LO12); AArch64II::MO_TLSDESC_LO12);
SDValue DescAddr = DAG.getNode(AArch64ISD::WrapperSmall, DL, PtrVT, SDValue DescAddr = DAG.getNode(AArch64ISD::WrapperSmall, DL, PtrVT,
HiDesc, LoDesc, DAG.getConstant(8, MVT::i32)); HiDesc, LoDesc,
DAG.getConstant(8, MVT::i32));
SDValue SymAddr = DAG.getTargetExternalSymbol("_TLS_MODULE_BASE_", PtrVT); SDValue SymAddr = DAG.getTargetExternalSymbol("_TLS_MODULE_BASE_", PtrVT);
ThreadBase = LowerTLSDescCall(SymAddr, DescAddr, DL, DAG); ThreadBase = LowerTLSDescCall(SymAddr, DescAddr, DL, DAG);
@ -2040,7 +2048,8 @@ AArch64TargetLowering::LowerGlobalTLSAddress(SDValue Op,
TPOff = SDValue(DAG.getMachineNode(AArch64::MOVZxii, DL, PtrVT, HiVar, TPOff = SDValue(DAG.getMachineNode(AArch64::MOVZxii, DL, PtrVT, HiVar,
DAG.getTargetConstant(0, MVT::i32)), 0); DAG.getTargetConstant(0, MVT::i32)), 0);
TPOff = SDValue(DAG.getMachineNode(AArch64::MOVKxii, DL, PtrVT, TPOff, LoVar, TPOff = SDValue(DAG.getMachineNode(AArch64::MOVKxii, DL, PtrVT,
TPOff, LoVar,
DAG.getTargetConstant(0, MVT::i32)), 0); DAG.getTargetConstant(0, MVT::i32)), 0);
} else } else
llvm_unreachable("Unsupported TLS access model"); llvm_unreachable("Unsupported TLS access model");
@ -2123,7 +2132,8 @@ AArch64TargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
SDValue A64cc = DAG.getConstant(CondCode, MVT::i32); SDValue A64cc = DAG.getConstant(CondCode, MVT::i32);
SDValue SetCC = DAG.getNode(AArch64ISD::SETCC, dl, MVT::i32, LHS, RHS, SDValue SetCC = DAG.getNode(AArch64ISD::SETCC, dl, MVT::i32, LHS, RHS,
DAG.getCondCode(CC)); DAG.getCondCode(CC));
SDValue A64SELECT_CC = DAG.getNode(AArch64ISD::SELECT_CC, dl, Op.getValueType(), SDValue A64SELECT_CC = DAG.getNode(AArch64ISD::SELECT_CC, dl,
Op.getValueType(),
SetCC, IfTrue, IfFalse, A64cc); SetCC, IfTrue, IfFalse, A64cc);
if (Alternative != A64CC::Invalid) { if (Alternative != A64CC::Invalid) {
@ -2231,7 +2241,8 @@ AArch64TargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
// The layout of the va_list struct is specified in the AArch64 Procedure Call // The layout of the va_list struct is specified in the AArch64 Procedure Call
// Standard, section B.3. // Standard, section B.3.
MachineFunction &MF = DAG.getMachineFunction(); MachineFunction &MF = DAG.getMachineFunction();
AArch64MachineFunctionInfo *FuncInfo = MF.getInfo<AArch64MachineFunctionInfo>(); AArch64MachineFunctionInfo *FuncInfo
= MF.getInfo<AArch64MachineFunctionInfo>();
DebugLoc DL = Op.getDebugLoc(); DebugLoc DL = Op.getDebugLoc();
SDValue Chain = Op.getOperand(0); SDValue Chain = Op.getOperand(0);
@ -2365,7 +2376,7 @@ static SDValue PerformANDCombine(SDNode *N,
} }
static SDValue PerformATOMIC_FENCECombine(SDNode *FenceNode, static SDValue PerformATOMIC_FENCECombine(SDNode *FenceNode,
TargetLowering::DAGCombinerInfo &DCI) { TargetLowering::DAGCombinerInfo &DCI) {
// An atomic operation followed by an acquiring atomic fence can be reduced to // An atomic operation followed by an acquiring atomic fence can be reduced to
// an acquiring load. The atomic operation provides a convenient pointer to // an acquiring load. The atomic operation provides a convenient pointer to
// load from. If the original operation was a load anyway we can actually // load from. If the original operation was a load anyway we can actually
@ -2407,7 +2418,7 @@ static SDValue PerformATOMIC_FENCECombine(SDNode *FenceNode,
} }
static SDValue PerformATOMIC_STORECombine(SDNode *N, static SDValue PerformATOMIC_STORECombine(SDNode *N,
TargetLowering::DAGCombinerInfo &DCI) { TargetLowering::DAGCombinerInfo &DCI) {
// A releasing atomic fence followed by an atomic store can be combined into a // A releasing atomic fence followed by an atomic store can be combined into a
// single store operation. // single store operation.
SelectionDAG &DAG = DCI.DAG; SelectionDAG &DAG = DCI.DAG;
@ -2821,7 +2832,8 @@ AArch64TargetLowering::getConstraintType(const std::string &Constraint) const {
} }
// FIXME: Ump, Utf, Usa, Ush // FIXME: Ump, Utf, Usa, Ush
// Ump: A memory address suitable for ldp/stp in SI, DI, SF and DF modes, whatever they may be // Ump: A memory address suitable for ldp/stp in SI, DI, SF and DF modes,
// whatever they may be
// Utf: A memory address suitable for ldp/stp in TF mode, whatever it may be // Utf: A memory address suitable for ldp/stp in TF mode, whatever it may be
// Usa: An absolute symbolic address // Usa: An absolute symbolic address
// Ush: The high part (bits 32:12) of a pc-relative symbolic address // Ush: The high part (bits 32:12) of a pc-relative symbolic address
@ -2893,7 +2905,8 @@ AArch64TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
if (const GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op)) { if (const GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op)) {
Result = DAG.getTargetGlobalAddress(GA->getGlobal(), Op.getDebugLoc(), Result = DAG.getTargetGlobalAddress(GA->getGlobal(), Op.getDebugLoc(),
GA->getValueType(0)); GA->getValueType(0));
} else if (const BlockAddressSDNode *BA = dyn_cast<BlockAddressSDNode>(Op)) { } else if (const BlockAddressSDNode *BA
= dyn_cast<BlockAddressSDNode>(Op)) {
Result = DAG.getTargetBlockAddress(BA->getBlockAddress(), Result = DAG.getTargetBlockAddress(BA->getBlockAddress(),
BA->getValueType(0)); BA->getValueType(0));
} else if (const ExternalSymbolSDNode *ES } else if (const ExternalSymbolSDNode *ES
@ -2924,8 +2937,9 @@ AArch64TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
} }
std::pair<unsigned, const TargetRegisterClass*> std::pair<unsigned, const TargetRegisterClass*>
AArch64TargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint, AArch64TargetLowering::getRegForInlineAsmConstraint(
EVT VT) const { const std::string &Constraint,
EVT VT) const {
if (Constraint.size() == 1) { if (Constraint.size() == 1) {
switch (Constraint[0]) { switch (Constraint[0]) {
case 'r': case 'r':


@ -161,8 +161,8 @@ public:
SelectionDAG& DAG) const; SelectionDAG& DAG) const;
/// Finds the incoming stack arguments which overlap the given fixed stack /// Finds the incoming stack arguments which overlap the given fixed stack
/// object and incorporates their load into the current chain. This prevents an /// object and incorporates their load into the current chain. This prevents
/// upcoming store from clobbering the stack argument before it's used. /// an upcoming store from clobbering the stack argument before it's used.
SDValue addTokenForArgument(SDValue Chain, SelectionDAG &DAG, SDValue addTokenForArgument(SDValue Chain, SelectionDAG &DAG,
MachineFrameInfo *MFI, int ClobberedFI) const; MachineFrameInfo *MFI, int ClobberedFI) const;


@ -16,8 +16,7 @@
// architecture. // architecture.
class A64Inst<dag outs, dag ins, string asmstr, list<dag> patterns, class A64Inst<dag outs, dag ins, string asmstr, list<dag> patterns,
InstrItinClass itin> InstrItinClass itin>
: Instruction : Instruction {
{
// All A64 instructions are 32-bit. This field will be filled in // All A64 instructions are 32-bit. This field will be filled in
// graually going down the hierarchy. // graually going down the hierarchy.
field bits<32> Inst; field bits<32> Inst;
@ -40,8 +39,7 @@ class A64Inst<dag outs, dag ins, string asmstr, list<dag> patterns,
let Itinerary = itin; let Itinerary = itin;
} }
class PseudoInst<dag outs, dag ins, list<dag> patterns> : Instruction class PseudoInst<dag outs, dag ins, list<dag> patterns> : Instruction {
{
let Namespace = "AArch64"; let Namespace = "AArch64";
let OutOperandList = outs; let OutOperandList = outs;
@ -54,8 +52,7 @@ class PseudoInst<dag outs, dag ins, list<dag> patterns> : Instruction
// Represents a pseudo-instruction that represents a single A64 instruction for // Represents a pseudo-instruction that represents a single A64 instruction for
// whatever reason, the eventual result will be a 32-bit real instruction. // whatever reason, the eventual result will be a 32-bit real instruction.
class A64PseudoInst<dag outs, dag ins, list<dag> patterns> class A64PseudoInst<dag outs, dag ins, list<dag> patterns>
: PseudoInst<outs, ins, patterns> : PseudoInst<outs, ins, patterns> {
{
let Size = 4; let Size = 4;
} }
@ -70,8 +67,7 @@ class A64PseudoExpand<dag outs, dag ins, list<dag> patterns, dag Result>
class A64InstRd<dag outs, dag ins, string asmstr, class A64InstRd<dag outs, dag ins, string asmstr,
list<dag> patterns, InstrItinClass itin> list<dag> patterns, InstrItinClass itin>
: A64Inst<outs, ins, asmstr, patterns, itin> : A64Inst<outs, ins, asmstr, patterns, itin> {
{
bits<5> Rd; bits<5> Rd;
let Inst{4-0} = Rd; let Inst{4-0} = Rd;
@ -79,8 +75,7 @@ class A64InstRd<dag outs, dag ins, string asmstr,
class A64InstRt<dag outs, dag ins, string asmstr, class A64InstRt<dag outs, dag ins, string asmstr,
list<dag> patterns, InstrItinClass itin> list<dag> patterns, InstrItinClass itin>
: A64Inst<outs, ins, asmstr, patterns, itin> : A64Inst<outs, ins, asmstr, patterns, itin> {
{
bits<5> Rt; bits<5> Rt;
let Inst{4-0} = Rt; let Inst{4-0} = Rt;
@ -89,8 +84,7 @@ class A64InstRt<dag outs, dag ins, string asmstr,
class A64InstRdn<dag outs, dag ins, string asmstr, class A64InstRdn<dag outs, dag ins, string asmstr,
list<dag> patterns, InstrItinClass itin> list<dag> patterns, InstrItinClass itin>
: A64InstRd<outs, ins, asmstr, patterns, itin> : A64InstRd<outs, ins, asmstr, patterns, itin> {
{
// Inherit rdt // Inherit rdt
bits<5> Rn; bits<5> Rn;
@ -99,8 +93,7 @@ class A64InstRdn<dag outs, dag ins, string asmstr,
class A64InstRtn<dag outs, dag ins, string asmstr, class A64InstRtn<dag outs, dag ins, string asmstr,
list<dag> patterns, InstrItinClass itin> list<dag> patterns, InstrItinClass itin>
: A64InstRt<outs, ins, asmstr, patterns, itin> : A64InstRt<outs, ins, asmstr, patterns, itin> {
{
// Inherit rdt // Inherit rdt
bits<5> Rn; bits<5> Rn;
@ -110,8 +103,7 @@ class A64InstRtn<dag outs, dag ins, string asmstr,
// Instructions taking Rt,Rt2,Rn // Instructions taking Rt,Rt2,Rn
class A64InstRtt2n<dag outs, dag ins, string asmstr, class A64InstRtt2n<dag outs, dag ins, string asmstr,
list<dag> patterns, InstrItinClass itin> list<dag> patterns, InstrItinClass itin>
: A64InstRtn<outs, ins, asmstr, patterns, itin> : A64InstRtn<outs, ins, asmstr, patterns, itin> {
{
bits<5> Rt2; bits<5> Rt2;
let Inst{14-10} = Rt2; let Inst{14-10} = Rt2;
@ -119,8 +111,7 @@ class A64InstRtt2n<dag outs, dag ins, string asmstr,
class A64InstRdnm<dag outs, dag ins, string asmstr, class A64InstRdnm<dag outs, dag ins, string asmstr,
list<dag> patterns, InstrItinClass itin> list<dag> patterns, InstrItinClass itin>
: A64InstRdn<outs, ins, asmstr, patterns, itin> : A64InstRdn<outs, ins, asmstr, patterns, itin> {
{
bits<5> Rm; bits<5> Rm;
let Inst{20-16} = Rm; let Inst{20-16} = Rm;
@ -135,8 +126,7 @@ class A64InstRdnm<dag outs, dag ins, string asmstr,
class A64I_addsubext<bit sf, bit op, bit S, bits<2> opt, bits<3> option, class A64I_addsubext<bit sf, bit op, bit S, bits<2> opt, bits<3> option,
dag outs, dag ins, string asmstr, list<dag> patterns, dag outs, dag ins, string asmstr, list<dag> patterns,
InstrItinClass itin> InstrItinClass itin>
: A64InstRdnm<outs, ins, asmstr, patterns, itin> : A64InstRdnm<outs, ins, asmstr, patterns, itin> {
{
bits<3> Imm3; bits<3> Imm3;
let Inst{31} = sf; let Inst{31} = sf;
@ -156,8 +146,7 @@ class A64I_addsubext<bit sf, bit op, bit S, bits<2> opt, bits<3> option,
class A64I_addsubimm<bit sf, bit op, bit S, bits<2> shift, class A64I_addsubimm<bit sf, bit op, bit S, bits<2> shift,
dag outs, dag ins, string asmstr, dag outs, dag ins, string asmstr,
list<dag> patterns, InstrItinClass itin> list<dag> patterns, InstrItinClass itin>
: A64InstRdn<outs, ins, asmstr, patterns, itin> : A64InstRdn<outs, ins, asmstr, patterns, itin> {
{
bits<12> Imm12; bits<12> Imm12;
let Inst{31} = sf; let Inst{31} = sf;
@ -172,8 +161,7 @@ class A64I_addsubimm<bit sf, bit op, bit S, bits<2> shift,
class A64I_addsubshift<bit sf, bit op, bit S, bits<2> shift, class A64I_addsubshift<bit sf, bit op, bit S, bits<2> shift,
dag outs, dag ins, string asmstr, list<dag> patterns, dag outs, dag ins, string asmstr, list<dag> patterns,
InstrItinClass itin> InstrItinClass itin>
: A64InstRdnm<outs, ins, asmstr, patterns, itin> : A64InstRdnm<outs, ins, asmstr, patterns, itin> {
{
bits<6> Imm6; bits<6> Imm6;
let Inst{31} = sf; let Inst{31} = sf;
@ -192,8 +180,7 @@ class A64I_addsubshift<bit sf, bit op, bit S, bits<2> shift,
class A64I_addsubcarry<bit sf, bit op, bit S, bits<6> opcode2, class A64I_addsubcarry<bit sf, bit op, bit S, bits<6> opcode2,
dag outs, dag ins, string asmstr, list<dag> patterns, dag outs, dag ins, string asmstr, list<dag> patterns,
InstrItinClass itin> InstrItinClass itin>
: A64InstRdnm<outs, ins, asmstr, patterns, itin> : A64InstRdnm<outs, ins, asmstr, patterns, itin> {
{
let Inst{31} = sf; let Inst{31} = sf;
let Inst{30} = op; let Inst{30} = op;
let Inst{29} = S; let Inst{29} = S;
@ -209,8 +196,7 @@ class A64I_addsubcarry<bit sf, bit op, bit S, bits<6> opcode2,
class A64I_bitfield<bit sf, bits<2> opc, bit n, class A64I_bitfield<bit sf, bits<2> opc, bit n,
dag outs, dag ins, string asmstr, dag outs, dag ins, string asmstr,
list<dag> patterns, InstrItinClass itin> list<dag> patterns, InstrItinClass itin>
: A64InstRdn<outs, ins, asmstr, patterns, itin> : A64InstRdn<outs, ins, asmstr, patterns, itin> {
{
bits<6> ImmR; bits<6> ImmR;
bits<6> ImmS; bits<6> ImmS;
@ -228,8 +214,7 @@ class A64I_bitfield<bit sf, bits<2> opc, bit n,
class A64I_cmpbr<bit sf, bit op, class A64I_cmpbr<bit sf, bit op,
dag outs, dag ins, string asmstr, dag outs, dag ins, string asmstr,
list<dag> patterns, InstrItinClass itin> list<dag> patterns, InstrItinClass itin>
: A64InstRt<outs, ins, asmstr, patterns, itin> : A64InstRt<outs, ins, asmstr, patterns, itin> {
{
bits<19> Label; bits<19> Label;
let Inst{31} = sf; let Inst{31} = sf;
@ -243,8 +228,7 @@ class A64I_cmpbr<bit sf, bit op,
class A64I_condbr<bit o1, bit o0, class A64I_condbr<bit o1, bit o0,
dag outs, dag ins, string asmstr, dag outs, dag ins, string asmstr,
list<dag> patterns, InstrItinClass itin> list<dag> patterns, InstrItinClass itin>
: A64Inst<outs, ins, asmstr, patterns, itin> : A64Inst<outs, ins, asmstr, patterns, itin> {
{
bits<19> Label; bits<19> Label;
bits<4> Cond; bits<4> Cond;
@ -259,8 +243,7 @@ class A64I_condbr<bit o1, bit o0,
class A64I_condcmpimm<bit sf, bit op, bit o2, bit o3, bit s, class A64I_condcmpimm<bit sf, bit op, bit o2, bit o3, bit s,
dag outs, dag ins, string asmstr, dag outs, dag ins, string asmstr,
list<dag> patterns, InstrItinClass itin> list<dag> patterns, InstrItinClass itin>
: A64Inst<outs, ins, asmstr, patterns, itin> : A64Inst<outs, ins, asmstr, patterns, itin> {
{
bits<5> Rn; bits<5> Rn;
bits<5> UImm5; bits<5> UImm5;
bits<4> NZCVImm; bits<4> NZCVImm;
@ -283,8 +266,7 @@ class A64I_condcmpimm<bit sf, bit op, bit o2, bit o3, bit s,
class A64I_condcmpreg<bit sf, bit op, bit o2, bit o3, bit s, class A64I_condcmpreg<bit sf, bit op, bit o2, bit o3, bit s,
dag outs, dag ins, string asmstr, dag outs, dag ins, string asmstr,
list<dag> patterns, InstrItinClass itin> list<dag> patterns, InstrItinClass itin>
: A64Inst<outs, ins, asmstr, patterns, itin> : A64Inst<outs, ins, asmstr, patterns, itin> {
{
bits<5> Rn; bits<5> Rn;
bits<5> Rm; bits<5> Rm;
bits<4> NZCVImm; bits<4> NZCVImm;
@ -308,8 +290,7 @@ class A64I_condcmpreg<bit sf, bit op, bit o2, bit o3, bit s,
class A64I_condsel<bit sf, bit op, bit s, bits<2> op2, class A64I_condsel<bit sf, bit op, bit s, bits<2> op2,
dag outs, dag ins, string asmstr, dag outs, dag ins, string asmstr,
list<dag> patterns, InstrItinClass itin> list<dag> patterns, InstrItinClass itin>
: A64InstRdnm<outs, ins, asmstr, patterns, itin> : A64InstRdnm<outs, ins, asmstr, patterns, itin> {
{
bits<4> Cond; bits<4> Cond;
let Inst{31} = sf; let Inst{31} = sf;
@ -327,8 +308,7 @@ class A64I_condsel<bit sf, bit op, bit s, bits<2> op2,
class A64I_dp_1src<bit sf, bit S, bits<5> opcode2, bits<6> opcode, class A64I_dp_1src<bit sf, bit S, bits<5> opcode2, bits<6> opcode,
string asmstr, dag outs, dag ins, string asmstr, dag outs, dag ins,
list<dag> patterns, InstrItinClass itin> list<dag> patterns, InstrItinClass itin>
: A64InstRdn<outs, ins, asmstr, patterns, itin> : A64InstRdn<outs, ins, asmstr, patterns, itin> {
{
let Inst{31} = sf; let Inst{31} = sf;
let Inst{30} = 0b1; let Inst{30} = 0b1;
let Inst{29} = S; let Inst{29} = S;
@ -341,8 +321,7 @@ class A64I_dp_1src<bit sf, bit S, bits<5> opcode2, bits<6> opcode,
class A64I_dp_2src<bit sf, bits<6> opcode, bit S, class A64I_dp_2src<bit sf, bits<6> opcode, bit S,
string asmstr, dag outs, dag ins, string asmstr, dag outs, dag ins,
list<dag> patterns, InstrItinClass itin> list<dag> patterns, InstrItinClass itin>
: A64InstRdnm<outs, ins, asmstr, patterns, itin> : A64InstRdnm<outs, ins, asmstr, patterns, itin> {
{
let Inst{31} = sf; let Inst{31} = sf;
let Inst{30} = 0b0; let Inst{30} = 0b0;
let Inst{29} = S; let Inst{29} = S;
@ -355,8 +334,7 @@ class A64I_dp_2src<bit sf, bits<6> opcode, bit S,
class A64I_dp3<bit sf, bits<6> opcode, class A64I_dp3<bit sf, bits<6> opcode,
dag outs, dag ins, string asmstr, dag outs, dag ins, string asmstr,
list<dag> patterns, InstrItinClass itin> list<dag> patterns, InstrItinClass itin>
: A64InstRdnm<outs, ins, asmstr, patterns, itin> : A64InstRdnm<outs, ins, asmstr, patterns, itin> {
{
bits<5> Ra; bits<5> Ra;
let Inst{31} = sf; let Inst{31} = sf;
@ -374,8 +352,7 @@ class A64I_dp3<bit sf, bits<6> opcode,
class A64I_exception<bits<3> opc, bits<3> op2, bits<2> ll, class A64I_exception<bits<3> opc, bits<3> op2, bits<2> ll,
dag outs, dag ins, string asmstr, dag outs, dag ins, string asmstr,
list<dag> patterns, InstrItinClass itin> list<dag> patterns, InstrItinClass itin>
: A64Inst<outs, ins, asmstr, patterns, itin> : A64Inst<outs, ins, asmstr, patterns, itin> {
{
bits<16> UImm16; bits<16> UImm16;
let Inst{31-24} = 0b11010100; let Inst{31-24} = 0b11010100;
@ -389,8 +366,7 @@ class A64I_exception<bits<3> opc, bits<3> op2, bits<2> ll,
class A64I_extract<bit sf, bits<3> op, bit n, class A64I_extract<bit sf, bits<3> op, bit n,
dag outs, dag ins, string asmstr, dag outs, dag ins, string asmstr,
list<dag> patterns, InstrItinClass itin> list<dag> patterns, InstrItinClass itin>
: A64InstRdnm<outs, ins, asmstr, patterns, itin> : A64InstRdnm<outs, ins, asmstr, patterns, itin> {
{
bits<6> LSB; bits<6> LSB;
let Inst{31} = sf; let Inst{31} = sf;
@ -408,8 +384,7 @@ class A64I_extract<bit sf, bits<3> op, bit n,
class A64I_fpcmp<bit m, bit s, bits<2> type, bits<2> op, bits<5> opcode2, class A64I_fpcmp<bit m, bit s, bits<2> type, bits<2> op, bits<5> opcode2,
dag outs, dag ins, string asmstr, dag outs, dag ins, string asmstr,
list<dag> patterns, InstrItinClass itin> list<dag> patterns, InstrItinClass itin>
: A64Inst<outs, ins, asmstr, patterns, itin> : A64Inst<outs, ins, asmstr, patterns, itin> {
{
bits<5> Rn; bits<5> Rn;
bits<5> Rm; bits<5> Rm;
@ -430,8 +405,7 @@ class A64I_fpcmp<bit m, bit s, bits<2> type, bits<2> op, bits<5> opcode2,
class A64I_fpccmp<bit m, bit s, bits<2> type, bit op, class A64I_fpccmp<bit m, bit s, bits<2> type, bit op,
dag outs, dag ins, string asmstr, dag outs, dag ins, string asmstr,
list<dag> patterns, InstrItinClass itin> list<dag> patterns, InstrItinClass itin>
: A64InstRdn<outs, ins, asmstr, patterns, itin> : A64InstRdn<outs, ins, asmstr, patterns, itin> {
{
bits<5> Rn; bits<5> Rn;
bits<5> Rm; bits<5> Rm;
bits<4> NZCVImm; bits<4> NZCVImm;
@ -455,8 +429,7 @@ class A64I_fpccmp<bit m, bit s, bits<2> type, bit op,
class A64I_fpcondsel<bit m, bit s, bits<2> type, class A64I_fpcondsel<bit m, bit s, bits<2> type,
dag outs, dag ins, string asmstr, dag outs, dag ins, string asmstr,
list<dag> patterns, InstrItinClass itin> list<dag> patterns, InstrItinClass itin>
: A64InstRdnm<outs, ins, asmstr, patterns, itin> : A64InstRdnm<outs, ins, asmstr, patterns, itin> {
{
bits<4> Cond; bits<4> Cond;
let Inst{31} = m; let Inst{31} = m;
@ -477,8 +450,7 @@ class A64I_fpcondsel<bit m, bit s, bits<2> type,
class A64I_fpdp1<bit m, bit s, bits<2> type, bits<6> opcode, class A64I_fpdp1<bit m, bit s, bits<2> type, bits<6> opcode,
dag outs, dag ins, string asmstr, dag outs, dag ins, string asmstr,
list<dag> patterns, InstrItinClass itin> list<dag> patterns, InstrItinClass itin>
: A64InstRdn<outs, ins, asmstr, patterns, itin> : A64InstRdn<outs, ins, asmstr, patterns, itin> {
{
let Inst{31} = m; let Inst{31} = m;
let Inst{30} = 0b0; let Inst{30} = 0b0;
let Inst{29} = s; let Inst{29} = s;
@ -495,8 +467,7 @@ class A64I_fpdp1<bit m, bit s, bits<2> type, bits<6> opcode,
class A64I_fpdp2<bit m, bit s, bits<2> type, bits<4> opcode, class A64I_fpdp2<bit m, bit s, bits<2> type, bits<4> opcode,
dag outs, dag ins, string asmstr, dag outs, dag ins, string asmstr,
list<dag> patterns, InstrItinClass itin> list<dag> patterns, InstrItinClass itin>
: A64InstRdnm<outs, ins, asmstr, patterns, itin> : A64InstRdnm<outs, ins, asmstr, patterns, itin> {
{
let Inst{31} = m; let Inst{31} = m;
let Inst{30} = 0b0; let Inst{30} = 0b0;
let Inst{29} = s; let Inst{29} = s;
@ -514,8 +485,7 @@ class A64I_fpdp2<bit m, bit s, bits<2> type, bits<4> opcode,
class A64I_fpdp3<bit m, bit s, bits<2> type, bit o1, bit o0, class A64I_fpdp3<bit m, bit s, bits<2> type, bit o1, bit o0,
dag outs, dag ins, string asmstr, dag outs, dag ins, string asmstr,
list<dag> patterns, InstrItinClass itin> list<dag> patterns, InstrItinClass itin>
: A64InstRdnm<outs, ins, asmstr, patterns, itin> : A64InstRdnm<outs, ins, asmstr, patterns, itin> {
{
bits<5> Ra; bits<5> Ra;
let Inst{31} = m; let Inst{31} = m;
@ -535,8 +505,7 @@ class A64I_fpdp3<bit m, bit s, bits<2> type, bit o1, bit o0,
class A64I_fpfixed<bit sf, bit s, bits<2> type, bits<2> mode, bits<3> opcode, class A64I_fpfixed<bit sf, bit s, bits<2> type, bits<2> mode, bits<3> opcode,
dag outs, dag ins, string asmstr, dag outs, dag ins, string asmstr,
list<dag> patterns, InstrItinClass itin> list<dag> patterns, InstrItinClass itin>
: A64InstRdn<outs, ins, asmstr, patterns, itin> : A64InstRdn<outs, ins, asmstr, patterns, itin> {
{
bits<6> Scale; bits<6> Scale;
let Inst{31} = sf; let Inst{31} = sf;
@ -556,8 +525,7 @@ class A64I_fpfixed<bit sf, bit s, bits<2> type, bits<2> mode, bits<3> opcode,
class A64I_fpint<bit sf, bit s, bits<2> type, bits<2> rmode, bits<3> opcode, class A64I_fpint<bit sf, bit s, bits<2> type, bits<2> rmode, bits<3> opcode,
dag outs, dag ins, string asmstr, dag outs, dag ins, string asmstr,
list<dag> patterns, InstrItinClass itin> list<dag> patterns, InstrItinClass itin>
: A64InstRdn<outs, ins, asmstr, patterns, itin> : A64InstRdn<outs, ins, asmstr, patterns, itin> {
{
let Inst{31} = sf; let Inst{31} = sf;
let Inst{30} = 0b0; let Inst{30} = 0b0;
let Inst{29} = s; let Inst{29} = s;
@ -576,8 +544,7 @@ class A64I_fpint<bit sf, bit s, bits<2> type, bits<2> rmode, bits<3> opcode,
class A64I_fpimm<bit m, bit s, bits<2> type, bits<5> imm5, class A64I_fpimm<bit m, bit s, bits<2> type, bits<5> imm5,
dag outs, dag ins, string asmstr, dag outs, dag ins, string asmstr,
list<dag> patterns, InstrItinClass itin> list<dag> patterns, InstrItinClass itin>
: A64InstRd<outs, ins, asmstr, patterns, itin> : A64InstRd<outs, ins, asmstr, patterns, itin> {
{
bits<8> Imm8; bits<8> Imm8;
let Inst{31} = m; let Inst{31} = m;
@ -596,8 +563,7 @@ class A64I_fpimm<bit m, bit s, bits<2> type, bits<5> imm5,
class A64I_LDRlit<bits<2> opc, bit v, class A64I_LDRlit<bits<2> opc, bit v,
dag outs, dag ins, string asmstr, dag outs, dag ins, string asmstr,
list<dag> patterns, InstrItinClass itin> list<dag> patterns, InstrItinClass itin>
: A64InstRt<outs, ins, asmstr, patterns, itin> : A64InstRt<outs, ins, asmstr, patterns, itin> {
{
bits<19> Imm19; bits<19> Imm19;
let Inst{31-30} = opc; let Inst{31-30} = opc;
@ -612,8 +578,7 @@ class A64I_LDRlit<bits<2> opc, bit v,
class A64I_LDSTex_tn<bits<2> size, bit o2, bit L, bit o1, bit o0, class A64I_LDSTex_tn<bits<2> size, bit o2, bit L, bit o1, bit o0,
dag outs, dag ins, string asmstr, dag outs, dag ins, string asmstr,
list <dag> patterns, InstrItinClass itin> list <dag> patterns, InstrItinClass itin>
: A64InstRtn<outs, ins, asmstr, patterns, itin> : A64InstRtn<outs, ins, asmstr, patterns, itin> {
{
let Inst{31-30} = size; let Inst{31-30} = size;
let Inst{29-24} = 0b001000; let Inst{29-24} = 0b001000;
let Inst{23} = o2; let Inst{23} = o2;
@ -650,8 +615,7 @@ class A64I_LDSTex_stt2n<bits<2> size, bit o2, bit L, bit o1, bit o0,
class A64I_LSpostind<bits<2> size, bit v, bits<2> opc, class A64I_LSpostind<bits<2> size, bit v, bits<2> opc,
dag outs, dag ins, string asmstr, dag outs, dag ins, string asmstr,
list<dag> patterns, InstrItinClass itin> list<dag> patterns, InstrItinClass itin>
: A64InstRtn<outs, ins, asmstr, patterns, itin> : A64InstRtn<outs, ins, asmstr, patterns, itin> {
{
bits<9> SImm9; bits<9> SImm9;
let Inst{31-30} = size; let Inst{31-30} = size;
@ -670,8 +634,7 @@ class A64I_LSpostind<bits<2> size, bit v, bits<2> opc,
class A64I_LSpreind<bits<2> size, bit v, bits<2> opc, class A64I_LSpreind<bits<2> size, bit v, bits<2> opc,
dag outs, dag ins, string asmstr, dag outs, dag ins, string asmstr,
list<dag> patterns, InstrItinClass itin> list<dag> patterns, InstrItinClass itin>
: A64InstRtn<outs, ins, asmstr, patterns, itin> : A64InstRtn<outs, ins, asmstr, patterns, itin> {
{
bits<9> SImm9; bits<9> SImm9;
@ -691,8 +654,7 @@ class A64I_LSpreind<bits<2> size, bit v, bits<2> opc,
class A64I_LSunpriv<bits<2> size, bit v, bits<2> opc, class A64I_LSunpriv<bits<2> size, bit v, bits<2> opc,
dag outs, dag ins, string asmstr, dag outs, dag ins, string asmstr,
list<dag> patterns, InstrItinClass itin> list<dag> patterns, InstrItinClass itin>
: A64InstRtn<outs, ins, asmstr, patterns, itin> : A64InstRtn<outs, ins, asmstr, patterns, itin> {
{
bits<9> SImm9; bits<9> SImm9;
@ -712,8 +674,7 @@ class A64I_LSunpriv<bits<2> size, bit v, bits<2> opc,
class A64I_LSunalimm<bits<2> size, bit v, bits<2> opc, class A64I_LSunalimm<bits<2> size, bit v, bits<2> opc,
dag outs, dag ins, string asmstr, dag outs, dag ins, string asmstr,
list<dag> patterns, InstrItinClass itin> list<dag> patterns, InstrItinClass itin>
: A64InstRtn<outs, ins, asmstr, patterns, itin> : A64InstRtn<outs, ins, asmstr, patterns, itin> {
{
bits<9> SImm9; bits<9> SImm9;
let Inst{31-30} = size; let Inst{31-30} = size;
@ -733,8 +694,7 @@ class A64I_LSunalimm<bits<2> size, bit v, bits<2> opc,
class A64I_LSunsigimm<bits<2> size, bit v, bits<2> opc, class A64I_LSunsigimm<bits<2> size, bit v, bits<2> opc,
dag outs, dag ins, string asmstr, dag outs, dag ins, string asmstr,
list<dag> patterns, InstrItinClass itin> list<dag> patterns, InstrItinClass itin>
: A64InstRtn<outs, ins, asmstr, patterns, itin> : A64InstRtn<outs, ins, asmstr, patterns, itin> {
{
bits<12> UImm12; bits<12> UImm12;
let Inst{31-30} = size; let Inst{31-30} = size;
@ -749,8 +709,7 @@ class A64I_LSunsigimm<bits<2> size, bit v, bits<2> opc,
class A64I_LSregoff<bits<2> size, bit v, bits<2> opc, bit optionlo, class A64I_LSregoff<bits<2> size, bit v, bits<2> opc, bit optionlo,
dag outs, dag ins, string asmstr, dag outs, dag ins, string asmstr,
list<dag> patterns, InstrItinClass itin> list<dag> patterns, InstrItinClass itin>
: A64InstRtn<outs, ins, asmstr, patterns, itin> : A64InstRtn<outs, ins, asmstr, patterns, itin> {
{
bits<5> Rm; bits<5> Rm;
// Complex operand selection needed for these instructions, so they // Complex operand selection needed for these instructions, so they
@ -780,8 +739,7 @@ class A64I_LSregoff<bits<2> size, bit v, bits<2> opc, bit optionlo,
class A64I_LSPoffset<bits<2> opc, bit v, bit l, class A64I_LSPoffset<bits<2> opc, bit v, bit l,
dag outs, dag ins, string asmstr, dag outs, dag ins, string asmstr,
list<dag> patterns, InstrItinClass itin> list<dag> patterns, InstrItinClass itin>
: A64InstRtt2n<outs, ins, asmstr, patterns, itin> : A64InstRtt2n<outs, ins, asmstr, patterns, itin> {
{
bits<7> SImm7; bits<7> SImm7;
let Inst{31-30} = opc; let Inst{31-30} = opc;
@ -799,8 +757,7 @@ class A64I_LSPoffset<bits<2> opc, bit v, bit l,
class A64I_LSPpostind<bits<2> opc, bit v, bit l, class A64I_LSPpostind<bits<2> opc, bit v, bit l,
dag outs, dag ins, string asmstr, dag outs, dag ins, string asmstr,
list<dag> patterns, InstrItinClass itin> list<dag> patterns, InstrItinClass itin>
: A64InstRtt2n<outs, ins, asmstr, patterns, itin> : A64InstRtt2n<outs, ins, asmstr, patterns, itin> {
{
bits<7> SImm7; bits<7> SImm7;
let Inst{31-30} = opc; let Inst{31-30} = opc;
@ -818,8 +775,7 @@ class A64I_LSPpostind<bits<2> opc, bit v, bit l,
class A64I_LSPpreind<bits<2> opc, bit v, bit l, class A64I_LSPpreind<bits<2> opc, bit v, bit l,
dag outs, dag ins, string asmstr, dag outs, dag ins, string asmstr,
list<dag> patterns, InstrItinClass itin> list<dag> patterns, InstrItinClass itin>
: A64InstRtt2n<outs, ins, asmstr, patterns, itin> : A64InstRtt2n<outs, ins, asmstr, patterns, itin> {
{
bits<7> SImm7; bits<7> SImm7;
let Inst{31-30} = opc; let Inst{31-30} = opc;
@ -837,8 +793,7 @@ class A64I_LSPpreind<bits<2> opc, bit v, bit l,
class A64I_LSPnontemp<bits<2> opc, bit v, bit l, class A64I_LSPnontemp<bits<2> opc, bit v, bit l,
dag outs, dag ins, string asmstr, dag outs, dag ins, string asmstr,
list<dag> patterns, InstrItinClass itin> list<dag> patterns, InstrItinClass itin>
: A64InstRtt2n<outs, ins, asmstr, patterns, itin> : A64InstRtt2n<outs, ins, asmstr, patterns, itin> {
{
bits<7> SImm7; bits<7> SImm7;
let Inst{31-30} = opc; let Inst{31-30} = opc;
@ -856,8 +811,7 @@ class A64I_LSPnontemp<bits<2> opc, bit v, bit l,
class A64I_logicalimm<bit sf, bits<2> opc, class A64I_logicalimm<bit sf, bits<2> opc,
dag outs, dag ins, string asmstr, dag outs, dag ins, string asmstr,
list<dag> patterns, InstrItinClass itin> list<dag> patterns, InstrItinClass itin>
: A64InstRdn<outs, ins, asmstr, patterns, itin> : A64InstRdn<outs, ins, asmstr, patterns, itin> {
{
bit N; bit N;
bits<6> ImmR; bits<6> ImmR;
bits<6> ImmS; bits<6> ImmS;
@ -883,8 +837,7 @@ class A64I_logicalimm<bit sf, bits<2> opc,
class A64I_logicalshift<bit sf, bits<2> opc, bits<2> shift, bit N, class A64I_logicalshift<bit sf, bits<2> opc, bits<2> shift, bit N,
dag outs, dag ins, string asmstr, dag outs, dag ins, string asmstr,
list<dag> patterns, InstrItinClass itin> list<dag> patterns, InstrItinClass itin>
: A64InstRdnm<outs, ins, asmstr, patterns, itin> : A64InstRdnm<outs, ins, asmstr, patterns, itin> {
{
bits<6> Imm6; bits<6> Imm6;
let Inst{31} = sf; let Inst{31} = sf;
@ -902,8 +855,7 @@ class A64I_logicalshift<bit sf, bits<2> opc, bits<2> shift, bit N,
class A64I_movw<bit sf, bits<2> opc, class A64I_movw<bit sf, bits<2> opc,
dag outs, dag ins, string asmstr, dag outs, dag ins, string asmstr,
list<dag> patterns, InstrItinClass itin> list<dag> patterns, InstrItinClass itin>
: A64InstRd<outs, ins, asmstr, patterns, itin> : A64InstRd<outs, ins, asmstr, patterns, itin> {
{
bits<16> UImm16; bits<16> UImm16;
bits<2> Shift; // Called "hw" officially bits<2> Shift; // Called "hw" officially
@ -919,8 +871,7 @@ class A64I_movw<bit sf, bits<2> opc,
class A64I_PCADR<bit op, class A64I_PCADR<bit op,
dag outs, dag ins, string asmstr, dag outs, dag ins, string asmstr,
list<dag> patterns, InstrItinClass itin> list<dag> patterns, InstrItinClass itin>
: A64InstRd<outs, ins, asmstr, patterns, itin> : A64InstRd<outs, ins, asmstr, patterns, itin> {
{
bits<21> Label; bits<21> Label;
let Inst{31} = op; let Inst{31} = op;
@ -933,8 +884,7 @@ class A64I_PCADR<bit op,
class A64I_system<bit l, class A64I_system<bit l,
dag outs, dag ins, string asmstr, dag outs, dag ins, string asmstr,
list<dag> patterns, InstrItinClass itin> list<dag> patterns, InstrItinClass itin>
: A64Inst<outs, ins, asmstr, patterns, itin> : A64Inst<outs, ins, asmstr, patterns, itin> {
{
bits<2> Op0; bits<2> Op0;
bits<3> Op1; bits<3> Op1;
bits<4> CRn; bits<4> CRn;
@ -959,8 +909,7 @@ class A64I_system<bit l,
class A64I_Bimm<bit op, class A64I_Bimm<bit op,
dag outs, dag ins, string asmstr, dag outs, dag ins, string asmstr,
list<dag> patterns, InstrItinClass itin> list<dag> patterns, InstrItinClass itin>
: A64Inst<outs, ins, asmstr, patterns, itin> : A64Inst<outs, ins, asmstr, patterns, itin> {
{
// Doubly special in not even sharing register fields with other // Doubly special in not even sharing register fields with other
// instructions, so we create our own Rn here. // instructions, so we create our own Rn here.
bits<26> Label; bits<26> Label;
@ -974,8 +923,7 @@ class A64I_Bimm<bit op,
class A64I_TBimm<bit op, class A64I_TBimm<bit op,
dag outs, dag ins, string asmstr, dag outs, dag ins, string asmstr,
list<dag> patterns, InstrItinClass itin> list<dag> patterns, InstrItinClass itin>
: A64InstRt<outs, ins, asmstr, patterns, itin> : A64InstRt<outs, ins, asmstr, patterns, itin> {
{
// Doubly special in not even sharing register fields with other // Doubly special in not even sharing register fields with other
// instructions, so we create our own Rn here. // instructions, so we create our own Rn here.
bits<6> Imm; bits<6> Imm;
@ -995,8 +943,7 @@ class A64I_TBimm<bit op,
class A64I_Breg<bits<4> opc, bits<5> op2, bits<6> op3, bits<5> op4, class A64I_Breg<bits<4> opc, bits<5> op2, bits<6> op3, bits<5> op4,
dag outs, dag ins, string asmstr, dag outs, dag ins, string asmstr,
list<dag> patterns, InstrItinClass itin> list<dag> patterns, InstrItinClass itin>
: A64Inst<outs, ins, asmstr, patterns, itin> : A64Inst<outs, ins, asmstr, patterns, itin> {
{
// Doubly special in not even sharing register fields with other // Doubly special in not even sharing register fields with other
// instructions, so we create our own Rn here. // instructions, so we create our own Rn here.
bits<5> Rn; bits<5> Rn;


@ -613,7 +613,8 @@ bool llvm::rewriteA64FrameIndex(MachineInstr &MI, unsigned FrameRegIdx,
llvm_unreachable("Unimplemented rewriteFrameIndex"); llvm_unreachable("Unimplemented rewriteFrameIndex");
} }
void llvm::emitRegUpdate(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, void llvm::emitRegUpdate(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI,
DebugLoc dl, const TargetInstrInfo &TII, DebugLoc dl, const TargetInstrInfo &TII,
unsigned DstReg, unsigned SrcReg, unsigned ScratchReg, unsigned DstReg, unsigned SrcReg, unsigned ScratchReg,
int64_t NumBytes, MachineInstr::MIFlag MIFlags) { int64_t NumBytes, MachineInstr::MIFlag MIFlags) {
@ -695,7 +696,8 @@ namespace {
LDTLSCleanup() : MachineFunctionPass(ID) {} LDTLSCleanup() : MachineFunctionPass(ID) {}
virtual bool runOnMachineFunction(MachineFunction &MF) { virtual bool runOnMachineFunction(MachineFunction &MF) {
AArch64MachineFunctionInfo* MFI = MF.getInfo<AArch64MachineFunctionInfo>(); AArch64MachineFunctionInfo* MFI
= MF.getInfo<AArch64MachineFunctionInfo>();
if (MFI->getNumLocalDynamicTLSAccesses() < 2) { if (MFI->getNumLocalDynamicTLSAccesses() < 2) {
// No point folding accesses if there isn't at least two. // No point folding accesses if there isn't at least two.
return false; return false;

File diff suppressed because it is too large.


@ -121,7 +121,9 @@ public:
void setBytesInStackArgArea (unsigned bytes) { BytesInStackArgArea = bytes;} void setBytesInStackArgArea (unsigned bytes) { BytesInStackArgArea = bytes;}
unsigned getArgumentStackToRestore() const { return ArgumentStackToRestore; } unsigned getArgumentStackToRestore() const { return ArgumentStackToRestore; }
void setArgumentStackToRestore(unsigned bytes) { ArgumentStackToRestore = bytes; } void setArgumentStackToRestore(unsigned bytes) {
ArgumentStackToRestore = bytes;
}
unsigned getInitialStackAdjust() const { return InitialStackAdjust; } unsigned getInitialStackAdjust() const { return InitialStackAdjust; }
void setInitialStackAdjust(unsigned bytes) { InitialStackAdjust = bytes; } void setInitialStackAdjust(unsigned bytes) { InitialStackAdjust = bytes; }


@ -7,7 +7,8 @@
// //
//===----------------------------------------------------------------------===// //===----------------------------------------------------------------------===//
// //
// This file contains the AArch64 implementation of the TargetRegisterInfo class. // This file contains the AArch64 implementation of the TargetRegisterInfo
// class.
// //
//===----------------------------------------------------------------------===// //===----------------------------------------------------------------------===//
@ -87,7 +88,7 @@ AArch64RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator MBBI,
MachineFunction &MF = *MBB.getParent(); MachineFunction &MF = *MBB.getParent();
MachineFrameInfo *MFI = MF.getFrameInfo(); MachineFrameInfo *MFI = MF.getFrameInfo();
const AArch64FrameLowering *TFI = const AArch64FrameLowering *TFI =
static_cast<const AArch64FrameLowering *>(MF.getTarget().getFrameLowering()); static_cast<const AArch64FrameLowering *>(MF.getTarget().getFrameLowering());
// In order to work out the base and offset for addressing, the FrameLowering // In order to work out the base and offset for addressing, the FrameLowering
// code needs to know (sometimes) whether the instruction is storing/loading a // code needs to know (sometimes) whether the instruction is storing/loading a
@ -202,6 +203,7 @@ AArch64RegisterInfo::getFrameRegister(const MachineFunction &MF) const {
bool bool
AArch64RegisterInfo::useFPForScavengingIndex(const MachineFunction &MF) const { AArch64RegisterInfo::useFPForScavengingIndex(const MachineFunction &MF) const {
const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering(); const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
const AArch64FrameLowering *AFI = static_cast<const AArch64FrameLowering*>(TFI); const AArch64FrameLowering *AFI
= static_cast<const AArch64FrameLowering*>(TFI);
return AFI->useFPForAddressing(MF); return AFI->useFPForAddressing(MF);
} }


@ -193,13 +193,11 @@ def VPR128 : RegisterClass<"AArch64",
(sequence "V%u", 0, 31)>; (sequence "V%u", 0, 31)>;
// Flags register // Flags register
def NZCV : Register<"nzcv"> def NZCV : Register<"nzcv"> {
{
let Namespace = "AArch64"; let Namespace = "AArch64";
} }
def FlagClass : RegisterClass<"AArch64", [i32], 32, (add NZCV)> def FlagClass : RegisterClass<"AArch64", [i32], 32, (add NZCV)> {
{
let CopyCost = -1; let CopyCost = -1;
let isAllocatable = 0; let isAllocatable = 0;
} }


@ -1,4 +1,4 @@
//===-- AArch64SelectionDAGInfo.h - AArch64 SelectionDAG Info -----*- C++ -*-===// //===-- AArch64SelectionDAGInfo.h - AArch64 SelectionDAG Info ---*- C++ -*-===//
// //
// The LLVM Compiler Infrastructure // The LLVM Compiler Infrastructure
// //


@ -1,4 +1,4 @@
//===-- AArch64TargetObjectFile.cpp - AArch64 Object Info ------------------===// //===-- AArch64TargetObjectFile.cpp - AArch64 Object Info -----------------===//
// //
// The LLVM Compiler Infrastructure // The LLVM Compiler Infrastructure
// //


@ -53,7 +53,7 @@ public:
// These are the public interface of the MCTargetAsmParser // These are the public interface of the MCTargetAsmParser
bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc); bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc);
bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name, bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
SMLoc NameLoc, SMLoc NameLoc,
SmallVectorImpl<MCParsedAsmOperand*> &Operands); SmallVectorImpl<MCParsedAsmOperand*> &Operands);
@ -116,7 +116,7 @@ public:
ParseSysRegOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands); ParseSysRegOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands);
bool validateInstruction(MCInst &Inst, bool validateInstruction(MCInst &Inst,
const SmallVectorImpl<MCParsedAsmOperand*> &Operands); const SmallVectorImpl<MCParsedAsmOperand*> &Operands);
/// Scan the next token (which had better be an identifier) and determine /// Scan the next token (which had better be an identifier) and determine
/// whether it represents a general-purpose or vector register. It returns /// whether it represents a general-purpose or vector register. It returns
@ -1674,7 +1674,8 @@ AArch64AsmParser::ParseShiftExtend(
if (Parser.getTok().is(AsmToken::Comma) || if (Parser.getTok().is(AsmToken::Comma) ||
Parser.getTok().is(AsmToken::EndOfStatement) || Parser.getTok().is(AsmToken::EndOfStatement) ||
Parser.getTok().is(AsmToken::RBrac)) { Parser.getTok().is(AsmToken::RBrac)) {
Operands.push_back(AArch64Operand::CreateShiftExtend(Spec, 0, true, S, E)); Operands.push_back(AArch64Operand::CreateShiftExtend(Spec, 0, true,
S, E));
return MatchOperand_Success; return MatchOperand_Success;
} }
} }
@ -1697,7 +1698,8 @@ AArch64AsmParser::ParseShiftExtend(
Parser.Lex(); Parser.Lex();
E = Parser.getTok().getLoc(); E = Parser.getTok().getLoc();
Operands.push_back(AArch64Operand::CreateShiftExtend(Spec, Amount, false, S, E)); Operands.push_back(AArch64Operand::CreateShiftExtend(Spec, Amount, false,
S, E));
return MatchOperand_Success; return MatchOperand_Success;
} }
@ -1942,7 +1944,7 @@ bool AArch64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
bool MatchingInlineAsm) { bool MatchingInlineAsm) {
MCInst Inst; MCInst Inst;
unsigned MatchResult; unsigned MatchResult;
MatchResult = MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchResult = MatchInstructionImpl(Operands, Inst, ErrorInfo,
MatchingInlineAsm); MatchingInlineAsm);
switch (MatchResult) { switch (MatchResult) {
default: break; default: break;


@@ -77,10 +77,12 @@ static DecodeStatus DecodeFPR32RegisterClass(llvm::MCInst &Inst, unsigned RegNo,
                                             uint64_t Address, const void *Decoder);
static DecodeStatus DecodeFPR64RegisterClass(llvm::MCInst &Inst, unsigned RegNo,
                                             uint64_t Address, const void *Decoder);
-static DecodeStatus DecodeFPR128RegisterClass(llvm::MCInst &Inst, unsigned RegNo,
-                                              uint64_t Address, const void *Decoder);
-static DecodeStatus DecodeVPR128RegisterClass(llvm::MCInst &Inst, unsigned RegNo,
-                                              uint64_t Address, const void *Decoder);
+static DecodeStatus DecodeFPR128RegisterClass(llvm::MCInst &Inst,
+                                              unsigned RegNo, uint64_t Address,
+                                              const void *Decoder);
+static DecodeStatus DecodeVPR128RegisterClass(llvm::MCInst &Inst,
+                                              unsigned RegNo, uint64_t Address,
+                                              const void *Decoder);
static DecodeStatus DecodeAddrRegExtendOperand(llvm::MCInst &Inst,
                                               unsigned OptionHiS,
@@ -143,11 +145,10 @@ static DecodeStatus DecodeNamedImmOperand(llvm::MCInst &Inst,
                                          uint64_t Address,
                                          const void *Decoder);
-static DecodeStatus DecodeSysRegOperand(const A64SysReg::SysRegMapper &InstMapper,
-                                        llvm::MCInst &Inst,
-                                        unsigned Val,
-                                        uint64_t Address,
-                                        const void *Decoder);
+static DecodeStatus
+DecodeSysRegOperand(const A64SysReg::SysRegMapper &InstMapper,
+                    llvm::MCInst &Inst, unsigned Val,
+                    uint64_t Address, const void *Decoder);
static DecodeStatus DecodeMRSOperand(llvm::MCInst &Inst,
                                     unsigned Val,
@@ -247,7 +248,8 @@ DecodeGPR64xspRegisterClass(llvm::MCInst &Inst, unsigned RegNo,
}
static DecodeStatus DecodeGPR32RegisterClass(llvm::MCInst &Inst, unsigned RegNo,
-                                             uint64_t Address, const void *Decoder) {
+                                             uint64_t Address,
+                                             const void *Decoder) {
  if (RegNo > 31)
    return MCDisassembler::Fail;
@@ -460,8 +462,10 @@ static DecodeStatus DecodeBitfieldInstruction(llvm::MCInst &Inst, unsigned Insn,
  }
  // ASR and LSR have more specific patterns so they won't get here:
-  assert(!(ImmS == 31 && !SF && Opc != BFM) && "shift should have used auto decode");
-  assert(!(ImmS == 63 && SF && Opc != BFM) && "shift should have used auto decode");
+  assert(!(ImmS == 31 && !SF && Opc != BFM)
+         && "shift should have used auto decode");
+  assert(!(ImmS == 63 && SF && Opc != BFM)
+         && "shift should have used auto decode");
  // Extension instructions similarly:
  if (Opc == SBFM && ImmR == 0) {


@@ -114,7 +114,7 @@ public:
  }
  void printShiftOperand(const char *name, const MCInst *MI,
                         unsigned OpIdx, raw_ostream &O);
  void printLSLOperand(const MCInst *MI, unsigned OpNum, raw_ostream &O);


@@ -94,73 +94,73 @@ public:
// This table *must* be in the order that the fixup_* kinds are defined in
// AArch64FixupKinds.h.
//
// Name Offset (bits) Size (bits) Flags
{ "fixup_a64_ld_prel", 0, 32, MCFixupKindInfo::FKF_IsPCRel },
{ "fixup_a64_adr_prel", 0, 32, MCFixupKindInfo::FKF_IsPCRel },
{ "fixup_a64_adr_prel_page", 0, 32, MCFixupKindInfo::FKF_IsPCRel },
{ "fixup_a64_add_lo12", 0, 32, 0 },
{ "fixup_a64_ldst8_lo12", 0, 32, 0 },
{ "fixup_a64_ldst16_lo12", 0, 32, 0 },
{ "fixup_a64_ldst32_lo12", 0, 32, 0 },
{ "fixup_a64_ldst64_lo12", 0, 32, 0 },
{ "fixup_a64_ldst128_lo12", 0, 32, 0 },
{ "fixup_a64_tstbr", 0, 32, MCFixupKindInfo::FKF_IsPCRel },
{ "fixup_a64_condbr", 0, 32, MCFixupKindInfo::FKF_IsPCRel },
{ "fixup_a64_uncondbr", 0, 32, MCFixupKindInfo::FKF_IsPCRel },
{ "fixup_a64_call", 0, 32, MCFixupKindInfo::FKF_IsPCRel },
{ "fixup_a64_movw_uabs_g0", 0, 32, 0 },
{ "fixup_a64_movw_uabs_g0_nc", 0, 32, 0 },
{ "fixup_a64_movw_uabs_g1", 0, 32, 0 },
{ "fixup_a64_movw_uabs_g1_nc", 0, 32, 0 },
{ "fixup_a64_movw_uabs_g2", 0, 32, 0 },
{ "fixup_a64_movw_uabs_g2_nc", 0, 32, 0 },
{ "fixup_a64_movw_uabs_g3", 0, 32, 0 },
{ "fixup_a64_movw_sabs_g0", 0, 32, 0 },
{ "fixup_a64_movw_sabs_g1", 0, 32, 0 },
{ "fixup_a64_movw_sabs_g2", 0, 32, 0 },
{ "fixup_a64_adr_prel_got_page", 0, 32, MCFixupKindInfo::FKF_IsPCRel },
{ "fixup_a64_ld64_got_lo12_nc", 0, 32, 0 },
{ "fixup_a64_movw_dtprel_g2", 0, 32, 0 },
{ "fixup_a64_movw_dtprel_g1", 0, 32, 0 },
{ "fixup_a64_movw_dtprel_g1_nc", 0, 32, 0 },
{ "fixup_a64_movw_dtprel_g0", 0, 32, 0 },
{ "fixup_a64_movw_dtprel_g0_nc", 0, 32, 0 },
{ "fixup_a64_add_dtprel_hi12", 0, 32, 0 },
{ "fixup_a64_add_dtprel_lo12", 0, 32, 0 },
{ "fixup_a64_add_dtprel_lo12_nc", 0, 32, 0 },
{ "fixup_a64_ldst8_dtprel_lo12", 0, 32, 0 },
{ "fixup_a64_ldst8_dtprel_lo12_nc", 0, 32, 0 },
{ "fixup_a64_ldst16_dtprel_lo12", 0, 32, 0 },
{ "fixup_a64_ldst16_dtprel_lo12_nc", 0, 32, 0 },
{ "fixup_a64_ldst32_dtprel_lo12", 0, 32, 0 },
{ "fixup_a64_ldst32_dtprel_lo12_nc", 0, 32, 0 },
{ "fixup_a64_ldst64_dtprel_lo12", 0, 32, 0 },
{ "fixup_a64_ldst64_dtprel_lo12_nc", 0, 32, 0 },
{ "fixup_a64_movw_gottprel_g1", 0, 32, 0 },
{ "fixup_a64_movw_gottprel_g0_nc", 0, 32, 0 },
{ "fixup_a64_adr_gottprel_page", 0, 32, MCFixupKindInfo::FKF_IsPCRel },
{ "fixup_a64_ld64_gottprel_lo12_nc", 0, 32, 0 },
{ "fixup_a64_ld_gottprel_prel19", 0, 32, MCFixupKindInfo::FKF_IsPCRel },
{ "fixup_a64_movw_tprel_g2", 0, 32, 0 },
{ "fixup_a64_movw_tprel_g1", 0, 32, 0 },
{ "fixup_a64_movw_tprel_g1_nc", 0, 32, 0 },
{ "fixup_a64_movw_tprel_g0", 0, 32, 0 },
{ "fixup_a64_movw_tprel_g0_nc", 0, 32, 0 },
{ "fixup_a64_add_tprel_hi12", 0, 32, 0 },
{ "fixup_a64_add_tprel_lo12", 0, 32, 0 },
{ "fixup_a64_add_tprel_lo12_nc", 0, 32, 0 },
{ "fixup_a64_ldst8_tprel_lo12", 0, 32, 0 },
{ "fixup_a64_ldst8_tprel_lo12_nc", 0, 32, 0 },
{ "fixup_a64_ldst16_tprel_lo12", 0, 32, 0 },
{ "fixup_a64_ldst16_tprel_lo12_nc", 0, 32, 0 },
{ "fixup_a64_ldst32_tprel_lo12", 0, 32, 0 },
{ "fixup_a64_ldst32_tprel_lo12_nc", 0, 32, 0 },
{ "fixup_a64_ldst64_tprel_lo12", 0, 32, 0 },
{ "fixup_a64_ldst64_tprel_lo12_nc", 0, 32, 0 },
{ "fixup_a64_tlsdesc_adr_page", 0, 32, MCFixupKindInfo::FKF_IsPCRel },
{ "fixup_a64_tlsdesc_ld64_lo12_nc", 0, 32, 0 },
{ "fixup_a64_tlsdesc_add_lo12_nc", 0, 32, 0 },
{ "fixup_a64_tlsdesc_call", 0, 0, 0 }
};
if (Kind < FirstTargetFixupKind)
  return MCAsmBackend::getFixupKindInfo(Kind);


@@ -364,7 +364,7 @@ AArch64MCCodeEmitter::getMachineOpValue(const MCInst &MI,
unsigned
AArch64MCCodeEmitter::getMoveWideImmOpValue(const MCInst &MI, unsigned OpIdx,
                                            SmallVectorImpl<MCFixup> &Fixups) const {
  const MCOperand &UImm16MO = MI.getOperand(OpIdx);
  const MCOperand &ShiftMO = MI.getOperand(OpIdx + 1);


@@ -83,7 +83,8 @@ public:
    return Create(VK_AARCH64_GOT, Expr, Ctx);
  }

-  static const AArch64MCExpr *CreateGOTLo12(const MCExpr *Expr, MCContext &Ctx) {
+  static const AArch64MCExpr *CreateGOTLo12(const MCExpr *Expr,
+                                            MCContext &Ctx) {
    return Create(VK_AARCH64_GOT_LO12, Expr, Ctx);
  }


@@ -1,4 +1,4 @@
-//===-- AArch64TargetInfo.cpp - AArch64 Target Implementation ---------------===//
+//===-- AArch64TargetInfo.cpp - AArch64 Target Implementation -------------===//
//
// The LLVM Compiler Infrastructure
//


@@ -725,7 +725,8 @@ bool A64Imms::isLogicalImm(unsigned RegWidth, uint64_t Imm, uint32_t &Bits) {
}

-bool A64Imms::isLogicalImmBits(unsigned RegWidth, uint32_t Bits, uint64_t &Imm) {
+bool A64Imms::isLogicalImmBits(unsigned RegWidth, uint32_t Bits,
+                               uint64_t &Imm) {
  uint32_t N = Bits >> 12;
  uint32_t ImmR = (Bits >> 6) & 0x3f;
  uint32_t ImmS = Bits & 0x3f;