Mirror of https://github.com/c64scene-ar/llvm-6502.git (synced 2025-06-23 17:24:48 +00:00)
Nuke whitespace and fix some indenting.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@113463 91177308-0d34-0410-b5e6-96231b3b80d8
lib/Target/ARM/ARMFastISel.cpp

@@ -63,7 +63,7 @@ class ARMFastISel : public FastISel {
  bool isThumb;

  public:
    explicit ARMFastISel(FunctionLoweringInfo &funcInfo)
    : FastISel(funcInfo),
      TM(funcInfo.MF->getTarget()),
      TII(*TM.getInstrInfo()),
@@ -102,13 +102,13 @@ class ARMFastISel : public FastISel {
    virtual unsigned FastEmitInst_extractsubreg(MVT RetVT,
                                                unsigned Op0, bool Op0IsKill,
                                                uint32_t Idx);

    // Backend specific FastISel code.
    virtual bool TargetSelectInstruction(const Instruction *I);
    virtual unsigned TargetMaterializeConstant(const Constant *C);

#include "ARMGenFastISel.inc"

    // Instruction selection routines.
    virtual bool ARMSelectLoad(const Instruction *I);
    virtual bool ARMSelectStore(const Instruction *I);
@@ -128,7 +128,7 @@ class ARMFastISel : public FastISel {
    bool ARMComputeRegOffset(const Value *Obj, unsigned &Reg, int &Offset);
    unsigned ARMMaterializeFP(const ConstantFP *CFP, EVT VT);
    unsigned ARMMaterializeInt(const Constant *C);

    bool DefinesOptionalPredicate(MachineInstr *MI, bool *CPSR);
    const MachineInstrBuilder &AddOptionalDefs(const MachineInstrBuilder &MIB);
};
@@ -164,7 +164,7 @@ ARMFastISel::AddOptionalDefs(const MachineInstrBuilder &MIB) {
  // Do we use a predicate?
  if (TII.isPredicable(MI))
    AddDefaultPred(MIB);

  // Do we optionally set a predicate? Preds is size > 0 iff the predicate
  // defines CPSR. All other OptionalDefines in ARM are the CCR register.
  bool CPSR = false;
@@ -301,7 +301,7 @@ unsigned ARMFastISel::FastEmitInst_i(unsigned MachineInstOpcode,
                                     uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addImm(Imm));
@@ -333,7 +333,7 @@ unsigned ARMFastISel::FastEmitInst_extractsubreg(MVT RetVT,
unsigned ARMFastISel::ARMMaterializeFP(const ConstantFP *CFP, EVT VT) {
  const APFloat Val = CFP->getValueAPF();
  bool is64bit = VT.getSimpleVT().SimpleTy == MVT::f64;

  // This checks to see if we can use VFP3 instructions to materialize
  // a constant, otherwise we have to go through the constant pool.
  if (TLI.isFPImmLegal(Val, VT)) {
@@ -344,10 +344,10 @@ unsigned ARMFastISel::ARMMaterializeFP(const ConstantFP *CFP, EVT VT) {
                    .addFPImm(CFP));
    return DestReg;
  }

  // No 64-bit at the moment.
  if (is64bit) return 0;

  // Load this from the constant pool.
  unsigned DestReg = ARMMaterializeInt(cast<Constant>(CFP));

@@ -379,7 +379,7 @@ unsigned ARMFastISel::ARMMaterializeInt(const Constant *C) {
                          TII.get(ARM::LDRcp))
                  .addReg(DestReg).addConstantPoolIndex(Idx)
                  .addReg(0).addImm(0));

  return DestReg;
}

@@ -396,10 +396,10 @@ unsigned ARMFastISel::TargetMaterializeConstant(const Constant *C) {

bool ARMFastISel::isTypeLegal(const Type *Ty, EVT &VT) {
  VT = TLI.getValueType(Ty, true);

  // Only handle simple types.
  if (VT == MVT::Other || !VT.isSimple()) return false;

  // Handle all legal types, i.e. a register that will directly hold this
  // value.
  return TLI.isTypeLegal(VT);
@@ -407,12 +407,12 @@ bool ARMFastISel::isTypeLegal(const Type *Ty, EVT &VT) {

bool ARMFastISel::isLoadTypeLegal(const Type *Ty, EVT &VT) {
  if (isTypeLegal(Ty, VT)) return true;

  // If this is a type than can be sign or zero-extended to a basic operation
  // go ahead and accept it now.
  if (VT == MVT::i8 || VT == MVT::i16)
    return true;

  return false;
}

@@ -441,9 +441,9 @@ bool ARMFastISel::ARMComputeRegOffset(const Value *Obj, unsigned &Reg,
    // Fast instruction selection doesn't support the special
    // address spaces.
    return false;

  switch (Opcode) {
    default:
    //errs() << "Failing Opcode is: " << *Op1 << "\n";
    break;
    case Instruction::Alloca: {
@@ -451,13 +451,13 @@ bool ARMFastISel::ARMComputeRegOffset(const Value *Obj, unsigned &Reg,
      return false;
    }
  }

  if (const GlobalValue *GV = dyn_cast<GlobalValue>(Obj)) {
    //errs() << "Failing GV is: " << GV << "\n";
    (void)GV;
    return false;
  }

  // Try to get this in a register if nothing else has worked.
  Reg = getRegForValue(Obj);
  if (Reg == 0) return false;
@@ -483,7 +483,7 @@ bool ARMFastISel::ARMComputeRegOffset(const Value *Obj, unsigned &Reg,
                               static_cast<const ARMBaseInstrInfo&>(TII));
    }
  }

  return true;
}

@@ -510,12 +510,12 @@ bool ARMFastISel::ARMLoadAlloca(const Instruction *I, EVT VT) {

bool ARMFastISel::ARMEmitLoad(EVT VT, unsigned &ResultReg,
                              unsigned Reg, int Offset) {

  assert(VT.isSimple() && "Non-simple types are invalid here!");
  unsigned Opc;

  switch (VT.getSimpleVT().SimpleTy) {
    default:
      assert(false && "Trying to emit for an unhandled type!");
      return false;
    case MVT::i16:
@@ -530,9 +530,9 @@ bool ARMFastISel::ARMEmitLoad(EVT VT, unsigned &ResultReg,
      Opc = isThumb ? ARM::tLDR : ARM::LDR;
      break;
  }

  ResultReg = createResultReg(TLI.getRegClassFor(VT));

  // TODO: Fix the Addressing modes so that these can share some code.
  // Since this is a Thumb1 load this will work in Thumb1 or 2 mode.
  if (isThumb)
@@ -543,7 +543,7 @@ bool ARMFastISel::ARMEmitLoad(EVT VT, unsigned &ResultReg,
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(Opc), ResultReg)
                    .addReg(Reg).addReg(0).addImm(Offset));

  return true;
}

@@ -585,7 +585,7 @@ bool ARMFastISel::ARMEmitStore(EVT VT, unsigned SrcReg,
      StrOpc = ARM::VSTRD;
      break;
  }

  if (isThumb)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(StrOpc), SrcReg)
@@ -594,7 +594,7 @@ bool ARMFastISel::ARMEmitStore(EVT VT, unsigned SrcReg,
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(StrOpc), SrcReg)
                    .addReg(DstReg).addReg(0).addImm(Offset));

  return true;
}

@@ -611,24 +611,24 @@ bool ARMFastISel::ARMSelectStore(const Instruction *I) {
  SrcReg = getRegForValue(Op0);
  if (SrcReg == 0)
    return false;

  // If we're an alloca we know we have a frame index and can emit the store
  // quickly.
  if (ARMStoreAlloca(I, SrcReg, VT))
    return true;

  // Our register and offset with innocuous defaults.
  unsigned Reg = 0;
  int Offset = 0;

  // See if we can handle this as Reg + Offset
  if (!ARMComputeRegOffset(I->getOperand(1), Reg, Offset))
    return false;

  if (!ARMEmitStore(VT, SrcReg, Reg, Offset /* 0 */)) return false;

  return false;

}

bool ARMFastISel::ARMSelectLoad(const Instruction *I) {
@@ -636,23 +636,23 @@ bool ARMFastISel::ARMSelectLoad(const Instruction *I) {
  EVT VT;
  if (!isLoadTypeLegal(I->getType(), VT))
    return false;

  // If we're an alloca we know we have a frame index and can emit the load
  // directly in short order.
  if (ARMLoadAlloca(I, VT))
    return true;

  // Our register and offset with innocuous defaults.
  unsigned Reg = 0;
  int Offset = 0;

  // See if we can handle this as Reg + Offset
  if (!ARMComputeRegOffset(I->getOperand(0), Reg, Offset))
    return false;

  unsigned ResultReg;
  if (!ARMEmitLoad(VT, ResultReg, Reg, Offset /* 0 */)) return false;

  UpdateValueMap(I, ResultReg);
  return true;
}
@@ -661,11 +661,11 @@ bool ARMFastISel::ARMSelectBranch(const Instruction *I) {
  const BranchInst *BI = cast<BranchInst>(I);
  MachineBasicBlock *TBB = FuncInfo.MBBMap[BI->getSuccessor(0)];
  MachineBasicBlock *FBB = FuncInfo.MBBMap[BI->getSuccessor(1)];

  // Simple branch support.
  unsigned CondReg = getRegForValue(BI->getCondition());
  if (CondReg == 0) return false;

  unsigned CmpOpc = isThumb ? ARM::t2CMPrr : ARM::CMPrr;
  unsigned BrOpc = isThumb ? ARM::t2Bcc : ARM::Bcc;
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CmpOpc))
@@ -679,16 +679,16 @@ bool ARMFastISel::ARMSelectBranch(const Instruction *I) {

bool ARMFastISel::ARMSelectCmp(const Instruction *I) {
  const CmpInst *CI = cast<CmpInst>(I);

  EVT VT;
  const Type *Ty = CI->getOperand(0)->getType();
  if (!isTypeLegal(Ty, VT))
    return false;

  bool isFloat = (Ty->isDoubleTy() || Ty->isFloatTy());
  if (isFloat && !Subtarget->hasVFP2())
    return false;

  unsigned CmpOpc;
  switch (VT.getSimpleVT().SimpleTy) {
    default: return false;
@@ -706,13 +706,13 @@ bool ARMFastISel::ARMSelectCmp(const Instruction *I) {

  unsigned Arg1 = getRegForValue(CI->getOperand(0));
  if (Arg1 == 0) return false;

  unsigned Arg2 = getRegForValue(CI->getOperand(1));
  if (Arg2 == 0) return false;

  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CmpOpc))
                  .addReg(Arg1).addReg(Arg2));

  // For floating point we need to move the result to a register we can
  // actually do something with.
  if (isFloat)
@@ -724,17 +724,17 @@ bool ARMFastISel::ARMSelectCmp(const Instruction *I) {
bool ARMFastISel::ARMSelectFPExt(const Instruction *I) {
  // Make sure we have VFP and that we're extending float to double.
  if (!Subtarget->hasVFP2()) return false;

  Value *V = I->getOperand(0);
  if (!I->getType()->isDoubleTy() ||
      !V->getType()->isFloatTy()) return false;

  unsigned Op = getRegForValue(V);
  if (Op == 0) return false;

  unsigned Result = createResultReg(ARM::DPRRegisterClass);

  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(ARM::VCVTDS), Result)
                  .addReg(Op));
  UpdateValueMap(I, Result);
@@ -743,7 +743,7 @@ bool ARMFastISel::ARMSelectFPExt(const Instruction *I) {

bool ARMFastISel::ARMSelectBinaryOp(const Instruction *I, unsigned ISDOpcode) {
  EVT VT = TLI.getValueType(I->getType(), true);

  // We can get here in the case when we want to use NEON for our fp
  // operations, but can't figure out how to. Just use the vfp instructions
  // if we have them.
@@ -752,13 +752,13 @@ bool ARMFastISel::ARMSelectBinaryOp(const Instruction *I, unsigned ISDOpcode) {
  bool isFloat = (Ty->isDoubleTy() || Ty->isFloatTy());
  if (isFloat && !Subtarget->hasVFP2())
    return false;

  unsigned Op1 = getRegForValue(I->getOperand(0));
  if (Op1 == 0) return false;

  unsigned Op2 = getRegForValue(I->getOperand(1));
  if (Op2 == 0) return false;

  unsigned Opc;
  bool is64bit = VT.getSimpleVT().SimpleTy == MVT::f64 ||
                 VT.getSimpleVT().SimpleTy == MVT::i64;
@@ -785,7 +785,7 @@ bool ARMFastISel::ARMSelectBinaryOp(const Instruction *I, unsigned ISDOpcode) {
bool ARMFastISel::TargetSelectInstruction(const Instruction *I) {
  // No Thumb-1 for now.
  if (isThumb && !AFI->isThumb2Function()) return false;

  switch (I->getOpcode()) {
    case Instruction::Load:
      return ARMSelectLoad(I);
@@ -795,15 +795,15 @@ bool ARMFastISel::TargetSelectInstruction(const Instruction *I) {
      return ARMSelectBranch(I);
    case Instruction::ICmp:
    case Instruction::FCmp:
      return ARMSelectCmp(I);
    case Instruction::FPExt:
      return ARMSelectFPExt(I);
    case Instruction::FAdd:
      return ARMSelectBinaryOp(I, ISD::FADD);
    case Instruction::FSub:
      return ARMSelectBinaryOp(I, ISD::FSUB);
    case Instruction::FMul:
      return ARMSelectBinaryOp(I, ISD::FMUL);
    default: break;
  }
  return false;