//===-- ARMISelLowering.cpp - ARM DAG Lowering Implementation -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that ARM uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#include "ARM.h"
#include "ARMAddressingModes.h"
#include "ARMConstantPoolValue.h"
#include "ARMISelLowering.h"
#include "ARMMachineFunctionInfo.h"
#include "ARMPerfectShuffle.h"
#include "ARMRegisterInfo.h"
#include "ARMSubtarget.h"
#include "ARMTargetMachine.h"
#include "ARMTargetObjectFile.h"
#include "llvm/CallingConv.h"
#include "llvm/Constants.h"
#include "llvm/Function.h"
#include "llvm/GlobalValue.h"
#include "llvm/Instruction.h"
#include "llvm/Intrinsics.h"
#include "llvm/Type.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/ADT/VectorExtras.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include <sstream>
using namespace llvm;

static bool CC_ARM_APCS_Custom_f64(unsigned &ValNo, EVT &ValVT, EVT &LocVT,
                                   CCValAssign::LocInfo &LocInfo,
                                   ISD::ArgFlagsTy &ArgFlags,
                                   CCState &State);
static bool CC_ARM_AAPCS_Custom_f64(unsigned &ValNo, EVT &ValVT, EVT &LocVT,
                                    CCValAssign::LocInfo &LocInfo,
                                    ISD::ArgFlagsTy &ArgFlags,
                                    CCState &State);
static bool RetCC_ARM_APCS_Custom_f64(unsigned &ValNo, EVT &ValVT, EVT &LocVT,
                                      CCValAssign::LocInfo &LocInfo,
                                      ISD::ArgFlagsTy &ArgFlags,
                                      CCState &State);
static bool RetCC_ARM_AAPCS_Custom_f64(unsigned &ValNo, EVT &ValVT, EVT &LocVT,
                                       CCValAssign::LocInfo &LocInfo,
                                       ISD::ArgFlagsTy &ArgFlags,
                                       CCState &State);

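/// addTypeForNEON - Set the operation actions for a legal NEON vector type.
/// Loads, stores, and integer bitwise operations on VT are promoted to the
/// given PromotedLdStVT / PromotedBitwiseVT so a single set of instruction
/// patterns can cover every same-sized vector type.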
void ARMTargetLowering::addTypeForNEON(EVT VT, EVT PromotedLdStVT,
                                       EVT PromotedBitwiseVT) {
  if (VT != PromotedLdStVT) {
    setOperationAction(ISD::LOAD, VT.getSimpleVT(), Promote);
    AddPromotedToType (ISD::LOAD, VT.getSimpleVT(),
                       PromotedLdStVT.getSimpleVT());

    setOperationAction(ISD::STORE, VT.getSimpleVT(), Promote);
    AddPromotedToType (ISD::STORE, VT.getSimpleVT(),
                       PromotedLdStVT.getSimpleVT());
  }

  EVT ElemTy = VT.getVectorElementType();
  if (ElemTy != MVT::i64 && ElemTy != MVT::f64)
    setOperationAction(ISD::VSETCC, VT.getSimpleVT(), Custom);
  if (ElemTy == MVT::i8 || ElemTy == MVT::i16)
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT.getSimpleVT(), Custom);
  if (ElemTy != MVT::i32) {
    setOperationAction(ISD::SINT_TO_FP, VT.getSimpleVT(), Expand);
    setOperationAction(ISD::UINT_TO_FP, VT.getSimpleVT(), Expand);
    setOperationAction(ISD::FP_TO_SINT, VT.getSimpleVT(), Expand);
    setOperationAction(ISD::FP_TO_UINT, VT.getSimpleVT(), Expand);
  }
  setOperationAction(ISD::BUILD_VECTOR, VT.getSimpleVT(), Custom);
  setOperationAction(ISD::VECTOR_SHUFFLE, VT.getSimpleVT(), Custom);
  setOperationAction(ISD::CONCAT_VECTORS, VT.getSimpleVT(), Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, VT.getSimpleVT(), Expand);
  if (VT.isInteger()) {
    setOperationAction(ISD::SHL, VT.getSimpleVT(), Custom);
    setOperationAction(ISD::SRA, VT.getSimpleVT(), Custom);
    setOperationAction(ISD::SRL, VT.getSimpleVT(), Custom);
  }

  // Promote all bit-wise operations.
  if (VT.isInteger() && VT != PromotedBitwiseVT) {
    setOperationAction(ISD::AND, VT.getSimpleVT(), Promote);
    AddPromotedToType (ISD::AND, VT.getSimpleVT(),
                       PromotedBitwiseVT.getSimpleVT());
    setOperationAction(ISD::OR,  VT.getSimpleVT(), Promote);
    AddPromotedToType (ISD::OR,  VT.getSimpleVT(),
                       PromotedBitwiseVT.getSimpleVT());
    setOperationAction(ISD::XOR, VT.getSimpleVT(), Promote);
    AddPromotedToType (ISD::XOR, VT.getSimpleVT(),
                       PromotedBitwiseVT.getSimpleVT());
  }

  // Neon does not support vector divide/remainder operations.
  setOperationAction(ISD::SDIV, VT.getSimpleVT(), Expand);
  setOperationAction(ISD::UDIV, VT.getSimpleVT(), Expand);
  setOperationAction(ISD::FDIV, VT.getSimpleVT(), Expand);
  setOperationAction(ISD::SREM, VT.getSimpleVT(), Expand);
  setOperationAction(ISD::UREM, VT.getSimpleVT(), Expand);
  setOperationAction(ISD::FREM, VT.getSimpleVT(), Expand);
}

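/// addDRTypeForNEON - Add a 64-bit vector type that lives in NEON D registers.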
void ARMTargetLowering::addDRTypeForNEON(EVT VT) {
  addRegisterClass(VT, ARM::DPRRegisterClass);
  addTypeForNEON(VT, MVT::f64, MVT::v2i32);
}

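/// addQRTypeForNEON - Add a 128-bit vector type that lives in NEON Q registers.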
void ARMTargetLowering::addQRTypeForNEON(EVT VT) {
  addRegisterClass(VT, ARM::QPRRegisterClass);
  addTypeForNEON(VT, MVT::v2f64, MVT::v4i32);
}

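/// createTLOF - Create the target's object-file lowering: Mach-O for Darwin
/// targets, ELF for everything else.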
static TargetLoweringObjectFile *createTLOF(TargetMachine &TM) {
  if (TM.getSubtarget<ARMSubtarget>().isTargetDarwin())
    return new TargetLoweringObjectFileMachO();
  return new ARMElfTargetObjectFile();
}

ARMTargetLowering::ARMTargetLowering(TargetMachine &TM)
    : TargetLowering(TM, createTLOF(TM)) {
  Subtarget = &TM.getSubtarget<ARMSubtarget>();

  if (Subtarget->isTargetDarwin()) {
    // Uses VFP for Thumb libfuncs if available.
    if (Subtarget->isThumb() && Subtarget->hasVFP2()) {
      // Single-precision floating-point arithmetic.
      setLibcallName(RTLIB::ADD_F32, "__addsf3vfp");
      setLibcallName(RTLIB::SUB_F32, "__subsf3vfp");
      setLibcallName(RTLIB::MUL_F32, "__mulsf3vfp");
      setLibcallName(RTLIB::DIV_F32, "__divsf3vfp");

      // Double-precision floating-point arithmetic.
      setLibcallName(RTLIB::ADD_F64, "__adddf3vfp");
      setLibcallName(RTLIB::SUB_F64, "__subdf3vfp");
      setLibcallName(RTLIB::MUL_F64, "__muldf3vfp");
      setLibcallName(RTLIB::DIV_F64, "__divdf3vfp");

      // Single-precision comparisons.
      setLibcallName(RTLIB::OEQ_F32, "__eqsf2vfp");
      setLibcallName(RTLIB::UNE_F32, "__nesf2vfp");
      setLibcallName(RTLIB::OLT_F32, "__ltsf2vfp");
      setLibcallName(RTLIB::OLE_F32, "__lesf2vfp");
      setLibcallName(RTLIB::OGE_F32, "__gesf2vfp");
      setLibcallName(RTLIB::OGT_F32, "__gtsf2vfp");
      setLibcallName(RTLIB::UO_F32,  "__unordsf2vfp");
      setLibcallName(RTLIB::O_F32,   "__unordsf2vfp");

      setCmpLibcallCC(RTLIB::OEQ_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::UNE_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OLT_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OLE_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OGE_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OGT_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::UO_F32,  ISD::SETNE);
      setCmpLibcallCC(RTLIB::O_F32,   ISD::SETEQ);

      // Double-precision comparisons.
      setLibcallName(RTLIB::OEQ_F64, "__eqdf2vfp");
      setLibcallName(RTLIB::UNE_F64, "__nedf2vfp");
      setLibcallName(RTLIB::OLT_F64, "__ltdf2vfp");
      setLibcallName(RTLIB::OLE_F64, "__ledf2vfp");
      setLibcallName(RTLIB::OGE_F64, "__gedf2vfp");
      setLibcallName(RTLIB::OGT_F64, "__gtdf2vfp");
      setLibcallName(RTLIB::UO_F64,  "__unorddf2vfp");
      setLibcallName(RTLIB::O_F64,   "__unorddf2vfp");

      setCmpLibcallCC(RTLIB::OEQ_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::UNE_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OLT_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OLE_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OGE_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OGT_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::UO_F64,  ISD::SETNE);
      setCmpLibcallCC(RTLIB::O_F64,   ISD::SETEQ);

      // Floating-point to integer conversions.
      // i64 conversions are done via library routines even when generating VFP
      // instructions, so use the same ones.
      setLibcallName(RTLIB::FPTOSINT_F64_I32, "__fixdfsivfp");
      setLibcallName(RTLIB::FPTOUINT_F64_I32, "__fixunsdfsivfp");
      setLibcallName(RTLIB::FPTOSINT_F32_I32, "__fixsfsivfp");
      setLibcallName(RTLIB::FPTOUINT_F32_I32, "__fixunssfsivfp");

      // Conversions between floating types.
      setLibcallName(RTLIB::FPROUND_F64_F32, "__truncdfsf2vfp");
      setLibcallName(RTLIB::FPEXT_F32_F64,   "__extendsfdf2vfp");

      // Integer to floating-point conversions.
      // i64 conversions are done via library routines even when generating VFP
      // instructions, so use the same ones.
      // FIXME: There appears to be some naming inconsistency in ARM libgcc:
      // e.g., __floatunsidf vs. __floatunssidfvfp.
      setLibcallName(RTLIB::SINTTOFP_I32_F64, "__floatsidfvfp");
      setLibcallName(RTLIB::UINTTOFP_I32_F64, "__floatunssidfvfp");
      setLibcallName(RTLIB::SINTTOFP_I32_F32, "__floatsisfvfp");
      setLibcallName(RTLIB::UINTTOFP_I32_F32, "__floatunssisfvfp");
    }
  }

  // These libcalls are not available in 32-bit.
  setLibcallName(RTLIB::SHL_I128, 0);
  setLibcallName(RTLIB::SRL_I128, 0);
  setLibcallName(RTLIB::SRA_I128, 0);

  // Libcalls should use the AAPCS base standard ABI, even if hard float
  // is in effect, as per the ARM RTABI specification, section 4.1.2.
  if (Subtarget->isAAPCS_ABI()) {
    for (int i = 0; i < RTLIB::UNKNOWN_LIBCALL; ++i) {
      setLibcallCallingConv(static_cast<RTLIB::Libcall>(i),
                            CallingConv::ARM_AAPCS);
    }
  }

  if (Subtarget->isThumb1Only())
    addRegisterClass(MVT::i32, ARM::tGPRRegisterClass);
  else
    addRegisterClass(MVT::i32, ARM::GPRRegisterClass);
  if (!UseSoftFloat && Subtarget->hasVFP2() && !Subtarget->isThumb1Only()) {
    addRegisterClass(MVT::f32, ARM::SPRRegisterClass);
    addRegisterClass(MVT::f64, ARM::DPRRegisterClass);

    setTruncStoreAction(MVT::f64, MVT::f32, Expand);
  }

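  // Register the NEON vector types and mark the operations that must be
  // expanded or custom-lowered for them.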
  if (Subtarget->hasNEON()) {
    addDRTypeForNEON(MVT::v2f32);
    addDRTypeForNEON(MVT::v8i8);
    addDRTypeForNEON(MVT::v4i16);
    addDRTypeForNEON(MVT::v2i32);
    addDRTypeForNEON(MVT::v1i64);

    addQRTypeForNEON(MVT::v4f32);
    addQRTypeForNEON(MVT::v2f64);
    addQRTypeForNEON(MVT::v16i8);
    addQRTypeForNEON(MVT::v8i16);
    addQRTypeForNEON(MVT::v4i32);
    addQRTypeForNEON(MVT::v2i64);

    // v2f64 is legal so that QR subregs can be extracted as f64 elements, but
    // neither Neon nor VFP support any arithmetic operations on it.
    setOperationAction(ISD::FADD, MVT::v2f64, Expand);
    setOperationAction(ISD::FSUB, MVT::v2f64, Expand);
    setOperationAction(ISD::FMUL, MVT::v2f64, Expand);
    setOperationAction(ISD::FDIV, MVT::v2f64, Expand);
    setOperationAction(ISD::FREM, MVT::v2f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::v2f64, Expand);
    setOperationAction(ISD::VSETCC, MVT::v2f64, Expand);
    setOperationAction(ISD::FNEG, MVT::v2f64, Expand);
    setOperationAction(ISD::FABS, MVT::v2f64, Expand);
    setOperationAction(ISD::FSQRT, MVT::v2f64, Expand);
    setOperationAction(ISD::FSIN, MVT::v2f64, Expand);
    setOperationAction(ISD::FCOS, MVT::v2f64, Expand);
    setOperationAction(ISD::FPOWI, MVT::v2f64, Expand);
    setOperationAction(ISD::FPOW, MVT::v2f64, Expand);
    setOperationAction(ISD::FLOG, MVT::v2f64, Expand);
    setOperationAction(ISD::FLOG2, MVT::v2f64, Expand);
    setOperationAction(ISD::FLOG10, MVT::v2f64, Expand);
    setOperationAction(ISD::FEXP, MVT::v2f64, Expand);
    setOperationAction(ISD::FEXP2, MVT::v2f64, Expand);
    setOperationAction(ISD::FCEIL, MVT::v2f64, Expand);
    setOperationAction(ISD::FTRUNC, MVT::v2f64, Expand);
    setOperationAction(ISD::FRINT, MVT::v2f64, Expand);
    setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Expand);
    setOperationAction(ISD::FFLOOR, MVT::v2f64, Expand);

    // Neon does not support some operations on v1i64 and v2i64 types.
    setOperationAction(ISD::MUL, MVT::v1i64, Expand);
    setOperationAction(ISD::MUL, MVT::v2i64, Expand);
    setOperationAction(ISD::VSETCC, MVT::v1i64, Expand);
    setOperationAction(ISD::VSETCC, MVT::v2i64, Expand);

    setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);
    setTargetDAGCombine(ISD::SHL);
    setTargetDAGCombine(ISD::SRL);
    setTargetDAGCombine(ISD::SRA);
    setTargetDAGCombine(ISD::SIGN_EXTEND);
    setTargetDAGCombine(ISD::ZERO_EXTEND);
    setTargetDAGCombine(ISD::ANY_EXTEND);
  }

  computeRegisterProperties();

  // ARM does not have f32 extending load.
  setLoadExtAction(ISD::EXTLOAD, MVT::f32, Expand);

  // ARM does not have i1 sign extending load.
  setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);

  // ARM supports all 4 flavors of integer indexed load / store.
  if (!Subtarget->isThumb1Only()) {
    for (unsigned im = (unsigned)ISD::PRE_INC;
         im != (unsigned)ISD::LAST_INDEXED_MODE; ++im) {
      setIndexedLoadAction(im,  MVT::i1,  Legal);
      setIndexedLoadAction(im,  MVT::i8,  Legal);
      setIndexedLoadAction(im,  MVT::i16, Legal);
      setIndexedLoadAction(im,  MVT::i32, Legal);
      setIndexedStoreAction(im, MVT::i1,  Legal);
      setIndexedStoreAction(im, MVT::i8,  Legal);
      setIndexedStoreAction(im, MVT::i16, Legal);
      setIndexedStoreAction(im, MVT::i32, Legal);
    }
  }

  // i64 operation support.
  if (Subtarget->isThumb1Only()) {
    setOperationAction(ISD::MUL,  MVT::i64, Expand);
    setOperationAction(ISD::MULHU, MVT::i32, Expand);
    setOperationAction(ISD::MULHS, MVT::i32, Expand);
    setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
    setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
  } else {
    setOperationAction(ISD::MUL,  MVT::i64, Expand);
    setOperationAction(ISD::MULHU, MVT::i32, Expand);
    if (!Subtarget->hasV6Ops())
      setOperationAction(ISD::MULHS, MVT::i32, Expand);
  }
  setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRL,  MVT::i64, Custom);
  setOperationAction(ISD::SRA,  MVT::i64, Custom);

  // ARM does not have ROTL.
  setOperationAction(ISD::ROTL,  MVT::i32, Expand);
  setOperationAction(ISD::CTTZ,  MVT::i32, Expand);
  setOperationAction(ISD::CTPOP, MVT::i32, Expand);
  if (!Subtarget->hasV5TOps() || Subtarget->isThumb1Only())
    setOperationAction(ISD::CTLZ, MVT::i32, Expand);

  // Only ARMv6 has BSWAP.
  if (!Subtarget->hasV6Ops())
    setOperationAction(ISD::BSWAP, MVT::i32, Expand);

  // These are expanded into libcalls.
  setOperationAction(ISD::SDIV,  MVT::i32, Expand);
  setOperationAction(ISD::UDIV,  MVT::i32, Expand);
  setOperationAction(ISD::SREM,  MVT::i32, Expand);
  setOperationAction(ISD::UREM,  MVT::i32, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i32, Expand);

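  // Address nodes are custom-lowered so they can be wrapped in the
  // ARM-specific wrapper/PIC sequences.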
  setOperationAction(ISD::GlobalAddress, MVT::i32,   Custom);
  setOperationAction(ISD::ConstantPool,  MVT::i32,   Custom);
  setOperationAction(ISD::GLOBAL_OFFSET_TABLE, MVT::i32, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
  setOperationAction(ISD::BlockAddress, MVT::i32, Custom);

  // Use the default implementation.
  setOperationAction(ISD::VASTART,            MVT::Other, Custom);
  setOperationAction(ISD::VAARG,              MVT::Other, Expand);
  setOperationAction(ISD::VACOPY,             MVT::Other, Expand);
  setOperationAction(ISD::VAEND,              MVT::Other, Expand);
  setOperationAction(ISD::STACKSAVE,          MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE,       MVT::Other, Expand);
  setOperationAction(ISD::EHSELECTION,        MVT::i32,   Expand);
  // FIXME: Shouldn't need this, since no register is used, but the legalizer
  // doesn't yet know how to not do that for SjLj.
  setExceptionSelectorRegister(ARM::R0);
  if (Subtarget->isThumb())
    setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom);
  else
    setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand);
  setOperationAction(ISD::MEMBARRIER,         MVT::Other, Custom);

  if (!Subtarget->hasV6Ops() && !Subtarget->isThumb2()) {
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8,  Expand);
  }
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

  if (!UseSoftFloat && Subtarget->hasVFP2() && !Subtarget->isThumb1Only())
    // Turn f64->i64 into VMOVRRD, i64 -> f64 to VMOVDRR iff target supports vfp2.
    setOperationAction(ISD::BIT_CONVERT, MVT::i64, Custom);

  // We want to custom lower some of our intrinsics.
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);

  setOperationAction(ISD::SETCC,     MVT::i32, Expand);
  setOperationAction(ISD::SETCC,     MVT::f32, Expand);
  setOperationAction(ISD::SETCC,     MVT::f64, Expand);
  setOperationAction(ISD::SELECT,    MVT::i32, Expand);
  setOperationAction(ISD::SELECT,    MVT::f32, Expand);
  setOperationAction(ISD::SELECT,    MVT::f64, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);

  setOperationAction(ISD::BRCOND,    MVT::Other, Expand);
  setOperationAction(ISD::BR_CC,     MVT::i32,   Custom);
  setOperationAction(ISD::BR_CC,     MVT::f32,   Custom);
  setOperationAction(ISD::BR_CC,     MVT::f64,   Custom);
  setOperationAction(ISD::BR_JT,     MVT::Other, Custom);

  // We don't support sin/cos/fmod/copysign/pow
  setOperationAction(ISD::FSIN,      MVT::f64, Expand);
  setOperationAction(ISD::FSIN,      MVT::f32, Expand);
  setOperationAction(ISD::FCOS,      MVT::f32, Expand);
  setOperationAction(ISD::FCOS,      MVT::f64, Expand);
  setOperationAction(ISD::FREM,      MVT::f64, Expand);
  setOperationAction(ISD::FREM,      MVT::f32, Expand);
  if (!UseSoftFloat && Subtarget->hasVFP2() && !Subtarget->isThumb1Only()) {
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);
  }
  setOperationAction(ISD::FPOW,      MVT::f64, Expand);
  setOperationAction(ISD::FPOW,      MVT::f32, Expand);

  // int <-> fp are custom expanded into bit_convert + ARMISD ops.
  if (!UseSoftFloat && Subtarget->hasVFP2() && !Subtarget->isThumb1Only()) {
    setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
  }

  // We have target-specific dag combine patterns for the following nodes:
  // ARMISD::VMOVRRD  - No need to call setTargetDAGCombine
  setTargetDAGCombine(ISD::ADD);
  setTargetDAGCombine(ISD::SUB);

  setStackPointerRegisterToSaveRestore(ARM::SP);
  setSchedulingPreference(SchedulingForRegPressure);

  // FIXME: If-converter should use instruction latency to determine
  // profitability rather than relying on fixed limits.
  if (Subtarget->getCPUString() == "generic") {
    // Generic (and overly aggressive) if-conversion limits.
    setIfCvtBlockSizeLimit(10);
    setIfCvtDupBlockSizeLimit(2);
  } else if (Subtarget->hasV6Ops()) {
    setIfCvtBlockSizeLimit(2);
    setIfCvtDupBlockSizeLimit(1);
  } else {
    setIfCvtBlockSizeLimit(3);
    setIfCvtDupBlockSizeLimit(2);
  }

  maxStoresPerMemcpy = 1;   //// temporary - rewrite interface to use type
  // Do not enable CodePlacementOpt for now: it currently runs after the
  // ARMConstantIslandPass and messes up branch relaxation and placement
  // of constant islands.
  // benefitFromCodePlacementOpt = true;
}

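/// getTargetNodeName - Return a human-readable name for the given
/// target-specific ARMISD opcode, used when dumping SelectionDAG nodes.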
const char *ARMTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch (Opcode) {
  default: return 0;
  case ARMISD::Wrapper:       return "ARMISD::Wrapper";
  case ARMISD::WrapperJT:     return "ARMISD::WrapperJT";
  case ARMISD::CALL:          return "ARMISD::CALL";
  case ARMISD::CALL_PRED:     return "ARMISD::CALL_PRED";
  case ARMISD::CALL_NOLINK:   return "ARMISD::CALL_NOLINK";
  case ARMISD::tCALL:         return "ARMISD::tCALL";
  case ARMISD::BRCOND:        return "ARMISD::BRCOND";
  case ARMISD::BR_JT:         return "ARMISD::BR_JT";
  case ARMISD::BR2_JT:        return "ARMISD::BR2_JT";
  case ARMISD::RET_FLAG:      return "ARMISD::RET_FLAG";
  case ARMISD::PIC_ADD:       return "ARMISD::PIC_ADD";
  case ARMISD::CMP:           return "ARMISD::CMP";
  case ARMISD::CMPZ:          return "ARMISD::CMPZ";
  case ARMISD::CMPFP:         return "ARMISD::CMPFP";
  case ARMISD::CMPFPw0:       return "ARMISD::CMPFPw0";
  case ARMISD::FMSTAT:        return "ARMISD::FMSTAT";
  case ARMISD::CMOV:          return "ARMISD::CMOV";
  case ARMISD::CNEG:          return "ARMISD::CNEG";

  case ARMISD::FTOSI:         return "ARMISD::FTOSI";
  case ARMISD::FTOUI:         return "ARMISD::FTOUI";
  case ARMISD::SITOF:         return "ARMISD::SITOF";
  case ARMISD::UITOF:         return "ARMISD::UITOF";

  case ARMISD::SRL_FLAG:      return "ARMISD::SRL_FLAG";
  case ARMISD::SRA_FLAG:      return "ARMISD::SRA_FLAG";
  case ARMISD::RRX:           return "ARMISD::RRX";

  case ARMISD::VMOVRRD:       return "ARMISD::VMOVRRD";
  case ARMISD::VMOVDRR:       return "ARMISD::VMOVDRR";

  case ARMISD::EH_SJLJ_SETJMP: return "ARMISD::EH_SJLJ_SETJMP";
  case ARMISD::EH_SJLJ_LONGJMP:return "ARMISD::EH_SJLJ_LONGJMP";

  case ARMISD::THREAD_POINTER:return "ARMISD::THREAD_POINTER";

  case ARMISD::DYN_ALLOC:     return "ARMISD::DYN_ALLOC";

  case ARMISD::MEMBARRIER:    return "ARMISD::MEMBARRIER";
  case ARMISD::SYNCBARRIER:   return "ARMISD::SYNCBARRIER";

  case ARMISD::VCEQ:          return "ARMISD::VCEQ";
  case ARMISD::VCGE:          return "ARMISD::VCGE";
  case ARMISD::VCGEU:         return "ARMISD::VCGEU";
  case ARMISD::VCGT:          return "ARMISD::VCGT";
  case ARMISD::VCGTU:         return "ARMISD::VCGTU";
  case ARMISD::VTST:          return "ARMISD::VTST";

  case ARMISD::VSHL:          return "ARMISD::VSHL";
  case ARMISD::VSHRs:         return "ARMISD::VSHRs";
  case ARMISD::VSHRu:         return "ARMISD::VSHRu";
  case ARMISD::VSHLLs:        return "ARMISD::VSHLLs";
  case ARMISD::VSHLLu:        return "ARMISD::VSHLLu";
  case ARMISD::VSHLLi:        return "ARMISD::VSHLLi";
  case ARMISD::VSHRN:         return "ARMISD::VSHRN";
  case ARMISD::VRSHRs:        return "ARMISD::VRSHRs";
  case ARMISD::VRSHRu:        return "ARMISD::VRSHRu";
  case ARMISD::VRSHRN:        return "ARMISD::VRSHRN";
  case ARMISD::VQSHLs:        return "ARMISD::VQSHLs";
  case ARMISD::VQSHLu:        return "ARMISD::VQSHLu";
  case ARMISD::VQSHLsu:       return "ARMISD::VQSHLsu";
  case ARMISD::VQSHRNs:       return "ARMISD::VQSHRNs";
  case ARMISD::VQSHRNu:       return "ARMISD::VQSHRNu";
  case ARMISD::VQSHRNsu:      return "ARMISD::VQSHRNsu";
  case ARMISD::VQRSHRNs:      return "ARMISD::VQRSHRNs";
  case ARMISD::VQRSHRNu:      return "ARMISD::VQRSHRNu";
  case ARMISD::VQRSHRNsu:     return "ARMISD::VQRSHRNsu";
  case ARMISD::VGETLANEu:     return "ARMISD::VGETLANEu";
  case ARMISD::VGETLANEs:     return "ARMISD::VGETLANEs";
  case ARMISD::VDUP:          return "ARMISD::VDUP";
  case ARMISD::VDUPLANE:      return "ARMISD::VDUPLANE";
  case ARMISD::VEXT:          return "ARMISD::VEXT";
  case ARMISD::VREV64:        return "ARMISD::VREV64";
  case ARMISD::VREV32:        return "ARMISD::VREV32";
  case ARMISD::VREV16:        return "ARMISD::VREV16";
  case ARMISD::VZIP:          return "ARMISD::VZIP";
  case ARMISD::VUZP:          return "ARMISD::VUZP";
  case ARMISD::VTRN:          return "ARMISD::VTRN";
  }
}

/// getFunctionAlignment - Return the Log2 alignment of this function.
unsigned ARMTargetLowering::getFunctionAlignment(const Function *F) const {
  return getTargetMachine().getSubtarget<ARMSubtarget>().isThumb() ? 0 : 1;
}

//===----------------------------------------------------------------------===//
// Lowering Code
//===----------------------------------------------------------------------===//

/// IntCCToARMCC - Convert a DAG integer condition code to an ARM CC
static ARMCC::CondCodes IntCCToARMCC(ISD::CondCode CC) {
  switch (CC) {
  default: llvm_unreachable("Unknown condition code!");
  case ISD::SETNE:  return ARMCC::NE;
  case ISD::SETEQ:  return ARMCC::EQ;
  case ISD::SETGT:  return ARMCC::GT;
  case ISD::SETGE:  return ARMCC::GE;
  case ISD::SETLT:  return ARMCC::LT;
  case ISD::SETLE:  return ARMCC::LE;
  case ISD::SETUGT: return ARMCC::HI;
  case ISD::SETUGE: return ARMCC::HS;
  case ISD::SETULT: return ARMCC::LO;
  case ISD::SETULE: return ARMCC::LS;
  }
}

/// FPCCToARMCC - Convert a DAG fp condition code to an ARM CC.
static void FPCCToARMCC(ISD::CondCode CC, ARMCC::CondCodes &CondCode,
                        ARMCC::CondCodes &CondCode2) {
  CondCode2 = ARMCC::AL;
  switch (CC) {
  default: llvm_unreachable("Unknown FP condition!");
  case ISD::SETEQ:
  case ISD::SETOEQ: CondCode = ARMCC::EQ; break;
  case ISD::SETGT:
  case ISD::SETOGT: CondCode = ARMCC::GT; break;
  case ISD::SETGE:
  case ISD::SETOGE: CondCode = ARMCC::GE; break;
  case ISD::SETOLT: CondCode = ARMCC::MI; break;
  case ISD::SETOLE: CondCode = ARMCC::LS; break;
  case ISD::SETONE: CondCode = ARMCC::MI; CondCode2 = ARMCC::GT; break;
  case ISD::SETO:   CondCode = ARMCC::VC; break;
  case ISD::SETUO:  CondCode = ARMCC::VS; break;
  case ISD::SETUEQ: CondCode = ARMCC::EQ; CondCode2 = ARMCC::VS; break;
  case ISD::SETUGT: CondCode = ARMCC::HI; break;
  case ISD::SETUGE: CondCode = ARMCC::PL; break;
  case ISD::SETLT:
  case ISD::SETULT: CondCode = ARMCC::LT; break;
  case ISD::SETLE:
  case ISD::SETULE: CondCode = ARMCC::LE; break;
  case ISD::SETNE:
  case ISD::SETUNE: CondCode = ARMCC::NE; break;
  }
}

//===----------------------------------------------------------------------===//
//  Calling Convention Implementation
//===----------------------------------------------------------------------===//

#include "ARMGenCallingConv.inc"

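// ARMGenCallingConv.inc is produced by TableGen from the calling-convention
// definitions in ARMCallingConv.td; the custom f64 handlers below are
// referenced from those tables.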
// APCS f64 is in register pairs, possibly split to stack
static bool f64AssignAPCS(unsigned &ValNo, EVT &ValVT, EVT &LocVT,
                          CCValAssign::LocInfo &LocInfo,
                          CCState &State, bool CanFail) {
  static const unsigned RegList[] = { ARM::R0, ARM::R1, ARM::R2, ARM::R3 };

  // Try to get the first register.
  if (unsigned Reg = State.AllocateReg(RegList, 4))
    State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
  else {
    // For the 2nd half of a v2f64, do not fail.
    if (CanFail)
      return false;

    // Put the whole thing on the stack.
    State.addLoc(CCValAssign::getCustomMem(ValNo, ValVT,
                                           State.AllocateStack(8, 4),
                                           LocVT, LocInfo));
    return true;
  }

  // Try to get the second register.
  if (unsigned Reg = State.AllocateReg(RegList, 4))
    State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
  else
    State.addLoc(CCValAssign::getCustomMem(ValNo, ValVT,
                                           State.AllocateStack(4, 4),
                                           LocVT, LocInfo));
  return true;
}

static bool CC_ARM_APCS_Custom_f64(unsigned &ValNo, EVT &ValVT, EVT &LocVT,
                                   CCValAssign::LocInfo &LocInfo,
                                   ISD::ArgFlagsTy &ArgFlags,
                                   CCState &State) {
  if (!f64AssignAPCS(ValNo, ValVT, LocVT, LocInfo, State, true))
    return false;
  if (LocVT == MVT::v2f64 &&
      !f64AssignAPCS(ValNo, ValVT, LocVT, LocInfo, State, false))
    return false;
  return true;  // we handled it
}

// AAPCS f64 is in aligned register pairs
static bool f64AssignAAPCS(unsigned &ValNo, EVT &ValVT, EVT &LocVT,
                           CCValAssign::LocInfo &LocInfo,
                           CCState &State, bool CanFail) {
  static const unsigned HiRegList[] = { ARM::R0, ARM::R2 };
  static const unsigned LoRegList[] = { ARM::R1, ARM::R3 };

  unsigned Reg = State.AllocateReg(HiRegList, LoRegList, 2);
  if (Reg == 0) {
    // For the 2nd half of a v2f64, do not just fail.
    if (CanFail)
      return false;

    // Put the whole thing on the stack.
    State.addLoc(CCValAssign::getCustomMem(ValNo, ValVT,
                                           State.AllocateStack(8, 8),
                                           LocVT, LocInfo));
    return true;
  }

  unsigned i;
  for (i = 0; i < 2; ++i)
    if (HiRegList[i] == Reg)
      break;

  State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
  State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, LoRegList[i],
                                         LocVT, LocInfo));
  return true;
}

static bool CC_ARM_AAPCS_Custom_f64(unsigned &ValNo, EVT &ValVT, EVT &LocVT,
                                    CCValAssign::LocInfo &LocInfo,
                                    ISD::ArgFlagsTy &ArgFlags,
                                    CCState &State) {
  if (!f64AssignAAPCS(ValNo, ValVT, LocVT, LocInfo, State, true))
    return false;
  if (LocVT == MVT::v2f64 &&
      !f64AssignAAPCS(ValNo, ValVT, LocVT, LocInfo, State, false))
    return false;
  return true;  // we handled it
}

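/// f64RetAssign - Assign one f64 return value (or half of a v2f64) to an
/// R0/R1 or R2/R3 register pair; returns false when no pair is left.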
static bool f64RetAssign(unsigned &ValNo, EVT &ValVT, EVT &LocVT,
                         CCValAssign::LocInfo &LocInfo, CCState &State) {
  static const unsigned HiRegList[] = { ARM::R0, ARM::R2 };
  static const unsigned LoRegList[] = { ARM::R1, ARM::R3 };

  unsigned Reg = State.AllocateReg(HiRegList, LoRegList, 2);
  if (Reg == 0)
    return false; // we didn't handle it

  unsigned i;
  for (i = 0; i < 2; ++i)
    if (HiRegList[i] == Reg)
      break;

  State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
  State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, LoRegList[i],
                                         LocVT, LocInfo));
  return true;
}

static bool RetCC_ARM_APCS_Custom_f64(unsigned &ValNo, EVT &ValVT, EVT &LocVT,
                                      CCValAssign::LocInfo &LocInfo,
                                      ISD::ArgFlagsTy &ArgFlags,
                                      CCState &State) {
  if (!f64RetAssign(ValNo, ValVT, LocVT, LocInfo, State))
    return false;
  if (LocVT == MVT::v2f64 && !f64RetAssign(ValNo, ValVT, LocVT, LocInfo, State))
    return false;
  return true;  // we handled it
}

static bool RetCC_ARM_AAPCS_Custom_f64(unsigned &ValNo, EVT &ValVT, EVT &LocVT,
                                       CCValAssign::LocInfo &LocInfo,
                                       ISD::ArgFlagsTy &ArgFlags,
                                       CCState &State) {
  return RetCC_ARM_APCS_Custom_f64(ValNo, ValVT, LocVT, LocInfo, ArgFlags,
                                   State);
}

/// CCAssignFnForNode - Selects the correct CCAssignFn for the given
/// CallingConvention value.
CCAssignFn *ARMTargetLowering::CCAssignFnForNode(CallingConv::ID CC,
                                                 bool Return,
                                                 bool isVarArg) const {
  switch (CC) {
  default:
    llvm_unreachable("Unsupported calling convention");
  case CallingConv::C:
  case CallingConv::Fast:
    // Use target triple & subtarget features to do actual dispatch.
    if (Subtarget->isAAPCS_ABI()) {
      if (Subtarget->hasVFP2() &&
          FloatABIType == FloatABI::Hard && !isVarArg)
        return (Return ? RetCC_ARM_AAPCS_VFP: CC_ARM_AAPCS_VFP);
      else
        return (Return ? RetCC_ARM_AAPCS: CC_ARM_AAPCS);
    } else
      return (Return ? RetCC_ARM_APCS: CC_ARM_APCS);
  case CallingConv::ARM_AAPCS_VFP:
    return (Return ? RetCC_ARM_AAPCS_VFP: CC_ARM_AAPCS_VFP);
  case CallingConv::ARM_AAPCS:
    return (Return ? RetCC_ARM_AAPCS: CC_ARM_AAPCS);
  case CallingConv::ARM_APCS:
    return (Return ? RetCC_ARM_APCS: CC_ARM_APCS);
  }
}

/// LowerCallResult - Lower the result values of a call into the
/// appropriate copies out of appropriate physical registers.
SDValue
ARMTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
                                   CallingConv::ID CallConv, bool isVarArg,
                                   const SmallVectorImpl<ISD::InputArg> &Ins,
                                   DebugLoc dl, SelectionDAG &DAG,
                                   SmallVectorImpl<SDValue> &InVals) {

  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, isVarArg, getTargetMachine(),
                 RVLocs, *DAG.getContext());
  CCInfo.AnalyzeCallResult(Ins,
                           CCAssignFnForNode(CallConv, /* Return*/ true,
                                             isVarArg));

  // Copy all of the result registers out of their specified physreg.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    CCValAssign VA = RVLocs[i];

    SDValue Val;
    if (VA.needsCustom()) {
      // Handle f64 or half of a v2f64.
      SDValue Lo = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32,
                                      InFlag);
      Chain = Lo.getValue(1);
      InFlag = Lo.getValue(2);
      VA = RVLocs[++i]; // skip ahead to next loc
      SDValue Hi = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32,
                                      InFlag);
      Chain = Hi.getValue(1);
      InFlag = Hi.getValue(2);
      Val = DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi);

      if (VA.getLocVT() == MVT::v2f64) {
        SDValue Vec = DAG.getNode(ISD::UNDEF, dl, MVT::v2f64);
        Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Vec, Val,
                          DAG.getConstant(0, MVT::i32));

        VA = RVLocs[++i]; // skip ahead to next loc
        Lo = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32, InFlag);
        Chain = Lo.getValue(1);
        InFlag = Lo.getValue(2);
        VA = RVLocs[++i]; // skip ahead to next loc
        Hi = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32, InFlag);
        Chain = Hi.getValue(1);
        InFlag = Hi.getValue(2);
        Val = DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi);
        Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Vec, Val,
                          DAG.getConstant(1, MVT::i32));
      }
    } else {
      Val = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), VA.getLocVT(),
                               InFlag);
      Chain = Val.getValue(1);
      InFlag = Val.getValue(2);
    }

    switch (VA.getLocInfo()) {
    default: llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::BCvt:
      Val = DAG.getNode(ISD::BIT_CONVERT, dl, VA.getValVT(), Val);
      break;
    }

    InVals.push_back(Val);
  }

  return Chain;
}

/// CreateCopyOfByValArgument - Make a copy of an aggregate at address specified
/// by "Src" to address "Dst" of size "Size".  Alignment information is
/// specified by the specific parameter attribute.  The copy will be passed as
/// a byval function parameter.
/// Sometimes what we are copying is the end of a larger object, the part that
/// does not fit in registers.
static SDValue
CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain,
                          ISD::ArgFlagsTy Flags, SelectionDAG &DAG,
                          DebugLoc dl) {
  SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), MVT::i32);
  return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(),
                       /*AlwaysInline=*/false, NULL, 0, NULL, 0);
}

/// LowerMemOpCallTo - Store the argument to the stack.
SDValue
ARMTargetLowering::LowerMemOpCallTo(SDValue Chain,
                                    SDValue StackPtr, SDValue Arg,
                                    DebugLoc dl, SelectionDAG &DAG,
                                    const CCValAssign &VA,
                                    ISD::ArgFlagsTy Flags) {
  unsigned LocMemOffset = VA.getLocMemOffset();
  SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset);
  PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, PtrOff);
  if (Flags.isByVal()) {
    return CreateCopyOfByValArgument(Arg, PtrOff, Chain, Flags, DAG, dl);
  }
  return DAG.getStore(Chain, dl, Arg, PtrOff,
                      PseudoSourceValue::getStack(), LocMemOffset);
}

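/// PassF64ArgInRegs - Split an f64 argument (or half of a v2f64) into an i32
/// pair with VMOVRRD and pass the halves in the locations assigned by VA and
/// NextVA, storing the second half to the stack when it has no register.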
void ARMTargetLowering::PassF64ArgInRegs(DebugLoc dl, SelectionDAG &DAG,
                                         SDValue Chain, SDValue &Arg,
                                         RegsToPassVector &RegsToPass,
                                         CCValAssign &VA, CCValAssign &NextVA,
                                         SDValue &StackPtr,
                                         SmallVector<SDValue, 8> &MemOpChains,
                                         ISD::ArgFlagsTy Flags) {

  SDValue fmrrd = DAG.getNode(ARMISD::VMOVRRD, dl,
                              DAG.getVTList(MVT::i32, MVT::i32), Arg);
  RegsToPass.push_back(std::make_pair(VA.getLocReg(), fmrrd));

  if (NextVA.isRegLoc())
    RegsToPass.push_back(std::make_pair(NextVA.getLocReg(), fmrrd.getValue(1)));
  else {
    assert(NextVA.isMemLoc());
    if (StackPtr.getNode() == 0)
      StackPtr = DAG.getCopyFromReg(Chain, dl, ARM::SP, getPointerTy());

    MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, fmrrd.getValue(1),
                                           dl, DAG, NextVA,
                                           Flags));
  }
}

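// Illustrative note (not part of the original source): with a soft-float
// calling convention an f64 argument may occupy a GPR pair (e.g. r0+r1 or
// r2+r3) or be split between the last available GPR and the stack.
// ARMISD::VMOVRRD produces the two 32-bit halves of the double; the code
// above covers both the register/register and register/memory placements.
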
/// LowerCall - Lowering a call into a callseq_start <-
/// ARMISD::CALL <- callseq_end chain. Also add input and output parameter
/// nodes.
SDValue
ARMTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
                             CallingConv::ID CallConv, bool isVarArg,
                             bool isTailCall,
                             const SmallVectorImpl<ISD::OutputArg> &Outs,
                             const SmallVectorImpl<ISD::InputArg> &Ins,
                             DebugLoc dl, SelectionDAG &DAG,
                             SmallVectorImpl<SDValue> &InVals) {

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, getTargetMachine(), ArgLocs,
                 *DAG.getContext());
  CCInfo.AnalyzeCallOperands(Outs,
                             CCAssignFnForNode(CallConv, /* Return */ false,
                                               isVarArg));

  // Get a count of how many bytes are to be pushed on the stack.
  unsigned NumBytes = CCInfo.getNextStackOffset();

  // Adjust the stack pointer for the new arguments...
  // These operations are automatically eliminated by the prolog/epilog pass.
  Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true));

  SDValue StackPtr = DAG.getRegister(ARM::SP, MVT::i32);

  RegsToPassVector RegsToPass;
  SmallVector<SDValue, 8> MemOpChains;

  // Walk the register/memloc assignments, inserting copies/loads.  In the case
  // of tail call optimization, arguments are handled later.
  for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size();
       i != e;
       ++i, ++realArgIdx) {
    CCValAssign &VA = ArgLocs[i];
    SDValue Arg = Outs[realArgIdx].Val;
    ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags;

    // Promote the value if needed.
    switch (VA.getLocInfo()) {
    default: llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::SExt:
      Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::ZExt:
      Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::AExt:
      Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::BCvt:
      Arg = DAG.getNode(ISD::BIT_CONVERT, dl, VA.getLocVT(), Arg);
      break;
    }

    // f64 and v2f64 might be passed in i32 pairs and must be split into pieces
    if (VA.needsCustom()) {
      if (VA.getLocVT() == MVT::v2f64) {
        SDValue Op0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
                                  DAG.getConstant(0, MVT::i32));
        SDValue Op1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
                                  DAG.getConstant(1, MVT::i32));

        PassF64ArgInRegs(dl, DAG, Chain, Op0, RegsToPass,
                         VA, ArgLocs[++i], StackPtr, MemOpChains, Flags);

        VA = ArgLocs[++i]; // skip ahead to next loc
        if (VA.isRegLoc()) {
          PassF64ArgInRegs(dl, DAG, Chain, Op1, RegsToPass,
                           VA, ArgLocs[++i], StackPtr, MemOpChains, Flags);
        } else {
          assert(VA.isMemLoc());
          if (StackPtr.getNode() == 0)
            StackPtr = DAG.getCopyFromReg(Chain, dl, ARM::SP, getPointerTy());

          MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Op1,
                                                 dl, DAG, VA, Flags));
        }
      } else {
        PassF64ArgInRegs(dl, DAG, Chain, Arg, RegsToPass, VA, ArgLocs[++i],
                         StackPtr, MemOpChains, Flags);
      }
    } else if (VA.isRegLoc()) {
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
    } else {
      assert(VA.isMemLoc());
      if (StackPtr.getNode() == 0)
        StackPtr = DAG.getCopyFromReg(Chain, dl, ARM::SP, getPointerTy());

      MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Arg,
                                             dl, DAG, VA, Flags));
    }
  }

  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                        &MemOpChains[0], MemOpChains.size());

  // Build a sequence of copy-to-reg nodes chained together with token chain
  // and flag operands which copy the outgoing args into the appropriate regs.
  SDValue InFlag;
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
                             RegsToPass[i].second, InFlag);
    InFlag = Chain.getValue(1);
  }

  // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every
  // direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol
  // node so that legalize doesn't hack it.
  bool isDirect = false;
  bool isARMFunc = false;
  bool isLocalARMFunc = false;
  MachineFunction &MF = DAG.getMachineFunction();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    GlobalValue *GV = G->getGlobal();
    isDirect = true;
    bool isExt = GV->isDeclaration() || GV->isWeakForLinker();
    bool isStub = (isExt && Subtarget->isTargetDarwin()) &&
                   getTargetMachine().getRelocationModel() != Reloc::Static;
    isARMFunc = !Subtarget->isThumb() || isStub;
    // ARM call to a local ARM function is predicable.
    isLocalARMFunc = !Subtarget->isThumb() && !isExt;
    // tBX takes a register source operand.
    if (isARMFunc && Subtarget->isThumb1Only() && !Subtarget->hasV5TOps()) {
      unsigned ARMPCLabelIndex = AFI->createConstPoolEntryUId();
      ARMConstantPoolValue *CPV = new ARMConstantPoolValue(GV,
                                                           ARMPCLabelIndex,
                                                           ARMCP::CPValue, 4);
      SDValue CPAddr = DAG.getTargetConstantPool(CPV, getPointerTy(), 4);
      CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
      Callee = DAG.getLoad(getPointerTy(), dl,
                           DAG.getEntryNode(), CPAddr,
                           PseudoSourceValue::getConstantPool(), 0);
      SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32);
      Callee = DAG.getNode(ARMISD::PIC_ADD, dl,
                           getPointerTy(), Callee, PICLabel);
    } else
      Callee = DAG.getTargetGlobalAddress(GV, getPointerTy());
  } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
    isDirect = true;
    bool isStub = Subtarget->isTargetDarwin() &&
                  getTargetMachine().getRelocationModel() != Reloc::Static;
    isARMFunc = !Subtarget->isThumb() || isStub;
    // tBX takes a register source operand.
    const char *Sym = S->getSymbol();
    if (isARMFunc && Subtarget->isThumb1Only() && !Subtarget->hasV5TOps()) {
      unsigned ARMPCLabelIndex = AFI->createConstPoolEntryUId();
      ARMConstantPoolValue *CPV = new ARMConstantPoolValue(*DAG.getContext(),
                                                           Sym, ARMPCLabelIndex, 4);
      SDValue CPAddr = DAG.getTargetConstantPool(CPV, getPointerTy(), 4);
      CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
      Callee = DAG.getLoad(getPointerTy(), dl,
                           DAG.getEntryNode(), CPAddr,
                           PseudoSourceValue::getConstantPool(), 0);
      SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32);
      Callee = DAG.getNode(ARMISD::PIC_ADD, dl,
                           getPointerTy(), Callee, PICLabel);
    } else
      Callee = DAG.getTargetExternalSymbol(Sym, getPointerTy());
  }

  // FIXME: handle tail calls differently.
  unsigned CallOpc;
  if (Subtarget->isThumb()) {
    if ((!isDirect || isARMFunc) && !Subtarget->hasV5TOps())
      CallOpc = ARMISD::CALL_NOLINK;
    else
      CallOpc = isARMFunc ? ARMISD::CALL : ARMISD::tCALL;
  } else {
    CallOpc = (isDirect || Subtarget->hasV5TOps())
      ? (isLocalARMFunc ? ARMISD::CALL_PRED : ARMISD::CALL)
      : ARMISD::CALL_NOLINK;
  }
  if (CallOpc == ARMISD::CALL_NOLINK && !Subtarget->isThumb1Only()) {
    // implicit def LR - LR mustn't be allocated as GPR:$dst of CALL_NOLINK
    Chain = DAG.getCopyToReg(Chain, dl, ARM::LR, DAG.getUNDEF(MVT::i32), InFlag);
    InFlag = Chain.getValue(1);
  }

  std::vector<SDValue> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);

  // Add argument registers to the end of the list so that they are known live
  // into the call.
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
    Ops.push_back(DAG.getRegister(RegsToPass[i].first,
                                  RegsToPass[i].second.getValueType()));

  if (InFlag.getNode())
    Ops.push_back(InFlag);
  // Returns a chain and a flag for retval copy to use.
  Chain = DAG.getNode(CallOpc, dl, DAG.getVTList(MVT::Other, MVT::Flag),
                      &Ops[0], Ops.size());
  InFlag = Chain.getValue(1);

  Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true),
                             DAG.getIntPtrConstant(0, true), InFlag);
  if (!Ins.empty())
    InFlag = Chain.getValue(1);

  // Handle result values, copying them out of physregs into vregs that we
  // return.
  return LowerCallResult(Chain, InFlag, CallConv, isVarArg, Ins,
                         dl, DAG, InVals);
}

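// Illustrative note (not part of the original source): for a simple direct
// call such as "int f(int); f(42);" the DAG built above is, roughly:
//   callseq_start(0 bytes) -> CopyToReg(R0, 42) -> ARMISD::CALL(f)
//     -> callseq_end -> copy of R0 back out via LowerCallResult.
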
SDValue
ARMTargetLowering::LowerReturn(SDValue Chain,
                               CallingConv::ID CallConv, bool isVarArg,
                               const SmallVectorImpl<ISD::OutputArg> &Outs,
                               DebugLoc dl, SelectionDAG &DAG) {

  // CCValAssign - represent the assignment of the return value to a location.
  SmallVector<CCValAssign, 16> RVLocs;

  // CCState - Info about the registers and stack slots.
  CCState CCInfo(CallConv, isVarArg, getTargetMachine(), RVLocs,
                 *DAG.getContext());

  // Analyze outgoing return values.
  CCInfo.AnalyzeReturn(Outs, CCAssignFnForNode(CallConv, /* Return */ true,
                                               isVarArg));

  // If this is the first return lowered for this function, add
  // the regs to the liveout set for the function.
  if (DAG.getMachineFunction().getRegInfo().liveout_empty()) {
    for (unsigned i = 0; i != RVLocs.size(); ++i)
      if (RVLocs[i].isRegLoc())
        DAG.getMachineFunction().getRegInfo().addLiveOut(RVLocs[i].getLocReg());
  }

  SDValue Flag;

  // Copy the result values into the output registers.
  for (unsigned i = 0, realRVLocIdx = 0;
       i != RVLocs.size();
       ++i, ++realRVLocIdx) {
    CCValAssign &VA = RVLocs[i];
    assert(VA.isRegLoc() && "Can only return in registers!");

    SDValue Arg = Outs[realRVLocIdx].Val;

    switch (VA.getLocInfo()) {
    default: llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::BCvt:
      Arg = DAG.getNode(ISD::BIT_CONVERT, dl, VA.getLocVT(), Arg);
      break;
    }

    if (VA.needsCustom()) {
      if (VA.getLocVT() == MVT::v2f64) {
        // Extract the first half and return it in two registers.
        SDValue Half = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
                                   DAG.getConstant(0, MVT::i32));
        SDValue HalfGPRs = DAG.getNode(ARMISD::VMOVRRD, dl,
                                       DAG.getVTList(MVT::i32, MVT::i32), Half);

        Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), HalfGPRs, Flag);
        Flag = Chain.getValue(1);
        VA = RVLocs[++i]; // skip ahead to next loc
        Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
                                 HalfGPRs.getValue(1), Flag);
        Flag = Chain.getValue(1);
        VA = RVLocs[++i]; // skip ahead to next loc

        // Extract the 2nd half and fall through to handle it as an f64 value.
        Arg = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
                          DAG.getConstant(1, MVT::i32));
      }
      // Legalize ret f64 -> ret 2 x i32. We always have fmrrd if f64 is
      // available.
      SDValue fmrrd = DAG.getNode(ARMISD::VMOVRRD, dl,
                                  DAG.getVTList(MVT::i32, MVT::i32), &Arg, 1);
      Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), fmrrd, Flag);
      Flag = Chain.getValue(1);
      VA = RVLocs[++i]; // skip ahead to next loc
      Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), fmrrd.getValue(1),
                               Flag);
    } else
      Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Arg, Flag);

    // Glue all emitted copies together via the flag so they cannot be
    // scheduled apart.
    Flag = Chain.getValue(1);
  }

  SDValue result;
  if (Flag.getNode())
    result = DAG.getNode(ARMISD::RET_FLAG, dl, MVT::Other, Chain, Flag);
  else // Return Void
    result = DAG.getNode(ARMISD::RET_FLAG, dl, MVT::Other, Chain);

  return result;
}

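// Illustrative note (not part of the original source): returning an f64 with
// a soft-float return convention goes through the VA.needsCustom() path
// above: ARMISD::VMOVRRD splits the double and the two halves are copied into
// the GPR pair named by consecutive RVLocs entries (typically R0 and R1).
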
// ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as
// their target counterpart wrapped in the ARMISD::Wrapper node. Suppose N is
// one of the above mentioned nodes. It has to be wrapped because otherwise
// Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only
// be used to form addressing modes. These wrapped nodes will be selected
// into MOVi.
static SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) {
  EVT PtrVT = Op.getValueType();
  // FIXME: there is no actual debug info here
  DebugLoc dl = Op.getDebugLoc();
  ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
  SDValue Res;
  if (CP->isMachineConstantPoolEntry())
    Res = DAG.getTargetConstantPool(CP->getMachineCPVal(), PtrVT,
                                    CP->getAlignment());
  else
    Res = DAG.getTargetConstantPool(CP->getConstVal(), PtrVT,
                                    CP->getAlignment());
  return DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Res);
}

SDValue ARMTargetLowering::LowerBlockAddress(SDValue Op, SelectionDAG &DAG) {
  MachineFunction &MF = DAG.getMachineFunction();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  unsigned ARMPCLabelIndex = 0;
  DebugLoc DL = Op.getDebugLoc();
  EVT PtrVT = getPointerTy();
  BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
  Reloc::Model RelocM = getTargetMachine().getRelocationModel();
  SDValue CPAddr;
  if (RelocM == Reloc::Static) {
    CPAddr = DAG.getTargetConstantPool(BA, PtrVT, 4);
  } else {
    unsigned PCAdj = Subtarget->isThumb() ? 4 : 8;
    ARMPCLabelIndex = AFI->createConstPoolEntryUId();
    ARMConstantPoolValue *CPV = new ARMConstantPoolValue(BA, ARMPCLabelIndex,
                                                         ARMCP::CPBlockAddress,
                                                         PCAdj);
    CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4);
  }
  CPAddr = DAG.getNode(ARMISD::Wrapper, DL, PtrVT, CPAddr);
  SDValue Result = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), CPAddr,
                               PseudoSourceValue::getConstantPool(), 0);
  if (RelocM == Reloc::Static)
    return Result;
  SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32);
  return DAG.getNode(ARMISD::PIC_ADD, DL, PtrVT, Result, PICLabel);
}

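// Illustrative note (not part of the original source): in PIC mode the
// constant-pool entry holds a PC-relative value, so PCAdj compensates for
// the pipeline offset when reading PC (8 bytes in ARM mode, 4 in Thumb), and
// ARMISD::PIC_ADD adds the PC at the label back onto the loaded value.
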
// Lower ISD::GlobalTLSAddress using the "general dynamic" model
SDValue
ARMTargetLowering::LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA,
                                                 SelectionDAG &DAG) {
  DebugLoc dl = GA->getDebugLoc();
  EVT PtrVT = getPointerTy();
  unsigned char PCAdj = Subtarget->isThumb() ? 4 : 8;
  MachineFunction &MF = DAG.getMachineFunction();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  unsigned ARMPCLabelIndex = AFI->createConstPoolEntryUId();
  ARMConstantPoolValue *CPV =
    new ARMConstantPoolValue(GA->getGlobal(), ARMPCLabelIndex,
                             ARMCP::CPValue, PCAdj, "tlsgd", true);
  SDValue Argument = DAG.getTargetConstantPool(CPV, PtrVT, 4);
  Argument = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Argument);
  Argument = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Argument,
                         PseudoSourceValue::getConstantPool(), 0);
  SDValue Chain = Argument.getValue(1);

  SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32);
  Argument = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Argument, PICLabel);

  // call __tls_get_addr.
  ArgListTy Args;
  ArgListEntry Entry;
  Entry.Node = Argument;
  Entry.Ty = (const Type *) Type::getInt32Ty(*DAG.getContext());
  Args.push_back(Entry);
  // FIXME: is there useful debug info available here?
  std::pair<SDValue, SDValue> CallResult =
    LowerCallTo(Chain, (const Type *) Type::getInt32Ty(*DAG.getContext()),
                false, false, false, false,
                0, CallingConv::C, false, /*isReturnValueUsed=*/true,
                DAG.getExternalSymbol("__tls_get_addr", PtrVT), Args, DAG, dl);
  return CallResult.first;
}

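// Illustrative note (not part of the original source): the emitted sequence
// is roughly "load the tlsgd offset from the constant pool, add the PIC
// label's PC, then call __tls_get_addr(arg)", which matches the usual ELF
// general-dynamic TLS protocol.
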
// Lower ISD::GlobalTLSAddress using the "initial exec" or
// "local exec" model.
SDValue
ARMTargetLowering::LowerToTLSExecModels(GlobalAddressSDNode *GA,
                                        SelectionDAG &DAG) {
  GlobalValue *GV = GA->getGlobal();
  DebugLoc dl = GA->getDebugLoc();
  SDValue Offset;
  SDValue Chain = DAG.getEntryNode();
  EVT PtrVT = getPointerTy();
  // Get the Thread Pointer
  SDValue ThreadPointer = DAG.getNode(ARMISD::THREAD_POINTER, dl, PtrVT);

  if (GV->isDeclaration()) {
    MachineFunction &MF = DAG.getMachineFunction();
    ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
    unsigned ARMPCLabelIndex = AFI->createConstPoolEntryUId();
    // Initial exec model.
    unsigned char PCAdj = Subtarget->isThumb() ? 4 : 8;
    ARMConstantPoolValue *CPV =
      new ARMConstantPoolValue(GA->getGlobal(), ARMPCLabelIndex,
                               ARMCP::CPValue, PCAdj, "gottpoff", true);
    Offset = DAG.getTargetConstantPool(CPV, PtrVT, 4);
    Offset = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Offset);
    Offset = DAG.getLoad(PtrVT, dl, Chain, Offset,
                         PseudoSourceValue::getConstantPool(), 0);
    Chain = Offset.getValue(1);

    SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32);
    Offset = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Offset, PICLabel);

    Offset = DAG.getLoad(PtrVT, dl, Chain, Offset,
                         PseudoSourceValue::getConstantPool(), 0);
  } else {
    // local exec model
    ARMConstantPoolValue *CPV = new ARMConstantPoolValue(GV, "tpoff");
    Offset = DAG.getTargetConstantPool(CPV, PtrVT, 4);
    Offset = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Offset);
    Offset = DAG.getLoad(PtrVT, dl, Chain, Offset,
                         PseudoSourceValue::getConstantPool(), 0);
  }

  // The address of the thread local variable is the add of the thread
  // pointer with the offset of the variable.
  return DAG.getNode(ISD::ADD, dl, PtrVT, ThreadPointer, Offset);
}

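// Illustrative note (not part of the original source): initial-exec fetches
// the variable's thread-pointer offset through the GOT ("gottpoff", hence
// the second load above), while local-exec reads the "tpoff" constant
// directly; either way the final address is thread pointer + offset.
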
SDValue
ARMTargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) {
  // TODO: implement the "local dynamic" model
  assert(Subtarget->isTargetELF() &&
         "TLS not implemented for non-ELF targets");
  GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
  // If the relocation model is PIC, use the "General Dynamic" TLS Model,
  // otherwise use the "Local Exec" TLS Model.
  if (getTargetMachine().getRelocationModel() == Reloc::PIC_)
    return LowerToTLSGeneralDynamicModel(GA, DAG);
  else
    return LowerToTLSExecModels(GA, DAG);
}

SDValue ARMTargetLowering::LowerGlobalAddressELF(SDValue Op,
                                                 SelectionDAG &DAG) {
  EVT PtrVT = getPointerTy();
  DebugLoc dl = Op.getDebugLoc();
  GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
  Reloc::Model RelocM = getTargetMachine().getRelocationModel();
  if (RelocM == Reloc::PIC_) {
    bool UseGOTOFF = GV->hasLocalLinkage() || GV->hasHiddenVisibility();
    ARMConstantPoolValue *CPV =
      new ARMConstantPoolValue(GV, UseGOTOFF ? "GOTOFF" : "GOT");
    SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4);
    CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
    SDValue Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
                                 CPAddr,
                                 PseudoSourceValue::getConstantPool(), 0);
    SDValue Chain = Result.getValue(1);
    SDValue GOT = DAG.getGLOBAL_OFFSET_TABLE(PtrVT);
    Result = DAG.getNode(ISD::ADD, dl, PtrVT, Result, GOT);
    if (!UseGOTOFF)
      Result = DAG.getLoad(PtrVT, dl, Chain, Result,
                           PseudoSourceValue::getGOT(), 0);
    return Result;
  } else {
    // If we have T2 ops, we can materialize the address directly via a
    // movt/movw pair. This is always cheaper.
    if (Subtarget->useMovt()) {
      return DAG.getNode(ARMISD::Wrapper, dl, PtrVT,
                         DAG.getTargetGlobalAddress(GV, PtrVT));
    } else {
      SDValue CPAddr = DAG.getTargetConstantPool(GV, PtrVT, 4);
      CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
      return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), CPAddr,
                         PseudoSourceValue::getConstantPool(), 0);
    }
  }
}

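// Illustrative note (not part of the original source): "GOTOFF" works for
// locally-binding symbols, where the address is simply GOT base plus a
// constant offset; "GOT" symbols may be preempted, so the final address must
// be loaded out of the GOT slot, hence the extra load when !UseGOTOFF.
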
SDValue ARMTargetLowering::LowerGlobalAddressDarwin(SDValue Op,
                                                    SelectionDAG &DAG) {
  MachineFunction &MF = DAG.getMachineFunction();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  unsigned ARMPCLabelIndex = 0;
  EVT PtrVT = getPointerTy();
  DebugLoc dl = Op.getDebugLoc();
  GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
  Reloc::Model RelocM = getTargetMachine().getRelocationModel();
  SDValue CPAddr;
  if (RelocM == Reloc::Static)
    CPAddr = DAG.getTargetConstantPool(GV, PtrVT, 4);
  else {
    ARMPCLabelIndex = AFI->createConstPoolEntryUId();
    unsigned PCAdj = (RelocM != Reloc::PIC_) ? 0 : (Subtarget->isThumb()?4:8);
    ARMConstantPoolValue *CPV =
      new ARMConstantPoolValue(GV, ARMPCLabelIndex, ARMCP::CPValue, PCAdj);
    CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4);
  }
  CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);

  SDValue Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), CPAddr,
                               PseudoSourceValue::getConstantPool(), 0);
  SDValue Chain = Result.getValue(1);

  if (RelocM == Reloc::PIC_) {
    SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32);
    Result = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Result, PICLabel);
  }

  if (Subtarget->GVIsIndirectSymbol(GV, RelocM))
    Result = DAG.getLoad(PtrVT, dl, Chain, Result,
                         PseudoSourceValue::getGOT(), 0);

  return Result;
}

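// Illustrative note (not part of the original source): on Darwin,
// GVIsIndirectSymbol reports symbols that must be reached through a
// non-lazy pointer; the final load above fetches the real address out of
// that indirection, analogous to a GOT load on ELF.
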
SDValue ARMTargetLowering::LowerGLOBAL_OFFSET_TABLE(SDValue Op,
                                                    SelectionDAG &DAG) {
  assert(Subtarget->isTargetELF() &&
         "GLOBAL OFFSET TABLE not implemented for non-ELF targets");
  MachineFunction &MF = DAG.getMachineFunction();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  unsigned ARMPCLabelIndex = AFI->createConstPoolEntryUId();
  EVT PtrVT = getPointerTy();
  DebugLoc dl = Op.getDebugLoc();
  unsigned PCAdj = Subtarget->isThumb() ? 4 : 8;
  ARMConstantPoolValue *CPV = new ARMConstantPoolValue(*DAG.getContext(),
                                                       "_GLOBAL_OFFSET_TABLE_",
                                                       ARMPCLabelIndex, PCAdj);
  SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4);
  CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
  SDValue Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), CPAddr,
                               PseudoSourceValue::getConstantPool(), 0);
  SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32);
  return DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Result, PICLabel);
}

SDValue
ARMTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) {
  unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  DebugLoc dl = Op.getDebugLoc();
  switch (IntNo) {
  default: return SDValue();    // Don't custom lower most intrinsics.
  case Intrinsic::arm_thread_pointer: {
    EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
    return DAG.getNode(ARMISD::THREAD_POINTER, dl, PtrVT);
  }
  case Intrinsic::eh_sjlj_lsda: {
    MachineFunction &MF = DAG.getMachineFunction();
    ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
    unsigned ARMPCLabelIndex = AFI->createConstPoolEntryUId();
    EVT PtrVT = getPointerTy();
    DebugLoc dl = Op.getDebugLoc();
    Reloc::Model RelocM = getTargetMachine().getRelocationModel();
    SDValue CPAddr;
    unsigned PCAdj = (RelocM != Reloc::PIC_)
      ? 0 : (Subtarget->isThumb() ? 4 : 8);
    ARMConstantPoolValue *CPV =
      new ARMConstantPoolValue(MF.getFunction(), ARMPCLabelIndex,
                               ARMCP::CPLSDA, PCAdj);
    CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4);
    CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
    SDValue Result =
      DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), CPAddr,
                  PseudoSourceValue::getConstantPool(), 0);
    SDValue Chain = Result.getValue(1);

    if (RelocM == Reloc::PIC_) {
      SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32);
      Result = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Result, PICLabel);
    }
    return Result;
  }
  case Intrinsic::eh_sjlj_setjmp:
    return DAG.getNode(ARMISD::EH_SJLJ_SETJMP, dl, MVT::i32, Op.getOperand(1));
  }
}

static SDValue LowerMEMBARRIER(SDValue Op, SelectionDAG &DAG) {
  DebugLoc dl = Op.getDebugLoc();
  SDValue Op5 = Op.getOperand(5);
  SDValue Res;
  unsigned isDeviceBarrier = cast<ConstantSDNode>(Op5)->getZExtValue();
  if (isDeviceBarrier) {
    Res = DAG.getNode(ARMISD::SYNCBARRIER, dl, MVT::Other,
                      Op.getOperand(0));
  } else {
    Res = DAG.getNode(ARMISD::MEMBARRIER, dl, MVT::Other,
                      Op.getOperand(0));
  }
  return Res;
}

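// Illustrative note (not part of the original source): operand 5 of the
// generic memory-barrier node distinguishes a device barrier from an
// ordinary one; the two ARMISD nodes simply carry that choice on the chain
// through to instruction selection.
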
static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG,
                            unsigned VarArgsFrameIndex) {
  // vastart just stores the address of the VarArgsFrameIndex slot into the
  // memory location argument.
  DebugLoc dl = Op.getDebugLoc();
  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
  SDValue FR = DAG.getFrameIndex(VarArgsFrameIndex, PtrVT);
  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
  return DAG.getStore(Op.getOperand(0), dl, FR, Op.getOperand(1), SV, 0);
}

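// Illustrative note (not part of the original source): on ARM the va_list is
// a single pointer, so va_start amounts to "*ap = &spill_area", where the
// spill area is the fixed frame slot named by VarArgsFrameIndex.
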
SDValue
ARMTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) {
  SDNode *Node = Op.getNode();
  DebugLoc dl = Node->getDebugLoc();
  EVT VT = Node->getValueType(0);
  SDValue Chain = Op.getOperand(0);
  SDValue Size = Op.getOperand(1);
  SDValue Align = Op.getOperand(2);

  // Chain the dynamic stack allocation so that it doesn't modify the stack
  // pointer when other instructions are using the stack.
  Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(0, true));

  unsigned AlignVal = cast<ConstantSDNode>(Align)->getZExtValue();
  unsigned StackAlign = getTargetMachine().getFrameInfo()->getStackAlignment();
  if (AlignVal > StackAlign)
    // Do this now since the selection pass cannot introduce new target
    // independent nodes.
    Align = DAG.getConstant(-(uint64_t)AlignVal, VT);

  // In Thumb1 mode, there isn't a "sub r, sp, r" instruction; we will end up
  // using an "add r, sp, r" instead. Negate the size now so we don't have to
  // do an even more horrible hack later.
  MachineFunction &MF = DAG.getMachineFunction();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  if (AFI->isThumb1OnlyFunction()) {
    bool Negate = true;
    ConstantSDNode *C = dyn_cast<ConstantSDNode>(Size);
    if (C) {
      uint32_t Val = C->getZExtValue();
      if (Val <= 508 && ((Val & 3) == 0))
        Negate = false;
    }
    if (Negate)
      Size = DAG.getNode(ISD::SUB, dl, VT, DAG.getConstant(0, VT), Size);
  }

  SDVTList VTList = DAG.getVTList(VT, MVT::Other);
  SDValue Ops1[] = { Chain, Size, Align };
  SDValue Res = DAG.getNode(ARMISD::DYN_ALLOC, dl, VTList, Ops1, 3);
  Chain = Res.getValue(1);
  Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(0, true),
                             DAG.getIntPtrConstant(0, true), SDValue());
  SDValue Ops2[] = { Res, Chain };
  return DAG.getMergeValues(Ops2, 2, dl);
}

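// Illustrative note (not part of the original source): Thumb1 has no
// register-register subtract from SP, so a variable allocation size is
// negated here and the allocation is later selected as "add r, sp, r";
// small constant sizes (at most 508 bytes, 4-byte aligned) fit the
// immediate form and skip the negation.
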
2009-06-22 23:27:02 +00:00
|
|
|
SDValue
ARMTargetLowering::GetF64FormalArgument(CCValAssign &VA, CCValAssign &NextVA,
                                        SDValue &Root, SelectionDAG &DAG,
                                        DebugLoc dl) {
  MachineFunction &MF = DAG.getMachineFunction();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();

  TargetRegisterClass *RC;
  if (AFI->isThumb1OnlyFunction())
    RC = ARM::tGPRRegisterClass;
  else
    RC = ARM::GPRRegisterClass;

  // Transform the arguments stored in physical registers into virtual ones.
  unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
  SDValue ArgValue = DAG.getCopyFromReg(Root, dl, Reg, MVT::i32);

  SDValue ArgValue2;
  if (NextVA.isMemLoc()) {
    unsigned ArgSize = NextVA.getLocVT().getSizeInBits()/8;
    MachineFrameInfo *MFI = MF.getFrameInfo();
    int FI = MFI->CreateFixedObject(ArgSize, NextVA.getLocMemOffset(),
                                    true, false);

    // Create a load node to retrieve the argument from the stack.
    SDValue FIN = DAG.getFrameIndex(FI, getPointerTy());
    ArgValue2 = DAG.getLoad(MVT::i32, dl, Root, FIN,
                            PseudoSourceValue::getFixedStack(FI), 0);
  } else {
    Reg = MF.addLiveIn(NextVA.getLocReg(), RC);
    ArgValue2 = DAG.getCopyFromReg(Root, dl, Reg, MVT::i32);
  }

  return DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, ArgValue, ArgValue2);
}
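
/// LowerFormalArguments - Lower the incoming (formal) arguments described by
/// the Ins array into the appropriate copies out of physical registers and
/// loads from the stack, appending the resulting values to InVals.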
SDValue
ARMTargetLowering::LowerFormalArguments(SDValue Chain,
                                        CallingConv::ID CallConv, bool isVarArg,
                                        const SmallVectorImpl<ISD::InputArg>
                                          &Ins,
                                        DebugLoc dl, SelectionDAG &DAG,
                                        SmallVectorImpl<SDValue> &InVals) {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();

  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, getTargetMachine(), ArgLocs,
                 *DAG.getContext());
  CCInfo.AnalyzeFormalArguments(Ins,
                                CCAssignFnForNode(CallConv, /* Return*/ false,
                                                  isVarArg));

  SmallVector<SDValue, 16> ArgValues;

  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];

    // Arguments stored in registers.
    if (VA.isRegLoc()) {
      EVT RegVT = VA.getLocVT();

      SDValue ArgValue;
      if (VA.needsCustom()) {
        // f64 and vector types are split up into multiple registers or
        // combinations of registers and stack slots.
        RegVT = MVT::i32;
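
        // A v2f64 location covers two f64 halves; pull each half out with
        // GetF64FormalArgument and insert them into a v2f64 vector.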
        if (VA.getLocVT() == MVT::v2f64) {
          SDValue ArgValue1 = GetF64FormalArgument(VA, ArgLocs[++i],
                                                   Chain, DAG, dl);
          VA = ArgLocs[++i]; // skip ahead to next loc
          SDValue ArgValue2 = GetF64FormalArgument(VA, ArgLocs[++i],
                                                   Chain, DAG, dl);
          ArgValue = DAG.getNode(ISD::UNDEF, dl, MVT::v2f64);
          ArgValue = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64,
                                 ArgValue, ArgValue1, DAG.getIntPtrConstant(0));
          ArgValue = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64,
                                 ArgValue, ArgValue2, DAG.getIntPtrConstant(1));
        } else
          ArgValue = GetF64FormalArgument(VA, ArgLocs[++i], Chain, DAG, dl);

      } else {
        TargetRegisterClass *RC;

        if (RegVT == MVT::f32)
          RC = ARM::SPRRegisterClass;
        else if (RegVT == MVT::f64)
          RC = ARM::DPRRegisterClass;
        else if (RegVT == MVT::v2f64)
          RC = ARM::QPRRegisterClass;
        else if (RegVT == MVT::i32)
          RC = (AFI->isThumb1OnlyFunction() ?
                ARM::tGPRRegisterClass : ARM::GPRRegisterClass);
        else
          llvm_unreachable("RegVT not supported by FORMAL_ARGUMENTS Lowering");

        // Transform the arguments in physical registers into virtual ones.
        unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
        ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT);
      }

      // If this is an 8 or 16-bit value, it is really passed promoted
      // to 32 bits.  Insert an assert[sz]ext to capture this, then
      // truncate to the right size.
      switch (VA.getLocInfo()) {
      default: llvm_unreachable("Unknown loc info!");
      case CCValAssign::Full: break;
      case CCValAssign::BCvt:
        ArgValue = DAG.getNode(ISD::BIT_CONVERT, dl, VA.getValVT(), ArgValue);
        break;
      case CCValAssign::SExt:
        ArgValue = DAG.getNode(ISD::AssertSext, dl, RegVT, ArgValue,
                               DAG.getValueType(VA.getValVT()));
        ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
        break;
      case CCValAssign::ZExt:
        ArgValue = DAG.getNode(ISD::AssertZext, dl, RegVT, ArgValue,
                               DAG.getValueType(VA.getValVT()));
        ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
        break;
      }

      InVals.push_back(ArgValue);

    } else { // VA.isRegLoc()

      // sanity check
      assert(VA.isMemLoc());
      assert(VA.getValVT() != MVT::i64 && "i64 should already be lowered");

      unsigned ArgSize = VA.getLocVT().getSizeInBits()/8;
      int FI = MFI->CreateFixedObject(ArgSize, VA.getLocMemOffset(),
                                      true, false);

      // Create load nodes to retrieve arguments from the stack.
      SDValue FIN = DAG.getFrameIndex(FI, getPointerTy());
      InVals.push_back(DAG.getLoad(VA.getValVT(), dl, Chain, FIN,
                                   PseudoSourceValue::getFixedStack(FI), 0));
    }
  }

  // varargs
  if (isVarArg) {
    static const unsigned GPRArgRegs[] = {
      ARM::R0, ARM::R1, ARM::R2, ARM::R3
    };

    unsigned NumGPRs = CCInfo.getFirstUnallocated
      (GPRArgRegs, sizeof(GPRArgRegs) / sizeof(GPRArgRegs[0]));
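
    // Bytes needed to save the remaining GPR argument registers, rounded up
    // to the stack alignment so the fixed save area keeps the frame aligned.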
    unsigned Align = MF.getTarget().getFrameInfo()->getStackAlignment();
    unsigned VARegSize = (4 - NumGPRs) * 4;
    unsigned VARegSaveSize = (VARegSize + Align - 1) & ~(Align - 1);
    unsigned ArgOffset = CCInfo.getNextStackOffset();
    if (VARegSaveSize) {
      // If this function is vararg, store any remaining integer argument regs
      // to their spots on the stack so that they may be loaded by dereferencing
      // the result of va_next.
      AFI->setVarArgsRegSaveSize(VARegSaveSize);
      VarArgsFrameIndex = MFI->CreateFixedObject(VARegSaveSize, ArgOffset +
                                                 VARegSaveSize - VARegSize,
                                                 true, false);
      SDValue FIN = DAG.getFrameIndex(VarArgsFrameIndex, getPointerTy());

      SmallVector<SDValue, 4> MemOps;
      for (; NumGPRs < 4; ++NumGPRs) {
        TargetRegisterClass *RC;
        if (AFI->isThumb1OnlyFunction())
          RC = ARM::tGPRRegisterClass;
        else
          RC = ARM::GPRRegisterClass;

        unsigned VReg = MF.addLiveIn(GPRArgRegs[NumGPRs], RC);
        SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
        SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
                       PseudoSourceValue::getFixedStack(VarArgsFrameIndex), 0);
        MemOps.push_back(Store);
        FIN = DAG.getNode(ISD::ADD, dl, getPointerTy(), FIN,
                          DAG.getConstant(4, getPointerTy()));
      }
      if (!MemOps.empty())
        Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                            &MemOps[0], MemOps.size());
    } else
      // This will point to the next argument passed via stack.
      VarArgsFrameIndex = MFI->CreateFixedObject(4, ArgOffset, true, false);
  }

  return Chain;
}

/// isFloatingPointZero - Return true if this is +0.0.
static bool isFloatingPointZero(SDValue Op) {
  if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op))
    return CFP->getValueAPF().isPosZero();
  else if (ISD::isEXTLoad(Op.getNode()) || ISD::isNON_EXTLoad(Op.getNode())) {
    // Maybe this has already been legalized into the constant pool?
    if (Op.getOperand(1).getOpcode() == ARMISD::Wrapper) {
      SDValue WrapperOp = Op.getOperand(1).getOperand(0);
      if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(WrapperOp))
        if (ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal()))
          return CFP->getValueAPF().isPosZero();
    }
  }
  return false;
}

/// Returns the appropriate ARM CMP (cmp) and corresponding condition code for
/// the given operands.
SDValue
ARMTargetLowering::getARMCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
                             SDValue &ARMCC, SelectionDAG &DAG, DebugLoc dl) {
  if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS.getNode())) {
    unsigned C = RHSC->getZExtValue();
    if (!isLegalICmpImmediate(C)) {
      // Constant does not fit, try adjusting it by one?
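      // E.g. (x < C) is the same as (x <= C-1), and (x <= C) the same as
      // (x < C+1), so a neighboring constant may be encodable as an
      // immediate even when C itself is not.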
      switch (CC) {
      default: break;
      case ISD::SETLT:
      case ISD::SETGE:
        if (isLegalICmpImmediate(C-1)) {
          CC = (CC == ISD::SETLT) ? ISD::SETLE : ISD::SETGT;
          RHS = DAG.getConstant(C-1, MVT::i32);
        }
        break;
      case ISD::SETULT:
      case ISD::SETUGE:
        if (C > 0 && isLegalICmpImmediate(C-1)) {
          CC = (CC == ISD::SETULT) ? ISD::SETULE : ISD::SETUGT;
          RHS = DAG.getConstant(C-1, MVT::i32);
        }
        break;
      case ISD::SETLE:
      case ISD::SETGT:
        if (isLegalICmpImmediate(C+1)) {
          CC = (CC == ISD::SETLE) ? ISD::SETLT : ISD::SETGE;
          RHS = DAG.getConstant(C+1, MVT::i32);
        }
        break;
      case ISD::SETULE:
      case ISD::SETUGT:
        if (C < 0xffffffff && isLegalICmpImmediate(C+1)) {
          CC = (CC == ISD::SETULE) ? ISD::SETULT : ISD::SETUGE;
          RHS = DAG.getConstant(C+1, MVT::i32);
        }
        break;
      }
    }
  }

  ARMCC::CondCodes CondCode = IntCCToARMCC(CC);
  ARMISD::NodeType CompareType;
  switch (CondCode) {
  default:
    CompareType = ARMISD::CMP;
    break;
  case ARMCC::EQ:
  case ARMCC::NE:
    // Uses only Z Flag
    CompareType = ARMISD::CMPZ;
    break;
  }
  ARMCC = DAG.getConstant(CondCode, MVT::i32);
  return DAG.getNode(CompareType, dl, MVT::Flag, LHS, RHS);
}

/// Returns an appropriate VFP CMP (fcmp{s|d}+fmstat) for the given operands.
static SDValue getVFPCmp(SDValue LHS, SDValue RHS, SelectionDAG &DAG,
                         DebugLoc dl) {
  SDValue Cmp;
  if (!isFloatingPointZero(RHS))
    Cmp = DAG.getNode(ARMISD::CMPFP, dl, MVT::Flag, LHS, RHS);
  else
    Cmp = DAG.getNode(ARMISD::CMPFPw0, dl, MVT::Flag, LHS);
  return DAG.getNode(ARMISD::FMSTAT, dl, MVT::Flag, Cmp);
}

SDValue ARMTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) {
  EVT VT = Op.getValueType();
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
  SDValue TrueVal = Op.getOperand(2);
  SDValue FalseVal = Op.getOperand(3);
  DebugLoc dl = Op.getDebugLoc();

  if (LHS.getValueType() == MVT::i32) {
    SDValue ARMCC;
    SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
    SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMCC, DAG, dl);
    return DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal, ARMCC, CCR,
                       Cmp);
  }

  ARMCC::CondCodes CondCode, CondCode2;
  FPCCToARMCC(CC, CondCode, CondCode2);

  SDValue ARMCC = DAG.getConstant(CondCode, MVT::i32);
  SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
  SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl);
  SDValue Result = DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal,
                               ARMCC, CCR, Cmp);
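  // Some FP condition codes (e.g. one, ueq) have no single ARM condition
  // equivalent and come back from FPCCToARMCC as a pair; apply the second
  // condition with another conditional move.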
  if (CondCode2 != ARMCC::AL) {
    SDValue ARMCC2 = DAG.getConstant(CondCode2, MVT::i32);
    // FIXME: Needs another CMP because flag can have but one use.
    SDValue Cmp2 = getVFPCmp(LHS, RHS, DAG, dl);
    Result = DAG.getNode(ARMISD::CMOV, dl, VT,
                         Result, TrueVal, ARMCC2, CCR, Cmp2);
  }
  return Result;
}

SDValue ARMTargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) {
  SDValue Chain = Op.getOperand(0);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
  SDValue LHS = Op.getOperand(2);
  SDValue RHS = Op.getOperand(3);
  SDValue Dest = Op.getOperand(4);
  DebugLoc dl = Op.getDebugLoc();

  if (LHS.getValueType() == MVT::i32) {
    SDValue ARMCC;
    SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
    SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMCC, DAG, dl);
    return DAG.getNode(ARMISD::BRCOND, dl, MVT::Other,
                       Chain, Dest, ARMCC, CCR, Cmp);
  }

  assert(LHS.getValueType() == MVT::f32 || LHS.getValueType() == MVT::f64);
  ARMCC::CondCodes CondCode, CondCode2;
  FPCCToARMCC(CC, CondCode, CondCode2);

  SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl);
  SDValue ARMCC = DAG.getConstant(CondCode, MVT::i32);
  SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
  SDVTList VTList = DAG.getVTList(MVT::Other, MVT::Flag);
  SDValue Ops[] = { Chain, Dest, ARMCC, CCR, Cmp };
  SDValue Res = DAG.getNode(ARMISD::BRCOND, dl, VTList, Ops, 5);
  if (CondCode2 != ARMCC::AL) {
    ARMCC = DAG.getConstant(CondCode2, MVT::i32);
    SDValue Ops[] = { Res, Dest, ARMCC, CCR, Res.getValue(1) };
    Res = DAG.getNode(ARMISD::BRCOND, dl, VTList, Ops, 5);
  }
  return Res;
}

SDValue ARMTargetLowering::LowerBR_JT(SDValue Op, SelectionDAG &DAG) {
  SDValue Chain = Op.getOperand(0);
  SDValue Table = Op.getOperand(1);
  SDValue Index = Op.getOperand(2);
  DebugLoc dl = Op.getDebugLoc();

  EVT PTy = getPointerTy();
  JumpTableSDNode *JT = cast<JumpTableSDNode>(Table);
  ARMFunctionInfo *AFI = DAG.getMachineFunction().getInfo<ARMFunctionInfo>();
  SDValue UId = DAG.getConstant(AFI->createJumpTableUId(), PTy);
  SDValue JTI = DAG.getTargetJumpTable(JT->getIndex(), PTy);
  Table = DAG.getNode(ARMISD::WrapperJT, dl, MVT::i32, JTI, UId);
  Index = DAG.getNode(ISD::MUL, dl, PTy, Index, DAG.getConstant(4, PTy));
  SDValue Addr = DAG.getNode(ISD::ADD, dl, PTy, Index, Table);
  if (Subtarget->isThumb2()) {
    // Thumb2 uses a two-level jump. That is, it jumps into the jump table
    // which does another jump to the destination. This also makes it easier
    // to translate it to TBB / TBH later.
    // FIXME: This might not work if the function is extremely large.
    return DAG.getNode(ARMISD::BR2_JT, dl, MVT::Other, Chain,
                       Addr, Op.getOperand(2), JTI, UId);
  }
  if (getTargetMachine().getRelocationModel() == Reloc::PIC_) {
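    // In PIC mode each jump-table entry holds an offset relative to the
    // table, so load the i32 entry and add the table base back in.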
    Addr = DAG.getLoad((EVT)MVT::i32, dl, Chain, Addr,
                       PseudoSourceValue::getJumpTable(), 0);
    Chain = Addr.getValue(1);
    Addr = DAG.getNode(ISD::ADD, dl, PTy, Addr, Table);
    return DAG.getNode(ARMISD::BR_JT, dl, MVT::Other, Chain, Addr, JTI, UId);
  } else {
    Addr = DAG.getLoad(PTy, dl, Chain, Addr,
                       PseudoSourceValue::getJumpTable(), 0);
    Chain = Addr.getValue(1);
    return DAG.getNode(ARMISD::BR_JT, dl, MVT::Other, Chain, Addr, JTI, UId);
  }
}
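
// FP_TO_[SU]INT is lowered to ARMISD::FTO[SU]I, which leaves the converted
// integer as a bit pattern in an f32 register; a final BIT_CONVERT
// reinterprets that pattern as an i32.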
static SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) {
  DebugLoc dl = Op.getDebugLoc();
  unsigned Opc =
    Op.getOpcode() == ISD::FP_TO_SINT ? ARMISD::FTOSI : ARMISD::FTOUI;
  Op = DAG.getNode(Opc, dl, MVT::f32, Op.getOperand(0));
  return DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, Op);
}

static SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) {
  EVT VT = Op.getValueType();
  DebugLoc dl = Op.getDebugLoc();
  unsigned Opc =
    Op.getOpcode() == ISD::SINT_TO_FP ? ARMISD::SITOF : ARMISD::UITOF;

  Op = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f32, Op.getOperand(0));
  return DAG.getNode(Opc, dl, VT, Op);
}

static SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) {
  // Implement fcopysign with a fabs and a conditional fneg.
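  // copysign(x, y) is |x| when y is non-negative and -|x| when y is negative,
  // so compare the sign operand against +0.0 and negate under ARMCC::LT.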
  SDValue Tmp0 = Op.getOperand(0);
  SDValue Tmp1 = Op.getOperand(1);
  DebugLoc dl = Op.getDebugLoc();
  EVT VT = Op.getValueType();
  EVT SrcVT = Tmp1.getValueType();
  SDValue AbsVal = DAG.getNode(ISD::FABS, dl, VT, Tmp0);
  SDValue Cmp = getVFPCmp(Tmp1, DAG.getConstantFP(0.0, SrcVT), DAG, dl);
  SDValue ARMCC = DAG.getConstant(ARMCC::LT, MVT::i32);
  SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
  return DAG.getNode(ARMISD::CNEG, dl, VT, AbsVal, AbsVal, ARMCC, CCR, Cmp);
}

SDValue ARMTargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) {
  MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
  MFI->setFrameAddressIsTaken(true);
  EVT VT = Op.getValueType();
  DebugLoc dl = Op.getDebugLoc();  // FIXME probably not meaningful
  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
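  // Thumb and Darwin targets use r7 as the frame pointer; other ARM targets
  // use r11. Walk up the chain of saved frame pointers for non-zero depths.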
  unsigned FrameReg = (Subtarget->isThumb() || Subtarget->isTargetDarwin())
    ? ARM::R7 : ARM::R11;
  SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT);
  while (Depth--)
    FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr, NULL, 0);
  return FrameAddr;
}

SDValue
ARMTargetLowering::EmitTargetCodeForMemcpy(SelectionDAG &DAG, DebugLoc dl,
                                           SDValue Chain,
                                           SDValue Dst, SDValue Src,
                                           SDValue Size, unsigned Align,
                                           bool AlwaysInline,
                                         const Value *DstSV, uint64_t DstSVOff,
                                         const Value *SrcSV, uint64_t SrcSVOff){
  // Do repeated 4-byte loads and stores. To be improved.
  // This requires 4-byte alignment.
  if ((Align & 3) != 0)
    return SDValue();
  // This requires the copy size to be a constant, preferably
  // within a subtarget-specific limit.
  ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
  if (!ConstantSize)
    return SDValue();
  uint64_t SizeVal = ConstantSize->getZExtValue();
  if (!AlwaysInline && SizeVal > getSubtarget()->getMaxInlineSizeThreshold())
    return SDValue();

  unsigned BytesLeft = SizeVal & 3;
  unsigned NumMemOps = SizeVal >> 2;
  unsigned EmittedNumMemOps = 0;
  EVT VT = MVT::i32;
  unsigned VTSize = 4;
  unsigned i = 0;
  const unsigned MAX_LOADS_IN_LDM = 6;
  SDValue TFOps[MAX_LOADS_IN_LDM];
  SDValue Loads[MAX_LOADS_IN_LDM];
  uint64_t SrcOff = 0, DstOff = 0;

  // Emit up to MAX_LOADS_IN_LDM loads, then a TokenFactor barrier, then the
  // same number of stores.  The loads and stores will get combined into
  // ldm/stm later on.
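  // For example, a 23-byte copy emits five i32 load/store pairs in one pass
  // of the loop below, then an i16 and an i8 pair for the three trailing
  // bytes.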
  while (EmittedNumMemOps < NumMemOps) {
    for (i = 0;
         i < MAX_LOADS_IN_LDM && EmittedNumMemOps + i < NumMemOps; ++i) {
      Loads[i] = DAG.getLoad(VT, dl, Chain,
                             DAG.getNode(ISD::ADD, dl, MVT::i32, Src,
                                         DAG.getConstant(SrcOff, MVT::i32)),
                             SrcSV, SrcSVOff + SrcOff);
      TFOps[i] = Loads[i].getValue(1);
      SrcOff += VTSize;
    }
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &TFOps[0], i);

    for (i = 0;
         i < MAX_LOADS_IN_LDM && EmittedNumMemOps + i < NumMemOps; ++i) {
      TFOps[i] = DAG.getStore(Chain, dl, Loads[i],
                              DAG.getNode(ISD::ADD, dl, MVT::i32, Dst,
                                          DAG.getConstant(DstOff, MVT::i32)),
                              DstSV, DstSVOff + DstOff);
      DstOff += VTSize;
    }
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &TFOps[0], i);

    EmittedNumMemOps += i;
  }

  if (BytesLeft == 0)
    return Chain;

  // Issue loads / stores for the trailing (1 - 3) bytes.
  unsigned BytesLeftSave = BytesLeft;
  i = 0;
  while (BytesLeft) {
    if (BytesLeft >= 2) {
      VT = MVT::i16;
      VTSize = 2;
    } else {
      VT = MVT::i8;
      VTSize = 1;
    }

    Loads[i] = DAG.getLoad(VT, dl, Chain,
                           DAG.getNode(ISD::ADD, dl, MVT::i32, Src,
                                       DAG.getConstant(SrcOff, MVT::i32)),
                           SrcSV, SrcSVOff + SrcOff);
    TFOps[i] = Loads[i].getValue(1);
    ++i;
    SrcOff += VTSize;
    BytesLeft -= VTSize;
  }
  Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &TFOps[0], i);

  i = 0;
  BytesLeft = BytesLeftSave;
  while (BytesLeft) {
    if (BytesLeft >= 2) {
      VT = MVT::i16;
      VTSize = 2;
    } else {
      VT = MVT::i8;
      VTSize = 1;
    }

    TFOps[i] = DAG.getStore(Chain, dl, Loads[i],
                            DAG.getNode(ISD::ADD, dl, MVT::i32, Dst,
                                        DAG.getConstant(DstOff, MVT::i32)),
                            DstSV, DstSVOff + DstOff);
    ++i;
    DstOff += VTSize;
    BytesLeft -= VTSize;
  }
  return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &TFOps[0], i);
}
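
/// ExpandBIT_CONVERT - Lower an i64<->f64 bitcast through VMOVDRR / VMOVRRD,
/// which move a pair of i32 GPR values into and out of a VFP double register.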
static SDValue ExpandBIT_CONVERT(SDNode *N, SelectionDAG &DAG) {
  SDValue Op = N->getOperand(0);
  DebugLoc dl = N->getDebugLoc();
  if (N->getValueType(0) == MVT::f64) {
    // Turn i64->f64 into VMOVDRR.
    SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Op,
                             DAG.getConstant(0, MVT::i32));
    SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Op,
                             DAG.getConstant(1, MVT::i32));
    return DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi);
  }

  // Turn f64->i64 into VMOVRRD.
  SDValue Cvt = DAG.getNode(ARMISD::VMOVRRD, dl,
                            DAG.getVTList(MVT::i32, MVT::i32), &Op, 1);

  // Merge the pieces into a single i64 value.
  return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Cvt, Cvt.getValue(1));
}

/// getZeroVector - Returns a vector of specified type with all zero elements.
///
static SDValue getZeroVector(EVT VT, SelectionDAG &DAG, DebugLoc dl) {
  assert(VT.isVector() && "Expected a vector type");

  // Zero vectors are used to represent vector negation and in those cases
  // will be implemented with the NEON VNEG instruction.  However, VNEG does
  // not support i64 elements, so sometimes the zero vectors will need to be
  // explicitly constructed.  For those cases, and potentially other uses in
  // the future, always build zero vectors as <16 x i8> or <8 x i8> bitcasted
  // to their dest type.  This ensures they get CSE'd.
  SDValue Vec;
  SDValue Cst = DAG.getTargetConstant(0, MVT::i8);
  SmallVector<SDValue, 8> Ops;
  MVT TVT;

  if (VT.getSizeInBits() == 64) {
    Ops.assign(8, Cst); TVT = MVT::v8i8;
  } else {
    Ops.assign(16, Cst); TVT = MVT::v16i8;
  }
  Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, TVT, &Ops[0], Ops.size());

  return DAG.getNode(ISD::BIT_CONVERT, dl, VT, Vec);
}

/// getOnesVector - Returns a vector of specified type with all bits set.
///
static SDValue getOnesVector(EVT VT, SelectionDAG &DAG, DebugLoc dl) {
  assert(VT.isVector() && "Expected a vector type");

  // Always build ones vectors as <16 x i8> or <8 x i8> bitcasted to their
  // dest type.  This ensures they get CSE'd.
  SDValue Vec;
  SDValue Cst = DAG.getTargetConstant(0xFF, MVT::i8);
  SmallVector<SDValue, 8> Ops;
  MVT TVT;

  if (VT.getSizeInBits() == 64) {
    Ops.assign(8, Cst); TVT = MVT::v8i8;
  } else {
    Ops.assign(16, Cst); TVT = MVT::v16i8;
  }
  Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, TVT, &Ops[0], Ops.size());

  return DAG.getNode(ISD::BIT_CONVERT, dl, VT, Vec);
}

/// LowerShiftRightParts - Lower SRA_PARTS and SRL_PARTS, which return two
/// i32 values and take a 2 x i32 value to shift plus a shift amount.
SDValue ARMTargetLowering::LowerShiftRightParts(SDValue Op, SelectionDAG &DAG) {
  assert(Op.getNumOperands() == 3 && "Not a double-shift!");
  EVT VT = Op.getValueType();
  unsigned VTBits = VT.getSizeInBits();
  DebugLoc dl = Op.getDebugLoc();
  SDValue ShOpLo = Op.getOperand(0);
  SDValue ShOpHi = Op.getOperand(1);
  SDValue ShAmt  = Op.getOperand(2);
  SDValue ARMCC;
  unsigned Opc = (Op.getOpcode() == ISD::SRA_PARTS) ? ISD::SRA : ISD::SRL;

  assert(Op.getOpcode() == ISD::SRA_PARTS || Op.getOpcode() == ISD::SRL_PARTS);
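
  // For a 64-bit right shift by ShAmt:
  //   ShAmt < 32:   Lo = (ShOpLo >>u ShAmt) | (ShOpHi << (32 - ShAmt))
  //   ShAmt >= 32:  Lo = ShOpHi >> (ShAmt - 32)     (SRA or SRL per Opc)
  // The CMOV below selects between the two forms based on whether
  // ExtraShAmt = ShAmt - 32 is non-negative.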
  SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32,
                                 DAG.getConstant(VTBits, MVT::i32), ShAmt);
  SDValue Tmp1 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, ShAmt);
  SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, ShAmt,
                                   DAG.getConstant(VTBits, MVT::i32));
  SDValue Tmp2 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, RevShAmt);
  SDValue FalseVal = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2);
  SDValue TrueVal = DAG.getNode(Opc, dl, VT, ShOpHi, ExtraShAmt);

  SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
  SDValue Cmp = getARMCmp(ExtraShAmt, DAG.getConstant(0, MVT::i32), ISD::SETGE,
                          ARMCC, DAG, dl);
  SDValue Hi = DAG.getNode(Opc, dl, VT, ShOpHi, ShAmt);
  SDValue Lo = DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal, ARMCC,
                           CCR, Cmp);

  SDValue Ops[2] = { Lo, Hi };
  return DAG.getMergeValues(Ops, 2, dl);
}

/// LowerShiftLeftParts - Lower SHL_PARTS, which returns two
/// i32 values and takes a 2 x i32 value to shift plus a shift amount.
SDValue ARMTargetLowering::LowerShiftLeftParts(SDValue Op, SelectionDAG &DAG) {
  assert(Op.getNumOperands() == 3 && "Not a double-shift!");
  EVT VT = Op.getValueType();
  unsigned VTBits = VT.getSizeInBits();
  DebugLoc dl = Op.getDebugLoc();
  SDValue ShOpLo = Op.getOperand(0);
  SDValue ShOpHi = Op.getOperand(1);
  SDValue ShAmt  = Op.getOperand(2);
  SDValue ARMCC;

  assert(Op.getOpcode() == ISD::SHL_PARTS);
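
  // Mirror image of the right-shift case:
  //   ShAmt < 32:   Hi = (ShOpHi << ShAmt) | (ShOpLo >>u (32 - ShAmt))
  //   ShAmt >= 32:  Hi = ShOpLo << (ShAmt - 32)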
  SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32,
                                 DAG.getConstant(VTBits, MVT::i32), ShAmt);
  SDValue Tmp1 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, RevShAmt);
  SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, ShAmt,
                                   DAG.getConstant(VTBits, MVT::i32));
  SDValue Tmp2 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, ShAmt);
  SDValue Tmp3 = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ExtraShAmt);

  SDValue FalseVal = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2);
  SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
  SDValue Cmp = getARMCmp(ExtraShAmt, DAG.getConstant(0, MVT::i32), ISD::SETGE,
                          ARMCC, DAG, dl);
  SDValue Lo = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ShAmt);
  SDValue Hi = DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, Tmp3, ARMCC,
                           CCR, Cmp);

  SDValue Ops[2] = { Lo, Hi };
  return DAG.getMergeValues(Ops, 2, dl);
}

static SDValue LowerShift(SDNode *N, SelectionDAG &DAG,
                          const ARMSubtarget *ST) {
  EVT VT = N->getValueType(0);
  DebugLoc dl = N->getDebugLoc();

  // Lower vector shifts on NEON to use VSHL.
  if (VT.isVector()) {
    assert(ST->hasNEON() && "unexpected vector shift");

    // Left shifts translate directly to the vshiftu intrinsic.
    if (N->getOpcode() == ISD::SHL)
      return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
                         DAG.getConstant(Intrinsic::arm_neon_vshiftu, MVT::i32),
                         N->getOperand(0), N->getOperand(1));

    assert((N->getOpcode() == ISD::SRA ||
            N->getOpcode() == ISD::SRL) && "unexpected vector shift opcode");

    // NEON uses the same intrinsics for both left and right shifts.  For
    // right shifts, the shift amounts are negative, so negate the vector of
    // shift amounts.
    EVT ShiftVT = N->getOperand(1).getValueType();
    SDValue NegatedCount = DAG.getNode(ISD::SUB, dl, ShiftVT,
                                       getZeroVector(ShiftVT, DAG, dl),
                                       N->getOperand(1));
    Intrinsic::ID vshiftInt = (N->getOpcode() == ISD::SRA ?
                               Intrinsic::arm_neon_vshifts :
                               Intrinsic::arm_neon_vshiftu);
    return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
                       DAG.getConstant(vshiftInt, MVT::i32),
                       N->getOperand(0), NegatedCount);
  }

  // We can get here for a node like i32 = ISD::SHL i32, i64.
  if (VT != MVT::i64)
    return SDValue();

  assert((N->getOpcode() == ISD::SRL || N->getOpcode() == ISD::SRA) &&
         "Unknown shift to lower!");

  // We only lower SRA, SRL of 1 here; all others use generic lowering.
  if (!isa<ConstantSDNode>(N->getOperand(1)) ||
      cast<ConstantSDNode>(N->getOperand(1))->getZExtValue() != 1)
    return SDValue();

  // If we are in thumb mode, we don't have RRX.
  if (ST->isThumb1Only()) return SDValue();

  // Okay, we have a 64-bit SRA or SRL of 1.  Lower this to an RRX expr.
  SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(0),
                           DAG.getConstant(0, MVT::i32));
  SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(0),
                           DAG.getConstant(1, MVT::i32));

  // First, build a SRA_FLAG/SRL_FLAG op, which shifts the top part by one and
  // captures the result into a carry flag.
  unsigned Opc = N->getOpcode() == ISD::SRL ? ARMISD::SRL_FLAG : ARMISD::SRA_FLAG;
  Hi = DAG.getNode(Opc, dl, DAG.getVTList(MVT::i32, MVT::Flag), &Hi, 1);

  // The low part is an ARMISD::RRX operand, which shifts the carry in.
  Lo = DAG.getNode(ARMISD::RRX, dl, MVT::i32, Lo, Hi.getValue(1));

  // Merge the pieces into a single i64 value.
  return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
}
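
// Roughly, "i64 %x lshr 1" then selects to something like:
//   movs r1, r1, lsr #1   @ SRL_FLAG: shift hi; the bit shifted out sets carry
//   mov  r0, r0, rrx      @ RRX: rotate the carry into bit 31 of lo
// (illustrative assembly; the exact spelling depends on the printer).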

static SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG) {
  SDValue TmpOp0, TmpOp1;
  bool Invert = false;
  bool Swap = false;
  unsigned Opc = 0;

  SDValue Op0 = Op.getOperand(0);
  SDValue Op1 = Op.getOperand(1);
  SDValue CC = Op.getOperand(2);
  EVT VT = Op.getValueType();
  ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get();
  DebugLoc dl = Op.getDebugLoc();

  if (Op.getOperand(1).getValueType().isFloatingPoint()) {
    switch (SetCCOpcode) {
    default: llvm_unreachable("Illegal FP comparison"); break;
    case ISD::SETUNE:
    case ISD::SETNE:  Invert = true; // Fallthrough
    case ISD::SETOEQ:
    case ISD::SETEQ:  Opc = ARMISD::VCEQ; break;
    case ISD::SETOLT:
    case ISD::SETLT:  Swap = true; // Fallthrough
    case ISD::SETOGT:
    case ISD::SETGT:  Opc = ARMISD::VCGT; break;
    case ISD::SETOLE:
    case ISD::SETLE:  Swap = true; // Fallthrough
    case ISD::SETOGE:
    case ISD::SETGE:  Opc = ARMISD::VCGE; break;
    case ISD::SETUGE: Swap = true; // Fallthrough
    case ISD::SETULE: Invert = true; Opc = ARMISD::VCGT; break;
    case ISD::SETUGT: Swap = true; // Fallthrough
    case ISD::SETULT: Invert = true; Opc = ARMISD::VCGE; break;
    case ISD::SETUEQ: Invert = true; // Fallthrough
    case ISD::SETONE:
      // Expand this to (OLT | OGT).
      TmpOp0 = Op0;
      TmpOp1 = Op1;
      Opc = ISD::OR;
      Op0 = DAG.getNode(ARMISD::VCGT, dl, VT, TmpOp1, TmpOp0);
      Op1 = DAG.getNode(ARMISD::VCGT, dl, VT, TmpOp0, TmpOp1);
      break;
    case ISD::SETUO: Invert = true; // Fallthrough
    case ISD::SETO:
      // Expand this to (OLT | OGE).
      TmpOp0 = Op0;
      TmpOp1 = Op1;
      Opc = ISD::OR;
      Op0 = DAG.getNode(ARMISD::VCGT, dl, VT, TmpOp1, TmpOp0);
      Op1 = DAG.getNode(ARMISD::VCGE, dl, VT, TmpOp0, TmpOp1);
      break;
    }
  } else {
    // Integer comparisons.
    switch (SetCCOpcode) {
    default: llvm_unreachable("Illegal integer comparison"); break;
    case ISD::SETNE:  Invert = true;
    case ISD::SETEQ:  Opc = ARMISD::VCEQ; break;
    case ISD::SETLT:  Swap = true;
    case ISD::SETGT:  Opc = ARMISD::VCGT; break;
    case ISD::SETLE:  Swap = true;
    case ISD::SETGE:  Opc = ARMISD::VCGE; break;
    case ISD::SETULT: Swap = true;
    case ISD::SETUGT: Opc = ARMISD::VCGTU; break;
    case ISD::SETULE: Swap = true;
    case ISD::SETUGE: Opc = ARMISD::VCGEU; break;
    }

    // Detect VTST (Vector Test Bits) = icmp ne (and (op0, op1), zero).
    if (Opc == ARMISD::VCEQ) {

      SDValue AndOp;
      if (ISD::isBuildVectorAllZeros(Op1.getNode()))
        AndOp = Op0;
      else if (ISD::isBuildVectorAllZeros(Op0.getNode()))
        AndOp = Op1;

      // Ignore bitconvert.
      if (AndOp.getNode() && AndOp.getOpcode() == ISD::BIT_CONVERT)
        AndOp = AndOp.getOperand(0);

      if (AndOp.getNode() && AndOp.getOpcode() == ISD::AND) {
        Opc = ARMISD::VTST;
        Op0 = DAG.getNode(ISD::BIT_CONVERT, dl, VT, AndOp.getOperand(0));
        Op1 = DAG.getNode(ISD::BIT_CONVERT, dl, VT, AndOp.getOperand(1));
        Invert = !Invert;
      }
    }
  }

  if (Swap)
    std::swap(Op0, Op1);

  SDValue Result = DAG.getNode(Opc, dl, VT, Op0, Op1);

  if (Invert)
    Result = DAG.getNOT(dl, Result, VT);

  return Result;
}
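
// Example: "setone" has no single NEON comparison, so it is expanded above to
// VCGT(b, a) | VCGT(a, b); SETUEQ falls through to the same expansion with
// Invert set, since unordered-or-equal is the complement of ordered-not-equal.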

/// isVMOVSplat - Check if the specified splat value corresponds to an immediate
/// VMOV instruction, and if so, return the constant being splatted.
static SDValue isVMOVSplat(uint64_t SplatBits, uint64_t SplatUndef,
                           unsigned SplatBitSize, SelectionDAG &DAG) {
  switch (SplatBitSize) {
  case 8:
    // Any 1-byte value is OK.
    assert((SplatBits & ~0xff) == 0 && "one byte splat value is too big");
    return DAG.getTargetConstant(SplatBits, MVT::i8);

  case 16:
    // NEON's 16-bit VMOV supports splat values where only one byte is nonzero.
    if ((SplatBits & ~0xff) == 0 ||
        (SplatBits & ~0xff00) == 0)
      return DAG.getTargetConstant(SplatBits, MVT::i16);
    break;

  case 32:
    // NEON's 32-bit VMOV supports splat values where:
    // * only one byte is nonzero, or
    // * the least significant byte is 0xff and the second byte is nonzero, or
    // * the least significant 2 bytes are 0xff and the third is nonzero.
    if ((SplatBits & ~0xff) == 0 ||
        (SplatBits & ~0xff00) == 0 ||
        (SplatBits & ~0xff0000) == 0 ||
        (SplatBits & ~0xff000000) == 0)
      return DAG.getTargetConstant(SplatBits, MVT::i32);

    if ((SplatBits & ~0xffff) == 0 &&
        ((SplatBits | SplatUndef) & 0xff) == 0xff)
      return DAG.getTargetConstant(SplatBits | 0xff, MVT::i32);

    if ((SplatBits & ~0xffffff) == 0 &&
        ((SplatBits | SplatUndef) & 0xffff) == 0xffff)
      return DAG.getTargetConstant(SplatBits | 0xffff, MVT::i32);

    // Note: there are a few 32-bit splat values (specifically: 00ffff00,
    // ff000000, ff0000ff, and ffff00ff) that are valid for VMOV.I64 but not
    // VMOV.I32.  A (very) minor optimization would be to replicate the value
    // and fall through here to test for a valid 64-bit splat.  But, then the
    // caller would also need to check and handle the change in size.
    break;

  case 64: {
    // NEON has a 64-bit VMOV splat where each byte is either 0 or 0xff.
    uint64_t BitMask = 0xff;
    uint64_t Val = 0;
    for (int ByteNum = 0; ByteNum < 8; ++ByteNum) {
      if (((SplatBits | SplatUndef) & BitMask) == BitMask)
        Val |= BitMask;
      else if ((SplatBits & BitMask) != 0)
        return SDValue();
      BitMask <<= 8;
    }
    return DAG.getTargetConstant(Val, MVT::i64);
  }

  default:
    llvm_unreachable("unexpected size for isVMOVSplat");
    break;
  }

  return SDValue();
}
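
// Examples: 0x00004100 is a valid VMOV.I32 splat (one nonzero byte), and
// 0x000041ff matches the "low byte all ones" form; a value such as 0x00ff00ff
// is rejected for 32 bits and, per the note above, is not re-tried as a
// 64-bit byte mask even though VMOV.I64 could encode it.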

/// getVMOVImm - If this is a build_vector of constants which can be
/// formed by using a VMOV instruction of the specified element size,
/// return the constant being splatted.  The ByteSize field indicates the
/// number of bytes of each element [1248].
SDValue ARM::getVMOVImm(SDNode *N, unsigned ByteSize, SelectionDAG &DAG) {
  BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(N);
  APInt SplatBits, SplatUndef;
  unsigned SplatBitSize;
  bool HasAnyUndefs;
  if (!BVN || !BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize,
                                    HasAnyUndefs, ByteSize * 8))
    return SDValue();

  if (SplatBitSize > ByteSize * 8)
    return SDValue();

  return isVMOVSplat(SplatBits.getZExtValue(), SplatUndef.getZExtValue(),
                     SplatBitSize, DAG);
}

static bool isVEXTMask(const SmallVectorImpl<int> &M, EVT VT,
                       bool &ReverseVEXT, unsigned &Imm) {
  unsigned NumElts = VT.getVectorNumElements();
  ReverseVEXT = false;
  Imm = M[0];

  // If this is a VEXT shuffle, the immediate value is the index of the first
  // element.  The other shuffle indices must be the successive elements after
  // the first one.
  unsigned ExpectedElt = Imm;
  for (unsigned i = 1; i < NumElts; ++i) {
    // Increment the expected index.  If it wraps around, it may still be
    // a VEXT but the source vectors must be swapped.
    ExpectedElt += 1;
    if (ExpectedElt == NumElts * 2) {
      ExpectedElt = 0;
      ReverseVEXT = true;
    }

    if (ExpectedElt != static_cast<unsigned>(M[i]))
      return false;
  }

  // Adjust the index value if the source operands will be swapped.
  if (ReverseVEXT)
    Imm -= NumElts;

  return true;
}
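
// Example: for v4i16, mask <1, 2, 3, 4> is a VEXT of V1:V2 with Imm = 1;
// mask <7, 0, 1, 2> wraps past the 8-element concatenation, so ReverseVEXT
// swaps the operands and Imm becomes 7 - 4 = 3.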

/// isVREVMask - Check if a vector shuffle corresponds to a VREV
/// instruction with the specified blocksize.  (The order of the elements
/// within each block of the vector is reversed.)
static bool isVREVMask(const SmallVectorImpl<int> &M, EVT VT,
                       unsigned BlockSize) {
  assert((BlockSize==16 || BlockSize==32 || BlockSize==64) &&
         "Only possible block sizes for VREV are: 16, 32, 64");

  unsigned EltSz = VT.getVectorElementType().getSizeInBits();
  if (EltSz == 64)
    return false;

  unsigned NumElts = VT.getVectorNumElements();
  unsigned BlockElts = M[0] + 1;

  if (BlockSize <= EltSz || BlockSize != BlockElts * EltSz)
    return false;

  for (unsigned i = 0; i < NumElts; ++i) {
    if ((unsigned) M[i] !=
        (i - i%BlockElts) + (BlockElts - 1 - i%BlockElts))
      return false;
  }

  return true;
}
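
// Example: for v8i8 with BlockSize 32, BlockElts = M[0] + 1 = 4 and the only
// accepted mask is <3, 2, 1, 0, 7, 6, 5, 4>: each 32-bit block has its four
// bytes reversed, which is what VREV32.8 performs.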

static bool isVTRNMask(const SmallVectorImpl<int> &M, EVT VT,
                       unsigned &WhichResult) {
  unsigned EltSz = VT.getVectorElementType().getSizeInBits();
  if (EltSz == 64)
    return false;

  unsigned NumElts = VT.getVectorNumElements();
  WhichResult = (M[0] == 0 ? 0 : 1);
  for (unsigned i = 0; i < NumElts; i += 2) {
    if ((unsigned) M[i] != i + WhichResult ||
        (unsigned) M[i+1] != i + NumElts + WhichResult)
      return false;
  }
  return true;
}
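
// Example: for v4i32, <0, 4, 2, 6> matches with WhichResult = 0 and
// <1, 5, 3, 7> with WhichResult = 1; these are the two results of VTRN, which
// transposes the even/odd element pairs of its two inputs in place.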

/// isVTRN_v_undef_Mask - Special case of isVTRNMask for canonical form of
/// "vector_shuffle v, v", i.e., "vector_shuffle v, undef".
/// Mask is e.g., <0, 0, 2, 2> instead of <0, 4, 2, 6>.
static bool isVTRN_v_undef_Mask(const SmallVectorImpl<int> &M, EVT VT,
                                unsigned &WhichResult) {
  unsigned EltSz = VT.getVectorElementType().getSizeInBits();
  if (EltSz == 64)
    return false;

  unsigned NumElts = VT.getVectorNumElements();
  WhichResult = (M[0] == 0 ? 0 : 1);
  for (unsigned i = 0; i < NumElts; i += 2) {
    if ((unsigned) M[i] != i + WhichResult ||
        (unsigned) M[i+1] != i + WhichResult)
      return false;
  }
  return true;
}

static bool isVUZPMask(const SmallVectorImpl<int> &M, EVT VT,
                       unsigned &WhichResult) {
  unsigned EltSz = VT.getVectorElementType().getSizeInBits();
  if (EltSz == 64)
    return false;

  unsigned NumElts = VT.getVectorNumElements();
  WhichResult = (M[0] == 0 ? 0 : 1);
  for (unsigned i = 0; i != NumElts; ++i) {
    if ((unsigned) M[i] != 2 * i + WhichResult)
      return false;
  }

  // VUZP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32.
  if (VT.is64BitVector() && EltSz == 32)
    return false;

  return true;
}

/// isVUZP_v_undef_Mask - Special case of isVUZPMask for canonical form of
/// "vector_shuffle v, v", i.e., "vector_shuffle v, undef".
/// Mask is e.g., <0, 2, 0, 2> instead of <0, 2, 4, 6>.
static bool isVUZP_v_undef_Mask(const SmallVectorImpl<int> &M, EVT VT,
                                unsigned &WhichResult) {
  unsigned EltSz = VT.getVectorElementType().getSizeInBits();
  if (EltSz == 64)
    return false;

  unsigned Half = VT.getVectorNumElements() / 2;
  WhichResult = (M[0] == 0 ? 0 : 1);
  for (unsigned j = 0; j != 2; ++j) {
    unsigned Idx = WhichResult;
    for (unsigned i = 0; i != Half; ++i) {
      if ((unsigned) M[i + j * Half] != Idx)
        return false;
      Idx += 2;
    }
  }

  // VUZP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32.
  if (VT.is64BitVector() && EltSz == 32)
    return false;

  return true;
}

static bool isVZIPMask(const SmallVectorImpl<int> &M, EVT VT,
                       unsigned &WhichResult) {
  unsigned EltSz = VT.getVectorElementType().getSizeInBits();
  if (EltSz == 64)
    return false;

  unsigned NumElts = VT.getVectorNumElements();
  WhichResult = (M[0] == 0 ? 0 : 1);
  unsigned Idx = WhichResult * NumElts / 2;
  for (unsigned i = 0; i != NumElts; i += 2) {
    if ((unsigned) M[i] != Idx ||
        (unsigned) M[i+1] != Idx + NumElts)
      return false;
    Idx += 1;
  }

  // VZIP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32.
  if (VT.is64BitVector() && EltSz == 32)
    return false;

  return true;
}
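
// Example: for v8i8, <0, 8, 1, 9, 2, 10, 3, 11> is the low VZIP result and
// <4, 12, 5, 13, 6, 14, 7, 15> the high one, interleaving the low (resp.
// high) halves of the two inputs.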

/// isVZIP_v_undef_Mask - Special case of isVZIPMask for canonical form of
/// "vector_shuffle v, v", i.e., "vector_shuffle v, undef".
/// Mask is e.g., <0, 0, 1, 1> instead of <0, 4, 1, 5>.
static bool isVZIP_v_undef_Mask(const SmallVectorImpl<int> &M, EVT VT,
                                unsigned &WhichResult) {
  unsigned EltSz = VT.getVectorElementType().getSizeInBits();
  if (EltSz == 64)
    return false;

  unsigned NumElts = VT.getVectorNumElements();
  WhichResult = (M[0] == 0 ? 0 : 1);
  unsigned Idx = WhichResult * NumElts / 2;
  for (unsigned i = 0; i != NumElts; i += 2) {
    if ((unsigned) M[i] != Idx ||
        (unsigned) M[i+1] != Idx)
      return false;
    Idx += 1;
  }

  // VZIP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32.
  if (VT.is64BitVector() && EltSz == 32)
    return false;

  return true;
}

static SDValue BuildSplat(SDValue Val, EVT VT, SelectionDAG &DAG, DebugLoc dl) {
  // Canonicalize all-zeros and all-ones vectors.
  ConstantSDNode *ConstVal = cast<ConstantSDNode>(Val.getNode());
  if (ConstVal->isNullValue())
    return getZeroVector(VT, DAG, dl);
  if (ConstVal->isAllOnesValue())
    return getOnesVector(VT, DAG, dl);

  EVT CanonicalVT;
  if (VT.is64BitVector()) {
    switch (Val.getValueType().getSizeInBits()) {
    case 8:  CanonicalVT = MVT::v8i8; break;
    case 16: CanonicalVT = MVT::v4i16; break;
    case 32: CanonicalVT = MVT::v2i32; break;
    case 64: CanonicalVT = MVT::v1i64; break;
    default: llvm_unreachable("unexpected splat element type"); break;
    }
  } else {
    assert(VT.is128BitVector() && "unknown splat vector size");
    switch (Val.getValueType().getSizeInBits()) {
    case 8:  CanonicalVT = MVT::v16i8; break;
    case 16: CanonicalVT = MVT::v8i16; break;
    case 32: CanonicalVT = MVT::v4i32; break;
    case 64: CanonicalVT = MVT::v2i64; break;
    default: llvm_unreachable("unexpected splat element type"); break;
    }
  }

  // Build a canonical splat for this value.
  SmallVector<SDValue, 8> Ops;
  Ops.assign(CanonicalVT.getVectorNumElements(), Val);
  SDValue Res = DAG.getNode(ISD::BUILD_VECTOR, dl, CanonicalVT, &Ops[0],
                            Ops.size());
  return DAG.getNode(ISD::BIT_CONVERT, dl, VT, Res);
}
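
// Example: splatting a 16-bit immediate into a v4f32 goes through a v8i16
// BUILD_VECTOR of eight copies followed by a BIT_CONVERT back to v4f32, so
// equal splats requested at different vector types CSE to the same node.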

// If this is a case we can't handle, return null and let the default
// expansion code take care of it.
static SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) {
  BuildVectorSDNode *BVN = cast<BuildVectorSDNode>(Op.getNode());
  DebugLoc dl = Op.getDebugLoc();
  EVT VT = Op.getValueType();

  APInt SplatBits, SplatUndef;
  unsigned SplatBitSize;
  bool HasAnyUndefs;
  if (BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) {
    if (SplatBitSize <= 64) {
      SDValue Val = isVMOVSplat(SplatBits.getZExtValue(),
                                SplatUndef.getZExtValue(), SplatBitSize, DAG);
      if (Val.getNode())
        return BuildSplat(Val, VT, DAG, dl);
    }
  }

  // If there are only 2 elements in a 128-bit vector, insert them into an
  // undef vector.  This handles the common case for 128-bit vector argument
  // passing, where the insertions should be translated to subreg accesses
  // with no real instructions.
  if (VT.is128BitVector() && Op.getNumOperands() == 2) {
    SDValue Val = DAG.getUNDEF(VT);
    SDValue Op0 = Op.getOperand(0);
    SDValue Op1 = Op.getOperand(1);
    if (Op0.getOpcode() != ISD::UNDEF)
      Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Val, Op0,
                        DAG.getIntPtrConstant(0));
    if (Op1.getOpcode() != ISD::UNDEF)
      Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Val, Op1,
                        DAG.getIntPtrConstant(1));
    return Val;
  }

  return SDValue();
}

/// isShuffleMaskLegal - Targets can use this to indicate that they only
/// support *some* VECTOR_SHUFFLE operations, those with specific masks.
/// By default, if a target supports the VECTOR_SHUFFLE node, all mask values
/// are assumed to be legal.
bool
ARMTargetLowering::isShuffleMaskLegal(const SmallVectorImpl<int> &M,
                                      EVT VT) const {
  if (VT.getVectorNumElements() == 4 &&
      (VT.is128BitVector() || VT.is64BitVector())) {
    unsigned PFIndexes[4];
    for (unsigned i = 0; i != 4; ++i) {
      if (M[i] < 0)
        PFIndexes[i] = 8;
      else
        PFIndexes[i] = M[i];
    }

    // Compute the index in the perfect shuffle table.
    unsigned PFTableIndex =
      PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3];
    unsigned PFEntry = PerfectShuffleTable[PFTableIndex];
    unsigned Cost = (PFEntry >> 30);

    if (Cost <= 4)
      return true;
  }

  bool ReverseVEXT;
  unsigned Imm, WhichResult;

  return (ShuffleVectorSDNode::isSplatMask(&M[0], VT) ||
          isVREVMask(M, VT, 64) ||
          isVREVMask(M, VT, 32) ||
          isVREVMask(M, VT, 16) ||
          isVEXTMask(M, VT, ReverseVEXT, Imm) ||
          isVTRNMask(M, VT, WhichResult) ||
          isVUZPMask(M, VT, WhichResult) ||
          isVZIPMask(M, VT, WhichResult) ||
          isVTRN_v_undef_Mask(M, VT, WhichResult) ||
          isVUZP_v_undef_Mask(M, VT, WhichResult) ||
          isVZIP_v_undef_Mask(M, VT, WhichResult));
}

/// GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit
/// the specified operations to build the shuffle.
static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS,
                                      SDValue RHS, SelectionDAG &DAG,
                                      DebugLoc dl) {
  unsigned OpNum = (PFEntry >> 26) & 0x0F;
  unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1);
  unsigned RHSID = (PFEntry >>  0) & ((1 << 13)-1);

  enum {
    OP_COPY = 0, // Copy, used for things like <u,u,u,3> to say it is <0,1,2,3>
    OP_VREV,
    OP_VDUP0,
    OP_VDUP1,
    OP_VDUP2,
    OP_VDUP3,
    OP_VEXT1,
    OP_VEXT2,
    OP_VEXT3,
    OP_VUZPL, // VUZP, left result
    OP_VUZPR, // VUZP, right result
    OP_VZIPL, // VZIP, left result
    OP_VZIPR, // VZIP, right result
    OP_VTRNL, // VTRN, left result
    OP_VTRNR  // VTRN, right result
  };

  if (OpNum == OP_COPY) {
    if (LHSID == (1*9+2)*9+3) return LHS;
    assert(LHSID == ((4*9+5)*9+6)*9+7 && "Illegal OP_COPY!");
    return RHS;
  }

  SDValue OpLHS, OpRHS;
  OpLHS = GeneratePerfectShuffle(PerfectShuffleTable[LHSID], LHS, RHS, DAG, dl);
  OpRHS = GeneratePerfectShuffle(PerfectShuffleTable[RHSID], LHS, RHS, DAG, dl);
  EVT VT = OpLHS.getValueType();

  switch (OpNum) {
  default: llvm_unreachable("Unknown shuffle opcode!");
  case OP_VREV:
    return DAG.getNode(ARMISD::VREV64, dl, VT, OpLHS);
  case OP_VDUP0:
  case OP_VDUP1:
  case OP_VDUP2:
  case OP_VDUP3:
    return DAG.getNode(ARMISD::VDUPLANE, dl, VT,
                       OpLHS, DAG.getConstant(OpNum-OP_VDUP0, MVT::i32));
  case OP_VEXT1:
  case OP_VEXT2:
  case OP_VEXT3:
    return DAG.getNode(ARMISD::VEXT, dl, VT,
                       OpLHS, OpRHS,
                       DAG.getConstant(OpNum-OP_VEXT1+1, MVT::i32));
  case OP_VUZPL:
  case OP_VUZPR:
    return DAG.getNode(ARMISD::VUZP, dl, DAG.getVTList(VT, VT),
                       OpLHS, OpRHS).getValue(OpNum-OP_VUZPL);
  case OP_VZIPL:
  case OP_VZIPR:
    return DAG.getNode(ARMISD::VZIP, dl, DAG.getVTList(VT, VT),
                       OpLHS, OpRHS).getValue(OpNum-OP_VZIPL);
  case OP_VTRNL:
  case OP_VTRNR:
    return DAG.getNode(ARMISD::VTRN, dl, DAG.getVTList(VT, VT),
                       OpLHS, OpRHS).getValue(OpNum-OP_VTRNL);
  }
}
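
// A PFEntry packs the cost in bits 31-30, the opcode in bits 29-26, and two
// 13-bit operand entry IDs.  OP_COPY leaves encode the identity masks
// <0,1,2,3> and <4,5,6,7> as base-9 numbers, which is why the decoder above
// compares LHSID against (1*9+2)*9+3 and ((4*9+5)*9+6)*9+7.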

static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) {
  SDValue V1 = Op.getOperand(0);
  SDValue V2 = Op.getOperand(1);
  DebugLoc dl = Op.getDebugLoc();
  EVT VT = Op.getValueType();
  ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op.getNode());
  SmallVector<int, 8> ShuffleMask;

  // Convert shuffles that are directly supported on NEON to target-specific
  // DAG nodes, instead of keeping them as shuffles and matching them again
  // during code selection.  This is more efficient and avoids the possibility
  // of inconsistencies between legalization and selection.
  // FIXME: floating-point vectors should be canonicalized to integer vectors
  // of the same type so that they get CSEd properly.
  SVN->getMask(ShuffleMask);

  if (ShuffleVectorSDNode::isSplatMask(&ShuffleMask[0], VT)) {
    int Lane = SVN->getSplatIndex();
    // If this is an undef splat, generate it via "just" vdup, if possible.
    if (Lane == -1) Lane = 0;

    if (Lane == 0 && V1.getOpcode() == ISD::SCALAR_TO_VECTOR) {
      return DAG.getNode(ARMISD::VDUP, dl, VT, V1.getOperand(0));
    }
    return DAG.getNode(ARMISD::VDUPLANE, dl, VT, V1,
                       DAG.getConstant(Lane, MVT::i32));
  }

  bool ReverseVEXT;
  unsigned Imm;
  if (isVEXTMask(ShuffleMask, VT, ReverseVEXT, Imm)) {
    if (ReverseVEXT)
      std::swap(V1, V2);
    return DAG.getNode(ARMISD::VEXT, dl, VT, V1, V2,
                       DAG.getConstant(Imm, MVT::i32));
  }

  if (isVREVMask(ShuffleMask, VT, 64))
    return DAG.getNode(ARMISD::VREV64, dl, VT, V1);
  if (isVREVMask(ShuffleMask, VT, 32))
    return DAG.getNode(ARMISD::VREV32, dl, VT, V1);
  if (isVREVMask(ShuffleMask, VT, 16))
    return DAG.getNode(ARMISD::VREV16, dl, VT, V1);

  // Check for NEON shuffles that modify both input vectors in place.
  // If both results are used, i.e., if there are two shuffles with the same
  // source operands and with masks corresponding to both results of one of
  // these operations, DAG memoization will ensure that a single node is
  // used for both shuffles.
  unsigned WhichResult;
  if (isVTRNMask(ShuffleMask, VT, WhichResult))
    return DAG.getNode(ARMISD::VTRN, dl, DAG.getVTList(VT, VT),
                       V1, V2).getValue(WhichResult);
  if (isVUZPMask(ShuffleMask, VT, WhichResult))
    return DAG.getNode(ARMISD::VUZP, dl, DAG.getVTList(VT, VT),
                       V1, V2).getValue(WhichResult);
  if (isVZIPMask(ShuffleMask, VT, WhichResult))
    return DAG.getNode(ARMISD::VZIP, dl, DAG.getVTList(VT, VT),
                       V1, V2).getValue(WhichResult);

  if (isVTRN_v_undef_Mask(ShuffleMask, VT, WhichResult))
    return DAG.getNode(ARMISD::VTRN, dl, DAG.getVTList(VT, VT),
                       V1, V1).getValue(WhichResult);
  if (isVUZP_v_undef_Mask(ShuffleMask, VT, WhichResult))
    return DAG.getNode(ARMISD::VUZP, dl, DAG.getVTList(VT, VT),
                       V1, V1).getValue(WhichResult);
  if (isVZIP_v_undef_Mask(ShuffleMask, VT, WhichResult))
    return DAG.getNode(ARMISD::VZIP, dl, DAG.getVTList(VT, VT),
                       V1, V1).getValue(WhichResult);

  // If the shuffle is not directly supported and it has 4 elements, use
  // the PerfectShuffle-generated table to synthesize it from other shuffles.
  if (VT.getVectorNumElements() == 4 &&
      (VT.is128BitVector() || VT.is64BitVector())) {
    unsigned PFIndexes[4];
    for (unsigned i = 0; i != 4; ++i) {
      if (ShuffleMask[i] < 0)
        PFIndexes[i] = 8;
      else
        PFIndexes[i] = ShuffleMask[i];
    }

    // Compute the index in the perfect shuffle table.
    unsigned PFTableIndex =
      PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3];

    unsigned PFEntry = PerfectShuffleTable[PFTableIndex];
    unsigned Cost = (PFEntry >> 30);

    if (Cost <= 4)
      return GeneratePerfectShuffle(PFEntry, V1, V2, DAG, dl);
  }

  return SDValue();
}

static SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) {
  EVT VT = Op.getValueType();
  DebugLoc dl = Op.getDebugLoc();
  SDValue Vec = Op.getOperand(0);
  SDValue Lane = Op.getOperand(1);
  assert(VT == MVT::i32 &&
         Vec.getValueType().getVectorElementType().getSizeInBits() < 32 &&
         "unexpected type for custom-lowering vector extract");
  return DAG.getNode(ARMISD::VGETLANEu, dl, MVT::i32, Vec, Lane);
}

static SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) {
  // The only time a CONCAT_VECTORS operation can have legal types is when
  // two 64-bit vectors are concatenated to a 128-bit vector.
  assert(Op.getValueType().is128BitVector() && Op.getNumOperands() == 2 &&
         "unexpected CONCAT_VECTORS");
  DebugLoc dl = Op.getDebugLoc();
  SDValue Val = DAG.getUNDEF(MVT::v2f64);
  SDValue Op0 = Op.getOperand(0);
  SDValue Op1 = Op.getOperand(1);
  if (Op0.getOpcode() != ISD::UNDEF)
    Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Val,
                      DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f64, Op0),
                      DAG.getIntPtrConstant(0));
  if (Op1.getOpcode() != ISD::UNDEF)
    Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Val,
                      DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f64, Op1),
                      DAG.getIntPtrConstant(1));
  return DAG.getNode(ISD::BIT_CONVERT, dl, Op.getValueType(), Val);
}

SDValue ARMTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) {
  switch (Op.getOpcode()) {
  default: llvm_unreachable("Don't know how to custom lower this!");
  case ISD::ConstantPool:  return LowerConstantPool(Op, DAG);
  case ISD::BlockAddress:  return LowerBlockAddress(Op, DAG);
  case ISD::GlobalAddress:
    return Subtarget->isTargetDarwin() ? LowerGlobalAddressDarwin(Op, DAG) :
      LowerGlobalAddressELF(Op, DAG);
  case ISD::GlobalTLSAddress:   return LowerGlobalTLSAddress(Op, DAG);
  case ISD::SELECT_CC:     return LowerSELECT_CC(Op, DAG);
  case ISD::BR_CC:         return LowerBR_CC(Op, DAG);
  case ISD::BR_JT:         return LowerBR_JT(Op, DAG);
  case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG);
  case ISD::VASTART:       return LowerVASTART(Op, DAG, VarArgsFrameIndex);
  case ISD::MEMBARRIER:    return LowerMEMBARRIER(Op, DAG);
  case ISD::SINT_TO_FP:
  case ISD::UINT_TO_FP:    return LowerINT_TO_FP(Op, DAG);
  case ISD::FP_TO_SINT:
  case ISD::FP_TO_UINT:    return LowerFP_TO_INT(Op, DAG);
  case ISD::FCOPYSIGN:     return LowerFCOPYSIGN(Op, DAG);
  case ISD::RETURNADDR:    break;
  case ISD::FRAMEADDR:     return LowerFRAMEADDR(Op, DAG);
  case ISD::GLOBAL_OFFSET_TABLE: return LowerGLOBAL_OFFSET_TABLE(Op, DAG);
  case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
  case ISD::BIT_CONVERT:   return ExpandBIT_CONVERT(Op.getNode(), DAG);
  case ISD::SHL:
  case ISD::SRL:
  case ISD::SRA:           return LowerShift(Op.getNode(), DAG, Subtarget);
  case ISD::SHL_PARTS:     return LowerShiftLeftParts(Op, DAG);
  case ISD::SRL_PARTS:
  case ISD::SRA_PARTS:     return LowerShiftRightParts(Op, DAG);
  case ISD::VSETCC:        return LowerVSETCC(Op, DAG);
  case ISD::BUILD_VECTOR:  return LowerBUILD_VECTOR(Op, DAG);
  case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG);
  case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG);
  case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG);
  }
  return SDValue();
}

/// ReplaceNodeResults - Replace the results of a node with an illegal result
/// type with new values built out of custom code.
void ARMTargetLowering::ReplaceNodeResults(SDNode *N,
                                           SmallVectorImpl<SDValue>&Results,
                                           SelectionDAG &DAG) {
  switch (N->getOpcode()) {
  default:
    llvm_unreachable("Don't know how to custom expand this!");
    return;
  case ISD::BIT_CONVERT:
    Results.push_back(ExpandBIT_CONVERT(N, DAG));
    return;
  case ISD::SRL:
  case ISD::SRA: {
    SDValue Res = LowerShift(N, DAG, Subtarget);
    if (Res.getNode())
      Results.push_back(Res);
    return;
  }
  }
}

//===----------------------------------------------------------------------===//
//                           ARM Scheduler Hooks
//===----------------------------------------------------------------------===//

MachineBasicBlock *
ARMTargetLowering::EmitAtomicCmpSwap(unsigned Size, MachineInstr *MI,
                                     MachineBasicBlock *BB) const {
  unsigned dest = MI->getOperand(0).getReg();
  unsigned ptr = MI->getOperand(1).getReg();
  unsigned oldval = MI->getOperand(2).getReg();
  unsigned newval = MI->getOperand(3).getReg();
  unsigned scratch = BB->getParent()->getRegInfo()
    .createVirtualRegister(ARM::GPRRegisterClass);
  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
  DebugLoc dl = MI->getDebugLoc();

  unsigned ldrOpc, strOpc;
  switch (Size) {
  default: llvm_unreachable("unsupported size for AtomicCmpSwap!");
  case 1: ldrOpc = ARM::LDREXB; strOpc = ARM::STREXB; break;
  case 2: ldrOpc = ARM::LDREXH; strOpc = ARM::STREXH; break;
  case 4: ldrOpc = ARM::LDREX;  strOpc = ARM::STREX;  break;
  }

  MachineFunction *MF = BB->getParent();
  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineFunction::iterator It = BB;
  ++It; // insert the new blocks after the current block

  MachineBasicBlock *loop1MBB = MF->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *loop2MBB = MF->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB);
  MF->insert(It, loop1MBB);
  MF->insert(It, loop2MBB);
  MF->insert(It, exitMBB);
  exitMBB->transferSuccessors(BB);

  // thisMBB:
  //   ...
  //   fallthrough --> loop1MBB
  BB->addSuccessor(loop1MBB);

  // loop1MBB:
  //   ldrex dest, [ptr]
  //   cmp dest, oldval
  //   bne exitMBB
  BB = loop1MBB;
  AddDefaultPred(BuildMI(BB, dl, TII->get(ldrOpc), dest).addReg(ptr));
  AddDefaultPred(BuildMI(BB, dl, TII->get(ARM::CMPrr))
                 .addReg(dest).addReg(oldval));
  BuildMI(BB, dl, TII->get(ARM::Bcc)).addMBB(exitMBB).addImm(ARMCC::NE)
    .addReg(ARM::CPSR);
  BB->addSuccessor(loop2MBB);
  BB->addSuccessor(exitMBB);

  // loop2MBB:
  //   strex scratch, newval, [ptr]
  //   cmp scratch, #0
  //   bne loop1MBB
  BB = loop2MBB;
  AddDefaultPred(BuildMI(BB, dl, TII->get(strOpc), scratch).addReg(newval)
                 .addReg(ptr));
  AddDefaultPred(BuildMI(BB, dl, TII->get(ARM::CMPri))
                 .addReg(scratch).addImm(0));
  BuildMI(BB, dl, TII->get(ARM::Bcc)).addMBB(loop1MBB).addImm(ARMCC::NE)
    .addReg(ARM::CPSR);
  BB->addSuccessor(loop1MBB);
  BB->addSuccessor(exitMBB);

  // exitMBB:
  //   ...
  BB = exitMBB;
  return BB;
}
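
// The strex writes 0 to scratch on success and 1 if the exclusive monitor was
// lost (e.g. another core wrote the location), in which case the cmp/bne in
// loop2MBB retries from the ldrex: the standard LL/SC compare-and-swap loop.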

MachineBasicBlock *
ARMTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
                                               MachineBasicBlock *BB,
                   DenseMap<MachineBasicBlock*, MachineBasicBlock*> *EM) const {
  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
  DebugLoc dl = MI->getDebugLoc();
  switch (MI->getOpcode()) {
  default:
    MI->dump();
    llvm_unreachable("Unexpected instr type to insert");

  case ARM::ATOMIC_CMP_SWAP_I8:  return EmitAtomicCmpSwap(1, MI, BB);
  case ARM::ATOMIC_CMP_SWAP_I16: return EmitAtomicCmpSwap(2, MI, BB);
  case ARM::ATOMIC_CMP_SWAP_I32: return EmitAtomicCmpSwap(4, MI, BB);

  case ARM::tMOVCCr_pseudo: {
    // To "insert" a SELECT_CC instruction, we actually have to insert the
    // diamond control-flow pattern.  The incoming instruction knows the
    // destination vreg to set, the condition code register to branch on, the
    // true/false values to select between, and a branch opcode to use.
    const BasicBlock *LLVM_BB = BB->getBasicBlock();
    MachineFunction::iterator It = BB;
    ++It;

    // thisMBB:
    //   ...
    //   TrueVal = ...
    //   cmpTY ccX, r1, r2
    //   bCC copy1MBB
    //   fallthrough --> copy0MBB
    MachineBasicBlock *thisMBB = BB;
    MachineFunction *F = BB->getParent();
    MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
    MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
    BuildMI(BB, dl, TII->get(ARM::tBcc)).addMBB(sinkMBB)
      .addImm(MI->getOperand(3).getImm()).addReg(MI->getOperand(4).getReg());
    F->insert(It, copy0MBB);
    F->insert(It, sinkMBB);
    // Update machine-CFG edges by first adding all successors of the current
    // block to the new block which will contain the Phi node for the select.
    // Also inform sdisel of the edge changes.
    for (MachineBasicBlock::succ_iterator I = BB->succ_begin(),
           E = BB->succ_end(); I != E; ++I) {
      EM->insert(std::make_pair(*I, sinkMBB));
      sinkMBB->addSuccessor(*I);
    }
    // Next, remove all successors of the current block, and add the true
    // and fallthrough blocks as its successors.
    while (!BB->succ_empty())
      BB->removeSuccessor(BB->succ_begin());
    BB->addSuccessor(copy0MBB);
    BB->addSuccessor(sinkMBB);

    // copy0MBB:
    //   %FalseValue = ...
    //   # fallthrough to sinkMBB
    BB = copy0MBB;

    // Update machine-CFG edges
    BB->addSuccessor(sinkMBB);

    // sinkMBB:
    //   %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
    //   ...
    BB = sinkMBB;
    BuildMI(BB, dl, TII->get(ARM::PHI), MI->getOperand(0).getReg())
      .addReg(MI->getOperand(1).getReg()).addMBB(copy0MBB)
      .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB);

    F->DeleteMachineInstr(MI);   // The pseudo instruction is gone now.
    return BB;
  }

  case ARM::tANDsp:
  case ARM::tADDspr_:
  case ARM::tSUBspi_:
  case ARM::t2SUBrSPi_:
  case ARM::t2SUBrSPi12_:
  case ARM::t2SUBrSPs_: {
    MachineFunction *MF = BB->getParent();
    unsigned DstReg = MI->getOperand(0).getReg();
    unsigned SrcReg = MI->getOperand(1).getReg();
    bool DstIsDead = MI->getOperand(0).isDead();
    bool SrcIsKill = MI->getOperand(1).isKill();

    if (SrcReg != ARM::SP) {
      // Copy the source to SP from virtual register.
      const TargetRegisterClass *RC = MF->getRegInfo().getRegClass(SrcReg);
      unsigned CopyOpc = (RC == ARM::tGPRRegisterClass)
        ? ARM::tMOVtgpr2gpr : ARM::tMOVgpr2gpr;
      BuildMI(BB, dl, TII->get(CopyOpc), ARM::SP)
        .addReg(SrcReg, getKillRegState(SrcIsKill));
    }

    unsigned OpOpc = 0;
    bool NeedPred = false, NeedCC = false, NeedOp3 = false;
    switch (MI->getOpcode()) {
    default:
      llvm_unreachable("Unexpected pseudo instruction!");
    case ARM::tANDsp:
      OpOpc = ARM::tAND;
      NeedPred = true;
      break;
    case ARM::tADDspr_:
      OpOpc = ARM::tADDspr;
      break;
    case ARM::tSUBspi_:
      OpOpc = ARM::tSUBspi;
      break;
    case ARM::t2SUBrSPi_:
      OpOpc = ARM::t2SUBrSPi;
      NeedPred = true; NeedCC = true;
      break;
    case ARM::t2SUBrSPi12_:
      OpOpc = ARM::t2SUBrSPi12;
      NeedPred = true;
      break;
    case ARM::t2SUBrSPs_:
      OpOpc = ARM::t2SUBrSPs;
      NeedPred = true; NeedCC = true; NeedOp3 = true;
      break;
    }
    MachineInstrBuilder MIB = BuildMI(BB, dl, TII->get(OpOpc), ARM::SP);
    if (OpOpc == ARM::tAND)
      AddDefaultT1CC(MIB);
    MIB.addReg(ARM::SP);
    MIB.addOperand(MI->getOperand(2));
    if (NeedOp3)
      MIB.addOperand(MI->getOperand(3));
    if (NeedPred)
      AddDefaultPred(MIB);
    if (NeedCC)
      AddDefaultCC(MIB);

    // Copy the result from SP to virtual register.
    const TargetRegisterClass *RC = MF->getRegInfo().getRegClass(DstReg);
    unsigned CopyOpc = (RC == ARM::tGPRRegisterClass)
      ? ARM::tMOVgpr2tgpr : ARM::tMOVgpr2gpr;
    BuildMI(BB, dl, TII->get(CopyOpc))
      .addReg(DstReg, getDefRegState(true) | getDeadRegState(DstIsDead))
      .addReg(ARM::SP);
    MF->DeleteMachineInstr(MI); // The pseudo instruction is gone now.
    return BB;
  }
  }
}

//===----------------------------------------------------------------------===//
//                           ARM Optimization Hooks
//===----------------------------------------------------------------------===//

static
SDValue combineSelectAndUse(SDNode *N, SDValue Slct, SDValue OtherOp,
                            TargetLowering::DAGCombinerInfo &DCI) {
  SelectionDAG &DAG = DCI.DAG;
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  EVT VT = N->getValueType(0);
  unsigned Opc = N->getOpcode();
  bool isSlctCC = Slct.getOpcode() == ISD::SELECT_CC;
  SDValue LHS = isSlctCC ? Slct.getOperand(2) : Slct.getOperand(1);
  SDValue RHS = isSlctCC ? Slct.getOperand(3) : Slct.getOperand(2);
  ISD::CondCode CC = ISD::SETCC_INVALID;

  if (isSlctCC) {
    CC = cast<CondCodeSDNode>(Slct.getOperand(4))->get();
  } else {
    SDValue CCOp = Slct.getOperand(0);
    if (CCOp.getOpcode() == ISD::SETCC)
      CC = cast<CondCodeSDNode>(CCOp.getOperand(2))->get();
  }

  bool DoXform = false;
  bool InvCC = false;
  assert((Opc == ISD::ADD || (Opc == ISD::SUB && Slct == N->getOperand(1))) &&
         "Bad input!");

  if (LHS.getOpcode() == ISD::Constant &&
      cast<ConstantSDNode>(LHS)->isNullValue()) {
    DoXform = true;
  } else if (CC != ISD::SETCC_INVALID &&
             RHS.getOpcode() == ISD::Constant &&
             cast<ConstantSDNode>(RHS)->isNullValue()) {
    std::swap(LHS, RHS);
    SDValue Op0 = Slct.getOperand(0);
    EVT OpVT = isSlctCC ? Op0.getValueType() :
                          Op0.getOperand(0).getValueType();
    bool isInt = OpVT.isInteger();
    CC = ISD::getSetCCInverse(CC, isInt);

    if (!TLI.isCondCodeLegal(CC, OpVT))
      return SDValue();         // Inverse operator isn't legal.

    DoXform = true;
    InvCC = true;
  }

  if (DoXform) {
    SDValue Result = DAG.getNode(Opc, RHS.getDebugLoc(), VT, OtherOp, RHS);
    if (isSlctCC)
      return DAG.getSelectCC(N->getDebugLoc(), OtherOp, Result,
                             Slct.getOperand(0), Slct.getOperand(1), CC);
    SDValue CCOp = Slct.getOperand(0);
    if (InvCC)
      CCOp = DAG.getSetCC(Slct.getDebugLoc(), CCOp.getValueType(),
                          CCOp.getOperand(0), CCOp.getOperand(1), CC);
    return DAG.getNode(ISD::SELECT, N->getDebugLoc(), VT,
                       CCOp, OtherOp, Result);
  }
  return SDValue();
}
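
// Worked example of the fold performed by combineSelectAndUse:
//   (add (select cc, 0, c), x) -> (select cc, x, (add x, c))
// Pulling the add inside the select makes the "0" arm a plain copy of x, so
// the whole expression can typically be emitted as one predicated add rather
// than a select followed by an add.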

/// PerformADDCombine - Target-specific dag combine xforms for ISD::ADD.
static SDValue PerformADDCombine(SDNode *N,
                                 TargetLowering::DAGCombinerInfo &DCI) {
  // added by evan in r37685 with no testcase.
  SDValue N0 = N->getOperand(0), N1 = N->getOperand(1);

  // fold (add (select cc, 0, c), x) -> (select cc, x, (add, x, c))
  if (N0.getOpcode() == ISD::SELECT && N0.getNode()->hasOneUse()) {
    SDValue Result = combineSelectAndUse(N, N0, N1, DCI);
    if (Result.getNode()) return Result;
  }
  if (N1.getOpcode() == ISD::SELECT && N1.getNode()->hasOneUse()) {
    SDValue Result = combineSelectAndUse(N, N1, N0, DCI);
    if (Result.getNode()) return Result;
  }

  return SDValue();
}
|
|
|
|
|
|
|
|
/// PerformSUBCombine - Target-specific dag combine xforms for ISD::SUB.
|
|
|
|
static SDValue PerformSUBCombine(SDNode *N,
|
|
|
|
TargetLowering::DAGCombinerInfo &DCI) {
|
|
|
|
// added by evan in r37685 with no testcase.
|
|
|
|
SDValue N0 = N->getOperand(0), N1 = N->getOperand(1);
|
2009-03-20 22:42:55 +00:00
|
|
|
|
2009-03-12 06:52:53 +00:00
|
|
|
// fold (sub x, (select cc, 0, c)) -> (select cc, x, (sub, x, c))
|
|
|
|
if (N1.getOpcode() == ISD::SELECT && N1.getNode()->hasOneUse()) {
|
|
|
|
SDValue Result = combineSelectAndUse(N, N1, N0, DCI);
|
|
|
|
if (Result.getNode()) return Result;
|
|
|
|
}
|
2009-03-20 22:42:55 +00:00
|
|
|
|
2009-03-12 06:52:53 +00:00
|
|
|
return SDValue();
|
|
|
|
}
|
|
|
|
|
2009-11-09 00:11:35 +00:00
|
|
|
/// PerformVMOVRRDCombine - Target-specific dag combine xforms for
/// ARMISD::VMOVRRD.
|
|
|
|
static SDValue PerformVMOVRRDCombine(SDNode *N,
|
2009-03-20 22:42:55 +00:00
|
|
|
TargetLowering::DAGCombinerInfo &DCI) {
|
2007-11-27 22:36:16 +00:00
|
|
|
// vmovrrd(vmovdrr x, y) -> x, y
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue InDouble = N->getOperand(0);
|
2009-11-09 00:11:35 +00:00
|
|
|
if (InDouble.getOpcode() == ARMISD::VMOVDRR)
|
2007-11-27 22:36:16 +00:00
|
|
|
return DCI.CombineTo(N, InDouble.getOperand(0), InDouble.getOperand(1));
|
2008-07-27 21:46:04 +00:00
|
|
|
return SDValue();
|
2007-11-27 22:36:16 +00:00
|
|
|
}
|
|
|
|
|
2009-06-22 23:27:02 +00:00
|
|
|
/// getVShiftImm - Check if this is a valid build_vector for the immediate
|
|
|
|
/// operand of a vector shift operation, where all the elements of the
|
|
|
|
/// build_vector must have the same constant integer value.
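/// For example, a v4i32 build_vector with all four elements equal to the
/// constant 3 yields Cnt = 3.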
|
|
|
|
static bool getVShiftImm(SDValue Op, unsigned ElementBits, int64_t &Cnt) {
|
|
|
|
// Ignore bit_converts.
|
|
|
|
while (Op.getOpcode() == ISD::BIT_CONVERT)
|
|
|
|
Op = Op.getOperand(0);
|
|
|
|
BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode());
|
|
|
|
APInt SplatBits, SplatUndef;
|
|
|
|
unsigned SplatBitSize;
|
|
|
|
bool HasAnyUndefs;
|
|
|
|
if (!BVN || !BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize,
|
|
|
|
HasAnyUndefs, ElementBits) ||
|
|
|
|
SplatBitSize > ElementBits)
|
|
|
|
return false;
|
|
|
|
Cnt = SplatBits.getSExtValue();
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
/// isVShiftLImm - Check if this is a valid build_vector for the immediate
|
|
|
|
/// operand of a vector shift left operation. That value must be in the range:
|
|
|
|
/// 0 <= Value < ElementBits for a left shift; or
|
|
|
|
/// 0 <= Value <= ElementBits for a long left shift.
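/// For example, with v8i16 operands, 0-15 is valid for VSHL and 0-16 for VSHLL.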
|
2009-08-10 22:56:29 +00:00
|
|
|
static bool isVShiftLImm(SDValue Op, EVT VT, bool isLong, int64_t &Cnt) {
|
2009-06-22 23:27:02 +00:00
|
|
|
assert(VT.isVector() && "vector shift count is not a vector type");
|
|
|
|
unsigned ElementBits = VT.getVectorElementType().getSizeInBits();
|
|
|
|
if (!getVShiftImm(Op, ElementBits, Cnt))
|
|
|
|
return false;
|
|
|
|
return (Cnt >= 0 && (isLong ? Cnt-1 : Cnt) < ElementBits);
|
|
|
|
}
|
|
|
|
|
|
|
|
/// isVShiftRImm - Check if this is a valid build_vector for the immediate
|
|
|
|
/// operand of a vector shift right operation. For a shift opcode, the value
|
|
|
|
/// is positive, but for an intrinsic the value must be negative. The
|
|
|
|
/// absolute value must be in the range:
|
|
|
|
/// 1 <= |Value| <= ElementBits for a right shift; or
|
|
|
|
/// 1 <= |Value| <= ElementBits/2 for a narrow right shift.
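/// For example, with v8i16 operands, 1-16 is valid for VSHR and 1-8 for VSHRN.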
|
2009-08-10 22:56:29 +00:00
|
|
|
static bool isVShiftRImm(SDValue Op, EVT VT, bool isNarrow, bool isIntrinsic,
|
2009-06-22 23:27:02 +00:00
|
|
|
int64_t &Cnt) {
|
|
|
|
assert(VT.isVector() && "vector shift count is not a vector type");
|
|
|
|
unsigned ElementBits = VT.getVectorElementType().getSizeInBits();
|
|
|
|
if (!getVShiftImm(Op, ElementBits, Cnt))
|
|
|
|
return false;
|
|
|
|
if (isIntrinsic)
|
|
|
|
Cnt = -Cnt;
|
|
|
|
return (Cnt >= 1 && Cnt <= (isNarrow ? ElementBits/2 : ElementBits));
|
|
|
|
}
|
|
|
|
|
|
|
|
/// PerformIntrinsicCombine - ARM-specific DAG combining for intrinsics.
|
|
|
|
static SDValue PerformIntrinsicCombine(SDNode *N, SelectionDAG &DAG) {
|
|
|
|
unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
|
|
|
|
switch (IntNo) {
|
|
|
|
default:
|
|
|
|
// Don't do anything for most intrinsics.
|
|
|
|
break;
|
|
|
|
|
|
|
|
// Vector shifts: check for immediate versions and lower them.
|
|
|
|
// Note: This is done during DAG combining instead of DAG legalizing because
|
|
|
|
// the build_vectors for 64-bit vector element shift counts are generally
|
|
|
|
// not legal, and it is hard to see their values after they get legalized to
|
|
|
|
// loads from a constant pool.
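// For example, an arm_neon_vshifts whose shift-amount operand is a splat of
// the constant 3 is rewritten here as (VSHL x, 3) with an i32 immediate.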
|
|
|
|
case Intrinsic::arm_neon_vshifts:
|
|
|
|
case Intrinsic::arm_neon_vshiftu:
|
|
|
|
case Intrinsic::arm_neon_vshiftls:
|
|
|
|
case Intrinsic::arm_neon_vshiftlu:
|
|
|
|
case Intrinsic::arm_neon_vshiftn:
|
|
|
|
case Intrinsic::arm_neon_vrshifts:
|
|
|
|
case Intrinsic::arm_neon_vrshiftu:
|
|
|
|
case Intrinsic::arm_neon_vrshiftn:
|
|
|
|
case Intrinsic::arm_neon_vqshifts:
|
|
|
|
case Intrinsic::arm_neon_vqshiftu:
|
|
|
|
case Intrinsic::arm_neon_vqshiftsu:
|
|
|
|
case Intrinsic::arm_neon_vqshiftns:
|
|
|
|
case Intrinsic::arm_neon_vqshiftnu:
|
|
|
|
case Intrinsic::arm_neon_vqshiftnsu:
|
|
|
|
case Intrinsic::arm_neon_vqrshiftns:
|
|
|
|
case Intrinsic::arm_neon_vqrshiftnu:
|
|
|
|
case Intrinsic::arm_neon_vqrshiftnsu: {
|
2009-08-10 22:56:29 +00:00
|
|
|
EVT VT = N->getOperand(1).getValueType();
|
2009-06-22 23:27:02 +00:00
|
|
|
int64_t Cnt;
|
|
|
|
unsigned VShiftOpc = 0;
|
|
|
|
|
|
|
|
switch (IntNo) {
|
|
|
|
case Intrinsic::arm_neon_vshifts:
|
|
|
|
case Intrinsic::arm_neon_vshiftu:
|
|
|
|
if (isVShiftLImm(N->getOperand(2), VT, false, Cnt)) {
|
|
|
|
VShiftOpc = ARMISD::VSHL;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
if (isVShiftRImm(N->getOperand(2), VT, false, true, Cnt)) {
|
|
|
|
VShiftOpc = (IntNo == Intrinsic::arm_neon_vshifts ?
|
|
|
|
ARMISD::VSHRs : ARMISD::VSHRu);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
return SDValue();
|
|
|
|
|
|
|
|
case Intrinsic::arm_neon_vshiftls:
|
|
|
|
case Intrinsic::arm_neon_vshiftlu:
|
|
|
|
if (isVShiftLImm(N->getOperand(2), VT, true, Cnt))
|
|
|
|
break;
|
2009-07-14 16:55:14 +00:00
|
|
|
llvm_unreachable("invalid shift count for vshll intrinsic");
|
2009-06-22 23:27:02 +00:00
|
|
|
|
|
|
|
case Intrinsic::arm_neon_vrshifts:
|
|
|
|
case Intrinsic::arm_neon_vrshiftu:
|
|
|
|
if (isVShiftRImm(N->getOperand(2), VT, false, true, Cnt))
|
|
|
|
break;
|
|
|
|
return SDValue();
|
|
|
|
|
|
|
|
case Intrinsic::arm_neon_vqshifts:
|
|
|
|
case Intrinsic::arm_neon_vqshiftu:
|
|
|
|
if (isVShiftLImm(N->getOperand(2), VT, false, Cnt))
|
|
|
|
break;
|
|
|
|
return SDValue();
|
|
|
|
|
|
|
|
case Intrinsic::arm_neon_vqshiftsu:
|
|
|
|
if (isVShiftLImm(N->getOperand(2), VT, false, Cnt))
|
|
|
|
break;
|
2009-07-14 16:55:14 +00:00
|
|
|
llvm_unreachable("invalid shift count for vqshlu intrinsic");
|
2009-06-22 23:27:02 +00:00
|
|
|
|
|
|
|
case Intrinsic::arm_neon_vshiftn:
|
|
|
|
case Intrinsic::arm_neon_vrshiftn:
|
|
|
|
case Intrinsic::arm_neon_vqshiftns:
|
|
|
|
case Intrinsic::arm_neon_vqshiftnu:
|
|
|
|
case Intrinsic::arm_neon_vqshiftnsu:
|
|
|
|
case Intrinsic::arm_neon_vqrshiftns:
|
|
|
|
case Intrinsic::arm_neon_vqrshiftnu:
|
|
|
|
case Intrinsic::arm_neon_vqrshiftnsu:
|
|
|
|
// Narrowing shifts require an immediate right shift.
|
|
|
|
if (isVShiftRImm(N->getOperand(2), VT, true, true, Cnt))
|
|
|
|
break;
|
2009-07-14 16:55:14 +00:00
|
|
|
llvm_unreachable("invalid shift count for narrowing vector shift intrinsic");
|
2009-06-22 23:27:02 +00:00
|
|
|
|
|
|
|
default:
|
2009-07-14 16:55:14 +00:00
|
|
|
llvm_unreachable("unhandled vector shift");
|
2009-06-22 23:27:02 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
switch (IntNo) {
|
|
|
|
case Intrinsic::arm_neon_vshifts:
|
|
|
|
case Intrinsic::arm_neon_vshiftu:
|
|
|
|
// Opcode already set above.
|
|
|
|
break;
|
|
|
|
case Intrinsic::arm_neon_vshiftls:
|
|
|
|
case Intrinsic::arm_neon_vshiftlu:
|
|
|
|
if (Cnt == VT.getVectorElementType().getSizeInBits())
|
|
|
|
VShiftOpc = ARMISD::VSHLLi;
|
|
|
|
else
|
|
|
|
VShiftOpc = (IntNo == Intrinsic::arm_neon_vshiftls ?
|
|
|
|
ARMISD::VSHLLs : ARMISD::VSHLLu);
|
|
|
|
break;
|
|
|
|
case Intrinsic::arm_neon_vshiftn:
|
|
|
|
VShiftOpc = ARMISD::VSHRN; break;
|
|
|
|
case Intrinsic::arm_neon_vrshifts:
|
|
|
|
VShiftOpc = ARMISD::VRSHRs; break;
|
|
|
|
case Intrinsic::arm_neon_vrshiftu:
|
|
|
|
VShiftOpc = ARMISD::VRSHRu; break;
|
|
|
|
case Intrinsic::arm_neon_vrshiftn:
|
|
|
|
VShiftOpc = ARMISD::VRSHRN; break;
|
|
|
|
case Intrinsic::arm_neon_vqshifts:
|
|
|
|
VShiftOpc = ARMISD::VQSHLs; break;
|
|
|
|
case Intrinsic::arm_neon_vqshiftu:
|
|
|
|
VShiftOpc = ARMISD::VQSHLu; break;
|
|
|
|
case Intrinsic::arm_neon_vqshiftsu:
|
|
|
|
VShiftOpc = ARMISD::VQSHLsu; break;
|
|
|
|
case Intrinsic::arm_neon_vqshiftns:
|
|
|
|
VShiftOpc = ARMISD::VQSHRNs; break;
|
|
|
|
case Intrinsic::arm_neon_vqshiftnu:
|
|
|
|
VShiftOpc = ARMISD::VQSHRNu; break;
|
|
|
|
case Intrinsic::arm_neon_vqshiftnsu:
|
|
|
|
VShiftOpc = ARMISD::VQSHRNsu; break;
|
|
|
|
case Intrinsic::arm_neon_vqrshiftns:
|
|
|
|
VShiftOpc = ARMISD::VQRSHRNs; break;
|
|
|
|
case Intrinsic::arm_neon_vqrshiftnu:
|
|
|
|
VShiftOpc = ARMISD::VQRSHRNu; break;
|
|
|
|
case Intrinsic::arm_neon_vqrshiftnsu:
|
|
|
|
VShiftOpc = ARMISD::VQRSHRNsu; break;
|
|
|
|
}
|
|
|
|
|
|
|
|
return DAG.getNode(VShiftOpc, N->getDebugLoc(), N->getValueType(0),
|
2009-08-11 20:47:22 +00:00
|
|
|
N->getOperand(1), DAG.getConstant(Cnt, MVT::i32));
|
2009-06-22 23:27:02 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
case Intrinsic::arm_neon_vshiftins: {
|
2009-08-10 22:56:29 +00:00
|
|
|
EVT VT = N->getOperand(1).getValueType();
|
2009-06-22 23:27:02 +00:00
|
|
|
int64_t Cnt;
|
|
|
|
unsigned VShiftOpc = 0;
|
|
|
|
|
|
|
|
if (isVShiftLImm(N->getOperand(3), VT, false, Cnt))
|
|
|
|
VShiftOpc = ARMISD::VSLI;
|
|
|
|
else if (isVShiftRImm(N->getOperand(3), VT, false, true, Cnt))
|
|
|
|
VShiftOpc = ARMISD::VSRI;
|
|
|
|
else {
|
2009-07-14 16:55:14 +00:00
|
|
|
llvm_unreachable("invalid shift count for vsli/vsri intrinsic");
|
2009-06-22 23:27:02 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return DAG.getNode(VShiftOpc, N->getDebugLoc(), N->getValueType(0),
|
|
|
|
N->getOperand(1), N->getOperand(2),
|
2009-08-11 20:47:22 +00:00
|
|
|
DAG.getConstant(Cnt, MVT::i32));
|
2009-06-22 23:27:02 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
case Intrinsic::arm_neon_vqrshifts:
|
|
|
|
case Intrinsic::arm_neon_vqrshiftu:
|
|
|
|
// No immediate versions of these to check for.
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
return SDValue();
|
|
|
|
}
|
|
|
|
|
|
|
|
/// PerformShiftCombine - Checks for immediate versions of vector shifts and
|
|
|
|
/// lowers them. As with the vector shift intrinsics, this is done during DAG
|
|
|
|
/// combining instead of DAG legalizing because the build_vectors for 64-bit
|
|
|
|
/// vector element shift counts are generally not legal, and it is hard to see
|
|
|
|
/// their values after they get legalized to loads from a constant pool.
|
|
|
|
static SDValue PerformShiftCombine(SDNode *N, SelectionDAG &DAG,
|
|
|
|
const ARMSubtarget *ST) {
|
2009-08-10 22:56:29 +00:00
|
|
|
EVT VT = N->getValueType(0);
|
2009-06-22 23:27:02 +00:00
|
|
|
|
|
|
|
// Nothing to be done for scalar shifts.
|
|
|
|
if (!VT.isVector())
|
|
|
|
return SDValue();
|
|
|
|
|
|
|
|
assert(ST->hasNEON() && "unexpected vector shift");
|
|
|
|
int64_t Cnt;
|
|
|
|
|
|
|
|
switch (N->getOpcode()) {
|
2009-07-14 16:55:14 +00:00
|
|
|
default: llvm_unreachable("unexpected shift opcode");
|
2009-06-22 23:27:02 +00:00
|
|
|
|
|
|
|
case ISD::SHL:
|
|
|
|
if (isVShiftLImm(N->getOperand(1), VT, false, Cnt))
|
|
|
|
return DAG.getNode(ARMISD::VSHL, N->getDebugLoc(), VT, N->getOperand(0),
|
2009-08-11 20:47:22 +00:00
|
|
|
DAG.getConstant(Cnt, MVT::i32));
|
2009-06-22 23:27:02 +00:00
|
|
|
break;
|
|
|
|
|
|
|
|
case ISD::SRA:
|
|
|
|
case ISD::SRL:
|
|
|
|
if (isVShiftRImm(N->getOperand(1), VT, false, false, Cnt)) {
|
|
|
|
unsigned VShiftOpc = (N->getOpcode() == ISD::SRA ?
|
|
|
|
ARMISD::VSHRs : ARMISD::VSHRu);
|
|
|
|
return DAG.getNode(VShiftOpc, N->getDebugLoc(), VT, N->getOperand(0),
|
2009-08-11 20:47:22 +00:00
|
|
|
DAG.getConstant(Cnt, MVT::i32));
|
2009-06-22 23:27:02 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
return SDValue();
|
|
|
|
}
|
|
|
|
|
|
|
|
/// PerformExtendCombine - Target-specific DAG combining for ISD::SIGN_EXTEND,
|
|
|
|
/// ISD::ZERO_EXTEND, and ISD::ANY_EXTEND.
|
|
|
|
static SDValue PerformExtendCombine(SDNode *N, SelectionDAG &DAG,
|
|
|
|
const ARMSubtarget *ST) {
|
|
|
|
SDValue N0 = N->getOperand(0);
|
|
|
|
|
|
|
|
// Check for sign- and zero-extensions of vector extract operations of 8-
|
|
|
|
// and 16-bit vector elements. NEON supports these directly. They are
|
|
|
|
// handled during DAG combining because type legalization will promote them
|
|
|
|
// to 32-bit types and it is messy to recognize the operations after that.
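// For example, (sext (extract_vector_elt v8i16:Vec, Lane)) becomes
// (VGETLANEs Vec, Lane), which selects to a single "vmov.s16 rD, dN[Lane]".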
|
|
|
|
if (ST->hasNEON() && N0.getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
|
|
|
|
SDValue Vec = N0.getOperand(0);
|
|
|
|
SDValue Lane = N0.getOperand(1);
|
2009-08-10 22:56:29 +00:00
|
|
|
EVT VT = N->getValueType(0);
|
|
|
|
EVT EltVT = N0.getValueType();
|
2009-06-22 23:27:02 +00:00
|
|
|
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
|
|
|
|
|
2009-08-11 20:47:22 +00:00
|
|
|
if (VT == MVT::i32 &&
|
|
|
|
(EltVT == MVT::i8 || EltVT == MVT::i16) &&
|
2009-06-22 23:27:02 +00:00
|
|
|
TLI.isTypeLegal(Vec.getValueType())) {
|
|
|
|
|
|
|
|
unsigned Opc = 0;
|
|
|
|
switch (N->getOpcode()) {
|
2009-07-14 16:55:14 +00:00
|
|
|
default: llvm_unreachable("unexpected opcode");
|
2009-06-22 23:27:02 +00:00
|
|
|
case ISD::SIGN_EXTEND:
|
|
|
|
Opc = ARMISD::VGETLANEs;
|
|
|
|
break;
|
|
|
|
case ISD::ZERO_EXTEND:
|
|
|
|
case ISD::ANY_EXTEND:
|
|
|
|
Opc = ARMISD::VGETLANEu;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
return DAG.getNode(Opc, N->getDebugLoc(), VT, Vec, Lane);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return SDValue();
|
|
|
|
}
|
|
|
|
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue ARMTargetLowering::PerformDAGCombine(SDNode *N,
|
2009-03-20 22:42:55 +00:00
|
|
|
DAGCombinerInfo &DCI) const {
|
2007-11-27 22:36:16 +00:00
|
|
|
switch (N->getOpcode()) {
|
|
|
|
default: break;
|
2009-03-12 06:52:53 +00:00
|
|
|
case ISD::ADD: return PerformADDCombine(N, DCI);
|
|
|
|
case ISD::SUB: return PerformSUBCombine(N, DCI);
|
2009-11-09 00:11:35 +00:00
|
|
|
case ARMISD::VMOVRRD: return PerformVMOVRRDCombine(N, DCI);
|
2009-06-22 23:27:02 +00:00
|
|
|
case ISD::INTRINSIC_WO_CHAIN:
|
|
|
|
return PerformIntrinsicCombine(N, DCI.DAG);
|
|
|
|
case ISD::SHL:
|
|
|
|
case ISD::SRA:
|
|
|
|
case ISD::SRL:
|
|
|
|
return PerformShiftCombine(N, DCI.DAG, Subtarget);
|
|
|
|
case ISD::SIGN_EXTEND:
|
|
|
|
case ISD::ZERO_EXTEND:
|
|
|
|
case ISD::ANY_EXTEND:
|
|
|
|
return PerformExtendCombine(N, DCI.DAG, Subtarget);
|
2007-11-27 22:36:16 +00:00
|
|
|
}
|
2008-07-27 21:46:04 +00:00
|
|
|
return SDValue();
|
2007-11-27 22:36:16 +00:00
|
|
|
}
|
|
|
|
|
2009-08-15 21:21:19 +00:00
|
|
|
bool ARMTargetLowering::allowsUnalignedMemoryAccesses(EVT VT) const {
|
|
|
|
if (!Subtarget->hasV6Ops())
|
|
|
|
// Pre-v6 does not support unaligned mem access.
|
|
|
|
return false;
|
|
|
|
else if (!Subtarget->hasV7Ops()) {
|
|
|
|
// v6 may or may not support unaligned mem access.
|
|
|
|
if (!Subtarget->isTargetDarwin())
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
switch (VT.getSimpleVT().SimpleTy) {
|
|
|
|
default:
|
|
|
|
return false;
|
|
|
|
case MVT::i8:
|
|
|
|
case MVT::i16:
|
|
|
|
case MVT::i32:
|
|
|
|
return true;
|
|
|
|
// FIXME: VLD1 etc with standard alignment is legal.
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2009-08-14 20:09:37 +00:00
|
|
|
static bool isLegalT1AddressImmediate(int64_t V, EVT VT) {
|
|
|
|
if (V < 0)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
unsigned Scale = 1;
|
|
|
|
switch (VT.getSimpleVT().SimpleTy) {
|
|
|
|
default: return false;
|
|
|
|
case MVT::i1:
|
|
|
|
case MVT::i8:
|
|
|
|
// Scale == 1;
|
|
|
|
break;
|
|
|
|
case MVT::i16:
|
|
|
|
// Scale == 2;
|
|
|
|
Scale = 2;
|
|
|
|
break;
|
|
|
|
case MVT::i32:
|
|
|
|
// Scale == 4;
|
|
|
|
Scale = 4;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
if ((V & (Scale - 1)) != 0)
|
|
|
|
return false;
|
|
|
|
V /= Scale;
|
|
|
|
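// Thumb1 load/store immediate offsets are unsigned 5-bit values, scaled by
// the access size.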
return V == (V & ((1LL << 5) - 1));
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool isLegalT2AddressImmediate(int64_t V, EVT VT,
|
|
|
|
const ARMSubtarget *Subtarget) {
|
|
|
|
bool isNeg = false;
|
|
|
|
if (V < 0) {
|
|
|
|
isNeg = true;
|
|
|
|
V = -V;
|
|
|
|
}
|
|
|
|
|
|
|
|
switch (VT.getSimpleVT().SimpleTy) {
|
|
|
|
default: return false;
|
|
|
|
case MVT::i1:
|
|
|
|
case MVT::i8:
|
|
|
|
case MVT::i16:
|
|
|
|
case MVT::i32:
|
|
|
|
// + imm12 or - imm8
|
|
|
|
if (isNeg)
|
|
|
|
return V == (V & ((1LL << 8) - 1));
|
|
|
|
return V == (V & ((1LL << 12) - 1));
|
|
|
|
case MVT::f32:
|
|
|
|
case MVT::f64:
|
|
|
|
// Same as ARM mode. FIXME: NEON?
|
|
|
|
if (!Subtarget->hasVFP2())
|
|
|
|
return false;
|
|
|
|
if ((V & 3) != 0)
|
|
|
|
return false;
|
|
|
|
V >>= 2;
|
|
|
|
return V == (V & ((1LL << 8) - 1));
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2007-03-12 23:30:29 +00:00
|
|
|
/// isLegalAddressImmediate - Return true if the integer value can be used
|
|
|
|
/// as the offset of the target addressing mode for load / store of the
|
|
|
|
/// given type.
|
2009-08-10 22:56:29 +00:00
|
|
|
static bool isLegalAddressImmediate(int64_t V, EVT VT,
|
2007-04-09 23:33:39 +00:00
|
|
|
const ARMSubtarget *Subtarget) {
|
2007-03-13 20:37:59 +00:00
|
|
|
if (V == 0)
|
|
|
|
return true;
|
|
|
|
|
2009-03-09 19:15:00 +00:00
|
|
|
if (!VT.isSimple())
|
|
|
|
return false;
|
|
|
|
|
2009-08-14 20:09:37 +00:00
|
|
|
if (Subtarget->isThumb1Only())
|
|
|
|
return isLegalT1AddressImmediate(V, VT);
|
|
|
|
else if (Subtarget->isThumb2())
|
|
|
|
return isLegalT2AddressImmediate(V, VT, Subtarget);
|
2007-03-12 23:30:29 +00:00
|
|
|
|
2009-08-14 20:09:37 +00:00
|
|
|
// ARM mode.
|
2007-03-12 23:30:29 +00:00
|
|
|
if (V < 0)
|
|
|
|
V = -V;
|
2009-08-11 20:47:22 +00:00
|
|
|
switch (VT.getSimpleVT().SimpleTy) {
|
2007-03-12 23:30:29 +00:00
|
|
|
default: return false;
|
2009-08-11 20:47:22 +00:00
|
|
|
case MVT::i1:
|
|
|
|
case MVT::i8:
|
|
|
|
case MVT::i32:
|
2007-03-12 23:30:29 +00:00
|
|
|
// +- imm12
|
2008-02-20 11:22:39 +00:00
|
|
|
return V == (V & ((1LL << 12) - 1));
|
2009-08-11 20:47:22 +00:00
|
|
|
case MVT::i16:
|
2007-03-12 23:30:29 +00:00
|
|
|
// +- imm8
|
2008-02-20 11:22:39 +00:00
|
|
|
return V == (V & ((1LL << 8) - 1));
|
2009-08-11 20:47:22 +00:00
|
|
|
case MVT::f32:
|
|
|
|
case MVT::f64:
|
2009-08-14 20:09:37 +00:00
|
|
|
if (!Subtarget->hasVFP2()) // FIXME: NEON?
|
2007-03-12 23:30:29 +00:00
|
|
|
return false;
|
2007-05-03 02:00:18 +00:00
|
|
|
if ((V & 3) != 0)
|
2007-03-12 23:30:29 +00:00
|
|
|
return false;
|
|
|
|
V >>= 2;
|
2008-02-20 11:22:39 +00:00
|
|
|
return V == (V & ((1LL << 8) - 1));
|
2007-03-12 23:30:29 +00:00
|
|
|
}
|
2007-01-19 07:51:42 +00:00
|
|
|
}
|
|
|
|
|
2009-08-14 20:09:37 +00:00
|
|
|
bool ARMTargetLowering::isLegalT2ScaledAddressingMode(const AddrMode &AM,
|
|
|
|
EVT VT) const {
|
|
|
|
int Scale = AM.Scale;
|
|
|
|
if (Scale < 0)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
switch (VT.getSimpleVT().SimpleTy) {
|
|
|
|
default: return false;
|
|
|
|
case MVT::i1:
|
|
|
|
case MVT::i8:
|
|
|
|
case MVT::i16:
|
|
|
|
case MVT::i32:
|
|
|
|
if (Scale == 1)
|
|
|
|
return true;
|
|
|
|
// r + r << imm
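// Clearing the low bit also accepts the odd scales 3, 5, and 9, since
// r * (2^n + 1) can be formed as r + (r << n) with the index register
// doubling as the base.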
|
|
|
|
Scale = Scale & ~1;
|
|
|
|
return Scale == 2 || Scale == 4 || Scale == 8;
|
|
|
|
case MVT::i64:
|
|
|
|
// r + r
|
|
|
|
if (((unsigned)AM.HasBaseReg + Scale) <= 2)
|
|
|
|
return true;
|
|
|
|
return false;
|
|
|
|
case MVT::isVoid:
|
|
|
|
// Note, we allow "void" uses (basically, uses that aren't loads or
|
|
|
|
// stores), because ARM allows folding a scale into many arithmetic
|
|
|
|
// operations. This should be made more precise and revisited later.
|
|
|
|
|
|
|
|
// Allow r << imm, but the imm has to be a multiple of two.
|
|
|
|
if (Scale & 1) return false;
|
|
|
|
return isPowerOf2_32(Scale);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2007-04-09 23:33:39 +00:00
|
|
|
/// isLegalAddressingMode - Return true if the addressing mode represented
|
|
|
|
/// by AM is legal for this target, for a load/store of the specified type.
|
2009-03-20 22:42:55 +00:00
|
|
|
bool ARMTargetLowering::isLegalAddressingMode(const AddrMode &AM,
|
2007-04-09 23:33:39 +00:00
|
|
|
const Type *Ty) const {
|
2009-08-10 22:56:29 +00:00
|
|
|
EVT VT = getValueType(Ty, true);
|
2009-04-08 17:55:28 +00:00
|
|
|
if (!isLegalAddressImmediate(AM.BaseOffs, VT, Subtarget))
|
2007-03-12 23:30:29 +00:00
|
|
|
return false;
|
2009-03-20 22:42:55 +00:00
|
|
|
|
2007-04-09 23:33:39 +00:00
|
|
|
// Can never fold addr of global into load/store.
|
2009-03-20 22:42:55 +00:00
|
|
|
if (AM.BaseGV)
|
2007-04-09 23:33:39 +00:00
|
|
|
return false;
|
2009-03-20 22:42:55 +00:00
|
|
|
|
2007-04-09 23:33:39 +00:00
|
|
|
switch (AM.Scale) {
|
|
|
|
case 0: // no scale reg, must be "r+i" or "r", or "i".
|
|
|
|
break;
|
|
|
|
case 1:
|
2009-08-14 20:09:37 +00:00
|
|
|
if (Subtarget->isThumb1Only())
|
2007-04-09 23:33:39 +00:00
|
|
|
return false;
|
2007-04-13 06:50:55 +00:00
|
|
|
// FALL THROUGH.
|
2007-04-09 23:33:39 +00:00
|
|
|
default:
|
2007-04-13 06:50:55 +00:00
|
|
|
// ARM doesn't support any R+R*scale+imm addr modes.
|
|
|
|
if (AM.BaseOffs)
|
|
|
|
return false;
|
2009-03-20 22:42:55 +00:00
|
|
|
|
2009-04-08 17:55:28 +00:00
|
|
|
if (!VT.isSimple())
|
|
|
|
return false;
|
|
|
|
|
2009-08-14 20:09:37 +00:00
|
|
|
if (Subtarget->isThumb2())
|
|
|
|
return isLegalT2ScaledAddressingMode(AM, VT);
|
|
|
|
|
2007-04-10 03:48:29 +00:00
|
|
|
int Scale = AM.Scale;
|
2009-08-11 20:47:22 +00:00
|
|
|
switch (VT.getSimpleVT().SimpleTy) {
|
2007-04-09 23:33:39 +00:00
|
|
|
default: return false;
|
2009-08-11 20:47:22 +00:00
|
|
|
case MVT::i1:
|
|
|
|
case MVT::i8:
|
|
|
|
case MVT::i32:
|
2007-04-10 03:48:29 +00:00
|
|
|
if (Scale < 0) Scale = -Scale;
|
|
|
|
if (Scale == 1)
|
2007-04-09 23:33:39 +00:00
|
|
|
return true;
|
|
|
|
// r + r << imm
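// As above, clearing the low bit also accepts odd scales 2^n + 1, which
// can be formed as r + (r << n).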
|
2007-04-11 16:17:12 +00:00
|
|
|
return isPowerOf2_32(Scale & ~1);
|
2009-08-11 20:47:22 +00:00
|
|
|
case MVT::i16:
|
2009-08-14 20:09:37 +00:00
|
|
|
case MVT::i64:
|
2007-04-09 23:33:39 +00:00
|
|
|
// r + r
|
2007-04-10 03:48:29 +00:00
|
|
|
if (((unsigned)AM.HasBaseReg + Scale) <= 2)
|
2007-04-09 23:33:39 +00:00
|
|
|
return true;
|
2007-04-11 16:17:12 +00:00
|
|
|
return false;
|
2009-03-20 22:42:55 +00:00
|
|
|
|
2009-08-11 20:47:22 +00:00
|
|
|
case MVT::isVoid:
|
2007-04-09 23:33:39 +00:00
|
|
|
// Note, we allow "void" uses (basically, uses that aren't loads or
|
|
|
|
// stores), because ARM allows folding a scale into many arithmetic
|
|
|
|
// operations. This should be made more precise and revisited later.
|
2009-03-20 22:42:55 +00:00
|
|
|
|
2007-04-09 23:33:39 +00:00
|
|
|
// Allow r << imm, but the imm has to be a multiple of two.
|
2009-08-14 20:09:37 +00:00
|
|
|
if (Scale & 1) return false;
|
|
|
|
return isPowerOf2_32(Scale);
|
2007-04-09 23:33:39 +00:00
|
|
|
}
|
|
|
|
break;
|
2007-03-12 23:30:29 +00:00
|
|
|
}
|
2007-04-09 23:33:39 +00:00
|
|
|
return true;
|
2007-03-12 23:30:29 +00:00
|
|
|
}
|
|
|
|
|
2009-11-11 19:05:52 +00:00
|
|
|
/// isLegalICmpImmediate - Return true if the specified immediate is a legal
|
|
|
|
/// icmp immediate; that is, the target has icmp instructions which can compare
|
|
|
|
/// a register against the immediate without having to materialize the
|
|
|
|
/// immediate into a register.
|
2009-11-12 07:13:11 +00:00
|
|
|
bool ARMTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
|
2009-11-11 19:05:52 +00:00
|
|
|
if (!Subtarget->isThumb())
|
|
|
|
return ARM_AM::getSOImmVal(Imm) != -1;
|
|
|
|
if (Subtarget->isThumb2())
|
|
|
|
return ARM_AM::getT2SOImmVal(Imm) != -1;
|
2009-11-12 07:13:11 +00:00
|
|
|
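// Thumb1 CMP only accepts an unsigned 8-bit immediate (0-255).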
return Imm >= 0 && Imm <= 255;
|
2009-11-11 19:05:52 +00:00
|
|
|
}
|
|
|
|
|
2009-08-10 22:56:29 +00:00
|
|
|
static bool getARMIndexedAddressParts(SDNode *Ptr, EVT VT,
|
2009-07-02 07:28:31 +00:00
|
|
|
bool isSEXTLoad, SDValue &Base,
|
|
|
|
SDValue &Offset, bool &isInc,
|
|
|
|
SelectionDAG &DAG) {
|
2007-01-19 07:51:42 +00:00
|
|
|
if (Ptr->getOpcode() != ISD::ADD && Ptr->getOpcode() != ISD::SUB)
|
|
|
|
return false;
|
|
|
|
|
2009-08-11 20:47:22 +00:00
|
|
|
if (VT == MVT::i16 || ((VT == MVT::i8 || VT == MVT::i1) && isSEXTLoad)) {
|
2007-01-19 07:51:42 +00:00
|
|
|
// AddressingMode 3
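// (base register plus register or 8-bit immediate offset; used for halfword
// and signed-byte loads/stores such as LDRH, LDRSH, and LDRSB.)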
|
|
|
|
Base = Ptr->getOperand(0);
|
|
|
|
if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) {
|
2008-09-12 16:56:44 +00:00
|
|
|
int RHSC = (int)RHS->getZExtValue();
|
2007-01-19 07:51:42 +00:00
|
|
|
if (RHSC < 0 && RHSC > -256) {
|
2009-07-02 07:28:31 +00:00
|
|
|
assert(Ptr->getOpcode() == ISD::ADD);
|
2007-01-19 07:51:42 +00:00
|
|
|
isInc = false;
|
|
|
|
Offset = DAG.getConstant(-RHSC, RHS->getValueType(0));
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
isInc = (Ptr->getOpcode() == ISD::ADD);
|
|
|
|
Offset = Ptr->getOperand(1);
|
|
|
|
return true;
|
2009-08-11 20:47:22 +00:00
|
|
|
} else if (VT == MVT::i32 || VT == MVT::i8 || VT == MVT::i1) {
|
2007-01-19 07:51:42 +00:00
|
|
|
// AddressingMode 2
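// (base register plus register or 12-bit immediate offset; used for word and
// unsigned-byte loads/stores such as LDR, LDRB, STR, and STRB.)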
|
|
|
|
if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) {
|
2008-09-12 16:56:44 +00:00
|
|
|
int RHSC = (int)RHS->getZExtValue();
|
2007-01-19 07:51:42 +00:00
|
|
|
if (RHSC < 0 && RHSC > -0x1000) {
|
2009-07-02 07:28:31 +00:00
|
|
|
assert(Ptr->getOpcode() == ISD::ADD);
|
2007-01-19 07:51:42 +00:00
|
|
|
isInc = false;
|
|
|
|
Offset = DAG.getConstant(-RHSC, RHS->getValueType(0));
|
|
|
|
Base = Ptr->getOperand(0);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (Ptr->getOpcode() == ISD::ADD) {
|
|
|
|
isInc = true;
|
|
|
|
ARM_AM::ShiftOpc ShOpcVal= ARM_AM::getShiftOpcForNode(Ptr->getOperand(0));
|
|
|
|
if (ShOpcVal != ARM_AM::no_shift) {
|
|
|
|
Base = Ptr->getOperand(1);
|
|
|
|
Offset = Ptr->getOperand(0);
|
|
|
|
} else {
|
|
|
|
Base = Ptr->getOperand(0);
|
|
|
|
Offset = Ptr->getOperand(1);
|
|
|
|
}
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
isInc = (Ptr->getOpcode() == ISD::ADD);
|
|
|
|
Base = Ptr->getOperand(0);
|
|
|
|
Offset = Ptr->getOperand(1);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2009-11-09 00:11:35 +00:00
|
|
|
// FIXME: Use VLDM / VSTM to emulate indexed FP load / store.
|
2007-01-19 07:51:42 +00:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2009-08-10 22:56:29 +00:00
|
|
|
static bool getT2IndexedAddressParts(SDNode *Ptr, EVT VT,
|
2009-07-02 07:28:31 +00:00
|
|
|
bool isSEXTLoad, SDValue &Base,
|
|
|
|
SDValue &Offset, bool &isInc,
|
|
|
|
SelectionDAG &DAG) {
|
|
|
|
if (Ptr->getOpcode() != ISD::ADD && Ptr->getOpcode() != ISD::SUB)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
Base = Ptr->getOperand(0);
|
|
|
|
if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) {
|
|
|
|
int RHSC = (int)RHS->getZExtValue();
|
|
|
|
if (RHSC < 0 && RHSC > -0x100) { // 8 bits.
|
|
|
|
assert(Ptr->getOpcode() == ISD::ADD);
|
|
|
|
isInc = false;
|
|
|
|
Offset = DAG.getConstant(-RHSC, RHS->getValueType(0));
|
|
|
|
return true;
|
|
|
|
} else if (RHSC > 0 && RHSC < 0x100) { // 8 bit, no zero.
|
|
|
|
isInc = Ptr->getOpcode() == ISD::ADD;
|
|
|
|
Offset = DAG.getConstant(RHSC, RHS->getValueType(0));
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2007-01-19 07:51:42 +00:00
|
|
|
/// getPreIndexedAddressParts - Returns true if the node's address can be
|
|
|
|
/// legally represented as a pre-indexed load / store address; if so, the
|
|
|
|
/// base pointer, offset, and addressing mode are returned by reference.
|
|
|
|
bool
|
2008-07-27 21:46:04 +00:00
|
|
|
ARMTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base,
|
|
|
|
SDValue &Offset,
|
2007-01-19 07:51:42 +00:00
|
|
|
ISD::MemIndexedMode &AM,
|
2009-01-15 16:29:45 +00:00
|
|
|
SelectionDAG &DAG) const {
|
2009-07-02 07:28:31 +00:00
|
|
|
if (Subtarget->isThumb1Only())
|
2007-01-19 07:51:42 +00:00
|
|
|
return false;
|
|
|
|
|
2009-08-10 22:56:29 +00:00
|
|
|
EVT VT;
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue Ptr;
|
2007-01-19 07:51:42 +00:00
|
|
|
bool isSEXTLoad = false;
|
|
|
|
if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
|
|
|
|
Ptr = LD->getBasePtr();
|
2008-01-30 00:15:11 +00:00
|
|
|
VT = LD->getMemoryVT();
|
2007-01-19 07:51:42 +00:00
|
|
|
isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD;
|
|
|
|
} else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
|
|
|
|
Ptr = ST->getBasePtr();
|
2008-01-30 00:15:11 +00:00
|
|
|
VT = ST->getMemoryVT();
|
2007-01-19 07:51:42 +00:00
|
|
|
} else
|
|
|
|
return false;
|
|
|
|
|
|
|
|
bool isInc;
|
2009-07-02 07:28:31 +00:00
|
|
|
bool isLegal = false;
|
2009-08-14 20:09:37 +00:00
|
|
|
if (Subtarget->isThumb2())
|
2009-07-02 07:28:31 +00:00
|
|
|
isLegal = getT2IndexedAddressParts(Ptr.getNode(), VT, isSEXTLoad, Base,
|
|
|
|
Offset, isInc, DAG);
|
2009-08-11 15:33:49 +00:00
|
|
|
else
|
2009-07-02 07:28:31 +00:00
|
|
|
isLegal = getARMIndexedAddressParts(Ptr.getNode(), VT, isSEXTLoad, Base,
|
2009-07-02 06:44:30 +00:00
|
|
|
Offset, isInc, DAG);
|
2009-07-02 07:28:31 +00:00
|
|
|
if (!isLegal)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
AM = isInc ? ISD::PRE_INC : ISD::PRE_DEC;
|
|
|
|
return true;
|
2007-01-19 07:51:42 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/// getPostIndexedAddressParts - Returns true if this node can be combined
|
|
|
|
/// with a load / store to form a post-indexed load / store; if so, the
|
|
|
|
/// base pointer, offset, and addressing mode are returned by reference.
|
|
|
|
bool ARMTargetLowering::getPostIndexedAddressParts(SDNode *N, SDNode *Op,
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue &Base,
|
|
|
|
SDValue &Offset,
|
2007-01-19 07:51:42 +00:00
|
|
|
ISD::MemIndexedMode &AM,
|
2009-01-15 16:29:45 +00:00
|
|
|
SelectionDAG &DAG) const {
|
2009-07-02 07:28:31 +00:00
|
|
|
if (Subtarget->isThumb1Only())
|
2007-01-19 07:51:42 +00:00
|
|
|
return false;
|
|
|
|
|
2009-08-10 22:56:29 +00:00
|
|
|
EVT VT;
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue Ptr;
|
2007-01-19 07:51:42 +00:00
|
|
|
bool isSEXTLoad = false;
|
|
|
|
if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
|
2008-01-30 00:15:11 +00:00
|
|
|
VT = LD->getMemoryVT();
|
2007-01-19 07:51:42 +00:00
|
|
|
isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD;
|
|
|
|
} else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
|
2008-01-30 00:15:11 +00:00
|
|
|
VT = ST->getMemoryVT();
|
2007-01-19 07:51:42 +00:00
|
|
|
} else
|
|
|
|
return false;
|
|
|
|
|
|
|
|
bool isInc;
|
2009-07-02 07:28:31 +00:00
|
|
|
bool isLegal = false;
|
2009-08-14 20:09:37 +00:00
|
|
|
if (Subtarget->isThumb2())
|
2009-07-02 07:28:31 +00:00
|
|
|
isLegal = getT2IndexedAddressParts(Op, VT, isSEXTLoad, Base, Offset,
|
2007-01-19 07:51:42 +00:00
|
|
|
isInc, DAG);
|
2009-08-11 15:33:49 +00:00
|
|
|
else
|
2009-07-02 07:28:31 +00:00
|
|
|
isLegal = getARMIndexedAddressParts(Op, VT, isSEXTLoad, Base, Offset,
|
|
|
|
isInc, DAG);
|
|
|
|
if (!isLegal)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
AM = isInc ? ISD::POST_INC : ISD::POST_DEC;
|
|
|
|
return true;
|
2007-01-19 07:51:42 +00:00
|
|
|
}
|
|
|
|
|
2008-07-27 21:46:04 +00:00
|
|
|
void ARMTargetLowering::computeMaskedBitsForTargetNode(const SDValue Op,
|
2008-02-13 22:28:48 +00:00
|
|
|
const APInt &Mask,
|
2009-03-20 22:42:55 +00:00
|
|
|
APInt &KnownZero,
|
2008-02-13 00:35:47 +00:00
|
|
|
APInt &KnownOne,
|
2007-06-22 14:59:07 +00:00
|
|
|
const SelectionDAG &DAG,
|
2007-01-19 07:51:42 +00:00
|
|
|
unsigned Depth) const {
|
2008-02-13 00:35:47 +00:00
|
|
|
KnownZero = KnownOne = APInt(Mask.getBitWidth(), 0);
|
2007-01-19 07:51:42 +00:00
|
|
|
switch (Op.getOpcode()) {
|
|
|
|
default: break;
|
|
|
|
case ARMISD::CMOV: {
|
|
|
|
// Bits are known zero/one if known on the LHS and RHS.
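// For example, if both arms of the CMOV are (and x, 0xff) values, the top
// 24 bits of the result are known zero.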
|
2007-06-22 14:59:07 +00:00
|
|
|
DAG.ComputeMaskedBits(Op.getOperand(0), Mask, KnownZero, KnownOne, Depth+1);
|
2007-01-19 07:51:42 +00:00
|
|
|
if (KnownZero == 0 && KnownOne == 0) return;
|
|
|
|
|
2008-02-13 00:35:47 +00:00
|
|
|
APInt KnownZeroRHS, KnownOneRHS;
|
2007-06-22 14:59:07 +00:00
|
|
|
DAG.ComputeMaskedBits(Op.getOperand(1), Mask,
|
|
|
|
KnownZeroRHS, KnownOneRHS, Depth+1);
|
2007-01-19 07:51:42 +00:00
|
|
|
KnownZero &= KnownZeroRHS;
|
|
|
|
KnownOne &= KnownOneRHS;
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// ARM Inline Assembly Support
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
/// getConstraintType - Given a constraint letter, return the type of
|
|
|
|
/// constraint it is for this target.
|
|
|
|
ARMTargetLowering::ConstraintType
|
2007-03-25 02:14:49 +00:00
|
|
|
ARMTargetLowering::getConstraintType(const std::string &Constraint) const {
|
|
|
|
if (Constraint.size() == 1) {
|
|
|
|
switch (Constraint[0]) {
|
|
|
|
default: break;
|
|
|
|
case 'l': return C_RegisterClass;
|
2007-04-02 17:24:08 +00:00
|
|
|
case 'w': return C_RegisterClass;
|
2007-03-25 02:14:49 +00:00
|
|
|
}
|
2007-01-19 07:51:42 +00:00
|
|
|
}
|
2007-03-25 02:14:49 +00:00
|
|
|
return TargetLowering::getConstraintType(Constraint);
|
2007-01-19 07:51:42 +00:00
|
|
|
}
|
|
|
|
|
2009-03-20 22:42:55 +00:00
|
|
|
std::pair<unsigned, const TargetRegisterClass*>
|
2007-01-19 07:51:42 +00:00
|
|
|
ARMTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
|
2009-08-10 22:56:29 +00:00
|
|
|
EVT VT) const {
|
2007-01-19 07:51:42 +00:00
|
|
|
if (Constraint.size() == 1) {
|
|
|
|
// GCC ARM Constraint Letters
|
|
|
|
switch (Constraint[0]) {
|
2007-04-02 17:24:08 +00:00
|
|
|
case 'l':
|
2009-07-08 23:10:31 +00:00
|
|
|
if (Subtarget->isThumb1Only())
|
2009-04-07 20:34:09 +00:00
|
|
|
return std::make_pair(0U, ARM::tGPRRegisterClass);
|
|
|
|
else
|
|
|
|
return std::make_pair(0U, ARM::GPRRegisterClass);
|
2007-04-02 17:24:08 +00:00
|
|
|
case 'r':
|
|
|
|
return std::make_pair(0U, ARM::GPRRegisterClass);
|
|
|
|
case 'w':
|
2009-08-11 20:47:22 +00:00
|
|
|
if (VT == MVT::f32)
|
2007-04-02 17:24:08 +00:00
|
|
|
return std::make_pair(0U, ARM::SPRRegisterClass);
|
2009-08-11 20:47:22 +00:00
|
|
|
if (VT == MVT::f64)
|
2007-04-02 17:24:08 +00:00
|
|
|
return std::make_pair(0U, ARM::DPRRegisterClass);
|
2009-12-08 23:06:22 +00:00
|
|
|
if (VT.getSizeInBits() == 128)
|
|
|
|
return std::make_pair(0U, ARM::QPRRegisterClass);
|
2007-04-02 17:24:08 +00:00
|
|
|
break;
|
2007-01-19 07:51:42 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
|
|
|
|
}
|
|
|
|
|
|
|
|
std::vector<unsigned> ARMTargetLowering::
|
|
|
|
getRegClassForInlineAsmConstraint(const std::string &Constraint,
|
2009-08-10 22:56:29 +00:00
|
|
|
EVT VT) const {
|
2007-01-19 07:51:42 +00:00
|
|
|
if (Constraint.size() != 1)
|
|
|
|
return std::vector<unsigned>();
|
|
|
|
|
|
|
|
switch (Constraint[0]) { // GCC ARM Constraint Letters
|
|
|
|
default: break;
|
|
|
|
case 'l':
|
2009-04-07 20:34:09 +00:00
|
|
|
return make_vector<unsigned>(ARM::R0, ARM::R1, ARM::R2, ARM::R3,
|
|
|
|
ARM::R4, ARM::R5, ARM::R6, ARM::R7,
|
|
|
|
0);
|
2007-01-19 07:51:42 +00:00
|
|
|
case 'r':
|
|
|
|
return make_vector<unsigned>(ARM::R0, ARM::R1, ARM::R2, ARM::R3,
|
|
|
|
ARM::R4, ARM::R5, ARM::R6, ARM::R7,
|
|
|
|
ARM::R8, ARM::R9, ARM::R10, ARM::R11,
|
|
|
|
ARM::R12, ARM::LR, 0);
|
2007-04-02 17:24:08 +00:00
|
|
|
case 'w':
|
2009-08-11 20:47:22 +00:00
|
|
|
if (VT == MVT::f32)
|
2007-04-02 17:24:08 +00:00
|
|
|
return make_vector<unsigned>(ARM::S0, ARM::S1, ARM::S2, ARM::S3,
|
|
|
|
ARM::S4, ARM::S5, ARM::S6, ARM::S7,
|
|
|
|
ARM::S8, ARM::S9, ARM::S10, ARM::S11,
|
|
|
|
ARM::S12,ARM::S13,ARM::S14,ARM::S15,
|
|
|
|
ARM::S16,ARM::S17,ARM::S18,ARM::S19,
|
|
|
|
ARM::S20,ARM::S21,ARM::S22,ARM::S23,
|
|
|
|
ARM::S24,ARM::S25,ARM::S26,ARM::S27,
|
|
|
|
ARM::S28,ARM::S29,ARM::S30,ARM::S31, 0);
|
2009-08-11 20:47:22 +00:00
|
|
|
if (VT == MVT::f64)
|
2007-04-02 17:24:08 +00:00
|
|
|
return make_vector<unsigned>(ARM::D0, ARM::D1, ARM::D2, ARM::D3,
|
|
|
|
ARM::D4, ARM::D5, ARM::D6, ARM::D7,
|
|
|
|
ARM::D8, ARM::D9, ARM::D10,ARM::D11,
|
|
|
|
ARM::D12,ARM::D13,ARM::D14,ARM::D15, 0);
|
2009-12-08 23:06:22 +00:00
|
|
|
if (VT.getSizeInBits() == 128)
|
|
|
|
return make_vector<unsigned>(ARM::Q0, ARM::Q1, ARM::Q2, ARM::Q3,
|
|
|
|
ARM::Q4, ARM::Q5, ARM::Q6, ARM::Q7, 0);
|
2007-04-02 17:24:08 +00:00
|
|
|
break;
|
2007-01-19 07:51:42 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return std::vector<unsigned>();
|
|
|
|
}
|
2009-04-01 17:58:54 +00:00
|
|
|
|
|
|
|
/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
|
|
|
|
/// vector. If it is invalid, don't add anything to Ops.
|
|
|
|
void ARMTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
|
|
|
|
char Constraint,
|
|
|
|
bool hasMemory,
|
|
|
|
std::vector<SDValue> &Ops,
|
|
|
|
SelectionDAG &DAG) const {
|
|
|
|
SDValue Result(0, 0);
|
|
|
|
|
|
|
|
switch (Constraint) {
|
|
|
|
default: break;
|
|
|
|
case 'I': case 'J': case 'K': case 'L':
|
|
|
|
case 'M': case 'N': case 'O':
|
|
|
|
ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
|
|
|
|
if (!C)
|
|
|
|
return;
|
|
|
|
|
|
|
|
int64_t CVal64 = C->getSExtValue();
|
|
|
|
int CVal = (int) CVal64;
|
|
|
|
// None of these constraints allow values larger than 32 bits. Check
|
|
|
|
// that the value fits in an int.
|
|
|
|
if (CVal != CVal64)
|
|
|
|
return;
|
|
|
|
|
|
|
|
switch (Constraint) {
|
|
|
|
case 'I':
|
2009-07-08 23:10:31 +00:00
|
|
|
if (Subtarget->isThumb1Only()) {
|
|
|
|
// This must be a constant between 0 and 255, for ADD
|
|
|
|
// immediates.
|
2009-04-01 17:58:54 +00:00
|
|
|
if (CVal >= 0 && CVal <= 255)
|
|
|
|
break;
|
2009-07-08 23:10:31 +00:00
|
|
|
} else if (Subtarget->isThumb2()) {
|
|
|
|
// A constant that can be used as an immediate value in a
|
|
|
|
// data-processing instruction.
|
|
|
|
if (ARM_AM::getT2SOImmVal(CVal) != -1)
|
|
|
|
break;
|
2009-04-01 17:58:54 +00:00
|
|
|
} else {
|
|
|
|
// A constant that can be used as an immediate value in a
|
|
|
|
// data-processing instruction.
|
|
|
|
if (ARM_AM::getSOImmVal(CVal) != -1)
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
return;
|
|
|
|
|
|
|
|
case 'J':
|
2009-07-08 23:10:31 +00:00
|
|
|
if (Subtarget->isThumb()) { // FIXME thumb2
|
2009-04-01 17:58:54 +00:00
|
|
|
// This must be a constant between -255 and -1, for negated ADD
|
|
|
|
// immediates. This can be used in GCC with an "n" modifier that
|
|
|
|
// prints the negated value, for use with SUB instructions. It is
|
|
|
|
// not useful otherwise but is implemented for compatibility.
|
|
|
|
if (CVal >= -255 && CVal <= -1)
|
|
|
|
break;
|
|
|
|
} else {
|
|
|
|
// This must be a constant between -4095 and 4095. It is not clear
|
|
|
|
// what this constraint is intended for. Implemented for
|
|
|
|
// compatibility with GCC.
|
|
|
|
if (CVal >= -4095 && CVal <= 4095)
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
return;
|
|
|
|
|
|
|
|
case 'K':
|
2009-07-08 23:10:31 +00:00
|
|
|
if (Subtarget->isThumb1Only()) {
|
2009-04-01 17:58:54 +00:00
|
|
|
// A 32-bit value where only one byte has a nonzero value. Exclude
|
|
|
|
// zero to match GCC. This constraint is used by GCC internally for
|
|
|
|
// constants that can be loaded with a move/shift combination.
|
|
|
|
// It is not useful otherwise but is implemented for compatibility.
|
|
|
|
if (CVal != 0 && ARM_AM::isThumbImmShiftedVal(CVal))
|
|
|
|
break;
|
2009-07-08 23:10:31 +00:00
|
|
|
} else if (Subtarget->isThumb2()) {
|
|
|
|
// A constant whose bitwise inverse can be used as an immediate
|
|
|
|
// value in a data-processing instruction. This can be used in GCC
|
|
|
|
// with a "B" modifier that prints the inverted value, for use with
|
|
|
|
// BIC and MVN instructions. It is not useful otherwise but is
|
|
|
|
// implemented for compatibility.
|
|
|
|
if (ARM_AM::getT2SOImmVal(~CVal) != -1)
|
|
|
|
break;
|
2009-04-01 17:58:54 +00:00
|
|
|
} else {
|
|
|
|
// A constant whose bitwise inverse can be used as an immediate
|
|
|
|
// value in a data-processing instruction. This can be used in GCC
|
|
|
|
// with a "B" modifier that prints the inverted value, for use with
|
|
|
|
// BIC and MVN instructions. It is not useful otherwise but is
|
|
|
|
// implemented for compatibility.
|
|
|
|
if (ARM_AM::getSOImmVal(~CVal) != -1)
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
return;
|
|
|
|
|
|
|
|
case 'L':
|
2009-07-08 23:10:31 +00:00
|
|
|
if (Subtarget->isThumb1Only()) {
|
2009-04-01 17:58:54 +00:00
|
|
|
// This must be a constant between -7 and 7,
|
|
|
|
// for 3-operand ADD/SUB immediate instructions.
|
|
|
|
if (CVal >= -7 && CVal <= 7)
|
|
|
|
break;
|
2009-07-08 23:10:31 +00:00
|
|
|
} else if (Subtarget->isThumb2()) {
|
|
|
|
// A constant whose negation can be used as an immediate value in a
|
|
|
|
// data-processing instruction. This can be used in GCC with an "n"
|
|
|
|
// modifier that prints the negated value, for use with SUB
|
|
|
|
// instructions. It is not useful otherwise but is implemented for
|
|
|
|
// compatibility.
|
|
|
|
if (ARM_AM::getT2SOImmVal(-CVal) != -1)
|
|
|
|
break;
|
2009-04-01 17:58:54 +00:00
|
|
|
} else {
|
|
|
|
// A constant whose negation can be used as an immediate value in a
|
|
|
|
// data-processing instruction. This can be used in GCC with an "n"
|
|
|
|
// modifier that prints the negated value, for use with SUB
|
|
|
|
// instructions. It is not useful otherwise but is implemented for
|
|
|
|
// compatibility.
|
|
|
|
if (ARM_AM::getSOImmVal(-CVal) != -1)
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
return;
|
|
|
|
|
|
|
|
case 'M':
|
2009-07-08 23:10:31 +00:00
|
|
|
if (Subtarget->isThumb()) { // FIXME thumb2
|
2009-04-01 17:58:54 +00:00
|
|
|
// This must be a multiple of 4 between 0 and 1020, for
|
|
|
|
// ADD sp + immediate.
|
|
|
|
if ((CVal >= 0 && CVal <= 1020) && ((CVal & 3) == 0))
|
|
|
|
break;
|
|
|
|
} else {
|
|
|
|
// A power of two or a constant between 0 and 32. This is used in
|
|
|
|
// GCC for the shift amount on shifted register operands, but it is
|
|
|
|
// useful in general for any shift amounts.
|
|
|
|
if ((CVal >= 0 && CVal <= 32) || ((CVal & (CVal - 1)) == 0))
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
return;
|
|
|
|
|
|
|
|
case 'N':
|
2009-07-08 23:10:31 +00:00
|
|
|
if (Subtarget->isThumb()) { // FIXME thumb2
|
2009-04-01 17:58:54 +00:00
|
|
|
// This must be a constant between 0 and 31, for shift amounts.
|
|
|
|
if (CVal >= 0 && CVal <= 31)
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
return;
|
|
|
|
|
|
|
|
case 'O':
|
2009-07-08 23:10:31 +00:00
|
|
|
if (Subtarget->isThumb()) { // FIXME thumb2
|
2009-04-01 17:58:54 +00:00
|
|
|
// This must be a multiple of 4 between -508 and 508, for
|
|
|
|
// ADD/SUB sp = sp + immediate.
|
|
|
|
if ((CVal >= -508 && CVal <= 508) && ((CVal & 3) == 0))
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
Result = DAG.getTargetConstant(CVal, Op.getValueType());
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (Result.getNode()) {
|
|
|
|
Ops.push_back(Result);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, hasMemory,
|
|
|
|
Ops, DAG);
|
|
|
|
}
|
2009-09-23 19:04:09 +00:00
|
|
|
|
|
|
|
bool
|
|
|
|
ARMTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
|
|
|
|
// The ARM target isn't yet aware of offsets.
|
|
|
|
return false;
|
|
|
|
}
|
2009-10-28 01:44:26 +00:00
|
|
|
|
|
|
|
int ARM::getVFPf32Imm(const APFloat &FPImm) {
|
|
|
|
APInt Imm = FPImm.bitcastToAPInt();
|
|
|
|
uint32_t Sign = Imm.lshr(31).getZExtValue() & 1;
|
|
|
|
int32_t Exp = (Imm.lshr(23).getSExtValue() & 0xff) - 127; // -126 to 127
|
|
|
|
int64_t Mantissa = Imm.getZExtValue() & 0x7fffff; // 23 bits
|
|
|
|
|
|
|
|
// We can handle 4 bits of mantissa.
|
|
|
|
// mantissa = (16+UInt(e:f:g:h))/16.
|
|
|
|
if (Mantissa & 0x7ffff)
|
|
|
|
return -1;
|
|
|
|
Mantissa >>= 19;
|
|
|
|
if ((Mantissa & 0xf) != Mantissa)
|
|
|
|
return -1;
|
|
|
|
|
|
|
|
// We can handle 3 bits of exponent: exp == UInt(NOT(b):c:d)-3
|
|
|
|
if (Exp < -3 || Exp > 4)
|
|
|
|
return -1;
|
|
|
|
Exp = ((Exp+3) & 0x7) ^ 4;
|
|
|
|
|
|
|
|
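// For example, 1.0f (0x3F800000) has Sign = 0, Mantissa = 0, and an encoded
// exponent field of 7, giving an immediate of 0x70.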
return ((int)Sign << 7) | (Exp << 4) | Mantissa;
|
|
|
|
}
|
|
|
|
|
|
|
|
int ARM::getVFPf64Imm(const APFloat &FPImm) {
|
|
|
|
APInt Imm = FPImm.bitcastToAPInt();
|
|
|
|
uint64_t Sign = Imm.lshr(63).getZExtValue() & 1;
|
|
|
|
int64_t Exp = (Imm.lshr(52).getSExtValue() & 0x7ff) - 1023; // -1022 to 1023
|
|
|
|
uint64_t Mantissa = Imm.getZExtValue() & 0xfffffffffffffLL;
|
|
|
|
|
|
|
|
// We can handle 4 bits of mantissa.
|
|
|
|
// mantissa = (16+UInt(e:f:g:h))/16.
|
|
|
|
if (Mantissa & 0xffffffffffffLL)
|
|
|
|
return -1;
|
|
|
|
Mantissa >>= 48;
|
|
|
|
if ((Mantissa & 0xf) != Mantissa)
|
|
|
|
return -1;
|
|
|
|
|
|
|
|
// We can handle 3 bits of exponent: exp == UInt(NOT(b):c:d)-3
|
|
|
|
if (Exp < -3 || Exp > 4)
|
|
|
|
return -1;
|
|
|
|
Exp = ((Exp+3) & 0x7) ^ 4;
|
|
|
|
|
|
|
|
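// For example, 2.0 encodes as 0x00 and -1.0 encodes as 0xF0.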
return ((int)Sign << 7) | (Exp << 4) | Mantissa;
|
|
|
|
}
|
|
|
|
|
|
|
|
/// isFPImmLegal - Returns true if the target can instruction select the
|
|
|
|
/// specified FP immediate natively. If false, the legalizer will
|
|
|
|
/// materialize the FP immediate as a load from a constant pool.
|
|
|
|
bool ARMTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
|
|
|
|
if (!Subtarget->hasVFP3())
|
|
|
|
return false;
|
|
|
|
if (VT == MVT::f32)
|
|
|
|
return ARM::getVFPf32Imm(Imm) != -1;
|
|
|
|
if (VT == MVT::f64)
|
|
|
|
return ARM::getVFPf64Imm(Imm) != -1;
|
|
|
|
return false;
|
|
|
|
}
|