//===-- SelectionDAGBuilder.cpp - Selection-DAG building ------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements routines for translating from LLVM IR into SelectionDAG IR.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "isel"
#include "SelectionDAGBuilder.h"
#include "SDNodeDbgValue.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CallingConv.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/GCMetadata.h"
#include "llvm/CodeGen/GCStrategy.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/Constants.h"
#include "llvm/DataLayout.h"
#include "llvm/DebugInfo.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
#include "llvm/GlobalVariable.h"
#include "llvm/InlineAsm.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Intrinsics.h"
#include "llvm/LLVMContext.h"
#include "llvm/Module.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/IntegersSubsetMapping.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetIntrinsicInfo.h"
#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetOptions.h"
#include <algorithm>
using namespace llvm;

/// LimitFloatPrecision - Generate low-precision inline sequences for
/// some float libcalls (6, 8 or 12 bits).
static unsigned LimitFloatPrecision;

static cl::opt<unsigned, true>
LimitFPPrecision("limit-float-precision",
                 cl::desc("Generate low-precision inline sequences "
                          "for some float libcalls"),
                 cl::location(LimitFloatPrecision),
                 cl::init(0));
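// Illustrative note (not part of the original source): the option writes
// through to LimitFloatPrecision above, e.g. "llc -limit-float-precision=12";
// the default of 0 is assumed to keep the full-precision lowering for the
// float libcalls that consult this value later in this file.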

// Limit the width of DAG chains. This is important in general to prevent
// DAG-based analysis from blowing up. For example, alias analysis and
// load clustering may not complete in reasonable time. It is difficult to
// recognize and avoid this situation within each individual analysis, and
// future analyses are likely to have the same behavior. Limiting DAG width is
// the safe approach, and will be especially important with global DAGs.
//
// MaxParallelChains default is arbitrarily high to avoid affecting
// optimization, but could be lowered to improve compile time. Any ld-ld-st-st
// sequence over this should have been converted to llvm.memcpy by the
// frontend. It is easy to induce this behavior with .ll code such as:
//   %buffer = alloca [4096 x i8]
//   %data = load [4096 x i8]* %argPtr
//   store [4096 x i8] %data, [4096 x i8]* %buffer
static const unsigned MaxParallelChains = 64;

static SDValue getCopyFromPartsVector(SelectionDAG &DAG, DebugLoc DL,
                                      const SDValue *Parts, unsigned NumParts,
                                      EVT PartVT, EVT ValueVT, const Value *V);

/// getCopyFromParts - Create a value that contains the specified legal parts
/// combined into the value they represent.  If the parts combine to a type
/// larger than ValueVT then AssertOp can be used to specify whether the extra
/// bits are known to be zero (ISD::AssertZext) or sign extended from ValueVT
/// (ISD::AssertSext).
static SDValue getCopyFromParts(SelectionDAG &DAG, DebugLoc DL,
                                const SDValue *Parts,
                                unsigned NumParts, EVT PartVT, EVT ValueVT,
                                const Value *V,
                                ISD::NodeType AssertOp = ISD::DELETED_NODE) {
  if (ValueVT.isVector())
    return getCopyFromPartsVector(DAG, DL, Parts, NumParts,
                                  PartVT, ValueVT, V);

  assert(NumParts > 0 && "No parts to assemble!");
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SDValue Val = Parts[0];

  if (NumParts > 1) {
    // Assemble the value from multiple parts.
    if (ValueVT.isInteger()) {
      unsigned PartBits = PartVT.getSizeInBits();
      unsigned ValueBits = ValueVT.getSizeInBits();

      // Assemble the power of 2 part.
      unsigned RoundParts = NumParts & (NumParts - 1) ?
        1 << Log2_32(NumParts) : NumParts;
      unsigned RoundBits = PartBits * RoundParts;
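      // Worked example (illustrative): assembling an i96 value from three i32
      // parts gives RoundParts == 2 (1 << Log2_32(3)) and RoundBits == 64; the
      // first two parts form the round half here, and the third part is folded
      // in by the RoundParts < NumParts case below.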
      EVT RoundVT = RoundBits == ValueBits ?
        ValueVT : EVT::getIntegerVT(*DAG.getContext(), RoundBits);
      SDValue Lo, Hi;

      EVT HalfVT = EVT::getIntegerVT(*DAG.getContext(), RoundBits/2);

      if (RoundParts > 2) {
        Lo = getCopyFromParts(DAG, DL, Parts, RoundParts / 2,
                              PartVT, HalfVT, V);
        Hi = getCopyFromParts(DAG, DL, Parts + RoundParts / 2,
                              RoundParts / 2, PartVT, HalfVT, V);
      } else {
        Lo = DAG.getNode(ISD::BITCAST, DL, HalfVT, Parts[0]);
        Hi = DAG.getNode(ISD::BITCAST, DL, HalfVT, Parts[1]);
      }

      if (TLI.isBigEndian())
        std::swap(Lo, Hi);

      Val = DAG.getNode(ISD::BUILD_PAIR, DL, RoundVT, Lo, Hi);

      if (RoundParts < NumParts) {
        // Assemble the trailing non-power-of-2 part.
        unsigned OddParts = NumParts - RoundParts;
        EVT OddVT = EVT::getIntegerVT(*DAG.getContext(), OddParts * PartBits);
        Hi = getCopyFromParts(DAG, DL,
                              Parts + RoundParts, OddParts, PartVT, OddVT, V);

        // Combine the round and odd parts.
        Lo = Val;
        if (TLI.isBigEndian())
          std::swap(Lo, Hi);
        EVT TotalVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
        Hi = DAG.getNode(ISD::ANY_EXTEND, DL, TotalVT, Hi);
        Hi = DAG.getNode(ISD::SHL, DL, TotalVT, Hi,
                         DAG.getConstant(Lo.getValueType().getSizeInBits(),
                                         TLI.getPointerTy()));
        Lo = DAG.getNode(ISD::ZERO_EXTEND, DL, TotalVT, Lo);
        Val = DAG.getNode(ISD::OR, DL, TotalVT, Lo, Hi);
      }
    } else if (PartVT.isFloatingPoint()) {
      // FP split into multiple FP parts (for ppcf128)
      assert(ValueVT == EVT(MVT::ppcf128) && PartVT == EVT(MVT::f64) &&
             "Unexpected split");
      SDValue Lo, Hi;
      Lo = DAG.getNode(ISD::BITCAST, DL, EVT(MVT::f64), Parts[0]);
      Hi = DAG.getNode(ISD::BITCAST, DL, EVT(MVT::f64), Parts[1]);
      if (TLI.isBigEndian())
        std::swap(Lo, Hi);
      Val = DAG.getNode(ISD::BUILD_PAIR, DL, ValueVT, Lo, Hi);
    } else {
      // FP split into integer parts (soft fp)
      assert(ValueVT.isFloatingPoint() && PartVT.isInteger() &&
             !PartVT.isVector() && "Unexpected split");
      EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), ValueVT.getSizeInBits());
      Val = getCopyFromParts(DAG, DL, Parts, NumParts, PartVT, IntVT, V);
    }
  }

  // There is now one part, held in Val.  Correct it to match ValueVT.
  PartVT = Val.getValueType();

  if (PartVT == ValueVT)
    return Val;

  if (PartVT.isInteger() && ValueVT.isInteger()) {
    if (ValueVT.bitsLT(PartVT)) {
      // For a truncate, see if we have any information to
      // indicate whether the truncated bits will always be
      // zero or sign-extension.
      if (AssertOp != ISD::DELETED_NODE)
        Val = DAG.getNode(AssertOp, DL, PartVT, Val,
                          DAG.getValueType(ValueVT));
      return DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
    }
    return DAG.getNode(ISD::ANY_EXTEND, DL, ValueVT, Val);
  }

  if (PartVT.isFloatingPoint() && ValueVT.isFloatingPoint()) {
    // FP_ROUND's are always exact here.
    if (ValueVT.bitsLT(Val.getValueType()))
      return DAG.getNode(ISD::FP_ROUND, DL, ValueVT, Val,
                         DAG.getTargetConstant(1, TLI.getPointerTy()));

    return DAG.getNode(ISD::FP_EXTEND, DL, ValueVT, Val);
  }

  if (PartVT.getSizeInBits() == ValueVT.getSizeInBits())
    return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);

  llvm_unreachable("Unknown mismatch!");
}

/// getCopyFromPartsVector - Create a value that contains the specified legal
/// parts combined into the value they represent.  If the parts combine to a
/// type larger than ValueVT then AssertOp can be used to specify whether the
/// extra bits are known to be zero (ISD::AssertZext) or sign extended from
/// ValueVT (ISD::AssertSext).
static SDValue getCopyFromPartsVector(SelectionDAG &DAG, DebugLoc DL,
                                      const SDValue *Parts, unsigned NumParts,
                                      EVT PartVT, EVT ValueVT, const Value *V) {
  assert(ValueVT.isVector() && "Not a vector value");
  assert(NumParts > 0 && "No parts to assemble!");
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SDValue Val = Parts[0];

  // Handle a multi-element vector.
  if (NumParts > 1) {
    EVT IntermediateVT;
    MVT RegisterVT;
    unsigned NumIntermediates;
    unsigned NumRegs =
    TLI.getVectorTypeBreakdown(*DAG.getContext(), ValueVT, IntermediateVT,
                               NumIntermediates, RegisterVT);
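    // Illustrative, target-dependent example (not from the original source):
    // reassembling a v8i32 value on a target whose widest legal vector
    // register is v4i32 yields NumIntermediates == 2 and
    // IntermediateVT == v4i32, so the two v4i32 parts are concatenated back
    // together below with CONCAT_VECTORS.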
    assert(NumRegs == NumParts && "Part count doesn't match vector breakdown!");
    NumParts = NumRegs; // Silence a compiler warning.
    assert(RegisterVT == PartVT.getSimpleVT() &&
           "Part type doesn't match vector breakdown!");
    assert(RegisterVT == Parts[0].getSimpleValueType() &&
           "Part type doesn't match part!");

    // Assemble the parts into intermediate operands.
    SmallVector<SDValue, 8> Ops(NumIntermediates);
    if (NumIntermediates == NumParts) {
      // If the register was not expanded, truncate or copy the value,
      // as appropriate.
      for (unsigned i = 0; i != NumParts; ++i)
        Ops[i] = getCopyFromParts(DAG, DL, &Parts[i], 1,
                                  PartVT, IntermediateVT, V);
    } else if (NumParts > 0) {
      // If the intermediate type was expanded, build the intermediate
      // operands from the parts.
      assert(NumParts % NumIntermediates == 0 &&
             "Must expand into a divisible number of parts!");
      unsigned Factor = NumParts / NumIntermediates;
      for (unsigned i = 0; i != NumIntermediates; ++i)
        Ops[i] = getCopyFromParts(DAG, DL, &Parts[i * Factor], Factor,
                                  PartVT, IntermediateVT, V);
    }

    // Build a vector with BUILD_VECTOR or CONCAT_VECTORS from the
    // intermediate operands.
    Val = DAG.getNode(IntermediateVT.isVector() ?
                      ISD::CONCAT_VECTORS : ISD::BUILD_VECTOR, DL,
                      ValueVT, &Ops[0], NumIntermediates);
  }

  // There is now one part, held in Val.  Correct it to match ValueVT.
  PartVT = Val.getValueType();

  if (PartVT == ValueVT)
    return Val;
  if (PartVT.isVector()) {
    // If the element type of the source/dest vectors are the same, but the
    // parts vector has more elements than the value vector, then we have a
    // vector widening case (e.g. <2 x float> -> <4 x float>).  Extract the
    // elements we want.
    if (PartVT.getVectorElementType() == ValueVT.getVectorElementType()) {
      assert(PartVT.getVectorNumElements() > ValueVT.getVectorNumElements() &&
             "Cannot narrow, it would be a lossy transformation");
      return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ValueVT, Val,
                         DAG.getIntPtrConstant(0));
    }
    // Vector/Vector bitcast.
    if (ValueVT.getSizeInBits() == PartVT.getSizeInBits())
      return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);

    assert(PartVT.getVectorNumElements() == ValueVT.getVectorNumElements() &&
      "Cannot handle this kind of promotion");
    // Promoted vector extract
    bool Smaller = ValueVT.bitsLE(PartVT);
    return DAG.getNode((Smaller ? ISD::TRUNCATE : ISD::ANY_EXTEND),
                       DL, ValueVT, Val);
  }

  // Trivial bitcast if the types are the same size and the destination
  // vector type is legal.
  if (PartVT.getSizeInBits() == ValueVT.getSizeInBits() &&
      TLI.isTypeLegal(ValueVT))
    return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);

  // Handle cases such as i8 -> <1 x i1>
  if (ValueVT.getVectorNumElements() != 1) {
    LLVMContext &Ctx = *DAG.getContext();
    Twine ErrMsg("non-trivial scalar-to-vector conversion");
    if (const Instruction *I = dyn_cast_or_null<Instruction>(V)) {
      if (const CallInst *CI = dyn_cast<CallInst>(I))
        if (isa<InlineAsm>(CI->getCalledValue()))
          ErrMsg = ErrMsg + ", possible invalid constraint for vector type";
      Ctx.emitError(I, ErrMsg);
    } else {
      Ctx.emitError(ErrMsg);
    }
    report_fatal_error("Cannot handle scalar-to-vector conversion!");
  }

  if (ValueVT.getVectorNumElements() == 1 &&
      ValueVT.getVectorElementType() != PartVT) {
    bool Smaller = ValueVT.bitsLE(PartVT);
    Val = DAG.getNode((Smaller ? ISD::TRUNCATE : ISD::ANY_EXTEND),
                       DL, ValueVT.getScalarType(), Val);
  }

  return DAG.getNode(ISD::BUILD_VECTOR, DL, ValueVT, Val);
}

static void getCopyToPartsVector(SelectionDAG &DAG, DebugLoc dl,
                                 SDValue Val, SDValue *Parts, unsigned NumParts,
                                 EVT PartVT, const Value *V);

/// getCopyToParts - Create a series of nodes that contain the specified value
/// split into legal parts.  If the parts contain more bits than Val, then, for
/// integers, ExtendKind can be used to specify how to generate the extra bits.
static void getCopyToParts(SelectionDAG &DAG, DebugLoc DL,
                           SDValue Val, SDValue *Parts, unsigned NumParts,
                           EVT PartVT, const Value *V,
                           ISD::NodeType ExtendKind = ISD::ANY_EXTEND) {
  EVT ValueVT = Val.getValueType();

  // Handle the vector case separately.
  if (ValueVT.isVector())
    return getCopyToPartsVector(DAG, DL, Val, Parts, NumParts, PartVT, V);

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  unsigned PartBits = PartVT.getSizeInBits();
  unsigned OrigNumParts = NumParts;
  assert(TLI.isTypeLegal(PartVT) && "Copying to an illegal type!");

  if (NumParts == 0)
    return;

  assert(!ValueVT.isVector() && "Vector case handled elsewhere");
  if (PartVT == ValueVT) {
    assert(NumParts == 1 && "No-op copy with multiple parts!");
    Parts[0] = Val;
    return;
  }

  if (NumParts * PartBits > ValueVT.getSizeInBits()) {
    // If the parts cover more bits than the value has, promote the value.
    if (PartVT.isFloatingPoint() && ValueVT.isFloatingPoint()) {
      assert(NumParts == 1 && "Do not know what to promote to!");
      Val = DAG.getNode(ISD::FP_EXTEND, DL, PartVT, Val);
    } else {
      assert((PartVT.isInteger() || PartVT == MVT::x86mmx) &&
             ValueVT.isInteger() &&
             "Unknown mismatch!");
      ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
      Val = DAG.getNode(ExtendKind, DL, ValueVT, Val);
      if (PartVT == MVT::x86mmx)
        Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
    }
  } else if (PartBits == ValueVT.getSizeInBits()) {
    // Different types of the same size.
    assert(NumParts == 1 && PartVT != ValueVT);
    Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
  } else if (NumParts * PartBits < ValueVT.getSizeInBits()) {
    // If the parts cover fewer bits than the value has, truncate the value.
    assert((PartVT.isInteger() || PartVT == MVT::x86mmx) &&
           ValueVT.isInteger() &&
           "Unknown mismatch!");
    ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
    Val = DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
    if (PartVT == MVT::x86mmx)
      Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
  }

  // The value may have changed - recompute ValueVT.
  ValueVT = Val.getValueType();
  assert(NumParts * PartBits == ValueVT.getSizeInBits() &&
         "Failed to tile the value with PartVT!");

  if (NumParts == 1) {
    if (PartVT != ValueVT) {
      LLVMContext &Ctx = *DAG.getContext();
      Twine ErrMsg("scalar-to-vector conversion failed");
      if (const Instruction *I = dyn_cast_or_null<Instruction>(V)) {
        if (const CallInst *CI = dyn_cast<CallInst>(I))
          if (isa<InlineAsm>(CI->getCalledValue()))
            ErrMsg = ErrMsg + ", possible invalid constraint for vector type";
        Ctx.emitError(I, ErrMsg);
      } else {
        Ctx.emitError(ErrMsg);
      }
    }

    Parts[0] = Val;
    return;
  }

  // Expand the value into multiple parts.
  if (NumParts & (NumParts - 1)) {
    // The number of parts is not a power of 2.  Split off and copy the tail.
    assert(PartVT.isInteger() && ValueVT.isInteger() &&
           "Do not know what to expand to!");
    unsigned RoundParts = 1 << Log2_32(NumParts);
    unsigned RoundBits = RoundParts * PartBits;
    unsigned OddParts = NumParts - RoundParts;
    SDValue OddVal = DAG.getNode(ISD::SRL, DL, ValueVT, Val,
                                 DAG.getIntPtrConstant(RoundBits));
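    // Illustrative example: splitting an i96 value into three i32 parts gives
    // RoundParts == 2 and RoundBits == 64, so the high 32 bits (OddParts == 1)
    // are shifted down and copied out first; the remaining i64 is then handled
    // by the power-of-2 expansion below.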
    getCopyToParts(DAG, DL, OddVal, Parts + RoundParts, OddParts, PartVT, V);

    if (TLI.isBigEndian())
      // The odd parts were reversed by getCopyToParts - unreverse them.
      std::reverse(Parts + RoundParts, Parts + NumParts);

    NumParts = RoundParts;
    ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
    Val = DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
  }

  // The number of parts is a power of 2.  Repeatedly bisect the value using
  // EXTRACT_ELEMENT.
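  // For instance (illustrative), an i128 being split into four i32 parts is
  // first bisected into two i64 halves, and each half is bisected again into
  // two i32 parts on the next iteration of the loop below.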
  Parts[0] = DAG.getNode(ISD::BITCAST, DL,
                         EVT::getIntegerVT(*DAG.getContext(),
                                           ValueVT.getSizeInBits()),
                         Val);

  for (unsigned StepSize = NumParts; StepSize > 1; StepSize /= 2) {
    for (unsigned i = 0; i < NumParts; i += StepSize) {
      unsigned ThisBits = StepSize * PartBits / 2;
      EVT ThisVT = EVT::getIntegerVT(*DAG.getContext(), ThisBits);
      SDValue &Part0 = Parts[i];
      SDValue &Part1 = Parts[i+StepSize/2];

      Part1 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL,
                          ThisVT, Part0, DAG.getIntPtrConstant(1));
      Part0 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL,
                          ThisVT, Part0, DAG.getIntPtrConstant(0));

      if (ThisBits == PartBits && ThisVT != PartVT) {
        Part0 = DAG.getNode(ISD::BITCAST, DL, PartVT, Part0);
        Part1 = DAG.getNode(ISD::BITCAST, DL, PartVT, Part1);
      }
    }
  }

  if (TLI.isBigEndian())
    std::reverse(Parts, Parts + OrigNumParts);
}

/// getCopyToPartsVector - Create a series of nodes that contain the specified
/// value split into legal parts.
static void getCopyToPartsVector(SelectionDAG &DAG, DebugLoc DL,
                                 SDValue Val, SDValue *Parts, unsigned NumParts,
                                 EVT PartVT, const Value *V) {
  EVT ValueVT = Val.getValueType();
  assert(ValueVT.isVector() && "Not a vector");
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  if (NumParts == 1) {
    if (PartVT == ValueVT) {
      // Nothing to do.
    } else if (PartVT.getSizeInBits() == ValueVT.getSizeInBits()) {
      // Bitconvert vector->vector case.
      Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
    } else if (PartVT.isVector() &&
               PartVT.getVectorElementType() == ValueVT.getVectorElementType() &&
               PartVT.getVectorNumElements() > ValueVT.getVectorNumElements()) {
      EVT ElementVT = PartVT.getVectorElementType();
      // Vector widening case, e.g. <2 x float> -> <4 x float>.  Shuffle in
      // undef elements.
      SmallVector<SDValue, 16> Ops;
      for (unsigned i = 0, e = ValueVT.getVectorNumElements(); i != e; ++i)
        Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL,
                                  ElementVT, Val, DAG.getIntPtrConstant(i)));

      for (unsigned i = ValueVT.getVectorNumElements(),
           e = PartVT.getVectorNumElements(); i != e; ++i)
        Ops.push_back(DAG.getUNDEF(ElementVT));

      Val = DAG.getNode(ISD::BUILD_VECTOR, DL, PartVT, &Ops[0], Ops.size());

      // FIXME: Use CONCAT for 2x -> 4x.

      //SDValue UndefElts = DAG.getUNDEF(VectorTy);
      //Val = DAG.getNode(ISD::CONCAT_VECTORS, DL, PartVT, Val, UndefElts);
    } else if (PartVT.isVector() &&
               PartVT.getVectorElementType().bitsGE(
                 ValueVT.getVectorElementType()) &&
               PartVT.getVectorNumElements() == ValueVT.getVectorNumElements()) {

      // Promoted vector extract
      bool Smaller = PartVT.bitsLE(ValueVT);
      Val = DAG.getNode((Smaller ? ISD::TRUNCATE : ISD::ANY_EXTEND),
                        DL, PartVT, Val);
    } else {
      // Vector -> scalar conversion.
      assert(ValueVT.getVectorNumElements() == 1 &&
             "Only trivial vector-to-scalar conversions should get here!");
      Val = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL,
                        PartVT, Val, DAG.getIntPtrConstant(0));

      bool Smaller = ValueVT.bitsLE(PartVT);
      Val = DAG.getNode((Smaller ? ISD::TRUNCATE : ISD::ANY_EXTEND),
                        DL, PartVT, Val);
    }

    Parts[0] = Val;
    return;
  }

  // Handle a multi-element vector.
  EVT IntermediateVT;
  MVT RegisterVT;
  unsigned NumIntermediates;
  unsigned NumRegs = TLI.getVectorTypeBreakdown(*DAG.getContext(), ValueVT,
                                                IntermediateVT,
                                                NumIntermediates, RegisterVT);
  unsigned NumElements = ValueVT.getVectorNumElements();
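  // Illustrative, target-dependent example: copying out a v8f32 value on a
  // target whose legal vector register is v4f32 gives NumIntermediates == 2
  // and IntermediateVT == v4f32; each half is peeled off with
  // EXTRACT_SUBVECTOR and then copied into its register-sized parts.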

  assert(NumRegs == NumParts && "Part count doesn't match vector breakdown!");
  NumParts = NumRegs; // Silence a compiler warning.
  assert(RegisterVT == PartVT.getSimpleVT() &&
         "Part type doesn't match vector breakdown!");

  // Split the vector into intermediate operands.
  SmallVector<SDValue, 8> Ops(NumIntermediates);
  for (unsigned i = 0; i != NumIntermediates; ++i) {
    if (IntermediateVT.isVector())
      Ops[i] = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL,
                           IntermediateVT, Val,
                   DAG.getIntPtrConstant(i * (NumElements / NumIntermediates)));
    else
      Ops[i] = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL,
                           IntermediateVT, Val, DAG.getIntPtrConstant(i));
  }

  // Split the intermediate operands into legal parts.
  if (NumParts == NumIntermediates) {
    // If the register was not expanded, promote or copy the value,
    // as appropriate.
    for (unsigned i = 0; i != NumParts; ++i)
      getCopyToParts(DAG, DL, Ops[i], &Parts[i], 1, PartVT, V);
  } else if (NumParts > 0) {
    // If the intermediate type was expanded, split each value into
    // legal parts.
    assert(NumParts % NumIntermediates == 0 &&
           "Must expand into a divisible number of parts!");
    unsigned Factor = NumParts / NumIntermediates;
    for (unsigned i = 0; i != NumIntermediates; ++i)
      getCopyToParts(DAG, DL, Ops[i], &Parts[i*Factor], Factor, PartVT, V);
  }
}

namespace {
  /// RegsForValue - This struct represents the registers (physical or virtual)
  /// that a particular set of values is assigned, and the type information
  /// about the value. The most common situation is to represent one value at a
  /// time, but struct or array values are handled element-wise as multiple
  /// values.  The splitting of aggregates is performed recursively, so that we
  /// never have aggregate-typed registers. The values at this point do not
  /// necessarily have legal types, so each value may require one or more
  /// registers of some legal type.
  ///
  struct RegsForValue {
    /// ValueVTs - The value types of the values, which may not be legal, and
    /// may need to be promoted or synthesized from one or more registers.
    ///
    SmallVector<EVT, 4> ValueVTs;

    /// RegVTs - The value types of the registers. This is the same size as
    /// ValueVTs and it records, for each value, what the type of the assigned
    /// register or registers is. (Individual values are never synthesized
    /// from more than one type of register.)
    ///
    /// With virtual registers, the contents of RegVTs is redundant with TLI's
    /// getRegisterType member function, however with physical registers
    /// it is necessary to have a separate record of the types.
    ///
    SmallVector<MVT, 4> RegVTs;

    /// Regs - This list holds the registers assigned to the values.
    /// Each legal or promoted value requires one register, and each
    /// expanded value requires multiple registers.
    ///
    SmallVector<unsigned, 4> Regs;

    RegsForValue() {}

    RegsForValue(const SmallVector<unsigned, 4> &regs,
                 MVT regvt, EVT valuevt)
      : ValueVTs(1, valuevt), RegVTs(1, regvt), Regs(regs) {}

    RegsForValue(LLVMContext &Context, const TargetLowering &tli,
                 unsigned Reg, Type *Ty) {
      ComputeValueVTs(tli, Ty, ValueVTs);
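      // For each value produced by ComputeValueVTs (aggregates are split
      // recursively), record the run of consecutive registers that will hold
      // it. Illustrative example: an i64 value on a typical 32-bit target
      // yields NumRegs == 2, so registers Reg and Reg+1 of type i32 are added.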

      for (unsigned Value = 0, e = ValueVTs.size(); Value != e; ++Value) {
        EVT ValueVT = ValueVTs[Value];
        unsigned NumRegs = tli.getNumRegisters(Context, ValueVT);
        MVT RegisterVT = tli.getRegisterType(Context, ValueVT);
        for (unsigned i = 0; i != NumRegs; ++i)
          Regs.push_back(Reg + i);
        RegVTs.push_back(RegisterVT);
        Reg += NumRegs;
      }
    }

    /// areValueTypesLegal - Return true if types of all the values are legal.
    bool areValueTypesLegal(const TargetLowering &TLI) {
      for (unsigned Value = 0, e = ValueVTs.size(); Value != e; ++Value) {
        MVT RegisterVT = RegVTs[Value];
        if (!TLI.isTypeLegal(RegisterVT))
          return false;
      }
      return true;
    }

    /// append - Add the specified values to this one.
    void append(const RegsForValue &RHS) {
      ValueVTs.append(RHS.ValueVTs.begin(), RHS.ValueVTs.end());
      RegVTs.append(RHS.RegVTs.begin(), RHS.RegVTs.end());
      Regs.append(RHS.Regs.begin(), RHS.Regs.end());
    }

    /// getCopyFromRegs - Emit a series of CopyFromReg nodes that copies from
    /// this value and returns the result as a ValueVTs value. This uses
    /// Chain/Flag as the input and updates them for the output Chain/Flag.
    /// If the Flag pointer is NULL, no flag is used.
    SDValue getCopyFromRegs(SelectionDAG &DAG, FunctionLoweringInfo &FuncInfo,
                            DebugLoc dl,
                            SDValue &Chain, SDValue *Flag,
                            const Value *V = 0) const;

    /// getCopyToRegs - Emit a series of CopyToReg nodes that copies the
    /// specified value into the registers specified by this object. This uses
    /// Chain/Flag as the input and updates them for the output Chain/Flag.
    /// If the Flag pointer is NULL, no flag is used.
    void getCopyToRegs(SDValue Val, SelectionDAG &DAG, DebugLoc dl,
                       SDValue &Chain, SDValue *Flag, const Value *V) const;

    /// AddInlineAsmOperands - Add this value to the specified inlineasm node
    /// operand list.  This adds the code marker, matching input operand index
    /// (if applicable), and includes the number of values added into it.
    void AddInlineAsmOperands(unsigned Kind,
                              bool HasMatching, unsigned MatchingIdx,
                              SelectionDAG &DAG,
                              std::vector<SDValue> &Ops) const;
  };
}

/// getCopyFromRegs - Emit a series of CopyFromReg nodes that copies from
/// this value and returns the result as a ValueVT value.  This uses
/// Chain/Flag as the input and updates them for the output Chain/Flag.
/// If the Flag pointer is NULL, no flag is used.
SDValue RegsForValue::getCopyFromRegs(SelectionDAG &DAG,
                                      FunctionLoweringInfo &FuncInfo,
                                      DebugLoc dl,
                                      SDValue &Chain, SDValue *Flag,
                                      const Value *V) const {
  // A Value with type {} or [0 x %t] needs no registers.
  if (ValueVTs.empty())
    return SDValue();

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  // Assemble the legal parts into the final values.
  SmallVector<SDValue, 4> Values(ValueVTs.size());
  SmallVector<SDValue, 8> Parts;
  for (unsigned Value = 0, Part = 0, e = ValueVTs.size(); Value != e; ++Value) {
    // Copy the legal parts from the registers.
    EVT ValueVT = ValueVTs[Value];
    unsigned NumRegs = TLI.getNumRegisters(*DAG.getContext(), ValueVT);
    MVT RegisterVT = RegVTs[Value];

    Parts.resize(NumRegs);
    for (unsigned i = 0; i != NumRegs; ++i) {
      SDValue P;
      if (Flag == 0) {
        P = DAG.getCopyFromReg(Chain, dl, Regs[Part+i], RegisterVT);
      } else {
        P = DAG.getCopyFromReg(Chain, dl, Regs[Part+i], RegisterVT, *Flag);
        *Flag = P.getValue(2);
      }

      Chain = P.getValue(1);
      Parts[i] = P;

      // If the source register was virtual and if we know something about it,
      // add an assert node.
      if (!TargetRegisterInfo::isVirtualRegister(Regs[Part+i]) ||
          !RegisterVT.isInteger() || RegisterVT.isVector())
        continue;

      const FunctionLoweringInfo::LiveOutInfo *LOI =
        FuncInfo.GetLiveOutRegInfo(Regs[Part+i]);
      if (!LOI)
        continue;

      unsigned RegSize = RegisterVT.getSizeInBits();
      unsigned NumSignBits = LOI->NumSignBits;
      unsigned NumZeroBits = LOI->KnownZero.countLeadingOnes();

      // FIXME: We capture more information than the dag can represent.  For
      // now, just use the tightest assertzext/assertsext possible.
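      // Illustrative example: if a 32-bit register is known to have 24 sign
      // bits and nothing is known about leading zeros, the chain below picks
      // FromVT == MVT::i16 (the tightest width for which
      // NumSignBits > RegSize-16 holds) and wraps the copy in an AssertSext.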
      bool isSExt = true;
      EVT FromVT(MVT::Other);
      if (NumSignBits == RegSize)
        isSExt = true, FromVT = MVT::i1;   // ASSERT SEXT 1
      else if (NumZeroBits >= RegSize-1)
        isSExt = false, FromVT = MVT::i1;  // ASSERT ZEXT 1
      else if (NumSignBits > RegSize-8)
        isSExt = true, FromVT = MVT::i8;   // ASSERT SEXT 8
      else if (NumZeroBits >= RegSize-8)
        isSExt = false, FromVT = MVT::i8;  // ASSERT ZEXT 8
      else if (NumSignBits > RegSize-16)
        isSExt = true, FromVT = MVT::i16;  // ASSERT SEXT 16
      else if (NumZeroBits >= RegSize-16)
        isSExt = false, FromVT = MVT::i16; // ASSERT ZEXT 16
      else if (NumSignBits > RegSize-32)
        isSExt = true, FromVT = MVT::i32;  // ASSERT SEXT 32
      else if (NumZeroBits >= RegSize-32)
        isSExt = false, FromVT = MVT::i32; // ASSERT ZEXT 32
      else
        continue;

      // Add an assertion node.
      assert(FromVT != MVT::Other);
      Parts[i] = DAG.getNode(isSExt ? ISD::AssertSext : ISD::AssertZext, dl,
                             RegisterVT, P, DAG.getValueType(FromVT));
    }

    Values[Value] = getCopyFromParts(DAG, dl, Parts.begin(),
                                     NumRegs, RegisterVT, ValueVT, V);
    Part += NumRegs;
    Parts.clear();
  }

  return DAG.getNode(ISD::MERGE_VALUES, dl,
                     DAG.getVTList(&ValueVTs[0], ValueVTs.size()),
                     &Values[0], ValueVTs.size());
}

/// getCopyToRegs - Emit a series of CopyToReg nodes that copies the
/// specified value into the registers specified by this object.  This uses
/// Chain/Flag as the input and updates them for the output Chain/Flag.
/// If the Flag pointer is NULL, no flag is used.
void RegsForValue::getCopyToRegs(SDValue Val, SelectionDAG &DAG, DebugLoc dl,
                                 SDValue &Chain, SDValue *Flag,
                                 const Value *V) const {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  // Get the list of the value's legal parts.
  unsigned NumRegs = Regs.size();
  SmallVector<SDValue, 8> Parts(NumRegs);
  for (unsigned Value = 0, Part = 0, e = ValueVTs.size(); Value != e; ++Value) {
    EVT ValueVT = ValueVTs[Value];
    unsigned NumParts = TLI.getNumRegisters(*DAG.getContext(), ValueVT);
    MVT RegisterVT = RegVTs[Value];
    ISD::NodeType ExtendKind =
      TLI.isZExtFree(Val, RegisterVT)? ISD::ZERO_EXTEND: ISD::ANY_EXTEND;
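    // Prefer ZERO_EXTEND over ANY_EXTEND when the target reports that
    // zero-extending this value to RegisterVT is free; the widened copy then
    // carries known-zero high bits at no extra cost.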
|
2009-10-30 01:27:03 +00:00
|
|
|
|
2010-08-24 23:20:40 +00:00
|
|
|
getCopyToParts(DAG, dl, Val.getValue(Val.getResNo() + Value),
|
2012-12-06 19:13:27 +00:00
|
|
|
&Parts[Part], NumParts, RegisterVT, V, ExtendKind);
|
2010-05-29 17:53:24 +00:00
|
|
|
Part += NumParts;
|
|
|
|
}
|
2009-01-16 06:53:46 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
// Copy the parts into the registers.
|
|
|
|
SmallVector<SDValue, 8> Chains(NumRegs);
|
|
|
|
for (unsigned i = 0; i != NumRegs; ++i) {
|
|
|
|
SDValue Part;
|
|
|
|
if (Flag == 0) {
|
|
|
|
Part = DAG.getCopyToReg(Chain, dl, Regs[i], Parts[i]);
|
2008-09-03 16:12:24 +00:00
|
|
|
} else {
|
2010-05-29 17:53:24 +00:00
|
|
|
Part = DAG.getCopyToReg(Chain, dl, Regs[i], Parts[i], *Flag);
|
|
|
|
*Flag = Part.getValue(1);
|
2008-09-03 16:12:24 +00:00
|
|
|
}
|
2009-01-16 06:53:46 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
Chains[i] = Part.getValue(0);
|
2008-09-03 16:12:24 +00:00
|
|
|
}
|
2009-01-16 06:53:46 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
if (NumRegs == 1 || Flag)
|
|
|
|
// If NumRegs > 1 && Flag is used then the use of the last CopyToReg is
|
|
|
|
// flagged to it. That is the CopyToReg nodes and the user are considered
|
|
|
|
// a single scheduling unit. If we create a TokenFactor and return it as
|
|
|
|
// chain, then the TokenFactor is both a predecessor (operand) of the
|
|
|
|
// user as well as a successor (the TF operands are flagged to the user).
|
|
|
|
// c1, f1 = CopyToReg
|
|
|
|
// c2, f2 = CopyToReg
|
|
|
|
// c3 = TokenFactor c1, c2
|
|
|
|
// ...
|
|
|
|
// = op c3, ..., f2
|
|
|
|
Chain = Chains[NumRegs-1];
|
|
|
|
else
|
|
|
|
Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &Chains[0], NumRegs);
|
2008-09-03 16:12:24 +00:00
|
|
|
}
|
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
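// For example, copying a value of IR type i64 into this object on a target
// whose only legal integer register type is i32 gives NumParts == 2:
// getCopyToParts splits the value and two CopyToReg nodes are emitted, glued
// through *Flag when the caller needs them scheduled as a single unit.
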
/// AddInlineAsmOperands - Add this value to the specified inlineasm node
/// operand list.  This adds the code marker and includes the number of
/// values added into it.
void RegsForValue::AddInlineAsmOperands(unsigned Code, bool HasMatching,
                                        unsigned MatchingIdx,
                                        SelectionDAG &DAG,
                                        std::vector<SDValue> &Ops) const {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  unsigned Flag = InlineAsm::getFlagWord(Code, Regs.size());
  if (HasMatching)
    Flag = InlineAsm::getFlagWordForMatchingOp(Flag, MatchingIdx);
  else if (!Regs.empty() &&
           TargetRegisterInfo::isVirtualRegister(Regs.front())) {
    // Put the register class of the virtual registers in the flag word.  That
    // way, later passes can recompute register class constraints for inline
    // assembly as well as normal instructions.
    // Don't do this for tied operands that can use the regclass information
    // from the def.
    const MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
    const TargetRegisterClass *RC = MRI.getRegClass(Regs.front());
    Flag = InlineAsm::getFlagWordForRegClass(Flag, RC->getID());
  }

  SDValue Res = DAG.getTargetConstant(Flag, MVT::i32);
  Ops.push_back(Res);

  for (unsigned Value = 0, Reg = 0, e = ValueVTs.size(); Value != e; ++Value) {
    unsigned NumRegs = TLI.getNumRegisters(*DAG.getContext(), ValueVTs[Value]);
    MVT RegisterVT = RegVTs[Value];
    for (unsigned i = 0; i != NumRegs; ++i) {
      assert(Reg < Regs.size() && "Mismatch in # registers expected");
      Ops.push_back(DAG.getRegister(Regs[Reg++], RegisterVT));
    }
  }
}

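// The operand list built above has the shape
//   flag-word, reg0, reg1, ..., regN-1
// where the flag word packs the constraint kind with Regs.size() and, when
// applicable, the matching-operand index or the virtual registers' register
// class; the exact bit layout is defined by InlineAsm::getFlagWord and
// friends, not here.
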
void SelectionDAGBuilder::init(GCFunctionInfo *gfi, AliasAnalysis &aa,
                               const TargetLibraryInfo *li) {
  AA = &aa;
  GFI = gfi;
  LibInfo = li;
  TD = DAG.getTarget().getDataLayout();
  Context = DAG.getContext();
  LPadToCallSiteMap.clear();
}

/// clear - Clear out the current SelectionDAG and the associated
/// state and prepare this SelectionDAGBuilder object to be used
/// for a new block. This doesn't clear out information about
/// additional blocks that are needed to complete switch lowering
/// or PHI node updating; that information is cleared out as it is
/// consumed.
void SelectionDAGBuilder::clear() {
  NodeMap.clear();
  UnusedArgNodeMap.clear();
  PendingLoads.clear();
  PendingExports.clear();
  CurDebugLoc = DebugLoc();
  HasTailCall = false;
}

/// clearDanglingDebugInfo - Clear the dangling debug information
/// map. This function is separated from the clear so that debug
/// information that is dangling in a basic block can be properly
/// resolved in a different basic block. This allows the
/// SelectionDAG to resolve dangling debug information attached
/// to PHI nodes.
void SelectionDAGBuilder::clearDanglingDebugInfo() {
  DanglingDebugInfoMap.clear();
}

/// getRoot - Return the current virtual root of the Selection DAG,
/// flushing any PendingLoad items. This must be done before emitting
/// a store or any other node that may need to be ordered after any
/// prior load instructions.
///
SDValue SelectionDAGBuilder::getRoot() {
  if (PendingLoads.empty())
    return DAG.getRoot();

  if (PendingLoads.size() == 1) {
    SDValue Root = PendingLoads[0];
    DAG.setRoot(Root);
    PendingLoads.clear();
    return Root;
  }

  // Otherwise, we have to make a token factor node.
  SDValue Root = DAG.getNode(ISD::TokenFactor, getCurDebugLoc(), MVT::Other,
                             &PendingLoads[0], PendingLoads.size());
  PendingLoads.clear();
  DAG.setRoot(Root);
  return Root;
}

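// For example, after two independent loads have been visited there are two
// pending chains; a following store calls getRoot(), which emits
//   TokenFactor ch0, ch1
// and uses it as the store's chain, ordering the store after both loads.
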
/// getControlRoot - Similar to getRoot, but instead of flushing all the
/// PendingLoad items, flush all the PendingExports items. It is necessary
/// to do this before emitting a terminator instruction.
///
SDValue SelectionDAGBuilder::getControlRoot() {
  SDValue Root = DAG.getRoot();

  if (PendingExports.empty())
    return Root;

  // Turn all of the CopyToReg chains into one factored node.
  if (Root.getOpcode() != ISD::EntryToken) {
    unsigned i = 0, e = PendingExports.size();
    for (; i != e; ++i) {
      assert(PendingExports[i].getNode()->getNumOperands() > 1);
      if (PendingExports[i].getNode()->getOperand(0) == Root)
        break;  // Don't add the root if we already indirectly depend on it.
    }

    if (i == e)
      PendingExports.push_back(Root);
  }

  Root = DAG.getNode(ISD::TokenFactor, getCurDebugLoc(), MVT::Other,
                     &PendingExports[0],
                     PendingExports.size());
  PendingExports.clear();
  DAG.setRoot(Root);
  return Root;
}

void SelectionDAGBuilder::AssignOrderingToNode(const SDNode *Node) {
  if (DAG.GetOrdering(Node) != 0) return; // Already has ordering.
  DAG.AssignOrdering(Node, SDNodeOrder);

  for (unsigned I = 0, E = Node->getNumOperands(); I != E; ++I)
    AssignOrderingToNode(Node->getOperand(I).getNode());
}

void SelectionDAGBuilder::visit(const Instruction &I) {
  // Set up outgoing PHI node register values before emitting the terminator.
  if (isa<TerminatorInst>(&I))
    HandlePHINodesInSuccessorBlocks(I.getParent());

  CurDebugLoc = I.getDebugLoc();

  visit(I.getOpcode(), I);

  if (!isa<TerminatorInst>(&I) && !HasTailCall)
    CopyToExportRegsIfNeeded(&I);

  CurDebugLoc = DebugLoc();
}

void SelectionDAGBuilder::visitPHI(const PHINode &) {
  llvm_unreachable("SelectionDAGBuilder shouldn't visit PHI nodes!");
}

void SelectionDAGBuilder::visit(unsigned Opcode, const User &I) {
  // Note: this doesn't use InstVisitor, because it has to work with
  // ConstantExpr's in addition to instructions.
  switch (Opcode) {
  default: llvm_unreachable("Unknown instruction type encountered!");
    // Build the switch statement using the Instruction.def file.
#define HANDLE_INST(NUM, OPCODE, CLASS) \
    case Instruction::OPCODE: visit##OPCODE((const CLASS&)I); break;
#include "llvm/Instruction.def"
  }

  // Assign the ordering to the freshly created DAG nodes.
  if (NodeMap.count(&I)) {
    ++SDNodeOrder;
    AssignOrderingToNode(getValue(&I).getNode());
  }
}

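// The HANDLE_INST expansion above turns each opcode into a dispatch such as
//   case Instruction::Add: visitAdd((const BinaryOperator&)I); break;
// so every opcode listed in Instruction.def gets a visit##OPCODE handler.
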
// resolveDanglingDebugInfo - if we saw an earlier dbg_value referring to V,
|
|
|
|
// generate the debug data structures now that we've seen its definition.
|
|
|
|
void SelectionDAGBuilder::resolveDanglingDebugInfo(const Value *V,
|
|
|
|
SDValue Val) {
|
|
|
|
DanglingDebugInfo &DDI = DanglingDebugInfoMap[V];
|
2010-08-26 23:35:15 +00:00
|
|
|
if (DDI.getDI()) {
|
|
|
|
const DbgValueInst *DI = DDI.getDI();
|
2010-07-16 00:02:08 +00:00
|
|
|
DebugLoc dl = DDI.getdl();
|
|
|
|
unsigned DbgSDNodeOrder = DDI.getSDNodeOrder();
|
2010-08-26 23:35:15 +00:00
|
|
|
MDNode *Variable = DI->getVariable();
|
|
|
|
uint64_t Offset = DI->getOffset();
|
2010-07-16 00:02:08 +00:00
|
|
|
SDDbgValue *SDV;
|
|
|
|
if (Val.getNode()) {
|
2010-08-25 20:39:26 +00:00
|
|
|
if (!EmitFuncArgumentDbgValue(V, Variable, Offset, Val)) {
|
2010-07-16 00:02:08 +00:00
|
|
|
SDV = DAG.getDbgValue(Variable, Val.getNode(),
|
|
|
|
Val.getResNo(), Offset, dl, DbgSDNodeOrder);
|
|
|
|
DAG.AddDbgValue(SDV, Val.getNode(), false);
|
|
|
|
}
|
2011-02-25 21:41:48 +00:00
|
|
|
} else
|
2012-02-23 03:39:43 +00:00
|
|
|
DEBUG(dbgs() << "Dropping debug info for " << DI << "\n");
|
2010-07-16 00:02:08 +00:00
|
|
|
DanglingDebugInfoMap[V] = DanglingDebugInfo();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2011-09-30 22:19:53 +00:00
|
|
|
/// getValue - Return an SDValue for the given Value.
|
2010-05-29 17:53:24 +00:00
|
|
|
SDValue SelectionDAGBuilder::getValue(const Value *V) {
|
2010-07-01 01:59:43 +00:00
|
|
|
// If we already have an SDValue for this value, use it. It's important
|
|
|
|
// to do this first, so that we don't create a CopyFromReg if we already
|
|
|
|
// have a regular SDValue.
|
2010-05-29 17:53:24 +00:00
|
|
|
SDValue &N = NodeMap[V];
|
|
|
|
if (N.getNode()) return N;
|
2008-09-03 16:12:24 +00:00
|
|
|
|
2010-07-01 01:59:43 +00:00
|
|
|
// If there's a virtual register allocated and initialized for this
|
|
|
|
// value, use it.
|
|
|
|
DenseMap<const Value *, unsigned>::iterator It = FuncInfo.ValueMap.find(V);
|
|
|
|
if (It != FuncInfo.ValueMap.end()) {
|
|
|
|
unsigned InReg = It->second;
|
|
|
|
RegsForValue RFV(*DAG.getContext(), TLI, InReg, V->getType());
|
|
|
|
SDValue Chain = DAG.getEntryNode();
|
2012-09-26 04:04:19 +00:00
|
|
|
N = RFV.getCopyFromRegs(DAG, FuncInfo, getCurDebugLoc(), Chain, NULL, V);
|
2011-01-25 18:09:58 +00:00
|
|
|
resolveDanglingDebugInfo(V, N);
|
|
|
|
return N;
|
2010-07-01 01:59:43 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Otherwise create a new SDValue and remember it.
|
|
|
|
SDValue Val = getValueImpl(V);
|
|
|
|
NodeMap[V] = Val;
|
2010-07-16 00:02:08 +00:00
|
|
|
resolveDanglingDebugInfo(V, Val);
|
2010-07-01 01:59:43 +00:00
|
|
|
return Val;
|
|
|
|
}
|
|
|
|
|
|
|
|
/// getNonRegisterValue - Return an SDValue for the given Value, but
|
|
|
|
/// don't look in FuncInfo.ValueMap for a virtual register.
|
|
|
|
SDValue SelectionDAGBuilder::getNonRegisterValue(const Value *V) {
|
|
|
|
// If we already have an SDValue for this value, use it.
|
|
|
|
SDValue &N = NodeMap[V];
|
|
|
|
if (N.getNode()) return N;
|
|
|
|
|
|
|
|
// Otherwise create a new SDValue and remember it.
|
|
|
|
SDValue Val = getValueImpl(V);
|
|
|
|
NodeMap[V] = Val;
|
2010-07-16 00:02:08 +00:00
|
|
|
resolveDanglingDebugInfo(V, Val);
|
2010-07-01 01:59:43 +00:00
|
|
|
return Val;
|
|
|
|
}
|
|
|
|
|
2010-07-16 00:02:08 +00:00
|
|
|
/// getValueImpl - Helper function for getValue and getNonRegisterValue.
|
2010-07-01 01:59:43 +00:00
|
|
|
/// Create an SDValue for the given value.
|
|
|
|
SDValue SelectionDAGBuilder::getValueImpl(const Value *V) {
|
2010-05-29 17:53:24 +00:00
|
|
|
if (const Constant *C = dyn_cast<Constant>(V)) {
|
|
|
|
EVT VT = TLI.getValueType(V->getType(), true);
|
2009-01-16 06:53:46 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
if (const ConstantInt *CI = dyn_cast<ConstantInt>(C))
|
2010-07-01 01:59:43 +00:00
|
|
|
return DAG.getConstant(*CI, VT);
|
2009-01-16 06:53:46 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
if (const GlobalValue *GV = dyn_cast<GlobalValue>(C))
|
2010-07-06 22:08:15 +00:00
|
|
|
return DAG.getGlobalAddress(GV, getCurDebugLoc(), VT);
|
2008-09-03 16:12:24 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
if (isa<ConstantPointerNull>(C))
|
2010-07-01 01:59:43 +00:00
|
|
|
return DAG.getConstant(0, TLI.getPointerTy());
|
2009-01-16 06:53:46 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C))
|
2010-07-01 01:59:43 +00:00
|
|
|
return DAG.getConstantFP(*CFP, VT);
|
2008-09-03 16:12:24 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
if (isa<UndefValue>(C) && !V->getType()->isAggregateType())
|
2010-07-01 01:59:43 +00:00
|
|
|
return DAG.getUNDEF(VT);
|
2008-09-03 16:12:24 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
|
|
|
|
visit(CE->getOpcode(), *CE);
|
|
|
|
SDValue N1 = NodeMap[V];
|
|
|
|
assert(N1.getNode() && "visit didn't populate the NodeMap!");
|
|
|
|
return N1;
|
|
|
|
}
|
2008-10-17 21:16:08 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
if (isa<ConstantStruct>(C) || isa<ConstantArray>(C)) {
|
|
|
|
SmallVector<SDValue, 4> Constants;
|
|
|
|
for (User::const_op_iterator OI = C->op_begin(), OE = C->op_end();
|
|
|
|
OI != OE; ++OI) {
|
|
|
|
SDNode *Val = getValue(*OI).getNode();
|
|
|
|
// If the operand is an empty aggregate, there are no values.
|
|
|
|
if (!Val) continue;
|
|
|
|
// Add each leaf value from the operand to the Constants list
|
|
|
|
// to form a flattened list of all the values.
|
|
|
|
for (unsigned i = 0, e = Val->getNumValues(); i != e; ++i)
|
|
|
|
Constants.push_back(SDValue(Val, i));
|
2008-09-03 16:12:24 +00:00
|
|
|
}
|
2008-10-17 21:16:08 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
return DAG.getMergeValues(&Constants[0], Constants.size(),
|
|
|
|
getCurDebugLoc());
|
2008-09-03 16:12:24 +00:00
|
|
|
}
|
2012-01-24 13:41:11 +00:00
|
|
|
|
|
|
|
if (const ConstantDataSequential *CDS =
|
|
|
|
dyn_cast<ConstantDataSequential>(C)) {
|
|
|
|
SmallVector<SDValue, 4> Ops;
|
2012-01-25 01:27:20 +00:00
|
|
|
for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
|
2012-01-24 13:41:11 +00:00
|
|
|
SDNode *Val = getValue(CDS->getElementAsConstant(i)).getNode();
|
|
|
|
// Add each leaf value from the operand to the Constants list
|
|
|
|
// to form a flattened list of all the values.
|
|
|
|
for (unsigned i = 0, e = Val->getNumValues(); i != e; ++i)
|
|
|
|
Ops.push_back(SDValue(Val, i));
|
|
|
|
}
|
|
|
|
|
|
|
|
if (isa<ArrayType>(CDS->getType()))
|
|
|
|
return DAG.getMergeValues(&Ops[0], Ops.size(), getCurDebugLoc());
|
|
|
|
return NodeMap[V] = DAG.getNode(ISD::BUILD_VECTOR, getCurDebugLoc(),
|
|
|
|
VT, &Ops[0], Ops.size());
|
|
|
|
}
|
2008-10-17 21:16:08 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
if (C->getType()->isStructTy() || C->getType()->isArrayTy()) {
|
|
|
|
assert((isa<ConstantAggregateZero>(C) || isa<UndefValue>(C)) &&
|
|
|
|
"Unknown struct or array constant!");
|
2008-10-17 21:16:08 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
SmallVector<EVT, 4> ValueVTs;
|
|
|
|
ComputeValueVTs(TLI, C->getType(), ValueVTs);
|
|
|
|
unsigned NumElts = ValueVTs.size();
|
|
|
|
if (NumElts == 0)
|
|
|
|
return SDValue(); // empty struct
|
|
|
|
SmallVector<SDValue, 4> Constants(NumElts);
|
|
|
|
for (unsigned i = 0; i != NumElts; ++i) {
|
|
|
|
EVT EltVT = ValueVTs[i];
|
|
|
|
if (isa<UndefValue>(C))
|
|
|
|
Constants[i] = DAG.getUNDEF(EltVT);
|
|
|
|
else if (EltVT.isFloatingPoint())
|
|
|
|
Constants[i] = DAG.getConstantFP(0, EltVT);
|
|
|
|
else
|
|
|
|
Constants[i] = DAG.getConstant(0, EltVT);
|
|
|
|
}
|
2009-01-16 06:53:46 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
return DAG.getMergeValues(&Constants[0], NumElts,
|
|
|
|
getCurDebugLoc());
|
|
|
|
}
|
2009-01-16 06:53:46 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
if (const BlockAddress *BA = dyn_cast<BlockAddress>(C))
|
|
|
|
return DAG.getBlockAddress(BA, VT);
|
2009-01-16 06:53:46 +00:00
|
|
|
|
2011-07-18 04:54:35 +00:00
|
|
|
VectorType *VecTy = cast<VectorType>(V->getType());
|
2010-05-29 17:53:24 +00:00
|
|
|
unsigned NumElements = VecTy->getNumElements();
|
2009-01-16 06:53:46 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
// Now that we know the number and type of the elements, get that number of
|
|
|
|
// elements into the Ops array based on what kind of constant it is.
|
|
|
|
SmallVector<SDValue, 16> Ops;
|
2012-01-24 13:41:11 +00:00
|
|
|
if (const ConstantVector *CV = dyn_cast<ConstantVector>(C)) {
|
2010-05-29 17:53:24 +00:00
|
|
|
for (unsigned i = 0; i != NumElements; ++i)
|
2012-01-24 13:41:11 +00:00
|
|
|
Ops.push_back(getValue(CV->getOperand(i)));
|
2010-05-29 17:53:24 +00:00
|
|
|
} else {
|
|
|
|
assert(isa<ConstantAggregateZero>(C) && "Unknown vector constant!");
|
|
|
|
EVT EltVT = TLI.getValueType(VecTy->getElementType());
|
2009-01-16 06:53:46 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
SDValue Op;
|
|
|
|
if (EltVT.isFloatingPoint())
|
|
|
|
Op = DAG.getConstantFP(0, EltVT);
|
|
|
|
else
|
|
|
|
Op = DAG.getConstant(0, EltVT);
|
|
|
|
Ops.assign(NumElements, Op);
|
|
|
|
}
|
2009-01-16 06:53:46 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
// Create a BUILD_VECTOR node.
|
|
|
|
return NodeMap[V] = DAG.getNode(ISD::BUILD_VECTOR, getCurDebugLoc(),
|
|
|
|
VT, &Ops[0], Ops.size());
|
2008-09-03 16:12:24 +00:00
|
|
|
}
|
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
// If this is a static alloca, generate it as the frameindex instead of
|
|
|
|
// computation.
|
|
|
|
if (const AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
|
|
|
|
DenseMap<const AllocaInst*, int>::iterator SI =
|
|
|
|
FuncInfo.StaticAllocaMap.find(AI);
|
|
|
|
if (SI != FuncInfo.StaticAllocaMap.end())
|
|
|
|
return DAG.getFrameIndex(SI->second, TLI.getPointerTy());
|
2008-09-03 16:12:24 +00:00
|
|
|
}
|
2009-01-16 06:53:46 +00:00
|
|
|
|
2010-07-01 01:59:43 +00:00
|
|
|
// If this is an instruction which fast-isel has deferred, select it now.
|
|
|
|
if (const Instruction *Inst = dyn_cast<Instruction>(V)) {
|
2010-07-10 09:00:22 +00:00
|
|
|
unsigned InReg = FuncInfo.InitializeRegForValue(Inst);
|
|
|
|
RegsForValue RFV(*DAG.getContext(), TLI, InReg, Inst->getType());
|
|
|
|
SDValue Chain = DAG.getEntryNode();
|
2012-09-26 04:04:19 +00:00
|
|
|
return RFV.getCopyFromRegs(DAG, FuncInfo, getCurDebugLoc(), Chain, NULL, V);
|
2010-07-01 01:59:43 +00:00
|
|
|
}
|
2010-05-29 17:53:24 +00:00
|
|
|
|
2010-07-01 01:59:43 +00:00
|
|
|
llvm_unreachable("Can't get register for value!");
|
2008-09-03 16:12:24 +00:00
|
|
|
}
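
// For example, a first-class aggregate constant such as { i32 1, float 2.0 }
// is flattened above into one SDValue per leaf value and merged with
// getMergeValues, while a vector constant becomes a single BUILD_VECTOR node
// whose operands are the element constants (or splatted zeros).
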
void SelectionDAGBuilder::visitRet(const ReturnInst &I) {
|
|
|
|
SDValue Chain = getControlRoot();
|
|
|
|
SmallVector<ISD::OutputArg, 8> Outs;
|
2010-07-07 15:54:55 +00:00
|
|
|
SmallVector<SDValue, 8> OutVals;
|
2008-09-03 16:12:24 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
if (!FuncInfo.CanLowerReturn) {
|
|
|
|
unsigned DemoteReg = FuncInfo.DemoteRegister;
|
|
|
|
const Function *F = I.getParent()->getParent();
|
2009-01-16 06:53:46 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
// Emit a store of the return value through the virtual register.
|
|
|
|
// Leave Outs empty so that LowerReturn won't try to load return
|
|
|
|
// registers the usual way.
|
|
|
|
SmallVector<EVT, 1> PtrValueVTs;
|
|
|
|
ComputeValueVTs(TLI, PointerType::getUnqual(F->getReturnType()),
|
|
|
|
PtrValueVTs);
|
2009-01-16 06:53:46 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
SDValue RetPtr = DAG.getRegister(DemoteReg, PtrValueVTs[0]);
|
|
|
|
SDValue RetOp = getValue(I.getOperand(0));
|
2009-01-16 06:53:46 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
SmallVector<EVT, 4> ValueVTs;
|
|
|
|
SmallVector<uint64_t, 4> Offsets;
|
|
|
|
ComputeValueVTs(TLI, I.getOperand(0)->getType(), ValueVTs, &Offsets);
|
|
|
|
unsigned NumValues = ValueVTs.size();
|
2009-01-16 06:53:46 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
SmallVector<SDValue, 4> Chains(NumValues);
|
|
|
|
for (unsigned i = 0; i != NumValues; ++i) {
|
2010-08-24 23:10:06 +00:00
|
|
|
SDValue Add = DAG.getNode(ISD::ADD, getCurDebugLoc(),
|
|
|
|
RetPtr.getValueType(), RetPtr,
|
|
|
|
DAG.getIntPtrConstant(Offsets[i]));
|
2010-05-29 17:53:24 +00:00
|
|
|
Chains[i] =
|
|
|
|
DAG.getStore(Chain, getCurDebugLoc(),
|
|
|
|
SDValue(RetOp.getNode(), RetOp.getResNo() + i),
|
2010-09-21 18:58:22 +00:00
|
|
|
// FIXME: better loc info would be nice.
|
|
|
|
Add, MachinePointerInfo(), false, false, 0);
|
2008-09-03 16:12:24 +00:00
|
|
|
}
|
2009-01-16 06:53:46 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
Chain = DAG.getNode(ISD::TokenFactor, getCurDebugLoc(),
|
|
|
|
MVT::Other, &Chains[0], NumValues);
|
|
|
|
} else if (I.getNumOperands() != 0) {
|
|
|
|
SmallVector<EVT, 4> ValueVTs;
|
|
|
|
ComputeValueVTs(TLI, I.getOperand(0)->getType(), ValueVTs);
|
|
|
|
unsigned NumValues = ValueVTs.size();
|
|
|
|
if (NumValues) {
|
|
|
|
SDValue RetOp = getValue(I.getOperand(0));
|
|
|
|
for (unsigned j = 0, f = NumValues; j != f; ++j) {
|
|
|
|
EVT VT = ValueVTs[j];
|
2009-12-21 19:59:38 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
|
2008-09-03 16:12:24 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
const Function *F = I.getParent()->getParent();
|
2012-10-09 07:45:08 +00:00
|
|
|
if (F->getRetAttributes().hasAttribute(Attributes::SExt))
|
2010-05-29 17:53:24 +00:00
|
|
|
ExtendKind = ISD::SIGN_EXTEND;
|
2012-10-09 07:45:08 +00:00
|
|
|
else if (F->getRetAttributes().hasAttribute(Attributes::ZExt))
|
2010-05-29 17:53:24 +00:00
|
|
|
ExtendKind = ISD::ZERO_EXTEND;
|
2008-12-23 22:25:27 +00:00
|
|
|
|
2011-03-17 14:53:37 +00:00
|
|
|
if (ExtendKind != ISD::ANY_EXTEND && VT.isInteger())
|
2012-12-11 10:20:51 +00:00
|
|
|
VT = TLI.getTypeForExtArgOrReturn(*DAG.getContext(),
|
|
|
|
VT.getSimpleVT(), ExtendKind);
|
2008-09-03 16:12:24 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
unsigned NumParts = TLI.getNumRegisters(*DAG.getContext(), VT);
|
2012-12-11 10:09:23 +00:00
|
|
|
MVT PartVT = TLI.getRegisterType(*DAG.getContext(), VT);
|
2010-05-29 17:53:24 +00:00
|
|
|
SmallVector<SDValue, 4> Parts(NumParts);
|
|
|
|
getCopyToParts(DAG, getCurDebugLoc(),
|
|
|
|
SDValue(RetOp.getNode(), RetOp.getResNo() + j),
|
2012-09-26 06:16:18 +00:00
|
|
|
&Parts[0], NumParts, PartVT, &I, ExtendKind);
|
2008-09-03 16:12:24 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
// 'inreg' on function refers to return value
|
|
|
|
ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
|
2012-10-09 07:45:08 +00:00
|
|
|
if (F->getRetAttributes().hasAttribute(Attributes::InReg))
|
2010-05-29 17:53:24 +00:00
|
|
|
Flags.setInReg();
|
2008-09-03 16:12:24 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
// Propagate extension type if any
|
2011-03-16 22:20:07 +00:00
|
|
|
if (ExtendKind == ISD::SIGN_EXTEND)
|
2010-05-29 17:53:24 +00:00
|
|
|
Flags.setSExt();
|
2011-03-16 22:20:07 +00:00
|
|
|
else if (ExtendKind == ISD::ZERO_EXTEND)
|
2010-05-29 17:53:24 +00:00
|
|
|
Flags.setZExt();
|
|
|
|
|
2010-07-07 15:54:55 +00:00
|
|
|
for (unsigned i = 0; i < NumParts; ++i) {
|
|
|
|
Outs.push_back(ISD::OutputArg(Flags, Parts[i].getValueType(),
|
2012-11-01 23:49:58 +00:00
|
|
|
/*isfixed=*/true, 0, 0));
|
2010-07-07 15:54:55 +00:00
|
|
|
OutVals.push_back(Parts[i]);
|
|
|
|
}
|
2010-05-29 17:53:24 +00:00
|
|
|
}
|
2008-09-03 16:12:24 +00:00
|
|
|
}
|
|
|
|
}
|
2008-12-23 22:25:27 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
bool isVarArg = DAG.getMachineFunction().getFunction()->isVarArg();
|
|
|
|
CallingConv::ID CallConv =
|
|
|
|
DAG.getMachineFunction().getFunction()->getCallingConv();
|
|
|
|
Chain = TLI.LowerReturn(Chain, CallConv, isVarArg,
|
2010-07-07 15:54:55 +00:00
|
|
|
Outs, OutVals, getCurDebugLoc(), DAG);
|
2008-12-23 22:25:27 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
// Verify that the target's LowerReturn behaved as expected.
|
|
|
|
assert(Chain.getNode() && Chain.getValueType() == MVT::Other &&
|
|
|
|
"LowerReturn didn't return a valid chain!");
|
2008-12-23 22:25:27 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
// Update the DAG with the new chain value resulting from return lowering.
|
|
|
|
DAG.setRoot(Chain);
|
|
|
|
}
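
// For example, returning an i64 from a target whose legal register type is
// i32 produces two parts above, so Outs/OutVals get two entries for
// LowerReturn to place in the return registers; when CanLowerReturn is false
// the value is instead stored through the sret demote register set up in
// FunctionLoweringInfo.
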
/// CopyToExportRegsIfNeeded - If the given value has virtual registers
|
|
|
|
/// created for it, emit nodes to copy the value into the virtual
|
|
|
|
/// registers.
|
|
|
|
void SelectionDAGBuilder::CopyToExportRegsIfNeeded(const Value *V) {
|
2011-05-13 15:18:06 +00:00
|
|
|
// Skip empty types
|
|
|
|
if (V->getType()->isEmptyTy())
|
|
|
|
return;
|
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
DenseMap<const Value *, unsigned>::iterator VMI = FuncInfo.ValueMap.find(V);
|
|
|
|
if (VMI != FuncInfo.ValueMap.end()) {
|
|
|
|
assert(!V->use_empty() && "Unused value assigned virtual registers!");
|
|
|
|
CopyValueToVirtualRegister(V, VMI->second);
|
|
|
|
}
|
|
|
|
}
|
2009-12-21 23:47:40 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
/// ExportFromCurrentBlock - If this condition isn't known to be exported from
|
|
|
|
/// the current basic block, add it to ValueMap now so that we'll get a
|
|
|
|
/// CopyTo/FromReg.
|
|
|
|
void SelectionDAGBuilder::ExportFromCurrentBlock(const Value *V) {
|
|
|
|
// No need to export constants.
|
|
|
|
if (!isa<Instruction>(V) && !isa<Argument>(V)) return;
|
2008-12-23 22:25:27 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
// Already exported?
|
|
|
|
if (FuncInfo.isExportedInst(V)) return;
|
2009-12-21 19:59:38 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
unsigned Reg = FuncInfo.InitializeRegForValue(V);
|
|
|
|
CopyValueToVirtualRegister(V, Reg);
|
2008-09-03 16:12:24 +00:00
|
|
|
}
|
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
bool SelectionDAGBuilder::isExportableFromCurrentBlock(const Value *V,
|
|
|
|
const BasicBlock *FromBB) {
|
|
|
|
// The operands of the setcc have to be in this block. We don't know
|
|
|
|
// how to export them from some other block.
|
|
|
|
if (const Instruction *VI = dyn_cast<Instruction>(V)) {
|
|
|
|
// Can export from current BB.
|
|
|
|
if (VI->getParent() == FromBB)
|
|
|
|
return true;
|
2008-09-03 16:12:24 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
// Is already exported, noop.
|
|
|
|
return FuncInfo.isExportedInst(V);
|
|
|
|
}
|
2008-12-23 22:25:27 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
// If this is an argument, we can export it if the BB is the entry block or
|
|
|
|
// if it is already exported.
|
|
|
|
if (isa<Argument>(V)) {
|
|
|
|
if (FromBB == &FromBB->getParent()->getEntryBlock())
|
|
|
|
return true;
|
2008-12-23 22:25:27 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
// Otherwise, can only export this if it is already exported.
|
|
|
|
return FuncInfo.isExportedInst(V);
|
|
|
|
}
|
2008-09-03 16:12:24 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
// Otherwise, constants can always be exported.
|
|
|
|
return true;
|
|
|
|
}
|
2008-09-03 16:12:24 +00:00
|
|
|
|
2011-06-16 20:22:37 +00:00
|
|
|
/// Return branch probability calculated by BranchProbabilityInfo for IR blocks.
|
2011-12-20 20:03:10 +00:00
|
|
|
uint32_t SelectionDAGBuilder::getEdgeWeight(const MachineBasicBlock *Src,
|
|
|
|
const MachineBasicBlock *Dst) const {
|
2011-06-16 20:22:37 +00:00
|
|
|
BranchProbabilityInfo *BPI = FuncInfo.BPI;
|
|
|
|
if (!BPI)
|
|
|
|
return 0;
|
2011-07-29 20:05:36 +00:00
|
|
|
const BasicBlock *SrcBB = Src->getBasicBlock();
|
|
|
|
const BasicBlock *DstBB = Dst->getBasicBlock();
|
2011-06-16 20:22:37 +00:00
|
|
|
return BPI->getEdgeWeight(SrcBB, DstBB);
|
|
|
|
}
|
|
|
|
|
2011-07-29 22:25:21 +00:00
|
|
|
void SelectionDAGBuilder::
|
|
|
|
addSuccessorWithWeight(MachineBasicBlock *Src, MachineBasicBlock *Dst,
|
|
|
|
uint32_t Weight /* = 0 */) {
|
|
|
|
if (!Weight)
|
|
|
|
Weight = getEdgeWeight(Src, Dst);
|
|
|
|
Src->addSuccessor(Dst, Weight);
|
2011-06-16 20:22:37 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
static bool InBlock(const Value *V, const BasicBlock *BB) {
|
|
|
|
if (const Instruction *I = dyn_cast<Instruction>(V))
|
|
|
|
return I->getParent() == BB;
|
|
|
|
return true;
|
|
|
|
}
|
2009-12-21 23:47:40 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
/// EmitBranchForMergedCondition - Helper method for FindMergedConditions.
|
|
|
|
/// This function emits a branch and is used at the leaves of an OR or an
|
|
|
|
/// AND operator tree.
|
|
|
|
///
|
|
|
|
void
|
|
|
|
SelectionDAGBuilder::EmitBranchForMergedCondition(const Value *Cond,
|
|
|
|
MachineBasicBlock *TBB,
|
|
|
|
MachineBasicBlock *FBB,
|
|
|
|
MachineBasicBlock *CurBB,
|
|
|
|
MachineBasicBlock *SwitchBB) {
|
|
|
|
const BasicBlock *BB = CurBB->getBasicBlock();
|
2008-09-03 16:12:24 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
// If the leaf of the tree is a comparison, merge the condition into
|
|
|
|
// the caseblock.
|
|
|
|
if (const CmpInst *BOp = dyn_cast<CmpInst>(Cond)) {
|
|
|
|
// The operands of the cmp have to be in this block. We don't know
|
|
|
|
// how to export them from some other block. If this is the first block
|
|
|
|
// of the sequence, no exporting is needed.
|
|
|
|
if (CurBB == SwitchBB ||
|
|
|
|
(isExportableFromCurrentBlock(BOp->getOperand(0), BB) &&
|
|
|
|
isExportableFromCurrentBlock(BOp->getOperand(1), BB))) {
|
|
|
|
ISD::CondCode Condition;
|
|
|
|
if (const ICmpInst *IC = dyn_cast<ICmpInst>(Cond)) {
|
|
|
|
Condition = getICmpCondCode(IC->getPredicate());
|
|
|
|
} else if (const FCmpInst *FC = dyn_cast<FCmpInst>(Cond)) {
|
|
|
|
Condition = getFCmpCondCode(FC->getPredicate());
|
2011-12-02 22:16:29 +00:00
|
|
|
if (TM.Options.NoNaNsFPMath)
|
|
|
|
Condition = getFCmpCodeWithoutNaN(Condition);
|
2010-05-29 17:53:24 +00:00
|
|
|
} else {
|
|
|
|
Condition = ISD::SETEQ; // silence warning.
|
|
|
|
llvm_unreachable("Unknown compare instruction");
|
|
|
|
}
|
2008-09-03 16:12:24 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
CaseBlock CB(Condition, BOp->getOperand(0),
|
|
|
|
BOp->getOperand(1), NULL, TBB, FBB, CurBB);
|
|
|
|
SwitchCases.push_back(CB);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
}
|
2009-12-21 19:59:38 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
// Create a CaseBlock record representing this branch.
|
|
|
|
CaseBlock CB(ISD::SETEQ, Cond, ConstantInt::getTrue(*DAG.getContext()),
|
|
|
|
NULL, TBB, FBB, CurBB);
|
|
|
|
SwitchCases.push_back(CB);
|
2008-09-03 16:12:24 +00:00
|
|
|
}
|
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
/// FindMergedConditions - If Cond is an expression like (X && Y) or (X || Y),
/// split it into a chain of conditional branches by recursing on its operands;
/// otherwise emit a single branch for the whole condition.
|
|
|
|
void SelectionDAGBuilder::FindMergedConditions(const Value *Cond,
|
|
|
|
MachineBasicBlock *TBB,
|
|
|
|
MachineBasicBlock *FBB,
|
|
|
|
MachineBasicBlock *CurBB,
|
|
|
|
MachineBasicBlock *SwitchBB,
|
|
|
|
unsigned Opc) {
|
|
|
|
// If this node is not part of the or/and tree, emit it as a branch.
|
|
|
|
const Instruction *BOp = dyn_cast<Instruction>(Cond);
|
|
|
|
if (!BOp || !(isa<BinaryOperator>(BOp) || isa<CmpInst>(BOp)) ||
|
|
|
|
(unsigned)BOp->getOpcode() != Opc || !BOp->hasOneUse() ||
|
|
|
|
BOp->getParent() != CurBB->getBasicBlock() ||
|
|
|
|
!InBlock(BOp->getOperand(0), CurBB->getBasicBlock()) ||
|
|
|
|
!InBlock(BOp->getOperand(1), CurBB->getBasicBlock())) {
|
|
|
|
EmitBranchForMergedCondition(Cond, TBB, FBB, CurBB, SwitchBB);
|
|
|
|
return;
|
|
|
|
}
|
2008-09-03 16:12:24 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
// Create TmpBB after CurBB.
|
|
|
|
MachineFunction::iterator BBI = CurBB;
|
|
|
|
MachineFunction &MF = DAG.getMachineFunction();
|
|
|
|
MachineBasicBlock *TmpBB = MF.CreateMachineBasicBlock(CurBB->getBasicBlock());
|
|
|
|
CurBB->getParent()->insert(++BBI, TmpBB);
|
2008-09-03 16:12:24 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
if (Opc == Instruction::Or) {
|
|
|
|
// Codegen X | Y as:
|
|
|
|
// jmp_if_X TBB
|
|
|
|
// jmp TmpBB
|
|
|
|
// TmpBB:
|
|
|
|
// jmp_if_Y TBB
|
|
|
|
// jmp FBB
|
|
|
|
//
|
2008-09-03 16:12:24 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
// Emit the LHS condition.
|
|
|
|
FindMergedConditions(BOp->getOperand(0), TBB, TmpBB, CurBB, SwitchBB, Opc);
|
2008-09-03 16:12:24 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
// Emit the RHS condition into TmpBB.
|
|
|
|
FindMergedConditions(BOp->getOperand(1), TBB, FBB, TmpBB, SwitchBB, Opc);
|
|
|
|
} else {
|
|
|
|
assert(Opc == Instruction::And && "Unknown merge op!");
|
|
|
|
// Codegen X & Y as:
|
|
|
|
// jmp_if_X TmpBB
|
|
|
|
// jmp FBB
|
|
|
|
// TmpBB:
|
|
|
|
// jmp_if_Y TBB
|
|
|
|
// jmp FBB
|
|
|
|
//
|
|
|
|
// This requires creation of TmpBB after CurBB.
|
2008-09-03 16:12:24 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
// Emit the LHS condition.
|
|
|
|
FindMergedConditions(BOp->getOperand(0), TmpBB, FBB, CurBB, SwitchBB, Opc);
|
2008-09-03 16:12:24 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
// Emit the RHS condition into TmpBB.
|
|
|
|
FindMergedConditions(BOp->getOperand(1), TBB, FBB, TmpBB, SwitchBB, Opc);
|
|
|
|
}
|
|
|
|
}
|
2008-09-03 16:12:24 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
/// If the set of cases should be emitted as a series of branches, return true.
|
|
|
|
/// If we should emit this as a bunch of and/or'd together conditions, return
|
|
|
|
/// false.
|
|
|
|
bool
|
|
|
|
SelectionDAGBuilder::ShouldEmitAsBranches(const std::vector<CaseBlock> &Cases){
|
|
|
|
if (Cases.size() != 2) return true;
|
2008-12-23 22:25:27 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
// If this is two comparisons of the same values or'd or and'd together, they
|
|
|
|
// will get folded into a single comparison, so don't emit two blocks.
|
|
|
|
if ((Cases[0].CmpLHS == Cases[1].CmpLHS &&
|
|
|
|
Cases[0].CmpRHS == Cases[1].CmpRHS) ||
|
|
|
|
(Cases[0].CmpRHS == Cases[1].CmpLHS &&
|
|
|
|
Cases[0].CmpLHS == Cases[1].CmpRHS)) {
|
|
|
|
return false;
|
|
|
|
}
|
2009-12-21 19:59:38 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
// Handle: (X != null) | (Y != null) --> (X|Y) != 0
|
|
|
|
// Handle: (X == null) & (Y == null) --> (X|Y) == 0
|
|
|
|
if (Cases[0].CmpRHS == Cases[1].CmpRHS &&
|
|
|
|
Cases[0].CC == Cases[1].CC &&
|
|
|
|
isa<Constant>(Cases[0].CmpRHS) &&
|
|
|
|
cast<Constant>(Cases[0].CmpRHS)->isNullValue()) {
|
|
|
|
if (Cases[0].CC == ISD::SETEQ && Cases[0].TrueBB == Cases[1].ThisBB)
|
|
|
|
return false;
|
|
|
|
if (Cases[0].CC == ISD::SETNE && Cases[0].FalseBB == Cases[1].ThisBB)
|
|
|
|
return false;
|
|
|
|
}
|
2010-10-16 08:25:21 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
return true;
|
2008-09-03 16:12:24 +00:00
|
|
|
}
|
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
void SelectionDAGBuilder::visitBr(const BranchInst &I) {
|
2010-07-10 09:00:22 +00:00
|
|
|
MachineBasicBlock *BrMBB = FuncInfo.MBB;
|
2008-12-23 22:25:27 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
// Update machine-CFG edges.
|
|
|
|
MachineBasicBlock *Succ0MBB = FuncInfo.MBBMap[I.getSuccessor(0)];
|
2008-09-03 16:12:24 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
// Figure out which block is immediately after the current one.
|
2008-09-03 16:12:24 +00:00
|
|
|
MachineBasicBlock *NextBlock = 0;
|
2010-05-29 17:53:24 +00:00
|
|
|
MachineFunction::iterator BBI = BrMBB;
|
2009-08-15 02:06:22 +00:00
|
|
|
if (++BBI != FuncInfo.MF->end())
|
2008-09-03 16:12:24 +00:00
|
|
|
NextBlock = BBI;
|
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
if (I.isUnconditional()) {
|
|
|
|
// Update machine-CFG edges.
|
|
|
|
BrMBB->addSuccessor(Succ0MBB);
|
2010-04-19 22:41:47 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
// If this is not a fall-through branch, emit the branch.
|
|
|
|
if (Succ0MBB != NextBlock)
|
|
|
|
DAG.setRoot(DAG.getNode(ISD::BR, getCurDebugLoc(),
|
|
|
|
MVT::Other, getControlRoot(),
|
|
|
|
DAG.getBasicBlock(Succ0MBB)));
|
2008-09-03 16:12:24 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
return;
|
|
|
|
}
|
2008-09-03 16:12:24 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
// If this condition is one of the special cases we handle, do special stuff
|
|
|
|
// now.
|
|
|
|
const Value *CondVal = I.getCondition();
|
|
|
|
MachineBasicBlock *Succ1MBB = FuncInfo.MBBMap[I.getSuccessor(1)];
|
2008-09-03 16:12:24 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
// If this is a series of conditions that are or'd or and'd together, emit
|
|
|
|
// this as a sequence of branches instead of setcc's with and/or operations.
|
2010-11-30 18:12:52 +00:00
|
|
|
// As long as jumps are not expensive, this should improve performance.
|
2010-05-29 17:53:24 +00:00
|
|
|
// For example, instead of something like:
|
|
|
|
// cmp A, B
|
|
|
|
// C = seteq
|
|
|
|
// cmp D, E
|
|
|
|
// F = setle
|
|
|
|
// or C, F
|
|
|
|
// jnz foo
|
|
|
|
// Emit:
|
|
|
|
// cmp A, B
|
|
|
|
// je foo
|
|
|
|
// cmp D, E
|
|
|
|
// jle foo
|
|
|
|
//
|
|
|
|
if (const BinaryOperator *BOp = dyn_cast<BinaryOperator>(CondVal)) {
|
2011-02-25 21:41:48 +00:00
|
|
|
if (!TLI.isJumpExpensive() &&
|
2010-11-30 18:12:52 +00:00
|
|
|
BOp->hasOneUse() &&
|
2010-05-29 17:53:24 +00:00
|
|
|
(BOp->getOpcode() == Instruction::And ||
|
|
|
|
BOp->getOpcode() == Instruction::Or)) {
|
|
|
|
FindMergedConditions(BOp, Succ0MBB, Succ1MBB, BrMBB, BrMBB,
|
|
|
|
BOp->getOpcode());
|
|
|
|
// If the compares in later blocks need to use values not currently
|
|
|
|
// exported from this block, export them now. This block should always
|
|
|
|
// be the first entry.
|
|
|
|
assert(SwitchCases[0].ThisBB == BrMBB && "Unexpected lowering!");
|
2008-09-03 16:12:24 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
// Allow some cases to be rejected.
|
|
|
|
if (ShouldEmitAsBranches(SwitchCases)) {
|
|
|
|
for (unsigned i = 1, e = SwitchCases.size(); i != e; ++i) {
|
|
|
|
ExportFromCurrentBlock(SwitchCases[i].CmpLHS);
|
|
|
|
ExportFromCurrentBlock(SwitchCases[i].CmpRHS);
|
|
|
|
}
|
2008-09-03 16:12:24 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
// Emit the branch for this block.
|
|
|
|
visitSwitchCase(SwitchCases[0], BrMBB);
|
|
|
|
SwitchCases.erase(SwitchCases.begin());
|
|
|
|
return;
|
|
|
|
}
|
2008-09-03 16:12:24 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
// Okay, we decided not to do this, remove any inserted MBB's and clear
|
|
|
|
// SwitchCases.
|
|
|
|
for (unsigned i = 1, e = SwitchCases.size(); i != e; ++i)
|
|
|
|
FuncInfo.MF->erase(SwitchCases[i].ThisBB);
|
2008-12-23 22:25:27 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
SwitchCases.clear();
|
|
|
|
}
|
|
|
|
}
|
2008-12-23 22:25:27 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
// Create a CaseBlock record representing this branch.
|
|
|
|
CaseBlock CB(ISD::SETEQ, CondVal, ConstantInt::getTrue(*DAG.getContext()),
|
|
|
|
NULL, Succ0MBB, Succ1MBB, BrMBB);
|
2008-09-03 16:12:24 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
// Use visitSwitchCase to actually insert the fast branch sequence for this
|
|
|
|
// cond branch.
|
|
|
|
visitSwitchCase(CB, BrMBB);
|
|
|
|
}
|
2008-09-03 16:12:24 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
/// visitSwitchCase - Emits the necessary code to represent a single node in
|
|
|
|
/// the binary search tree resulting from lowering a switch instruction.
|
|
|
|
void SelectionDAGBuilder::visitSwitchCase(CaseBlock &CB,
|
|
|
|
MachineBasicBlock *SwitchBB) {
|
|
|
|
SDValue Cond;
|
|
|
|
SDValue CondLHS = getValue(CB.CmpLHS);
|
|
|
|
DebugLoc dl = getCurDebugLoc();
|
2008-09-03 16:12:24 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
// Build the setcc now.
|
|
|
|
if (CB.CmpMHS == NULL) {
|
|
|
|
// Fold "(X == true)" to X and "(X == false)" to !X to
|
|
|
|
// handle common cases produced by branch lowering.
|
|
|
|
if (CB.CmpRHS == ConstantInt::getTrue(*DAG.getContext()) &&
|
|
|
|
CB.CC == ISD::SETEQ)
|
|
|
|
Cond = CondLHS;
|
|
|
|
else if (CB.CmpRHS == ConstantInt::getFalse(*DAG.getContext()) &&
|
|
|
|
CB.CC == ISD::SETEQ) {
|
|
|
|
SDValue True = DAG.getConstant(1, CondLHS.getValueType());
|
|
|
|
Cond = DAG.getNode(ISD::XOR, dl, CondLHS.getValueType(), CondLHS, True);
|
|
|
|
} else
|
|
|
|
Cond = DAG.getSetCC(dl, MVT::i1, CondLHS, getValue(CB.CmpRHS), CB.CC);
|
|
|
|
} else {
|
2012-05-17 08:56:30 +00:00
|
|
|
    assert(CB.CC == ISD::SETCC_INVALID &&
           "Condition is undefined for a range membership check.");
|
2008-12-23 22:25:27 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
const APInt& Low = cast<ConstantInt>(CB.CmpLHS)->getValue();
|
|
|
|
const APInt& High = cast<ConstantInt>(CB.CmpRHS)->getValue();
|
2008-12-23 22:25:27 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
SDValue CmpOp = getValue(CB.CmpMHS);
|
|
|
|
EVT VT = CmpOp.getValueType();
|
2012-05-17 08:56:30 +00:00
|
|
|
|
|
|
|
if (cast<ConstantInt>(CB.CmpLHS)->isMinValue(false)) {
|
2010-05-29 17:53:24 +00:00
|
|
|
Cond = DAG.getSetCC(dl, MVT::i1, CmpOp, DAG.getConstant(High, VT),
|
2012-05-17 08:56:30 +00:00
|
|
|
ISD::SETULE);
|
2008-09-03 16:12:24 +00:00
|
|
|
} else {
|
2010-05-29 17:53:24 +00:00
|
|
|
SDValue SUB = DAG.getNode(ISD::SUB, dl,
|
|
|
|
VT, CmpOp, DAG.getConstant(Low, VT));
|
|
|
|
Cond = DAG.getSetCC(dl, MVT::i1, SUB,
|
|
|
|
DAG.getConstant(High-Low, VT), ISD::SETULE);
|
2008-09-03 16:12:24 +00:00
|
|
|
}
|
2010-05-29 17:53:24 +00:00
|
|
|
}
|
2008-09-03 16:12:24 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
// Update successor info
|
2011-07-29 22:25:21 +00:00
|
|
|
addSuccessorWithWeight(SwitchBB, CB.TrueBB, CB.TrueWeight);
|
2012-08-20 21:39:52 +00:00
|
|
|
// TrueBB and FalseBB are always different unless the incoming IR is
|
|
|
|
// degenerate. This only happens when running llc on weird IR.
|
|
|
|
if (CB.TrueBB != CB.FalseBB)
|
|
|
|
addSuccessorWithWeight(SwitchBB, CB.FalseBB, CB.FalseWeight);
|
2008-12-23 22:25:27 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
// Set NextBlock to be the MBB immediately after the current one, if any.
|
|
|
|
// This is used to avoid emitting unnecessary branches to the next block.
|
|
|
|
MachineBasicBlock *NextBlock = 0;
|
|
|
|
MachineFunction::iterator BBI = SwitchBB;
|
|
|
|
if (++BBI != FuncInfo.MF->end())
|
|
|
|
NextBlock = BBI;
|
2008-12-23 22:25:27 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
// If the lhs block is the next block, invert the condition so that we can
|
|
|
|
// fall through to the lhs instead of the rhs block.
|
|
|
|
if (CB.TrueBB == NextBlock) {
|
|
|
|
std::swap(CB.TrueBB, CB.FalseBB);
|
|
|
|
SDValue True = DAG.getConstant(1, Cond.getValueType());
|
|
|
|
Cond = DAG.getNode(ISD::XOR, dl, Cond.getValueType(), Cond, True);
|
2008-09-03 16:12:24 +00:00
|
|
|
}
|
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
SDValue BrCond = DAG.getNode(ISD::BRCOND, dl,
|
|
|
|
MVT::Other, getControlRoot(), Cond,
|
|
|
|
DAG.getBasicBlock(CB.TrueBB));
|
2008-12-23 22:25:27 +00:00
|
|
|
|
2010-09-23 06:51:55 +00:00
|
|
|
// Insert the false branch. Do this even if it's a fall through branch,
|
|
|
|
// this makes it easier to do DAG optimizations which require inverting
|
|
|
|
// the branch condition.
|
|
|
|
BrCond = DAG.getNode(ISD::BR, dl, MVT::Other, BrCond,
|
|
|
|
DAG.getBasicBlock(CB.FalseBB));
|
2010-05-29 17:53:24 +00:00
|
|
|
|
|
|
|
DAG.setRoot(BrCond);
|
2008-12-23 22:26:01 +00:00
|
|
|
}
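
// For a range case [Low, High] the code above tests membership with an
// unsigned trick: it emits (X - Low) and compares it ULE (High - Low), which
// needs a single branch instead of two ordered comparisons.
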
/// visitJumpTable - Emit JumpTable node in the current MBB
|
|
|
|
void SelectionDAGBuilder::visitJumpTable(JumpTable &JT) {
|
|
|
|
// Emit the code for the jump table
|
|
|
|
assert(JT.Reg != -1U && "Should lower JT Header first!");
|
|
|
|
EVT PTy = TLI.getPointerTy();
|
|
|
|
SDValue Index = DAG.getCopyFromReg(getControlRoot(), getCurDebugLoc(),
|
|
|
|
JT.Reg, PTy);
|
|
|
|
SDValue Table = DAG.getJumpTable(JT.JTI, PTy);
|
|
|
|
SDValue BrJumpTable = DAG.getNode(ISD::BR_JT, getCurDebugLoc(),
|
|
|
|
MVT::Other, Index.getValue(1),
|
|
|
|
Table, Index);
|
|
|
|
DAG.setRoot(BrJumpTable);
|
|
|
|
}
|
2008-09-03 16:12:24 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
/// visitJumpTableHeader - This function emits necessary code to produce index
|
|
|
|
/// in the JumpTable from switch case.
|
|
|
|
void SelectionDAGBuilder::visitJumpTableHeader(JumpTable &JT,
|
|
|
|
JumpTableHeader &JTH,
|
|
|
|
MachineBasicBlock *SwitchBB) {
|
|
|
|
// Subtract the lowest switch case value from the value being switched on and
|
|
|
|
// conditional branch to default mbb if the result is greater than the
|
|
|
|
// difference between smallest and largest cases.
|
|
|
|
SDValue SwitchOp = getValue(JTH.SValue);
|
|
|
|
EVT VT = SwitchOp.getValueType();
|
|
|
|
SDValue Sub = DAG.getNode(ISD::SUB, getCurDebugLoc(), VT, SwitchOp,
|
|
|
|
DAG.getConstant(JTH.First, VT));
|
2008-09-03 16:12:24 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
// The SDNode we just created, which holds the value being switched on minus
|
|
|
|
// the smallest case value, needs to be copied to a virtual register so it
|
|
|
|
// can be used as an index into the jump table in a subsequent basic block.
|
|
|
|
// This value may be smaller or larger than the target's pointer type, and
|
|
|
|
// therefore require extension or truncating.
|
|
|
|
SwitchOp = DAG.getZExtOrTrunc(Sub, getCurDebugLoc(), TLI.getPointerTy());
|
2008-09-03 16:12:24 +00:00
|
|
|
|
2010-07-02 00:10:16 +00:00
|
|
|
unsigned JumpTableReg = FuncInfo.CreateReg(TLI.getPointerTy());
|
2010-05-29 17:53:24 +00:00
|
|
|
SDValue CopyTo = DAG.getCopyToReg(getControlRoot(), getCurDebugLoc(),
|
|
|
|
JumpTableReg, SwitchOp);
|
|
|
|
JT.Reg = JumpTableReg;
|
2008-12-23 22:25:27 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
// Emit the range check for the jump table, and branch to the default block
|
|
|
|
// for the switch statement if the value being switched on exceeds the largest
|
|
|
|
// case in the switch.
|
|
|
|
SDValue CMP = DAG.getSetCC(getCurDebugLoc(),
|
|
|
|
TLI.getSetCCResultType(Sub.getValueType()), Sub,
|
|
|
|
DAG.getConstant(JTH.Last-JTH.First,VT),
|
|
|
|
ISD::SETUGT);
|
2008-09-03 16:12:24 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
// Set NextBlock to be the MBB immediately after the current one, if any.
|
|
|
|
// This is used to avoid emitting unnecessary branches to the next block.
|
|
|
|
MachineBasicBlock *NextBlock = 0;
|
|
|
|
MachineFunction::iterator BBI = SwitchBB;
|
2008-09-03 16:12:24 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
if (++BBI != FuncInfo.MF->end())
|
|
|
|
NextBlock = BBI;
|
2008-09-03 16:12:24 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
SDValue BrCond = DAG.getNode(ISD::BRCOND, getCurDebugLoc(),
|
|
|
|
MVT::Other, CopyTo, CMP,
|
|
|
|
DAG.getBasicBlock(JT.Default));
|
2008-09-03 16:12:24 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
if (JT.MBB != NextBlock)
|
|
|
|
BrCond = DAG.getNode(ISD::BR, getCurDebugLoc(), MVT::Other, BrCond,
|
|
|
|
DAG.getBasicBlock(JT.MBB));
|
2008-09-03 16:12:24 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
DAG.setRoot(BrCond);
|
|
|
|
}
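
// For example, a switch whose cases span 10..14 reaches this header with
// JTH.First == 10 and JTH.Last == 14: it emits Sub = X - 10, branches to the
// default block when Sub >u 4, and otherwise falls through to the block that
// indexes the jump table with Sub.
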
/// visitBitTestHeader - This function emits necessary code to produce value
|
|
|
|
/// suitable for "bit tests"
|
|
|
|
void SelectionDAGBuilder::visitBitTestHeader(BitTestBlock &B,
|
|
|
|
MachineBasicBlock *SwitchBB) {
|
|
|
|
// Subtract the minimum value
|
|
|
|
SDValue SwitchOp = getValue(B.SValue);
|
2012-12-11 09:10:33 +00:00
|
|
|
MVT VT = SwitchOp.getSimpleValueType();
|
2010-05-29 17:53:24 +00:00
|
|
|
SDValue Sub = DAG.getNode(ISD::SUB, getCurDebugLoc(), VT, SwitchOp,
|
|
|
|
DAG.getConstant(B.First, VT));
|
2008-12-23 22:25:27 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
// Check range
|
|
|
|
SDValue RangeCmp = DAG.getSetCC(getCurDebugLoc(),
|
|
|
|
TLI.getSetCCResultType(Sub.getValueType()),
|
|
|
|
Sub, DAG.getConstant(B.Range, VT),
|
|
|
|
ISD::SETUGT);
|
2008-12-23 22:25:27 +00:00
|
|
|
|
2011-01-06 01:02:44 +00:00
|
|
|
// Determine the type of the test operands.
|
|
|
|
bool UsePtrType = false;
|
|
|
|
if (!TLI.isTypeLegal(VT))
|
|
|
|
UsePtrType = true;
|
|
|
|
else {
|
|
|
|
for (unsigned i = 0, e = B.Cases.size(); i != e; ++i)
|
2011-10-12 22:46:45 +00:00
|
|
|
if (!isUIntN(VT.getSizeInBits(), B.Cases[i].Mask)) {
|
2011-01-06 01:02:44 +00:00
|
|
|
// Switch table case range are encoded into series of masks.
|
|
|
|
// Just use pointer type, it's guaranteed to fit.
|
|
|
|
UsePtrType = true;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (UsePtrType) {
|
|
|
|
VT = TLI.getPointerTy();
|
|
|
|
Sub = DAG.getZExtOrTrunc(Sub, getCurDebugLoc(), VT);
|
|
|
|
}
|
2008-12-23 22:25:27 +00:00
|
|
|
|
2011-01-06 01:02:44 +00:00
|
|
|
B.RegVT = VT;
|
|
|
|
B.Reg = FuncInfo.CreateReg(VT);
|
2010-05-29 17:53:24 +00:00
|
|
|
SDValue CopyTo = DAG.getCopyToReg(getControlRoot(), getCurDebugLoc(),
|
2011-01-06 01:02:44 +00:00
|
|
|
B.Reg, Sub);
|
2008-12-23 22:25:27 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
// Set NextBlock to be the MBB immediately after the current one, if any.
|
|
|
|
// This is used to avoid emitting unnecessary branches to the next block.
|
|
|
|
MachineBasicBlock *NextBlock = 0;
|
|
|
|
MachineFunction::iterator BBI = SwitchBB;
|
|
|
|
if (++BBI != FuncInfo.MF->end())
|
|
|
|
NextBlock = BBI;
|
2008-12-23 22:25:27 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
MachineBasicBlock* MBB = B.Cases[0].ThisBB;
|
2008-09-03 16:12:24 +00:00
|
|
|
|
2011-06-16 20:22:37 +00:00
|
|
|
addSuccessorWithWeight(SwitchBB, B.Default);
|
|
|
|
addSuccessorWithWeight(SwitchBB, MBB);
|
2010-05-29 17:53:24 +00:00
|
|
|
|
|
|
|
SDValue BrRange = DAG.getNode(ISD::BRCOND, getCurDebugLoc(),
|
|
|
|
MVT::Other, CopyTo, RangeCmp,
|
|
|
|
DAG.getBasicBlock(B.Default));
|
|
|
|
|
2010-09-23 18:32:19 +00:00
|
|
|
if (MBB != NextBlock)
|
|
|
|
BrRange = DAG.getNode(ISD::BR, getCurDebugLoc(), MVT::Other, CopyTo,
|
|
|
|
DAG.getBasicBlock(MBB));
|
2010-05-29 17:53:24 +00:00
|
|
|
|
|
|
|
DAG.setRoot(BrRange);
|
2008-09-03 16:12:24 +00:00
|
|
|
}
|
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
/// visitBitTestCase - This function produces one "bit test" comparison and the
/// branch to its target block.
|
2011-01-06 01:02:44 +00:00
|
|
|
void SelectionDAGBuilder::visitBitTestCase(BitTestBlock &BB,
|
|
|
|
MachineBasicBlock* NextMBB,
|
2012-08-24 18:14:27 +00:00
|
|
|
uint32_t BranchWeightToNext,
|
2010-05-29 17:53:24 +00:00
|
|
|
unsigned Reg,
|
|
|
|
BitTestCase &B,
|
|
|
|
MachineBasicBlock *SwitchBB) {
|
2012-12-11 10:24:48 +00:00
|
|
|
MVT VT = BB.RegVT;
|
2011-01-06 01:02:44 +00:00
|
|
|
SDValue ShiftOp = DAG.getCopyFromReg(getControlRoot(), getCurDebugLoc(),
|
|
|
|
Reg, VT);
|
2010-06-24 02:06:24 +00:00
|
|
|
SDValue Cmp;
|
2011-07-14 01:38:42 +00:00
|
|
|
unsigned PopCount = CountPopulation_64(B.Mask);
|
|
|
|
if (PopCount == 1) {
|
2010-06-24 02:06:24 +00:00
|
|
|
// Testing for a single bit; just compare the shift count with what it
|
|
|
|
// would need to be to shift a 1 bit in that position.
|
|
|
|
Cmp = DAG.getSetCC(getCurDebugLoc(),
|
2011-01-06 01:02:44 +00:00
|
|
|
TLI.getSetCCResultType(VT),
|
2010-06-24 02:06:24 +00:00
|
|
|
ShiftOp,
|
2011-01-06 01:02:44 +00:00
|
|
|
DAG.getConstant(CountTrailingZeros_64(B.Mask), VT),
|
2010-06-24 02:06:24 +00:00
|
|
|
ISD::SETEQ);
|
2011-07-14 01:38:42 +00:00
|
|
|
} else if (PopCount == BB.Range) {
|
|
|
|
// There is only one zero bit in the range, test for it directly.
|
|
|
|
Cmp = DAG.getSetCC(getCurDebugLoc(),
|
|
|
|
TLI.getSetCCResultType(VT),
|
|
|
|
ShiftOp,
|
|
|
|
DAG.getConstant(CountTrailingOnes_64(B.Mask), VT),
|
|
|
|
ISD::SETNE);
|
2010-06-24 02:06:24 +00:00
|
|
|
} else {
|
|
|
|
// Make desired shift
|
2011-01-06 01:02:44 +00:00
|
|
|
SDValue SwitchVal = DAG.getNode(ISD::SHL, getCurDebugLoc(), VT,
|
|
|
|
DAG.getConstant(1, VT), ShiftOp);
|
2010-06-24 02:06:24 +00:00
|
|
|
|
|
|
|
// Emit bit tests and jumps
|
|
|
|
SDValue AndOp = DAG.getNode(ISD::AND, getCurDebugLoc(),
|
2011-01-06 01:02:44 +00:00
|
|
|
VT, SwitchVal, DAG.getConstant(B.Mask, VT));
|
2010-06-24 02:06:24 +00:00
|
|
|
Cmp = DAG.getSetCC(getCurDebugLoc(),
|
2011-01-06 01:02:44 +00:00
|
|
|
TLI.getSetCCResultType(VT),
|
|
|
|
AndOp, DAG.getConstant(0, VT),
|
2010-06-24 02:06:24 +00:00
|
|
|
ISD::SETNE);
|
|
|
|
}
|
2008-09-03 16:12:24 +00:00
|
|
|
|
2012-08-24 18:14:27 +00:00
|
|
|
// The branch weight from SwitchBB to B.TargetBB is B.ExtraWeight.
|
|
|
|
addSuccessorWithWeight(SwitchBB, B.TargetBB, B.ExtraWeight);
|
|
|
|
// The branch weight from SwitchBB to NextMBB is BranchWeightToNext.
|
|
|
|
addSuccessorWithWeight(SwitchBB, NextMBB, BranchWeightToNext);
|
2008-09-03 16:12:24 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
SDValue BrAnd = DAG.getNode(ISD::BRCOND, getCurDebugLoc(),
|
|
|
|
MVT::Other, getControlRoot(),
|
2010-06-24 02:06:24 +00:00
|
|
|
Cmp, DAG.getBasicBlock(B.TargetBB));
|
2008-09-03 16:12:24 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
// Set NextBlock to be the MBB immediately after the current one, if any.
|
|
|
|
// This is used to avoid emitting unnecessary branches to the next block.
|
|
|
|
MachineBasicBlock *NextBlock = 0;
|
|
|
|
MachineFunction::iterator BBI = SwitchBB;
|
|
|
|
if (++BBI != FuncInfo.MF->end())
|
|
|
|
NextBlock = BBI;
|
2008-09-03 16:12:24 +00:00
|
|
|
|
2010-09-23 18:32:19 +00:00
|
|
|
if (NextMBB != NextBlock)
|
|
|
|
BrAnd = DAG.getNode(ISD::BR, getCurDebugLoc(), MVT::Other, BrAnd,
|
|
|
|
DAG.getBasicBlock(NextMBB));
|
2008-09-03 16:12:24 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
DAG.setRoot(BrAnd);
|
|
|
|
}
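
// In the general path above the test emitted for a cluster with mask B.Mask is
//   ((1 << (X - First)) & Mask) != 0
// with two special cases folded into plain comparisons of the shift amount:
// a mask with a single set bit, and a mask with a single clear bit in range.
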
void SelectionDAGBuilder::visitInvoke(const InvokeInst &I) {
|
2010-07-10 09:00:22 +00:00
|
|
|
MachineBasicBlock *InvokeMBB = FuncInfo.MBB;
|
2008-12-23 22:25:27 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
// Retrieve successors.
|
|
|
|
MachineBasicBlock *Return = FuncInfo.MBBMap[I.getSuccessor(0)];
|
|
|
|
MachineBasicBlock *LandingPad = FuncInfo.MBBMap[I.getSuccessor(1)];
|
2009-04-09 02:33:36 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
const Value *Callee(I.getCalledValue());
|
2012-06-28 22:30:12 +00:00
|
|
|
const Function *Fn = dyn_cast<Function>(Callee);
|
2010-05-29 17:53:24 +00:00
|
|
|
if (isa<InlineAsm>(Callee))
|
|
|
|
visitInlineAsm(&I);
|
2012-06-28 22:30:12 +00:00
|
|
|
else if (Fn && Fn->isIntrinsic()) {
|
|
|
|
assert(Fn->getIntrinsicID() == Intrinsic::donothing);
|
2012-07-18 00:07:17 +00:00
|
|
|
// Ignore invokes to @llvm.donothing: jump directly to the next BB.
|
2012-06-28 22:30:12 +00:00
|
|
|
} else
|
2010-05-29 17:53:24 +00:00
|
|
|
LowerCallTo(&I, getValue(Callee), false, LandingPad);
|
2008-09-03 16:12:24 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
// If the value of the invoke is used outside of its defining block, make it
|
|
|
|
// available as a virtual register.
|
|
|
|
CopyToExportRegsIfNeeded(&I);
|
2008-09-03 16:12:24 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
// Update successor info
|
2011-11-22 11:37:46 +00:00
|
|
|
addSuccessorWithWeight(InvokeMBB, Return);
|
|
|
|
addSuccessorWithWeight(InvokeMBB, LandingPad);
|
2008-09-03 16:12:24 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
// Drop into normal successor.
|
|
|
|
DAG.setRoot(DAG.getNode(ISD::BR, getCurDebugLoc(),
|
|
|
|
MVT::Other, getControlRoot(),
|
|
|
|
DAG.getBasicBlock(Return)));
|
2008-09-03 16:12:24 +00:00
|
|
|
}
|
|
|
|
|
2011-07-31 06:30:59 +00:00
|
|
|
void SelectionDAGBuilder::visitResume(const ResumeInst &RI) {
|
|
|
|
llvm_unreachable("SelectionDAGBuilder shouldn't visit resume instructions!");
|
|
|
|
}
|
|
|
|
|
2011-08-17 21:56:44 +00:00
|
|
|
void SelectionDAGBuilder::visitLandingPad(const LandingPadInst &LP) {
|
|
|
|
assert(FuncInfo.MBB->isLandingPad() &&
|
|
|
|
"Call to landingpad not in landing pad!");
|
|
|
|
|
|
|
|
MachineBasicBlock *MBB = FuncInfo.MBB;
|
|
|
|
MachineModuleInfo &MMI = DAG.getMachineFunction().getMMI();
|
|
|
|
AddLandingPadInfo(LP, MMI, MBB);
|
|
|
|
|
2012-02-13 23:47:16 +00:00
|
|
|
// If there aren't registers to copy the values into (e.g., during SjLj
|
|
|
|
// exceptions), then don't bother to create these DAG nodes.
|
2012-02-14 04:45:49 +00:00
|
|
|
if (TLI.getExceptionPointerRegister() == 0 &&
|
2012-02-13 23:47:16 +00:00
|
|
|
TLI.getExceptionSelectorRegister() == 0)
|
|
|
|
return;
|
|
|
|
|
2011-08-17 21:56:44 +00:00
|
|
|
SmallVector<EVT, 2> ValueVTs;
|
|
|
|
ComputeValueVTs(TLI, LP.getType(), ValueVTs);
|
|
|
|
|
|
|
|
// Insert the EXCEPTIONADDR instruction.
|
|
|
|
assert(FuncInfo.MBB->isLandingPad() &&
|
|
|
|
"Call to eh.exception not in landing pad!");
|
|
|
|
SDVTList VTs = DAG.getVTList(TLI.getPointerTy(), MVT::Other);
|
|
|
|
SDValue Ops[2];
|
|
|
|
Ops[0] = DAG.getRoot();
|
|
|
|
SDValue Op1 = DAG.getNode(ISD::EXCEPTIONADDR, getCurDebugLoc(), VTs, Ops, 1);
|
|
|
|
SDValue Chain = Op1.getValue(1);
|
|
|
|
|
|
|
|
// Insert the EHSELECTION instruction.
|
|
|
|
VTs = DAG.getVTList(TLI.getPointerTy(), MVT::Other);
|
|
|
|
Ops[0] = Op1;
|
|
|
|
Ops[1] = Chain;
|
|
|
|
SDValue Op2 = DAG.getNode(ISD::EHSELECTION, getCurDebugLoc(), VTs, Ops, 2);
|
|
|
|
Chain = Op2.getValue(1);
|
|
|
|
Op2 = DAG.getSExtOrTrunc(Op2, getCurDebugLoc(), MVT::i32);
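// The landingpad's selector value is i32, so the pointer-sized EHSELECTION
// result is extended or truncated to match before being merged below.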
|
|
|
|
|
|
|
|
Ops[0] = Op1;
|
|
|
|
Ops[1] = Op2;
|
|
|
|
SDValue Res = DAG.getNode(ISD::MERGE_VALUES, getCurDebugLoc(),
|
|
|
|
DAG.getVTList(&ValueVTs[0], ValueVTs.size()),
|
|
|
|
&Ops[0], 2);
|
|
|
|
|
|
|
|
std::pair<SDValue, SDValue> RetPair = std::make_pair(Res, Chain);
|
|
|
|
setValue(&LP, RetPair.first);
|
|
|
|
DAG.setRoot(RetPair.second);
|
|
|
|
}
|
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
/// handleSmallSwitchRange - Emit a series of specific tests (suitable for
|
|
|
|
/// small case ranges).
|
|
|
|
bool SelectionDAGBuilder::handleSmallSwitchRange(CaseRec& CR,
|
|
|
|
CaseRecVector& WorkList,
|
|
|
|
const Value* SV,
|
|
|
|
MachineBasicBlock *Default,
|
|
|
|
MachineBasicBlock *SwitchBB) {
|
|
|
|
// Size is the number of Cases represented by this range.
|
|
|
|
size_t Size = CR.Range.second - CR.Range.first;
|
|
|
|
if (Size > 3)
|
|
|
|
return false;
|
|
|
|
|
2008-09-03 16:12:24 +00:00
|
|
|
// Get the MachineFunction which holds the current MBB. This is used when
|
|
|
|
// inserting any additional MBBs necessary to represent the switch.
|
2009-08-15 02:06:22 +00:00
|
|
|
MachineFunction *CurMF = FuncInfo.MF;
|
2008-09-03 16:12:24 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
// Figure out which block is immediately after the current one.
|
|
|
|
MachineBasicBlock *NextBlock = 0;
|
|
|
|
MachineFunction::iterator BBI = CR.CaseBB;
|
2008-12-23 22:25:27 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
if (++BBI != FuncInfo.MF->end())
|
|
|
|
NextBlock = BBI;
|
2008-12-23 22:25:27 +00:00
|
|
|
|
2012-08-24 18:14:27 +00:00
|
|
|
BranchProbabilityInfo *BPI = FuncInfo.BPI;
|
2010-11-22 09:45:38 +00:00
|
|
|
// If any two of the cases have the same destination, and if one value
|
2010-05-29 17:53:24 +00:00
|
|
|
// is the same as the other, but has one bit unset that the other has set,
|
|
|
|
// use bit manipulation to do two compares at once. For example:
|
|
|
|
// "if (X == 6 || X == 4)" -> "if ((X|2) == 6)"
|
2010-11-22 09:45:38 +00:00
|
|
|
// TODO: This could be extended to merge any 2 cases in switches with 3 cases.
|
|
|
|
// TODO: Handle cases where CR.CaseBB != SwitchBB.
|
|
|
|
if (Size == 2 && CR.CaseBB == SwitchBB) {
|
|
|
|
Case &Small = *CR.Range.first;
|
|
|
|
Case &Big = *(CR.Range.second-1);
|
|
|
|
|
|
|
|
if (Small.Low == Small.High && Big.Low == Big.High && Small.BB == Big.BB) {
|
|
|
|
const APInt& SmallValue = cast<ConstantInt>(Small.Low)->getValue();
|
|
|
|
const APInt& BigValue = cast<ConstantInt>(Big.Low)->getValue();
|
|
|
|
|
|
|
|
// Check that there is only one bit different.
|
|
|
|
if (BigValue.countPopulation() == SmallValue.countPopulation() + 1 &&
|
|
|
|
(SmallValue | BigValue) == BigValue) {
|
|
|
|
// Isolate the common bit.
|
|
|
|
APInt CommonBit = BigValue & ~SmallValue;
|
|
|
|
assert((SmallValue | CommonBit) == BigValue &&
|
|
|
|
CommonBit.countPopulation() == 1 && "Not a common bit?");
|
|
|
|
|
|
|
|
SDValue CondLHS = getValue(SV);
|
|
|
|
EVT VT = CondLHS.getValueType();
|
|
|
|
DebugLoc DL = getCurDebugLoc();
|
|
|
|
|
|
|
|
SDValue Or = DAG.getNode(ISD::OR, DL, VT, CondLHS,
|
|
|
|
DAG.getConstant(CommonBit, VT));
|
|
|
|
SDValue Cond = DAG.getSetCC(DL, MVT::i1,
|
|
|
|
Or, DAG.getConstant(BigValue, VT),
|
|
|
|
ISD::SETEQ);
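// Because CommonBit is the single bit distinguishing the two case values,
// (SV | CommonBit) == BigValue holds exactly when SV is SmallValue or
// BigValue, so one compare covers both cases.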
|
|
|
|
|
|
|
|
// Update successor info.
|
2012-08-24 18:14:27 +00:00
|
|
|
// Both Small and Big will jump to Small.BB, so we sum up the weights.
|
|
|
|
addSuccessorWithWeight(SwitchBB, Small.BB,
|
|
|
|
Small.ExtraWeight + Big.ExtraWeight);
|
|
|
|
addSuccessorWithWeight(SwitchBB, Default,
|
|
|
|
// The default destination is the first successor in IR.
|
|
|
|
BPI ? BPI->getEdgeWeight(SwitchBB->getBasicBlock(), (unsigned)0) : 0);
|
2010-11-22 09:45:38 +00:00
|
|
|
|
|
|
|
// Insert the true branch.
|
|
|
|
SDValue BrCond = DAG.getNode(ISD::BRCOND, DL, MVT::Other,
|
|
|
|
getControlRoot(), Cond,
|
|
|
|
DAG.getBasicBlock(Small.BB));
|
|
|
|
|
|
|
|
// Insert the false branch.
|
|
|
|
BrCond = DAG.getNode(ISD::BR, DL, MVT::Other, BrCond,
|
|
|
|
DAG.getBasicBlock(Default));
|
|
|
|
|
|
|
|
DAG.setRoot(BrCond);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2008-09-03 16:12:24 +00:00
|
|
|
|
2012-05-26 20:01:32 +00:00
|
|
|
// Order cases by weight so the most likely case will be checked first.
|
2012-08-24 18:14:27 +00:00
|
|
|
uint32_t UnhandledWeights = 0;
|
2012-05-26 20:01:32 +00:00
|
|
|
if (BPI) {
|
|
|
|
for (CaseItr I = CR.Range.first, IE = CR.Range.second; I != IE; ++I) {
|
2012-08-24 18:14:27 +00:00
|
|
|
uint32_t IWeight = I->ExtraWeight;
|
|
|
|
UnhandledWeights += IWeight;
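// UnhandledWeights accumulates the weight of every case in this range; it is
// decremented per case below so each comparison gets the remaining weight as
// its false-branch weight.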
|
2012-05-26 20:01:32 +00:00
|
|
|
for (CaseItr J = CR.Range.first; J < I; ++J) {
|
2012-08-24 18:14:27 +00:00
|
|
|
uint32_t JWeight = J->ExtraWeight;
|
2012-05-26 20:01:32 +00:00
|
|
|
if (IWeight > JWeight)
|
|
|
|
std::swap(*I, *J);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2010-05-29 17:53:24 +00:00
|
|
|
// Rearrange the case blocks so that the last one falls through if possible.
|
2012-05-26 20:01:32 +00:00
|
|
|
Case &BackCase = *(CR.Range.second-1);
|
2012-05-26 21:19:12 +00:00
|
|
|
if (Size > 1 &&
|
|
|
|
NextBlock && Default != NextBlock && BackCase.BB != NextBlock) {
|
2010-05-29 17:53:24 +00:00
|
|
|
// The last case block won't fall through into 'NextBlock' if we emit the
|
|
|
|
// branches in this order. See if rearranging a case value would help.
|
2012-05-26 20:01:32 +00:00
|
|
|
// We start at the bottom as it's the case with the least weight.
|
2012-05-27 10:56:55 +00:00
|
|
|
for (Case *I = &*(CR.Range.second-2), *E = &*CR.Range.first-1; I != E; --I){
|
2010-05-29 17:53:24 +00:00
|
|
|
if (I->BB == NextBlock) {
|
|
|
|
std::swap(*I, BackCase);
|
2008-09-03 16:12:24 +00:00
|
|
|
break;
|
2010-05-29 17:53:24 +00:00
|
|
|
}
|
2008-09-03 16:12:24 +00:00
|
|
|
}
|
2010-05-29 17:53:24 +00:00
|
|
|
}
|
2008-12-23 22:25:27 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
// Create a CaseBlock record representing a conditional branch to
|
|
|
|
// the Case's target mbb if the value being switched on SV is equal
|
|
|
|
// to C.
|
|
|
|
MachineBasicBlock *CurBlock = CR.CaseBB;
|
|
|
|
for (CaseItr I = CR.Range.first, E = CR.Range.second; I != E; ++I) {
|
|
|
|
MachineBasicBlock *FallThrough;
|
|
|
|
if (I != E-1) {
|
|
|
|
FallThrough = CurMF->CreateMachineBasicBlock(CurBlock->getBasicBlock());
|
|
|
|
CurMF->insert(BBI, FallThrough);
|
2008-12-23 22:25:27 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
// Put SV in a virtual register to make it available from the new blocks.
|
|
|
|
ExportFromCurrentBlock(SV);
|
|
|
|
} else {
|
|
|
|
// If the last case doesn't match, go to the default block.
|
|
|
|
FallThrough = Default;
|
|
|
|
}
|
2008-12-23 22:25:27 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
const Value *RHS, *LHS, *MHS;
|
|
|
|
ISD::CondCode CC;
|
|
|
|
if (I->High == I->Low) {
|
|
|
|
// This is just a small case range containing exactly one case.
|
|
|
|
CC = ISD::SETEQ;
|
|
|
|
LHS = SV; RHS = I->High; MHS = NULL;
|
|
|
|
} else {
|
2012-05-17 08:56:30 +00:00
|
|
|
CC = ISD::SETCC_INVALID;
|
2010-05-29 17:53:24 +00:00
|
|
|
LHS = I->Low; MHS = SV; RHS = I->High;
|
2008-09-03 16:12:24 +00:00
|
|
|
}
|
2011-07-29 22:25:21 +00:00
|
|
|
|
2012-08-24 18:14:27 +00:00
|
|
|
// The false weight is the sum of all unhandled cases.
|
|
|
|
UnhandledWeights -= I->ExtraWeight;
|
2011-07-29 22:25:21 +00:00
|
|
|
CaseBlock CB(CC, LHS, RHS, MHS, /* truebb */ I->BB, /* falsebb */ FallThrough,
|
|
|
|
/* me */ CurBlock,
|
2012-08-24 18:14:27 +00:00
|
|
|
/* trueweight */ I->ExtraWeight,
|
|
|
|
/* falseweight */ UnhandledWeights);
|
2008-12-23 22:25:27 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
// If emitting the first comparison, just call visitSwitchCase to emit the
|
|
|
|
// code into the current block. Otherwise, push the CaseBlock onto the
|
|
|
|
// vector to be later processed by SDISel, and insert the node's MBB
|
|
|
|
// before the next MBB.
|
|
|
|
if (CurBlock == SwitchBB)
|
|
|
|
visitSwitchCase(CB, SwitchBB);
|
|
|
|
else
|
|
|
|
SwitchCases.push_back(CB);
|
2008-12-23 22:25:27 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
CurBlock = FallThrough;
|
|
|
|
}
|
2008-09-03 16:12:24 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
return true;
|
|
|
|
}
|
2008-09-03 16:12:24 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
static inline bool areJTsAllowed(const TargetLowering &TLI) {
|
2012-07-02 22:39:56 +00:00
|
|
|
return TLI.supportJumpTables() &&
|
2010-05-29 17:53:24 +00:00
|
|
|
(TLI.isOperationLegalOrCustom(ISD::BR_JT, MVT::Other) ||
|
|
|
|
TLI.isOperationLegalOrCustom(ISD::BRIND, MVT::Other));
|
|
|
|
}
|
2008-09-03 16:12:24 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
static APInt ComputeRange(const APInt &First, const APInt &Last) {
|
|
|
|
uint32_t BitWidth = std::max(Last.getBitWidth(), First.getBitWidth()) + 1;
|
2012-05-17 08:56:30 +00:00
|
|
|
APInt LastExt = Last.zext(BitWidth), FirstExt = First.zext(BitWidth);
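// Widen by one bit so that Last - First + 1 cannot overflow even when the
// cases span the full original bit width.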
|
2010-05-29 17:53:24 +00:00
|
|
|
return (LastExt - FirstExt + 1ULL);
|
2008-09-03 16:12:24 +00:00
|
|
|
}
|
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
/// handleJTSwitchCase - Emit jumptable for current switch case range
|
2011-09-09 22:06:59 +00:00
|
|
|
bool SelectionDAGBuilder::handleJTSwitchCase(CaseRec &CR,
|
|
|
|
CaseRecVector &WorkList,
|
|
|
|
const Value *SV,
|
|
|
|
MachineBasicBlock *Default,
|
2010-05-29 17:53:24 +00:00
|
|
|
MachineBasicBlock *SwitchBB) {
|
|
|
|
Case& FrontCase = *CR.Range.first;
|
|
|
|
Case& BackCase = *(CR.Range.second-1);
|
2008-09-03 16:12:24 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
const APInt &First = cast<ConstantInt>(FrontCase.Low)->getValue();
|
|
|
|
const APInt &Last = cast<ConstantInt>(BackCase.High)->getValue();
|
2008-09-03 16:12:24 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
APInt TSize(First.getBitWidth(), 0);
|
2011-09-09 22:06:59 +00:00
|
|
|
for (CaseItr I = CR.Range.first, E = CR.Range.second; I != E; ++I)
|
2010-05-29 17:53:24 +00:00
|
|
|
TSize += I->size();
|
2008-09-03 16:12:24 +00:00
|
|
|
|
2012-09-25 20:35:36 +00:00
|
|
|
if (!areJTsAllowed(TLI) || TSize.ult(TLI.getMinimumJumpTableEntries()))
|
2010-05-29 17:53:24 +00:00
|
|
|
return false;
|
2008-09-03 16:12:24 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
APInt Range = ComputeRange(First, Last);
|
2011-10-26 01:47:48 +00:00
|
|
|
// The density is TSize / Range. Require at least 40%.
|
|
|
|
// It should not be possible for IntTSize to saturate for sane code, but make
|
|
|
|
// sure we handle Range saturation correctly.
|
|
|
|
uint64_t IntRange = Range.getLimitedValue(UINT64_MAX/10);
|
|
|
|
uint64_t IntTSize = TSize.getLimitedValue(UINT64_MAX/10);
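// Both values are clamped to UINT64_MAX/10 so the multiplications below
// cannot overflow; the comparison implements TSize / Range >= 40%.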
|
|
|
|
if (IntTSize * 10 < IntRange * 4)
|
2010-05-29 17:53:24 +00:00
|
|
|
return false;
|
2008-09-03 16:12:24 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
DEBUG(dbgs() << "Lowering jump table\n"
|
|
|
|
<< "First entry: " << First << ". Last entry: " << Last << '\n'
|
2011-10-26 01:47:48 +00:00
|
|
|
<< "Range: " << Range << ". Size: " << TSize << ".\n\n");
|
2008-09-03 16:12:24 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
// Get the MachineFunction which holds the current MBB. This is used when
|
|
|
|
// inserting any additional MBBs necessary to represent the switch.
|
|
|
|
MachineFunction *CurMF = FuncInfo.MF;
|
2010-04-19 22:41:47 +00:00
|
|
|
|
2008-09-03 16:12:24 +00:00
|
|
|
// Figure out which block is immediately after the current one.
|
2010-05-29 17:53:24 +00:00
|
|
|
MachineFunction::iterator BBI = CR.CaseBB;
|
|
|
|
++BBI;
|
2008-09-03 16:12:24 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
const BasicBlock *LLVMBB = CR.CaseBB->getBasicBlock();
|
2008-09-03 16:12:24 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
// Create a new basic block to hold the code for loading the address
|
|
|
|
// of the jump table, and jumping to it. Update successor information;
|
|
|
|
// we will either branch to the default case for the switch, or the jump
|
|
|
|
// table.
|
|
|
|
MachineBasicBlock *JumpTableBB = CurMF->CreateMachineBasicBlock(LLVMBB);
|
|
|
|
CurMF->insert(BBI, JumpTableBB);
|
2011-06-16 20:22:37 +00:00
|
|
|
|
|
|
|
addSuccessorWithWeight(CR.CaseBB, Default);
|
|
|
|
addSuccessorWithWeight(CR.CaseBB, JumpTableBB);
|
2009-12-21 22:30:11 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
// Build a vector of destination BBs, corresponding to each target
|
|
|
|
// of the jump table. If the value of the jump table slot corresponds to
|
|
|
|
// a case statement, push the case's BB onto the vector, otherwise, push
|
|
|
|
// the default BB.
|
|
|
|
std::vector<MachineBasicBlock*> DestBBs;
|
|
|
|
APInt TEI = First;
|
|
|
|
for (CaseItr I = CR.Range.first, E = CR.Range.second; I != E; ++TEI) {
|
|
|
|
const APInt &Low = cast<ConstantInt>(I->Low)->getValue();
|
|
|
|
const APInt &High = cast<ConstantInt>(I->High)->getValue();
|
|
|
|
|
2012-05-17 08:56:30 +00:00
|
|
|
if (Low.ule(TEI) && TEI.ule(High)) {
|
2010-05-29 17:53:24 +00:00
|
|
|
DestBBs.push_back(I->BB);
|
|
|
|
if (TEI==High)
|
|
|
|
++I;
|
|
|
|
} else {
|
|
|
|
DestBBs.push_back(Default);
|
|
|
|
}
|
2008-09-03 16:12:24 +00:00
|
|
|
}
|
2008-12-23 22:25:27 +00:00
|
|
|
|
2012-08-24 18:14:27 +00:00
|
|
|
// Calculate weight for each unique destination in CR.
|
|
|
|
DenseMap<MachineBasicBlock*, uint32_t> DestWeights;
|
|
|
|
if (FuncInfo.BPI)
|
|
|
|
for (CaseItr I = CR.Range.first, E = CR.Range.second; I != E; ++I) {
|
|
|
|
DenseMap<MachineBasicBlock*, uint32_t>::iterator Itr =
|
|
|
|
DestWeights.find(I->BB);
|
|
|
|
if (Itr != DestWeights.end())
|
|
|
|
Itr->second += I->ExtraWeight;
|
|
|
|
else
|
|
|
|
DestWeights[I->BB] = I->ExtraWeight;
|
|
|
|
}
|
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
// Update successor info. Add one edge to each unique successor.
|
|
|
|
BitVector SuccsHandled(CR.CaseBB->getParent()->getNumBlockIDs());
|
|
|
|
for (std::vector<MachineBasicBlock*>::iterator I = DestBBs.begin(),
|
|
|
|
E = DestBBs.end(); I != E; ++I) {
|
|
|
|
if (!SuccsHandled[(*I)->getNumber()]) {
|
|
|
|
SuccsHandled[(*I)->getNumber()] = true;
|
2012-08-24 18:14:27 +00:00
|
|
|
DenseMap<MachineBasicBlock*, uint32_t>::iterator Itr =
|
|
|
|
DestWeights.find(*I);
|
|
|
|
addSuccessorWithWeight(JumpTableBB, *I,
|
|
|
|
Itr != DestWeights.end() ? Itr->second : 0);
|
2010-05-29 17:53:24 +00:00
|
|
|
}
|
|
|
|
}
|
2008-09-03 16:12:24 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
// Create a jump table index for this jump table.
|
|
|
|
unsigned JTEncoding = TLI.getJumpTableEncoding();
|
|
|
|
unsigned JTI = CurMF->getOrCreateJumpTableInfo(JTEncoding)
|
|
|
|
->createJumpTableIndex(DestBBs);
|
2008-09-03 16:12:24 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
// Set the jump table information so that we can codegen it as a second
|
|
|
|
// MachineBasicBlock
|
|
|
|
JumpTable JT(-1U, JTI, JumpTableBB, Default);
|
|
|
|
JumpTableHeader JTH(First, Last, SV, CR.CaseBB, (CR.CaseBB == SwitchBB));
|
|
|
|
if (CR.CaseBB == SwitchBB)
|
|
|
|
visitJumpTableHeader(JT, JTH, SwitchBB);
|
2008-09-03 16:12:24 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
JTCases.push_back(JumpTableBlock(JTH, JT));
|
|
|
|
return true;
|
|
|
|
}
|
2008-12-23 22:25:27 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
/// handleBTSplitSwitchCase - emit comparison and split binary search tree into
|
|
|
|
/// 2 subtrees.
|
|
|
|
bool SelectionDAGBuilder::handleBTSplitSwitchCase(CaseRec& CR,
|
|
|
|
CaseRecVector& WorkList,
|
|
|
|
const Value* SV,
|
|
|
|
MachineBasicBlock *Default,
|
|
|
|
MachineBasicBlock *SwitchBB) {
|
|
|
|
// Get the MachineFunction which holds the current MBB. This is used when
|
|
|
|
// inserting any additional MBBs necessary to represent the switch.
|
|
|
|
MachineFunction *CurMF = FuncInfo.MF;
|
2008-12-23 22:25:27 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
// Figure out which block is immediately after the current one.
|
|
|
|
MachineFunction::iterator BBI = CR.CaseBB;
|
|
|
|
++BBI;
|
2008-12-23 22:25:27 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
Case& FrontCase = *CR.Range.first;
|
|
|
|
Case& BackCase = *(CR.Range.second-1);
|
|
|
|
const BasicBlock *LLVMBB = CR.CaseBB->getBasicBlock();
|
2008-09-03 16:12:24 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
// Size is the number of Cases represented by this range.
|
|
|
|
unsigned Size = CR.Range.second - CR.Range.first;
|
2010-04-19 22:41:47 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
const APInt &First = cast<ConstantInt>(FrontCase.Low)->getValue();
|
|
|
|
const APInt &Last = cast<ConstantInt>(BackCase.High)->getValue();
|
|
|
|
double FMetric = 0;
|
|
|
|
CaseItr Pivot = CR.Range.first + Size/2;
|
2009-10-27 22:10:34 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
// Select optimal pivot, maximizing sum density of LHS and RHS. This will
|
|
|
|
// (heuristically) allow us to emit JumpTables later.
|
|
|
|
APInt TSize(First.getBitWidth(), 0);
|
|
|
|
for (CaseItr I = CR.Range.first, E = CR.Range.second;
|
|
|
|
I!=E; ++I)
|
|
|
|
TSize += I->size();
|
2008-09-03 16:12:24 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
APInt LSize = FrontCase.size();
|
|
|
|
APInt RSize = TSize-LSize;
|
|
|
|
DEBUG(dbgs() << "Selecting best pivot: \n"
|
|
|
|
<< "First: " << First << ", Last: " << Last <<'\n'
|
|
|
|
<< "LSize: " << LSize << ", RSize: " << RSize << '\n');
|
|
|
|
for (CaseItr I = CR.Range.first, J=I+1, E = CR.Range.second;
|
|
|
|
J!=E; ++I, ++J) {
|
|
|
|
const APInt &LEnd = cast<ConstantInt>(I->High)->getValue();
|
|
|
|
const APInt &RBegin = cast<ConstantInt>(J->Low)->getValue();
|
|
|
|
APInt Range = ComputeRange(LEnd, RBegin);
|
2012-05-15 06:50:18 +00:00
|
|
|
assert((Range - 2ULL).isNonNegative() &&
|
|
|
|
"Invalid case distance");
|
2011-04-09 06:57:13 +00:00
|
|
|
// Use volatile double here to avoid excess precision issues on some hosts,
|
|
|
|
// e.g. that use 80-bit X87 registers.
|
|
|
|
volatile double LDensity =
|
|
|
|
(double)LSize.roundToDouble() /
|
2010-05-29 17:53:24 +00:00
|
|
|
(LEnd - First + 1ULL).roundToDouble();
|
2011-04-09 06:57:13 +00:00
|
|
|
volatile double RDensity =
|
|
|
|
(double)RSize.roundToDouble() /
|
2010-05-29 17:53:24 +00:00
|
|
|
(Last - RBegin + 1ULL).roundToDouble();
|
|
|
|
double Metric = Range.logBase2()*(LDensity+RDensity);
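// The log2(Range) factor favors pivots that sit at a large gap between
// consecutive cases, while the density sum favors splits that keep both
// halves dense enough to become jump tables later.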
|
|
|
|
// Should always split in some non-trivial place
|
|
|
|
DEBUG(dbgs() <<"=>Step\n"
|
|
|
|
<< "LEnd: " << LEnd << ", RBegin: " << RBegin << '\n'
|
|
|
|
<< "LDensity: " << LDensity
|
|
|
|
<< ", RDensity: " << RDensity << '\n'
|
|
|
|
<< "Metric: " << Metric << '\n');
|
|
|
|
if (FMetric < Metric) {
|
|
|
|
Pivot = J;
|
|
|
|
FMetric = Metric;
|
|
|
|
DEBUG(dbgs() << "Current metric set to: " << FMetric << '\n');
|
2009-06-04 22:49:04 +00:00
|
|
|
}
|
2010-05-29 17:53:24 +00:00
|
|
|
|
|
|
|
LSize += J->size();
|
|
|
|
RSize -= J->size();
|
|
|
|
}
|
|
|
|
if (areJTsAllowed(TLI)) {
|
|
|
|
// If our case is dense we *really* should handle it earlier!
|
|
|
|
assert((FMetric > 0) && "Should handle dense range earlier!");
|
|
|
|
} else {
|
|
|
|
Pivot = CR.Range.first + Size/2;
|
2008-09-03 16:12:24 +00:00
|
|
|
}
|
2009-12-21 22:30:11 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
CaseRange LHSR(CR.Range.first, Pivot);
|
|
|
|
CaseRange RHSR(Pivot, CR.Range.second);
|
SwitchInst refactoring.
The purpose of refactoring is to hide operand roles from SwitchInst user (programmer). If you want to play with operands directly, probably you will need lower level methods than SwitchInst ones (TerminatorInst or may be User). After this patch we can reorganize SwitchInst operands and successors as we want.
What was done:
1. Changed semantics of index inside the getCaseValue method:
getCaseValue(0) means "get first case", not a condition. Use getCondition() if you want to resolve the condition. I propose don't mix SwitchInst case indexing with low level indexing (TI successors indexing, User's operands indexing), since it may be dangerous.
2. By the same reason findCaseValue(ConstantInt*) returns actual number of case value. 0 means first case, not default. If there is no case with given value, ErrorIndex will returned.
3. Added getCaseSuccessor method. I propose to avoid usage of TerminatorInst::getSuccessor if you want to resolve case successor BB. Use getCaseSuccessor instead, since internal SwitchInst organization of operands/successors is hidden and may be changed in any moment.
4. Added resolveSuccessorIndex and resolveCaseIndex. The main purpose of these methods is to see how case successors are really mapped in TerminatorInst.
4.1 "resolveSuccessorIndex" was created if you need to level down from SwitchInst to TerminatorInst. It returns TerminatorInst's successor index for given case successor.
4.2 "resolveCaseIndex" converts low level successors index to case index that curresponds to the given successor.
Note: There are also related compatability fix patches for dragonegg, klee, llvm-gcc-4.0, llvm-gcc-4.2, safecode, clang.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@149481 91177308-0d34-0410-b5e6-96231b3b80d8
2012-02-01 07:49:51 +00:00
|
|
|
const Constant *C = Pivot->Low;
|
2010-05-29 17:53:24 +00:00
|
|
|
MachineBasicBlock *FalseBB = 0, *TrueBB = 0;
|
2008-09-03 16:12:24 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
// We know that we branch to the LHS if the Value being switched on is
|
|
|
|
// less than the Pivot value, C. We use this to optimize our binary
|
|
|
|
// tree a bit, by recognizing that if SV is greater than or equal to the
|
|
|
|
// LHS's Case Value, and that Case Value is exactly one less than the
|
|
|
|
// Pivot's Value, then we can branch directly to the LHS's Target,
|
|
|
|
// rather than creating a leaf node for it.
|
|
|
|
if ((LHSR.second - LHSR.first) == 1 &&
|
|
|
|
LHSR.first->High == CR.GE &&
|
|
|
|
cast<ConstantInt>(C)->getValue() ==
|
|
|
|
(cast<ConstantInt>(CR.GE)->getValue() + 1LL)) {
|
|
|
|
TrueBB = LHSR.first->BB;
|
|
|
|
} else {
|
|
|
|
TrueBB = CurMF->CreateMachineBasicBlock(LLVMBB);
|
|
|
|
CurMF->insert(BBI, TrueBB);
|
|
|
|
WorkList.push_back(CaseRec(TrueBB, C, CR.GE, LHSR));
|
2008-09-03 16:12:24 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
// Put SV in a virtual register to make it available from the new blocks.
|
|
|
|
ExportFromCurrentBlock(SV);
|
|
|
|
}
|
2008-09-03 16:12:24 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
// Similar to the optimization above, if the Value being switched on is
|
|
|
|
// known to be less than the Constant CR.LT, and the current Case Value
|
|
|
|
// is CR.LT - 1, then we can branch directly to the target block for
|
|
|
|
// the current Case Value, rather than emitting a RHS leaf node for it.
|
|
|
|
if ((RHSR.second - RHSR.first) == 1 && CR.LT &&
|
|
|
|
cast<ConstantInt>(RHSR.first->Low)->getValue() ==
|
|
|
|
(cast<ConstantInt>(CR.LT)->getValue() - 1LL)) {
|
|
|
|
FalseBB = RHSR.first->BB;
|
|
|
|
} else {
|
|
|
|
FalseBB = CurMF->CreateMachineBasicBlock(LLVMBB);
|
|
|
|
CurMF->insert(BBI, FalseBB);
|
|
|
|
WorkList.push_back(CaseRec(FalseBB,CR.LT,C,RHSR));
|
|
|
|
|
|
|
|
// Put SV in a virtual register to make it available from the new blocks.
|
|
|
|
ExportFromCurrentBlock(SV);
|
2008-09-03 16:12:24 +00:00
|
|
|
}
|
2009-01-16 06:53:46 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
// Create a CaseBlock record representing a conditional branch to
|
|
|
|
// the LHS node if the value being switched on SV is less than C.
|
|
|
|
// Otherwise, branch to RHS.
|
2012-05-17 08:56:30 +00:00
|
|
|
CaseBlock CB(ISD::SETULT, SV, C, NULL, TrueBB, FalseBB, CR.CaseBB);
|
2008-09-03 16:12:24 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
if (CR.CaseBB == SwitchBB)
|
|
|
|
visitSwitchCase(CB, SwitchBB);
|
|
|
|
else
|
|
|
|
SwitchCases.push_back(CB);
|
2010-01-01 04:41:22 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
return true;
|
2008-09-03 16:12:24 +00:00
|
|
|
}
|
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
/// handleBitTestsSwitchCase - if the current case range has few destinations and
|
|
|
|
/// the range spans less than the machine word bitwidth, encode the case range into a series
|
|
|
|
/// of masks and emit bit tests with these masks.
|
|
|
|
bool SelectionDAGBuilder::handleBitTestsSwitchCase(CaseRec& CR,
|
|
|
|
CaseRecVector& WorkList,
|
|
|
|
const Value* SV,
|
|
|
|
MachineBasicBlock* Default,
|
|
|
|
MachineBasicBlock *SwitchBB){
|
|
|
|
EVT PTy = TLI.getPointerTy();
|
|
|
|
unsigned IntPtrBits = PTy.getSizeInBits();
|
2008-09-03 16:12:24 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
Case& FrontCase = *CR.Range.first;
|
|
|
|
Case& BackCase = *(CR.Range.second-1);
|
2009-12-21 22:30:11 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
// Get the MachineFunction which holds the current MBB. This is used when
|
|
|
|
// inserting any additional MBBs necessary to represent the switch.
|
|
|
|
MachineFunction *CurMF = FuncInfo.MF;
|
2009-12-21 22:30:11 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
// If target does not have legal shift left, do not emit bit tests at all.
|
|
|
|
if (!TLI.isOperationLegal(ISD::SHL, TLI.getPointerTy()))
|
|
|
|
return false;
|
2009-12-21 22:30:11 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
size_t numCmps = 0;
|
|
|
|
for (CaseItr I = CR.Range.first, E = CR.Range.second;
|
|
|
|
I!=E; ++I) {
|
|
|
|
// A single case counts as one comparison, a case range as two.
|
|
|
|
numCmps += (I->Low == I->High ? 1 : 2);
|
|
|
|
}
|
2008-09-03 16:12:24 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
// Count unique destinations
|
|
|
|
SmallSet<MachineBasicBlock*, 4> Dests;
|
|
|
|
for (CaseItr I = CR.Range.first, E = CR.Range.second; I!=E; ++I) {
|
|
|
|
Dests.insert(I->BB);
|
|
|
|
if (Dests.size() > 3)
|
|
|
|
// Don't bother with the code below if there are too many unique destinations.
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
DEBUG(dbgs() << "Total number of unique destinations: "
|
|
|
|
<< Dests.size() << '\n'
|
|
|
|
<< "Total number of comparisons: " << numCmps << '\n');
|
2008-09-03 16:12:24 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
// Compute span of values.
|
|
|
|
const APInt& minValue = cast<ConstantInt>(FrontCase.Low)->getValue();
|
|
|
|
const APInt& maxValue = cast<ConstantInt>(BackCase.High)->getValue();
|
|
|
|
APInt cmpRange = maxValue - minValue;
|
2008-09-03 16:12:24 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
DEBUG(dbgs() << "Compare range: " << cmpRange << '\n'
|
|
|
|
<< "Low bound: " << minValue << '\n'
|
|
|
|
<< "High bound: " << maxValue << '\n');
|
2008-09-03 16:12:24 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
if (cmpRange.uge(IntPtrBits) ||
|
|
|
|
(!(Dests.size() == 1 && numCmps >= 3) &&
|
|
|
|
!(Dests.size() == 2 && numCmps >= 5) &&
|
|
|
|
!(Dests.size() >= 3 && numCmps >= 6)))
|
|
|
|
return false;
|
2008-09-03 16:12:24 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
DEBUG(dbgs() << "Emitting bit tests\n");
|
|
|
|
APInt lowBound = APInt::getNullValue(cmpRange.getBitWidth());
|
2008-09-03 16:12:24 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
// Optimize the case where all the case values fit in a
|
|
|
|
// word without having to subtract minValue. In this case,
|
|
|
|
// we can optimize away the subtraction.
|
2012-05-17 08:56:30 +00:00
|
|
|
if (maxValue.ult(IntPtrBits)) {
|
2010-05-29 17:53:24 +00:00
|
|
|
cmpRange = maxValue;
|
|
|
|
} else {
|
|
|
|
lowBound = minValue;
|
|
|
|
}
|
2008-09-03 16:12:24 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
CaseBitsVector CasesBits;
|
|
|
|
unsigned i, count = 0;
|
2008-09-03 16:12:24 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
for (CaseItr I = CR.Range.first, E = CR.Range.second; I!=E; ++I) {
|
|
|
|
MachineBasicBlock* Dest = I->BB;
|
|
|
|
for (i = 0; i < count; ++i)
|
|
|
|
if (Dest == CasesBits[i].BB)
|
|
|
|
break;
|
2008-09-03 16:12:24 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
if (i == count) {
|
|
|
|
assert((count < 3) && "Too many destinations to test!");
|
2012-08-24 18:14:27 +00:00
|
|
|
CasesBits.push_back(CaseBits(0, Dest, 0, 0/*Weight*/));
|
2010-05-29 17:53:24 +00:00
|
|
|
count++;
|
|
|
|
}
|
2008-09-03 16:12:24 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
const APInt& lowValue = cast<ConstantInt>(I->Low)->getValue();
|
|
|
|
const APInt& highValue = cast<ConstantInt>(I->High)->getValue();
|
2008-09-03 16:12:24 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
uint64_t lo = (lowValue - lowBound).getZExtValue();
|
|
|
|
uint64_t hi = (highValue - lowBound).getZExtValue();
|
2012-08-24 18:14:27 +00:00
|
|
|
CasesBits[i].ExtraWeight += I->ExtraWeight;
|
2008-09-03 16:12:24 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
for (uint64_t j = lo; j <= hi; j++) {
|
|
|
|
CasesBits[i].Mask |= 1ULL << j;
|
|
|
|
CasesBits[i].Bits++;
|
|
|
|
}
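// At this point CasesBits[i].Mask has one bit set for every value in
// [lo, hi] that should branch to CasesBits[i].BB.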
|
2008-09-03 16:12:24 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
}
|
|
|
|
std::sort(CasesBits.begin(), CasesBits.end(), CaseBitsCmp());
|
2008-09-03 16:12:24 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
BitTestInfo BTC;
|
2008-09-03 16:12:24 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
// Figure out which block is immediately after the current one.
|
|
|
|
MachineFunction::iterator BBI = CR.CaseBB;
|
|
|
|
++BBI;
|
2008-09-03 16:12:24 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
const BasicBlock *LLVMBB = CR.CaseBB->getBasicBlock();
|
2008-11-10 04:46:22 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
DEBUG(dbgs() << "Cases:\n");
|
|
|
|
for (unsigned i = 0, e = CasesBits.size(); i!=e; ++i) {
|
|
|
|
DEBUG(dbgs() << "Mask: " << CasesBits[i].Mask
|
|
|
|
<< ", Bits: " << CasesBits[i].Bits
|
|
|
|
<< ", BB: " << CasesBits[i].BB << '\n');
|
2008-09-03 16:12:24 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
MachineBasicBlock *CaseBB = CurMF->CreateMachineBasicBlock(LLVMBB);
|
|
|
|
CurMF->insert(BBI, CaseBB);
|
|
|
|
BTC.push_back(BitTestCase(CasesBits[i].Mask,
|
|
|
|
CaseBB,
|
2012-08-24 18:14:27 +00:00
|
|
|
CasesBits[i].BB, CasesBits[i].ExtraWeight));
|
2010-05-29 17:53:24 +00:00
|
|
|
|
|
|
|
// Put SV in a virtual register to make it available from the new blocks.
|
|
|
|
ExportFromCurrentBlock(SV);
|
2009-04-27 18:41:29 +00:00
|
|
|
}
|
2010-01-01 04:41:22 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
BitTestBlock BTB(lowBound, cmpRange, SV,
|
2011-01-06 01:02:44 +00:00
|
|
|
-1U, MVT::Other, (CR.CaseBB == SwitchBB),
|
2010-05-29 17:53:24 +00:00
|
|
|
CR.CaseBB, Default, BTC);
|
2008-11-10 04:46:22 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
if (CR.CaseBB == SwitchBB)
|
|
|
|
visitBitTestHeader(BTB, SwitchBB);
|
2008-11-10 04:46:22 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
BitTestCases.push_back(BTB);
|
2008-11-10 04:46:22 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
return true;
|
|
|
|
}
|
2008-11-10 04:46:22 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
/// Clusterify - Transform a simple list of Cases into a list of CaseRanges.
|
|
|
|
size_t SelectionDAGBuilder::Clusterify(CaseVector& Cases,
|
|
|
|
const SwitchInst& SI) {
|
2012-05-18 08:32:28 +00:00
|
|
|
|
|
|
|
/// Use a shorter form of declaration, and also
|
|
|
|
/// show that we want to use CRSBuilder as the Clusterifier.
|
2012-06-02 07:26:00 +00:00
|
|
|
typedef IntegersSubsetMapping<MachineBasicBlock> Clusterifier;
|
2012-05-18 08:32:28 +00:00
|
|
|
|
|
|
|
Clusterifier TheClusterifier;
|
2010-01-01 04:41:22 +00:00
|
|
|
|
2012-08-24 18:14:27 +00:00
|
|
|
BranchProbabilityInfo *BPI = FuncInfo.BPI;
|
2010-05-29 17:53:24 +00:00
|
|
|
// Start with "simple" cases
|
2012-03-11 06:09:17 +00:00
|
|
|
for (SwitchInst::ConstCaseIt i = SI.case_begin(), e = SI.case_end();
|
2012-03-08 07:06:20 +00:00
|
|
|
i != e; ++i) {
|
|
|
|
const BasicBlock *SuccBB = i.getCaseSuccessor();
|
2011-07-29 22:25:21 +00:00
|
|
|
MachineBasicBlock *SMBB = FuncInfo.MBBMap[SuccBB];
|
|
|
|
|
2012-08-24 18:14:27 +00:00
|
|
|
TheClusterifier.add(i.getCaseValueEx(), SMBB,
|
|
|
|
BPI ? BPI->getEdgeWeight(SI.getParent(), i.getSuccessorIndex()) : 0);
|
2010-05-29 17:53:24 +00:00
|
|
|
}
|
2012-05-18 08:32:28 +00:00
|
|
|
|
|
|
|
TheClusterifier.optimize();
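// optimize() coalesces adjacent case values that map to the same successor,
// so each resulting cluster below corresponds to a single contiguous range.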
|
|
|
|
|
|
|
|
size_t numCmps = 0;
|
|
|
|
for (Clusterifier::RangeIterator i = TheClusterifier.begin(),
|
|
|
|
e = TheClusterifier.end(); i != e; ++i, ++numCmps) {
|
2012-07-04 05:53:05 +00:00
|
|
|
Clusterifier::Cluster &C = *i;
|
2012-08-24 18:14:27 +00:00
|
|
|
// Update edge weight for the cluster.
|
|
|
|
unsigned W = C.first.Weight;
|
2009-12-21 22:42:14 +00:00
|
|
|
|
2012-05-28 12:39:09 +00:00
|
|
|
// FIXME: Currently works with ConstantInt-based numbers.
|
|
|
|
// Changing it to be APInt-based is too heavy a change for this commit.
|
2012-07-04 05:53:05 +00:00
|
|
|
Cases.push_back(Case(C.first.getLow().toConstantInt(),
|
|
|
|
C.first.getHigh().toConstantInt(), C.second, W));
|
2012-05-18 08:32:28 +00:00
|
|
|
|
2012-07-04 05:53:05 +00:00
|
|
|
if (C.first.getLow() != C.first.getHigh())
|
2012-05-18 08:32:28 +00:00
|
|
|
// A range counts double, since it requires two compares.
|
|
|
|
++numCmps;
|
2008-11-10 04:46:22 +00:00
|
|
|
}
|
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
return numCmps;
|
|
|
|
}
|
2008-11-16 05:06:27 +00:00
|
|
|
|
2010-09-30 19:44:31 +00:00
|
|
|
void SelectionDAGBuilder::UpdateSplitBlock(MachineBasicBlock *First,
|
|
|
|
MachineBasicBlock *Last) {
|
|
|
|
// Update JTCases.
|
|
|
|
for (unsigned i = 0, e = JTCases.size(); i != e; ++i)
|
|
|
|
if (JTCases[i].first.HeaderBB == First)
|
|
|
|
JTCases[i].first.HeaderBB = Last;
|
|
|
|
|
|
|
|
// Update BitTestCases.
|
|
|
|
for (unsigned i = 0, e = BitTestCases.size(); i != e; ++i)
|
|
|
|
if (BitTestCases[i].Parent == First)
|
|
|
|
BitTestCases[i].Parent = Last;
|
|
|
|
}
|
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
void SelectionDAGBuilder::visitSwitch(const SwitchInst &SI) {
|
2010-07-10 09:00:22 +00:00
|
|
|
MachineBasicBlock *SwitchMBB = FuncInfo.MBB;
|
2010-01-01 04:41:22 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
// Figure out which block is immediately after the current one.
|
|
|
|
MachineBasicBlock *NextBlock = 0;
|
|
|
|
MachineBasicBlock *Default = FuncInfo.MBBMap[SI.getDefaultDest()];
|
2008-11-16 05:06:27 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
// If there is only the default destination, branch to it if it is not the
|
|
|
|
// next basic block. Otherwise, just fall through.
|
SwitchInst refactoring.
The purpose of refactoring is to hide operand roles from SwitchInst user (programmer). If you want to play with operands directly, probably you will need lower level methods than SwitchInst ones (TerminatorInst or may be User). After this patch we can reorganize SwitchInst operands and successors as we want.
What was done:
1. Changed semantics of index inside the getCaseValue method:
getCaseValue(0) means "get first case", not a condition. Use getCondition() if you want to resolve the condition. I propose don't mix SwitchInst case indexing with low level indexing (TI successors indexing, User's operands indexing), since it may be dangerous.
2. By the same reason findCaseValue(ConstantInt*) returns actual number of case value. 0 means first case, not default. If there is no case with given value, ErrorIndex will returned.
3. Added getCaseSuccessor method. I propose to avoid usage of TerminatorInst::getSuccessor if you want to resolve case successor BB. Use getCaseSuccessor instead, since internal SwitchInst organization of operands/successors is hidden and may be changed in any moment.
4. Added resolveSuccessorIndex and resolveCaseIndex. The main purpose of these methods is to see how case successors are really mapped in TerminatorInst.
4.1 "resolveSuccessorIndex" was created if you need to level down from SwitchInst to TerminatorInst. It returns TerminatorInst's successor index for given case successor.
4.2 "resolveCaseIndex" converts low level successors index to case index that curresponds to the given successor.
Note: There are also related compatability fix patches for dragonegg, klee, llvm-gcc-4.0, llvm-gcc-4.2, safecode, clang.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@149481 91177308-0d34-0410-b5e6-96231b3b80d8
2012-02-01 07:49:51 +00:00
|
|
|
if (!SI.getNumCases()) {
|
2010-05-29 17:53:24 +00:00
|
|
|
// Update machine-CFG edges.
|
2009-12-21 22:42:14 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
// If this is not a fall-through branch, emit the branch.
|
|
|
|
SwitchMBB->addSuccessor(Default);
|
|
|
|
if (Default != NextBlock)
|
|
|
|
DAG.setRoot(DAG.getNode(ISD::BR, getCurDebugLoc(),
|
|
|
|
MVT::Other, getControlRoot(),
|
|
|
|
DAG.getBasicBlock(Default)));
|
2009-12-21 22:42:14 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
return;
|
2008-11-10 04:46:22 +00:00
|
|
|
}
|
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
// If there are any non-default case statements, create a vector of Cases
|
|
|
|
// representing each one, and sort the vector so that we can efficiently
|
|
|
|
// create a binary search tree from them.
|
|
|
|
CaseVector Cases;
|
|
|
|
size_t numCmps = Clusterify(Cases, SI);
|
|
|
|
DEBUG(dbgs() << "Clusterify finished. Total clusters: " << Cases.size()
|
|
|
|
<< ". Total compares: " << numCmps << '\n');
|
2011-10-18 12:44:00 +00:00
|
|
|
(void)numCmps;
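// The cast silences -Wunused-variable in builds where the DEBUG output above
// is compiled away.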
|
2009-12-21 22:42:14 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
// Get the Value to be switched on and default basic blocks, which will be
|
|
|
|
// inserted into CaseBlock records, representing basic blocks in the binary
|
|
|
|
// search tree.
|
2011-09-29 20:21:17 +00:00
|
|
|
const Value *SV = SI.getCondition();
|
2008-09-03 16:12:24 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
// Push the initial CaseRec onto the worklist
|
|
|
|
CaseRecVector WorkList;
|
|
|
|
WorkList.push_back(CaseRec(SwitchMBB,0,0,
|
|
|
|
CaseRange(Cases.begin(),Cases.end())));
|
2008-09-03 16:12:24 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
while (!WorkList.empty()) {
|
|
|
|
// Grab a record representing a case range to process off the worklist
|
|
|
|
CaseRec CR = WorkList.back();
|
|
|
|
WorkList.pop_back();
|
2008-09-03 16:12:24 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
if (handleBitTestsSwitchCase(CR, WorkList, SV, Default, SwitchMBB))
|
|
|
|
continue;
|
2008-09-03 16:12:24 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
// If the range has few cases (two or less) emit a series of specific
|
|
|
|
// tests.
|
|
|
|
if (handleSmallSwitchRange(CR, WorkList, SV, Default, SwitchMBB))
|
|
|
|
continue;
|
2008-09-03 16:12:24 +00:00
|
|
|
|
2012-09-25 20:35:36 +00:00
|
|
|
// If the switch has more than N blocks, and is at least 40% dense, and the
|
2010-05-29 17:53:24 +00:00
|
|
|
// target supports indirect branches, then emit a jump table rather than
|
|
|
|
// lowering the switch to a binary tree of conditional branches.
|
2012-09-25 20:35:36 +00:00
|
|
|
// N defaults to 4 and is controlled via TLI.getMinimumJumpTableEntries().
|
2010-05-29 17:53:24 +00:00
|
|
|
if (handleJTSwitchCase(CR, WorkList, SV, Default, SwitchMBB))
|
|
|
|
continue;
|
2008-09-03 16:12:24 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
// Emit binary tree. We need to pick a pivot, and push left and right ranges
|
|
|
|
// onto the worklist. Leaves are handled via a handleSmallSwitchRange() call.
|
|
|
|
handleBTSplitSwitchCase(CR, WorkList, SV, Default, SwitchMBB);
|
|
|
|
}
|
2008-09-03 16:12:24 +00:00
|
|
|
}
|
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
void SelectionDAGBuilder::visitIndirectBr(const IndirectBrInst &I) {
|
2010-07-10 09:00:22 +00:00
|
|
|
MachineBasicBlock *IndirectBrMBB = FuncInfo.MBB;
|
2008-09-03 16:12:24 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
// Update machine-CFG edges with unique successors.
|
2012-10-23 21:05:33 +00:00
|
|
|
SmallSet<BasicBlock*, 32> Done;
|
|
|
|
for (unsigned i = 0, e = I.getNumSuccessors(); i != e; ++i) {
|
|
|
|
BasicBlock *BB = I.getSuccessor(i);
|
|
|
|
bool Inserted = Done.insert(BB);
|
|
|
|
if (!Inserted)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
MachineBasicBlock *Succ = FuncInfo.MBBMap[BB];
|
2011-06-16 20:22:37 +00:00
|
|
|
addSuccessorWithWeight(IndirectBrMBB, Succ);
|
|
|
|
}
|
2008-09-03 16:12:24 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
DAG.setRoot(DAG.getNode(ISD::BRIND, getCurDebugLoc(),
|
|
|
|
MVT::Other, getControlRoot(),
|
|
|
|
getValue(I.getAddress())));
|
|
|
|
}
|
2008-09-03 16:12:24 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
void SelectionDAGBuilder::visitFSub(const User &I) {
|
|
|
|
// -0.0 - X --> fneg
|
2011-07-18 04:54:35 +00:00
|
|
|
Type *Ty = I.getType();
|
2011-02-15 00:14:00 +00:00
|
|
|
if (isa<Constant>(I.getOperand(0)) &&
|
|
|
|
I.getOperand(0) == ConstantFP::getZeroValueForNegation(Ty)) {
|
|
|
|
SDValue Op2 = getValue(I.getOperand(1));
|
|
|
|
setValue(&I, DAG.getNode(ISD::FNEG, getCurDebugLoc(),
|
|
|
|
Op2.getValueType(), Op2));
|
|
|
|
return;
|
2010-05-29 17:53:24 +00:00
|
|
|
}
|
2008-09-03 16:12:24 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
visitBinary(I, ISD::FSUB);
|
2008-09-03 16:12:24 +00:00
|
|
|
}
|
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
void SelectionDAGBuilder::visitBinary(const User &I, unsigned OpCode) {
|
|
|
|
SDValue Op1 = getValue(I.getOperand(0));
|
|
|
|
SDValue Op2 = getValue(I.getOperand(1));
|
|
|
|
setValue(&I, DAG.getNode(OpCode, getCurDebugLoc(),
|
|
|
|
Op1.getValueType(), Op1, Op2));
|
|
|
|
}
|
2008-09-03 16:12:24 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
void SelectionDAGBuilder::visitShift(const User &I, unsigned Opcode) {
|
|
|
|
SDValue Op1 = getValue(I.getOperand(0));
|
|
|
|
SDValue Op2 = getValue(I.getOperand(1));
|
2011-02-25 21:41:48 +00:00
|
|
|
|
|
|
|
MVT ShiftTy = TLI.getShiftAmountTy(Op2.getValueType());
|
|
|
|
|
2011-02-13 09:02:52 +00:00
|
|
|
// Coerce the shift amount to the right type if we can.
|
|
|
|
if (!I.getType()->isVectorTy() && Op2.getValueType() != ShiftTy) {
|
2011-02-13 09:10:56 +00:00
|
|
|
unsigned ShiftSize = ShiftTy.getSizeInBits();
|
|
|
|
unsigned Op2Size = Op2.getValueType().getSizeInBits();
|
2011-02-13 09:02:52 +00:00
|
|
|
DebugLoc DL = getCurDebugLoc();
|
2011-02-25 21:41:48 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
// If the operand is smaller than the shift count type, promote it.
|
2011-02-13 09:02:52 +00:00
|
|
|
if (ShiftSize > Op2Size)
|
|
|
|
Op2 = DAG.getNode(ISD::ZERO_EXTEND, DL, ShiftTy, Op2);
|
2011-02-25 21:41:48 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
// If the operand is larger than the shift count type but the shift
|
|
|
|
// count type has enough bits to represent any shift value, truncate
|
|
|
|
// it now. This is a common case and it exposes the truncate to
|
|
|
|
// optimization early.
|
2011-02-13 09:02:52 +00:00
|
|
|
else if (ShiftSize >= Log2_32_Ceil(Op2.getValueType().getSizeInBits()))
|
|
|
|
Op2 = DAG.getNode(ISD::TRUNCATE, DL, ShiftTy, Op2);
|
|
|
|
// Otherwise we'll need to temporarily settle for some other convenient
|
2011-02-13 19:09:16 +00:00
|
|
|
// type. Type legalization will make adjustments once the shiftee is split.
|
2011-02-13 09:02:52 +00:00
|
|
|
else
|
2011-02-13 19:09:16 +00:00
|
|
|
Op2 = DAG.getZExtOrTrunc(Op2, DL, MVT::i32);
|
2008-09-03 16:12:24 +00:00
|
|
|
}
|
2009-12-21 23:10:19 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
setValue(&I, DAG.getNode(Opcode, getCurDebugLoc(),
|
|
|
|
Op1.getValueType(), Op1, Op2));
|
2008-09-03 16:12:24 +00:00
|
|
|
}
|
|
|
|
|
Emit a more efficient magic number multiplication for exact sdivs.
We have to do this in DAGBuilder instead of DAGCombiner, because the exact bit is lost after building.
struct foo { char x[24]; };
long bar(struct foo *a, struct foo *b) { return a-b; }
is now compiled into
movl 4(%esp), %eax
subl 8(%esp), %eax
sarl $3, %eax
imull $-1431655765, %eax, %eax
instead of
movl 4(%esp), %eax
subl 8(%esp), %eax
movl $715827883, %ecx
imull %ecx
movl %edx, %eax
shrl $31, %eax
sarl $2, %edx
addl %eax, %edx
movl %edx, %eax
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@134695 91177308-0d34-0410-b5e6-96231b3b80d8
2011-07-08 10:31:30 +00:00
|
|
|
void SelectionDAGBuilder::visitSDiv(const User &I) {
|
|
|
|
SDValue Op1 = getValue(I.getOperand(0));
|
|
|
|
SDValue Op2 = getValue(I.getOperand(1));
|
|
|
|
|
|
|
|
// Turn exact SDivs into multiplications.
|
|
|
|
// FIXME: This should be in DAGCombiner, but it doesn't have access to the
|
|
|
|
// exact bit.
|
2011-07-08 12:08:24 +00:00
|
|
|
if (isa<BinaryOperator>(&I) && cast<BinaryOperator>(&I)->isExact() &&
|
|
|
|
!isa<ConstantSDNode>(Op1) &&
|
2011-07-08 10:31:30 +00:00
|
|
|
isa<ConstantSDNode>(Op2) && !cast<ConstantSDNode>(Op2)->isNullValue())
|
|
|
|
setValue(&I, TLI.BuildExactSDIV(Op1, Op2, getCurDebugLoc(), DAG));
|
|
|
|
else
|
|
|
|
setValue(&I, DAG.getNode(ISD::SDIV, getCurDebugLoc(), Op1.getValueType(),
|
|
|
|
Op1, Op2));
|
|
|
|
}
|
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
void SelectionDAGBuilder::visitICmp(const User &I) {
|
|
|
|
ICmpInst::Predicate predicate = ICmpInst::BAD_ICMP_PREDICATE;
|
|
|
|
if (const ICmpInst *IC = dyn_cast<ICmpInst>(&I))
|
|
|
|
predicate = IC->getPredicate();
|
|
|
|
else if (const ConstantExpr *IC = dyn_cast<ConstantExpr>(&I))
|
|
|
|
predicate = ICmpInst::Predicate(IC->getPredicate());
|
|
|
|
SDValue Op1 = getValue(I.getOperand(0));
|
|
|
|
SDValue Op2 = getValue(I.getOperand(1));
|
|
|
|
ISD::CondCode Opcode = getICmpCondCode(predicate);
|
2009-12-22 00:12:37 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
EVT DestVT = TLI.getValueType(I.getType());
|
|
|
|
setValue(&I, DAG.getSetCC(getCurDebugLoc(), DestVT, Op1, Op2, Opcode));
|
2008-09-03 16:12:24 +00:00
|
|
|
}
|
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
void SelectionDAGBuilder::visitFCmp(const User &I) {
|
|
|
|
FCmpInst::Predicate predicate = FCmpInst::BAD_FCMP_PREDICATE;
|
|
|
|
if (const FCmpInst *FC = dyn_cast<FCmpInst>(&I))
|
|
|
|
predicate = FC->getPredicate();
|
|
|
|
else if (const ConstantExpr *FC = dyn_cast<ConstantExpr>(&I))
|
|
|
|
predicate = FCmpInst::Predicate(FC->getPredicate());
|
|
|
|
SDValue Op1 = getValue(I.getOperand(0));
|
|
|
|
SDValue Op2 = getValue(I.getOperand(1));
|
|
|
|
ISD::CondCode Condition = getFCmpCondCode(predicate);
|
2011-12-02 22:16:29 +00:00
|
|
|
if (TM.Options.NoNaNsFPMath)
|
|
|
|
Condition = getFCmpCodeWithoutNaN(Condition);
|
2010-05-29 17:53:24 +00:00
|
|
|
EVT DestVT = TLI.getValueType(I.getType());
|
|
|
|
setValue(&I, DAG.getSetCC(getCurDebugLoc(), DestVT, Op1, Op2, Condition));
|
|
|
|
}
|
2008-09-03 16:12:24 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
void SelectionDAGBuilder::visitSelect(const User &I) {
|
2009-08-10 22:56:29 +00:00
|
|
|
SmallVector<EVT, 4> ValueVTs;
|
2010-05-29 17:53:24 +00:00
|
|
|
ComputeValueVTs(TLI, I.getType(), ValueVTs);
|
2008-09-03 16:12:24 +00:00
|
|
|
unsigned NumValues = ValueVTs.size();
|
2010-05-29 17:53:24 +00:00
|
|
|
if (NumValues == 0) return;
|
2008-09-03 16:12:24 +00:00
|
|
|
|
|
|
|
SmallVector<SDValue, 4> Values(NumValues);
|
2010-05-29 17:53:24 +00:00
|
|
|
SDValue Cond = getValue(I.getOperand(0));
|
|
|
|
SDValue TrueVal = getValue(I.getOperand(1));
|
|
|
|
SDValue FalseVal = getValue(I.getOperand(2));
|
2011-09-06 19:07:46 +00:00
|
|
|
ISD::NodeType OpCode = Cond.getValueType().isVector() ?
|
|
|
|
ISD::VSELECT : ISD::SELECT;
|
2009-01-16 06:53:46 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
for (unsigned i = 0; i != NumValues; ++i)
|
2011-09-06 19:07:46 +00:00
|
|
|
Values[i] = DAG.getNode(OpCode, getCurDebugLoc(),
|
|
|
|
TrueVal.getNode()->getValueType(TrueVal.getResNo()+i),
|
2010-05-29 17:53:24 +00:00
|
|
|
Cond,
|
|
|
|
SDValue(TrueVal.getNode(),
|
|
|
|
TrueVal.getResNo() + i),
|
|
|
|
SDValue(FalseVal.getNode(),
|
|
|
|
FalseVal.getResNo() + i));
|
2008-09-03 16:12:24 +00:00
|
|
|
|
2010-01-28 21:51:40 +00:00
|
|
|
setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurDebugLoc(),
|
|
|
|
DAG.getVTList(&ValueVTs[0], NumValues),
|
|
|
|
&Values[0], NumValues));
|
2009-12-22 00:12:37 +00:00
|
|
|
}
|
2008-09-03 16:12:24 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
void SelectionDAGBuilder::visitTrunc(const User &I) {
|
|
|
|
// TruncInst cannot be a no-op cast because sizeof(src) > sizeof(dest).
|
|
|
|
SDValue N = getValue(I.getOperand(0));
|
|
|
|
EVT DestVT = TLI.getValueType(I.getType());
|
|
|
|
setValue(&I, DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(), DestVT, N));
|
|
|
|
}
|
2008-09-03 16:12:24 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
void SelectionDAGBuilder::visitZExt(const User &I) {
|
|
|
|
// ZExt cannot be a no-op cast because sizeof(src) < sizeof(dest).
|
|
|
|
// ZExt also can't be a cast to bool for the same reason, so there's nothing much to do.
|
|
|
|
SDValue N = getValue(I.getOperand(0));
|
|
|
|
EVT DestVT = TLI.getValueType(I.getType());
|
|
|
|
setValue(&I, DAG.getNode(ISD::ZERO_EXTEND, getCurDebugLoc(), DestVT, N));
|
|
|
|
}
|
2008-09-03 16:12:24 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
void SelectionDAGBuilder::visitSExt(const User &I) {
|
|
|
|
// SExt cannot be a no-op cast because sizeof(src) < sizeof(dest).
|
|
|
|
// SExt also can't be a cast to bool for the same reason, so there's nothing much to do.
|
|
|
|
SDValue N = getValue(I.getOperand(0));
|
|
|
|
EVT DestVT = TLI.getValueType(I.getType());
|
|
|
|
setValue(&I, DAG.getNode(ISD::SIGN_EXTEND, getCurDebugLoc(), DestVT, N));
|
|
|
|
}
|
2008-09-03 16:12:24 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
void SelectionDAGBuilder::visitFPTrunc(const User &I) {
|
|
|
|
// FPTrunc is never a no-op cast, no need to check
|
|
|
|
SDValue N = getValue(I.getOperand(0));
|
|
|
|
EVT DestVT = TLI.getValueType(I.getType());
|
|
|
|
setValue(&I, DAG.getNode(ISD::FP_ROUND, getCurDebugLoc(),
|
2012-01-17 01:54:07 +00:00
|
|
|
DestVT, N,
|
|
|
|
DAG.getTargetConstant(0, TLI.getPointerTy())));
|
2010-05-29 17:53:24 +00:00
|
|
|
}
|
2009-12-22 00:12:37 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
void SelectionDAGBuilder::visitFPExt(const User &I){
|
2011-10-18 03:51:57 +00:00
|
|
|
// FPExt is never a no-op cast, no need to check
|
2010-05-29 17:53:24 +00:00
|
|
|
SDValue N = getValue(I.getOperand(0));
|
|
|
|
EVT DestVT = TLI.getValueType(I.getType());
|
|
|
|
setValue(&I, DAG.getNode(ISD::FP_EXTEND, getCurDebugLoc(), DestVT, N));
|
2008-09-03 16:12:24 +00:00
|
|
|
}
|
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
void SelectionDAGBuilder::visitFPToUI(const User &I) {
|
|
|
|
// FPToUI is never a no-op cast, no need to check
|
|
|
|
SDValue N = getValue(I.getOperand(0));
|
|
|
|
EVT DestVT = TLI.getValueType(I.getType());
|
|
|
|
setValue(&I, DAG.getNode(ISD::FP_TO_UINT, getCurDebugLoc(), DestVT, N));
|
|
|
|
}
|
2008-09-03 16:12:24 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
void SelectionDAGBuilder::visitFPToSI(const User &I) {
|
|
|
|
// FPToSI is never a no-op cast, no need to check
|
|
|
|
SDValue N = getValue(I.getOperand(0));
|
|
|
|
EVT DestVT = TLI.getValueType(I.getType());
|
|
|
|
setValue(&I, DAG.getNode(ISD::FP_TO_SINT, getCurDebugLoc(), DestVT, N));
|
|
|
|
}
|
2009-12-22 00:12:37 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
void SelectionDAGBuilder::visitUIToFP(const User &I) {
|
|
|
|
// UIToFP is never a no-op cast, no need to check
|
|
|
|
SDValue N = getValue(I.getOperand(0));
|
|
|
|
EVT DestVT = TLI.getValueType(I.getType());
|
|
|
|
setValue(&I, DAG.getNode(ISD::UINT_TO_FP, getCurDebugLoc(), DestVT, N));
|
|
|
|
}
|
2008-09-03 16:12:24 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
void SelectionDAGBuilder::visitSIToFP(const User &I){
|
|
|
|
// SIToFP is never a no-op cast, no need to check
|
|
|
|
SDValue N = getValue(I.getOperand(0));
|
|
|
|
EVT DestVT = TLI.getValueType(I.getType());
|
|
|
|
setValue(&I, DAG.getNode(ISD::SINT_TO_FP, getCurDebugLoc(), DestVT, N));
|
|
|
|
}
|
2008-09-03 16:12:24 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
void SelectionDAGBuilder::visitPtrToInt(const User &I) {
|
|
|
|
// What to do depends on the size of the integer and the size of the pointer.
|
|
|
|
// We can either truncate, zero extend, or no-op, accordingly.
|
|
|
|
SDValue N = getValue(I.getOperand(0));
|
|
|
|
EVT DestVT = TLI.getValueType(I.getType());
|
|
|
|
setValue(&I, DAG.getZExtOrTrunc(N, getCurDebugLoc(), DestVT));
|
|
|
|
}
|
2009-12-22 00:12:37 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
void SelectionDAGBuilder::visitIntToPtr(const User &I) {
|
|
|
|
// What to do depends on the size of the integer and the size of the pointer.
|
|
|
|
// We can either truncate, zero extend, or no-op, accordingly.
|
|
|
|
SDValue N = getValue(I.getOperand(0));
|
|
|
|
EVT DestVT = TLI.getValueType(I.getType());
|
|
|
|
setValue(&I, DAG.getZExtOrTrunc(N, getCurDebugLoc(), DestVT));
|
|
|
|
}
|
2009-12-22 00:12:37 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
void SelectionDAGBuilder::visitBitCast(const User &I) {
|
|
|
|
SDValue N = getValue(I.getOperand(0));
|
|
|
|
EVT DestVT = TLI.getValueType(I.getType());
|
2009-12-22 00:12:37 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
// BitCast assures us that source and destination are the same size so this is
|
2010-11-23 03:31:01 +00:00
|
|
|
// either a BITCAST or a no-op.
|
2010-05-29 17:53:24 +00:00
|
|
|
if (DestVT != N.getValueType())
|
2010-11-23 03:31:01 +00:00
|
|
|
setValue(&I, DAG.getNode(ISD::BITCAST, getCurDebugLoc(),
|
2010-05-29 17:53:24 +00:00
|
|
|
DestVT, N)); // convert types.
|
|
|
|
else
|
|
|
|
setValue(&I, N); // noop cast.
|
2008-09-03 16:12:24 +00:00
|
|
|
}
|
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
void SelectionDAGBuilder::visitInsertElement(const User &I) {
|
|
|
|
SDValue InVec = getValue(I.getOperand(0));
|
|
|
|
SDValue InVal = getValue(I.getOperand(1));
|
|
|
|
SDValue InIdx = DAG.getNode(ISD::ZERO_EXTEND, getCurDebugLoc(),
|
|
|
|
TLI.getPointerTy(),
|
|
|
|
getValue(I.getOperand(2)));
|
|
|
|
setValue(&I, DAG.getNode(ISD::INSERT_VECTOR_ELT, getCurDebugLoc(),
|
|
|
|
TLI.getValueType(I.getType()),
|
|
|
|
InVec, InVal, InIdx));
|
2008-09-09 20:39:27 +00:00
|
|
|
}
|
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
void SelectionDAGBuilder::visitExtractElement(const User &I) {
|
|
|
|
SDValue InVec = getValue(I.getOperand(0));
|
|
|
|
SDValue InIdx = DAG.getNode(ISD::ZERO_EXTEND, getCurDebugLoc(),
|
|
|
|
TLI.getPointerTy(),
|
|
|
|
getValue(I.getOperand(1)));
|
|
|
|
setValue(&I, DAG.getNode(ISD::EXTRACT_VECTOR_ELT, getCurDebugLoc(),
|
|
|
|
TLI.getValueType(I.getType()), InVec, InIdx));
|
2008-09-09 20:39:27 +00:00
|
|
|
}
|
|
|
|
|
2012-01-04 09:23:09 +00:00
|
|
|
// Utility for visitShuffleVector - Return true if every element in Mask,
|
2012-06-02 10:20:22 +00:00
|
|
|
// beginning from position Pos and ending in Pos+Size, falls within the
|
2012-01-04 09:23:09 +00:00
|
|
|
// specified sequential range [Low, Low+Size) or is undef.
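// (Illustrative example: with Mask = <4, 5, undef, 7>, Pos = 0, Size = 4 and
// Low = 4, every defined element matches the expected sequence 4, 5, 6, 7, so
// the check succeeds; a Mask of <4, 6, ...> would fail at the second element.)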
|
|
|
|
static bool isSequentialInRange(const SmallVectorImpl<int> &Mask,
|
2012-04-11 03:06:35 +00:00
|
|
|
unsigned Pos, unsigned Size, int Low) {
|
|
|
|
for (unsigned i = Pos, e = Pos+Size; i != e; ++i, ++Low)
|
2012-01-04 09:23:09 +00:00
|
|
|
if (Mask[i] >= 0 && Mask[i] != Low)
|
2010-05-29 17:53:24 +00:00
|
|
|
return false;
|
|
|
|
return true;
|
2008-09-22 00:44:35 +00:00
|
|
|
}
|
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
void SelectionDAGBuilder::visitShuffleVector(const User &I) {
|
|
|
|
SDValue Src1 = getValue(I.getOperand(0));
|
|
|
|
SDValue Src2 = getValue(I.getOperand(1));
|
2008-09-03 16:12:24 +00:00
|
|
|
|
2012-01-26 02:51:13 +00:00
|
|
|
SmallVector<int, 8> Mask;
|
|
|
|
ShuffleVectorInst::getShuffleMask(cast<Constant>(I.getOperand(2)), Mask);
|
|
|
|
unsigned MaskNumElts = Mask.size();
|
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
EVT VT = TLI.getValueType(I.getType());
|
|
|
|
EVT SrcVT = Src1.getValueType();
|
|
|
|
unsigned SrcNumElts = SrcVT.getVectorNumElements();
|
2008-12-09 22:08:41 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
if (SrcNumElts == MaskNumElts) {
|
|
|
|
setValue(&I, DAG.getVectorShuffle(VT, getCurDebugLoc(), Src1, Src2,
|
|
|
|
&Mask[0]));
|
|
|
|
return;
|
|
|
|
}
|
2008-09-09 22:13:54 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
// Normalize the shuffle vector since mask and vector length don't match.
|
|
|
|
if (SrcNumElts < MaskNumElts && MaskNumElts % SrcNumElts == 0) {
|
|
|
|
// Mask is longer than the source vectors and is a multiple of the source
|
|
|
|
// vectors. We can use CONCAT_VECTORS to make the mask and vector
|
|
|
|
// lengths match.
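// For example (values chosen for illustration): shuffling two <2 x i32>
// sources with the 4-element mask <0, 1, 2, 3> selects all of Src1 followed
// by all of Src2, which is exactly CONCAT_VECTORS(Src1, Src2); the mask
// <2, 3, 0, 1> is the reversed concatenation CONCAT_VECTORS(Src2, Src1).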
|
2012-01-04 09:23:09 +00:00
|
|
|
if (SrcNumElts*2 == MaskNumElts) {
|
|
|
|
// First check for Src1 in low and Src2 in high
|
|
|
|
if (isSequentialInRange(Mask, 0, SrcNumElts, 0) &&
|
|
|
|
isSequentialInRange(Mask, SrcNumElts, SrcNumElts, SrcNumElts)) {
|
|
|
|
// The shuffle is concatenating two vectors together.
|
|
|
|
setValue(&I, DAG.getNode(ISD::CONCAT_VECTORS, getCurDebugLoc(),
|
|
|
|
VT, Src1, Src2));
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
// Then check for Src2 in low and Src1 in high
|
|
|
|
if (isSequentialInRange(Mask, 0, SrcNumElts, SrcNumElts) &&
|
|
|
|
isSequentialInRange(Mask, SrcNumElts, SrcNumElts, 0)) {
|
|
|
|
// The shuffle is concatenating two vectors together.
|
|
|
|
setValue(&I, DAG.getNode(ISD::CONCAT_VECTORS, getCurDebugLoc(),
|
|
|
|
VT, Src2, Src1));
|
|
|
|
return;
|
|
|
|
}
|
2010-05-29 17:53:24 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Pad both vectors with undefs to make them the same length as the mask.
|
|
|
|
unsigned NumConcat = MaskNumElts / SrcNumElts;
|
|
|
|
bool Src1U = Src1.getOpcode() == ISD::UNDEF;
|
|
|
|
bool Src2U = Src2.getOpcode() == ISD::UNDEF;
|
|
|
|
SDValue UndefVal = DAG.getUNDEF(SrcVT);
|
|
|
|
|
|
|
|
SmallVector<SDValue, 8> MOps1(NumConcat, UndefVal);
|
|
|
|
SmallVector<SDValue, 8> MOps2(NumConcat, UndefVal);
|
|
|
|
MOps1[0] = Src1;
|
|
|
|
MOps2[0] = Src2;
|
|
|
|
|
|
|
|
Src1 = Src1U ? DAG.getUNDEF(VT) : DAG.getNode(ISD::CONCAT_VECTORS,
|
|
|
|
getCurDebugLoc(), VT,
|
|
|
|
&MOps1[0], NumConcat);
|
|
|
|
Src2 = Src2U ? DAG.getUNDEF(VT) : DAG.getNode(ISD::CONCAT_VECTORS,
|
|
|
|
getCurDebugLoc(), VT,
|
|
|
|
&MOps2[0], NumConcat);
|
|
|
|
|
|
|
|
// Readjust mask for new input vector length.
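// (Illustrative: with SrcNumElts = 2 and MaskNumElts = 4, a mask entry of 2,
// i.e. element 0 of Src2, becomes 2 - (2 - 4) = 4, which is element 0 of the
// padded 4-element Src2 in the new numbering.)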
|
|
|
|
SmallVector<int, 8> MappedOps;
|
|
|
|
for (unsigned i = 0; i != MaskNumElts; ++i) {
|
|
|
|
int Idx = Mask[i];
|
2012-04-11 03:06:35 +00:00
|
|
|
if (Idx >= (int)SrcNumElts)
|
|
|
|
Idx -= SrcNumElts - MaskNumElts;
|
|
|
|
MappedOps.push_back(Idx);
|
2010-05-29 17:53:24 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
setValue(&I, DAG.getVectorShuffle(VT, getCurDebugLoc(), Src1, Src2,
|
|
|
|
&MappedOps[0]));
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (SrcNumElts > MaskNumElts) {
|
|
|
|
// Analyze the access pattern of the vector to see if we can extract
|
|
|
|
// two subvectors and do the shuffle. The analysis is done by calculating
|
|
|
|
// the range of elements the mask accesses on both vectors.
|
2012-04-08 23:15:04 +00:00
|
|
|
int MinRange[2] = { static_cast<int>(SrcNumElts),
|
|
|
|
static_cast<int>(SrcNumElts)};
|
2010-05-29 17:53:24 +00:00
|
|
|
int MaxRange[2] = {-1, -1};
|
|
|
|
|
|
|
|
for (unsigned i = 0; i != MaskNumElts; ++i) {
|
|
|
|
int Idx = Mask[i];
|
2012-04-08 23:15:04 +00:00
|
|
|
unsigned Input = 0;
|
2010-05-29 17:53:24 +00:00
|
|
|
if (Idx < 0)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
if (Idx >= (int)SrcNumElts) {
|
|
|
|
Input = 1;
|
|
|
|
Idx -= SrcNumElts;
|
|
|
|
}
|
|
|
|
if (Idx > MaxRange[Input])
|
|
|
|
MaxRange[Input] = Idx;
|
|
|
|
if (Idx < MinRange[Input])
|
|
|
|
MinRange[Input] = Idx;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Check if the access is smaller than the vector size and whether we can find
|
|
|
|
// a reasonable extract index.
|
2012-04-08 23:15:04 +00:00
|
|
|
int RangeUse[2] = { -1, -1 }; // 0 = Unused, 1 = Extract, -1 = Cannot
|
|
|
|
// Extract.
|
2010-05-29 17:53:24 +00:00
|
|
|
int StartIdx[2]; // StartIdx to extract from
|
2012-04-08 23:15:04 +00:00
|
|
|
for (unsigned Input = 0; Input < 2; ++Input) {
|
|
|
|
if (MinRange[Input] >= (int)SrcNumElts && MaxRange[Input] < 0) {
|
2010-05-29 17:53:24 +00:00
|
|
|
RangeUse[Input] = 0; // Unused
|
|
|
|
StartIdx[Input] = 0;
|
2012-04-08 17:53:33 +00:00
|
|
|
continue;
|
2010-05-29 17:53:24 +00:00
|
|
|
}
|
2012-04-08 17:53:33 +00:00
|
|
|
|
|
|
|
// Find a good start index that is a multiple of the mask length. Then
|
|
|
|
// see if the rest of the elements are in range.
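// (Illustrative: with SrcNumElts = 8, MaskNumElts = 4 and a mask touching only
// elements 5..7 of an input, MinRange = 5 gives StartIdx = (5/4)*4 = 4, and
// since 7 - 4 < 4 and 4 + 4 <= 8 the input qualifies for an extract at 4.)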
|
|
|
|
StartIdx[Input] = (MinRange[Input]/MaskNumElts)*MaskNumElts;
|
|
|
|
if (MaxRange[Input] - StartIdx[Input] < (int)MaskNumElts &&
|
|
|
|
StartIdx[Input] + MaskNumElts <= SrcNumElts)
|
|
|
|
RangeUse[Input] = 1; // Extract from a multiple of the mask length.
|
2010-05-29 17:53:24 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
if (RangeUse[0] == 0 && RangeUse[1] == 0) {
|
|
|
|
setValue(&I, DAG.getUNDEF(VT)); // Vectors are not used.
|
|
|
|
return;
|
|
|
|
}
|
2012-04-08 23:15:04 +00:00
|
|
|
if (RangeUse[0] >= 0 && RangeUse[1] >= 0) {
|
2010-05-29 17:53:24 +00:00
|
|
|
// Extract appropriate subvector and generate a vector shuffle
|
2012-04-08 23:15:04 +00:00
|
|
|
for (unsigned Input = 0; Input < 2; ++Input) {
|
2010-05-29 17:53:24 +00:00
|
|
|
SDValue &Src = Input == 0 ? Src1 : Src2;
|
|
|
|
if (RangeUse[Input] == 0)
|
|
|
|
Src = DAG.getUNDEF(VT);
|
|
|
|
else
|
|
|
|
Src = DAG.getNode(ISD::EXTRACT_SUBVECTOR, getCurDebugLoc(), VT,
|
|
|
|
Src, DAG.getIntPtrConstant(StartIdx[Input]));
|
|
|
|
}
|
|
|
|
|
|
|
|
// Calculate new mask.
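// (Illustrative: with SrcNumElts = 8, MaskNumElts = 4, StartIdx[0] = 4 and
// StartIdx[1] = 0, a mask entry of 6 (Src1 element 6) becomes 6 - 4 = 2, and
// an entry of 9 (Src2 element 1) becomes 9 - (8 + 0 - 4) = 5, i.e. element 1
// of the extracted second subvector in the new 4-wide shuffle.)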
|
|
|
|
SmallVector<int, 8> MappedOps;
|
|
|
|
for (unsigned i = 0; i != MaskNumElts; ++i) {
|
|
|
|
int Idx = Mask[i];
|
2012-04-11 03:06:35 +00:00
|
|
|
if (Idx >= 0) {
|
|
|
|
if (Idx < (int)SrcNumElts)
|
|
|
|
Idx -= StartIdx[0];
|
|
|
|
else
|
|
|
|
Idx -= SrcNumElts + StartIdx[1] - MaskNumElts;
|
|
|
|
}
|
|
|
|
MappedOps.push_back(Idx);
|
2010-05-29 17:53:24 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
setValue(&I, DAG.getVectorShuffle(VT, getCurDebugLoc(), Src1, Src2,
|
|
|
|
&MappedOps[0]));
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// We can't use either concat vectors or extract subvectors so fall back to
|
|
|
|
// replacing the shuffle with extract and build vector.
|
|
|
|
|
|
|
|
EVT EltVT = VT.getVectorElementType();
|
|
|
|
EVT PtrVT = TLI.getPointerTy();
|
|
|
|
SmallVector<SDValue,8> Ops;
|
|
|
|
for (unsigned i = 0; i != MaskNumElts; ++i) {
|
2012-04-11 03:06:35 +00:00
|
|
|
int Idx = Mask[i];
|
|
|
|
SDValue Res;
|
2010-05-29 17:53:24 +00:00
|
|
|
|
2012-04-11 03:06:35 +00:00
|
|
|
if (Idx < 0) {
|
|
|
|
Res = DAG.getUNDEF(EltVT);
|
|
|
|
} else {
|
|
|
|
SDValue &Src = Idx < (int)SrcNumElts ? Src1 : Src2;
|
|
|
|
if (Idx >= (int)SrcNumElts) Idx -= SrcNumElts;
|
2010-05-29 17:53:24 +00:00
|
|
|
|
2012-04-11 03:06:35 +00:00
|
|
|
Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, getCurDebugLoc(),
|
|
|
|
EltVT, Src, DAG.getConstant(Idx, PtrVT));
|
2010-05-29 17:53:24 +00:00
|
|
|
}
|
2012-04-11 03:06:35 +00:00
|
|
|
|
|
|
|
Ops.push_back(Res);
|
2010-05-29 17:53:24 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
setValue(&I, DAG.getNode(ISD::BUILD_VECTOR, getCurDebugLoc(),
|
|
|
|
VT, &Ops[0], Ops.size()));
|
|
|
|
}
|
|
|
|
|
|
|
|
void SelectionDAGBuilder::visitInsertValue(const InsertValueInst &I) {
|
|
|
|
const Value *Op0 = I.getOperand(0);
|
|
|
|
const Value *Op1 = I.getOperand(1);
|
2011-07-18 04:54:35 +00:00
|
|
|
Type *AggTy = I.getType();
|
|
|
|
Type *ValTy = Op1->getType();
|
2010-05-29 17:53:24 +00:00
|
|
|
bool IntoUndef = isa<UndefValue>(Op0);
|
|
|
|
bool FromUndef = isa<UndefValue>(Op1);
|
|
|
|
|
2011-07-13 10:26:04 +00:00
|
|
|
unsigned LinearIndex = ComputeLinearIndex(AggTy, I.getIndices());
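// (Illustrative: for AggTy = { i32, { float, double } } and indices (1, 0),
// the inserted float lands at flattened position LinearIndex = 1, after the
// single value produced by the leading i32.)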
|
2010-05-29 17:53:24 +00:00
|
|
|
|
|
|
|
SmallVector<EVT, 4> AggValueVTs;
|
|
|
|
ComputeValueVTs(TLI, AggTy, AggValueVTs);
|
|
|
|
SmallVector<EVT, 4> ValValueVTs;
|
|
|
|
ComputeValueVTs(TLI, ValTy, ValValueVTs);
|
|
|
|
|
|
|
|
unsigned NumAggValues = AggValueVTs.size();
|
|
|
|
unsigned NumValValues = ValValueVTs.size();
|
|
|
|
SmallVector<SDValue, 4> Values(NumAggValues);
|
|
|
|
|
|
|
|
SDValue Agg = getValue(Op0);
|
|
|
|
unsigned i = 0;
|
|
|
|
// Copy the beginning value(s) from the original aggregate.
|
|
|
|
for (; i != LinearIndex; ++i)
|
|
|
|
Values[i] = IntoUndef ? DAG.getUNDEF(AggValueVTs[i]) :
|
|
|
|
SDValue(Agg.getNode(), Agg.getResNo() + i);
|
|
|
|
// Copy values from the inserted value(s).
|
2011-05-13 15:18:06 +00:00
|
|
|
if (NumValValues) {
|
|
|
|
SDValue Val = getValue(Op1);
|
|
|
|
for (; i != LinearIndex + NumValValues; ++i)
|
|
|
|
Values[i] = FromUndef ? DAG.getUNDEF(AggValueVTs[i]) :
|
|
|
|
SDValue(Val.getNode(), Val.getResNo() + i - LinearIndex);
|
|
|
|
}
|
2010-05-29 17:53:24 +00:00
|
|
|
// Copy remaining value(s) from the original aggregate.
|
|
|
|
for (; i != NumAggValues; ++i)
|
|
|
|
Values[i] = IntoUndef ? DAG.getUNDEF(AggValueVTs[i]) :
|
|
|
|
SDValue(Agg.getNode(), Agg.getResNo() + i);
|
|
|
|
|
|
|
|
setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurDebugLoc(),
|
|
|
|
DAG.getVTList(&AggValueVTs[0], NumAggValues),
|
|
|
|
&Values[0], NumAggValues));
|
|
|
|
}
|
|
|
|
|
|
|
|
void SelectionDAGBuilder::visitExtractValue(const ExtractValueInst &I) {
|
|
|
|
const Value *Op0 = I.getOperand(0);
|
2011-07-18 04:54:35 +00:00
|
|
|
Type *AggTy = Op0->getType();
|
|
|
|
Type *ValTy = I.getType();
|
2010-05-29 17:53:24 +00:00
|
|
|
bool OutOfUndef = isa<UndefValue>(Op0);
|
|
|
|
|
2011-07-13 10:26:04 +00:00
|
|
|
unsigned LinearIndex = ComputeLinearIndex(AggTy, I.getIndices());
|
2010-05-29 17:53:24 +00:00
|
|
|
|
|
|
|
SmallVector<EVT, 4> ValValueVTs;
|
|
|
|
ComputeValueVTs(TLI, ValTy, ValValueVTs);
|
|
|
|
|
|
|
|
unsigned NumValValues = ValValueVTs.size();
|
2011-05-13 15:18:06 +00:00
|
|
|
|
|
|
|
// Ignore an extractvalue that produces an empty object
|
|
|
|
if (!NumValValues) {
|
|
|
|
setValue(&I, DAG.getUNDEF(MVT(MVT::Other)));
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
SmallVector<SDValue, 4> Values(NumValValues);
|
|
|
|
|
|
|
|
SDValue Agg = getValue(Op0);
|
|
|
|
// Copy out the selected value(s).
|
|
|
|
for (unsigned i = LinearIndex; i != LinearIndex + NumValValues; ++i)
|
|
|
|
Values[i - LinearIndex] =
|
|
|
|
OutOfUndef ?
|
|
|
|
DAG.getUNDEF(Agg.getNode()->getValueType(Agg.getResNo() + i)) :
|
|
|
|
SDValue(Agg.getNode(), Agg.getResNo() + i);
|
|
|
|
|
|
|
|
setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurDebugLoc(),
|
|
|
|
DAG.getVTList(&ValValueVTs[0], NumValValues),
|
|
|
|
&Values[0], NumValValues));
|
|
|
|
}
|
|
|
|
|
|
|
|
void SelectionDAGBuilder::visitGetElementPtr(const User &I) {
|
|
|
|
SDValue N = getValue(I.getOperand(0));
|
2012-02-28 14:13:19 +00:00
|
|
|
// Note that the pointer operand may be a vector of pointers. Take the scalar
|
|
|
|
// element which holds a pointer.
|
|
|
|
Type *Ty = I.getOperand(0)->getType()->getScalarType();
|
2010-05-29 17:53:24 +00:00
|
|
|
|
|
|
|
for (GetElementPtrInst::const_op_iterator OI = I.op_begin()+1, E = I.op_end();
|
|
|
|
OI != E; ++OI) {
|
|
|
|
const Value *Idx = *OI;
|
2011-07-18 04:54:35 +00:00
|
|
|
if (StructType *StTy = dyn_cast<StructType>(Ty)) {
|
2012-11-13 13:01:58 +00:00
|
|
|
unsigned Field = cast<Constant>(Idx)->getUniqueInteger().getZExtValue();
|
2010-05-29 17:53:24 +00:00
|
|
|
if (Field) {
|
|
|
|
// N = N + Offset
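// (e.g. for a struct { i8, i32 } the DataLayout would typically report
// offset 4 for field 1 once the i32 member is aligned; the exact value is
// target dependent.)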
|
|
|
|
uint64_t Offset = TD->getStructLayout(StTy)->getElementOffset(Field);
|
|
|
|
N = DAG.getNode(ISD::ADD, getCurDebugLoc(), N.getValueType(), N,
|
2012-11-13 13:01:58 +00:00
|
|
|
DAG.getConstant(Offset, N.getValueType()));
|
2010-05-29 17:53:24 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
Ty = StTy->getElementType(Field);
|
|
|
|
} else {
|
|
|
|
Ty = cast<SequentialType>(Ty)->getElementType();
|
|
|
|
|
|
|
|
// If this is a constant subscript, handle it quickly.
|
|
|
|
if (const ConstantInt *CI = dyn_cast<ConstantInt>(Idx)) {
|
2010-06-18 14:22:04 +00:00
|
|
|
if (CI->isZero()) continue;
|
2010-05-29 17:53:24 +00:00
|
|
|
uint64_t Offs =
|
|
|
|
TD->getTypeAllocSize(Ty)*cast<ConstantInt>(CI)->getSExtValue();
|
|
|
|
SDValue OffsVal;
|
|
|
|
EVT PTy = TLI.getPointerTy();
|
|
|
|
unsigned PtrBits = PTy.getSizeInBits();
|
|
|
|
if (PtrBits < 64)
|
|
|
|
OffsVal = DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(),
|
|
|
|
TLI.getPointerTy(),
|
|
|
|
DAG.getConstant(Offs, MVT::i64));
|
|
|
|
else
|
|
|
|
OffsVal = DAG.getIntPtrConstant(Offs);
|
|
|
|
|
|
|
|
N = DAG.getNode(ISD::ADD, getCurDebugLoc(), N.getValueType(), N,
|
|
|
|
OffsVal);
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
// N = N + Idx * ElementSize;
|
|
|
|
APInt ElementSize = APInt(TLI.getPointerTy().getSizeInBits(),
|
|
|
|
TD->getTypeAllocSize(Ty));
|
|
|
|
SDValue IdxN = getValue(Idx);
|
|
|
|
|
|
|
|
// If the index is smaller or larger than intptr_t, truncate or extend
|
|
|
|
// it.
|
|
|
|
IdxN = DAG.getSExtOrTrunc(IdxN, getCurDebugLoc(), N.getValueType());
|
|
|
|
|
|
|
|
// If this is a multiply by a power of two, turn it into a shl
|
|
|
|
// immediately. This is a very common case.
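// (e.g. for a 4-byte element type, ElementSize.logBase2() is 2 and the
// scaling below becomes IdxN << 2.)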
|
|
|
|
if (ElementSize != 1) {
|
|
|
|
if (ElementSize.isPowerOf2()) {
|
|
|
|
unsigned Amt = ElementSize.logBase2();
|
|
|
|
IdxN = DAG.getNode(ISD::SHL, getCurDebugLoc(),
|
|
|
|
N.getValueType(), IdxN,
|
2011-12-05 06:29:09 +00:00
|
|
|
DAG.getConstant(Amt, IdxN.getValueType()));
|
2010-05-29 17:53:24 +00:00
|
|
|
} else {
|
2012-11-13 13:01:58 +00:00
|
|
|
SDValue Scale = DAG.getConstant(ElementSize, IdxN.getValueType());
|
2010-05-29 17:53:24 +00:00
|
|
|
IdxN = DAG.getNode(ISD::MUL, getCurDebugLoc(),
|
|
|
|
N.getValueType(), IdxN, Scale);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
N = DAG.getNode(ISD::ADD, getCurDebugLoc(),
|
|
|
|
N.getValueType(), N, IdxN);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
setValue(&I, N);
|
|
|
|
}
|
|
|
|
|
|
|
|
void SelectionDAGBuilder::visitAlloca(const AllocaInst &I) {
|
|
|
|
// If this is a fixed sized alloca in the entry block of the function,
|
|
|
|
// allocate it statically on the stack.
|
|
|
|
if (FuncInfo.StaticAllocaMap.count(&I))
|
|
|
|
return; // getValue will auto-populate this.
|
|
|
|
|
2011-07-18 04:54:35 +00:00
|
|
|
Type *Ty = I.getAllocatedType();
|
2012-10-08 16:38:25 +00:00
|
|
|
uint64_t TySize = TLI.getDataLayout()->getTypeAllocSize(Ty);
|
2010-05-29 17:53:24 +00:00
|
|
|
unsigned Align =
|
2012-10-08 16:38:25 +00:00
|
|
|
std::max((unsigned)TLI.getDataLayout()->getPrefTypeAlignment(Ty),
|
2010-05-29 17:53:24 +00:00
|
|
|
I.getAlignment());
|
|
|
|
|
|
|
|
SDValue AllocSize = getValue(I.getArraySize());
|
|
|
|
|
|
|
|
EVT IntPtr = TLI.getPointerTy();
|
|
|
|
if (AllocSize.getValueType() != IntPtr)
|
|
|
|
AllocSize = DAG.getZExtOrTrunc(AllocSize, getCurDebugLoc(), IntPtr);
|
|
|
|
|
|
|
|
AllocSize = DAG.getNode(ISD::MUL, getCurDebugLoc(), IntPtr,
|
|
|
|
AllocSize,
|
|
|
|
DAG.getConstant(TySize, IntPtr));
|
|
|
|
|
|
|
|
// Handle alignment. If the requested alignment is less than or equal to
|
|
|
|
// the stack alignment, ignore it. If the requested alignment is greater than
|
|
|
|
// the stack alignment, we note this in the DYNAMIC_STACKALLOC node.
|
2011-01-10 12:39:04 +00:00
|
|
|
unsigned StackAlign = TM.getFrameLowering()->getStackAlignment();
|
2010-05-29 17:53:24 +00:00
|
|
|
if (Align <= StackAlign)
|
|
|
|
Align = 0;
|
|
|
|
|
|
|
|
// Round the size of the allocation up to the stack alignment size
|
|
|
|
// by adding SA-1 to the size.
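// (Illustrative: with StackAlign = 16 and a computed size of 20 bytes,
// 20 + 15 = 35 is then masked with ~15 below, yielding a 32-byte allocation.)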
|
|
|
|
AllocSize = DAG.getNode(ISD::ADD, getCurDebugLoc(),
|
|
|
|
AllocSize.getValueType(), AllocSize,
|
|
|
|
DAG.getIntPtrConstant(StackAlign-1));
|
|
|
|
|
|
|
|
// Mask out the low bits for alignment purposes.
|
|
|
|
AllocSize = DAG.getNode(ISD::AND, getCurDebugLoc(),
|
|
|
|
AllocSize.getValueType(), AllocSize,
|
|
|
|
DAG.getIntPtrConstant(~(uint64_t)(StackAlign-1)));
|
|
|
|
|
|
|
|
SDValue Ops[] = { getRoot(), AllocSize, DAG.getIntPtrConstant(Align) };
|
|
|
|
SDVTList VTs = DAG.getVTList(AllocSize.getValueType(), MVT::Other);
|
|
|
|
SDValue DSA = DAG.getNode(ISD::DYNAMIC_STACKALLOC, getCurDebugLoc(),
|
|
|
|
VTs, Ops, 3);
|
|
|
|
setValue(&I, DSA);
|
|
|
|
DAG.setRoot(DSA.getValue(1));
|
|
|
|
|
|
|
|
// Inform the Frame Information that we have just allocated a variable-sized
|
|
|
|
// object.
|
2010-07-17 00:28:22 +00:00
|
|
|
FuncInfo.MF->getFrameInfo()->CreateVariableSizedObject(Align ? Align : 1);
|
2010-05-29 17:53:24 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
void SelectionDAGBuilder::visitLoad(const LoadInst &I) {
|
2011-08-24 20:50:09 +00:00
|
|
|
if (I.isAtomic())
|
|
|
|
return visitAtomicLoad(I);
|
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
const Value *SV = I.getOperand(0);
|
|
|
|
SDValue Ptr = getValue(SV);
|
|
|
|
|
2011-07-18 04:54:35 +00:00
|
|
|
Type *Ty = I.getType();
|
2010-05-29 17:53:24 +00:00
|
|
|
|
|
|
|
bool isVolatile = I.isVolatile();
|
|
|
|
bool isNonTemporal = I.getMetadata("nontemporal") != 0;
|
2011-11-08 18:42:53 +00:00
|
|
|
bool isInvariant = I.getMetadata("invariant.load") != 0;
|
2010-05-29 17:53:24 +00:00
|
|
|
unsigned Alignment = I.getAlignment();
|
2010-10-20 00:31:05 +00:00
|
|
|
const MDNode *TBAAInfo = I.getMetadata(LLVMContext::MD_tbaa);
|
2012-03-31 18:14:00 +00:00
|
|
|
const MDNode *Ranges = I.getMetadata(LLVMContext::MD_range);
|
2010-05-29 17:53:24 +00:00
|
|
|
|
|
|
|
SmallVector<EVT, 4> ValueVTs;
|
|
|
|
SmallVector<uint64_t, 4> Offsets;
|
|
|
|
ComputeValueVTs(TLI, Ty, ValueVTs, &Offsets);
|
|
|
|
unsigned NumValues = ValueVTs.size();
|
|
|
|
if (NumValues == 0)
|
|
|
|
return;
|
|
|
|
|
|
|
|
SDValue Root;
|
|
|
|
bool ConstantMemory = false;
|
2010-11-12 17:50:46 +00:00
|
|
|
if (I.isVolatile() || NumValues > MaxParallelChains)
|
2010-05-29 17:53:24 +00:00
|
|
|
// Serialize volatile loads with other side effects.
|
|
|
|
Root = getRoot();
|
2010-10-20 00:31:05 +00:00
|
|
|
else if (AA->pointsToConstantMemory(
|
|
|
|
AliasAnalysis::Location(SV, AA->getTypeStoreSize(Ty), TBAAInfo))) {
|
2010-05-29 17:53:24 +00:00
|
|
|
// Do not serialize (non-volatile) loads of constant memory with anything.
|
|
|
|
Root = DAG.getEntryNode();
|
|
|
|
ConstantMemory = true;
|
|
|
|
} else {
|
|
|
|
// Do not serialize non-volatile loads against each other.
|
|
|
|
Root = DAG.getRoot();
|
|
|
|
}
|
2010-11-23 03:31:01 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
SmallVector<SDValue, 4> Values(NumValues);
|
2010-11-12 17:50:46 +00:00
|
|
|
SmallVector<SDValue, 4> Chains(std::min(unsigned(MaxParallelChains),
|
|
|
|
NumValues));
|
2010-05-29 17:53:24 +00:00
|
|
|
EVT PtrVT = Ptr.getValueType();
|
2010-11-12 17:50:46 +00:00
|
|
|
unsigned ChainI = 0;
|
|
|
|
for (unsigned i = 0; i != NumValues; ++i, ++ChainI) {
|
|
|
|
// Serializing loads here may result in excessive register pressure, and
|
|
|
|
// TokenFactor places arbitrary choke points on the scheduler. SD scheduling
|
|
|
|
// could recover a bit by hoisting nodes upward in the chain by recognizing
|
|
|
|
// they are side-effect free or do not alias. The optimizer should really
|
|
|
|
// avoid this case by converting large object/array copies to llvm.memcpy
|
|
|
|
// (MaxParallelChains should always remain as a failsafe).
|
|
|
|
if (ChainI == MaxParallelChains) {
|
|
|
|
assert(PendingLoads.empty() && "PendingLoads must be serialized first");
|
|
|
|
SDValue Chain = DAG.getNode(ISD::TokenFactor, getCurDebugLoc(),
|
|
|
|
MVT::Other, &Chains[0], ChainI);
|
|
|
|
Root = Chain;
|
|
|
|
ChainI = 0;
|
|
|
|
}
|
2010-05-29 17:53:24 +00:00
|
|
|
SDValue A = DAG.getNode(ISD::ADD, getCurDebugLoc(),
|
|
|
|
PtrVT, Ptr,
|
|
|
|
DAG.getConstant(Offsets[i], PtrVT));
|
|
|
|
SDValue L = DAG.getLoad(ValueVTs[i], getCurDebugLoc(), Root,
|
2010-10-16 08:25:21 +00:00
|
|
|
A, MachinePointerInfo(SV, Offsets[i]), isVolatile,
|
2012-03-31 18:14:00 +00:00
|
|
|
isNonTemporal, isInvariant, Alignment, TBAAInfo,
|
|
|
|
Ranges);
|
2010-05-29 17:53:24 +00:00
|
|
|
|
|
|
|
Values[i] = L;
|
2010-11-12 17:50:46 +00:00
|
|
|
Chains[ChainI] = L.getValue(1);
|
2010-05-29 17:53:24 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
if (!ConstantMemory) {
|
|
|
|
SDValue Chain = DAG.getNode(ISD::TokenFactor, getCurDebugLoc(),
|
2010-11-12 17:50:46 +00:00
|
|
|
MVT::Other, &Chains[0], ChainI);
|
2010-05-29 17:53:24 +00:00
|
|
|
if (isVolatile)
|
|
|
|
DAG.setRoot(Chain);
|
|
|
|
else
|
|
|
|
PendingLoads.push_back(Chain);
|
|
|
|
}
|
|
|
|
|
|
|
|
setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurDebugLoc(),
|
|
|
|
DAG.getVTList(&ValueVTs[0], NumValues),
|
|
|
|
&Values[0], NumValues));
|
|
|
|
}
|
|
|
|
|
|
|
|
void SelectionDAGBuilder::visitStore(const StoreInst &I) {
|
2011-08-24 20:50:09 +00:00
|
|
|
if (I.isAtomic())
|
|
|
|
return visitAtomicStore(I);
|
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
const Value *SrcV = I.getOperand(0);
|
|
|
|
const Value *PtrV = I.getOperand(1);
|
|
|
|
|
|
|
|
SmallVector<EVT, 4> ValueVTs;
|
|
|
|
SmallVector<uint64_t, 4> Offsets;
|
|
|
|
ComputeValueVTs(TLI, SrcV->getType(), ValueVTs, &Offsets);
|
|
|
|
unsigned NumValues = ValueVTs.size();
|
|
|
|
if (NumValues == 0)
|
|
|
|
return;
|
|
|
|
|
|
|
|
// Get the lowered operands. Note that we do this after
|
|
|
|
// checking if NumValues is zero, because with zero results
|
|
|
|
// the operands won't have values in the map.
|
|
|
|
SDValue Src = getValue(SrcV);
|
|
|
|
SDValue Ptr = getValue(PtrV);
|
|
|
|
|
|
|
|
SDValue Root = getRoot();
|
2010-11-12 17:50:46 +00:00
|
|
|
SmallVector<SDValue, 4> Chains(std::min(unsigned(MaxParallelChains),
|
|
|
|
NumValues));
|
2010-05-29 17:53:24 +00:00
|
|
|
EVT PtrVT = Ptr.getValueType();
|
|
|
|
bool isVolatile = I.isVolatile();
|
|
|
|
bool isNonTemporal = I.getMetadata("nontemporal") != 0;
|
|
|
|
unsigned Alignment = I.getAlignment();
|
2010-10-20 00:31:05 +00:00
|
|
|
const MDNode *TBAAInfo = I.getMetadata(LLVMContext::MD_tbaa);
|
2010-05-29 17:53:24 +00:00
|
|
|
|
2010-11-12 17:50:46 +00:00
|
|
|
unsigned ChainI = 0;
|
|
|
|
for (unsigned i = 0; i != NumValues; ++i, ++ChainI) {
|
|
|
|
// See visitLoad comments.
|
|
|
|
if (ChainI == MaxParallelChains) {
|
|
|
|
SDValue Chain = DAG.getNode(ISD::TokenFactor, getCurDebugLoc(),
|
|
|
|
MVT::Other, &Chains[0], ChainI);
|
|
|
|
Root = Chain;
|
|
|
|
ChainI = 0;
|
|
|
|
}
|
2010-05-29 17:53:24 +00:00
|
|
|
SDValue Add = DAG.getNode(ISD::ADD, getCurDebugLoc(), PtrVT, Ptr,
|
|
|
|
DAG.getConstant(Offsets[i], PtrVT));
|
2010-11-12 17:50:46 +00:00
|
|
|
SDValue St = DAG.getStore(Root, getCurDebugLoc(),
|
|
|
|
SDValue(Src.getNode(), Src.getResNo() + i),
|
|
|
|
Add, MachinePointerInfo(PtrV, Offsets[i]),
|
|
|
|
isVolatile, isNonTemporal, Alignment, TBAAInfo);
|
|
|
|
Chains[ChainI] = St;
|
2010-05-29 17:53:24 +00:00
|
|
|
}
|
|
|
|
|
2010-10-26 22:14:52 +00:00
|
|
|
SDValue StoreNode = DAG.getNode(ISD::TokenFactor, getCurDebugLoc(),
|
2010-11-12 17:50:46 +00:00
|
|
|
MVT::Other, &Chains[0], ChainI);
|
2010-10-26 22:14:52 +00:00
|
|
|
++SDNodeOrder;
|
|
|
|
AssignOrderingToNode(StoreNode.getNode());
|
|
|
|
DAG.setRoot(StoreNode);
|
2010-05-29 17:53:24 +00:00
|
|
|
}
|
|
|
|
|
2011-08-03 21:06:02 +00:00
|
|
|
static SDValue InsertFenceForAtomic(SDValue Chain, AtomicOrdering Order,
|
2011-08-24 20:50:09 +00:00
|
|
|
SynchronizationScope Scope,
|
2011-08-03 21:06:02 +00:00
|
|
|
bool Before, DebugLoc dl,
|
|
|
|
SelectionDAG &DAG,
|
|
|
|
const TargetLowering &TLI) {
|
|
|
|
// Fence, if necessary
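// (Summary of the mapping below: before the operation, acq_rel and seq_cst
// are weakened to a release fence and acquire/monotonic need no fence; after
// the operation, acq_rel is weakened to an acquire fence and release/monotonic
// need no fence. Other orderings fall through and keep their own ordering on
// the emitted fence.)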
|
|
|
|
if (Before) {
|
2011-08-26 02:59:24 +00:00
|
|
|
if (Order == AcquireRelease || Order == SequentiallyConsistent)
|
2011-08-03 21:06:02 +00:00
|
|
|
Order = Release;
|
|
|
|
else if (Order == Acquire || Order == Monotonic)
|
|
|
|
return Chain;
|
|
|
|
} else {
|
|
|
|
if (Order == AcquireRelease)
|
|
|
|
Order = Acquire;
|
|
|
|
else if (Order == Release || Order == Monotonic)
|
|
|
|
return Chain;
|
|
|
|
}
|
|
|
|
SDValue Ops[3];
|
|
|
|
Ops[0] = Chain;
|
2011-08-24 20:50:09 +00:00
|
|
|
Ops[1] = DAG.getConstant(Order, TLI.getPointerTy());
|
|
|
|
Ops[2] = DAG.getConstant(Scope, TLI.getPointerTy());
|
2011-08-03 21:06:02 +00:00
|
|
|
return DAG.getNode(ISD::ATOMIC_FENCE, dl, MVT::Other, Ops, 3);
|
|
|
|
}
|
|
|
|
|
2011-07-28 21:48:00 +00:00
|
|
|
void SelectionDAGBuilder::visitAtomicCmpXchg(const AtomicCmpXchgInst &I) {
|
2011-08-03 21:06:02 +00:00
|
|
|
DebugLoc dl = getCurDebugLoc();
|
|
|
|
AtomicOrdering Order = I.getOrdering();
|
2011-08-24 20:50:09 +00:00
|
|
|
SynchronizationScope Scope = I.getSynchScope();
|
2011-08-03 21:06:02 +00:00
|
|
|
|
|
|
|
SDValue InChain = getRoot();
|
|
|
|
|
|
|
|
if (TLI.getInsertFencesForAtomic())
|
2011-08-24 20:50:09 +00:00
|
|
|
InChain = InsertFenceForAtomic(InChain, Order, Scope, true, dl,
|
|
|
|
DAG, TLI);
|
2011-08-03 21:06:02 +00:00
|
|
|
|
2011-07-29 03:05:32 +00:00
|
|
|
SDValue L =
|
2011-08-03 21:06:02 +00:00
|
|
|
DAG.getAtomic(ISD::ATOMIC_CMP_SWAP, dl,
|
2011-07-29 03:05:32 +00:00
|
|
|
getValue(I.getCompareOperand()).getValueType().getSimpleVT(),
|
2011-08-03 21:06:02 +00:00
|
|
|
InChain,
|
2011-07-29 03:05:32 +00:00
|
|
|
getValue(I.getPointerOperand()),
|
|
|
|
getValue(I.getCompareOperand()),
|
|
|
|
getValue(I.getNewValOperand()),
|
|
|
|
MachinePointerInfo(I.getPointerOperand()), 0 /* Alignment */,
|
2011-08-24 20:50:09 +00:00
|
|
|
TLI.getInsertFencesForAtomic() ? Monotonic : Order,
|
|
|
|
Scope);
|
2011-08-03 21:06:02 +00:00
|
|
|
|
|
|
|
SDValue OutChain = L.getValue(1);
|
|
|
|
|
|
|
|
if (TLI.getInsertFencesForAtomic())
|
2011-08-24 20:50:09 +00:00
|
|
|
OutChain = InsertFenceForAtomic(OutChain, Order, Scope, false, dl,
|
|
|
|
DAG, TLI);
|
2011-08-03 21:06:02 +00:00
|
|
|
|
2011-07-29 03:05:32 +00:00
|
|
|
setValue(&I, L);
|
2011-08-03 21:06:02 +00:00
|
|
|
DAG.setRoot(OutChain);
|
2011-07-28 21:48:00 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
void SelectionDAGBuilder::visitAtomicRMW(const AtomicRMWInst &I) {
|
2011-08-03 21:06:02 +00:00
|
|
|
DebugLoc dl = getCurDebugLoc();
|
2011-07-29 03:05:32 +00:00
|
|
|
ISD::NodeType NT;
|
|
|
|
switch (I.getOperation()) {
|
2012-01-20 21:51:11 +00:00
|
|
|
default: llvm_unreachable("Unknown atomicrmw operation");
|
2011-07-29 03:05:32 +00:00
|
|
|
case AtomicRMWInst::Xchg: NT = ISD::ATOMIC_SWAP; break;
|
|
|
|
case AtomicRMWInst::Add: NT = ISD::ATOMIC_LOAD_ADD; break;
|
|
|
|
case AtomicRMWInst::Sub: NT = ISD::ATOMIC_LOAD_SUB; break;
|
|
|
|
case AtomicRMWInst::And: NT = ISD::ATOMIC_LOAD_AND; break;
|
|
|
|
case AtomicRMWInst::Nand: NT = ISD::ATOMIC_LOAD_NAND; break;
|
|
|
|
case AtomicRMWInst::Or: NT = ISD::ATOMIC_LOAD_OR; break;
|
|
|
|
case AtomicRMWInst::Xor: NT = ISD::ATOMIC_LOAD_XOR; break;
|
|
|
|
case AtomicRMWInst::Max: NT = ISD::ATOMIC_LOAD_MAX; break;
|
|
|
|
case AtomicRMWInst::Min: NT = ISD::ATOMIC_LOAD_MIN; break;
|
|
|
|
case AtomicRMWInst::UMax: NT = ISD::ATOMIC_LOAD_UMAX; break;
|
|
|
|
case AtomicRMWInst::UMin: NT = ISD::ATOMIC_LOAD_UMIN; break;
|
|
|
|
}
|
2011-08-03 21:06:02 +00:00
|
|
|
AtomicOrdering Order = I.getOrdering();
|
2011-08-24 20:50:09 +00:00
|
|
|
SynchronizationScope Scope = I.getSynchScope();
|
2011-08-03 21:06:02 +00:00
|
|
|
|
|
|
|
SDValue InChain = getRoot();
|
|
|
|
|
|
|
|
if (TLI.getInsertFencesForAtomic())
|
2011-08-24 20:50:09 +00:00
|
|
|
InChain = InsertFenceForAtomic(InChain, Order, Scope, true, dl,
|
|
|
|
DAG, TLI);
|
2011-08-03 21:06:02 +00:00
|
|
|
|
2011-07-29 03:05:32 +00:00
|
|
|
SDValue L =
|
2011-08-03 21:06:02 +00:00
|
|
|
DAG.getAtomic(NT, dl,
|
2011-07-29 03:05:32 +00:00
|
|
|
getValue(I.getValOperand()).getValueType().getSimpleVT(),
|
2011-08-03 21:06:02 +00:00
|
|
|
InChain,
|
2011-07-29 03:05:32 +00:00
|
|
|
getValue(I.getPointerOperand()),
|
|
|
|
getValue(I.getValOperand()),
|
|
|
|
I.getPointerOperand(), 0 /* Alignment */,
|
2011-08-03 21:06:02 +00:00
|
|
|
TLI.getInsertFencesForAtomic() ? Monotonic : Order,
|
2011-08-24 20:50:09 +00:00
|
|
|
Scope);
|
2011-08-03 21:06:02 +00:00
|
|
|
|
|
|
|
SDValue OutChain = L.getValue(1);
|
|
|
|
|
|
|
|
if (TLI.getInsertFencesForAtomic())
|
2011-08-24 20:50:09 +00:00
|
|
|
OutChain = InsertFenceForAtomic(OutChain, Order, Scope, false, dl,
|
|
|
|
DAG, TLI);
|
2011-08-03 21:06:02 +00:00
|
|
|
|
2011-07-29 03:05:32 +00:00
|
|
|
setValue(&I, L);
|
2011-08-03 21:06:02 +00:00
|
|
|
DAG.setRoot(OutChain);
|
2011-07-28 21:48:00 +00:00
|
|
|
}
|
|
|
|
|
2011-07-25 23:16:38 +00:00
|
|
|
void SelectionDAGBuilder::visitFence(const FenceInst &I) {
|
2011-07-27 22:21:52 +00:00
|
|
|
DebugLoc dl = getCurDebugLoc();
|
|
|
|
SDValue Ops[3];
|
|
|
|
Ops[0] = getRoot();
|
|
|
|
Ops[1] = DAG.getConstant(I.getOrdering(), TLI.getPointerTy());
|
|
|
|
Ops[2] = DAG.getConstant(I.getSynchScope(), TLI.getPointerTy());
|
|
|
|
DAG.setRoot(DAG.getNode(ISD::ATOMIC_FENCE, dl, MVT::Other, Ops, 3));
|
2011-07-25 23:16:38 +00:00
|
|
|
}
|
|
|
|
|
2011-08-24 20:50:09 +00:00
|
|
|
void SelectionDAGBuilder::visitAtomicLoad(const LoadInst &I) {
|
|
|
|
DebugLoc dl = getCurDebugLoc();
|
|
|
|
AtomicOrdering Order = I.getOrdering();
|
|
|
|
SynchronizationScope Scope = I.getSynchScope();
|
|
|
|
|
|
|
|
SDValue InChain = getRoot();
|
|
|
|
|
2012-08-17 23:24:29 +00:00
|
|
|
EVT VT = TLI.getValueType(I.getType());
|
2011-08-24 20:50:09 +00:00
|
|
|
|
2011-09-13 22:19:59 +00:00
|
|
|
if (I.getAlignment() * 8 < VT.getSizeInBits())
|
2011-09-13 20:50:54 +00:00
|
|
|
report_fatal_error("Cannot generate unaligned atomic load");
|
|
|
|
|
2011-08-24 20:50:09 +00:00
|
|
|
SDValue L =
|
|
|
|
DAG.getAtomic(ISD::ATOMIC_LOAD, dl, VT, VT, InChain,
|
|
|
|
getValue(I.getPointerOperand()),
|
|
|
|
I.getPointerOperand(), I.getAlignment(),
|
|
|
|
TLI.getInsertFencesForAtomic() ? Monotonic : Order,
|
|
|
|
Scope);
|
|
|
|
|
|
|
|
SDValue OutChain = L.getValue(1);
|
|
|
|
|
|
|
|
if (TLI.getInsertFencesForAtomic())
|
|
|
|
OutChain = InsertFenceForAtomic(OutChain, Order, Scope, false, dl,
|
|
|
|
DAG, TLI);
|
|
|
|
|
|
|
|
setValue(&I, L);
|
|
|
|
DAG.setRoot(OutChain);
|
|
|
|
}
|
|
|
|
|
|
|
|
void SelectionDAGBuilder::visitAtomicStore(const StoreInst &I) {
|
|
|
|
DebugLoc dl = getCurDebugLoc();
|
|
|
|
|
|
|
|
AtomicOrdering Order = I.getOrdering();
|
|
|
|
SynchronizationScope Scope = I.getSynchScope();
|
|
|
|
|
|
|
|
SDValue InChain = getRoot();
|
|
|
|
|
2012-08-17 23:24:29 +00:00
|
|
|
EVT VT = TLI.getValueType(I.getValueOperand()->getType());
|
2011-09-13 20:50:54 +00:00
|
|
|
|
2011-09-13 22:19:59 +00:00
|
|
|
if (I.getAlignment() * 8 < VT.getSizeInBits())
|
2011-09-13 20:50:54 +00:00
|
|
|
report_fatal_error("Cannot generate unaligned atomic store");
|
|
|
|
|
2011-08-24 20:50:09 +00:00
|
|
|
if (TLI.getInsertFencesForAtomic())
|
|
|
|
InChain = InsertFenceForAtomic(InChain, Order, Scope, true, dl,
|
|
|
|
DAG, TLI);
|
|
|
|
|
|
|
|
SDValue OutChain =
|
2011-09-13 20:50:54 +00:00
|
|
|
DAG.getAtomic(ISD::ATOMIC_STORE, dl, VT,
|
2011-08-24 20:50:09 +00:00
|
|
|
InChain,
|
|
|
|
getValue(I.getPointerOperand()),
|
|
|
|
getValue(I.getValueOperand()),
|
|
|
|
I.getPointerOperand(), I.getAlignment(),
|
|
|
|
TLI.getInsertFencesForAtomic() ? Monotonic : Order,
|
|
|
|
Scope);
|
|
|
|
|
|
|
|
if (TLI.getInsertFencesForAtomic())
|
|
|
|
OutChain = InsertFenceForAtomic(OutChain, Order, Scope, false, dl,
|
|
|
|
DAG, TLI);
|
|
|
|
|
|
|
|
DAG.setRoot(OutChain);
|
|
|
|
}
|
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
/// visitTargetIntrinsic - Lower a call of a target intrinsic to an INTRINSIC
|
|
|
|
/// node.
|
|
|
|
void SelectionDAGBuilder::visitTargetIntrinsic(const CallInst &I,
|
|
|
|
unsigned Intrinsic) {
|
|
|
|
bool HasChain = !I.doesNotAccessMemory();
|
|
|
|
bool OnlyLoad = HasChain && I.onlyReadsMemory();
|
|
|
|
|
|
|
|
// Build the operand list.
|
|
|
|
SmallVector<SDValue, 8> Ops;
|
|
|
|
if (HasChain) { // If this intrinsic has side-effects, chainify it.
|
|
|
|
if (OnlyLoad) {
|
|
|
|
// We don't need to serialize loads against other loads.
|
|
|
|
Ops.push_back(DAG.getRoot());
|
|
|
|
} else {
|
|
|
|
Ops.push_back(getRoot());
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Info is set by getTgtMemIntrinsic
|
|
|
|
TargetLowering::IntrinsicInfo Info;
|
|
|
|
bool IsTgtIntrinsic = TLI.getTgtMemIntrinsic(Info, I, Intrinsic);
|
|
|
|
|
|
|
|
// Add the intrinsic ID as an integer operand if it's not a target intrinsic.
|
2010-09-21 17:56:22 +00:00
|
|
|
if (!IsTgtIntrinsic || Info.opc == ISD::INTRINSIC_VOID ||
|
|
|
|
Info.opc == ISD::INTRINSIC_W_CHAIN)
|
2012-01-16 04:08:12 +00:00
|
|
|
Ops.push_back(DAG.getTargetConstant(Intrinsic, TLI.getPointerTy()));
|
2010-05-29 17:53:24 +00:00
|
|
|
|
|
|
|
// Add all operands of the call to the operand list.
|
2010-06-25 09:38:13 +00:00
|
|
|
for (unsigned i = 0, e = I.getNumArgOperands(); i != e; ++i) {
|
|
|
|
SDValue Op = getValue(I.getArgOperand(i));
|
2010-05-29 17:53:24 +00:00
|
|
|
Ops.push_back(Op);
|
|
|
|
}
|
|
|
|
|
|
|
|
SmallVector<EVT, 4> ValueVTs;
|
|
|
|
ComputeValueVTs(TLI, I.getType(), ValueVTs);
|
|
|
|
|
|
|
|
if (HasChain)
|
|
|
|
ValueVTs.push_back(MVT::Other);
|
|
|
|
|
|
|
|
SDVTList VTs = DAG.getVTList(ValueVTs.data(), ValueVTs.size());
|
|
|
|
|
|
|
|
// Create the node.
|
|
|
|
SDValue Result;
|
|
|
|
if (IsTgtIntrinsic) {
|
|
|
|
// This is a target intrinsic that touches memory
|
|
|
|
Result = DAG.getMemIntrinsicNode(Info.opc, getCurDebugLoc(),
|
|
|
|
VTs, &Ops[0], Ops.size(),
|
2010-09-21 04:57:15 +00:00
|
|
|
Info.memVT,
|
|
|
|
MachinePointerInfo(Info.ptrVal, Info.offset),
|
2010-05-29 17:53:24 +00:00
|
|
|
Info.align, Info.vol,
|
|
|
|
Info.readMem, Info.writeMem);
|
|
|
|
} else if (!HasChain) {
|
|
|
|
Result = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, getCurDebugLoc(),
|
|
|
|
VTs, &Ops[0], Ops.size());
|
|
|
|
} else if (!I.getType()->isVoidTy()) {
|
|
|
|
Result = DAG.getNode(ISD::INTRINSIC_W_CHAIN, getCurDebugLoc(),
|
|
|
|
VTs, &Ops[0], Ops.size());
|
|
|
|
} else {
|
|
|
|
Result = DAG.getNode(ISD::INTRINSIC_VOID, getCurDebugLoc(),
|
|
|
|
VTs, &Ops[0], Ops.size());
|
|
|
|
}
|
|
|
|
|
|
|
|
if (HasChain) {
|
|
|
|
SDValue Chain = Result.getValue(Result.getNode()->getNumValues()-1);
|
|
|
|
if (OnlyLoad)
|
|
|
|
PendingLoads.push_back(Chain);
|
|
|
|
else
|
|
|
|
DAG.setRoot(Chain);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!I.getType()->isVoidTy()) {
|
2011-07-18 04:54:35 +00:00
|
|
|
if (VectorType *PTy = dyn_cast<VectorType>(I.getType())) {
|
2010-05-29 17:53:24 +00:00
|
|
|
EVT VT = TLI.getValueType(PTy);
|
2010-11-23 03:31:01 +00:00
|
|
|
Result = DAG.getNode(ISD::BITCAST, getCurDebugLoc(), VT, Result);
|
2010-05-29 17:53:24 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
setValue(&I, Result);
|
2012-03-22 19:29:09 +00:00
|
|
|
} else {
|
|
|
|
// Assign order to result here. If the intrinsic does not produce a result,
|
|
|
|
// it won't be mapped to an SDNode and visit() will not assign it an order
|
|
|
|
// number.
|
|
|
|
++SDNodeOrder;
|
|
|
|
AssignOrderingToNode(Result.getNode());
|
2010-05-29 17:53:24 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/// GetSignificand - Get the significand and build it into a floating-point
|
|
|
|
/// number with exponent of 1:
|
|
|
|
///
|
|
|
|
/// Op = (Op & 0x007fffff) | 0x3f800000;
|
|
|
|
///
|
|
|
|
/// where Op is the hexadecimal representation of the floating point value.
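///
/// For example, for Op holding the bits of 3.0f (0x40400000), the masked
/// mantissa is 0x00400000 and OR-ing in 0x3f800000 yields 0x3fc00000, i.e.
/// the significand 1.5f.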
|
|
|
|
static SDValue
|
|
|
|
GetSignificand(SelectionDAG &DAG, SDValue Op, DebugLoc dl) {
|
|
|
|
SDValue t1 = DAG.getNode(ISD::AND, dl, MVT::i32, Op,
|
|
|
|
DAG.getConstant(0x007fffff, MVT::i32));
|
|
|
|
SDValue t2 = DAG.getNode(ISD::OR, dl, MVT::i32, t1,
|
|
|
|
DAG.getConstant(0x3f800000, MVT::i32));
|
2010-11-23 03:31:01 +00:00
|
|
|
return DAG.getNode(ISD::BITCAST, dl, MVT::f32, t2);
|
2010-05-29 17:53:24 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/// GetExponent - Get the exponent:
|
|
|
|
///
|
|
|
|
/// (float)(int)(((Op & 0x7f800000) >> 23) - 127);
|
|
|
|
///
|
|
|
|
/// where Op is the hexidecimal representation of floating point value.
|
|
|
|
static SDValue
|
|
|
|
GetExponent(SelectionDAG &DAG, SDValue Op, const TargetLowering &TLI,
|
|
|
|
DebugLoc dl) {
|
|
|
|
SDValue t0 = DAG.getNode(ISD::AND, dl, MVT::i32, Op,
|
|
|
|
DAG.getConstant(0x7f800000, MVT::i32));
|
|
|
|
SDValue t1 = DAG.getNode(ISD::SRL, dl, MVT::i32, t0,
|
|
|
|
DAG.getConstant(23, TLI.getPointerTy()));
|
|
|
|
SDValue t2 = DAG.getNode(ISD::SUB, dl, MVT::i32, t1,
|
|
|
|
DAG.getConstant(127, MVT::i32));
|
|
|
|
return DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, t2);
|
|
|
|
}
|
|
|
|
|
|
|
|
/// getF32Constant - Get a 32-bit floating point constant.
|
|
|
|
static SDValue
|
|
|
|
getF32Constant(SelectionDAG &DAG, unsigned Flt) {
|
|
|
|
return DAG.getConstantFP(APFloat(APInt(32, Flt)), MVT::f32);
|
|
|
|
}
|
|
|
|
|
2012-11-24 18:52:06 +00:00
|
|
|
/// expandExp - Lower an exp intrinsic. Handles the special sequences for
|
2010-05-29 17:53:24 +00:00
|
|
|
/// limited-precision mode.
|
2012-11-24 18:52:06 +00:00
|
|
|
static SDValue expandExp(DebugLoc dl, SDValue Op, SelectionDAG &DAG,
|
|
|
|
const TargetLowering &TLI) {
|
|
|
|
if (Op.getValueType() == MVT::f32 &&
|
2010-05-29 17:53:24 +00:00
|
|
|
LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
|
2008-09-09 22:13:54 +00:00
|
|
|
|
|
|
|
// Put the exponent in the right bit position for later addition to the
|
|
|
|
// final result:
|
|
|
|
//
|
|
|
|
// #define LOG2OFe 1.4426950f
|
|
|
|
// IntegerPartOfX = ((int32_t)(X * LOG2OFe));
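// The identity being used (for reference): exp(X) = 2^(X * log2(e)) =
// 2^IntegerPartOfX * 2^FractionalPartOfX, where the integer power of two is
// later added directly into the exponent bits of the approximated
// 2^FractionalPartOfX.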
|
2009-08-11 20:47:22 +00:00
|
|
|
SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, Op,
|
2008-09-22 00:44:35 +00:00
|
|
|
getF32Constant(DAG, 0x3fb8aa3b));
|
2009-08-11 20:47:22 +00:00
|
|
|
SDValue IntegerPartOfX = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, t0);
|
2008-09-09 22:13:54 +00:00
|
|
|
|
|
|
|
// FractionalPartOfX = (X * LOG2OFe) - (float)IntegerPartOfX;
|
2009-08-11 20:47:22 +00:00
|
|
|
SDValue t1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, IntegerPartOfX);
|
|
|
|
SDValue X = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0, t1);
|
2008-09-09 22:13:54 +00:00
|
|
|
|
|
|
|
// IntegerPartOfX <<= 23;
|
2009-08-11 20:47:22 +00:00
|
|
|
IntegerPartOfX = DAG.getNode(ISD::SHL, dl, MVT::i32, IntegerPartOfX,
|
2009-01-31 15:50:11 +00:00
|
|
|
DAG.getConstant(23, TLI.getPointerTy()));
|
2009-12-22 00:12:37 +00:00
|
|
|
|
2012-11-24 08:22:37 +00:00
|
|
|
SDValue TwoToFracPartOfX;
|
2008-09-09 22:13:54 +00:00
|
|
|
if (LimitFloatPrecision <= 6) {
|
|
|
|
// For floating-point precision of 6:
|
|
|
|
//
|
|
|
|
// TwoToFractionalPartOfX =
|
|
|
|
// 0.997535578f +
|
|
|
|
// (0.735607626f + 0.252464424f * x) * x;
|
|
|
|
//
|
|
|
|
// error 0.0144103317, which is 6 bits
|
2009-08-11 20:47:22 +00:00
|
|
|
SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
|
2008-09-22 00:44:35 +00:00
|
|
|
getF32Constant(DAG, 0x3e814304));
|
2009-08-11 20:47:22 +00:00
|
|
|
SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
|
2008-09-22 00:44:35 +00:00
|
|
|
getF32Constant(DAG, 0x3f3c50c8));
|
2009-08-11 20:47:22 +00:00
|
|
|
SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
|
2012-11-24 08:22:37 +00:00
|
|
|
TwoToFracPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
|
|
|
|
getF32Constant(DAG, 0x3f7f5e7e));
|
2012-11-16 20:01:39 +00:00
|
|
|
} else if (LimitFloatPrecision <= 12) {
|
2008-09-09 22:13:54 +00:00
|
|
|
// For floating-point precision of 12:
|
|
|
|
//
|
|
|
|
// TwoToFractionalPartOfX =
|
|
|
|
// 0.999892986f +
|
|
|
|
// (0.696457318f +
|
|
|
|
// (0.224338339f + 0.792043434e-1f * x) * x) * x;
|
|
|
|
//
|
|
|
|
// 0.000107046256 error, which is 13 to 14 bits
|
2009-08-11 20:47:22 +00:00
|
|
|
SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
|
2008-09-22 00:44:35 +00:00
|
|
|
getF32Constant(DAG, 0x3da235e3));
|
2009-08-11 20:47:22 +00:00
|
|
|
SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
|
2008-09-22 00:44:35 +00:00
|
|
|
getF32Constant(DAG, 0x3e65b8f3));
|
2009-08-11 20:47:22 +00:00
|
|
|
SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
|
|
|
|
SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
|
2008-09-22 00:44:35 +00:00
|
|
|
getF32Constant(DAG, 0x3f324b07));
|
2009-08-11 20:47:22 +00:00
|
|
|
SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
|
2012-11-24 08:22:37 +00:00
|
|
|
TwoToFracPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
|
|
|
|
getF32Constant(DAG, 0x3f7ff8fd));
|
2012-11-16 20:01:39 +00:00
|
|
|
} else { // LimitFloatPrecision <= 18
|
2008-09-09 22:13:54 +00:00
|
|
|
// For floating-point precision of 18:
|
|
|
|
//
|
|
|
|
// TwoToFractionalPartOfX =
|
|
|
|
// 0.999999982f +
|
|
|
|
// (0.693148872f +
|
|
|
|
// (0.240227044f +
|
|
|
|
// (0.554906021e-1f +
|
|
|
|
// (0.961591928e-2f +
|
|
|
|
// (0.136028312e-2f + 0.157059148e-3f *x)*x)*x)*x)*x)*x;
|
|
|
|
//
|
|
|
|
// error 2.47208000*10^(-7), which is better than 18 bits
|
2009-08-11 20:47:22 +00:00
|
|
|
SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
|
2008-09-22 00:44:35 +00:00
|
|
|
getF32Constant(DAG, 0x3924b03e));
|
2009-08-11 20:47:22 +00:00
|
|
|
SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
|
2008-09-22 00:44:35 +00:00
|
|
|
getF32Constant(DAG, 0x3ab24b87));
|
2009-08-11 20:47:22 +00:00
|
|
|
SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
|
|
|
|
SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
|
2008-09-22 00:44:35 +00:00
|
|
|
getF32Constant(DAG, 0x3c1d8c17));
|
2009-08-11 20:47:22 +00:00
|
|
|
SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
|
|
|
|
SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
|
2008-09-22 00:44:35 +00:00
|
|
|
getF32Constant(DAG, 0x3d634a1d));
|
2009-08-11 20:47:22 +00:00
|
|
|
SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
|
|
|
|
SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
|
2008-09-22 00:44:35 +00:00
|
|
|
getF32Constant(DAG, 0x3e75fe14));
|
2009-08-11 20:47:22 +00:00
|
|
|
SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
|
|
|
|
SDValue t11 = DAG.getNode(ISD::FADD, dl, MVT::f32, t10,
|
2008-09-22 00:44:35 +00:00
|
|
|
getF32Constant(DAG, 0x3f317234));
|
2009-08-11 20:47:22 +00:00
|
|
|
SDValue t12 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t11, X);
|
2012-11-24 08:22:37 +00:00
|
|
|
TwoToFracPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t12,
|
|
|
|
getF32Constant(DAG, 0x3f800000));
|
2008-09-09 22:13:54 +00:00
|
|
|
}
|
2012-11-24 08:22:37 +00:00
|
|
|
|
|
|
|
// Add the exponent into the result in integer domain.
|
|
|
|
SDValue t13 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, TwoToFracPartOfX);
|
2012-11-24 18:52:06 +00:00
|
|
|
return DAG.getNode(ISD::BITCAST, dl, MVT::f32,
|
|
|
|
DAG.getNode(ISD::ADD, dl, MVT::i32,
|
|
|
|
t13, IntegerPartOfX));
|
2008-09-09 22:13:54 +00:00
|
|
|
}
|
|
|
|
|
2012-11-24 18:52:06 +00:00
|
|
|
// No special expansion.
|
|
|
|
return DAG.getNode(ISD::FEXP, dl, Op.getValueType(), Op);
|
2008-09-05 18:38:42 +00:00
|
|
|
}
|
|
|
|
|
2012-11-23 18:38:31 +00:00
|
|
|
/// expandLog - Lower a log intrinsic. Handles the special sequences for
|
2008-09-09 20:39:27 +00:00
|
|
|
/// limited-precision mode.
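///
/// The decomposition used in the limited-precision path (for reference):
/// writing X = 2^Exp * Mantissa with Mantissa in [1,2), log(X) equals
/// Exp*log(2) + log(Mantissa); the first term comes from GetExponent scaled
/// by 0.69314718f and the second from a minimax polynomial in the mantissa.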
|
2012-11-23 18:38:31 +00:00
|
|
|
static SDValue expandLog(DebugLoc dl, SDValue Op, SelectionDAG &DAG,
|
|
|
|
const TargetLowering &TLI) {
|
|
|
|
if (Op.getValueType() == MVT::f32 &&
|
2008-09-09 20:39:27 +00:00
|
|
|
LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
|
2010-11-23 03:31:01 +00:00
|
|
|
SDValue Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op);
|
2008-09-09 20:39:27 +00:00
|
|
|
|
|
|
|
// Scale the exponent by log(2) [0.69314718f].
|
2010-03-02 01:55:18 +00:00
|
|
|
SDValue Exp = GetExponent(DAG, Op1, TLI, dl);
|
2009-08-11 20:47:22 +00:00
|
|
|
SDValue LogOfExponent = DAG.getNode(ISD::FMUL, dl, MVT::f32, Exp,
|
2008-09-22 00:44:35 +00:00
|
|
|
getF32Constant(DAG, 0x3f317218));
|
2008-09-09 20:39:27 +00:00
|
|
|
|
|
|
|
// Get the significand and build it into a floating-point number with
|
|
|
|
// exponent of 1.
|
2010-03-02 01:55:18 +00:00
|
|
|
SDValue X = GetSignificand(DAG, Op1, dl);
|
2008-09-09 20:39:27 +00:00
|
|
|
|
2012-11-16 19:08:44 +00:00
|
|
|
SDValue LogOfMantissa;
|
2008-09-09 20:39:27 +00:00
|
|
|
if (LimitFloatPrecision <= 6) {
|
|
|
|
// For floating-point precision of 6:
|
|
|
|
//
|
|
|
|
// LogofMantissa =
|
|
|
|
// -1.1609546f +
|
|
|
|
// (1.4034025f - 0.23903021f * x) * x;
|
2009-01-16 06:53:46 +00:00
|
|
|
//
|
2008-09-09 20:39:27 +00:00
|
|
|
// error 0.0034276066, which is better than 8 bits
|
2009-08-11 20:47:22 +00:00
|
|
|
SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
|
2008-09-22 00:44:35 +00:00
|
|
|
getF32Constant(DAG, 0xbe74c456));
|
2009-08-11 20:47:22 +00:00
|
|
|
SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
|
2008-09-22 00:44:35 +00:00
|
|
|
getF32Constant(DAG, 0x3fb3a2b1));
|
2009-08-11 20:47:22 +00:00
|
|
|
SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
|
2012-11-16 19:08:44 +00:00
|
|
|
LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
|
|
|
|
getF32Constant(DAG, 0x3f949a29));
|
2012-11-16 20:01:39 +00:00
|
|
|
} else if (LimitFloatPrecision <= 12) {
|
2008-09-09 20:39:27 +00:00
|
|
|
// For floating-point precision of 12:
|
|
|
|
//
|
|
|
|
// LogOfMantissa =
|
|
|
|
// -1.7417939f +
|
|
|
|
// (2.8212026f +
|
|
|
|
// (-1.4699568f +
|
|
|
|
// (0.44717955f - 0.56570851e-1f * x) * x) * x) * x;
|
|
|
|
//
|
|
|
|
// error 0.000061011436, which is 14 bits
|
2009-08-11 20:47:22 +00:00
|
|
|
SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
|
2008-09-22 00:44:35 +00:00
|
|
|
getF32Constant(DAG, 0xbd67b6d6));
|
2009-08-11 20:47:22 +00:00
|
|
|
SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
|
2008-09-22 00:44:35 +00:00
|
|
|
getF32Constant(DAG, 0x3ee4f4b8));
|
2009-08-11 20:47:22 +00:00
|
|
|
SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
|
|
|
|
SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
|
2008-09-22 00:44:35 +00:00
|
|
|
getF32Constant(DAG, 0x3fbc278b));
|
2009-08-11 20:47:22 +00:00
|
|
|
SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
|
|
|
|
SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
|
2008-09-22 00:44:35 +00:00
|
|
|
getF32Constant(DAG, 0x40348e95));
|
2009-08-11 20:47:22 +00:00
|
|
|
SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
|
2012-11-16 19:08:44 +00:00
|
|
|
LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
|
|
|
|
getF32Constant(DAG, 0x3fdef31a));
|
2012-11-16 20:01:39 +00:00
|
|
|
} else { // LimitFloatPrecision <= 18
|
2008-09-09 20:39:27 +00:00
|
|
|
// For floating-point precision of 18:
|
|
|
|
//
|
|
|
|
// LogOfMantissa =
|
|
|
|
// -2.1072184f +
|
|
|
|
// (4.2372794f +
|
|
|
|
// (-3.7029485f +
|
|
|
|
// (2.2781945f +
|
|
|
|
// (-0.87823314f +
|
|
|
|
// (0.19073739f - 0.17809712e-1f * x) * x) * x) * x) * x)*x;
|
|
|
|
//
|
|
|
|
// error 0.0000023660568, which is better than 18 bits
|
2009-08-11 20:47:22 +00:00
|
|
|
SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
|
2008-09-22 00:44:35 +00:00
|
|
|
getF32Constant(DAG, 0xbc91e5ac));
|
2009-08-11 20:47:22 +00:00
|
|
|
SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
|
2008-09-22 00:44:35 +00:00
|
|
|
getF32Constant(DAG, 0x3e4350aa));
|
2009-08-11 20:47:22 +00:00
|
|
|
SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
|
|
|
|
SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
|
2008-09-22 00:44:35 +00:00
|
|
|
getF32Constant(DAG, 0x3f60d3e3));
|
2009-08-11 20:47:22 +00:00
|
|
|
SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
|
|
|
|
SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
|
2008-09-22 00:44:35 +00:00
|
|
|
getF32Constant(DAG, 0x4011cdf0));
|
2009-08-11 20:47:22 +00:00
|
|
|
SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
|
|
|
|
SDValue t7 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
|
2008-09-22 00:44:35 +00:00
|
|
|
getF32Constant(DAG, 0x406cfd1c));
|
2009-08-11 20:47:22 +00:00
|
|
|
SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
|
|
|
|
SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
|
2008-09-22 00:44:35 +00:00
|
|
|
getF32Constant(DAG, 0x408797cb));
|
2009-08-11 20:47:22 +00:00
|
|
|
SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
|
2012-11-16 19:08:44 +00:00
|
|
|
LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t10,
|
|
|
|
getF32Constant(DAG, 0x4006dcab));
|
2008-09-09 20:39:27 +00:00
|
|
|
}
|
2012-11-16 19:08:44 +00:00
|
|
|
|
2012-11-23 18:38:31 +00:00
|
|
|
return DAG.getNode(ISD::FADD, dl, MVT::f32, LogOfExponent, LogOfMantissa);
|
2008-09-09 20:39:27 +00:00
|
|
|
}
|
|
|
|
|
2012-11-23 18:38:31 +00:00
|
|
|
// No special expansion.
|
|
|
|
return DAG.getNode(ISD::FLOG, dl, Op.getValueType(), Op);
|
2008-09-05 18:38:42 +00:00
|
|
|
}
|
|
|
|
|
2012-11-23 18:38:31 +00:00
|
|
|
/// expandLog2 - Lower a log2 intrinsic. Handles the special sequences for
|
2008-09-09 00:28:24 +00:00
|
|
|
/// limited-precision mode.
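///
/// For reference, the limited-precision path uses log2(X) = Exp +
/// log2(Mantissa) for X = 2^Exp * Mantissa with Mantissa in [1,2), so the
/// exponent term needs no scaling here.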
|
2012-11-23 18:38:31 +00:00
|
|
|
static SDValue expandLog2(DebugLoc dl, SDValue Op, SelectionDAG &DAG,
|
|
|
|
const TargetLowering &TLI) {
|
|
|
|
if (Op.getValueType() == MVT::f32 &&
|
2008-09-09 00:28:24 +00:00
|
|
|
LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
|
2010-11-23 03:31:01 +00:00
|
|
|
SDValue Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op);
|
2008-09-09 00:28:24 +00:00
|
|
|
|
2008-09-09 20:39:27 +00:00
|
|
|
// Get the exponent.
|
2010-03-02 01:55:18 +00:00
|
|
|
SDValue LogOfExponent = GetExponent(DAG, Op1, TLI, dl);
|
2009-12-22 00:12:37 +00:00
|
|
|
|
2008-09-09 00:28:24 +00:00
|
|
|
// Get the significand and build it into a floating-point number with
|
2008-09-09 20:39:27 +00:00
|
|
|
// exponent of 1.
|
2010-03-02 01:55:18 +00:00
|
|
|
SDValue X = GetSignificand(DAG, Op1, dl);
|
2009-01-16 06:53:46 +00:00
|
|
|
|
2008-09-09 00:28:24 +00:00
|
|
|
// Different possible minimax approximations of the significand in
|
|
|
|
// floating-point for various degrees of accuracy over [1,2].
|
2012-11-16 19:08:44 +00:00
|
|
|
SDValue Log2ofMantissa;
|
2008-09-09 00:28:24 +00:00
|
|
|
if (LimitFloatPrecision <= 6) {
|
|
|
|
// For floating-point precision of 6:
|
|
|
|
//
|
|
|
|
// Log2ofMantissa = -1.6749035f + (2.0246817f - .34484768f * x) * x;
|
|
|
|
//
|
|
|
|
// error 0.0049451742, which is more than 7 bits
|
2009-08-11 20:47:22 +00:00
|
|
|
SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
|
2008-09-22 00:44:35 +00:00
|
|
|
getF32Constant(DAG, 0xbeb08fe0));
|
2009-08-11 20:47:22 +00:00
|
|
|
SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
|
2008-09-22 00:44:35 +00:00
|
|
|
getF32Constant(DAG, 0x40019463));
|
2009-08-11 20:47:22 +00:00
|
|
|
SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
|
2012-11-16 19:08:44 +00:00
|
|
|
Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
|
|
|
|
getF32Constant(DAG, 0x3fd6633d));
|
2012-11-16 20:01:39 +00:00
|
|
|
} else if (LimitFloatPrecision <= 12) {
|
2008-09-09 00:28:24 +00:00
|
|
|
// For floating-point precision of 12:
|
|
|
|
//
|
|
|
|
// Log2ofMantissa =
|
|
|
|
// -2.51285454f +
|
|
|
|
// (4.07009056f +
|
|
|
|
// (-2.12067489f +
|
|
|
|
// (.645142248f - 0.816157886e-1f * x) * x) * x) * x;
|
2009-01-16 06:53:46 +00:00
|
|
|
//
|
2008-09-09 00:28:24 +00:00
|
|
|
// error 0.0000876136000, which is better than 13 bits
|
2009-08-11 20:47:22 +00:00
|
|
|
SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
|
2008-09-22 00:44:35 +00:00
|
|
|
getF32Constant(DAG, 0xbda7262e));
|
2009-08-11 20:47:22 +00:00
|
|
|
SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
|
2008-09-22 00:44:35 +00:00
|
|
|
getF32Constant(DAG, 0x3f25280b));
|
2009-08-11 20:47:22 +00:00
|
|
|
SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
|
|
|
|
SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
|
2008-09-22 00:44:35 +00:00
|
|
|
getF32Constant(DAG, 0x4007b923));
|
2009-08-11 20:47:22 +00:00
|
|
|
SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
|
|
|
|
SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
|
2008-09-22 00:44:35 +00:00
|
|
|
getF32Constant(DAG, 0x40823e2f));
|
2009-08-11 20:47:22 +00:00
|
|
|
SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
|
2012-11-16 19:08:44 +00:00
|
|
|
Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
|
|
|
|
getF32Constant(DAG, 0x4020d29c));
|
2012-11-16 20:01:39 +00:00
|
|
|
} else { // LimitFloatPrecision <= 18
|
2008-09-09 00:28:24 +00:00
|
|
|
// For floating-point precision of 18:
|
|
|
|
//
|
|
|
|
// Log2ofMantissa =
|
|
|
|
// -3.0400495f +
|
|
|
|
// (6.1129976f +
|
|
|
|
// (-5.3420409f +
|
|
|
|
// (3.2865683f +
|
|
|
|
// (-1.2669343f +
|
|
|
|
// (0.27515199f -
|
|
|
|
// 0.25691327e-1f * x) * x) * x) * x) * x) * x;
|
|
|
|
//
|
|
|
|
// error 0.0000018516, which is better than 18 bits
|
2009-08-11 20:47:22 +00:00
|
|
|
SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
|
2008-09-22 00:44:35 +00:00
|
|
|
getF32Constant(DAG, 0xbcd2769e));
|
2009-08-11 20:47:22 +00:00
|
|
|
SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
|
2008-09-22 00:44:35 +00:00
|
|
|
getF32Constant(DAG, 0x3e8ce0b9));
|
2009-08-11 20:47:22 +00:00
|
|
|
SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
|
|
|
|
SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
|
2008-09-22 00:44:35 +00:00
|
|
|
getF32Constant(DAG, 0x3fa22ae7));
|
2009-08-11 20:47:22 +00:00
|
|
|
SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
|
|
|
|
SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
|
2008-09-22 00:44:35 +00:00
|
|
|
getF32Constant(DAG, 0x40525723));
|
2009-08-11 20:47:22 +00:00
|
|
|
SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
|
|
|
|
SDValue t7 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
|
2008-09-22 00:44:35 +00:00
|
|
|
getF32Constant(DAG, 0x40aaf200));
|
2009-08-11 20:47:22 +00:00
|
|
|
SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
|
|
|
|
SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
|
2008-09-22 00:44:35 +00:00
|
|
|
getF32Constant(DAG, 0x40c39dad));
|
2009-08-11 20:47:22 +00:00
|
|
|
SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
|
2012-11-16 19:08:44 +00:00
|
|
|
Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t10,
|
|
|
|
getF32Constant(DAG, 0x4042902c));
|
2008-09-09 00:28:24 +00:00
|
|
|
}
|
2012-11-16 19:08:44 +00:00
|
|
|
|
2012-11-23 18:38:31 +00:00
|
|
|
return DAG.getNode(ISD::FADD, dl, MVT::f32, LogOfExponent, Log2ofMantissa);
|
2008-09-05 23:49:37 +00:00
|
|
|
}
|
2008-09-09 00:28:24 +00:00
|
|
|
|
2012-11-23 18:38:31 +00:00
|
|
|
// No special expansion.
|
|
|
|
return DAG.getNode(ISD::FLOG2, dl, Op.getValueType(), Op);
|
2008-09-05 18:38:42 +00:00
|
|
|
}
|
|
|
|
|
2012-11-23 18:38:31 +00:00
|
|
|
/// expandLog10 - Lower a log10 intrinsic. Handles the special sequences for
|
2008-09-09 00:28:24 +00:00
|
|
|
/// limited-precision mode.
|
2012-11-23 18:38:31 +00:00
|
|
|
static SDValue expandLog10(DebugLoc dl, SDValue Op, SelectionDAG &DAG,
|
|
|
|
const TargetLowering &TLI) {
|
|
|
|
if (Op.getValueType() == MVT::f32 &&
|
2008-09-09 00:28:24 +00:00
|
|
|
LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
|
2010-11-23 03:31:01 +00:00
|
|
|
SDValue Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op);
|
2008-09-09 00:28:24 +00:00
|
|
|
|
2008-09-09 20:39:27 +00:00
|
|
|
// Scale the exponent by log10(2) [0.30102999f].
    SDValue Exp = GetExponent(DAG, Op1, TLI, dl);
    SDValue LogOfExponent = DAG.getNode(ISD::FMUL, dl, MVT::f32, Exp,
                                        getF32Constant(DAG, 0x3e9a209a));

    // Get the significand and build it into a floating-point number with
    // exponent of 1.
    SDValue X = GetSignificand(DAG, Op1, dl);

    SDValue Log10ofMantissa;
    if (LimitFloatPrecision <= 6) {
      // For floating-point precision of 6:
      //
      // Log10ofMantissa =
      //   -0.50419619f +
      //     (0.60948995f - 0.10380950f * x) * x;
      //
      // error 0.0014886165, which is 6 bits
      SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
                               getF32Constant(DAG, 0xbdd49a13));
      SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
                               getF32Constant(DAG, 0x3f1c0789));
      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
      Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
                                    getF32Constant(DAG, 0x3f011300));
    } else if (LimitFloatPrecision <= 12) {
      // For floating-point precision of 12:
      //
      // Log10ofMantissa =
      //   -0.64831180f +
      //     (0.91751397f +
      //       (-0.31664806f + 0.47637168e-1f * x) * x) * x;
      //
      // error 0.00019228036, which is better than 12 bits
      SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
                               getF32Constant(DAG, 0x3d431f31));
      SDValue t1 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0,
                               getF32Constant(DAG, 0x3ea21fb2));
      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
      SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
                               getF32Constant(DAG, 0x3f6ae232));
      SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
      Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t4,
                                    getF32Constant(DAG, 0x3f25f7c3));
    } else { // LimitFloatPrecision <= 18
      // For floating-point precision of 18:
      //
      // Log10ofMantissa =
      //   -0.84299375f +
      //     (1.5327582f +
      //       (-1.0688956f +
      //         (0.49102474f +
      //           (-0.12539807f + 0.13508273e-1f * x) * x) * x) * x) * x;
      //
      // error 0.0000037995730, which is better than 18 bits
      SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
                               getF32Constant(DAG, 0x3c5d51ce));
      SDValue t1 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0,
                               getF32Constant(DAG, 0x3e00685a));
      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
      SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
                               getF32Constant(DAG, 0x3efb6798));
      SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
      SDValue t5 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t4,
                               getF32Constant(DAG, 0x3f88d192));
      SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
      SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
                               getF32Constant(DAG, 0x3fc4316c));
      SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
      Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t8,
                                    getF32Constant(DAG, 0x3f57ce70));
    }

    return DAG.getNode(ISD::FADD, dl, MVT::f32, LogOfExponent, Log10ofMantissa);
  }

  // No special expansion.
  return DAG.getNode(ISD::FLOG10, dl, Op.getValueType(), Op);
}

/// expandExp2 - Lower an exp2 intrinsic. Handles the special sequences for
/// limited-precision mode.
static SDValue expandExp2(DebugLoc dl, SDValue Op, SelectionDAG &DAG,
                          const TargetLowering &TLI) {
  if (Op.getValueType() == MVT::f32 &&
      LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
    SDValue IntegerPartOfX = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, Op);

    // FractionalPartOfX = x - (float)IntegerPartOfX;
    SDValue t1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, IntegerPartOfX);
    SDValue X = DAG.getNode(ISD::FSUB, dl, MVT::f32, Op, t1);

    // IntegerPartOfX <<= 23;
    IntegerPartOfX = DAG.getNode(ISD::SHL, dl, MVT::i32, IntegerPartOfX,
                                 DAG.getConstant(23, TLI.getPointerTy()));

    SDValue TwoToFractionalPartOfX;
    if (LimitFloatPrecision <= 6) {
      // For floating-point precision of 6:
      //
      // TwoToFractionalPartOfX =
      //   0.997535578f +
      //     (0.735607626f + 0.252464424f * x) * x;
      //
      // error 0.0144103317, which is 6 bits
      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
                               getF32Constant(DAG, 0x3e814304));
      SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
                               getF32Constant(DAG, 0x3f3c50c8));
      SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
      TwoToFractionalPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
                                           getF32Constant(DAG, 0x3f7f5e7e));
    } else if (LimitFloatPrecision <= 12) {
      // For floating-point precision of 12:
      //
      // TwoToFractionalPartOfX =
      //   0.999892986f +
      //     (0.696457318f +
      //       (0.224338339f + 0.792043434e-1f * x) * x) * x;
      //
      // error 0.000107046256, which is 13 to 14 bits
      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
                               getF32Constant(DAG, 0x3da235e3));
      SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
                               getF32Constant(DAG, 0x3e65b8f3));
      SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
      SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
                               getF32Constant(DAG, 0x3f324b07));
      SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
      TwoToFractionalPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
                                           getF32Constant(DAG, 0x3f7ff8fd));
    } else { // LimitFloatPrecision <= 18
      // For floating-point precision of 18:
      //
      // TwoToFractionalPartOfX =
      //   0.999999982f +
      //     (0.693148872f +
      //       (0.240227044f +
      //         (0.554906021e-1f +
      //           (0.961591928e-2f +
      //             (0.136028312e-2f + 0.157059148e-3f *x)*x)*x)*x)*x)*x;
      //
      // error 2.47208000*10^(-7), which is better than 18 bits
      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
                               getF32Constant(DAG, 0x3924b03e));
      SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
                               getF32Constant(DAG, 0x3ab24b87));
      SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
      SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
                               getF32Constant(DAG, 0x3c1d8c17));
      SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
      SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
                               getF32Constant(DAG, 0x3d634a1d));
      SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
      SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
                               getF32Constant(DAG, 0x3e75fe14));
      SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
      SDValue t11 = DAG.getNode(ISD::FADD, dl, MVT::f32, t10,
                                getF32Constant(DAG, 0x3f317234));
      SDValue t12 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t11, X);
      TwoToFractionalPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t12,
                                           getF32Constant(DAG, 0x3f800000));
    }

    // Add the exponent into the result in integer domain.
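    // Adding IntegerPartOfX (already shifted into the exponent-field position
    // by the SHL above) to the bit pattern of 2^FractionalPartOfX bumps the
    // IEEE-754 exponent, i.e. it multiplies the result by 2^IntegerPartOfX
    // without an extra FP multiply.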
    SDValue t13 = DAG.getNode(ISD::BITCAST, dl, MVT::i32,
                              TwoToFractionalPartOfX);
    return DAG.getNode(ISD::BITCAST, dl, MVT::f32,
                       DAG.getNode(ISD::ADD, dl, MVT::i32,
                                   t13, IntegerPartOfX));
  }

  // No special expansion.
  return DAG.getNode(ISD::FEXP2, dl, Op.getValueType(), Op);
}

/// expandPow - Lower a pow intrinsic. Handles the special sequences for
/// limited-precision mode when the base is 10.0f.
static SDValue expandPow(DebugLoc dl, SDValue LHS, SDValue RHS,
                         SelectionDAG &DAG, const TargetLowering &TLI) {
  bool IsExp10 = false;
  if (LHS.getValueType() == MVT::f32 && RHS.getValueType() == MVT::f32 &&
      LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
    if (ConstantFPSDNode *LHSC = dyn_cast<ConstantFPSDNode>(LHS)) {
      APFloat Ten(10.0f);
      IsExp10 = LHSC->isExactlyValue(Ten);
    }
  }

  if (IsExp10) {
    // Put the exponent in the right bit position for later addition to the
    // final result:
    //
    //   #define LOG2OF10 3.3219281f
    //   IntegerPartOfX = (int32_t)(x * LOG2OF10);
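    //
    // pow(10.0, x) is rewritten as exp2(x * log2(10)); 0x40549a78 is the
    // IEEE-754 single-precision bit pattern of LOG2OF10 = 3.3219281f.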
    SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, RHS,
                             getF32Constant(DAG, 0x40549a78));
    SDValue IntegerPartOfX = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, t0);

    // FractionalPartOfX = x - (float)IntegerPartOfX;
    SDValue t1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, IntegerPartOfX);
    SDValue X = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0, t1);

    // IntegerPartOfX <<= 23;
    IntegerPartOfX = DAG.getNode(ISD::SHL, dl, MVT::i32, IntegerPartOfX,
                                 DAG.getConstant(23, TLI.getPointerTy()));

    SDValue TwoToFractionalPartOfX;
    if (LimitFloatPrecision <= 6) {
      // For floating-point precision of 6:
      //
      // TwoToFractionalPartOfX =
      //   0.997535578f +
      //     (0.735607626f + 0.252464424f * x) * x;
      //
      // error 0.0144103317, which is 6 bits
      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
                               getF32Constant(DAG, 0x3e814304));
      SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
                               getF32Constant(DAG, 0x3f3c50c8));
      SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
      TwoToFractionalPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
                                           getF32Constant(DAG, 0x3f7f5e7e));
    } else if (LimitFloatPrecision <= 12) {
      // For floating-point precision of 12:
      //
      // TwoToFractionalPartOfX =
      //   0.999892986f +
      //     (0.696457318f +
      //       (0.224338339f + 0.792043434e-1f * x) * x) * x;
      //
      // error 0.000107046256, which is 13 to 14 bits
      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
                               getF32Constant(DAG, 0x3da235e3));
      SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
                               getF32Constant(DAG, 0x3e65b8f3));
      SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
      SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
                               getF32Constant(DAG, 0x3f324b07));
      SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
      TwoToFractionalPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
                                           getF32Constant(DAG, 0x3f7ff8fd));
    } else { // LimitFloatPrecision <= 18
      // For floating-point precision of 18:
      //
      // TwoToFractionalPartOfX =
      //   0.999999982f +
      //     (0.693148872f +
      //       (0.240227044f +
      //         (0.554906021e-1f +
      //           (0.961591928e-2f +
      //             (0.136028312e-2f + 0.157059148e-3f *x)*x)*x)*x)*x)*x;
      //
      // error 2.47208000*10^(-7), which is better than 18 bits
      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
                               getF32Constant(DAG, 0x3924b03e));
      SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
                               getF32Constant(DAG, 0x3ab24b87));
      SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
      SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
                               getF32Constant(DAG, 0x3c1d8c17));
      SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
      SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
                               getF32Constant(DAG, 0x3d634a1d));
      SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
      SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
                               getF32Constant(DAG, 0x3e75fe14));
      SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
      SDValue t11 = DAG.getNode(ISD::FADD, dl, MVT::f32, t10,
                                getF32Constant(DAG, 0x3f317234));
      SDValue t12 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t11, X);
      TwoToFractionalPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t12,
                                           getF32Constant(DAG, 0x3f800000));
    }

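    // Add the exponent into the result in integer domain, exactly as in
    // expandExp2 above.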
    SDValue t13 = DAG.getNode(ISD::BITCAST, dl, MVT::i32,
                              TwoToFractionalPartOfX);
    return DAG.getNode(ISD::BITCAST, dl, MVT::f32,
                       DAG.getNode(ISD::ADD, dl, MVT::i32,
                                   t13, IntegerPartOfX));
  }

  // No special expansion.
  return DAG.getNode(ISD::FPOW, dl, LHS.getValueType(), LHS, RHS);
}

/// ExpandPowI - Expand a llvm.powi intrinsic.
static SDValue ExpandPowI(DebugLoc DL, SDValue LHS, SDValue RHS,
                          SelectionDAG &DAG) {
  // If RHS is a constant, we can expand this out to a multiplication tree,
  // otherwise we end up lowering to a call to __powidf2 (for example).  When
  // optimizing for size, we only want to do this if the expansion would produce
  // a small number of multiplies, otherwise we do the full expansion.
  if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS)) {
    // Get the exponent as a positive value.
    unsigned Val = RHSC->getSExtValue();
    if ((int)Val < 0) Val = -Val;

    // powi(x, 0) -> 1.0
    if (Val == 0)
      return DAG.getConstantFP(1.0, LHS.getValueType());

    const Function *F = DAG.getMachineFunction().getFunction();
    if (!F->getFnAttributes().hasAttribute(Attributes::OptimizeForSize) ||
        // If optimizing for size, don't insert too many multiplies.  This
        // inserts up to 5 multiplies.
        CountPopulation_32(Val)+Log2_32(Val) < 7) {
      // We use the simple binary decomposition method to generate the multiply
      // sequence.  There are more optimal ways to do this (for example,
      // powi(x,15) generates one more multiply than it should), but this has
      // the benefit of being both really simple and much better than a libcall.
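      //
      // For example, powi(x, 11) (binary 1011) is built as x * x^2 * x^8:
      // each iteration of the loop below squares CurSquare and, when the
      // current low bit of Val is set, multiplies it into Res.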
      SDValue Res;  // Logically starts equal to 1.0
      SDValue CurSquare = LHS;
      while (Val) {
        if (Val & 1) {
          if (Res.getNode())
            Res = DAG.getNode(ISD::FMUL, DL,Res.getValueType(), Res, CurSquare);
          else
            Res = CurSquare;  // 1.0*CurSquare.
        }

        CurSquare = DAG.getNode(ISD::FMUL, DL, CurSquare.getValueType(),
                                CurSquare, CurSquare);
        Val >>= 1;
      }

      // If the original was negative, invert the result, producing 1/(x*x*x).
      if (RHSC->getSExtValue() < 0)
        Res = DAG.getNode(ISD::FDIV, DL, LHS.getValueType(),
                          DAG.getConstantFP(1.0, LHS.getValueType()), Res);
      return Res;
    }
  }

  // Otherwise, expand to a libcall.
  return DAG.getNode(ISD::FPOWI, DL, LHS.getValueType(), LHS, RHS);
}

// getTruncatedArgReg - Find the underlying register used for a truncated
// argument.
static unsigned getTruncatedArgReg(const SDValue &N) {
  if (N.getOpcode() != ISD::TRUNCATE)
    return 0;

  const SDValue &Ext = N.getOperand(0);
  if (Ext.getOpcode() == ISD::AssertZext || Ext.getOpcode() == ISD::AssertSext){
    const SDValue &CFR = Ext.getOperand(0);
    if (CFR.getOpcode() == ISD::CopyFromReg)
      return cast<RegisterSDNode>(CFR.getOperand(1))->getReg();
    if (CFR.getOpcode() == ISD::TRUNCATE)
      return getTruncatedArgReg(CFR);
  }
  return 0;
}

/// EmitFuncArgumentDbgValue - If the DbgValueInst is a dbg_value of a function
/// argument, create the corresponding DBG_VALUE machine instruction for it now.
/// At the end of instruction selection, they will be inserted to the entry BB.
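/// The register (or frame index) is found by checking, in order: the frame
/// index recorded for the argument during lowering, the SDValue N (a
/// CopyFromReg, possibly behind a truncate), the FuncInfo.ValueMap entry for
/// the value, and finally a load whose address is a frame index.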
bool
SelectionDAGBuilder::EmitFuncArgumentDbgValue(const Value *V, MDNode *Variable,
                                              int64_t Offset,
                                              const SDValue &N) {
  const Argument *Arg = dyn_cast<Argument>(V);
  if (!Arg)
    return false;

  MachineFunction &MF = DAG.getMachineFunction();
  const TargetInstrInfo *TII = DAG.getTarget().getInstrInfo();
  const TargetRegisterInfo *TRI = DAG.getTarget().getRegisterInfo();

  // Ignore inlined function arguments here.
  DIVariable DV(Variable);
  if (DV.isInlinedFnArgument(MF.getFunction()))
    return false;

  unsigned Reg = 0;
  // Some arguments' frame index is recorded during argument lowering.
  Offset = FuncInfo.getArgumentFrameIndex(Arg);
  if (Offset)
    Reg = TRI->getFrameRegister(MF);

  if (!Reg && N.getNode()) {
    if (N.getOpcode() == ISD::CopyFromReg)
      Reg = cast<RegisterSDNode>(N.getOperand(1))->getReg();
    else
      Reg = getTruncatedArgReg(N);
    if (Reg && TargetRegisterInfo::isVirtualRegister(Reg)) {
      MachineRegisterInfo &RegInfo = MF.getRegInfo();
      unsigned PR = RegInfo.getLiveInPhysReg(Reg);
      if (PR)
        Reg = PR;
    }
  }

  if (!Reg) {
    // Check if ValueMap has reg number.
    DenseMap<const Value *, unsigned>::iterator VMI = FuncInfo.ValueMap.find(V);
    if (VMI != FuncInfo.ValueMap.end())
      Reg = VMI->second;
  }

  if (!Reg && N.getNode()) {
    // Check if frame index is available.
    if (LoadSDNode *LNode = dyn_cast<LoadSDNode>(N.getNode()))
      if (FrameIndexSDNode *FINode =
          dyn_cast<FrameIndexSDNode>(LNode->getBasePtr().getNode())) {
        Reg = TRI->getFrameRegister(MF);
        Offset = FINode->getIndex();
      }
  }

  if (!Reg)
    return false;

  MachineInstrBuilder MIB = BuildMI(MF, getCurDebugLoc(),
                                    TII->get(TargetOpcode::DBG_VALUE))
    .addReg(Reg, RegState::Debug).addImm(Offset).addMetadata(Variable);
  FuncInfo.ArgDbgValues.push_back(&*MIB);
  return true;
}

// VisualStudio defines setjmp as _setjmp
#if defined(_MSC_VER) && defined(setjmp) && \
                         !defined(setjmp_undefined_for_msvc)
# pragma push_macro("setjmp")
# undef setjmp
# define setjmp_undefined_for_msvc
#endif

/// visitIntrinsicCall - Lower the call to the specified intrinsic function.  If
/// we want to emit this as a call to a named external function, return the name
/// otherwise lower it and return null.
const char *
SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
  DebugLoc dl = getCurDebugLoc();
  SDValue Res;

  switch (Intrinsic) {
  default:
    // By default, turn this into a target intrinsic node.
    visitTargetIntrinsic(I, Intrinsic);
    return 0;
  case Intrinsic::vastart:  visitVAStart(I); return 0;
  case Intrinsic::vaend:    visitVAEnd(I); return 0;
  case Intrinsic::vacopy:   visitVACopy(I); return 0;
  case Intrinsic::returnaddress:
    setValue(&I, DAG.getNode(ISD::RETURNADDR, dl, TLI.getPointerTy(),
                             getValue(I.getArgOperand(0))));
    return 0;
  case Intrinsic::frameaddress:
    setValue(&I, DAG.getNode(ISD::FRAMEADDR, dl, TLI.getPointerTy(),
                             getValue(I.getArgOperand(0))));
    return 0;
  case Intrinsic::setjmp:
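    // Indexing into the string literal skips the leading '_' when the target
    // does not use "_setjmp"/"_longjmp", so the plain libc name is returned
    // instead.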
    return &"_setjmp"[!TLI.usesUnderscoreSetJmp()];
  case Intrinsic::longjmp:
    return &"_longjmp"[!TLI.usesUnderscoreLongJmp()];
  case Intrinsic::memcpy: {
    // Assert for address < 256 since we support only user defined address
    // spaces.
    assert(cast<PointerType>(I.getArgOperand(0)->getType())->getAddressSpace()
           < 256 &&
           cast<PointerType>(I.getArgOperand(1)->getType())->getAddressSpace()
           < 256 &&
           "Unknown address space");
    SDValue Op1 = getValue(I.getArgOperand(0));
    SDValue Op2 = getValue(I.getArgOperand(1));
    SDValue Op3 = getValue(I.getArgOperand(2));
    unsigned Align = cast<ConstantInt>(I.getArgOperand(3))->getZExtValue();
    bool isVol = cast<ConstantInt>(I.getArgOperand(4))->getZExtValue();
    DAG.setRoot(DAG.getMemcpy(getRoot(), dl, Op1, Op2, Op3, Align, isVol, false,
                              MachinePointerInfo(I.getArgOperand(0)),
                              MachinePointerInfo(I.getArgOperand(1))));
    return 0;
  }
  case Intrinsic::memset: {
    // Assert for address < 256 since we support only user defined address
    // spaces.
    assert(cast<PointerType>(I.getArgOperand(0)->getType())->getAddressSpace()
           < 256 &&
           "Unknown address space");
    SDValue Op1 = getValue(I.getArgOperand(0));
    SDValue Op2 = getValue(I.getArgOperand(1));
    SDValue Op3 = getValue(I.getArgOperand(2));
    unsigned Align = cast<ConstantInt>(I.getArgOperand(3))->getZExtValue();
    bool isVol = cast<ConstantInt>(I.getArgOperand(4))->getZExtValue();
    DAG.setRoot(DAG.getMemset(getRoot(), dl, Op1, Op2, Op3, Align, isVol,
                              MachinePointerInfo(I.getArgOperand(0))));
    return 0;
  }
  case Intrinsic::memmove: {
    // Assert for address < 256 since we support only user defined address
    // spaces.
    assert(cast<PointerType>(I.getArgOperand(0)->getType())->getAddressSpace()
           < 256 &&
           cast<PointerType>(I.getArgOperand(1)->getType())->getAddressSpace()
           < 256 &&
           "Unknown address space");
    SDValue Op1 = getValue(I.getArgOperand(0));
    SDValue Op2 = getValue(I.getArgOperand(1));
    SDValue Op3 = getValue(I.getArgOperand(2));
    unsigned Align = cast<ConstantInt>(I.getArgOperand(3))->getZExtValue();
    bool isVol = cast<ConstantInt>(I.getArgOperand(4))->getZExtValue();
    DAG.setRoot(DAG.getMemmove(getRoot(), dl, Op1, Op2, Op3, Align, isVol,
                               MachinePointerInfo(I.getArgOperand(0)),
                               MachinePointerInfo(I.getArgOperand(1))));
    return 0;
  }
  case Intrinsic::dbg_declare: {
    const DbgDeclareInst &DI = cast<DbgDeclareInst>(I);
    MDNode *Variable = DI.getVariable();
    const Value *Address = DI.getAddress();
    if (!Address || !DIVariable(Variable).Verify()) {
      DEBUG(dbgs() << "Dropping debug info for " << DI << "\n");
      return 0;
    }

    // Build an entry in DbgOrdering.  Debug info input nodes get an SDNodeOrder
    // but do not always have a corresponding SDNode built.  The SDNodeOrder
    // absolute, but not relative, values are different depending on whether
    // debug info exists.
    ++SDNodeOrder;

    // Check if address has undef value.
    if (isa<UndefValue>(Address) ||
        (Address->use_empty() && !isa<Argument>(Address))) {
      DEBUG(dbgs() << "Dropping debug info for " << DI << "\n");
      return 0;
    }

    SDValue &N = NodeMap[Address];
    if (!N.getNode() && isa<Argument>(Address))
      // Check unused arguments map.
      N = UnusedArgNodeMap[Address];
    SDDbgValue *SDV;
    if (N.getNode()) {
      if (const BitCastInst *BCI = dyn_cast<BitCastInst>(Address))
        Address = BCI->getOperand(0);
      // Parameters are handled specially.
      bool isParameter =
        (DIVariable(Variable).getTag() == dwarf::DW_TAG_arg_variable ||
         isa<Argument>(Address));

      const AllocaInst *AI = dyn_cast<AllocaInst>(Address);

      if (isParameter && !AI) {
        FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(N.getNode());
        if (FINode)
          // Byval parameter.  We have a frame index at this point.
          SDV = DAG.getDbgValue(Variable, FINode->getIndex(),
                                0, dl, SDNodeOrder);
        else {
          // Address is an argument, so try to emit its dbg value using
          // virtual register info from the FuncInfo.ValueMap.
          EmitFuncArgumentDbgValue(Address, Variable, 0, N);
          return 0;
        }
      } else if (AI)
        SDV = DAG.getDbgValue(Variable, N.getNode(), N.getResNo(),
                              0, dl, SDNodeOrder);
      else {
        // Can't do anything with other non-AI cases yet.
        DEBUG(dbgs() << "Dropping debug info for " << DI << "\n");
        DEBUG(dbgs() << "non-AllocaInst issue for Address: \n\t");
        DEBUG(Address->dump());
        return 0;
      }
      DAG.AddDbgValue(SDV, N.getNode(), isParameter);
    } else {
      // If Address is an argument then try to emit its dbg value using
      // virtual register info from the FuncInfo.ValueMap.
      if (!EmitFuncArgumentDbgValue(Address, Variable, 0, N)) {
        // If the variable is pinned by an alloca in a dominating bb then
        // use StaticAllocaMap.
        if (const AllocaInst *AI = dyn_cast<AllocaInst>(Address)) {
          if (AI->getParent() != DI.getParent()) {
            DenseMap<const AllocaInst*, int>::iterator SI =
              FuncInfo.StaticAllocaMap.find(AI);
            if (SI != FuncInfo.StaticAllocaMap.end()) {
              SDV = DAG.getDbgValue(Variable, SI->second,
                                    0, dl, SDNodeOrder);
              DAG.AddDbgValue(SDV, 0, false);
              return 0;
            }
          }
        }
        DEBUG(dbgs() << "Dropping debug info for " << DI << "\n");
      }
    }
    return 0;
  }
  case Intrinsic::dbg_value: {
    const DbgValueInst &DI = cast<DbgValueInst>(I);
    if (!DIVariable(DI.getVariable()).Verify())
      return 0;

    MDNode *Variable = DI.getVariable();
    uint64_t Offset = DI.getOffset();
    const Value *V = DI.getValue();
    if (!V)
      return 0;

    // Build an entry in DbgOrdering.  Debug info input nodes get an SDNodeOrder
    // but do not always have a corresponding SDNode built.  The SDNodeOrder
    // absolute, but not relative, values are different depending on whether
    // debug info exists.
    ++SDNodeOrder;
    SDDbgValue *SDV;
    if (isa<ConstantInt>(V) || isa<ConstantFP>(V) || isa<UndefValue>(V)) {
      SDV = DAG.getDbgValue(Variable, V, Offset, dl, SDNodeOrder);
      DAG.AddDbgValue(SDV, 0, false);
    } else {
      // Do not use getValue() in here; we don't want to generate code at
      // this point if it hasn't been done yet.
      SDValue N = NodeMap[V];
      if (!N.getNode() && isa<Argument>(V))
        // Check unused arguments map.
        N = UnusedArgNodeMap[V];
      if (N.getNode()) {
        if (!EmitFuncArgumentDbgValue(V, Variable, Offset, N)) {
          SDV = DAG.getDbgValue(Variable, N.getNode(),
                                N.getResNo(), Offset, dl, SDNodeOrder);
          DAG.AddDbgValue(SDV, N.getNode(), false);
        }
      } else if (!V->use_empty()) {
        // Do not call getValue(V) yet, as we don't want to generate code.
        // Remember it for later.
        DanglingDebugInfo DDI(&DI, dl, SDNodeOrder);
        DanglingDebugInfoMap[V] = DDI;
      } else {
        // We may expand this to cover more cases.  One case where we have no
        // data available is an unreferenced parameter.
        DEBUG(dbgs() << "Dropping debug info for " << DI << "\n");
      }
    }

    // Build a debug info table entry.
    if (const BitCastInst *BCI = dyn_cast<BitCastInst>(V))
      V = BCI->getOperand(0);
    const AllocaInst *AI = dyn_cast<AllocaInst>(V);
    // Don't handle byval struct arguments or VLAs, for example.
    if (!AI) {
      DEBUG(dbgs() << "Dropping debug location info for:\n  " << DI << "\n");
      DEBUG(dbgs() << "  Last seen at:\n    " << *V << "\n");
      return 0;
    }
    DenseMap<const AllocaInst*, int>::iterator SI =
      FuncInfo.StaticAllocaMap.find(AI);
    if (SI == FuncInfo.StaticAllocaMap.end())
      return 0; // VLAs.
    int FI = SI->second;

    MachineModuleInfo &MMI = DAG.getMachineFunction().getMMI();
    if (!DI.getDebugLoc().isUnknown() && MMI.hasDebugInfo())
      MMI.setVariableDbgInfo(Variable, FI, DI.getDebugLoc());
    return 0;
  }
  case Intrinsic::eh_typeid_for: {
    // Find the type id for the given typeinfo.
    GlobalVariable *GV = ExtractTypeInfo(I.getArgOperand(0));
    unsigned TypeID = DAG.getMachineFunction().getMMI().getTypeIDFor(GV);
    Res = DAG.getConstant(TypeID, MVT::i32);
    setValue(&I, Res);
    return 0;
  }

  case Intrinsic::eh_return_i32:
  case Intrinsic::eh_return_i64:
    DAG.getMachineFunction().getMMI().setCallsEHReturn(true);
    DAG.setRoot(DAG.getNode(ISD::EH_RETURN, dl,
                            MVT::Other,
                            getControlRoot(),
                            getValue(I.getArgOperand(0)),
                            getValue(I.getArgOperand(1))));
    return 0;
  case Intrinsic::eh_unwind_init:
    DAG.getMachineFunction().getMMI().setCallsUnwindInit(true);
    return 0;
  case Intrinsic::eh_dwarf_cfa: {
    SDValue CfaArg = DAG.getSExtOrTrunc(getValue(I.getArgOperand(0)), dl,
                                        TLI.getPointerTy());
    SDValue Offset = DAG.getNode(ISD::ADD, dl,
                                 TLI.getPointerTy(),
                                 DAG.getNode(ISD::FRAME_TO_ARGS_OFFSET, dl,
                                             TLI.getPointerTy()),
                                 CfaArg);
    SDValue FA = DAG.getNode(ISD::FRAMEADDR, dl,
                             TLI.getPointerTy(),
                             DAG.getConstant(0, TLI.getPointerTy()));
    setValue(&I, DAG.getNode(ISD::ADD, dl, TLI.getPointerTy(),
                             FA, Offset));
    return 0;
  }
  case Intrinsic::eh_sjlj_callsite: {
    MachineModuleInfo &MMI = DAG.getMachineFunction().getMMI();
    ConstantInt *CI = dyn_cast<ConstantInt>(I.getArgOperand(0));
    assert(CI && "Non-constant call site value in eh.sjlj.callsite!");
    assert(MMI.getCurrentCallSite() == 0 && "Overlapping call sites!");

    MMI.setCurrentCallSite(CI->getZExtValue());
    return 0;
  }
  case Intrinsic::eh_sjlj_functioncontext: {
    // Get and store the index of the function context.
    MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
    AllocaInst *FnCtx =
      cast<AllocaInst>(I.getArgOperand(0)->stripPointerCasts());
    int FI = FuncInfo.StaticAllocaMap[FnCtx];
    MFI->setFunctionContextIndex(FI);
    return 0;
  }
  case Intrinsic::eh_sjlj_setjmp: {
    SDValue Ops[2];
    Ops[0] = getRoot();
    Ops[1] = getValue(I.getArgOperand(0));
    SDValue Op = DAG.getNode(ISD::EH_SJLJ_SETJMP, dl,
                             DAG.getVTList(MVT::i32, MVT::Other),
                             Ops, 2);
    setValue(&I, Op.getValue(0));
    DAG.setRoot(Op.getValue(1));
    return 0;
  }
  case Intrinsic::eh_sjlj_longjmp: {
    DAG.setRoot(DAG.getNode(ISD::EH_SJLJ_LONGJMP, dl, MVT::Other,
                            getRoot(), getValue(I.getArgOperand(0))));
    return 0;
  }
  case Intrinsic::x86_mmx_pslli_w:
  case Intrinsic::x86_mmx_pslli_d:
  case Intrinsic::x86_mmx_pslli_q:
  case Intrinsic::x86_mmx_psrli_w:
  case Intrinsic::x86_mmx_psrli_d:
  case Intrinsic::x86_mmx_psrli_q:
  case Intrinsic::x86_mmx_psrai_w:
  case Intrinsic::x86_mmx_psrai_d: {
    SDValue ShAmt = getValue(I.getArgOperand(1));
    if (isa<ConstantSDNode>(ShAmt)) {
      visitTargetIntrinsic(I, Intrinsic);
      return 0;
    }
    unsigned NewIntrinsic = 0;
    EVT ShAmtVT = MVT::v2i32;
    switch (Intrinsic) {
    case Intrinsic::x86_mmx_pslli_w:
      NewIntrinsic = Intrinsic::x86_mmx_psll_w;
      break;
    case Intrinsic::x86_mmx_pslli_d:
      NewIntrinsic = Intrinsic::x86_mmx_psll_d;
      break;
    case Intrinsic::x86_mmx_pslli_q:
      NewIntrinsic = Intrinsic::x86_mmx_psll_q;
      break;
    case Intrinsic::x86_mmx_psrli_w:
      NewIntrinsic = Intrinsic::x86_mmx_psrl_w;
      break;
    case Intrinsic::x86_mmx_psrli_d:
      NewIntrinsic = Intrinsic::x86_mmx_psrl_d;
      break;
    case Intrinsic::x86_mmx_psrli_q:
      NewIntrinsic = Intrinsic::x86_mmx_psrl_q;
      break;
    case Intrinsic::x86_mmx_psrai_w:
      NewIntrinsic = Intrinsic::x86_mmx_psra_w;
      break;
    case Intrinsic::x86_mmx_psrai_d:
      NewIntrinsic = Intrinsic::x86_mmx_psra_d;
      break;
    default: llvm_unreachable("Impossible intrinsic");  // Can't reach here.
    }

    // The vector shift intrinsics with scalars use 32-bit shift amounts but
    // the sse2/mmx shift instructions read 64 bits.  Set the upper 32 bits
    // to be zero.
    // We must do this early because v2i32 is not a legal type.
    SDValue ShOps[2];
    ShOps[0] = ShAmt;
    ShOps[1] = DAG.getConstant(0, MVT::i32);
    ShAmt = DAG.getNode(ISD::BUILD_VECTOR, dl, ShAmtVT, &ShOps[0], 2);
    EVT DestVT = TLI.getValueType(I.getType());
    ShAmt = DAG.getNode(ISD::BITCAST, dl, DestVT, ShAmt);
    Res = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT,
                      DAG.getConstant(NewIntrinsic, MVT::i32),
                      getValue(I.getArgOperand(0)), ShAmt);
    setValue(&I, Res);
    return 0;
  }
  case Intrinsic::x86_avx_vinsertf128_pd_256:
  case Intrinsic::x86_avx_vinsertf128_ps_256:
  case Intrinsic::x86_avx_vinsertf128_si_256:
  case Intrinsic::x86_avx2_vinserti128: {
    EVT DestVT = TLI.getValueType(I.getType());
    EVT ElVT = TLI.getValueType(I.getArgOperand(1)->getType());
    uint64_t Idx = (cast<ConstantInt>(I.getArgOperand(2))->getZExtValue() & 1) *
                   ElVT.getVectorNumElements();
    Res = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, DestVT,
                      getValue(I.getArgOperand(0)),
                      getValue(I.getArgOperand(1)),
                      DAG.getIntPtrConstant(Idx));
    setValue(&I, Res);
    return 0;
  }
  case Intrinsic::x86_avx_vextractf128_pd_256:
  case Intrinsic::x86_avx_vextractf128_ps_256:
  case Intrinsic::x86_avx_vextractf128_si_256:
  case Intrinsic::x86_avx2_vextracti128: {
    EVT DestVT = TLI.getValueType(I.getType());
    uint64_t Idx = (cast<ConstantInt>(I.getArgOperand(1))->getZExtValue() & 1) *
                   DestVT.getVectorNumElements();
    Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DestVT,
                      getValue(I.getArgOperand(0)),
                      DAG.getIntPtrConstant(Idx));
    setValue(&I, Res);
    return 0;
  }
  case Intrinsic::convertff:
  case Intrinsic::convertfsi:
  case Intrinsic::convertfui:
  case Intrinsic::convertsif:
  case Intrinsic::convertuif:
  case Intrinsic::convertss:
  case Intrinsic::convertsu:
  case Intrinsic::convertus:
  case Intrinsic::convertuu: {
    ISD::CvtCode Code = ISD::CVT_INVALID;
    switch (Intrinsic) {
    default: llvm_unreachable("Impossible intrinsic");  // Can't reach here.
    case Intrinsic::convertff:  Code = ISD::CVT_FF; break;
    case Intrinsic::convertfsi: Code = ISD::CVT_FS; break;
    case Intrinsic::convertfui: Code = ISD::CVT_FU; break;
    case Intrinsic::convertsif: Code = ISD::CVT_SF; break;
    case Intrinsic::convertuif: Code = ISD::CVT_UF; break;
    case Intrinsic::convertss:  Code = ISD::CVT_SS; break;
    case Intrinsic::convertsu:  Code = ISD::CVT_SU; break;
    case Intrinsic::convertus:  Code = ISD::CVT_US; break;
    case Intrinsic::convertuu:  Code = ISD::CVT_UU; break;
    }
    EVT DestVT = TLI.getValueType(I.getType());
    const Value *Op1 = I.getArgOperand(0);
    Res = DAG.getConvertRndSat(DestVT, dl, getValue(Op1),
                               DAG.getValueType(DestVT),
                               DAG.getValueType(getValue(Op1).getValueType()),
                               getValue(I.getArgOperand(1)),
                               getValue(I.getArgOperand(2)),
                               Code);
    setValue(&I, Res);
    return 0;
  }
  case Intrinsic::powi:
    setValue(&I, ExpandPowI(dl, getValue(I.getArgOperand(0)),
                            getValue(I.getArgOperand(1)), DAG));
    return 0;
  case Intrinsic::log:
    setValue(&I, expandLog(dl, getValue(I.getArgOperand(0)), DAG, TLI));
    return 0;
  case Intrinsic::log2:
    setValue(&I, expandLog2(dl, getValue(I.getArgOperand(0)), DAG, TLI));
    return 0;
  case Intrinsic::log10:
    setValue(&I, expandLog10(dl, getValue(I.getArgOperand(0)), DAG, TLI));
    return 0;
  case Intrinsic::exp:
    setValue(&I, expandExp(dl, getValue(I.getArgOperand(0)), DAG, TLI));
    return 0;
  case Intrinsic::exp2:
    setValue(&I, expandExp2(dl, getValue(I.getArgOperand(0)), DAG, TLI));
    return 0;
  case Intrinsic::pow:
    setValue(&I, expandPow(dl, getValue(I.getArgOperand(0)),
                           getValue(I.getArgOperand(1)), DAG, TLI));
    return 0;
  case Intrinsic::sqrt:
  case Intrinsic::fabs:
  case Intrinsic::sin:
  case Intrinsic::cos:
  case Intrinsic::floor:
  case Intrinsic::ceil:
  case Intrinsic::trunc:
  case Intrinsic::rint:
  case Intrinsic::nearbyint: {
    unsigned Opcode;
    switch (Intrinsic) {
    default: llvm_unreachable("Impossible intrinsic");  // Can't reach here.
    case Intrinsic::sqrt:      Opcode = ISD::FSQRT;      break;
    case Intrinsic::fabs:      Opcode = ISD::FABS;       break;
    case Intrinsic::sin:       Opcode = ISD::FSIN;       break;
    case Intrinsic::cos:       Opcode = ISD::FCOS;       break;
    case Intrinsic::floor:     Opcode = ISD::FFLOOR;     break;
    case Intrinsic::ceil:      Opcode = ISD::FCEIL;      break;
    case Intrinsic::trunc:     Opcode = ISD::FTRUNC;     break;
    case Intrinsic::rint:      Opcode = ISD::FRINT;      break;
    case Intrinsic::nearbyint: Opcode = ISD::FNEARBYINT; break;
    }

    setValue(&I, DAG.getNode(Opcode, dl,
                             getValue(I.getArgOperand(0)).getValueType(),
                             getValue(I.getArgOperand(0))));
    return 0;
  }
  case Intrinsic::fma:
    setValue(&I, DAG.getNode(ISD::FMA, dl,
                             getValue(I.getArgOperand(0)).getValueType(),
                             getValue(I.getArgOperand(0)),
                             getValue(I.getArgOperand(1)),
                             getValue(I.getArgOperand(2))));
    return 0;
  case Intrinsic::fmuladd: {
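    // Lower to a single FMA node when FP operation fusion is permitted and the
    // target reports FMA as legal (or custom) and faster than a separate
    // multiply and add; otherwise emit an explicit FMUL + FADD pair.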
    EVT VT = TLI.getValueType(I.getType());
    if (TM.Options.AllowFPOpFusion != FPOpFusion::Strict &&
        TLI.isOperationLegalOrCustom(ISD::FMA, VT) &&
        TLI.isFMAFasterThanMulAndAdd(VT)) {
      setValue(&I, DAG.getNode(ISD::FMA, dl,
                               getValue(I.getArgOperand(0)).getValueType(),
                               getValue(I.getArgOperand(0)),
                               getValue(I.getArgOperand(1)),
                               getValue(I.getArgOperand(2))));
    } else {
      SDValue Mul = DAG.getNode(ISD::FMUL, dl,
                                getValue(I.getArgOperand(0)).getValueType(),
                                getValue(I.getArgOperand(0)),
                                getValue(I.getArgOperand(1)));
      SDValue Add = DAG.getNode(ISD::FADD, dl,
                                getValue(I.getArgOperand(0)).getValueType(),
                                Mul,
                                getValue(I.getArgOperand(2)));
      setValue(&I, Add);
    }
    return 0;
  }
  case Intrinsic::convert_to_fp16:
    setValue(&I, DAG.getNode(ISD::FP32_TO_FP16, dl,
                             MVT::i16, getValue(I.getArgOperand(0))));
    return 0;
  case Intrinsic::convert_from_fp16:
    setValue(&I, DAG.getNode(ISD::FP16_TO_FP32, dl,
                             MVT::f32, getValue(I.getArgOperand(0))));
    return 0;
  case Intrinsic::pcmarker: {
    SDValue Tmp = getValue(I.getArgOperand(0));
    DAG.setRoot(DAG.getNode(ISD::PCMARKER, dl, MVT::Other, getRoot(), Tmp));
    return 0;
  }
  case Intrinsic::readcyclecounter: {
    SDValue Op = getRoot();
    Res = DAG.getNode(ISD::READCYCLECOUNTER, dl,
                      DAG.getVTList(MVT::i64, MVT::Other),
                      &Op, 1);
    setValue(&I, Res);
    DAG.setRoot(Res.getValue(1));
    return 0;
  }
  case Intrinsic::bswap:
    setValue(&I, DAG.getNode(ISD::BSWAP, dl,
                             getValue(I.getArgOperand(0)).getValueType(),
                             getValue(I.getArgOperand(0))));
    return 0;
  case Intrinsic::cttz: {
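    // The second operand is the 'is_zero_undef' flag: when it is non-zero the
    // result is undefined for a zero input, so the relaxed CTTZ_ZERO_UNDEF
    // node can be used (the ctlz case below is analogous).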
    SDValue Arg = getValue(I.getArgOperand(0));
    ConstantInt *CI = cast<ConstantInt>(I.getArgOperand(1));
    EVT Ty = Arg.getValueType();
    setValue(&I, DAG.getNode(CI->isZero() ? ISD::CTTZ : ISD::CTTZ_ZERO_UNDEF,
                             dl, Ty, Arg));
    return 0;
  }
  case Intrinsic::ctlz: {
    SDValue Arg = getValue(I.getArgOperand(0));
    ConstantInt *CI = cast<ConstantInt>(I.getArgOperand(1));
    EVT Ty = Arg.getValueType();
    setValue(&I, DAG.getNode(CI->isZero() ? ISD::CTLZ : ISD::CTLZ_ZERO_UNDEF,
                             dl, Ty, Arg));
    return 0;
  }
  case Intrinsic::ctpop: {
    SDValue Arg = getValue(I.getArgOperand(0));
    EVT Ty = Arg.getValueType();
    setValue(&I, DAG.getNode(ISD::CTPOP, dl, Ty, Arg));
    return 0;
  }
  case Intrinsic::stacksave: {
    SDValue Op = getRoot();
    Res = DAG.getNode(ISD::STACKSAVE, dl,
                      DAG.getVTList(TLI.getPointerTy(), MVT::Other), &Op, 1);
    setValue(&I, Res);
    DAG.setRoot(Res.getValue(1));
    return 0;
  }
  case Intrinsic::stackrestore: {
    Res = getValue(I.getArgOperand(0));
    DAG.setRoot(DAG.getNode(ISD::STACKRESTORE, dl, MVT::Other, getRoot(), Res));
    return 0;
  }
  case Intrinsic::stackprotector: {
    // Emit code into the DAG to store the stack guard onto the stack.
    MachineFunction &MF = DAG.getMachineFunction();
    MachineFrameInfo *MFI = MF.getFrameInfo();
    EVT PtrTy = TLI.getPointerTy();

    SDValue Src = getValue(I.getArgOperand(0));   // The guard's value.
    AllocaInst *Slot = cast<AllocaInst>(I.getArgOperand(1));

    int FI = FuncInfo.StaticAllocaMap[Slot];
    MFI->setStackProtectorIndex(FI);

    SDValue FIN = DAG.getFrameIndex(FI, PtrTy);

    // Store the stack protector onto the stack.
    Res = DAG.getStore(getRoot(), dl, Src, FIN,
                       MachinePointerInfo::getFixedStack(FI),
                       true, false, 0);
    setValue(&I, Res);
    DAG.setRoot(Res);
    return 0;
  }
  case Intrinsic::objectsize: {
    // If we don't know by now, we're never going to know.
|
2010-06-25 09:38:13 +00:00
|
|
|
ConstantInt *CI = dyn_cast<ConstantInt>(I.getArgOperand(1));
|
2009-10-27 00:52:25 +00:00
|
|
|
|
|
|
|
assert(CI && "Non-constant type in __builtin_object_size?");
|
|
|
|
|
2010-06-25 09:38:13 +00:00
|
|
|
SDValue Arg = getValue(I.getCalledValue());
|
2009-10-28 21:32:16 +00:00
|
|
|
EVT Ty = Arg.getValueType();
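    // The size was not folded to a constant earlier, so give the conservative
    // answer selected by the second argument: -1 when it is 0 (maximum mode),
    // and 0 otherwise (minimum mode).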
|
|
|
|
|
2010-06-18 14:22:04 +00:00
|
|
|
if (CI->isZero())
|
2009-12-22 00:40:51 +00:00
|
|
|
Res = DAG.getConstant(-1ULL, Ty);
|
2009-10-27 00:52:25 +00:00
|
|
|
else
|
2009-12-22 00:40:51 +00:00
|
|
|
Res = DAG.getConstant(0, Ty);
|
|
|
|
|
|
|
|
setValue(&I, Res);
|
2009-10-27 00:52:25 +00:00
|
|
|
return 0;
|
|
|
|
}
|
2008-09-03 16:12:24 +00:00
|
|
|
case Intrinsic::var_annotation:
|
|
|
|
// Discard annotate attributes
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
case Intrinsic::init_trampoline: {
|
2010-06-25 09:38:13 +00:00
|
|
|
const Function *F = cast<Function>(I.getArgOperand(1)->stripPointerCasts());
|
2008-09-03 16:12:24 +00:00
|
|
|
|
|
|
|
SDValue Ops[6];
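    // INIT_TRAMPOLINE operands: chain, trampoline storage, target function,
    // static chain (nest) value, plus two source values carried for alias
    // information.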
|
|
|
|
Ops[0] = getRoot();
|
2010-06-25 09:38:13 +00:00
|
|
|
Ops[1] = getValue(I.getArgOperand(0));
|
|
|
|
Ops[2] = getValue(I.getArgOperand(1));
|
|
|
|
Ops[3] = getValue(I.getArgOperand(2));
|
|
|
|
Ops[4] = DAG.getSrcValue(I.getArgOperand(0));
|
2008-09-03 16:12:24 +00:00
|
|
|
Ops[5] = DAG.getSrcValue(F);
|
|
|
|
|
2011-09-06 13:37:06 +00:00
|
|
|
Res = DAG.getNode(ISD::INIT_TRAMPOLINE, dl, MVT::Other, Ops, 6);
|
2008-09-03 16:12:24 +00:00
|
|
|
|
2011-09-06 13:37:06 +00:00
|
|
|
DAG.setRoot(Res);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
case Intrinsic::adjust_trampoline: {
|
|
|
|
setValue(&I, DAG.getNode(ISD::ADJUST_TRAMPOLINE, dl,
|
|
|
|
TLI.getPointerTy(),
|
|
|
|
getValue(I.getArgOperand(0))));
|
2008-09-03 16:12:24 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
case Intrinsic::gcroot:
|
|
|
|
if (GFI) {
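      // Record the alloca's frame index and type map in the GC metadata so a
      // stack root entry is emitted for it.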
|
2012-05-01 22:50:45 +00:00
|
|
|
const Value *Alloca = I.getArgOperand(0)->stripPointerCasts();
|
2010-06-25 09:38:13 +00:00
|
|
|
const Constant *TypeMap = cast<Constant>(I.getArgOperand(1));
|
2009-01-16 06:53:46 +00:00
|
|
|
|
2008-09-03 16:12:24 +00:00
|
|
|
FrameIndexSDNode *FI = cast<FrameIndexSDNode>(getValue(Alloca).getNode());
|
|
|
|
GFI->addStackRoot(FI->getIndex(), TypeMap);
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
case Intrinsic::gcread:
|
|
|
|
case Intrinsic::gcwrite:
|
2009-07-14 16:55:14 +00:00
|
|
|
llvm_unreachable("GC failed to lower gcread/gcwrite intrinsics!");
|
2009-12-22 00:40:51 +00:00
|
|
|
case Intrinsic::flt_rounds:
|
2010-01-28 21:51:40 +00:00
|
|
|
setValue(&I, DAG.getNode(ISD::FLT_ROUNDS_, dl, MVT::i32));
|
2008-09-03 16:12:24 +00:00
|
|
|
return 0;
|
2011-07-06 18:22:43 +00:00
|
|
|
|
|
|
|
case Intrinsic::expect: {
|
|
|
|
// Just replace __builtin_expect(exp, c) with EXP.
|
|
|
|
setValue(&I, getValue(I.getArgOperand(0)));
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2012-10-19 20:11:16 +00:00
|
|
|
case Intrinsic::debugtrap:
|
2011-04-08 21:37:21 +00:00
|
|
|
case Intrinsic::trap: {
|
2011-12-02 22:16:29 +00:00
|
|
|
StringRef TrapFuncName = TM.Options.getTrapFunctionName();
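    // If no trap function name has been configured, lower directly to the
    // ISD trap node; otherwise emit a call to the named function instead.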
|
2011-04-08 21:37:21 +00:00
|
|
|
if (TrapFuncName.empty()) {
|
2012-10-19 20:11:16 +00:00
|
|
|
ISD::NodeType Op = (Intrinsic == Intrinsic::trap) ?
|
|
|
|
ISD::TRAP : ISD::DEBUGTRAP;
|
|
|
|
DAG.setRoot(DAG.getNode(Op, dl, MVT::Other, getRoot()));
|
2011-04-08 21:37:21 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
TargetLowering::ArgListTy Args;
|
2012-05-25 16:35:28 +00:00
|
|
|
TargetLowering::
|
|
|
|
CallLoweringInfo CLI(getRoot(), I.getType(),
|
2011-04-08 21:37:21 +00:00
|
|
|
false, false, false, false, 0, CallingConv::C,
|
2012-02-28 18:51:51 +00:00
|
|
|
/*isTailCall=*/false,
|
|
|
|
/*doesNotRet=*/false, /*isReturnValueUsed=*/true,
|
2011-04-08 21:37:21 +00:00
|
|
|
DAG.getExternalSymbol(TrapFuncName.data(), TLI.getPointerTy()),
|
2012-11-24 23:05:23 +00:00
|
|
|
Args, DAG, dl);
|
2012-05-25 16:35:28 +00:00
|
|
|
std::pair<SDValue, SDValue> Result = TLI.LowerCallTo(CLI);
|
2011-04-08 21:37:21 +00:00
|
|
|
DAG.setRoot(Result.second);
|
2008-09-03 16:12:24 +00:00
|
|
|
return 0;
|
2011-04-08 21:37:21 +00:00
|
|
|
}
|
2012-10-19 20:11:16 +00:00
|
|
|
|
2008-11-21 02:38:44 +00:00
|
|
|
case Intrinsic::uadd_with_overflow:
|
2008-12-09 22:08:41 +00:00
|
|
|
case Intrinsic::sadd_with_overflow:
|
|
|
|
case Intrinsic::usub_with_overflow:
|
|
|
|
case Intrinsic::ssub_with_overflow:
|
|
|
|
case Intrinsic::umul_with_overflow:
|
2012-04-11 04:34:11 +00:00
|
|
|
case Intrinsic::smul_with_overflow: {
|
|
|
|
ISD::NodeType Op;
|
|
|
|
switch (Intrinsic) {
|
|
|
|
default: llvm_unreachable("Impossible intrinsic"); // Can't reach here.
|
|
|
|
case Intrinsic::uadd_with_overflow: Op = ISD::UADDO; break;
|
|
|
|
case Intrinsic::sadd_with_overflow: Op = ISD::SADDO; break;
|
|
|
|
case Intrinsic::usub_with_overflow: Op = ISD::USUBO; break;
|
|
|
|
case Intrinsic::ssub_with_overflow: Op = ISD::SSUBO; break;
|
|
|
|
case Intrinsic::umul_with_overflow: Op = ISD::UMULO; break;
|
|
|
|
case Intrinsic::smul_with_overflow: Op = ISD::SMULO; break;
|
|
|
|
}
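    // Each of these intrinsics returns a {result, overflow-bit} pair, which
    // maps directly onto the two results (value type and i1) of the node
    // built below.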
|
|
|
|
SDValue Op1 = getValue(I.getArgOperand(0));
|
|
|
|
SDValue Op2 = getValue(I.getArgOperand(1));
|
2008-11-21 02:03:52 +00:00
|
|
|
|
2012-04-11 04:34:11 +00:00
|
|
|
SDVTList VTs = DAG.getVTList(Op1.getValueType(), MVT::i1);
|
2012-11-24 23:05:23 +00:00
|
|
|
setValue(&I, DAG.getNode(Op, dl, VTs, Op1, Op2));
|
2012-04-11 04:34:11 +00:00
|
|
|
return 0;
|
|
|
|
}
|
2008-09-03 16:12:24 +00:00
|
|
|
case Intrinsic::prefetch: {
|
2011-06-14 04:58:37 +00:00
|
|
|
SDValue Ops[5];
|
2010-10-26 23:11:10 +00:00
|
|
|
unsigned rw = cast<ConstantInt>(I.getArgOperand(1))->getZExtValue();
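    // llvm.prefetch operands: address, rw flag (0 = read, 1 = write),
    // locality, and cache type; they are forwarded to the PREFETCH memory
    // intrinsic node.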
|
2008-09-03 16:12:24 +00:00
|
|
|
Ops[0] = getRoot();
|
2010-06-25 09:38:13 +00:00
|
|
|
Ops[1] = getValue(I.getArgOperand(0));
|
|
|
|
Ops[2] = getValue(I.getArgOperand(1));
|
|
|
|
Ops[3] = getValue(I.getArgOperand(2));
|
2011-06-14 04:58:37 +00:00
|
|
|
Ops[4] = getValue(I.getArgOperand(3));
|
2010-10-26 23:11:10 +00:00
|
|
|
DAG.setRoot(DAG.getMemIntrinsicNode(ISD::PREFETCH, dl,
|
|
|
|
DAG.getVTList(MVT::Other),
|
2011-06-14 04:58:37 +00:00
|
|
|
&Ops[0], 5,
|
2010-10-26 23:11:10 +00:00
|
|
|
EVT::getIntegerVT(*Context, 8),
|
|
|
|
MachinePointerInfo(I.getArgOperand(0)),
|
|
|
|
0, /* align */
|
|
|
|
false, /* volatile */
|
|
|
|
rw==0, /* read */
|
|
|
|
rw==1)); /* write */
|
2008-09-03 16:12:24 +00:00
|
|
|
return 0;
|
|
|
|
}
|
2012-09-06 09:17:37 +00:00
|
|
|
case Intrinsic::lifetime_start:
|
|
|
|
case Intrinsic::lifetime_end: {
|
2012-09-10 08:43:23 +00:00
|
|
|
bool IsStart = (Intrinsic == Intrinsic::lifetime_start);
|
2012-09-07 05:13:00 +00:00
|
|
|
// Stack coloring is not enabled in O0; discard region information.
|
2012-09-10 08:43:23 +00:00
|
|
|
if (TM.getOptLevel() == CodeGenOpt::None)
|
2012-09-06 09:17:37 +00:00
|
|
|
return 0;
|
|
|
|
|
2012-09-10 08:43:23 +00:00
|
|
|
SmallVector<Value *, 4> Allocas;
|
|
|
|
GetUnderlyingObjects(I.getArgOperand(1), Allocas, TD);
|
2010-01-01 04:41:22 +00:00
|
|
|
|
2012-09-10 08:43:23 +00:00
|
|
|
for (SmallVector<Value*, 4>::iterator Object = Allocas.begin(),
|
|
|
|
E = Allocas.end(); Object != E; ++Object) {
|
|
|
|
AllocaInst *LifetimeObject = dyn_cast_or_null<AllocaInst>(*Object);
|
|
|
|
|
|
|
|
// Could not find an Alloca.
|
|
|
|
if (!LifetimeObject)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
int FI = FuncInfo.StaticAllocaMap[LifetimeObject];
|
|
|
|
|
|
|
|
SDValue Ops[2];
|
|
|
|
Ops[0] = getRoot();
|
|
|
|
Ops[1] = DAG.getFrameIndex(FI, TLI.getPointerTy(), true);
|
|
|
|
unsigned Opcode = (IsStart ? ISD::LIFETIME_START : ISD::LIFETIME_END);
|
|
|
|
|
|
|
|
Res = DAG.getNode(Opcode, dl, MVT::Other, Ops, 2);
|
|
|
|
DAG.setRoot(Res);
|
|
|
|
}
|
2012-09-06 09:17:37 +00:00
|
|
|
    return 0;
  }
|
2010-05-29 17:53:24 +00:00
|
|
|
case Intrinsic::invariant_start:
|
|
|
|
// Discard region information.
|
|
|
|
setValue(&I, DAG.getUNDEF(TLI.getPointerTy()));
|
|
|
|
return 0;
|
|
|
|
case Intrinsic::invariant_end:
|
|
|
|
// Discard region information.
|
|
|
|
return 0;
|
2012-06-28 22:30:12 +00:00
|
|
|
case Intrinsic::donothing:
|
|
|
|
// ignore
|
|
|
|
return 0;
|
2009-12-24 00:37:38 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
void SelectionDAGBuilder::LowerCallTo(ImmutableCallSite CS, SDValue Callee,
|
|
|
|
bool isTailCall,
|
|
|
|
MachineBasicBlock *LandingPad) {
|
2011-07-18 04:54:35 +00:00
|
|
|
PointerType *PT = cast<PointerType>(CS.getCalledValue()->getType());
|
|
|
|
FunctionType *FTy = cast<FunctionType>(PT->getElementType());
|
|
|
|
Type *RetTy = FTy->getReturnType();
|
2010-05-29 17:53:24 +00:00
|
|
|
MachineModuleInfo &MMI = DAG.getMachineFunction().getMMI();
|
|
|
|
MCSymbol *BeginLabel = 0;
|
2009-12-24 00:37:38 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
TargetLowering::ArgListTy Args;
|
|
|
|
TargetLowering::ArgListEntry Entry;
|
|
|
|
Args.reserve(CS.arg_size());
|
2010-01-01 04:41:22 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
// Check whether the function can return without sret-demotion.
|
2010-07-10 09:00:22 +00:00
|
|
|
SmallVector<ISD::OutputArg, 4> Outs;
|
|
|
|
GetReturnInfo(RetTy, CS.getAttributes().getRetAttributes(),
|
2012-05-25 00:09:29 +00:00
|
|
|
Outs, TLI);
|
2010-01-01 04:41:22 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
bool CanLowerReturn = TLI.CanLowerReturn(CS.getCallingConv(),
|
2012-07-19 00:04:14 +00:00
|
|
|
DAG.getMachineFunction(),
|
|
|
|
FTy->isVarArg(), Outs,
|
|
|
|
FTy->getContext());
|
2010-01-01 04:41:22 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
SDValue DemoteStackSlot;
|
2010-09-21 16:36:31 +00:00
|
|
|
int DemoteStackIdx = -100;
|
2010-01-01 04:41:22 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
if (!CanLowerReturn) {
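    // The return value cannot be lowered directly in registers, so demote it:
    // allocate a stack slot, pass its address as a hidden sret-style first
    // argument, and treat the call itself as returning void.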
|
2012-10-08 16:38:25 +00:00
|
|
|
uint64_t TySize = TLI.getDataLayout()->getTypeAllocSize(
|
2010-05-29 17:53:24 +00:00
|
|
|
FTy->getReturnType());
|
2012-10-08 16:38:25 +00:00
|
|
|
unsigned Align = TLI.getDataLayout()->getPrefTypeAlignment(
|
2010-05-29 17:53:24 +00:00
|
|
|
FTy->getReturnType());
|
|
|
|
MachineFunction &MF = DAG.getMachineFunction();
|
2010-09-21 16:36:31 +00:00
|
|
|
DemoteStackIdx = MF.getFrameInfo()->CreateStackObject(TySize, Align, false);
|
2011-07-18 04:54:35 +00:00
|
|
|
Type *StackSlotPtrType = PointerType::getUnqual(FTy->getReturnType());
|
2010-01-01 04:41:22 +00:00
|
|
|
|
2010-09-21 16:36:31 +00:00
|
|
|
DemoteStackSlot = DAG.getFrameIndex(DemoteStackIdx, TLI.getPointerTy());
|
2010-05-29 17:53:24 +00:00
|
|
|
Entry.Node = DemoteStackSlot;
|
|
|
|
Entry.Ty = StackSlotPtrType;
|
|
|
|
Entry.isSExt = false;
|
|
|
|
Entry.isZExt = false;
|
|
|
|
Entry.isInReg = false;
|
|
|
|
Entry.isSRet = true;
|
|
|
|
Entry.isNest = false;
|
|
|
|
Entry.isByVal = false;
|
|
|
|
Entry.Alignment = Align;
|
|
|
|
Args.push_back(Entry);
|
|
|
|
RetTy = Type::getVoidTy(FTy->getContext());
|
|
|
|
}
|
2010-01-01 04:41:22 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
for (ImmutableCallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end();
|
|
|
|
i != e; ++i) {
|
2011-05-13 15:18:06 +00:00
|
|
|
const Value *V = *i;
|
|
|
|
|
|
|
|
// Skip empty types
|
|
|
|
if (V->getType()->isEmptyTy())
|
|
|
|
continue;
|
|
|
|
|
|
|
|
SDValue ArgNode = getValue(V);
|
|
|
|
Entry.Node = ArgNode; Entry.Ty = V->getType();
|
2010-01-01 04:41:22 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
unsigned attrInd = i - CS.arg_begin() + 1;
|
2012-10-09 21:38:14 +00:00
|
|
|
Entry.isSExt = CS.paramHasAttr(attrInd, Attributes::SExt);
|
|
|
|
Entry.isZExt = CS.paramHasAttr(attrInd, Attributes::ZExt);
|
|
|
|
Entry.isInReg = CS.paramHasAttr(attrInd, Attributes::InReg);
|
|
|
|
Entry.isSRet = CS.paramHasAttr(attrInd, Attributes::StructRet);
|
|
|
|
Entry.isNest = CS.paramHasAttr(attrInd, Attributes::Nest);
|
|
|
|
Entry.isByVal = CS.paramHasAttr(attrInd, Attributes::ByVal);
|
2010-05-29 17:53:24 +00:00
|
|
|
Entry.Alignment = CS.getParamAlignment(attrInd);
|
|
|
|
Args.push_back(Entry);
|
2009-12-24 00:37:38 +00:00
|
|
|
}
|
2010-01-01 04:41:22 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
if (LandingPad) {
|
|
|
|
// Insert a label before the invoke call to mark the try range. This can be
|
|
|
|
// used to detect deletion of the invoke via the MachineModuleInfo.
|
|
|
|
BeginLabel = MMI.getContext().CreateTempSymbol();
|
2010-01-01 04:41:22 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
// For SjLj, keep track of which landing pads go with which invokes
|
|
|
|
// so as to maintain the ordering of pads in the LSDA.
|
|
|
|
unsigned CallSiteIndex = MMI.getCurrentCallSite();
|
|
|
|
if (CallSiteIndex) {
|
|
|
|
MMI.setCallSiteBeginLabel(BeginLabel, CallSiteIndex);
|
2011-10-05 22:24:35 +00:00
|
|
|
LPadToCallSiteMap[LandingPad].push_back(CallSiteIndex);
|
2011-10-04 22:00:35 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
// Now that the call site is handled, stop tracking it.
|
|
|
|
MMI.setCurrentCallSite(0);
|
|
|
|
}
|
2009-12-24 00:37:38 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
// Both PendingLoads and PendingExports must be flushed here;
|
|
|
|
// this call might not return.
|
|
|
|
(void)getRoot();
|
|
|
|
DAG.setRoot(DAG.getEHLabel(getCurDebugLoc(), getControlRoot(), BeginLabel));
|
|
|
|
}
|
2009-12-24 00:37:38 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
// Check if target-independent constraints permit a tail call here.
|
|
|
|
// Target-dependent constraints are checked within TLI.LowerCallTo.
|
|
|
|
if (isTailCall &&
|
|
|
|
!isInTailCallPosition(CS, CS.getAttributes().getRetAttributes(), TLI))
|
|
|
|
isTailCall = false;
|
2008-09-03 16:12:24 +00:00
|
|
|
|
2012-05-25 16:35:28 +00:00
|
|
|
TargetLowering::
|
|
|
|
CallLoweringInfo CLI(getRoot(), RetTy, FTy, isTailCall, Callee, Args, DAG,
|
|
|
|
getCurDebugLoc(), CS);
|
|
|
|
std::pair<SDValue,SDValue> Result = TLI.LowerCallTo(CLI);
|
2010-05-29 17:53:24 +00:00
|
|
|
assert((isTailCall || Result.second.getNode()) &&
|
|
|
|
"Non-null chain expected with non-tail call!");
|
|
|
|
assert((Result.second.getNode() || !Result.first.getNode()) &&
|
|
|
|
"Null value expected with tail call!");
|
|
|
|
if (Result.first.getNode()) {
|
|
|
|
setValue(CS.getInstruction(), Result.first);
|
|
|
|
} else if (!CanLowerReturn && Result.second.getNode()) {
|
|
|
|
// The instruction result is the result of loading from the
|
|
|
|
// hidden sret parameter.
|
|
|
|
SmallVector<EVT, 1> PVTs;
|
2011-07-18 04:54:35 +00:00
|
|
|
Type *PtrRetTy = PointerType::getUnqual(FTy->getReturnType());
|
2010-05-29 17:53:24 +00:00
|
|
|
|
|
|
|
ComputeValueVTs(TLI, PtrRetTy, PVTs);
|
|
|
|
assert(PVTs.size() == 1 && "Pointers should fit in one register");
|
|
|
|
EVT PtrVT = PVTs[0];
|
2012-05-25 00:09:29 +00:00
|
|
|
|
|
|
|
SmallVector<EVT, 4> RetTys;
|
|
|
|
SmallVector<uint64_t, 4> Offsets;
|
|
|
|
RetTy = FTy->getReturnType();
|
|
|
|
ComputeValueVTs(TLI, RetTy, RetTys, &Offsets);
|
|
|
|
|
|
|
|
unsigned NumValues = RetTys.size();
|
2010-05-29 17:53:24 +00:00
|
|
|
SmallVector<SDValue, 4> Values(NumValues);
|
|
|
|
SmallVector<SDValue, 4> Chains(NumValues);
|
|
|
|
|
|
|
|
for (unsigned i = 0; i < NumValues; ++i) {
|
|
|
|
SDValue Add = DAG.getNode(ISD::ADD, getCurDebugLoc(), PtrVT,
|
|
|
|
DemoteStackSlot,
|
|
|
|
DAG.getConstant(Offsets[i], PtrVT));
|
2012-05-25 00:09:29 +00:00
|
|
|
SDValue L = DAG.getLoad(RetTys[i], getCurDebugLoc(), Result.second, Add,
|
2010-09-21 16:36:31 +00:00
|
|
|
MachinePointerInfo::getFixedStack(DemoteStackIdx, Offsets[i]),
|
2011-11-08 18:42:53 +00:00
|
|
|
false, false, false, 1);
|
2010-05-29 17:53:24 +00:00
|
|
|
Values[i] = L;
|
|
|
|
Chains[i] = L.getValue(1);
|
2008-09-03 16:12:24 +00:00
|
|
|
}
|
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
SDValue Chain = DAG.getNode(ISD::TokenFactor, getCurDebugLoc(),
|
|
|
|
MVT::Other, &Chains[0], NumValues);
|
|
|
|
PendingLoads.push_back(Chain);
|
2010-10-16 08:25:21 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
setValue(CS.getInstruction(),
|
|
|
|
DAG.getNode(ISD::MERGE_VALUES, getCurDebugLoc(),
|
|
|
|
DAG.getVTList(&RetTys[0], RetTys.size()),
|
2012-05-25 00:09:29 +00:00
|
|
|
&Values[0], Values.size()));
|
2010-05-29 17:53:24 +00:00
|
|
|
}
|
2010-05-29 17:03:36 +00:00
|
|
|
|
2011-04-01 19:57:01 +00:00
|
|
|
// Assign order to nodes here. If the call does not produce a result, it won't
|
|
|
|
// be mapped to an SDNode and visit() will not assign it an order number.
|
2011-04-01 19:42:22 +00:00
|
|
|
if (!Result.second.getNode()) {
|
2011-04-01 19:57:01 +00:00
|
|
|
// As a special case, a null chain means that a tail call has been emitted and
|
|
|
|
// the DAG root is already updated.
|
2010-05-29 17:53:24 +00:00
|
|
|
HasTailCall = true;
|
2011-04-01 19:42:22 +00:00
|
|
|
++SDNodeOrder;
|
|
|
|
AssignOrderingToNode(DAG.getRoot().getNode());
|
|
|
|
} else {
|
|
|
|
DAG.setRoot(Result.second);
|
|
|
|
++SDNodeOrder;
|
|
|
|
AssignOrderingToNode(Result.second.getNode());
|
|
|
|
}
|
2008-09-03 16:12:24 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
if (LandingPad) {
|
|
|
|
// Insert a label at the end of the invoke call to mark the try range. This
|
|
|
|
// can be used to detect deletion of the invoke via the MachineModuleInfo.
|
|
|
|
MCSymbol *EndLabel = MMI.getContext().CreateTempSymbol();
|
|
|
|
DAG.setRoot(DAG.getEHLabel(getCurDebugLoc(), getRoot(), EndLabel));
|
2009-12-22 01:11:43 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
// Inform MachineModuleInfo of range.
|
|
|
|
MMI.addInvoke(LandingPad, BeginLabel, EndLabel);
|
|
|
|
}
|
|
|
|
}
|
2009-12-22 01:11:43 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
/// IsOnlyUsedInZeroEqualityComparison - Return true if it only matters that the
|
|
|
|
/// value is equal or not-equal to zero.
|
|
|
|
static bool IsOnlyUsedInZeroEqualityComparison(const Value *V) {
|
|
|
|
for (Value::const_use_iterator UI = V->use_begin(), E = V->use_end();
|
|
|
|
UI != E; ++UI) {
|
|
|
|
if (const ICmpInst *IC = dyn_cast<ICmpInst>(*UI))
|
|
|
|
if (IC->isEquality())
|
|
|
|
if (const Constant *C = dyn_cast<Constant>(IC->getOperand(1)))
|
|
|
|
if (C->isNullValue())
|
|
|
|
continue;
|
|
|
|
// Unknown instruction.
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
return true;
|
|
|
|
}
|
2009-01-16 06:53:46 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
static SDValue getMemCmpLoad(const Value *PtrVal, MVT LoadVT,
|
2011-07-18 04:54:35 +00:00
|
|
|
Type *LoadTy,
|
2010-05-29 17:53:24 +00:00
|
|
|
SelectionDAGBuilder &Builder) {
|
2009-01-16 06:53:46 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
// Check to see if this load can be trivially constant folded, e.g. if the
|
|
|
|
// input is from a string literal.
|
|
|
|
if (const Constant *LoadInput = dyn_cast<Constant>(PtrVal)) {
|
|
|
|
// Cast pointer to the type we really want to load.
|
|
|
|
LoadInput = ConstantExpr::getBitCast(const_cast<Constant *>(LoadInput),
|
|
|
|
PointerType::getUnqual(LoadTy));
|
2009-01-16 06:53:46 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
if (const Constant *LoadCst =
|
|
|
|
ConstantFoldLoadFromConstPtr(const_cast<Constant *>(LoadInput),
|
|
|
|
Builder.TD))
|
|
|
|
return Builder.getValue(LoadCst);
|
|
|
|
}
|
2009-01-16 06:53:46 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
// Otherwise, we have to emit the load. If the pointer is to unfoldable but
|
|
|
|
// still constant memory, the input chain can be the entry node.
|
|
|
|
SDValue Root;
|
|
|
|
bool ConstantMemory = false;
|
2009-01-16 06:53:46 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
// Do not serialize (non-volatile) loads of constant memory with anything.
|
|
|
|
if (Builder.AA->pointsToConstantMemory(PtrVal)) {
|
|
|
|
Root = Builder.DAG.getEntryNode();
|
|
|
|
ConstantMemory = true;
|
|
|
|
} else {
|
|
|
|
// Do not serialize non-volatile loads against each other.
|
|
|
|
Root = Builder.DAG.getRoot();
|
2008-09-03 16:12:24 +00:00
|
|
|
}
|
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
SDValue Ptr = Builder.getValue(PtrVal);
|
|
|
|
SDValue LoadVal = Builder.DAG.getLoad(LoadVT, Builder.getCurDebugLoc(), Root,
|
2010-09-21 16:36:31 +00:00
|
|
|
Ptr, MachinePointerInfo(PtrVal),
|
2010-05-29 17:53:24 +00:00
|
|
|
false /*volatile*/,
|
2011-11-08 18:42:53 +00:00
|
|
|
false /*nontemporal*/,
|
|
|
|
false /*isinvariant*/, 1 /* align=1 */);
|
2010-05-29 17:53:24 +00:00
|
|
|
|
|
|
|
if (!ConstantMemory)
|
|
|
|
Builder.PendingLoads.push_back(LoadVal.getValue(1));
|
|
|
|
return LoadVal;
|
2008-09-03 16:12:24 +00:00
|
|
|
}
|
|
|
|
|
2010-05-29 17:03:36 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
/// visitMemCmpCall - See if we can lower a call to memcmp in an optimized form.
|
|
|
|
/// If so, return true and lower it, otherwise return false and it will be
|
|
|
|
/// lowered like a normal call.
|
|
|
|
bool SelectionDAGBuilder::visitMemCmpCall(const CallInst &I) {
|
|
|
|
// Verify that the prototype makes sense. int memcmp(void*,void*,size_t)
|
2010-06-30 12:55:46 +00:00
|
|
|
if (I.getNumArgOperands() != 3)
|
2010-05-29 17:53:24 +00:00
|
|
|
return false;
|
2008-09-03 16:12:24 +00:00
|
|
|
|
2010-06-25 09:38:13 +00:00
|
|
|
const Value *LHS = I.getArgOperand(0), *RHS = I.getArgOperand(1);
|
2010-05-29 17:53:24 +00:00
|
|
|
if (!LHS->getType()->isPointerTy() || !RHS->getType()->isPointerTy() ||
|
2010-06-25 09:38:13 +00:00
|
|
|
!I.getArgOperand(2)->getType()->isIntegerTy() ||
|
2010-05-29 17:53:24 +00:00
|
|
|
!I.getType()->isIntegerTy())
|
|
|
|
return false;
|
2008-09-03 16:12:24 +00:00
|
|
|
|
2010-06-25 09:38:13 +00:00
|
|
|
const ConstantInt *Size = dyn_cast<ConstantInt>(I.getArgOperand(2));
|
2010-05-29 17:53:24 +00:00
|
|
|
|
|
|
|
// memcmp(S1,S2,2) != 0 -> (*(short*)LHS != *(short*)RHS) != 0
|
|
|
|
// memcmp(S1,S2,4) != 0 -> (*(int*)LHS != *(int*)RHS) != 0
|
|
|
|
if (Size && IsOnlyUsedInZeroEqualityComparison(&I)) {
|
|
|
|
bool ActuallyDoIt = true;
|
|
|
|
MVT LoadVT;
|
2011-07-18 04:54:35 +00:00
|
|
|
Type *LoadTy;
|
2010-05-29 17:53:24 +00:00
|
|
|
switch (Size->getZExtValue()) {
|
|
|
|
default:
|
|
|
|
LoadVT = MVT::Other;
|
|
|
|
LoadTy = 0;
|
|
|
|
ActuallyDoIt = false;
|
|
|
|
break;
|
|
|
|
case 2:
|
|
|
|
LoadVT = MVT::i16;
|
|
|
|
LoadTy = Type::getInt16Ty(Size->getContext());
|
|
|
|
break;
|
|
|
|
case 4:
|
|
|
|
LoadVT = MVT::i32;
|
|
|
|
LoadTy = Type::getInt32Ty(Size->getContext());
|
|
|
|
break;
|
|
|
|
case 8:
|
|
|
|
LoadVT = MVT::i64;
|
|
|
|
LoadTy = Type::getInt64Ty(Size->getContext());
|
|
|
|
break;
|
|
|
|
/*
|
|
|
|
case 16:
|
|
|
|
LoadVT = MVT::v4i32;
|
|
|
|
LoadTy = Type::getInt32Ty(Size->getContext());
|
|
|
|
LoadTy = VectorType::get(LoadTy, 4);
|
|
|
|
break;
|
|
|
|
*/
|
2008-09-03 16:12:24 +00:00
|
|
|
}
|
2009-12-22 01:11:43 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
// This turns into unaligned loads. We only do this if the target natively
|
|
|
|
// supports the MVT we'll be loading or if it is small enough (<= 4) that
|
|
|
|
// we'll only produce a small number of byte loads.
|
2008-09-03 16:12:24 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
// Require that we can find a legal MVT, and only do this if the target
|
|
|
|
// supports unaligned loads of that type. Expanding into byte loads would
|
|
|
|
// bloat the code.
|
|
|
|
if (ActuallyDoIt && Size->getZExtValue() > 4) {
|
|
|
|
// TODO: Handle 5 byte compare as 4-byte + 1 byte.
|
|
|
|
// TODO: Handle 8 byte compare on x86-32 as two 32-bit loads.
|
|
|
|
if (!TLI.isTypeLegal(LoadVT) || !TLI.allowsUnalignedMemoryAccesses(LoadVT))
|
|
|
|
ActuallyDoIt = false;
|
|
|
|
}
|
2010-05-29 17:03:36 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
if (ActuallyDoIt) {
|
|
|
|
SDValue LHSVal = getMemCmpLoad(LHS, LoadVT, LoadTy, *this);
|
|
|
|
SDValue RHSVal = getMemCmpLoad(RHS, LoadVT, LoadTy, *this);
|
2009-12-22 01:25:10 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
SDValue Res = DAG.getSetCC(getCurDebugLoc(), MVT::i1, LHSVal, RHSVal,
|
|
|
|
ISD::SETNE);
|
|
|
|
EVT CallVT = TLI.getValueType(I.getType(), true);
|
|
|
|
setValue(&I, DAG.getZExtOrTrunc(Res, getCurDebugLoc(), CallVT));
|
|
|
|
return true;
|
2008-10-17 16:21:11 +00:00
|
|
|
}
|
2008-09-03 16:12:24 +00:00
|
|
|
}
|
2010-05-29 17:53:24 +00:00
|
|
|
|
|
|
|
|
|
|
|
return false;
|
2008-09-03 16:12:24 +00:00
|
|
|
}
|
|
|
|
|
2012-08-03 23:29:17 +00:00
|
|
|
/// visitUnaryFloatCall - If a call instruction is a unary floating-point
|
|
|
|
/// operation (as expected), translate it to an SDNode with the specified opcode
|
|
|
|
/// and return true.
|
|
|
|
bool SelectionDAGBuilder::visitUnaryFloatCall(const CallInst &I,
|
|
|
|
unsigned Opcode) {
|
|
|
|
// Sanity check that it really is a unary floating-point call.
|
|
|
|
if (I.getNumArgOperands() != 1 ||
|
|
|
|
!I.getArgOperand(0)->getType()->isFloatingPointTy() ||
|
|
|
|
I.getType() != I.getArgOperand(0)->getType() ||
|
|
|
|
!I.onlyReadsMemory())
|
|
|
|
return false;
|
|
|
|
|
|
|
|
SDValue Tmp = getValue(I.getArgOperand(0));
|
|
|
|
setValue(&I, DAG.getNode(Opcode, getCurDebugLoc(), Tmp.getValueType(), Tmp));
|
|
|
|
return true;
|
|
|
|
}
|
2008-09-03 16:12:24 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
void SelectionDAGBuilder::visitCall(const CallInst &I) {
|
2010-07-05 05:36:21 +00:00
|
|
|
// Handle inline assembly differently.
|
|
|
|
if (isa<InlineAsm>(I.getCalledValue())) {
|
|
|
|
visitInlineAsm(&I);
|
|
|
|
return;
|
|
|
|
}
|
2010-10-16 08:25:21 +00:00
|
|
|
|
2010-10-21 20:49:23 +00:00
|
|
|
MachineModuleInfo &MMI = DAG.getMachineFunction().getMMI();
|
2012-02-22 19:06:13 +00:00
|
|
|
ComputeUsesVAFloatArgument(I, &MMI);
|
2010-10-21 20:49:23 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
const char *RenameFn = 0;
|
|
|
|
if (Function *F = I.getCalledFunction()) {
|
|
|
|
if (F->isDeclaration()) {
|
2010-07-05 05:36:21 +00:00
|
|
|
if (const TargetIntrinsicInfo *II = TM.getIntrinsicInfo()) {
|
2010-05-29 17:53:24 +00:00
|
|
|
if (unsigned IID = II->getIntrinsicID(F)) {
|
|
|
|
RenameFn = visitIntrinsicCall(I, IID);
|
|
|
|
if (!RenameFn)
|
|
|
|
return;
|
2008-09-03 16:12:24 +00:00
|
|
|
}
|
|
|
|
}
|
2010-05-29 17:53:24 +00:00
|
|
|
if (unsigned IID = F->getIntrinsicID()) {
|
|
|
|
RenameFn = visitIntrinsicCall(I, IID);
|
|
|
|
if (!RenameFn)
|
|
|
|
return;
|
|
|
|
}
|
2008-09-03 16:12:24 +00:00
|
|
|
}
|
2009-01-16 06:53:46 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
// Check for well-known libc/libm calls. If the function is internal, it
|
|
|
|
// can't be a library call.
|
2012-08-03 21:26:24 +00:00
|
|
|
LibFunc::Func Func;
|
|
|
|
if (!F->hasLocalLinkage() && F->hasName() &&
|
|
|
|
LibInfo->getLibFunc(F->getName(), Func) &&
|
|
|
|
LibInfo->hasOptimizedCodeGen(Func)) {
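      // These libm/libc routines have direct ISD counterparts; a minimal
      // sanity check of the operand types is done before each mapping, and
      // any failure falls through to the normal call path.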
|
|
|
|
switch (Func) {
|
|
|
|
default: break;
|
|
|
|
case LibFunc::copysign:
|
|
|
|
case LibFunc::copysignf:
|
|
|
|
case LibFunc::copysignl:
|
2010-06-30 12:55:46 +00:00
|
|
|
if (I.getNumArgOperands() == 2 && // Basic sanity checks.
|
2010-06-25 09:38:13 +00:00
|
|
|
I.getArgOperand(0)->getType()->isFloatingPointTy() &&
|
|
|
|
I.getType() == I.getArgOperand(0)->getType() &&
|
2012-08-03 23:29:17 +00:00
|
|
|
I.getType() == I.getArgOperand(1)->getType() &&
|
|
|
|
I.onlyReadsMemory()) {
|
2010-06-25 09:38:13 +00:00
|
|
|
SDValue LHS = getValue(I.getArgOperand(0));
|
|
|
|
SDValue RHS = getValue(I.getArgOperand(1));
|
2010-05-29 17:53:24 +00:00
|
|
|
setValue(&I, DAG.getNode(ISD::FCOPYSIGN, getCurDebugLoc(),
|
|
|
|
LHS.getValueType(), LHS, RHS));
|
|
|
|
return;
|
|
|
|
}
|
2012-08-03 21:26:24 +00:00
|
|
|
break;
|
|
|
|
case LibFunc::fabs:
|
|
|
|
case LibFunc::fabsf:
|
|
|
|
case LibFunc::fabsl:
|
2012-08-03 23:29:17 +00:00
|
|
|
if (visitUnaryFloatCall(I, ISD::FABS))
|
2010-05-29 17:53:24 +00:00
|
|
|
return;
|
2012-08-03 21:26:24 +00:00
|
|
|
break;
|
|
|
|
case LibFunc::sin:
|
|
|
|
case LibFunc::sinf:
|
|
|
|
case LibFunc::sinl:
|
2012-08-03 23:29:17 +00:00
|
|
|
if (visitUnaryFloatCall(I, ISD::FSIN))
|
2010-05-29 17:53:24 +00:00
|
|
|
return;
|
2012-08-03 21:26:24 +00:00
|
|
|
break;
|
|
|
|
case LibFunc::cos:
|
|
|
|
case LibFunc::cosf:
|
|
|
|
case LibFunc::cosl:
|
2012-08-03 23:29:17 +00:00
|
|
|
if (visitUnaryFloatCall(I, ISD::FCOS))
|
2010-05-29 17:53:24 +00:00
|
|
|
return;
|
2012-08-03 21:26:24 +00:00
|
|
|
break;
|
|
|
|
case LibFunc::sqrt:
|
|
|
|
case LibFunc::sqrtf:
|
|
|
|
case LibFunc::sqrtl:
|
2012-08-03 23:29:17 +00:00
|
|
|
if (visitUnaryFloatCall(I, ISD::FSQRT))
|
2010-05-29 17:53:24 +00:00
|
|
|
return;
|
2012-08-03 21:26:24 +00:00
|
|
|
break;
|
|
|
|
case LibFunc::floor:
|
|
|
|
case LibFunc::floorf:
|
|
|
|
case LibFunc::floorl:
|
2012-08-03 23:29:17 +00:00
|
|
|
if (visitUnaryFloatCall(I, ISD::FFLOOR))
|
2011-12-08 19:32:14 +00:00
|
|
|
return;
|
2012-08-03 21:26:24 +00:00
|
|
|
break;
|
|
|
|
case LibFunc::nearbyint:
|
|
|
|
case LibFunc::nearbyintf:
|
|
|
|
case LibFunc::nearbyintl:
|
2012-08-03 23:29:17 +00:00
|
|
|
if (visitUnaryFloatCall(I, ISD::FNEARBYINT))
|
2011-12-08 19:32:14 +00:00
|
|
|
return;
|
2012-08-03 21:26:24 +00:00
|
|
|
break;
|
|
|
|
case LibFunc::ceil:
|
|
|
|
case LibFunc::ceilf:
|
|
|
|
case LibFunc::ceill:
|
2012-08-03 23:29:17 +00:00
|
|
|
if (visitUnaryFloatCall(I, ISD::FCEIL))
|
2011-12-08 19:32:14 +00:00
|
|
|
return;
|
2012-08-03 21:26:24 +00:00
|
|
|
break;
|
|
|
|
case LibFunc::rint:
|
|
|
|
case LibFunc::rintf:
|
|
|
|
case LibFunc::rintl:
|
2012-08-03 23:29:17 +00:00
|
|
|
if (visitUnaryFloatCall(I, ISD::FRINT))
|
2011-12-08 19:32:14 +00:00
|
|
|
return;
|
2012-08-03 21:26:24 +00:00
|
|
|
break;
|
|
|
|
case LibFunc::trunc:
|
|
|
|
case LibFunc::truncf:
|
|
|
|
case LibFunc::truncl:
|
2012-08-03 23:29:17 +00:00
|
|
|
if (visitUnaryFloatCall(I, ISD::FTRUNC))
|
2011-12-15 00:54:12 +00:00
|
|
|
return;
|
2012-08-03 21:26:24 +00:00
|
|
|
break;
|
|
|
|
case LibFunc::log2:
|
|
|
|
case LibFunc::log2f:
|
|
|
|
case LibFunc::log2l:
|
2012-08-03 23:29:17 +00:00
|
|
|
if (visitUnaryFloatCall(I, ISD::FLOG2))
|
2011-12-15 00:54:12 +00:00
|
|
|
return;
|
2012-08-03 21:26:24 +00:00
|
|
|
break;
|
|
|
|
case LibFunc::exp2:
|
|
|
|
case LibFunc::exp2f:
|
|
|
|
case LibFunc::exp2l:
|
2012-08-03 23:29:17 +00:00
|
|
|
if (visitUnaryFloatCall(I, ISD::FEXP2))
|
2011-12-08 19:32:14 +00:00
|
|
|
return;
|
2012-08-03 21:26:24 +00:00
|
|
|
break;
|
|
|
|
case LibFunc::memcmp:
|
2010-05-29 17:53:24 +00:00
|
|
|
if (visitMemCmpCall(I))
|
|
|
|
return;
|
2012-08-03 21:26:24 +00:00
|
|
|
break;
|
2008-09-03 16:12:24 +00:00
|
|
|
}
|
2010-05-29 17:53:24 +00:00
|
|
|
}
|
2008-09-03 16:12:24 +00:00
|
|
|
}
|
2010-10-16 08:25:21 +00:00
|
|
|
|
2010-05-29 17:53:24 +00:00
|
|
|
SDValue Callee;
|
|
|
|
if (!RenameFn)
|
2010-06-25 09:38:13 +00:00
|
|
|
Callee = getValue(I.getCalledValue());
|
2010-05-29 17:53:24 +00:00
|
|
|
else
|
|
|
|
Callee = DAG.getExternalSymbol(RenameFn, TLI.getPointerTy());
|
|
|
|
|
|
|
|
// Check if we can potentially perform a tail call. More detailed checking will
|
|
|
|
// be done within LowerCallTo, after more information about the call is known.
|
|
|
|
LowerCallTo(&I, Callee, I.isTailCall());
|
|
|
|
}
|
2008-09-03 16:12:24 +00:00
|
|
|
|
2011-03-26 16:35:10 +00:00
|
|
|
namespace {
|
2010-05-29 17:53:24 +00:00
|
|
|
|
2008-09-03 16:12:24 +00:00
|
|
|
/// AsmOperandInfo - This contains information for each constraint that we are
|
|
|
|
/// lowering.
|
2011-03-26 16:35:10 +00:00
|
|
|
class SDISelAsmOperandInfo : public TargetLowering::AsmOperandInfo {
|
2009-02-14 16:06:42 +00:00
|
|
|
public:
|
2008-09-03 16:12:24 +00:00
|
|
|
/// CallOperand - If this is the result output operand or a clobber
|
|
|
|
/// this is null, otherwise it is the incoming operand to the CallInst.
|
|
|
|
/// This gets modified as the asm is processed.
|
|
|
|
SDValue CallOperand;
|
|
|
|
|
|
|
|
/// AssignedRegs - If this is a register or register class operand, this
|
|
|
|
/// contains the set of registers corresponding to the operand.
|
|
|
|
RegsForValue AssignedRegs;
|
2009-01-16 06:53:46 +00:00
|
|
|
|
2010-09-13 18:15:37 +00:00
|
|
|
explicit SDISelAsmOperandInfo(const TargetLowering::AsmOperandInfo &info)
|
2008-09-03 16:12:24 +00:00
|
|
|
: TargetLowering::AsmOperandInfo(info), CallOperand(0,0) {
|
|
|
|
}
|
2009-01-16 06:53:46 +00:00
|
|
|
|
2009-08-10 22:56:29 +00:00
|
|
|
/// getCallOperandValEVT - Return the EVT of the Value* that this operand
|
2008-10-17 17:05:25 +00:00
|
|
|
/// corresponds to. If there is no Value* for this operand, it returns
|
2009-08-11 20:47:22 +00:00
|
|
|
/// MVT::Other.
|
2010-01-01 04:41:22 +00:00
|
|
|
EVT getCallOperandValEVT(LLVMContext &Context,
|
2009-08-13 21:58:54 +00:00
|
|
|
const TargetLowering &TLI,
|
2012-10-08 16:38:25 +00:00
|
|
|
const DataLayout *TD) const {
|
2009-08-11 20:47:22 +00:00
|
|
|
if (CallOperandVal == 0) return MVT::Other;
|
2009-01-16 06:53:46 +00:00
|
|
|
|
2008-10-17 17:05:25 +00:00
|
|
|
if (isa<BasicBlock>(CallOperandVal))
|
|
|
|
return TLI.getPointerTy();
|
2009-01-16 06:53:46 +00:00
|
|
|
|
2011-07-18 04:54:35 +00:00
|
|
|
llvm::Type *OpTy = CallOperandVal->getType();
|
2009-01-16 06:53:46 +00:00
|
|
|
|
2011-05-09 20:04:43 +00:00
|
|
|
// FIXME: code duplicated from TargetLowering::ParseConstraints().
|
2008-10-17 17:05:25 +00:00
|
|
|
// If this is an indirect operand, the operand is a pointer to the
|
|
|
|
// accessed type.
|
2009-12-22 18:34:19 +00:00
|
|
|
if (isIndirect) {
|
2011-07-18 04:54:35 +00:00
|
|
|
llvm::PointerType *PtrTy = dyn_cast<PointerType>(OpTy);
|
2009-12-22 18:34:19 +00:00
|
|
|
if (!PtrTy)
|
2010-04-07 22:58:41 +00:00
|
|
|
report_fatal_error("Indirect operand for inline asm not a pointer!");
|
2009-12-22 18:34:19 +00:00
|
|
|
OpTy = PtrTy->getElementType();
|
|
|
|
}
|
2009-01-16 06:53:46 +00:00
|
|
|
|
2011-05-09 20:04:43 +00:00
|
|
|
// Look for vector wrapped in a struct. e.g. { <16 x i8> }.
|
2011-07-18 04:54:35 +00:00
|
|
|
if (StructType *STy = dyn_cast<StructType>(OpTy))
|
2011-05-09 20:04:43 +00:00
|
|
|
if (STy->getNumElements() == 1)
|
|
|
|
OpTy = STy->getElementType(0);
|
|
|
|
|
2008-10-17 17:05:25 +00:00
|
|
|
// If OpTy is not a single value, it may be a struct/union that we
|
|
|
|
// can tile with integers.
|
|
|
|
if (!OpTy->isSingleValueType() && OpTy->isSized()) {
|
|
|
|
unsigned BitSize = TD->getTypeSizeInBits(OpTy);
|
|
|
|
switch (BitSize) {
|
|
|
|
default: break;
|
|
|
|
case 1:
|
|
|
|
case 8:
|
|
|
|
case 16:
|
|
|
|
case 32:
|
|
|
|
case 64:
|
2008-10-17 19:59:51 +00:00
|
|
|
case 128:
|
2009-08-13 21:58:54 +00:00
|
|
|
OpTy = IntegerType::get(Context, BitSize);
|
2008-10-17 17:05:25 +00:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
2009-01-16 06:53:46 +00:00
|
|
|
|
2008-10-17 17:05:25 +00:00
|
|
|
return TLI.getValueType(OpTy, true);
|
|
|
|
}
|
2008-09-03 16:12:24 +00:00
|
|
|
};
|
2010-05-29 17:53:24 +00:00
|
|
|
|
2010-10-29 17:29:13 +00:00
|
|
|
typedef SmallVector<SDISelAsmOperandInfo,16> SDISelAsmOperandInfoVector;
|
|
|
|
|
2011-03-26 16:35:10 +00:00
|
|
|
} // end anonymous namespace
|
2008-09-03 16:12:24 +00:00
|
|
|
|
|
|
|
/// GetRegistersForValue - Assign registers (virtual or physical) for the
|
|
|
|
/// specified operand. We prefer to assign virtual registers, to allow the
|
2009-12-17 05:07:36 +00:00
|
|
|
/// register allocator to handle the assignment process. However, if the asm
|
|
|
|
/// uses features that we can't model on machineinstrs, we have SDISel do the
|
2008-09-03 16:12:24 +00:00
|
|
|
/// allocation. This produces generally horrible, but correct, code.
|
|
|
|
///
|
|
|
|
/// OpInfo describes the operand.
|
|
|
|
///
|
2011-03-26 16:35:10 +00:00
|
|
|
static void GetRegistersForValue(SelectionDAG &DAG,
|
|
|
|
const TargetLowering &TLI,
|
|
|
|
DebugLoc DL,
|
2012-02-24 14:01:17 +00:00
|
|
|
SDISelAsmOperandInfo &OpInfo) {
|
2011-03-26 16:35:10 +00:00
|
|
|
LLVMContext &Context = *DAG.getContext();
|
2009-08-12 00:36:31 +00:00
|
|
|
|
2008-09-03 16:12:24 +00:00
|
|
|
MachineFunction &MF = DAG.getMachineFunction();
|
|
|
|
SmallVector<unsigned, 4> Regs;
|
2009-01-16 06:53:46 +00:00
|
|
|
|
2008-09-03 16:12:24 +00:00
|
|
|
// If this is a constraint for a single physreg, or a constraint for a
|
|
|
|
// register class, find it.
|
2009-01-16 06:53:46 +00:00
|
|
|
std::pair<unsigned, const TargetRegisterClass*> PhysReg =
|
2008-09-03 16:12:24 +00:00
|
|
|
TLI.getRegForInlineAsmConstraint(OpInfo.ConstraintCode,
|
|
|
|
OpInfo.ConstraintVT);
|
|
|
|
|
|
|
|
unsigned NumRegs = 1;
|
2009-08-11 20:47:22 +00:00
|
|
|
if (OpInfo.ConstraintVT != MVT::Other) {
|
2008-10-21 00:45:36 +00:00
|
|
|
// If this is an FP input in an integer register (or vice versa), insert a bit
|
|
|
|
// cast of the input value. More generally, handle any case where the input
|
|
|
|
// value disagrees with the register class we plan to stick this in.
|
|
|
|
if (OpInfo.Type == InlineAsm::isInput &&
|
|
|
|
PhysReg.second && !PhysReg.second->hasType(OpInfo.ConstraintVT)) {
|
2009-08-10 22:56:29 +00:00
|
|
|
// Try to convert to the first EVT that the reg class contains. If the
|
2008-10-21 00:45:36 +00:00
|
|
|
// types are identical size, use a bitcast to convert (e.g. two differing
|
|
|
|
// vector types).
|
2012-12-11 10:24:48 +00:00
|
|
|
MVT RegVT = *PhysReg.second->vt_begin();
|
2008-10-21 00:45:36 +00:00
|
|
|
if (RegVT.getSizeInBits() == OpInfo.ConstraintVT.getSizeInBits()) {
|
2011-03-26 16:35:10 +00:00
|
|
|
OpInfo.CallOperand = DAG.getNode(ISD::BITCAST, DL,
|
2009-01-30 01:34:22 +00:00
|
|
|
RegVT, OpInfo.CallOperand);
|
2008-10-21 00:45:36 +00:00
|
|
|
OpInfo.ConstraintVT = RegVT;
|
|
|
|
} else if (RegVT.isInteger() && OpInfo.ConstraintVT.isFloatingPoint()) {
|
|
|
|
// If the input is a FP value and we want it in FP registers, do a
|
|
|
|
// bitcast to the corresponding integer type. This turns an f64 value
|
|
|
|
// into i64, which can be passed with two i32 values on a 32-bit
|
|
|
|
// machine.
|
2012-12-11 10:24:48 +00:00
|
|
|
RegVT = MVT::getIntegerVT(OpInfo.ConstraintVT.getSizeInBits());
|
2011-03-26 16:35:10 +00:00
|
|
|
OpInfo.CallOperand = DAG.getNode(ISD::BITCAST, DL,
|
2009-01-30 01:34:22 +00:00
|
|
|
RegVT, OpInfo.CallOperand);
|
2008-10-21 00:45:36 +00:00
|
|
|
OpInfo.ConstraintVT = RegVT;
|
|
|
|
}
|
|
|
|
}
|
2009-01-16 06:53:46 +00:00
|
|
|
|
2009-08-12 00:36:31 +00:00
|
|
|
NumRegs = TLI.getNumRegisters(Context, OpInfo.ConstraintVT);
|
2008-10-21 00:45:36 +00:00
|
|
|
}
|
2009-01-16 06:53:46 +00:00
|
|
|
|
2012-12-11 10:24:48 +00:00
|
|
|
MVT RegVT;
|
2009-08-10 22:56:29 +00:00
|
|
|
EVT ValueVT = OpInfo.ConstraintVT;
|
2008-09-03 16:12:24 +00:00
|
|
|
|
|
|
|
// If this is a constraint for a specific physical register, like {r17},
|
|
|
|
// assign it now.
|
2009-03-24 15:27:37 +00:00
|
|
|
if (unsigned AssignedReg = PhysReg.first) {
|
|
|
|
const TargetRegisterClass *RC = PhysReg.second;
|
2009-08-11 20:47:22 +00:00
|
|
|
if (OpInfo.ConstraintVT == MVT::Other)
|
2009-03-24 15:27:37 +00:00
|
|
|
ValueVT = *RC->vt_begin();
|
2009-01-16 06:53:46 +00:00
|
|
|
|
2008-09-03 16:12:24 +00:00
|
|
|
// Get the actual register value type. This is important, because the user
|
|
|
|
// may have asked for (e.g.) the AX register in i32 type. We need to
|
|
|
|
// remember that AX is actually i16 to get the right extension.
|
2009-03-24 15:27:37 +00:00
|
|
|
RegVT = *RC->vt_begin();
|
2009-01-16 06:53:46 +00:00
|
|
|
|
2008-09-03 16:12:24 +00:00
|
|
|
// This is an explicit reference to a physical register.
|
2009-03-24 15:27:37 +00:00
|
|
|
Regs.push_back(AssignedReg);
|
2008-09-03 16:12:24 +00:00
|
|
|
|
|
|
|
// If this is an expanded reference, add the rest of the regs to Regs.
|
|
|
|
if (NumRegs != 1) {
|
2009-03-24 15:27:37 +00:00
|
|
|
TargetRegisterClass::iterator I = RC->begin();
|
|
|
|
for (; *I != AssignedReg; ++I)
|
|
|
|
assert(I != RC->end() && "Didn't find reg!");
|
2009-01-16 06:53:46 +00:00
|
|
|
|
2008-09-03 16:12:24 +00:00
|
|
|
// Already added the first reg.
|
|
|
|
--NumRegs; ++I;
|
|
|
|
for (; NumRegs; --NumRegs, ++I) {
|
2009-03-24 15:27:37 +00:00
|
|
|
assert(I != RC->end() && "Ran out of registers to allocate!");
|
2008-09-03 16:12:24 +00:00
|
|
|
Regs.push_back(*I);
|
|
|
|
}
|
|
|
|
}
|
2009-12-22 01:25:10 +00:00
|
|
|
|
2010-05-29 17:03:36 +00:00
|
|
|
OpInfo.AssignedRegs = RegsForValue(Regs, RegVT, ValueVT);
|
2008-09-03 16:12:24 +00:00
|
|
|
return;
|
|
|
|
}
|
2009-01-16 06:53:46 +00:00
|
|
|
|
2008-09-03 16:12:24 +00:00
|
|
|
// Otherwise, if this was a reference to an LLVM register class, create vregs
|
|
|
|
// for this reference.
|
2009-03-24 15:25:07 +00:00
|
|
|
if (const TargetRegisterClass *RC = PhysReg.second) {
|
|
|
|
RegVT = *RC->vt_begin();
|
2009-08-11 20:47:22 +00:00
|
|
|
if (OpInfo.ConstraintVT == MVT::Other)
|
2009-03-23 08:01:15 +00:00
|
|
|
ValueVT = RegVT;
|
2009-01-16 06:53:46 +00:00
|
|
|
|
2009-03-23 08:01:15 +00:00
|
|
|
// Create the appropriate number of virtual registers.
|
|
|
|
MachineRegisterInfo &RegInfo = MF.getRegInfo();
|
|
|
|
for (; NumRegs; --NumRegs)
|
2009-03-24 15:25:07 +00:00
|
|
|
Regs.push_back(RegInfo.createVirtualRegister(RC));
|
2009-01-16 06:53:46 +00:00
|
|
|
|
2010-05-29 17:03:36 +00:00
|
|
|
OpInfo.AssignedRegs = RegsForValue(Regs, RegVT, ValueVT);
|
2009-03-23 08:01:15 +00:00
|
|
|
return;
|
2008-09-03 16:12:24 +00:00
|
|
|
}
|
2010-01-01 04:41:22 +00:00
|
|
|
|
2008-09-03 16:12:24 +00:00
|
|
|
// Otherwise, we couldn't allocate enough registers for this.
|
|
|
|
}
|
|
|
|
|
|
|
|
/// visitInlineAsm - Handle a call to an InlineAsm object.
|
|
|
|
///
|
2010-04-15 01:51:59 +00:00
|
|
|
void SelectionDAGBuilder::visitInlineAsm(ImmutableCallSite CS) {
|
|
|
|
const InlineAsm *IA = cast<InlineAsm>(CS.getCalledValue());
|
2008-09-03 16:12:24 +00:00
|
|
|
|
|
|
|
/// ConstraintOperands - Information about all of the constraints.
|
2010-10-29 17:29:13 +00:00
|
|
|
SDISelAsmOperandInfoVector ConstraintOperands;
|
2009-01-16 06:53:46 +00:00
|
|
|
|
2011-05-06 20:52:23 +00:00
|
|
|
TargetLowering::AsmOperandInfoVector
|
|
|
|
TargetConstraints = TLI.ParseConstraints(CS);
|
|
|
|
|
2010-09-13 18:15:37 +00:00
|
|
|
bool hasMemory = false;
|
2010-10-16 08:25:21 +00:00
|
|
|
|
2008-09-03 16:12:24 +00:00
|
|
|
unsigned ArgNo = 0; // ArgNo - The argument of the CallInst.
|
|
|
|
unsigned ResNo = 0; // ResNo - The result number of the next output.
|
2010-09-13 18:15:37 +00:00
|
|
|
for (unsigned i = 0, e = TargetConstraints.size(); i != e; ++i) {
|
|
|
|
ConstraintOperands.push_back(SDISelAsmOperandInfo(TargetConstraints[i]));
|
2008-09-03 16:12:24 +00:00
|
|
|
SDISelAsmOperandInfo &OpInfo = ConstraintOperands.back();
|
2010-10-16 08:25:21 +00:00
|
|
|
|
2009-08-11 20:47:22 +00:00
|
|
|
EVT OpVT = MVT::Other;
|
2008-09-03 16:12:24 +00:00
|
|
|
|
|
|
|
// Compute the value type for each operand.
|
|
|
|
switch (OpInfo.Type) {
|
|
|
|
case InlineAsm::isOutput:
|
|
|
|
// Indirect outputs just consume an argument.
|
|
|
|
if (OpInfo.isIndirect) {
|
2010-04-15 01:51:59 +00:00
|
|
|
OpInfo.CallOperandVal = const_cast<Value *>(CS.getArgument(ArgNo++));
|
2008-09-03 16:12:24 +00:00
|
|
|
break;
|
|
|
|
}
|
2009-01-16 06:53:46 +00:00
|
|
|
|
2008-09-03 16:12:24 +00:00
|
|
|
// The return value of the call is this value. As such, there is no
|
|
|
|
// corresponding argument.
|
2011-09-30 22:19:53 +00:00
|
|
|
assert(!CS.getType()->isVoidTy() && "Bad inline asm!");
|
2011-07-18 04:54:35 +00:00
|
|
|
if (StructType *STy = dyn_cast<StructType>(CS.getType())) {
|
2008-09-03 16:12:24 +00:00
|
|
|
OpVT = TLI.getValueType(STy->getElementType(ResNo));
|
|
|
|
} else {
|
|
|
|
assert(ResNo == 0 && "Asm only has one result!");
|
|
|
|
OpVT = TLI.getValueType(CS.getType());
|
|
|
|
}
|
|
|
|
++ResNo;
|
|
|
|
break;
|
|
|
|
case InlineAsm::isInput:
|
2010-04-15 01:51:59 +00:00
|
|
|
OpInfo.CallOperandVal = const_cast<Value *>(CS.getArgument(ArgNo++));
|
2008-09-03 16:12:24 +00:00
|
|
|
break;
|
|
|
|
case InlineAsm::isClobber:
|
|
|
|
// Nothing to do.
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
// If this is an input or an indirect output, process the call argument.
|
|
|
|
// BasicBlocks are labels, currently appearing only in asm's.
|
|
|
|
if (OpInfo.CallOperandVal) {
|
2010-04-15 01:51:59 +00:00
|
|
|
if (const BasicBlock *BB = dyn_cast<BasicBlock>(OpInfo.CallOperandVal)) {
|
2008-09-03 16:12:24 +00:00
|
|
|
OpInfo.CallOperand = DAG.getBasicBlock(FuncInfo.MBBMap[BB]);
|
2008-10-17 17:05:25 +00:00
|
|
|
} else {
|
2008-09-03 16:12:24 +00:00
|
|
|
OpInfo.CallOperand = getValue(OpInfo.CallOperandVal);
|
|
|
|
}
|
2009-01-16 06:53:46 +00:00
|
|
|
|
2009-08-13 21:58:54 +00:00
|
|
|
OpVT = OpInfo.getCallOperandValEVT(*DAG.getContext(), TLI, TD);
|
2008-09-03 16:12:24 +00:00
|
|
|
}
|
2009-01-16 06:53:46 +00:00
|
|
|
|
2008-09-03 16:12:24 +00:00
|
|
|
OpInfo.ConstraintVT = OpVT;
|
2010-10-16 08:25:21 +00:00
|
|
|
|
2010-09-13 18:15:37 +00:00
|
|
|
// Indirect operands access memory.
|
|
|
|
if (OpInfo.isIndirect)
|
|
|
|
hasMemory = true;
|
|
|
|
else {
|
|
|
|
for (unsigned j = 0, ee = OpInfo.Codes.size(); j != ee; ++j) {
|
2011-05-06 20:52:23 +00:00
|
|
|
TargetLowering::ConstraintType
|
|
|
|
CType = TLI.getConstraintType(OpInfo.Codes[j]);
|
2010-09-13 18:15:37 +00:00
|
|
|
if (CType == TargetLowering::C_Memory) {
|
|
|
|
hasMemory = true;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2008-10-18 18:49:30 +00:00
|
|
|
}
|
2009-01-16 06:53:46 +00:00
|
|
|
|
2010-09-13 18:15:37 +00:00
|
|
|
SDValue Chain, Flag;
|
|
|
|
|
|
|
|
// We won't need to flush pending loads if this asm doesn't touch
|
|
|
|
// memory and is nonvolatile.
|
|
|
|
if (hasMemory || IA->hasSideEffects())
|
|
|
|
Chain = getRoot();
|
|
|
|
else
|
|
|
|
Chain = DAG.getRoot();
|
|
|
|
|
2008-10-18 18:49:30 +00:00
|
|
|
// Second pass over the constraints: compute which constraint option to use
|
|
|
|
// and assign registers to constraints that want a specific physreg.
|
2010-09-13 18:15:37 +00:00
|
|
|
for (unsigned i = 0, e = ConstraintOperands.size(); i != e; ++i) {
|
2008-10-18 18:49:30 +00:00
|
|
|
SDISelAsmOperandInfo &OpInfo = ConstraintOperands[i];
|
2009-01-16 06:53:46 +00:00
|
|
|
|
2010-09-24 22:24:05 +00:00
|
|
|
// If this is an output operand with a matching input operand, look up the
|
|
|
|
// matching input. If their types mismatch, e.g. one is an integer, the
|
|
|
|
// other is floating point, or their sizes are different, flag it as an
|
|
|
|
// error.
|
|
|
|
if (OpInfo.hasMatchingInput()) {
|
|
|
|
SDISelAsmOperandInfo &Input = ConstraintOperands[OpInfo.MatchingInput];
|
2010-10-16 08:25:21 +00:00
|
|
|
|
2010-09-24 22:24:05 +00:00
|
|
|
if (OpInfo.ConstraintVT != Input.ConstraintVT) {
|
2012-07-19 00:04:14 +00:00
|
|
|
std::pair<unsigned, const TargetRegisterClass*> MatchRC =
|
|
|
|
TLI.getRegForInlineAsmConstraint(OpInfo.ConstraintCode,
|
2011-08-23 19:17:21 +00:00
|
|
|
OpInfo.ConstraintVT);
|
2012-07-19 00:04:14 +00:00
|
|
|
std::pair<unsigned, const TargetRegisterClass*> InputRC =
|
|
|
|
TLI.getRegForInlineAsmConstraint(Input.ConstraintCode,
|
2011-08-23 19:17:21 +00:00
|
|
|
Input.ConstraintVT);
|
2010-09-24 22:24:05 +00:00
|
|
|
if ((OpInfo.ConstraintVT.isInteger() !=
|
|
|
|
Input.ConstraintVT.isInteger()) ||
|
2011-07-14 20:13:52 +00:00
|
|
|
(MatchRC.second != InputRC.second)) {
|
2010-09-24 22:24:05 +00:00
|
|
|
report_fatal_error("Unsupported asm: input constraint"
|
|
|
|
" with a matching output constraint of"
|
|
|
|
" incompatible type!");
|
|
|
|
}
|
|
|
|
Input.ConstraintVT = OpInfo.ConstraintVT;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2008-09-03 16:12:24 +00:00
|
|
|
// Compute the constraint code and ConstraintType to use.
|
2010-06-25 21:55:36 +00:00
|
|
|
TLI.ComputeConstraintToUse(OpInfo, OpInfo.CallOperand, &DAG);
|
2008-09-03 16:12:24 +00:00
|
|
|
|
|
|
|
// If this is a memory input, and if the operand is not indirect, do what we
|
|
|
|
// need to provide an address for the memory input.
|
|
|
|
if (OpInfo.ConstraintType == TargetLowering::C_Memory &&
|
|
|
|
!OpInfo.isIndirect) {
|
2011-05-06 20:52:23 +00:00
|
|
|
assert((OpInfo.isMultipleAlternative ||
|
|
|
|
(OpInfo.Type == InlineAsm::isInput)) &&
|
2008-09-03 16:12:24 +00:00
|
|
|
"Can only indirectify direct input operands!");
|
2009-01-16 06:53:46 +00:00
|
|
|
|
2008-09-03 16:12:24 +00:00
|
|
|
// Memory operands really want the address of the value. If we don't have
|
|
|
|
// an indirect input, put it in the constpool if we can, otherwise spill
|
|
|
|
// it to a stack slot.
|
2011-06-03 17:21:23 +00:00
|
|
|
// TODO: This isn't quite right. We need to handle these according to
|
|
|
|
// the addressing mode that the constraint wants. Also, this may take
|
|
|
|
// an additional register for the computation and we don't want that
|
|
|
|
// either.
|
2011-06-08 23:55:35 +00:00
|
|
|
|
2008-09-03 16:12:24 +00:00
|
|
|
// If the operand is a float, integer, or vector constant, spill to a
|
|
|
|
// constant pool entry to get its address.
|
2010-04-15 01:51:59 +00:00
|
|
|
const Value *OpVal = OpInfo.CallOperandVal;
|
2008-09-03 16:12:24 +00:00
|
|
|
if (isa<ConstantFP>(OpVal) || isa<ConstantInt>(OpVal) ||
|
2012-01-27 03:08:05 +00:00
|
|
|
isa<ConstantVector>(OpVal) || isa<ConstantDataVector>(OpVal)) {
|
2008-09-03 16:12:24 +00:00
|
|
|
OpInfo.CallOperand = DAG.getConstantPool(cast<Constant>(OpVal),
|
|
|
|
TLI.getPointerTy());
|
|
|
|
} else {
|
|
|
|
// Otherwise, create a stack slot and emit a store to it before the
|
|
|
|
// asm.
|
2011-07-18 04:54:35 +00:00
|
|
|
Type *Ty = OpVal->getType();
|
2012-10-08 16:38:25 +00:00
|
|
|
uint64_t TySize = TLI.getDataLayout()->getTypeAllocSize(Ty);
|
|
|
|
unsigned Align = TLI.getDataLayout()->getPrefTypeAlignment(Ty);
|
2008-09-03 16:12:24 +00:00
|
|
|
MachineFunction &MF = DAG.getMachineFunction();
|
2009-11-12 20:49:22 +00:00
|
|
|
int SSFI = MF.getFrameInfo()->CreateStackObject(TySize, Align, false);
|
2008-09-03 16:12:24 +00:00
|
|
|
SDValue StackSlot = DAG.getFrameIndex(SSFI, TLI.getPointerTy());
|
2009-01-31 02:22:37 +00:00
|
|
|
Chain = DAG.getStore(Chain, getCurDebugLoc(),
|
2010-09-21 16:36:31 +00:00
|
|
|
OpInfo.CallOperand, StackSlot,
|
|
|
|
MachinePointerInfo::getFixedStack(SSFI),
|
2010-02-15 17:00:31 +00:00
|
|
|
false, false, 0);
|
2008-09-03 16:12:24 +00:00
|
|
|
OpInfo.CallOperand = StackSlot;
|
|
|
|
}
|
2009-01-16 06:53:46 +00:00
|
|
|
|
2008-09-03 16:12:24 +00:00
|
|
|
// There is no longer a Value* corresponding to this operand.
|
|
|
|
OpInfo.CallOperandVal = 0;
|
2009-12-22 01:25:10 +00:00
|
|
|
|
2008-09-03 16:12:24 +00:00
|
|
|
// It is now an indirect operand.
|
|
|
|
OpInfo.isIndirect = true;
|
|
|
|
}
|
2009-01-16 06:53:46 +00:00
|
|
|
|
2008-09-03 16:12:24 +00:00
|
|
|
// If this constraint is for a specific register, allocate it before
|
|
|
|
// anything else.
|
|
|
|
if (OpInfo.ConstraintType == TargetLowering::C_Register)
|
2012-02-24 14:01:17 +00:00
|
|
|
GetRegistersForValue(DAG, TLI, getCurDebugLoc(), OpInfo);
|
2008-09-03 16:12:24 +00:00
|
|
|
}
|
2009-01-16 06:53:46 +00:00
|
|
|
|
2008-09-03 16:12:24 +00:00
|
|
|
// Second pass - Loop over all of the operands, assigning virtual or physregs
|
2008-10-17 16:21:11 +00:00
|
|
|
// to register class operands.
|
2008-09-03 16:12:24 +00:00
|
|
|
for (unsigned i = 0, e = ConstraintOperands.size(); i != e; ++i) {
|
|
|
|
SDISelAsmOperandInfo &OpInfo = ConstraintOperands[i];
|
2009-01-16 06:53:46 +00:00
|
|
|
|
2008-09-03 16:12:24 +00:00
|
|
|
// C_Register operands have already been allocated, Other/Memory don't need
|
|
|
|
// to be.
|
|
|
|
if (OpInfo.ConstraintType == TargetLowering::C_RegisterClass)
|
2012-02-24 14:01:17 +00:00
|
|
|
GetRegistersForValue(DAG, TLI, getCurDebugLoc(), OpInfo);
|
2009-01-16 06:53:46 +00:00
|
|
|
}
|
|
|
|
|
2008-09-03 16:12:24 +00:00
|
|
|
// AsmNodeOperands - The operands for the ISD::INLINEASM node.
|
|
|
|
std::vector<SDValue> AsmNodeOperands;
|
|
|
|
AsmNodeOperands.push_back(SDValue()); // reserve space for input chain
|
|
|
|
AsmNodeOperands.push_back(
|
2010-01-04 21:00:54 +00:00
|
|
|
DAG.getTargetExternalSymbol(IA->getAsmString().c_str(),
|
|
|
|
TLI.getPointerTy()));
|
2009-01-16 06:53:46 +00:00
|
|
|
|
2010-04-07 05:20:54 +00:00
|
|
|
// If we have a !srcloc metadata node associated with it, we want to attach
|
|
|
|
// this to the ultimately generated inline asm machineinstr. To do this, we
|
|
|
|
// pass in the third operand as this (potentially null) inline asm MDNode.
|
|
|
|
const MDNode *SrcLoc = CS.getInstruction()->getMetadata("srcloc");
|
|
|
|
AsmNodeOperands.push_back(DAG.getMDNode(SrcLoc));
|
2009-01-16 06:53:46 +00:00
|
|
|
|
2012-10-30 19:11:54 +00:00
|
|
|
// Remember the HasSideEffect, AlignStack, AsmDialect, MayLoad and MayStore
|
|
|
|
// bits as operand 3.
|
2011-01-07 23:50:32 +00:00
|
|
|
unsigned ExtraInfo = 0;
|
|
|
|
if (IA->hasSideEffects())
|
|
|
|
ExtraInfo |= InlineAsm::Extra_HasSideEffects;
|
|
|
|
if (IA->isAlignStack())
|
|
|
|
ExtraInfo |= InlineAsm::Extra_IsAlignStack;
|
2012-09-05 22:17:43 +00:00
|
|
|
// Set the asm dialect.
|
2012-09-05 22:40:13 +00:00
|
|
|
ExtraInfo |= IA->getDialect() * InlineAsm::Extra_AsmDialect;
|
2012-10-30 19:11:54 +00:00
|
|
|
|
|
|
|
// Determine if this InlineAsm MayLoad or MayStore based on the constraints.
|
|
|
|
for (unsigned i = 0, e = TargetConstraints.size(); i != e; ++i) {
|
|
|
|
TargetLowering::AsmOperandInfo &OpInfo = TargetConstraints[i];
|
|
|
|
|
|
|
|
// Compute the constraint code and ConstraintType to use.
|
|
|
|
TLI.ComputeConstraintToUse(OpInfo, SDValue());
|
|
|
|
|
2012-10-30 20:01:12 +00:00
|
|
|
// Ideally, we would only check against memory constraints. However, the
|
|
|
|
// meaning of an 'other' constraint can be target-specific and we can't easily
|
|
|
|
// reason about it. Therefore, be conservative and set MayLoad/MayStore
|
|
|
|
// for 'other' constraints as well.
|
2012-10-30 19:11:54 +00:00
|
|
|
if (OpInfo.ConstraintType == TargetLowering::C_Memory ||
|
|
|
|
OpInfo.ConstraintType == TargetLowering::C_Other) {
|
|
|
|
if (OpInfo.Type == InlineAsm::isInput)
|
|
|
|
ExtraInfo |= InlineAsm::Extra_MayLoad;
|
|
|
|
else if (OpInfo.Type == InlineAsm::isOutput)
|
|
|
|
ExtraInfo |= InlineAsm::Extra_MayStore;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2011-01-07 23:50:32 +00:00
|
|
|
AsmNodeOperands.push_back(DAG.getTargetConstant(ExtraInfo,
|
|
|
|
TLI.getPointerTy()));

  // Loop over all of the inputs, copying the operand values into the
  // appropriate registers and processing the output regs.
  RegsForValue RetValRegs;

  // IndirectStoresToEmit - The set of stores to emit after the inline asm node.
  std::vector<std::pair<RegsForValue, Value*> > IndirectStoresToEmit;

  for (unsigned i = 0, e = ConstraintOperands.size(); i != e; ++i) {
    SDISelAsmOperandInfo &OpInfo = ConstraintOperands[i];

    switch (OpInfo.Type) {
    case InlineAsm::isOutput: {
      if (OpInfo.ConstraintType != TargetLowering::C_RegisterClass &&
          OpInfo.ConstraintType != TargetLowering::C_Register) {
        // Memory output, or 'other' output (e.g. 'X' constraint).
        assert(OpInfo.isIndirect && "Memory output must be indirect operand");

        // Add information to the INLINEASM node to know about this output.
        unsigned OpFlags = InlineAsm::getFlagWord(InlineAsm::Kind_Mem, 1);
        AsmNodeOperands.push_back(DAG.getTargetConstant(OpFlags,
                                                        TLI.getPointerTy()));
        AsmNodeOperands.push_back(OpInfo.CallOperand);
        break;
      }

      // Otherwise, this is a register or register class output.

      // Copy the output from the appropriate register.  Find a register that
      // we can use.
      if (OpInfo.AssignedRegs.Regs.empty()) {
        LLVMContext &Ctx = *DAG.getContext();
        Ctx.emitError(CS.getInstruction(),
                      "couldn't allocate output register for constraint '" +
                      Twine(OpInfo.ConstraintCode) + "'");
        break;
      }

      // If this is an indirect operand, store through the pointer after the
      // asm.
      if (OpInfo.isIndirect) {
        IndirectStoresToEmit.push_back(std::make_pair(OpInfo.AssignedRegs,
                                                      OpInfo.CallOperandVal));
      } else {
        // This is the result value of the call.
        assert(!CS.getType()->isVoidTy() && "Bad inline asm!");
        // Concatenate this output onto the outputs list.
        RetValRegs.append(OpInfo.AssignedRegs);
      }

      // Add information to the INLINEASM node to know that this register is
      // set.
      OpInfo.AssignedRegs.AddInlineAsmOperands(OpInfo.isEarlyClobber ?
                                                 InlineAsm::Kind_RegDefEarlyClobber :
                                                 InlineAsm::Kind_RegDef,
                                               false,
                                               0,
                                               DAG,
                                               AsmNodeOperands);
      break;
    }
    case InlineAsm::isInput: {
      SDValue InOperandVal = OpInfo.CallOperand;

      if (OpInfo.isMatchingInputConstraint()) {   // Matching constraint?
        // If this is required to match an output register we have already set,
        // just use its register.
        unsigned OperandNo = OpInfo.getMatchedOperand();

        // Scan until we find the definition we already emitted of this
        // operand.  When we find it, create a RegsForValue operand.
        unsigned CurOp = InlineAsm::Op_FirstOperand;
        for (; OperandNo; --OperandNo) {
          // Advance to the next operand.
          unsigned OpFlag =
            cast<ConstantSDNode>(AsmNodeOperands[CurOp])->getZExtValue();
          assert((InlineAsm::isRegDefKind(OpFlag) ||
                  InlineAsm::isRegDefEarlyClobberKind(OpFlag) ||
                  InlineAsm::isMemKind(OpFlag)) && "Skipped past definitions?");
          CurOp += InlineAsm::getNumOperandRegisters(OpFlag)+1;
        }

        unsigned OpFlag =
          cast<ConstantSDNode>(AsmNodeOperands[CurOp])->getZExtValue();
        if (InlineAsm::isRegDefKind(OpFlag) ||
            InlineAsm::isRegDefEarlyClobberKind(OpFlag)) {
          // Add (OpFlag&0xffff)>>3 registers to MatchedRegs.
          if (OpInfo.isIndirect) {
            // This happens on gcc/testsuite/gcc.dg/pr8788-1.c
            LLVMContext &Ctx = *DAG.getContext();
            Ctx.emitError(CS.getInstruction(), "inline asm not supported yet:"
                          " don't know how to handle tied "
                          "indirect register inputs");
          }

          RegsForValue MatchedRegs;
          MatchedRegs.ValueVTs.push_back(InOperandVal.getValueType());
          MVT RegVT = AsmNodeOperands[CurOp+1].getSimpleValueType();
          MatchedRegs.RegVTs.push_back(RegVT);
          MachineRegisterInfo &RegInfo = DAG.getMachineFunction().getRegInfo();
          for (unsigned i = 0, e = InlineAsm::getNumOperandRegisters(OpFlag);
               i != e; ++i)
            MatchedRegs.Regs.push_back
              (RegInfo.createVirtualRegister(TLI.getRegClassFor(RegVT)));

          // Use the produced MatchedRegs object to copy the operand value into
          // the newly created virtual registers.
          MatchedRegs.getCopyToRegs(InOperandVal, DAG, getCurDebugLoc(),
                                    Chain, &Flag, CS.getInstruction());
          MatchedRegs.AddInlineAsmOperands(InlineAsm::Kind_RegUse,
                                           true, OpInfo.getMatchedOperand(),
                                           DAG, AsmNodeOperands);
          break;
        }

        assert(InlineAsm::isMemKind(OpFlag) && "Unknown matching constraint!");
        assert(InlineAsm::getNumOperandRegisters(OpFlag) == 1 &&
               "Unexpected number of operands");
        // Add information to the INLINEASM node to know about this input.
        // See InlineAsm.h isUseOperandTiedToDef.
        OpFlag = InlineAsm::getFlagWordForMatchingOp(OpFlag,
                                                     OpInfo.getMatchedOperand());
        AsmNodeOperands.push_back(DAG.getTargetConstant(OpFlag,
                                                        TLI.getPointerTy()));
        AsmNodeOperands.push_back(AsmNodeOperands[CurOp+1]);
        break;
      }

      // Treat indirect 'X' constraint as memory.
      if (OpInfo.ConstraintType == TargetLowering::C_Other &&
          OpInfo.isIndirect)
        OpInfo.ConstraintType = TargetLowering::C_Memory;

      if (OpInfo.ConstraintType == TargetLowering::C_Other) {
        std::vector<SDValue> Ops;
        TLI.LowerAsmOperandForConstraint(InOperandVal, OpInfo.ConstraintCode,
                                         Ops, DAG);
        if (Ops.empty()) {
          LLVMContext &Ctx = *DAG.getContext();
          Ctx.emitError(CS.getInstruction(),
                        "invalid operand for inline asm constraint '" +
                        Twine(OpInfo.ConstraintCode) + "'");
          break;
        }

        // Add information to the INLINEASM node to know about this input.
        unsigned ResOpType =
          InlineAsm::getFlagWord(InlineAsm::Kind_Imm, Ops.size());
        AsmNodeOperands.push_back(DAG.getTargetConstant(ResOpType,
                                                        TLI.getPointerTy()));
        AsmNodeOperands.insert(AsmNodeOperands.end(), Ops.begin(), Ops.end());
        break;
      }

      if (OpInfo.ConstraintType == TargetLowering::C_Memory) {
        assert(OpInfo.isIndirect && "Operand must be indirect to be a mem!");
        assert(InOperandVal.getValueType() == TLI.getPointerTy() &&
               "Memory operands expect pointer values");

        // Add information to the INLINEASM node to know about this input.
        unsigned ResOpType = InlineAsm::getFlagWord(InlineAsm::Kind_Mem, 1);
        AsmNodeOperands.push_back(DAG.getTargetConstant(ResOpType,
                                                        TLI.getPointerTy()));
        AsmNodeOperands.push_back(InOperandVal);
        break;
      }

      assert((OpInfo.ConstraintType == TargetLowering::C_RegisterClass ||
              OpInfo.ConstraintType == TargetLowering::C_Register) &&
             "Unknown constraint type!");

      // TODO: Support this.
      if (OpInfo.isIndirect) {
        LLVMContext &Ctx = *DAG.getContext();
        Ctx.emitError(CS.getInstruction(),
                      "Don't know how to handle indirect register inputs yet "
                      "for constraint '" + Twine(OpInfo.ConstraintCode) + "'");
        break;
      }

      // Copy the input into the appropriate registers.
      if (OpInfo.AssignedRegs.Regs.empty()) {
        LLVMContext &Ctx = *DAG.getContext();
        Ctx.emitError(CS.getInstruction(),
                      "couldn't allocate input reg for constraint '" +
                      Twine(OpInfo.ConstraintCode) + "'");
        break;
      }

      OpInfo.AssignedRegs.getCopyToRegs(InOperandVal, DAG, getCurDebugLoc(),
                                        Chain, &Flag, CS.getInstruction());

      OpInfo.AssignedRegs.AddInlineAsmOperands(InlineAsm::Kind_RegUse, false, 0,
                                               DAG, AsmNodeOperands);
      break;
    }
    case InlineAsm::isClobber: {
      // Add the clobbered value to the operand list, so that the register
      // allocator is aware that the physreg got clobbered.
      if (!OpInfo.AssignedRegs.Regs.empty())
        OpInfo.AssignedRegs.AddInlineAsmOperands(InlineAsm::Kind_Clobber,
                                                 false, 0, DAG,
                                                 AsmNodeOperands);
      break;
    }
    }
  }

  // Finish up input operands.  Set the input chain and add the flag last.
  AsmNodeOperands[InlineAsm::Op_InputChain] = Chain;
  if (Flag.getNode()) AsmNodeOperands.push_back(Flag);

  Chain = DAG.getNode(ISD::INLINEASM, getCurDebugLoc(),
                      DAG.getVTList(MVT::Other, MVT::Glue),
                      &AsmNodeOperands[0], AsmNodeOperands.size());
  Flag = Chain.getValue(1);

  // If this asm returns a register value, copy the result from that register
  // and set it as the value of the call.
  if (!RetValRegs.Regs.empty()) {
    SDValue Val = RetValRegs.getCopyFromRegs(DAG, FuncInfo, getCurDebugLoc(),
                                             Chain, &Flag, CS.getInstruction());

    // FIXME: Why don't we do this for inline asms with MRVs?
    if (CS.getType()->isSingleValueType() && CS.getType()->isSized()) {
      EVT ResultType = TLI.getValueType(CS.getType());

      // If any of the results of the inline asm is a vector, it may have the
      // wrong width/num elts.  This can happen for register classes that can
      // contain multiple different value types.  The preg or vreg allocated
      // may not have the same VT as was expected.  Convert it to the right
      // type with bit_convert.
      if (ResultType != Val.getValueType() && Val.getValueType().isVector()) {
        Val = DAG.getNode(ISD::BITCAST, getCurDebugLoc(),
                          ResultType, Val);

      } else if (ResultType != Val.getValueType() &&
                 ResultType.isInteger() && Val.getValueType().isInteger()) {
        // If a result value was tied to an input value, the computed result
        // may have a wider width than the expected result.  Extract the
        // relevant portion.
        Val = DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(), ResultType, Val);
      }

      assert(ResultType == Val.getValueType() && "Asm result value mismatch!");
    }

    setValue(CS.getInstruction(), Val);
    // Don't need to use this as a chain in this case.
    if (!IA->hasSideEffects() && !hasMemory && IndirectStoresToEmit.empty())
      return;
  }

  std::vector<std::pair<SDValue, const Value *> > StoresToEmit;

  // Process indirect outputs, first output all of the flagged copies out of
  // physregs.
  for (unsigned i = 0, e = IndirectStoresToEmit.size(); i != e; ++i) {
    RegsForValue &OutRegs = IndirectStoresToEmit[i].first;
    const Value *Ptr = IndirectStoresToEmit[i].second;
    SDValue OutVal = OutRegs.getCopyFromRegs(DAG, FuncInfo, getCurDebugLoc(),
                                             Chain, &Flag, IA);
    StoresToEmit.push_back(std::make_pair(OutVal, Ptr));
  }

  // Emit the non-flagged stores from the physregs.
  SmallVector<SDValue, 8> OutChains;
  for (unsigned i = 0, e = StoresToEmit.size(); i != e; ++i) {
    SDValue Val = DAG.getStore(Chain, getCurDebugLoc(),
                               StoresToEmit[i].first,
                               getValue(StoresToEmit[i].second),
                               MachinePointerInfo(StoresToEmit[i].second),
                               false, false, 0);
    OutChains.push_back(Val);
  }

  if (!OutChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, getCurDebugLoc(), MVT::Other,
                        &OutChains[0], OutChains.size());

  DAG.setRoot(Chain);
}

void SelectionDAGBuilder::visitVAStart(const CallInst &I) {
  DAG.setRoot(DAG.getNode(ISD::VASTART, getCurDebugLoc(),
                          MVT::Other, getRoot(),
                          getValue(I.getArgOperand(0)),
                          DAG.getSrcValue(I.getArgOperand(0))));
}

void SelectionDAGBuilder::visitVAArg(const VAArgInst &I) {
  const DataLayout &TD = *TLI.getDataLayout();
  SDValue V = DAG.getVAArg(TLI.getValueType(I.getType()), getCurDebugLoc(),
                           getRoot(), getValue(I.getOperand(0)),
                           DAG.getSrcValue(I.getOperand(0)),
                           TD.getABITypeAlignment(I.getType()));
  setValue(&I, V);
  DAG.setRoot(V.getValue(1));
}

void SelectionDAGBuilder::visitVAEnd(const CallInst &I) {
  DAG.setRoot(DAG.getNode(ISD::VAEND, getCurDebugLoc(),
                          MVT::Other, getRoot(),
                          getValue(I.getArgOperand(0)),
                          DAG.getSrcValue(I.getArgOperand(0))));
}

void SelectionDAGBuilder::visitVACopy(const CallInst &I) {
  DAG.setRoot(DAG.getNode(ISD::VACOPY, getCurDebugLoc(),
                          MVT::Other, getRoot(),
                          getValue(I.getArgOperand(0)),
                          getValue(I.getArgOperand(1)),
                          DAG.getSrcValue(I.getArgOperand(0)),
                          DAG.getSrcValue(I.getArgOperand(1))));
}

/// TargetLowering::LowerCallTo - This is the default LowerCallTo
/// implementation, which just calls LowerCall.  FIXME: When all targets are
/// migrated to using LowerCall, this hook should be integrated into SDISel.
std::pair<SDValue, SDValue>
TargetLowering::LowerCallTo(TargetLowering::CallLoweringInfo &CLI) const {
  // Handle all of the outgoing arguments.
  CLI.Outs.clear();
  CLI.OutVals.clear();
  ArgListTy &Args = CLI.Args;
  for (unsigned i = 0, e = Args.size(); i != e; ++i) {
    SmallVector<EVT, 4> ValueVTs;
    ComputeValueVTs(*this, Args[i].Ty, ValueVTs);
    for (unsigned Value = 0, NumValues = ValueVTs.size();
         Value != NumValues; ++Value) {
      EVT VT = ValueVTs[Value];
      Type *ArgTy = VT.getTypeForEVT(CLI.RetTy->getContext());
      SDValue Op = SDValue(Args[i].Node.getNode(),
                           Args[i].Node.getResNo() + Value);
      ISD::ArgFlagsTy Flags;
      unsigned OriginalAlignment =
        getDataLayout()->getABITypeAlignment(ArgTy);

      if (Args[i].isZExt)
        Flags.setZExt();
      if (Args[i].isSExt)
        Flags.setSExt();
      if (Args[i].isInReg)
        Flags.setInReg();
      if (Args[i].isSRet)
        Flags.setSRet();
      if (Args[i].isByVal) {
        Flags.setByVal();
        PointerType *Ty = cast<PointerType>(Args[i].Ty);
        Type *ElementTy = Ty->getElementType();
        Flags.setByValSize(getDataLayout()->getTypeAllocSize(ElementTy));
        // For ByVal, alignment should come from FE.  BE will guess if this
        // info is not there but there are cases it cannot get right.
        unsigned FrameAlign;
        if (Args[i].Alignment)
          FrameAlign = Args[i].Alignment;
        else
          FrameAlign = getByValTypeAlignment(ElementTy);
        Flags.setByValAlign(FrameAlign);
      }
      if (Args[i].isNest)
        Flags.setNest();
      Flags.setOrigAlign(OriginalAlignment);

      MVT PartVT = getRegisterType(CLI.RetTy->getContext(), VT);
      unsigned NumParts = getNumRegisters(CLI.RetTy->getContext(), VT);
      SmallVector<SDValue, 4> Parts(NumParts);
      ISD::NodeType ExtendKind = ISD::ANY_EXTEND;

      if (Args[i].isSExt)
        ExtendKind = ISD::SIGN_EXTEND;
      else if (Args[i].isZExt)
        ExtendKind = ISD::ZERO_EXTEND;

      getCopyToParts(CLI.DAG, CLI.DL, Op, &Parts[0], NumParts,
                     PartVT, CLI.CS ? CLI.CS->getInstruction() : 0, ExtendKind);

      for (unsigned j = 0; j != NumParts; ++j) {
        // if it isn't first piece, alignment must be 1
        ISD::OutputArg MyFlags(Flags, Parts[j].getValueType(),
                               i < CLI.NumFixedArgs,
                               i, j*Parts[j].getValueType().getStoreSize());
        if (NumParts > 1 && j == 0)
          MyFlags.Flags.setSplit();
        else if (j != 0)
          MyFlags.Flags.setOrigAlign(1);

        CLI.Outs.push_back(MyFlags);
        CLI.OutVals.push_back(Parts[j]);
      }
    }
  }

  // Handle the incoming return values from the call.
  CLI.Ins.clear();
  SmallVector<EVT, 4> RetTys;
  ComputeValueVTs(*this, CLI.RetTy, RetTys);
  for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
    EVT VT = RetTys[I];
    MVT RegisterVT = getRegisterType(CLI.RetTy->getContext(), VT);
    unsigned NumRegs = getNumRegisters(CLI.RetTy->getContext(), VT);
    for (unsigned i = 0; i != NumRegs; ++i) {
      ISD::InputArg MyFlags;
      MyFlags.VT = RegisterVT;
      MyFlags.Used = CLI.IsReturnValueUsed;
      if (CLI.RetSExt)
        MyFlags.Flags.setSExt();
      if (CLI.RetZExt)
        MyFlags.Flags.setZExt();
      if (CLI.IsInReg)
        MyFlags.Flags.setInReg();
      CLI.Ins.push_back(MyFlags);
    }
  }

  SmallVector<SDValue, 4> InVals;
  CLI.Chain = LowerCall(CLI, InVals);

  // Verify that the target's LowerCall behaved as expected.
  assert(CLI.Chain.getNode() && CLI.Chain.getValueType() == MVT::Other &&
         "LowerCall didn't return a valid chain!");
  assert((!CLI.IsTailCall || InVals.empty()) &&
         "LowerCall emitted a return value for a tail call!");
  assert((CLI.IsTailCall || InVals.size() == CLI.Ins.size()) &&
         "LowerCall didn't emit the correct number of values!");

  // For a tail call, the return value is merely live-out and there aren't
  // any nodes in the DAG representing it. Return a special value to
  // indicate that a tail call has been emitted and no more Instructions
  // should be processed in the current block.
  if (CLI.IsTailCall) {
    CLI.DAG.setRoot(CLI.Chain);
    return std::make_pair(SDValue(), SDValue());
  }
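
  // Sanity-check every value LowerCall handed back; the DEBUG() block below
  // compiles away entirely in release (NDEBUG) builds.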
  DEBUG(for (unsigned i = 0, e = CLI.Ins.size(); i != e; ++i) {
          assert(InVals[i].getNode() &&
                 "LowerCall emitted a null value!");
          assert(EVT(CLI.Ins[i].VT) == InVals[i].getValueType() &&
                 "LowerCall emitted a value with the wrong type!");
        });

  // Collect the legal value parts into potentially illegal values
  // that correspond to the original function's return values.
  ISD::NodeType AssertOp = ISD::DELETED_NODE;
  if (CLI.RetSExt)
    AssertOp = ISD::AssertSext;
  else if (CLI.RetZExt)
    AssertOp = ISD::AssertZext;
  SmallVector<SDValue, 4> ReturnValues;
  unsigned CurReg = 0;
  for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
    EVT VT = RetTys[I];
    MVT RegisterVT = getRegisterType(CLI.RetTy->getContext(), VT);
    unsigned NumRegs = getNumRegisters(CLI.RetTy->getContext(), VT);

    ReturnValues.push_back(getCopyFromParts(CLI.DAG, CLI.DL, &InVals[CurReg],
                                            NumRegs, RegisterVT, VT, NULL,
                                            AssertOp));
    CurReg += NumRegs;
  }

  // For a function returning void, there is no return value. We can't create
  // such a node, so we just return a null return value in that case.  Nothing
  // will actually look at the value.
  if (ReturnValues.empty())
    return std::make_pair(SDValue(), CLI.Chain);

  SDValue Res = CLI.DAG.getNode(ISD::MERGE_VALUES, CLI.DL,
                                CLI.DAG.getVTList(&RetTys[0], RetTys.size()),
                                &ReturnValues[0], ReturnValues.size());
  return std::make_pair(Res, CLI.Chain);
}
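
/// LowerOperationWrapper - Default wrapper around LowerOperation: forward the
/// node to LowerOperation and, if a replacement node is produced, record it as
/// the single result.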
void TargetLowering::LowerOperationWrapper(SDNode *N,
                                           SmallVectorImpl<SDValue> &Results,
                                           SelectionDAG &DAG) const {
  SDValue Res = LowerOperation(SDValue(N, 0), DAG);
  if (Res.getNode())
    Results.push_back(Res);
}

SDValue TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
  llvm_unreachable("LowerOperation not implemented for this target!");
}
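
/// CopyValueToVirtualRegister - Copy the value V into virtual register Reg and
/// queue the copy on PendingExports so it is emitted before the current
/// block's terminator.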
void
SelectionDAGBuilder::CopyValueToVirtualRegister(const Value *V, unsigned Reg) {
  SDValue Op = getNonRegisterValue(V);
  assert((Op.getOpcode() != ISD::CopyFromReg ||
          cast<RegisterSDNode>(Op.getOperand(1))->getReg() != Reg) &&
         "Copy from a reg to the same reg!");
  assert(!TargetRegisterInfo::isPhysicalRegister(Reg) && "Is a physreg");

  RegsForValue RFV(V->getContext(), TLI, Reg, V->getType());
  SDValue Chain = DAG.getEntryNode();
  RFV.getCopyToRegs(Op, DAG, getCurDebugLoc(), Chain, 0, V);
  PendingExports.push_back(Chain);
}

#include "llvm/CodeGen/SelectionDAGISel.h"

/// isOnlyUsedInEntryBlock - If the specified argument is only used in the
/// entry block, return true. This includes arguments used by switches, since
/// the switch may expand into multiple basic blocks.
static bool isOnlyUsedInEntryBlock(const Argument *A, bool FastISel) {
  // With FastISel active, we may be splitting blocks, so force creation
  // of virtual registers for all non-dead arguments.
  if (FastISel)
    return A->use_empty();

  const BasicBlock *Entry = A->getParent()->begin();
  for (Value::const_use_iterator UI = A->use_begin(), E = A->use_end();
       UI != E; ++UI) {
    const User *U = *UI;
    if (cast<Instruction>(U)->getParent() != Entry || isa<SwitchInst>(U))
      return false;  // Use not in entry block.
  }
  return true;
}
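
/// LowerArguments - Lower the formal arguments of the current function into
/// DAG nodes; LLVMBB is expected to be the function's entry block.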
void SelectionDAGISel::LowerArguments(const BasicBlock *LLVMBB) {
  // If this is the entry block, emit arguments.
  const Function &F = *LLVMBB->getParent();
  SelectionDAG &DAG = SDB->DAG;
  DebugLoc dl = SDB->getCurDebugLoc();
  const DataLayout *TD = TLI.getDataLayout();
  SmallVector<ISD::InputArg, 16> Ins;

  // Check whether the function can return without sret-demotion.
  SmallVector<ISD::OutputArg, 4> Outs;
  GetReturnInfo(F.getReturnType(), F.getAttributes().getRetAttributes(),
                Outs, TLI);

  if (!FuncInfo->CanLowerReturn) {
    // Put in an sret pointer parameter before all the other parameters.
    SmallVector<EVT, 1> ValueVTs;
    ComputeValueVTs(TLI, PointerType::getUnqual(F.getReturnType()), ValueVTs);

    // NOTE: Assuming that a pointer will never break down to more than one VT
    // or one register.
    ISD::ArgFlagsTy Flags;
    Flags.setSRet();
    MVT RegisterVT = TLI.getRegisterType(*DAG.getContext(), ValueVTs[0]);
    ISD::InputArg RetArg(Flags, RegisterVT, true, 0, 0);
    Ins.push_back(RetArg);
  }

  // Set up the incoming argument description vector.
  unsigned Idx = 1;
  for (Function::const_arg_iterator I = F.arg_begin(), E = F.arg_end();
       I != E; ++I, ++Idx) {
    SmallVector<EVT, 4> ValueVTs;
    ComputeValueVTs(TLI, I->getType(), ValueVTs);
    bool isArgValueUsed = !I->use_empty();
    for (unsigned Value = 0, NumValues = ValueVTs.size();
         Value != NumValues; ++Value) {
      EVT VT = ValueVTs[Value];
      Type *ArgTy = VT.getTypeForEVT(*DAG.getContext());
      ISD::ArgFlagsTy Flags;
      unsigned OriginalAlignment =
        TD->getABITypeAlignment(ArgTy);
2012-10-09 07:45:08 +00:00
|
|
|
if (F.getParamAttributes(Idx).hasAttribute(Attributes::ZExt))
|
Major calling convention code refactoring.
Instead of awkwardly encoding calling-convention information with ISD::CALL,
ISD::FORMAL_ARGUMENTS, ISD::RET, and ISD::ARG_FLAGS nodes, TargetLowering
provides three virtual functions for targets to override:
LowerFormalArguments, LowerCall, and LowerRet, which replace the custom
lowering done on the special nodes. They provide the same information, but
in a more immediately usable format.
This also reworks much of the target-independent tail call logic. The
decision of whether or not to perform a tail call is now cleanly split
between target-independent portions, and the target dependent portion
in IsEligibleForTailCallOptimization.
This also synchronizes all in-tree targets, to help enable future
refactoring and feature work.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@78142 91177308-0d34-0410-b5e6-96231b3b80d8
2009-08-05 01:29:28 +00:00
|
|
|
Flags.setZExt();
|
2012-10-09 07:45:08 +00:00
|
|
|
if (F.getParamAttributes(Idx).hasAttribute(Attributes::SExt))
|
Major calling convention code refactoring.
Instead of awkwardly encoding calling-convention information with ISD::CALL,
ISD::FORMAL_ARGUMENTS, ISD::RET, and ISD::ARG_FLAGS nodes, TargetLowering
provides three virtual functions for targets to override:
LowerFormalArguments, LowerCall, and LowerRet, which replace the custom
lowering done on the special nodes. They provide the same information, but
in a more immediately usable format.
This also reworks much of the target-independent tail call logic. The
decision of whether or not to perform a tail call is now cleanly split
between target-independent portions, and the target dependent portion
in IsEligibleForTailCallOptimization.
This also synchronizes all in-tree targets, to help enable future
refactoring and feature work.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@78142 91177308-0d34-0410-b5e6-96231b3b80d8
2009-08-05 01:29:28 +00:00
|
|
|
Flags.setSExt();
|
2012-10-09 07:45:08 +00:00
|
|
|
if (F.getParamAttributes(Idx).hasAttribute(Attributes::InReg))
|
Major calling convention code refactoring.
Instead of awkwardly encoding calling-convention information with ISD::CALL,
ISD::FORMAL_ARGUMENTS, ISD::RET, and ISD::ARG_FLAGS nodes, TargetLowering
provides three virtual functions for targets to override:
LowerFormalArguments, LowerCall, and LowerRet, which replace the custom
lowering done on the special nodes. They provide the same information, but
in a more immediately usable format.
This also reworks much of the target-independent tail call logic. The
decision of whether or not to perform a tail call is now cleanly split
between target-independent portions, and the target dependent portion
in IsEligibleForTailCallOptimization.
This also synchronizes all in-tree targets, to help enable future
refactoring and feature work.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@78142 91177308-0d34-0410-b5e6-96231b3b80d8
2009-08-05 01:29:28 +00:00
|
|
|
        Flags.setInReg();
      if (F.getParamAttributes(Idx).hasAttribute(Attributes::StructRet))
        Flags.setSRet();
      if (F.getParamAttributes(Idx).hasAttribute(Attributes::ByVal)) {
        Flags.setByVal();
        PointerType *Ty = cast<PointerType>(I->getType());
        Type *ElementTy = Ty->getElementType();
        Flags.setByValSize(TD->getTypeAllocSize(ElementTy));
        // For ByVal, the alignment should be passed in from the frontend. The
        // backend will guess if this info is missing, but there are cases it
        // cannot get right.
        unsigned FrameAlign;
        if (F.getParamAlignment(Idx))
          FrameAlign = F.getParamAlignment(Idx);
        else
          FrameAlign = TLI.getByValTypeAlignment(ElementTy);
        Flags.setByValAlign(FrameAlign);
      }
      if (F.getParamAttributes(Idx).hasAttribute(Attributes::Nest))
        Flags.setNest();
      Flags.setOrigAlign(OriginalAlignment);

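      // Ask the target how this value type is legalized: the register type
      // each piece occupies and how many such registers are needed to hold
      // one value of this type.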
      MVT RegisterVT = TLI.getRegisterType(*CurDAG->getContext(), VT);
      unsigned NumRegs = TLI.getNumRegisters(*CurDAG->getContext(), VT);
      for (unsigned i = 0; i != NumRegs; ++i) {
        ISD::InputArg MyFlags(Flags, RegisterVT, isArgValueUsed,
                              Idx-1, i*RegisterVT.getStoreSize());
        if (NumRegs > 1 && i == 0)
          MyFlags.Flags.setSplit();
        // If it isn't the first piece, the alignment must be 1.
        else if (i > 0)
          MyFlags.Flags.setOrigAlign(1);
        Ins.push_back(MyFlags);
      }
    }
  }

  // Call the target to set up the argument values.
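  // LowerFormalArguments returns the updated chain and appends one SDValue to
  // InVals for every ISD::InputArg entry in Ins, in the same order; the
  // asserts below check exactly that.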
  SmallVector<SDValue, 8> InVals;
  SDValue NewRoot = TLI.LowerFormalArguments(DAG.getRoot(), F.getCallingConv(),
                                             F.isVarArg(), Ins,
                                             dl, DAG, InVals);

  // Verify that the target's LowerFormalArguments behaved as expected.
  assert(NewRoot.getNode() && NewRoot.getValueType() == MVT::Other &&
         "LowerFormalArguments didn't return a valid chain!");
  assert(InVals.size() == Ins.size() &&
         "LowerFormalArguments didn't emit the correct number of values!");
  DEBUG({
      for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
        assert(InVals[i].getNode() &&
               "LowerFormalArguments emitted a null value!");
        assert(EVT(Ins[i].VT) == InVals[i].getValueType() &&
               "LowerFormalArguments emitted a value with the wrong type!");
      }
    });

  // Update the DAG with the new chain value resulting from argument lowering.
  DAG.setRoot(NewRoot);

  // Set up the argument values.
  unsigned i = 0;
  Idx = 1;
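  // "i" indexes the flattened values in InVals; "Idx" indexes the IR-level
  // arguments and is 1-based because attribute queries treat index 0 as the
  // return value.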
  if (!FuncInfo->CanLowerReturn) {
    // Create a virtual register for the sret pointer, and put in a copy
    // from the sret argument into it.
    SmallVector<EVT, 1> ValueVTs;
    ComputeValueVTs(TLI, PointerType::getUnqual(F.getReturnType()), ValueVTs);
    MVT VT = ValueVTs[0].getSimpleVT();
    MVT RegVT = TLI.getRegisterType(*CurDAG->getContext(), VT);
    ISD::NodeType AssertOp = ISD::DELETED_NODE;
    SDValue ArgValue = getCopyFromParts(DAG, dl, &InVals[0], 1,
                                        RegVT, VT, NULL, AssertOp);

    MachineFunction& MF = SDB->DAG.getMachineFunction();
    MachineRegisterInfo& RegInfo = MF.getRegInfo();
    unsigned SRetReg = RegInfo.createVirtualRegister(TLI.getRegClassFor(RegVT));
    FuncInfo->DemoteRegister = SRetReg;
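    // DemoteRegister is consulted again when the function's return is
    // lowered: the return value is stored through this sret pointer rather
    // than being returned in registers.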
    NewRoot = SDB->DAG.getCopyToReg(NewRoot, SDB->getCurDebugLoc(),
                                    SRetReg, ArgValue);
    DAG.setRoot(NewRoot);

    // i indexes lowered arguments. Bump it past the hidden sret argument.
    // Idx indexes LLVM arguments. Don't touch it.
    ++i;
  }

  for (Function::const_arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E;
       ++I, ++Idx) {
    SmallVector<SDValue, 4> ArgValues;
    SmallVector<EVT, 4> ValueVTs;
    ComputeValueVTs(TLI, I->getType(), ValueVTs);
    unsigned NumValues = ValueVTs.size();

    // If this argument is unused, remember its value. It is used to generate
    // debugging information.
    if (I->use_empty() && NumValues)
      SDB->setUnusedArgValue(I, InVals[i]);

    for (unsigned Val = 0; Val != NumValues; ++Val) {
      EVT VT = ValueVTs[Val];
      MVT PartVT = TLI.getRegisterType(*CurDAG->getContext(), VT);
      unsigned NumParts = TLI.getNumRegisters(*CurDAG->getContext(), VT);

      if (!I->use_empty()) {
        ISD::NodeType AssertOp = ISD::DELETED_NODE;
        if (F.getParamAttributes(Idx).hasAttribute(Attributes::SExt))
          AssertOp = ISD::AssertSext;
        else if (F.getParamAttributes(Idx).hasAttribute(Attributes::ZExt))
          AssertOp = ISD::AssertZext;

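        // getCopyFromParts reassembles the NumParts lowered registers into a
        // single value of type VT, adding an AssertSext/AssertZext node when
        // the signext/zeroext attribute guarantees the extension.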
        ArgValues.push_back(getCopyFromParts(DAG, dl, &InVals[i],
                                             NumParts, PartVT, VT,
                                             NULL, AssertOp));
      }

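      // Advance i past this value's parts even when the argument is unused,
      // so InVals stays aligned with the remaining arguments.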
      i += NumParts;
    }

    // We don't need to do anything else for unused arguments.
    if (ArgValues.empty())
      continue;

    // Note down frame index.
    if (FrameIndexSDNode *FI =
        dyn_cast<FrameIndexSDNode>(ArgValues[0].getNode()))
      FuncInfo->setArgumentFrameIndex(I, FI->getIndex());

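    // Re-merge the per-value pieces into a single SDValue for this IR
    // argument and record it as the argument's lowered value.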
    SDValue Res = DAG.getMergeValues(&ArgValues[0], NumValues,
                                     SDB->getCurDebugLoc());
    SDB->setValue(I, Res);
    if (!TM.Options.EnableFastISel && Res.getOpcode() == ISD::BUILD_PAIR) {
      if (LoadSDNode *LNode =
          dyn_cast<LoadSDNode>(Res.getOperand(0).getNode()))
        if (FrameIndexSDNode *FI =
            dyn_cast<FrameIndexSDNode>(LNode->getBasePtr().getNode()))
          FuncInfo->setArgumentFrameIndex(I, FI->getIndex());
    }

    // If this argument is live outside of the entry block, insert a copy from
    // wherever we got it into the vreg that other BBs will use to refer to it.
    if (!TM.Options.EnableFastISel && Res.getOpcode() == ISD::CopyFromReg) {
      // If we can, though, try to skip creating an unnecessary vreg.
      // FIXME: This isn't very clean... it would be nice to make this more
      // general. It's also subtly incompatible with the hacks FastISel
      // uses with vregs.
      unsigned Reg = cast<RegisterSDNode>(Res.getOperand(1))->getReg();
      if (TargetRegisterInfo::isVirtualRegister(Reg)) {
        FuncInfo->ValueMap[I] = Reg;
        continue;
      }
    }
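    // If the argument is used outside the entry block and no existing vreg
    // could be reused, create vregs for it and export the value so other
    // blocks can refer to it.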
    if (!isOnlyUsedInEntryBlock(I, TM.Options.EnableFastISel)) {
      FuncInfo->InitializeRegForValue(I);
      SDB->CopyToExportRegsIfNeeded(I);
    }
  }

  assert(i == InVals.size() && "Argument register count mismatch!");

  // Finally, if the target has anything special to do, allow it to do so.
  // FIXME: this should insert code into the DAG!
  EmitFunctionEntryCode();
}

/// Handle PHI nodes in successor blocks. Emit code into the SelectionDAG to
/// ensure constants are generated when needed. Remember the virtual registers
/// that need to be added to the Machine PHI nodes as input. We cannot just
/// directly add them, because expansion might result in multiple MBBs for one
/// BB. As such, the start of the BB might correspond to a different MBB than
/// the end.
///
void
SelectionDAGBuilder::HandlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB) {
  const TerminatorInst *TI = LLVMBB->getTerminator();

  SmallPtrSet<MachineBasicBlock *, 4> SuccsHandled;

  // Check successor nodes' PHI nodes that expect a constant to be available
  // from this block.
  for (unsigned succ = 0, e = TI->getNumSuccessors(); succ != e; ++succ) {
    const BasicBlock *SuccBB = TI->getSuccessor(succ);
    if (!isa<PHINode>(SuccBB->begin())) continue;
    MachineBasicBlock *SuccMBB = FuncInfo.MBBMap[SuccBB];

    // If this terminator has multiple identical successors (common for
    // switches), only handle each succ once.
    if (!SuccsHandled.insert(SuccMBB)) continue;

    MachineBasicBlock::iterator MBBI = SuccMBB->begin();

    // At this point we know that there is a 1-1 correspondence between LLVM PHI
    // nodes and Machine PHI nodes, but the incoming operands have not been
    // emitted yet.
    for (BasicBlock::const_iterator I = SuccBB->begin();
         const PHINode *PN = dyn_cast<PHINode>(I); ++I) {
      // Ignore dead PHIs.
      if (PN->use_empty()) continue;

      // Skip empty types
      if (PN->getType()->isEmptyTy())
        continue;

      unsigned Reg;
      const Value *PHIOp = PN->getIncomingValueForBlock(LLVMBB);

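      // ConstantsOut caches the virtual register created for each constant so
      // that a constant feeding several PHIs in this block is copied into a
      // vreg only once; the map is cleared at the end of this function.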
      if (const Constant *C = dyn_cast<Constant>(PHIOp)) {
        unsigned &RegOut = ConstantsOut[C];
        if (RegOut == 0) {
          RegOut = FuncInfo.CreateRegs(C->getType());
          CopyValueToVirtualRegister(C, RegOut);
        }
        Reg = RegOut;
      } else {
        DenseMap<const Value *, unsigned>::iterator I =
          FuncInfo.ValueMap.find(PHIOp);
        if (I != FuncInfo.ValueMap.end())
          Reg = I->second;
        else {
          assert(isa<AllocaInst>(PHIOp) &&
                 FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(PHIOp)) &&
                 "Didn't codegen value into a register!??");
          Reg = FuncInfo.CreateRegs(PHIOp->getType());
          CopyValueToVirtualRegister(PHIOp, Reg);
        }
      }

      // Remember that this register needs to be added to the machine PHI node
      // as the input for this MBB.
      SmallVector<EVT, 4> ValueVTs;
      ComputeValueVTs(TLI, PN->getType(), ValueVTs);
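      // Queue one (machine PHI, vreg) pair per register piece; the PHI
      // operands themselves are filled in after this block has been selected.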
      for (unsigned vti = 0, vte = ValueVTs.size(); vti != vte; ++vti) {
        EVT VT = ValueVTs[vti];
        unsigned NumRegisters = TLI.getNumRegisters(*DAG.getContext(), VT);
        for (unsigned i = 0, e = NumRegisters; i != e; ++i)
          FuncInfo.PHINodesToUpdate.push_back(std::make_pair(MBBI++, Reg+i));
        Reg += NumRegisters;
      }
    }
  }

  ConstantsOut.clear();
}