//===-- X86FastISel.cpp - X86 FastISel implementation ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the X86-specific support for the FastISel class. Much
// of the target-specific code is generated by tablegen in the file
// X86GenFastISel.inc, which is #included here.
//
//===----------------------------------------------------------------------===//

#include "X86.h"
|
2008-09-03 06:44:39 +00:00
|
|
|
#include "X86InstrBuilder.h"
|
2008-08-19 21:45:35 +00:00
|
|
|
#include "X86ISelLowering.h"
|
2008-09-03 01:04:47 +00:00
|
|
|
#include "X86RegisterInfo.h"
|
|
|
|
#include "X86Subtarget.h"
|
2008-08-22 00:20:26 +00:00
|
|
|
#include "X86TargetMachine.h"
|
2008-09-07 09:09:33 +00:00
|
|
|
#include "llvm/CallingConv.h"
|
2008-09-04 23:26:51 +00:00
|
|
|
#include "llvm/DerivedTypes.h"
|
2009-02-23 22:03:08 +00:00
|
|
|
#include "llvm/GlobalVariable.h"
|
2008-09-07 09:09:33 +00:00
|
|
|
#include "llvm/Instructions.h"
|
2009-04-12 07:36:01 +00:00
|
|
|
#include "llvm/IntrinsicInst.h"
|
2008-09-03 00:03:49 +00:00
|
|
|
#include "llvm/CodeGen/FastISel.h"
|
2008-09-05 00:06:23 +00:00
|
|
|
#include "llvm/CodeGen/MachineConstantPool.h"
|
2008-09-07 09:09:33 +00:00
|
|
|
#include "llvm/CodeGen/MachineFrameInfo.h"
|
2008-08-29 17:45:56 +00:00
|
|
|
#include "llvm/CodeGen/MachineRegisterInfo.h"
|
2008-09-07 09:09:33 +00:00
|
|
|
#include "llvm/Support/CallSite.h"
|
2009-07-11 20:10:48 +00:00
|
|
|
#include "llvm/Support/ErrorHandling.h"
|
2008-09-18 23:23:44 +00:00
|
|
|
#include "llvm/Support/GetElementPtrTypeIterator.h"
|
2009-05-04 19:50:33 +00:00
|
|
|
#include "llvm/Target/TargetOptions.h"
|
2008-09-03 00:03:49 +00:00
|
|
|
using namespace llvm;

namespace {

class X86FastISel : public FastISel {
  /// Subtarget - Keep a pointer to the X86Subtarget around so that we can
  /// make the right decision when generating code for different targets.
  const X86Subtarget *Subtarget;

  /// StackPtr - Register used as the stack pointer.
  ///
  unsigned StackPtr;

  /// X86ScalarSSEf32, X86ScalarSSEf64 - Select between SSE or x87
  /// floating point ops.
  /// When SSE is available, use it for f32 operations.
  /// When SSE2 is available, use it for f64 operations.
  bool X86ScalarSSEf64;
  bool X86ScalarSSEf32;

public:
  explicit X86FastISel(MachineFunction &mf,
                       MachineModuleInfo *mmi,
                       DwarfWriter *dw,
                       DenseMap<const Value *, unsigned> &vm,
                       DenseMap<const BasicBlock *, MachineBasicBlock *> &bm,
                       DenseMap<const AllocaInst *, int> &am
#ifndef NDEBUG
                       , SmallSet<Instruction*, 8> &cil
#endif
                       )
    : FastISel(mf, mmi, dw, vm, bm, am
#ifndef NDEBUG
               , cil
#endif
               ) {
    Subtarget = &TM.getSubtarget<X86Subtarget>();
    StackPtr = Subtarget->is64Bit() ? X86::RSP : X86::ESP;
    X86ScalarSSEf64 = Subtarget->hasSSE2();
    X86ScalarSSEf32 = Subtarget->hasSSE1();
  }

  virtual bool TargetSelectInstruction(Instruction *I);

#include "X86GenFastISel.inc"

private:
  bool X86FastEmitCompare(Value *LHS, Value *RHS, EVT VT);

  bool X86FastEmitLoad(EVT VT, const X86AddressMode &AM, unsigned &RR);

  bool X86FastEmitStore(EVT VT, Value *Val,
                        const X86AddressMode &AM);
  bool X86FastEmitStore(EVT VT, unsigned Val,
                        const X86AddressMode &AM);

  bool X86FastEmitExtend(ISD::NodeType Opc, EVT DstVT, unsigned Src, EVT SrcVT,
                         unsigned &ResultReg);

  bool X86SelectAddress(Value *V, X86AddressMode &AM);
  bool X86SelectCallAddress(Value *V, X86AddressMode &AM);

  bool X86SelectLoad(Instruction *I);

  bool X86SelectStore(Instruction *I);

  bool X86SelectCmp(Instruction *I);

  bool X86SelectZExt(Instruction *I);

  bool X86SelectBranch(Instruction *I);

  bool X86SelectShift(Instruction *I);

  bool X86SelectSelect(Instruction *I);

  bool X86SelectTrunc(Instruction *I);

  bool X86SelectFPExt(Instruction *I);
  bool X86SelectFPTrunc(Instruction *I);

  bool X86SelectExtractValue(Instruction *I);

  bool X86VisitIntrinsicCall(IntrinsicInst &I);
  bool X86SelectCall(Instruction *I);

  CCAssignFn *CCAssignFnForCall(unsigned CC, bool isTailCall = false);

  const X86InstrInfo *getInstrInfo() const {
    return getTargetMachine()->getInstrInfo();
  }
  const X86TargetMachine *getTargetMachine() const {
    return static_cast<const X86TargetMachine *>(&TM);
  }

  unsigned TargetMaterializeConstant(Constant *C);

  unsigned TargetMaterializeAlloca(AllocaInst *C);

  /// isScalarFPTypeInSSEReg - Return true if the specified scalar FP type is
  /// computed in an SSE register, not on the X87 floating point stack.
  bool isScalarFPTypeInSSEReg(EVT VT) const {
    return (VT == MVT::f64 && X86ScalarSSEf64) || // f64 is legal when SSE2 is on.
           (VT == MVT::f32 && X86ScalarSSEf32);   // f32 is legal when SSE1 is on.
  }

  bool isTypeLegal(const Type *Ty, EVT &VT, bool AllowI1 = false);
};

} // end anonymous namespace.

bool X86FastISel::isTypeLegal(const Type *Ty, EVT &VT, bool AllowI1) {
  VT = TLI.getValueType(Ty, /*HandleUnknown=*/true);
  if (VT == MVT::Other || !VT.isSimple())
    // Unhandled type. Halt "fast" selection and bail.
    return false;

  // For now, require SSE/SSE2 for performing floating-point operations,
  // since x87 requires additional work.
  if (VT == MVT::f64 && !X86ScalarSSEf64)
    return false;
  if (VT == MVT::f32 && !X86ScalarSSEf32)
    return false;
  // Similarly, no f80 support yet.
  if (VT == MVT::f80)
    return false;
  // We only handle legal types. For example, on x86-32 the instruction
  // selector contains all of the 64-bit instructions from x86-64,
  // under the assumption that i64 won't be used if the target doesn't
  // support it.
  return (AllowI1 && VT == MVT::i1) || TLI.isTypeLegal(VT);
}

#include "X86GenCallingConv.inc"

/// CCAssignFnForCall - Selects the correct CCAssignFn for a given calling
/// convention.
CCAssignFn *X86FastISel::CCAssignFnForCall(unsigned CC, bool isTailCall) {
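  // In 64-bit mode every convention handled here maps to the standard C
  // assign function (Win64 or SysV); CC only selects among the 32-bit
  // conventions below.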
  if (Subtarget->is64Bit()) {
    if (Subtarget->isTargetWin64())
      return CC_X86_Win64_C;
    else
      return CC_X86_64_C;
  }

  if (CC == CallingConv::X86_FastCall)
    return CC_X86_32_FastCall;
  else if (CC == CallingConv::Fast)
    return CC_X86_32_FastCC;
  else
    return CC_X86_32_C;
}

/// X86FastEmitLoad - Emit a machine instruction to load a value of type VT.
/// The address is either pre-computed, i.e. Ptr, or a GlobalAddress, i.e. GV.
/// Return true and the result register by reference if it is possible.
bool X86FastISel::X86FastEmitLoad(EVT VT, const X86AddressMode &AM,
                                  unsigned &ResultReg) {
  // Get opcode and regclass of the output for the given load instruction.
  unsigned Opc = 0;
  const TargetRegisterClass *RC = NULL;
  switch (VT.getSimpleVT().SimpleTy) {
  default: return false;
  case MVT::i8:
    Opc = X86::MOV8rm;
    RC  = X86::GR8RegisterClass;
    break;
  case MVT::i16:
    Opc = X86::MOV16rm;
    RC  = X86::GR16RegisterClass;
    break;
  case MVT::i32:
    Opc = X86::MOV32rm;
    RC  = X86::GR32RegisterClass;
    break;
  case MVT::i64:
    // Must be in x86-64 mode.
    Opc = X86::MOV64rm;
    RC  = X86::GR64RegisterClass;
    break;
  case MVT::f32:
    if (Subtarget->hasSSE1()) {
      Opc = X86::MOVSSrm;
      RC  = X86::FR32RegisterClass;
    } else {
      Opc = X86::LD_Fp32m;
      RC  = X86::RFP32RegisterClass;
    }
    break;
  case MVT::f64:
    if (Subtarget->hasSSE2()) {
      Opc = X86::MOVSDrm;
      RC  = X86::FR64RegisterClass;
    } else {
      Opc = X86::LD_Fp64m;
      RC  = X86::RFP64RegisterClass;
    }
    break;
  case MVT::f80:
    // No f80 support yet.
    return false;
  }

  ResultReg = createResultReg(RC);
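  // addFullAddress folds the whole address mode (base, scale, index,
  // displacement, and any global operand) into the load's memory operands.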
  addFullAddress(BuildMI(MBB, DL, TII.get(Opc), ResultReg), AM);
  return true;
}

/// X86FastEmitStore - Emit a machine instruction to store a value Val of
/// type VT. The address is either pre-computed, consisting of a base ptr,
/// Ptr, and a displacement offset, or a GlobalAddress, i.e. V. Return true
/// if it is possible.
bool
X86FastISel::X86FastEmitStore(EVT VT, unsigned Val,
                              const X86AddressMode &AM) {
  // Get the opcode of the store instruction for the given type.
  unsigned Opc = 0;
  switch (VT.getSimpleVT().SimpleTy) {
  case MVT::f80: // No f80 support yet.
  default: return false;
  case MVT::i8:  Opc = X86::MOV8mr;  break;
  case MVT::i16: Opc = X86::MOV16mr; break;
  case MVT::i32: Opc = X86::MOV32mr; break;
  case MVT::i64: Opc = X86::MOV64mr; break; // Must be in x86-64 mode.
  case MVT::f32:
    Opc = Subtarget->hasSSE1() ? X86::MOVSSmr : X86::ST_Fp32m;
    break;
  case MVT::f64:
    Opc = Subtarget->hasSSE2() ? X86::MOVSDmr : X86::ST_Fp64m;
    break;
  }

  addFullAddress(BuildMI(MBB, DL, TII.get(Opc)), AM).addReg(Val);
  return true;
}

bool X86FastISel::X86FastEmitStore(EVT VT, Value *Val,
                                   const X86AddressMode &AM) {
  // Handle 'null' like i32/i64 0.
  if (isa<ConstantPointerNull>(Val))
    Val = Constant::getNullValue(TD.getIntPtrType(Val->getContext()));

  // If this is a store of a simple constant, fold the constant into the store.
  if (ConstantInt *CI = dyn_cast<ConstantInt>(Val)) {
    unsigned Opc = 0;
    switch (VT.getSimpleVT().SimpleTy) {
    default: break;
    case MVT::i8:  Opc = X86::MOV8mi;  break;
    case MVT::i16: Opc = X86::MOV16mi; break;
    case MVT::i32: Opc = X86::MOV32mi; break;
    case MVT::i64:
      // Must be a 32-bit sign extended value.
      if ((int)CI->getSExtValue() == CI->getSExtValue())
        Opc = X86::MOV64mi32;
      break;
    }

    if (Opc) {
      addFullAddress(BuildMI(MBB, DL, TII.get(Opc)), AM)
        .addImm(CI->getSExtValue());
      return true;
    }
  }
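
  // No immediate form applies; materialize the value into a register and
  // emit a register store instead.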
  unsigned ValReg = getRegForValue(Val);
  if (ValReg == 0)
    return false;

  return X86FastEmitStore(VT, ValReg, AM);
}

/// X86FastEmitExtend - Emit a machine instruction to extend a value Src of
/// type SrcVT to type DstVT using the specified extension opcode Opc (e.g.
/// ISD::SIGN_EXTEND).
bool X86FastISel::X86FastEmitExtend(ISD::NodeType Opc, EVT DstVT,
                                    unsigned Src, EVT SrcVT,
                                    unsigned &ResultReg) {
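  // FastEmit_r returns 0 when no instruction is available for this
  // extension; any other value is the register holding the extended result.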
  unsigned RR = FastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(), Opc, Src);

  if (RR != 0) {
    ResultReg = RR;
    return true;
  } else
    return false;
}

/// X86SelectAddress - Attempt to fill in an address from the given value.
///
bool X86FastISel::X86SelectAddress(Value *V, X86AddressMode &AM) {
  User *U = NULL;
  unsigned Opcode = Instruction::UserOp1;
  if (Instruction *I = dyn_cast<Instruction>(V)) {
    Opcode = I->getOpcode();
    U = I;
  } else if (ConstantExpr *C = dyn_cast<ConstantExpr>(V)) {
    Opcode = C->getOpcode();
    U = C;
  }

  switch (Opcode) {
  default: break;
  case Instruction::BitCast:
    // Look past bitcasts.
    return X86SelectAddress(U->getOperand(0), AM);

  case Instruction::IntToPtr:
    // Look past no-op inttoptrs.
    if (TLI.getValueType(U->getOperand(0)->getType()) == TLI.getPointerTy())
      return X86SelectAddress(U->getOperand(0), AM);
    break;

  case Instruction::PtrToInt:
    // Look past no-op ptrtoints.
    if (TLI.getValueType(U->getType()) == TLI.getPointerTy())
      return X86SelectAddress(U->getOperand(0), AM);
    break;

  case Instruction::Alloca: {
    // Do static allocas.
    const AllocaInst *A = cast<AllocaInst>(V);
    DenseMap<const AllocaInst*, int>::iterator SI = StaticAllocaMap.find(A);
    if (SI != StaticAllocaMap.end()) {
      AM.BaseType = X86AddressMode::FrameIndexBase;
      AM.Base.FrameIndex = SI->second;
      return true;
    }
    break;
  }

  case Instruction::Add: {
    // Adds of constants are common and easy enough.
    if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1))) {
      uint64_t Disp = (int32_t)AM.Disp + (uint64_t)CI->getSExtValue();
      // They have to fit in the 32-bit signed displacement field though.
      if (isInt32(Disp)) {
        AM.Disp = (uint32_t)Disp;
        return X86SelectAddress(U->getOperand(0), AM);
      }
    }
    break;
  }

  case Instruction::GetElementPtr: {
    // Pattern-match simple GEPs.
    uint64_t Disp = (int32_t)AM.Disp;
    unsigned IndexReg = AM.IndexReg;
    unsigned Scale = AM.Scale;
    gep_type_iterator GTI = gep_type_begin(U);
    // Iterate through the indices, folding what we can. Constants can be
    // folded, and one dynamic index can be handled, if the scale is supported.
    for (User::op_iterator i = U->op_begin() + 1, e = U->op_end();
         i != e; ++i, ++GTI) {
      Value *Op = *i;
      if (const StructType *STy = dyn_cast<StructType>(*GTI)) {
        const StructLayout *SL = TD.getStructLayout(STy);
        unsigned Idx = cast<ConstantInt>(Op)->getZExtValue();
        Disp += SL->getElementOffset(Idx);
      } else {
        uint64_t S = TD.getTypeAllocSize(GTI.getIndexedType());
        if (ConstantInt *CI = dyn_cast<ConstantInt>(Op)) {
          // Constant-offset addressing.
          Disp += CI->getSExtValue() * S;
        } else if (IndexReg == 0 &&
                   (!AM.GV || !Subtarget->isPICStyleRIPRel()) &&
                   (S == 1 || S == 2 || S == 4 || S == 8)) {
          // Scaled-index addressing.
          Scale = S;
          IndexReg = getRegForGEPIndex(Op);
          if (IndexReg == 0)
            return false;
        } else
          // Unsupported.
          goto unsupported_gep;
      }
    }
    // Check for displacement overflow.
    if (!isInt32(Disp))
      break;
    // Ok, the GEP indices were covered by constant-offset and scaled-index
    // addressing. Update the address state and move on to examining the base.
    AM.IndexReg = IndexReg;
    AM.Scale = Scale;
    AM.Disp = (uint32_t)Disp;
    return X86SelectAddress(U->getOperand(0), AM);
  unsupported_gep:
    // Ok, the GEP indices weren't all covered.
    break;
  }
  }

  // Handle constant address.
  if (GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
    // Can't handle alternate code models yet.
    if (TM.getCodeModel() != CodeModel::Small)
      return false;

    // RIP-relative addresses can't have additional register operands.
    if (Subtarget->isPICStyleRIPRel() &&
        (AM.Base.Reg != 0 || AM.IndexReg != 0))
      return false;

    // Can't handle TLS yet.
    if (GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV))
      if (GVar->isThreadLocal())
        return false;

    // Okay, we've committed to selecting this global. Set up the basic address.
    AM.GV = GV;

    // Allow the subtarget to classify the global.
    unsigned char GVFlags = Subtarget->ClassifyGlobalReference(GV, TM);

    // If this reference is relative to the pic base, set it now.
    if (isGlobalRelativeToPICBase(GVFlags)) {
      // FIXME: How do we know Base.Reg is free??
      AM.Base.Reg = getInstrInfo()->getGlobalBaseReg(&MF);
    }

    // Unless the ABI requires an extra load, return a direct reference to
    // the global.
    if (!isGlobalStubReference(GVFlags)) {
      if (Subtarget->isPICStyleRIPRel()) {
        // Use rip-relative addressing if we can.  Above we verified that the
        // base and index registers are unused.
        assert(AM.Base.Reg == 0 && AM.IndexReg == 0);
        AM.Base.Reg = X86::RIP;
      }
      AM.GVOpFlags = GVFlags;
      return true;
    }

    // Ok, we need to do a load from a stub.  If we've already loaded from this
    // stub, reuse the loaded pointer, otherwise emit the load now.
    DenseMap<const Value*, unsigned>::iterator I = LocalValueMap.find(V);
    unsigned LoadReg;
    if (I != LocalValueMap.end() && I->second != 0) {
      LoadReg = I->second;
    } else {
      // Issue load from stub.
      unsigned Opc = 0;
      const TargetRegisterClass *RC = NULL;
      X86AddressMode StubAM;
      StubAM.Base.Reg = AM.Base.Reg;
      StubAM.GV = GV;
      StubAM.GVOpFlags = GVFlags;

      if (TLI.getPointerTy() == MVT::i64) {
        Opc = X86::MOV64rm;
        RC  = X86::GR64RegisterClass;

        if (Subtarget->isPICStyleRIPRel())
          StubAM.Base.Reg = X86::RIP;
      } else {
        Opc = X86::MOV32rm;
        RC  = X86::GR32RegisterClass;
      }

      LoadReg = createResultReg(RC);
      addFullAddress(BuildMI(MBB, DL, TII.get(Opc), LoadReg), StubAM);

      // Prevent loading GV stub multiple times in same MBB.
      LocalValueMap[V] = LoadReg;
    }

    // Now construct the final address. Note that the Disp, Scale,
    // and Index values may already be set here.
    AM.Base.Reg = LoadReg;
    AM.GV = 0;
    return true;
  }

  // If all else fails, try to materialize the value in a register.
  if (!AM.GV || !Subtarget->isPICStyleRIPRel()) {
    if (AM.Base.Reg == 0) {
      AM.Base.Reg = getRegForValue(V);
      return AM.Base.Reg != 0;
    }
    if (AM.IndexReg == 0) {
      assert(AM.Scale == 1 && "Scale with no index!");
      AM.IndexReg = getRegForValue(V);
      return AM.IndexReg != 0;
    }
  }

  return false;
}

/// X86SelectCallAddress - Attempt to fill in an address from the given value.
///
bool X86FastISel::X86SelectCallAddress(Value *V, X86AddressMode &AM) {
  User *U = NULL;
  unsigned Opcode = Instruction::UserOp1;
  if (Instruction *I = dyn_cast<Instruction>(V)) {
    Opcode = I->getOpcode();
    U = I;
  } else if (ConstantExpr *C = dyn_cast<ConstantExpr>(V)) {
    Opcode = C->getOpcode();
    U = C;
  }

  switch (Opcode) {
  default: break;
  case Instruction::BitCast:
    // Look past bitcasts.
    return X86SelectCallAddress(U->getOperand(0), AM);

  case Instruction::IntToPtr:
    // Look past no-op inttoptrs.
    if (TLI.getValueType(U->getOperand(0)->getType()) == TLI.getPointerTy())
      return X86SelectCallAddress(U->getOperand(0), AM);
    break;

  case Instruction::PtrToInt:
    // Look past no-op ptrtoints.
    if (TLI.getValueType(U->getType()) == TLI.getPointerTy())
      return X86SelectCallAddress(U->getOperand(0), AM);
    break;
  }

  // Handle constant address.
  if (GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
    // Can't handle alternate code models yet.
    if (TM.getCodeModel() != CodeModel::Small)
      return false;

    // RIP-relative addresses can't have additional register operands.
    if (Subtarget->isPICStyleRIPRel() &&
        (AM.Base.Reg != 0 || AM.IndexReg != 0))
      return false;

    // Can't handle TLS or DLLImport.
    if (GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV))
      if (GVar->isThreadLocal() || GVar->hasDLLImportLinkage())
        return false;

    // Okay, we've committed to selecting this global. Set up the basic address.
    AM.GV = GV;

    // No ABI requires an extra load for anything other than DLLImport, which
    // we rejected above.  Return a direct reference to the global.
    if (Subtarget->isPICStyleRIPRel()) {
      // Use rip-relative addressing if we can.  Above we verified that the
      // base and index registers are unused.
      assert(AM.Base.Reg == 0 && AM.IndexReg == 0);
      AM.Base.Reg = X86::RIP;
    } else if (Subtarget->isPICStyleStubPIC()) {
      AM.GVOpFlags = X86II::MO_PIC_BASE_OFFSET;
    } else if (Subtarget->isPICStyleGOT()) {
      AM.GVOpFlags = X86II::MO_GOTOFF;
    }

    return true;
  }

  // If all else fails, try to materialize the value in a register.
  if (!AM.GV || !Subtarget->isPICStyleRIPRel()) {
    if (AM.Base.Reg == 0) {
      AM.Base.Reg = getRegForValue(V);
      return AM.Base.Reg != 0;
    }
    if (AM.IndexReg == 0) {
      assert(AM.Scale == 1 && "Scale with no index!");
      AM.IndexReg = getRegForValue(V);
      return AM.IndexReg != 0;
    }
  }

  return false;
}

/// X86SelectStore - Select and emit code to implement store instructions.
bool X86FastISel::X86SelectStore(Instruction* I) {
  EVT VT;
  if (!isTypeLegal(I->getOperand(0)->getType(), VT))
    return false;

  X86AddressMode AM;
  if (!X86SelectAddress(I->getOperand(1), AM))
    return false;

  return X86FastEmitStore(VT, I->getOperand(0), AM);
}

/// X86SelectLoad - Select and emit code to implement load instructions.
///
bool X86FastISel::X86SelectLoad(Instruction *I) {
  EVT VT;
  if (!isTypeLegal(I->getType(), VT))
    return false;

  X86AddressMode AM;
  if (!X86SelectAddress(I->getOperand(0), AM))
    return false;

  unsigned ResultReg = 0;
  if (X86FastEmitLoad(VT, AM, ResultReg)) {
    UpdateValueMap(I, ResultReg);
    return true;
  }
  return false;
}

static unsigned X86ChooseCmpOpcode(EVT VT) {
  switch (VT.getSimpleVT().SimpleTy) {
  default:       return 0;
  case MVT::i8:  return X86::CMP8rr;
  case MVT::i16: return X86::CMP16rr;
  case MVT::i32: return X86::CMP32rr;
  case MVT::i64: return X86::CMP64rr;
  case MVT::f32: return X86::UCOMISSrr;
  case MVT::f64: return X86::UCOMISDrr;
  }
}

/// X86ChooseCmpImmediateOpcode - If we have a comparison with RHS as the RHS
/// of the comparison, return an opcode that works for the compare (e.g.
/// CMP32ri) otherwise return 0.
static unsigned X86ChooseCmpImmediateOpcode(EVT VT, ConstantInt *RHSC) {
  switch (VT.getSimpleVT().SimpleTy) {
  // Otherwise, we can't fold the immediate into this comparison.
  default: return 0;
  case MVT::i8:  return X86::CMP8ri;
  case MVT::i16: return X86::CMP16ri;
  case MVT::i32: return X86::CMP32ri;
  case MVT::i64:
    // 64-bit comparisons are only valid if the immediate fits in a 32-bit sext
    // field.
    if ((int)RHSC->getSExtValue() == RHSC->getSExtValue())
      return X86::CMP64ri32;
    return 0;
  }
}

bool X86FastISel::X86FastEmitCompare(Value *Op0, Value *Op1, EVT VT) {
  unsigned Op0Reg = getRegForValue(Op0);
  if (Op0Reg == 0) return false;

  // Handle 'null' like i32/i64 0.
  if (isa<ConstantPointerNull>(Op1))
    Op1 = Constant::getNullValue(TD.getIntPtrType(Op0->getContext()));

  // We have two options: compare with register or immediate.  If the RHS of
  // the compare is an immediate that we can fold into this compare, use
  // CMPri, otherwise use CMPrr.
  if (ConstantInt *Op1C = dyn_cast<ConstantInt>(Op1)) {
    if (unsigned CompareImmOpc = X86ChooseCmpImmediateOpcode(VT, Op1C)) {
      BuildMI(MBB, DL, TII.get(CompareImmOpc)).addReg(Op0Reg)
        .addImm(Op1C->getSExtValue());
      return true;
    }
  }

  unsigned CompareOpc = X86ChooseCmpOpcode(VT);
  if (CompareOpc == 0) return false;

  unsigned Op1Reg = getRegForValue(Op1);
  if (Op1Reg == 0) return false;
  BuildMI(MBB, DL, TII.get(CompareOpc)).addReg(Op0Reg).addReg(Op1Reg);

  return true;
}

bool X86FastISel::X86SelectCmp(Instruction *I) {
  CmpInst *CI = cast<CmpInst>(I);

  EVT VT;
  if (!isTypeLegal(I->getOperand(0)->getType(), VT))
    return false;

  unsigned ResultReg = createResultReg(&X86::GR8RegClass);
  unsigned SetCCOpc;
  bool SwapArgs;  // false -> compare Op0, Op1.  true -> compare Op1, Op0.
  switch (CI->getPredicate()) {
  case CmpInst::FCMP_OEQ: {
    if (!X86FastEmitCompare(CI->getOperand(0), CI->getOperand(1), VT))
      return false;
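
    // OEQ is true only when the operands are equal and ordered; no single
    // setcc tests that, so combine SETE (equal) with SETNP (not unordered).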
    unsigned EReg = createResultReg(&X86::GR8RegClass);
    unsigned NPReg = createResultReg(&X86::GR8RegClass);
    BuildMI(MBB, DL, TII.get(X86::SETEr), EReg);
    BuildMI(MBB, DL, TII.get(X86::SETNPr), NPReg);
    BuildMI(MBB, DL,
            TII.get(X86::AND8rr), ResultReg).addReg(NPReg).addReg(EReg);
    UpdateValueMap(I, ResultReg);
    return true;
  }
  case CmpInst::FCMP_UNE: {
    if (!X86FastEmitCompare(CI->getOperand(0), CI->getOperand(1), VT))
      return false;
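
    // UNE is true when the operands are unequal or unordered: combine
    // SETNE (unequal) with SETP (unordered) using OR.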
    unsigned NEReg = createResultReg(&X86::GR8RegClass);
    unsigned PReg = createResultReg(&X86::GR8RegClass);
    BuildMI(MBB, DL, TII.get(X86::SETNEr), NEReg);
    BuildMI(MBB, DL, TII.get(X86::SETPr), PReg);
    BuildMI(MBB, DL, TII.get(X86::OR8rr), ResultReg).addReg(PReg).addReg(NEReg);
    UpdateValueMap(I, ResultReg);
    return true;
  }
  case CmpInst::FCMP_OGT: SwapArgs = false; SetCCOpc = X86::SETAr;  break;
  case CmpInst::FCMP_OGE: SwapArgs = false; SetCCOpc = X86::SETAEr; break;
  case CmpInst::FCMP_OLT: SwapArgs = true;  SetCCOpc = X86::SETAr;  break;
  case CmpInst::FCMP_OLE: SwapArgs = true;  SetCCOpc = X86::SETAEr; break;
  case CmpInst::FCMP_ONE: SwapArgs = false; SetCCOpc = X86::SETNEr; break;
  case CmpInst::FCMP_ORD: SwapArgs = false; SetCCOpc = X86::SETNPr; break;
  case CmpInst::FCMP_UNO: SwapArgs = false; SetCCOpc = X86::SETPr;  break;
  case CmpInst::FCMP_UEQ: SwapArgs = false; SetCCOpc = X86::SETEr;  break;
  case CmpInst::FCMP_UGT: SwapArgs = true;  SetCCOpc = X86::SETBr;  break;
  case CmpInst::FCMP_UGE: SwapArgs = true;  SetCCOpc = X86::SETBEr; break;
  case CmpInst::FCMP_ULT: SwapArgs = false; SetCCOpc = X86::SETBr;  break;
  case CmpInst::FCMP_ULE: SwapArgs = false; SetCCOpc = X86::SETBEr; break;

  case CmpInst::ICMP_EQ:  SwapArgs = false; SetCCOpc = X86::SETEr;  break;
  case CmpInst::ICMP_NE:  SwapArgs = false; SetCCOpc = X86::SETNEr; break;
  case CmpInst::ICMP_UGT: SwapArgs = false; SetCCOpc = X86::SETAr;  break;
  case CmpInst::ICMP_UGE: SwapArgs = false; SetCCOpc = X86::SETAEr; break;
  case CmpInst::ICMP_ULT: SwapArgs = false; SetCCOpc = X86::SETBr;  break;
  case CmpInst::ICMP_ULE: SwapArgs = false; SetCCOpc = X86::SETBEr; break;
  case CmpInst::ICMP_SGT: SwapArgs = false; SetCCOpc = X86::SETGr;  break;
  case CmpInst::ICMP_SGE: SwapArgs = false; SetCCOpc = X86::SETGEr; break;
  case CmpInst::ICMP_SLT: SwapArgs = false; SetCCOpc = X86::SETLr;  break;
  case CmpInst::ICMP_SLE: SwapArgs = false; SetCCOpc = X86::SETLEr; break;
  default:
    return false;
  }

  Value *Op0 = CI->getOperand(0), *Op1 = CI->getOperand(1);
  if (SwapArgs)
    std::swap(Op0, Op1);

  // Emit a compare of Op0/Op1.
  if (!X86FastEmitCompare(Op0, Op1, VT))
    return false;

  BuildMI(MBB, DL, TII.get(SetCCOpc), ResultReg);
  UpdateValueMap(I, ResultReg);
  return true;
}

bool X86FastISel::X86SelectZExt(Instruction *I) {
  // Handle zero-extension from i1 to i8, which is common.
  if (I->getType() == Type::getInt8Ty(I->getContext()) &&
      I->getOperand(0)->getType() == Type::getInt1Ty(I->getContext())) {
    unsigned ResultReg = getRegForValue(I->getOperand(0));
    if (ResultReg == 0) return false;
    // Set the high bits to zero.
    ResultReg = FastEmitZExtFromI1(MVT::i8, ResultReg);
    if (ResultReg == 0) return false;
    UpdateValueMap(I, ResultReg);
    return true;
  }

  return false;
}

bool X86FastISel::X86SelectBranch(Instruction *I) {
  // Unconditional branches are selected by tablegen-generated code.
  // Handle a conditional branch.
  BranchInst *BI = cast<BranchInst>(I);
  MachineBasicBlock *TrueMBB = MBBMap[BI->getSuccessor(0)];
  MachineBasicBlock *FalseMBB = MBBMap[BI->getSuccessor(1)];

  // Fold the common case of a conditional branch with a comparison.
  if (CmpInst *CI = dyn_cast<CmpInst>(BI->getCondition())) {
    if (CI->hasOneUse()) {
      EVT VT = TLI.getValueType(CI->getOperand(0)->getType());

      // Try to take advantage of fallthrough opportunities.
      CmpInst::Predicate Predicate = CI->getPredicate();
      if (MBB->isLayoutSuccessor(TrueMBB)) {
        std::swap(TrueMBB, FalseMBB);
        Predicate = CmpInst::getInversePredicate(Predicate);
      }

      bool SwapArgs;  // false -> compare Op0, Op1.  true -> compare Op1, Op0.
      unsigned BranchOpc; // Opcode to jump on, e.g. "X86::JA"

      switch (Predicate) {
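      // No single branch condition tests ordered-and-equal, so invert OEQ to
      // UNE and swap the targets; UNE is then emitted as JNE plus JP below.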
      case CmpInst::FCMP_OEQ:
        std::swap(TrueMBB, FalseMBB);
        Predicate = CmpInst::FCMP_UNE;
        // FALL THROUGH
      case CmpInst::FCMP_UNE: SwapArgs = false; BranchOpc = X86::JNE; break;
      case CmpInst::FCMP_OGT: SwapArgs = false; BranchOpc = X86::JA;  break;
      case CmpInst::FCMP_OGE: SwapArgs = false; BranchOpc = X86::JAE; break;
      case CmpInst::FCMP_OLT: SwapArgs = true;  BranchOpc = X86::JA;  break;
      case CmpInst::FCMP_OLE: SwapArgs = true;  BranchOpc = X86::JAE; break;
      case CmpInst::FCMP_ONE: SwapArgs = false; BranchOpc = X86::JNE; break;
      case CmpInst::FCMP_ORD: SwapArgs = false; BranchOpc = X86::JNP; break;
      case CmpInst::FCMP_UNO: SwapArgs = false; BranchOpc = X86::JP;  break;
      case CmpInst::FCMP_UEQ: SwapArgs = false; BranchOpc = X86::JE;  break;
      case CmpInst::FCMP_UGT: SwapArgs = true;  BranchOpc = X86::JB;  break;
      case CmpInst::FCMP_UGE: SwapArgs = true;  BranchOpc = X86::JBE; break;
      case CmpInst::FCMP_ULT: SwapArgs = false; BranchOpc = X86::JB;  break;
      case CmpInst::FCMP_ULE: SwapArgs = false; BranchOpc = X86::JBE; break;

      case CmpInst::ICMP_EQ:  SwapArgs = false; BranchOpc = X86::JE;  break;
      case CmpInst::ICMP_NE:  SwapArgs = false; BranchOpc = X86::JNE; break;
      case CmpInst::ICMP_UGT: SwapArgs = false; BranchOpc = X86::JA;  break;
      case CmpInst::ICMP_UGE: SwapArgs = false; BranchOpc = X86::JAE; break;
      case CmpInst::ICMP_ULT: SwapArgs = false; BranchOpc = X86::JB;  break;
      case CmpInst::ICMP_ULE: SwapArgs = false; BranchOpc = X86::JBE; break;
      case CmpInst::ICMP_SGT: SwapArgs = false; BranchOpc = X86::JG;  break;
      case CmpInst::ICMP_SGE: SwapArgs = false; BranchOpc = X86::JGE; break;
      case CmpInst::ICMP_SLT: SwapArgs = false; BranchOpc = X86::JL;  break;
      case CmpInst::ICMP_SLE: SwapArgs = false; BranchOpc = X86::JLE; break;
      default:
        return false;
      }

      Value *Op0 = CI->getOperand(0), *Op1 = CI->getOperand(1);
      if (SwapArgs)
        std::swap(Op0, Op1);

      // Emit a compare of the LHS and RHS, setting the flags.
      if (!X86FastEmitCompare(Op0, Op1, VT))
        return false;

      BuildMI(MBB, DL, TII.get(BranchOpc)).addMBB(TrueMBB);

      if (Predicate == CmpInst::FCMP_UNE) {
        // X86 requires a second branch to handle UNE (and OEQ,
        // which is mapped to UNE above).
        BuildMI(MBB, DL, TII.get(X86::JP)).addMBB(TrueMBB);
      }

      FastEmitBranch(FalseMBB);
      MBB->addSuccessor(TrueMBB);
      return true;
    }
  } else if (ExtractValueInst *EI =
             dyn_cast<ExtractValueInst>(BI->getCondition())) {
    // Check to see if the branch instruction is from an "arithmetic with
    // overflow" intrinsic. The main way these intrinsics are used is:
    //
    //   %t = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %v1, i32 %v2)
    //   %sum = extractvalue { i32, i1 } %t, 0
    //   %obit = extractvalue { i32, i1 } %t, 1
    //   br i1 %obit, label %overflow, label %normal
    //
    // The %sum and %obit are converted in an ADD and a SETO/SETB before
    // reaching the branch. Therefore, we search backwards through the MBB
    // looking for the SETO/SETB instruction. If an instruction modifies the
    // EFLAGS register before we reach the SETO/SETB instruction, then we can't
    // convert the branch into a JO/JB instruction.
    if (IntrinsicInst *CI = dyn_cast<IntrinsicInst>(EI->getAggregateOperand())){
      if (CI->getIntrinsicID() == Intrinsic::sadd_with_overflow ||
          CI->getIntrinsicID() == Intrinsic::uadd_with_overflow) {
        const MachineInstr *SetMI = 0;
        unsigned Reg = lookUpRegForValue(EI);

        for (MachineBasicBlock::const_reverse_iterator
               RI = MBB->rbegin(), RE = MBB->rend(); RI != RE; ++RI) {
          const MachineInstr &MI = *RI;

          if (MI.modifiesRegister(Reg)) {
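            // Follow register copies back to their source; the first
            // non-copy def of Reg is the SETO/SETB we are looking for.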
            unsigned Src, Dst, SrcSR, DstSR;

            if (getInstrInfo()->isMoveInstr(MI, Src, Dst, SrcSR, DstSR)) {
              Reg = Src;
              continue;
            }

            SetMI = &MI;
            break;
          }

          const TargetInstrDesc &TID = MI.getDesc();
          if (TID.hasUnmodeledSideEffects() ||
              TID.hasImplicitDefOfPhysReg(X86::EFLAGS))
            break;
        }

        if (SetMI) {
          unsigned OpCode = SetMI->getOpcode();

          if (OpCode == X86::SETOr || OpCode == X86::SETBr) {
            BuildMI(MBB, DL, TII.get(OpCode == X86::SETOr ? X86::JO : X86::JB))
              .addMBB(TrueMBB);
            FastEmitBranch(FalseMBB);
            MBB->addSuccessor(TrueMBB);
            return true;
          }
        }
      }
    }
  }

  // Otherwise do a clumsy setcc and re-test it.
  unsigned OpReg = getRegForValue(BI->getCondition());
  if (OpReg == 0) return false;

  BuildMI(MBB, DL, TII.get(X86::TEST8rr)).addReg(OpReg).addReg(OpReg);
  BuildMI(MBB, DL, TII.get(X86::JNE)).addMBB(TrueMBB);
  FastEmitBranch(FalseMBB);
  MBB->addSuccessor(TrueMBB);
  return true;
}

bool X86FastISel::X86SelectShift(Instruction *I) {
  unsigned CReg = 0, OpReg = 0, OpImm = 0;
  const TargetRegisterClass *RC = NULL;
  if (I->getType() == Type::getInt8Ty(I->getContext())) {
    CReg = X86::CL;
    RC = &X86::GR8RegClass;
    switch (I->getOpcode()) {
    case Instruction::LShr: OpReg = X86::SHR8rCL; OpImm = X86::SHR8ri; break;
    case Instruction::AShr: OpReg = X86::SAR8rCL; OpImm = X86::SAR8ri; break;
    case Instruction::Shl:  OpReg = X86::SHL8rCL; OpImm = X86::SHL8ri; break;
    default: return false;
    }
  } else if (I->getType() == Type::getInt16Ty(I->getContext())) {
    CReg = X86::CX;
    RC = &X86::GR16RegClass;
    switch (I->getOpcode()) {
    case Instruction::LShr: OpReg = X86::SHR16rCL; OpImm = X86::SHR16ri; break;
    case Instruction::AShr: OpReg = X86::SAR16rCL; OpImm = X86::SAR16ri; break;
    case Instruction::Shl:  OpReg = X86::SHL16rCL; OpImm = X86::SHL16ri; break;
    default: return false;
    }
  } else if (I->getType() == Type::getInt32Ty(I->getContext())) {
    CReg = X86::ECX;
    RC = &X86::GR32RegClass;
    switch (I->getOpcode()) {
    case Instruction::LShr: OpReg = X86::SHR32rCL; OpImm = X86::SHR32ri; break;
    case Instruction::AShr: OpReg = X86::SAR32rCL; OpImm = X86::SAR32ri; break;
    case Instruction::Shl:  OpReg = X86::SHL32rCL; OpImm = X86::SHL32ri; break;
    default: return false;
    }
  } else if (I->getType() == Type::getInt64Ty(I->getContext())) {
    CReg = X86::RCX;
    RC = &X86::GR64RegClass;
    switch (I->getOpcode()) {
    case Instruction::LShr: OpReg = X86::SHR64rCL; OpImm = X86::SHR64ri; break;
    case Instruction::AShr: OpReg = X86::SAR64rCL; OpImm = X86::SAR64ri; break;
    case Instruction::Shl:  OpReg = X86::SHL64rCL; OpImm = X86::SHL64ri; break;
    default: return false;
    }
  } else {
    return false;
  }

  EVT VT = TLI.getValueType(I->getType(), /*HandleUnknown=*/true);
  if (VT == MVT::Other || !isTypeLegal(I->getType(), VT))
    return false;

  unsigned Op0Reg = getRegForValue(I->getOperand(0));
  if (Op0Reg == 0) return false;

  // Fold a constant shift amount into the immediate form, e.g. shl x, 3.
  if (ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(1))) {
    unsigned ResultReg = createResultReg(RC);
    BuildMI(MBB, DL, TII.get(OpImm),
            ResultReg).addReg(Op0Reg).addImm(CI->getZExtValue() & 0xff);
    UpdateValueMap(I, ResultReg);
    return true;
  }

  unsigned Op1Reg = getRegForValue(I->getOperand(1));
  if (Op1Reg == 0) return false;
  TII.copyRegToReg(*MBB, MBB->end(), CReg, Op1Reg, RC, RC);

  // The shift instruction uses X86::CL. If we defined a super-register
  // of X86::CL, emit an EXTRACT_SUBREG to precisely describe what
  // we're doing here.
  if (CReg != X86::CL)
    BuildMI(MBB, DL, TII.get(TargetInstrInfo::EXTRACT_SUBREG), X86::CL)
      .addReg(CReg).addImm(X86::SUBREG_8BIT);

  unsigned ResultReg = createResultReg(RC);
  BuildMI(MBB, DL, TII.get(OpReg), ResultReg).addReg(Op0Reg);
  UpdateValueMap(I, ResultReg);
  return true;
}

bool X86FastISel::X86SelectSelect(Instruction *I) {
  EVT VT = TLI.getValueType(I->getType(), /*HandleUnknown=*/true);
  if (VT == MVT::Other || !isTypeLegal(I->getType(), VT))
    return false;

  unsigned Opc = 0;
  const TargetRegisterClass *RC = NULL;
  if (VT.getSimpleVT() == MVT::i16) {
    Opc = X86::CMOVE16rr;
    RC = &X86::GR16RegClass;
  } else if (VT.getSimpleVT() == MVT::i32) {
    Opc = X86::CMOVE32rr;
    RC = &X86::GR32RegClass;
  } else if (VT.getSimpleVT() == MVT::i64) {
    Opc = X86::CMOVE64rr;
    RC = &X86::GR64RegClass;
  } else {
    return false;
  }

  unsigned Op0Reg = getRegForValue(I->getOperand(0));
  if (Op0Reg == 0) return false;
  unsigned Op1Reg = getRegForValue(I->getOperand(1));
  if (Op1Reg == 0) return false;
  unsigned Op2Reg = getRegForValue(I->getOperand(2));
  if (Op2Reg == 0) return false;

  BuildMI(MBB, DL, TII.get(X86::TEST8rr)).addReg(Op0Reg).addReg(Op0Reg);
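  // TEST sets ZF exactly when the condition is zero; CMOVE then substitutes
  // the false operand (Op2Reg), leaving the true operand (Op1Reg) otherwise.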
  unsigned ResultReg = createResultReg(RC);
  BuildMI(MBB, DL, TII.get(Opc), ResultReg).addReg(Op1Reg).addReg(Op2Reg);
  UpdateValueMap(I, ResultReg);
  return true;
}

bool X86FastISel::X86SelectFPExt(Instruction *I) {
  // fpext from float to double.
  if (Subtarget->hasSSE2() &&
      I->getType() == Type::getDoubleTy(I->getContext())) {
    Value *V = I->getOperand(0);
    if (V->getType() == Type::getFloatTy(I->getContext())) {
      unsigned OpReg = getRegForValue(V);
      if (OpReg == 0) return false;
      unsigned ResultReg = createResultReg(X86::FR64RegisterClass);
      BuildMI(MBB, DL, TII.get(X86::CVTSS2SDrr), ResultReg).addReg(OpReg);
      UpdateValueMap(I, ResultReg);
      return true;
    }
  }

  return false;
}

bool X86FastISel::X86SelectFPTrunc(Instruction *I) {
  if (Subtarget->hasSSE2()) {
    if (I->getType() == Type::getFloatTy(I->getContext())) {
      Value *V = I->getOperand(0);
      if (V->getType() == Type::getDoubleTy(I->getContext())) {
        unsigned OpReg = getRegForValue(V);
        if (OpReg == 0) return false;
        unsigned ResultReg = createResultReg(X86::FR32RegisterClass);
        BuildMI(MBB, DL, TII.get(X86::CVTSD2SSrr), ResultReg).addReg(OpReg);
        UpdateValueMap(I, ResultReg);
        return true;
      }
    }
  }

  return false;
}

bool X86FastISel::X86SelectTrunc(Instruction *I) {
  if (Subtarget->is64Bit())
    // All other cases should be handled by the tblgen generated code.
    return false;
  EVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
  EVT DstVT = TLI.getValueType(I->getType());

  // This code only handles truncation to byte right now.
  if (DstVT != MVT::i8 && DstVT != MVT::i1)
    // All other cases should be handled by the tblgen generated code.
    return false;
  if (SrcVT != MVT::i16 && SrcVT != MVT::i32)
    // All other cases should be handled by the tblgen generated code.
    return false;

  unsigned InputReg = getRegForValue(I->getOperand(0));
  if (!InputReg)
    // Unhandled operand.  Halt "fast" selection and bail.
    return false;

  // First issue a copy to GR16_ABCD or GR32_ABCD.
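  // In 32-bit mode only EAX/EBX/ECX/EDX have an addressable 8-bit
  // subregister, so the input must live in one of the ABCD register classes
  // before we can extract its low byte.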
  unsigned CopyOpc = (SrcVT == MVT::i16) ? X86::MOV16rr : X86::MOV32rr;
  const TargetRegisterClass *CopyRC = (SrcVT == MVT::i16)
    ? X86::GR16_ABCDRegisterClass : X86::GR32_ABCDRegisterClass;
  unsigned CopyReg = createResultReg(CopyRC);
  BuildMI(MBB, DL, TII.get(CopyOpc), CopyReg).addReg(InputReg);

  // Then issue an extract_subreg.
  unsigned ResultReg = FastEmitInst_extractsubreg(MVT::i8,
                                                  CopyReg, X86::SUBREG_8BIT);
  if (!ResultReg)
    return false;

  UpdateValueMap(I, ResultReg);
  return true;
}

bool X86FastISel::X86SelectExtractValue(Instruction *I) {
  ExtractValueInst *EI = cast<ExtractValueInst>(I);
  Value *Agg = EI->getAggregateOperand();

  if (IntrinsicInst *CI = dyn_cast<IntrinsicInst>(Agg)) {
    switch (CI->getIntrinsicID()) {
    default: break;
    case Intrinsic::sadd_with_overflow:
    case Intrinsic::uadd_with_overflow:
      // Cheat a little. We know that the registers for "add" and "seto" are
      // allocated sequentially. However, we only keep track of the register
      // for "add" in the value map. Use extractvalue's index to get the
      // correct register for "seto".
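      // Illustrative IR for this pattern:
      //   %res  = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)
      //   %sum  = extractvalue {i32, i1} %res, 0
      //   %obit = extractvalue {i32, i1} %res, 1
      // Index 0 maps to the register holding the sum, index 1 to the
      // register holding the overflow bit.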
      UpdateValueMap(I, lookUpRegForValue(Agg) + *EI->idx_begin());
      return true;
    }
  }

  return false;
}

bool X86FastISel::X86VisitIntrinsicCall(IntrinsicInst &I) {
  // FIXME: Handle more intrinsics.
  switch (I.getIntrinsicID()) {
  default: return false;
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::uadd_with_overflow: {
    // Replace "add with overflow" intrinsics with an "add" instruction
    // followed by a seto/setb instruction. Later on, when the "extractvalue"
    // instructions are encountered, we use the fact that two registers were
    // created sequentially to get the correct registers for the "sum" and the
    // "overflow bit".
    const Function *Callee = I.getCalledFunction();
    const Type *RetTy =
      cast<StructType>(Callee->getReturnType())->getTypeAtIndex(unsigned(0));

    EVT VT;
    if (!isTypeLegal(RetTy, VT))
      return false;

    Value *Op1 = I.getOperand(1);
    Value *Op2 = I.getOperand(2);
    unsigned Reg1 = getRegForValue(Op1);
    unsigned Reg2 = getRegForValue(Op2);

    if (Reg1 == 0 || Reg2 == 0)
      // FIXME: Handle values *not* in registers.
      return false;

    unsigned OpC = 0;
    if (VT == MVT::i32)
      OpC = X86::ADD32rr;
    else if (VT == MVT::i64)
      OpC = X86::ADD64rr;
    else
      return false;

    unsigned ResultReg = createResultReg(TLI.getRegClassFor(VT));
    BuildMI(MBB, DL, TII.get(OpC), ResultReg).addReg(Reg1).addReg(Reg2);
    unsigned DestReg1 = UpdateValueMap(&I, ResultReg);

    // If the add with overflow is an intra-block value then we just want to
    // create temporaries for it like normal.  If it is a cross-block value
    // then UpdateValueMap will return the cross-block register used.  Since
    // we *really* want the value to be live in the register pair known by
    // UpdateValueMap, we have to use DestReg1+1 as the destination register
    // in the cross-block case.  In the non-cross-block case, we should just
    // make another register for the value.
    if (DestReg1 != ResultReg)
      ResultReg = DestReg1 + 1;
    else
      ResultReg = createResultReg(TLI.getRegClassFor(MVT::i8));
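
    // Note: SETO reads the signed-overflow flag (OF), while SETB reads the
    // carry flag (CF), which signals unsigned overflow.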
    unsigned Opc = X86::SETBr;
    if (I.getIntrinsicID() == Intrinsic::sadd_with_overflow)
      Opc = X86::SETOr;
    BuildMI(MBB, DL, TII.get(Opc), ResultReg);
    return true;
  }
  }
}

bool X86FastISel::X86SelectCall(Instruction *I) {
  CallInst *CI = cast<CallInst>(I);
  Value *Callee = I->getOperand(0);

  // Can't handle inline asm yet.
  if (isa<InlineAsm>(Callee))
    return false;

  // Handle intrinsic calls.
  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI))
    return X86VisitIntrinsicCall(*II);

  // Handle only C and fastcc calling conventions for now.
  CallSite CS(CI);
  unsigned CC = CS.getCallingConv();
  if (CC != CallingConv::C &&
      CC != CallingConv::Fast &&
      CC != CallingConv::X86_FastCall)
    return false;

  // On X86, -tailcallopt changes the fastcc ABI. FastISel doesn't
  // handle this for now.
  if (CC == CallingConv::Fast && PerformTailCallOpt)
    return false;

  // Let SDISel handle vararg functions.
  const PointerType *PT = cast<PointerType>(CS.getCalledValue()->getType());
  const FunctionType *FTy = cast<FunctionType>(PT->getElementType());
  if (FTy->isVarArg())
    return false;

  // Handle *simple* calls for now.
  const Type *RetTy = CS.getType();
  EVT RetVT;
  if (RetTy == Type::getVoidTy(I->getContext()))
    RetVT = MVT::isVoid;
  else if (!isTypeLegal(RetTy, RetVT, true))
    return false;

  // Materialize callee address in a register. FIXME: GV address can be
  // handled with a CALLpcrel32 instead.
  X86AddressMode CalleeAM;
  if (!X86SelectCallAddress(Callee, CalleeAM))
    return false;
  unsigned CalleeOp = 0;
  GlobalValue *GV = 0;
  if (CalleeAM.GV != 0) {
    GV = CalleeAM.GV;
  } else if (CalleeAM.Base.Reg != 0) {
    CalleeOp = CalleeAM.Base.Reg;
  } else
    return false;

  // Allow calls which produce i1 results.
  bool AndToI1 = false;
  if (RetVT == MVT::i1) {
    RetVT = MVT::i8;
    AndToI1 = true;
  }

  // Deal with call operands first.
  SmallVector<Value*, 8> ArgVals;
  SmallVector<unsigned, 8> Args;
  SmallVector<EVT, 8> ArgVTs;
  SmallVector<ISD::ArgFlagsTy, 8> ArgFlags;
  Args.reserve(CS.arg_size());
  ArgVals.reserve(CS.arg_size());
  ArgVTs.reserve(CS.arg_size());
  ArgFlags.reserve(CS.arg_size());
  for (CallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end();
       i != e; ++i) {
    unsigned Arg = getRegForValue(*i);
    if (Arg == 0)
      return false;
    ISD::ArgFlagsTy Flags;
    unsigned AttrInd = i - CS.arg_begin() + 1;
    if (CS.paramHasAttr(AttrInd, Attribute::SExt))
      Flags.setSExt();
    if (CS.paramHasAttr(AttrInd, Attribute::ZExt))
      Flags.setZExt();

    // FIXME: Only handle *easy* calls for now.
    if (CS.paramHasAttr(AttrInd, Attribute::InReg) ||
        CS.paramHasAttr(AttrInd, Attribute::StructRet) ||
        CS.paramHasAttr(AttrInd, Attribute::Nest) ||
        CS.paramHasAttr(AttrInd, Attribute::ByVal))
      return false;

    const Type *ArgTy = (*i)->getType();
    EVT ArgVT;
    if (!isTypeLegal(ArgTy, ArgVT))
      return false;
    unsigned OriginalAlignment = TD.getABITypeAlignment(ArgTy);
    Flags.setOrigAlign(OriginalAlignment);

    Args.push_back(Arg);
    ArgVals.push_back(*i);
    ArgVTs.push_back(ArgVT);
    ArgFlags.push_back(Flags);
  }

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CC, false, TM, ArgLocs, I->getParent()->getContext());
  CCInfo.AnalyzeCallOperands(ArgVTs, ArgFlags, CCAssignFnForCall(CC));

  // Get a count of how many bytes are to be pushed on the stack.
  unsigned NumBytes = CCInfo.getNextStackOffset();

  // Issue CALLSEQ_START
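  // (The call frame setup pseudo typically lowers to a stack-pointer
  // adjustment such as "sub esp, NumBytes" and brackets the argument
  // stores emitted below.)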
  unsigned AdjStackDown = TM.getRegisterInfo()->getCallFrameSetupOpcode();
  BuildMI(MBB, DL, TII.get(AdjStackDown)).addImm(NumBytes);

  // Process arguments: walk the register/memloc assignments, inserting
  // copies / loads.
  SmallVector<unsigned, 4> RegArgs;
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    unsigned Arg = Args[VA.getValNo()];
    EVT ArgVT = ArgVTs[VA.getValNo()];

    // Promote the value if needed.
    switch (VA.getLocInfo()) {
    default: llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::SExt: {
      bool Emitted = X86FastEmitExtend(ISD::SIGN_EXTEND, VA.getLocVT(),
                                       Arg, ArgVT, Arg);
      assert(Emitted && "Failed to emit a sext!"); Emitted=Emitted;
      Emitted = true;
      ArgVT = VA.getLocVT();
      break;
    }
    case CCValAssign::ZExt: {
      bool Emitted = X86FastEmitExtend(ISD::ZERO_EXTEND, VA.getLocVT(),
                                       Arg, ArgVT, Arg);
      assert(Emitted && "Failed to emit a zext!"); Emitted=Emitted;
      Emitted = true;
      ArgVT = VA.getLocVT();
      break;
    }
    case CCValAssign::AExt: {
      bool Emitted = X86FastEmitExtend(ISD::ANY_EXTEND, VA.getLocVT(),
                                       Arg, ArgVT, Arg);
      if (!Emitted)
        Emitted = X86FastEmitExtend(ISD::ZERO_EXTEND, VA.getLocVT(),
                                    Arg, ArgVT, Arg);
      if (!Emitted)
        Emitted = X86FastEmitExtend(ISD::SIGN_EXTEND, VA.getLocVT(),
                                    Arg, ArgVT, Arg);

      assert(Emitted && "Failed to emit an aext!"); Emitted=Emitted;
      ArgVT = VA.getLocVT();
      break;
    }
    case CCValAssign::BCvt: {
      unsigned BC = FastEmit_r(ArgVT.getSimpleVT(), VA.getLocVT().getSimpleVT(),
                               ISD::BIT_CONVERT, Arg);
      assert(BC != 0 && "Failed to emit a bitcast!");
      Arg = BC;
      ArgVT = VA.getLocVT();
      break;
    }
    }

    if (VA.isRegLoc()) {
      TargetRegisterClass* RC = TLI.getRegClassFor(ArgVT);
      bool Emitted = TII.copyRegToReg(*MBB, MBB->end(), VA.getLocReg(),
                                      Arg, RC, RC);
      assert(Emitted && "Failed to emit a copy instruction!"); Emitted=Emitted;
      Emitted = true;
      RegArgs.push_back(VA.getLocReg());
    } else {
      unsigned LocMemOffset = VA.getLocMemOffset();
      X86AddressMode AM;
      AM.Base.Reg = StackPtr;
      AM.Disp = LocMemOffset;
      Value *ArgVal = ArgVals[VA.getValNo()];

      // If this is a really simple value, emit this with the Value* version of
      // X86FastEmitStore.  If it isn't simple, we don't want to do this, as it
      // can cause us to reevaluate the argument.
      if (isa<ConstantInt>(ArgVal) || isa<ConstantPointerNull>(ArgVal))
        X86FastEmitStore(ArgVT, ArgVal, AM);
      else
        X86FastEmitStore(ArgVT, Arg, AM);
    }
  }

  // ELF / PIC requires the GOT pointer to be in the EBX register before
  // function calls via the PLT.
  if (Subtarget->isPICStyleGOT()) {
    TargetRegisterClass *RC = X86::GR32RegisterClass;
    unsigned Base = getInstrInfo()->getGlobalBaseReg(&MF);
    bool Emitted = TII.copyRegToReg(*MBB, MBB->end(), X86::EBX, Base, RC, RC);
    assert(Emitted && "Failed to emit a copy instruction!"); Emitted=Emitted;
    Emitted = true;
  }

  // Issue the call.
  MachineInstrBuilder MIB;
  if (CalleeOp) {
    // Register-indirect call.
    unsigned CallOpc = Subtarget->is64Bit() ? X86::CALL64r : X86::CALL32r;
    MIB = BuildMI(MBB, DL, TII.get(CallOpc)).addReg(CalleeOp);

  } else {
    // Direct call.
    assert(GV && "Not a direct call");
    unsigned CallOpc =
      Subtarget->is64Bit() ? X86::CALL64pcrel32 : X86::CALLpcrel32;

    // See if we need any target-specific flags on the GV operand.
    unsigned char OpFlags = 0;

    // On ELF targets, in both X86-64 and X86-32 mode, direct calls to
    // external symbols must go through the PLT in PIC mode.  If the symbol
    // has hidden or protected visibility, or if it is static or local, then
    // we don't need to use the PLT - we can directly call it.
    if (Subtarget->isTargetELF() &&
        TM.getRelocationModel() == Reloc::PIC_ &&
        GV->hasDefaultVisibility() && !GV->hasLocalLinkage()) {
      OpFlags = X86II::MO_PLT;
    } else if (Subtarget->isPICStyleStubAny() &&
               (GV->isDeclaration() || GV->isWeakForLinker()) &&
               Subtarget->getDarwinVers() < 9) {
      // PC-relative references to external symbols should go through $stub,
      // unless we're building with the leopard linker or later, which
      // automatically synthesizes these stubs.
      OpFlags = X86II::MO_DARWIN_STUB;
    }

    MIB = BuildMI(MBB, DL, TII.get(CallOpc)).addGlobalAddress(GV, 0, OpFlags);
  }

  // Add an implicit use of the GOT pointer in EBX.
  if (Subtarget->isPICStyleGOT())
    MIB.addReg(X86::EBX);

  // Add implicit physical register uses to the call.
  for (unsigned i = 0, e = RegArgs.size(); i != e; ++i)
    MIB.addReg(RegArgs[i]);

  // Issue CALLSEQ_END
  unsigned AdjStackUp = TM.getRegisterInfo()->getCallFrameDestroyOpcode();
  BuildMI(MBB, DL, TII.get(AdjStackUp)).addImm(NumBytes).addImm(0);

  // Now handle call return value (if any).
  if (RetVT.getSimpleVT().SimpleTy != MVT::isVoid) {
    SmallVector<CCValAssign, 16> RVLocs;
    CCState CCInfo(CC, false, TM, RVLocs, I->getParent()->getContext());
    CCInfo.AnalyzeCallResult(RetVT, RetCC_X86);

    // Copy all of the result registers out of their specified physreg.
    assert(RVLocs.size() == 1 && "Can't handle multi-value calls!");
    EVT CopyVT = RVLocs[0].getValVT();
    TargetRegisterClass* DstRC = TLI.getRegClassFor(CopyVT);
    TargetRegisterClass *SrcRC = DstRC;

    // If this is a call to a function that returns an fp value on the x87 fp
    // stack, but where we prefer to use the value in xmm registers, copy it
    // out as F80 and use a truncate to move it from fp stack reg to xmm reg.
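    // (There is no direct register-to-register move between the x87 stack
    // and the XMM registers, hence the trip through memory below.)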
    if ((RVLocs[0].getLocReg() == X86::ST0 ||
         RVLocs[0].getLocReg() == X86::ST1) &&
        isScalarFPTypeInSSEReg(RVLocs[0].getValVT())) {
      CopyVT = MVT::f80;
      SrcRC = X86::RSTRegisterClass;
      DstRC = X86::RFP80RegisterClass;
    }

    unsigned ResultReg = createResultReg(DstRC);
    bool Emitted = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
                                    RVLocs[0].getLocReg(), DstRC, SrcRC);
    assert(Emitted && "Failed to emit a copy instruction!"); Emitted=Emitted;
    Emitted = true;
    if (CopyVT != RVLocs[0].getValVT()) {
      // Round the F80 value to the right size, which also moves it to the
      // appropriate xmm register. This is accomplished by storing the F80
      // value in memory and then loading it back. Ewww...
      EVT ResVT = RVLocs[0].getValVT();
      unsigned Opc = ResVT == MVT::f32 ? X86::ST_Fp80m32 : X86::ST_Fp80m64;
      unsigned MemSize = ResVT.getSizeInBits()/8;
      int FI = MFI.CreateStackObject(MemSize, MemSize);
      addFrameReference(BuildMI(MBB, DL, TII.get(Opc)), FI).addReg(ResultReg);
      DstRC = ResVT == MVT::f32
        ? X86::FR32RegisterClass : X86::FR64RegisterClass;
      Opc = ResVT == MVT::f32 ? X86::MOVSSrm : X86::MOVSDrm;
      ResultReg = createResultReg(DstRC);
      addFrameReference(BuildMI(MBB, DL, TII.get(Opc), ResultReg), FI);
    }

    if (AndToI1) {
      // Mask out all but the lowest bit for a call which produces an i1.
      unsigned AndResult = createResultReg(X86::GR8RegisterClass);
      BuildMI(MBB, DL,
              TII.get(X86::AND8ri), AndResult).addReg(ResultReg).addImm(1);
      ResultReg = AndResult;
    }

    UpdateValueMap(I, ResultReg);
  }

  return true;
}

bool
X86FastISel::TargetSelectInstruction(Instruction *I) {
  switch (I->getOpcode()) {
  default: break;
  case Instruction::Load:
    return X86SelectLoad(I);
  case Instruction::Store:
    return X86SelectStore(I);
  case Instruction::ICmp:
  case Instruction::FCmp:
    return X86SelectCmp(I);
  case Instruction::ZExt:
    return X86SelectZExt(I);
  case Instruction::Br:
    return X86SelectBranch(I);
  case Instruction::Call:
    return X86SelectCall(I);
  case Instruction::LShr:
  case Instruction::AShr:
  case Instruction::Shl:
    return X86SelectShift(I);
  case Instruction::Select:
    return X86SelectSelect(I);
  case Instruction::Trunc:
    return X86SelectTrunc(I);
  case Instruction::FPExt:
    return X86SelectFPExt(I);
  case Instruction::FPTrunc:
    return X86SelectFPTrunc(I);
  case Instruction::ExtractValue:
    return X86SelectExtractValue(I);
  case Instruction::IntToPtr: // Deliberate fall-through.
  case Instruction::PtrToInt: {
    EVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
    EVT DstVT = TLI.getValueType(I->getType());
    if (DstVT.bitsGT(SrcVT))
      return X86SelectZExt(I);
    if (DstVT.bitsLT(SrcVT))
      return X86SelectTrunc(I);
    unsigned Reg = getRegForValue(I->getOperand(0));
    if (Reg == 0) return false;
    UpdateValueMap(I, Reg);
    return true;
  }
  }

  return false;
}

unsigned X86FastISel::TargetMaterializeConstant(Constant *C) {
  EVT VT;
  if (!isTypeLegal(C->getType(), VT))
    return 0;

  // Get opcode and regclass of the output for the given load instruction.
  unsigned Opc = 0;
  const TargetRegisterClass *RC = NULL;
  switch (VT.getSimpleVT().SimpleTy) {
  default: return 0;
  case MVT::i8:
    Opc = X86::MOV8rm;
    RC  = X86::GR8RegisterClass;
    break;
  case MVT::i16:
    Opc = X86::MOV16rm;
    RC  = X86::GR16RegisterClass;
    break;
  case MVT::i32:
    Opc = X86::MOV32rm;
    RC  = X86::GR32RegisterClass;
    break;
  case MVT::i64:
    // Must be in x86-64 mode.
    Opc = X86::MOV64rm;
    RC  = X86::GR64RegisterClass;
    break;
  case MVT::f32:
    if (Subtarget->hasSSE1()) {
      Opc = X86::MOVSSrm;
      RC  = X86::FR32RegisterClass;
    } else {
      Opc = X86::LD_Fp32m;
      RC  = X86::RFP32RegisterClass;
    }
    break;
  case MVT::f64:
    if (Subtarget->hasSSE2()) {
      Opc = X86::MOVSDrm;
      RC  = X86::FR64RegisterClass;
    } else {
      Opc = X86::LD_Fp64m;
      RC  = X86::RFP64RegisterClass;
    }
    break;
  case MVT::f80:
    // No f80 support yet.
    return 0;
  }

  // Materialize addresses with LEA instructions.
  if (isa<GlobalValue>(C)) {
    X86AddressMode AM;
    if (X86SelectAddress(C, AM)) {
      if (TLI.getPointerTy() == MVT::i32)
        Opc = X86::LEA32r;
      else
        Opc = X86::LEA64r;
      unsigned ResultReg = createResultReg(RC);
      addLeaAddress(BuildMI(MBB, DL, TII.get(Opc), ResultReg), AM);
      return ResultReg;
    }
    return 0;
  }

  // MachineConstantPool wants an explicit alignment.
  unsigned Align = TD.getPrefTypeAlignment(C->getType());
  if (Align == 0) {
    // Alignment of vector types.  FIXME!
    Align = TD.getTypeAllocSize(C->getType());
  }

  // x86-32 PIC requires a PIC base register for constant pools.
  unsigned PICBase = 0;
  unsigned char OpFlag = 0;
  if (Subtarget->isPICStyleStubPIC()) { // Not dynamic-no-pic
    OpFlag = X86II::MO_PIC_BASE_OFFSET;
    PICBase = getInstrInfo()->getGlobalBaseReg(&MF);
  } else if (Subtarget->isPICStyleGOT()) {
    OpFlag = X86II::MO_GOTOFF;
    PICBase = getInstrInfo()->getGlobalBaseReg(&MF);
  } else if (Subtarget->isPICStyleRIPRel() &&
             TM.getCodeModel() == CodeModel::Small) {
    PICBase = X86::RIP;
  }

  // Create the load from the constant pool.
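  // (Note: despite the variable's name, getConstantPoolIndex returns a
  // constant-pool index, not a byte offset.)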
  unsigned MCPOffset = MCP.getConstantPoolIndex(C, Align);
  unsigned ResultReg = createResultReg(RC);
  addConstantPoolReference(BuildMI(MBB, DL, TII.get(Opc), ResultReg),
                           MCPOffset, PICBase, OpFlag);

  return ResultReg;
}

unsigned X86FastISel::TargetMaterializeAlloca(AllocaInst *C) {
  // Fail on dynamic allocas. At this point, getRegForValue has already
  // checked its CSE maps, so if we're here trying to handle a dynamic
  // alloca, we're not going to succeed. X86SelectAddress has a
  // check for dynamic allocas, because it's called directly from
  // various places, but TargetMaterializeAlloca also needs a check
  // in order to avoid recursion between getRegForValue,
  // X86SelectAddress, and TargetMaterializeAlloca.
  if (!StaticAllocaMap.count(C))
    return 0;

  X86AddressMode AM;
  if (!X86SelectAddress(C, AM))
    return 0;
  unsigned Opc = Subtarget->is64Bit() ? X86::LEA64r : X86::LEA32r;
  TargetRegisterClass* RC = TLI.getRegClassFor(TLI.getPointerTy());
  unsigned ResultReg = createResultReg(RC);
  addLeaAddress(BuildMI(MBB, DL, TII.get(Opc), ResultReg), AM);
  return ResultReg;
}

namespace llvm {
  llvm::FastISel *X86::createFastISel(MachineFunction &mf,
                                      MachineModuleInfo *mmi,
                                      DwarfWriter *dw,
                                      DenseMap<const Value *, unsigned> &vm,
                                      DenseMap<const BasicBlock *, MachineBasicBlock *> &bm,
                                      DenseMap<const AllocaInst *, int> &am
#ifndef NDEBUG
                                      , SmallSet<Instruction*, 8> &cil
#endif
                                      ) {
    return new X86FastISel(mf, mmi, dw, vm, bm, am
#ifndef NDEBUG
                           , cil
#endif
                           );
  }
}