//===-- X86ISelLowering.h - X86 DAG Lowering Interface ----------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that X86 uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//
#ifndef X86ISELLOWERING_H
#define X86ISELLOWERING_H
#include "X86Subtarget.h"
#include "X86RegisterInfo.h"
#include "X86MachineFunctionInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/CallingConvLower.h"
namespace llvm {
namespace X86ISD {
// X86 Specific DAG Nodes
enum NodeType {
// Start the numbering where the builtin ops leave off.
FIRST_NUMBER = ISD::BUILTIN_OP_END,
/// BSF - Bit scan forward.
/// BSR - Bit scan reverse.
BSF,
BSR,
/// SHLD, SHRD - Double shift instructions. These correspond to
/// X86::SHLDxx and X86::SHRDxx instructions.
SHLD,
SHRD,
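// Scalar sketch of the double-shift semantics (illustrative, assuming a
// 32-bit operand width; not code from this file):
//
//   SHLD(a, b, n) == (a << n) | (b >> (32 - n))   // b's high bits shift in
//   SHRD(a, b, n) == (a >> n) | (b << (32 - n))   // b's low bits shift in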
/// FAND - Bitwise logical AND of floating point values. This corresponds
/// to X86::ANDPS or X86::ANDPD.
FAND,
/// FOR - Bitwise logical OR of floating point values. This corresponds
/// to X86::ORPS or X86::ORPD.
FOR,
/// FXOR - Bitwise logical XOR of floating point values. This corresponds
/// to X86::XORPS or X86::XORPD.
FXOR,
/// FSRL - Bitwise logical right shift of floating point values. This
/// corresponds to X86::PSRLDQ.
FSRL,
/// FILD, FILD_FLAG - This instruction implements SINT_TO_FP with the
/// integer source in memory and FP reg result. This corresponds to the
/// X86::FILD*m instructions. It has three inputs (token chain, address,
/// and source type) and two outputs (FP value and token chain). FILD_FLAG
/// also produces a flag.
FILD,
FILD_FLAG,
/// FP_TO_INT*_IN_MEM - This instruction implements FP_TO_SINT with the
/// integer destination in memory and a FP reg source. This corresponds
/// to the X86::FIST*m instructions and the rounding mode change stuff. It
/// has two inputs (token chain and address) and two outputs (int value
/// and token chain).
FP_TO_INT16_IN_MEM,
FP_TO_INT32_IN_MEM,
FP_TO_INT64_IN_MEM,
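// The "rounding mode change stuff" looks roughly like this (a simplified
// sketch; the real lowering stages the control word through stack slots it
// creates itself):
//
//   fnstcw (%old)        ; save the current FP control word
//   movw   (%old), %ax
//   orw    $0xC00, %ax   ; RC = 11b: round toward zero, as FP_TO_SINT needs
//   movw   %ax, (%new)
//   fldcw  (%new)        ; install the truncating mode
//   fistpl (%dst)        ; convert and store the integer result
//   fldcw  (%old)        ; restore the original control word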
/// FLD - This instruction implements an extending load to FP stack slots.
/// This corresponds to the X86::FLD32m / X86::FLD64m. It takes a chain
/// operand, ptr to load from, and a ValueType node indicating the type
/// to load to.
FLD,
/// FST - This instruction implements a truncating store to FP stack
/// slots. This corresponds to the X86::FST32m / X86::FST64m. It takes a
/// chain operand, value to store, address, and a ValueType to store it
/// as.
FST,
/// CALL/TAILCALL - These operations represent an abstract X86 call
/// instruction, which includes a bunch of information. In particular the
/// operands of these nodes are:
///
/// #0 - The incoming token chain
/// #1 - The callee
/// #2 - The number of arg bytes the caller pushes on the stack.
/// #3 - The number of arg bytes the callee pops off the stack.
/// #4 - The value to pass in AL/AX/EAX (optional)
/// #5 - The value to pass in DL/DX/EDX (optional)
///
/// The result values of these nodes are:
///
/// #0 - The outgoing token chain
/// #1 - The first register result value (optional)
/// #2 - The second register result value (optional)
///
/// The CALL vs TAILCALL distinction boils down to whether the callee is
/// known not to modify the caller's stack frame, as is standard with
/// LLVM.
CALL,
TAILCALL,
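// A sketch of how LowerCALL might assemble such a node (illustrative and
// simplified; DebugLoc and the optional AL/DL operands are omitted):
//
//   SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Flag);
//   SmallVector<SDValue, 8> Ops;
//   Ops.push_back(Chain);                                            // #0
//   Ops.push_back(Callee);                                           // #1
//   Ops.push_back(DAG.getConstant(NumBytes, getPointerTy()));        // #2
//   Ops.push_back(DAG.getConstant(BytesCalleePops, getPointerTy())); // #3
//   Chain = DAG.getNode(X86ISD::CALL, NodeTys, &Ops[0], Ops.size());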
/// RDTSC_DAG - This operation implements the lowering for
/// readcyclecounter.
RDTSC_DAG,
/// X86 compare and logical compare instructions.
CMP, COMI, UCOMI,
/// X86 bit-test instructions.
BT,
/// X86 SetCC. Operand 0 is condition code, and operand 1 is the flag
/// operand produced by a CMP instruction.
SETCC,
/// X86 conditional moves. Operand 0 and operand 1 are the two values
/// to select from. Operand 2 is the condition code, and operand 3 is the
/// flag operand produced by a CMP or TEST instruction. It also writes a
/// flag result.
CMOV,
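// For example (an illustrative sketch, not verbatim lowering code), a
// scalar "select i1 %cond, i32 %a, i32 %b" becomes roughly:
//
//   SDValue Flag = EmitTest(Cond, X86::COND_NE, DAG);      // sets EFLAGS
//   SDValue CC   = DAG.getConstant(X86::COND_NE, MVT::i8);
//   SDValue Res  = DAG.getNode(X86ISD::CMOV, VTs, A, B, CC, Flag);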
/// X86 conditional branches. Operand 0 is the chain operand, operand 1
/// is the block to branch if condition is true, operand 2 is the
/// condition code, and operand 3 is the flag operand produced by a CMP
/// or TEST instruction.
BRCOND,
/// Return with a flag operand. Operand 0 is the chain operand, operand
/// 1 is the number of bytes of stack to pop.
RET_FLAG,
/// REP_STOS - Repeat fill, corresponds to X86::REP_STOSx.
REP_STOS,
/// REP_MOVS - Repeat move, corresponds to X86::REP_MOVSx.
REP_MOVS,
/// GlobalBaseReg - On Darwin, this node represents the result of the popl
/// at function entry, used for PIC code.
GlobalBaseReg,
/// Wrapper - A wrapper node for TargetConstantPool,
/// TargetExternalSymbol, and TargetGlobalAddress.
Wrapper,
/// WrapperRIP - Special wrapper used under X86-64 PIC mode for RIP
/// relative displacements.
WrapperRIP,
/// PEXTRB - Extract an 8-bit value from a vector and zero extend it to
/// i32, corresponds to X86::PEXTRB.
PEXTRB,
/// PEXTRW - Extract a 16-bit value from a vector and zero extend it to
/// i32, corresponds to X86::PEXTRW.
PEXTRW,
/// INSERTPS - Insert any element of a 4 x float vector into any element
/// of a destination 4 x float vector.
INSERTPS,
/// PINSRB - Insert the lower 8 bits of a 32-bit value into a vector,
/// corresponds to X86::PINSRB.
PINSRB,
/// PINSRW - Insert the lower 16 bits of a 32-bit value into a vector,
/// corresponds to X86::PINSRW.
PINSRW,
/// PSHUFB - Shuffle 16 8-bit values within a vector.
PSHUFB,
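// A scalar model of the SSSE3 instruction's semantics (illustrative):
//
//   for (unsigned i = 0; i != 16; ++i)
//     dst[i] = (mask[i] & 0x80) ? 0 : src[mask[i] & 0xF];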
/// FMAX, FMIN - Floating point max and min.
///
FMAX, FMIN,
/// FRSQRT, FRCP - Floating point reciprocal-sqrt and reciprocal
/// approximation. Note that these typically require refinement
/// in order to obtain suitable precision.
FRSQRT, FRCP,
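// The customary refinement is one Newton-Raphson step (a note, not code
// from this file); with x0 ~ rsqrt(a) and r0 ~ rcp(a):
//
//   x1 = x0 * (1.5 - 0.5 * a * x0 * x0);   // refines 1/sqrt(a)
//   r1 = r0 * (2.0 - a * r0);              // refines 1/a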
// TLSADDR - Thread Local Storage.
TLSADDR,
// SegmentBaseAddress - The address segment:0
SegmentBaseAddress,
// EH_RETURN - Exception Handling helpers.
EH_RETURN,
/// TC_RETURN - Tail call return.
/// operand #0 chain
/// operand #1 callee (register or absolute)
/// operand #2 stack adjustment
/// operand #3 optional in flag
TC_RETURN,
// LCMPXCHG_DAG, LCMPXCHG8_DAG - Compare and swap.
LCMPXCHG_DAG,
LCMPXCHG8_DAG,
// ATOMADD64_DAG, ATOMSUB64_DAG, ATOMOR64_DAG, ATOMAND64_DAG,
// ATOMXOR64_DAG, ATOMNAND64_DAG, ATOMSWAP64_DAG -
// Atomic 64-bit binary operations.
ATOMADD64_DAG,
ATOMSUB64_DAG,
ATOMOR64_DAG,
ATOMXOR64_DAG,
ATOMAND64_DAG,
ATOMNAND64_DAG,
ATOMSWAP64_DAG,
// FNSTCW16m - Store FP control word into i16 memory.
FNSTCW16m,
// VZEXT_MOVL - Vector move low and zero extend.
VZEXT_MOVL,
// VZEXT_LOAD - Load, scalar_to_vector, and zero extend.
VZEXT_LOAD,
// VSHL, VSRL - Vector logical left / right shift.
VSHL, VSRL,
// CMPPD, CMPPS - Vector double/float comparison.
CMPPD, CMPPS,
// PCMP* - Vector integer comparisons.
PCMPEQB, PCMPEQW, PCMPEQD, PCMPEQQ,
PCMPGTB, PCMPGTW, PCMPGTD, PCMPGTQ,
// ADD, SUB, SMUL, UMUL, etc. - Arithmetic operations with FLAGS results.
ADD, SUB, SMUL, UMUL,
INC, DEC,
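// These allow e.g. llvm.sadd.with.overflow.i32 to select to a single ADD
// whose second result feeds a SETCC on the overflow condition (a sketch;
// see LowerXALUO below):
//
//   SDVTList VTs = DAG.getVTList(MVT::i32, MVT::i32);
//   SDValue Sum  = DAG.getNode(X86ISD::ADD, VTs, LHS, RHS);
//   SDValue Ovf  = DAG.getNode(X86ISD::SETCC, MVT::i8,
//                              DAG.getConstant(X86::COND_O, MVT::i8),
//                              Sum.getValue(1));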
// MUL_IMM - X86 specific multiply by immediate.
MUL_IMM
};
}
/// Define some predicates that are used for node matching.
namespace X86 {
/// isPSHUFDMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to PSHUFD.
bool isPSHUFDMask(SDNode *N);
/// isPSHUFHWMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to PSHUFHW.
bool isPSHUFHWMask(SDNode *N);
/// isPSHUFLWMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to PSHUFLW.
bool isPSHUFLWMask(SDNode *N);
/// isSHUFPMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to SHUFP*.
bool isSHUFPMask(SDNode *N);
/// isMOVHLPSMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to MOVHLPS.
bool isMOVHLPSMask(SDNode *N);
/// isMOVHLPS_v_undef_Mask - Special case of isMOVHLPSMask for canonical form
/// of vector_shuffle v, v, <2, 3, 2, 3>, i.e. vector_shuffle v, undef,
/// <2, 3, 2, 3>
bool isMOVHLPS_v_undef_Mask(SDNode *N);
/// isMOVLPMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to MOVLP{S|D}.
bool isMOVLPMask(SDNode *N);
/// isMOVHPMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to MOVHP{S|D}
/// as well as MOVLHPS.
bool isMOVHPMask(SDNode *N);
/// isUNPCKLMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to UNPCKL.
bool isUNPCKLMask(SDNode *N, bool V2IsSplat = false);
/// isUNPCKHMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to UNPCKH.
bool isUNPCKHMask(SDNode *N, bool V2IsSplat = false);
/// isUNPCKL_v_undef_Mask - Special case of isUNPCKLMask for canonical form
/// of vector_shuffle v, v, <0, 4, 1, 5>, i.e. vector_shuffle v, undef,
/// <0, 0, 1, 1>
bool isUNPCKL_v_undef_Mask(SDNode *N);
/// isUNPCKH_v_undef_Mask - Special case of isUNPCKHMask for canonical form
/// of vector_shuffle v, v, <2, 6, 3, 7>, i.e. vector_shuffle v, undef,
/// <2, 2, 3, 3>
bool isUNPCKH_v_undef_Mask(SDNode *N);
/// isMOVLMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to MOVSS,
/// MOVSD, and MOVD, i.e. setting the lowest element.
bool isMOVLMask(SDNode *N);
/// isMOVSHDUPMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to MOVSHDUP.
bool isMOVSHDUPMask(SDNode *N);
/// isMOVSLDUPMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to MOVSLDUP.
bool isMOVSLDUPMask(SDNode *N);
/// isSplatMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a splat of a single element.
bool isSplatMask(SDNode *N);
/// isSplatLoMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a splat of element zero.
bool isSplatLoMask(SDNode *N);
/// isMOVDDUPMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to MOVDDUP.
bool isMOVDDUPMask(SDNode *N);
/// getShuffleSHUFImmediate - Return the appropriate immediate to shuffle
/// the specified isShuffleMask VECTOR_SHUFFLE mask with PSHUF* and SHUFP*
/// instructions.
unsigned getShuffleSHUFImmediate(SDNode *N);
/// getShufflePSHUFHWImmediate - Return the appropriate immediate to shuffle
/// the specified isShuffleMask VECTOR_SHUFFLE mask with PSHUFHW
/// instructions.
unsigned getShufflePSHUFHWImmediate(SDNode *N);
/// getShufflePSHUFLWImmediate - Return the appropriate immediate to shuffle
/// the specified isShuffleMask VECTOR_SHUFFLE mask with PSHUFLW
/// instructions.
unsigned getShufflePSHUFLWImmediate(SDNode *N);
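// For the four-element forms, the immediate packs four 2-bit source
// indices, lowest element in the low bits. Worked example (illustrative):
// the mask <3, 1, 0, 2> encodes as
//
//   3 | (1 << 2) | (0 << 4) | (2 << 6) == 0x87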
}
//===--------------------------------------------------------------------===//
// X86TargetLowering - X86 Implementation of the TargetLowering interface
class X86TargetLowering : public TargetLowering {
int VarArgsFrameIndex; // FrameIndex for start of varargs area.
int RegSaveFrameIndex; // X86-64 vararg func register save area.
unsigned VarArgsGPOffset; // X86-64 vararg func int reg offset.
unsigned VarArgsFPOffset; // X86-64 vararg func fp reg offset.
int BytesToPopOnReturn; // Number of arg bytes ret should pop.
int BytesCallerReserves; // Number of arg bytes caller makes.
public:
explicit X86TargetLowering(X86TargetMachine &TM);
/// getPICJumpTableRelocBase - Returns the relocation base for the given PIC
/// jumptable.
SDValue getPICJumpTableRelocBase(SDValue Table,
SelectionDAG &DAG) const;
// Return the number of bytes that a function should pop when it returns (in
// addition to the space used by the return address).
//
unsigned getBytesToPopOnReturn() const { return BytesToPopOnReturn; }
// Return the number of bytes that the caller reserves for arguments passed
// to this function.
unsigned getBytesCallerReserves() const { return BytesCallerReserves; }
/// getStackPtrReg - Return the stack pointer register we are using: either
/// ESP or RSP.
unsigned getStackPtrReg() const { return X86StackPtr; }
/// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
/// function arguments in the caller parameter area. For X86, aggregates
/// that contain SSE vectors are placed at 16-byte boundaries while the rest
/// are at 4-byte boundaries.
virtual unsigned getByValTypeAlignment(const Type *Ty) const;
/// getOptimalMemOpType - Returns the target specific optimal type for load
/// and store operations as a result of memset, memcpy, and memmove
/// lowering. It returns MVT::iAny if SelectionDAG should be responsible for
/// determining it.
virtual
MVT getOptimalMemOpType(uint64_t Size, unsigned Align,
bool isSrcConst, bool isSrcStr) const;
/// LowerOperation - Provide custom lowering hooks for some operations.
///
virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG);
/// ReplaceNodeResults - Replace the results of a node with an illegal
/// result type with new values built out of custom code.
///
virtual void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue>&Results,
SelectionDAG &DAG);
virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;
virtual MachineBasicBlock *EmitInstrWithCustomInserter(MachineInstr *MI,
MachineBasicBlock *MBB) const;
/// getTargetNodeName - This method returns the name of a target specific
/// DAG node.
virtual const char *getTargetNodeName(unsigned Opcode) const;
/// getSetCCResultType - Return the ISD::SETCC ValueType
virtual MVT getSetCCResultType(MVT VT) const;
/// computeMaskedBitsForTargetNode - Determine which of the bits specified
/// in Mask are known to be either zero or one and return them in the
/// KnownZero/KnownOne bitsets.
virtual void computeMaskedBitsForTargetNode(const SDValue Op,
const APInt &Mask,
APInt &KnownZero,
APInt &KnownOne,
const SelectionDAG &DAG,
unsigned Depth = 0) const;
virtual bool
isGAPlusOffset(SDNode *N, GlobalValue* &GA, int64_t &Offset) const;
SDValue getReturnAddressFrameIndex(SelectionDAG &DAG);
ConstraintType getConstraintType(const std::string &Constraint) const;
std::vector<unsigned>
getRegClassForInlineAsmConstraint(const std::string &Constraint,
MVT VT) const;
virtual const char *LowerXConstraint(MVT ConstraintVT) const;
/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
/// vector. If it is invalid, don't add anything to Ops. If hasMemory is
/// true, it means one of the asm constraints of the inline asm instruction
/// being processed is 'm'.
virtual void LowerAsmOperandForConstraint(SDValue Op,
char ConstraintLetter,
bool hasMemory,
std::vector<SDValue> &Ops,
SelectionDAG &DAG) const;
/// getRegForInlineAsmConstraint - Given a physical register constraint
/// (e.g. {edx}), return the register number and the register class for the
/// register. This should only be used for C_Register constraints. On
/// error, this returns a register number of 0.
std::pair<unsigned, const TargetRegisterClass*>
getRegForInlineAsmConstraint(const std::string &Constraint,
MVT VT) const;
/// isLegalAddressingMode - Return true if the addressing mode represented
/// by AM is legal for this target, for a load/store of the specified type.
virtual bool isLegalAddressingMode(const AddrMode &AM, const Type *Ty)const;
/// isTruncateFree - Return true if it's free to truncate a value of
/// type Ty1 to type Ty2. e.g. On x86 it's free to truncate an i32 value in
/// register EAX to i16 by referencing its sub-register AX.
virtual bool isTruncateFree(const Type *Ty1, const Type *Ty2) const;
virtual bool isTruncateFree(MVT VT1, MVT VT2) const;
/// isZExtFree - Return true if any actual instruction that defines a
/// value of type Ty1 implicitly zero-extends the value to Ty2 in the result
/// register. This does not necessarily include registers defined in
/// unknown ways, such as incoming arguments, or copies from unknown
/// virtual registers. Also, if isTruncateFree(Ty2, Ty1) is true, this
/// does not necessarily apply to truncate instructions. e.g. on x86-64,
/// all instructions that define 32-bit values implicitly zero-extend the
/// result out to 64 bits.
virtual bool isZExtFree(const Type *Ty1, const Type *Ty2) const;
virtual bool isZExtFree(MVT VT1, MVT VT2) const;
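// Illustration (not from this file): on x86-64, "movl %edi, %eax" already
// zeroes bits 63:32 of %rax, so no separate zero-extending instruction is
// needed afterwards.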
/// isShuffleMaskLegal - Targets can use this to indicate that they only
/// support *some* VECTOR_SHUFFLE operations, those with specific masks.
/// By default, if a target supports the VECTOR_SHUFFLE node, all mask
/// values are assumed to be legal.
virtual bool isShuffleMaskLegal(SDValue Mask, MVT VT) const;
/// isVectorClearMaskLegal - Similar to isShuffleMaskLegal. Targets can use
/// this to indicate if there is a suitable VECTOR_SHUFFLE that can be used
/// to replace a VAND with a constant pool entry.
virtual bool isVectorClearMaskLegal(const std::vector<SDValue> &BVOps,
MVT EVT, SelectionDAG &DAG) const;
/// ShouldShrinkFPConstant - If true, then instruction selection should
/// seek to shrink the FP constant of the specified type to a smaller type
/// in order to save space and/or reduce runtime.
virtual bool ShouldShrinkFPConstant(MVT VT) const {
// Don't shrink FP constpool if SSE2 is available since cvtss2sd is more
// expensive than a straight movsd. On the other hand, it's important to
// shrink long double fp constant since fldt is very slow.
return !X86ScalarSSEf64 || VT == MVT::f80;
}
/// IsEligibleForTailCallOptimization - Check whether the call is eligible
/// for tail call optimization. Targets that want to do tail call
/// optimization should implement this function.
virtual bool IsEligibleForTailCallOptimization(CallSDNode *TheCall,
SDValue Ret,
SelectionDAG &DAG) const;
virtual const X86Subtarget* getSubtarget() {
return Subtarget;
}
/// isScalarFPTypeInSSEReg - Return true if the specified scalar FP type is
/// computed in an SSE register, not on the X87 floating point stack.
bool isScalarFPTypeInSSEReg(MVT VT) const {
return (VT == MVT::f64 && X86ScalarSSEf64) || // f64 is in SSE regs with SSE2
(VT == MVT::f32 && X86ScalarSSEf32); // f32 is in SSE regs with SSE1
}
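// Lowering code typically branches on this predicate, e.g. (sketch):
//
//   if (isScalarFPTypeInSSEReg(Op.getValueType()))
//     ...use the SSE cvttss2si / cvttsd2si path...
//   else
//     ...fall back to the x87 FP_TO_INT*_IN_MEM path...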
/// getWidenVectorType - Given a vector type, return the type to widen to
/// (e.g., v7i8 to v8i8). If the vector type is legal, it returns itself.
/// If there is no vector type that we want to widen to, returns MVT::Other.
/// When and where to widen is target dependent, based on the cost of
/// scalarizing vs. using the wider vector type.
virtual MVT getWidenVectorType(MVT VT) const;
/// createFastISel - This method returns a target specific FastISel object,
/// or null if the target does not support "fast" ISel.
virtual FastISel *
createFastISel(MachineFunction &mf,
MachineModuleInfo *mmi, DwarfWriter *dw,
DenseMap<const Value *, unsigned> &,
DenseMap<const BasicBlock *, MachineBasicBlock *> &,
DenseMap<const AllocaInst *, int> &
#ifndef NDEBUG
, SmallSet<Instruction*, 8> &
#endif
);
private:
/// Subtarget - Keep a pointer to the X86Subtarget around so that we can
/// make the right decision when generating code for different targets.
const X86Subtarget *Subtarget;
const X86RegisterInfo *RegInfo;
const TargetData *TD;
/// X86StackPtr - X86 physical register used as stack ptr.
unsigned X86StackPtr;
/// X86ScalarSSEf32, X86ScalarSSEf64 - Select between SSE or x87
/// floating point ops.
/// When SSE is available, use it for f32 operations.
/// When SSE2 is available, use it for f64 operations.
bool X86ScalarSSEf32;
bool X86ScalarSSEf64;
SDNode *LowerCallResult(SDValue Chain, SDValue InFlag, CallSDNode *TheCall,
unsigned CallingConv, SelectionDAG &DAG);
SDValue LowerMemArgument(SDValue Op, SelectionDAG &DAG,
const CCValAssign &VA, MachineFrameInfo *MFI,
unsigned CC, SDValue Root, unsigned i);
SDValue LowerMemOpCallTo(CallSDNode *TheCall, SelectionDAG &DAG,
const SDValue &StackPtr,
const CCValAssign &VA, SDValue Chain,
SDValue Arg, ISD::ArgFlagsTy Flags);
// Call lowering helpers.
bool IsCalleePop(bool isVarArg, unsigned CallingConv);
bool CallRequiresGOTPtrInReg(bool Is64Bit, bool IsTailCall);
bool CallRequiresFnAddressInReg(bool Is64Bit, bool IsTailCall);
SDValue EmitTailCallLoadRetAddr(SelectionDAG &DAG, SDValue &OutRetAddr,
SDValue Chain, bool IsTailCall, bool Is64Bit,
int FPDiff, DebugLoc dl);
CCAssignFn *CCAssignFnForNode(unsigned CallingConv) const;
NameDecorationStyle NameDecorationForFORMAL_ARGUMENTS(SDValue Op);
unsigned GetAlignedArgumentStackSize(unsigned StackSize, SelectionDAG &DAG);
std::pair<SDValue,SDValue> FP_TO_SINTHelper(SDValue Op,
SelectionDAG &DAG);
SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG);
SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG);
SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG);
SDValue LowerEXTRACT_VECTOR_ELT_SSE4(SDValue Op, SelectionDAG &DAG);
SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG);
SDValue LowerINSERT_VECTOR_ELT_SSE4(SDValue Op, SelectionDAG &DAG);
SDValue LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG);
SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG);
SDValue LowerGlobalAddress(const GlobalValue *GV, DebugLoc dl,
int64_t Offset, SelectionDAG &DAG) const;
SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG);
SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG);
SDValue LowerExternalSymbol(SDValue Op, SelectionDAG &DAG);
SDValue LowerShift(SDValue Op, SelectionDAG &DAG);
SDValue LowerSINT_TO_FP(SDValue Op, SelectionDAG &DAG);
SDValue LowerUINT_TO_FP(SDValue Op, SelectionDAG &DAG);
SDValue LowerUINT_TO_FP_i64(SDValue Op, SelectionDAG &DAG);
SDValue LowerUINT_TO_FP_i32(SDValue Op, SelectionDAG &DAG);
SDValue LowerFP_TO_SINT(SDValue Op, SelectionDAG &DAG);
SDValue LowerFABS(SDValue Op, SelectionDAG &DAG);
SDValue LowerFNEG(SDValue Op, SelectionDAG &DAG);
SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG);
SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG);
SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG);
SDValue LowerSELECT(SDValue Op, SelectionDAG &DAG);
SDValue LowerBRCOND(SDValue Op, SelectionDAG &DAG);
SDValue LowerMEMSET(SDValue Op, SelectionDAG &DAG);
SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG);
SDValue LowerCALL(SDValue Op, SelectionDAG &DAG);
SDValue LowerRET(SDValue Op, SelectionDAG &DAG);
SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG);
SDValue LowerFORMAL_ARGUMENTS(SDValue Op, SelectionDAG &DAG);
SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG);
SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG);
SDValue LowerVACOPY(SDValue Op, SelectionDAG &DAG);
SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG);
SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG);
SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG);
SDValue LowerFRAME_TO_ARGS_OFFSET(SDValue Op, SelectionDAG &DAG);
SDValue LowerEH_RETURN(SDValue Op, SelectionDAG &DAG);
SDValue LowerTRAMPOLINE(SDValue Op, SelectionDAG &DAG);
SDValue LowerFLT_ROUNDS_(SDValue Op, SelectionDAG &DAG);
SDValue LowerCTLZ(SDValue Op, SelectionDAG &DAG);
SDValue LowerCTTZ(SDValue Op, SelectionDAG &DAG);
SDValue LowerMUL_V2I64(SDValue Op, SelectionDAG &DAG);
SDValue LowerXALUO(SDValue Op, SelectionDAG &DAG);
SDValue LowerCMP_SWAP(SDValue Op, SelectionDAG &DAG);
SDValue LowerLOAD_SUB(SDValue Op, SelectionDAG &DAG);
SDValue LowerREADCYCLECOUNTER(SDValue Op, SelectionDAG &DAG);
void ReplaceATOMIC_BINARY_64(SDNode *N, SmallVectorImpl<SDValue> &Results,
SelectionDAG &DAG, unsigned NewOp);
SDValue EmitTargetCodeForMemset(SelectionDAG &DAG, DebugLoc dl,
SDValue Chain,
SDValue Dst, SDValue Src,
SDValue Size, unsigned Align,
const Value *DstSV, uint64_t DstSVOff);
SDValue EmitTargetCodeForMemcpy(SelectionDAG &DAG, DebugLoc dl,
SDValue Chain,
SDValue Dst, SDValue Src,
SDValue Size, unsigned Align,
bool AlwaysInline,
const Value *DstSV, uint64_t DstSVOff,
const Value *SrcSV, uint64_t SrcSVOff);
/// Utility function to emit atomic bitwise operations (and, or, xor).
/// It takes the bitwise instruction to expand, the associated machine basic
/// block, and the associated X86 opcodes for reg/reg and reg/imm.
MachineBasicBlock *EmitAtomicBitwiseWithCustomInserter(
MachineInstr *BInstr,
MachineBasicBlock *BB,
unsigned regOpc,
unsigned immOpc,
unsigned loadOpc,
unsigned cxchgOpc,
unsigned copyOpc,
unsigned notOpc,
unsigned EAXreg,
TargetRegisterClass *RC,
bool invSrc = false) const;
MachineBasicBlock *EmitAtomicBit6432WithCustomInserter(
MachineInstr *BInstr,
MachineBasicBlock *BB,
unsigned regOpcL,
unsigned regOpcH,
unsigned immOpcL,
unsigned immOpcH,
bool invSrc = false) const;
/// Utility function to emit atomic min and max. It takes the min/max
/// instruction to expand, the associated basic block, and the associated
/// cmov opcode for moving the min or max value.
MachineBasicBlock *EmitAtomicMinMaxWithCustomInserter(MachineInstr *BInstr,
MachineBasicBlock *BB,
unsigned cmovOpc) const;
/// Emit nodes that will be selected as "test Op0,Op0", or something
/// equivalent, for use with the given x86 condition code.
SDValue EmitTest(SDValue Op0, unsigned X86CC, SelectionDAG &DAG);
/// Emit nodes that will be selected as "cmp Op0,Op1", or something
/// equivalent, for use with the given x86 condition code.
SDValue EmitCmp(SDValue Op0, SDValue Op1, unsigned X86CC,
SelectionDAG &DAG);
};
namespace X86 {
FastISel *createFastISel(MachineFunction &mf,
MachineModuleInfo *mmi, DwarfWriter *dw,
DenseMap<const Value *, unsigned> &,
DenseMap<const BasicBlock *, MachineBasicBlock *> &,
DenseMap<const AllocaInst *, int> &
#ifndef NDEBUG
, SmallSet<Instruction*, 8> &
#endif
);
}
}
#endif // X86ISELLOWERING_H